Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 17:40:38 +00:00)

Compare commits: fix/lifecy...1.0.0-alph (18 commits)
Commits in this range:

- 3c14947878
- 2924b4e463
- b4ba62fa33
- a5b3522880
- 056a0ee62b
- 4603ece708
- eb33e82b56
- c7e2b4d8e7
- 71c59d1187
- e3a0a07495
- 136db7e0c9
- 2e3c5f695a
- fe9609fd17
- f2d79b485e
- 3d6681c9e5
- 07a26fadad
- a083fca17a
- 89c3ae77a4
.config/make/build-docker-buildx-dev.mak (new file, 64 lines)
@@ -0,0 +1,64 @@
## —— Development/Source builds using direct buildx commands ---------------------------------------

.PHONY: docker-dev
docker-dev: ## Build dev multi-arch image (cannot load locally)
	@echo "🏗️ Building multi-architecture development Docker images with buildx..."
	@echo "💡 This builds from source code and is intended for local development and testing"
	@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
	$(DOCKER_CLI) buildx build \
		--platform linux/amd64,linux/arm64 \
		--file $(DOCKERFILE_SOURCE) \
		--tag rustfs:source-latest \
		--tag rustfs:dev-latest \
		.

.PHONY: docker-dev-local
docker-dev-local: ## Build dev single-arch image (local load)
	@echo "🏗️ Building single-architecture development Docker image for local use..."
	@echo "💡 This builds from source code for the current platform and loads locally"
	$(DOCKER_CLI) buildx build \
		--file $(DOCKERFILE_SOURCE) \
		--tag rustfs:source-latest \
		--tag rustfs:dev-latest \
		--load \
		.

.PHONY: docker-dev-push
docker-dev-push: ## Build and push multi-arch development image # e.g (make docker-dev-push REGISTRY=xxx)
	@if [ -z "$(REGISTRY)" ]; then \
		echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
		exit 1; \
	fi
	@echo "🚀 Building and pushing multi-architecture development Docker images..."
	@echo "💡 Pushing to registry: $(REGISTRY)"
	$(DOCKER_CLI) buildx build \
		--platform linux/amd64,linux/arm64 \
		--file $(DOCKERFILE_SOURCE) \
		--tag $(REGISTRY)/rustfs:source-latest \
		--tag $(REGISTRY)/rustfs:dev-latest \
		--push \
		.

.PHONY: dev-env-start
dev-env-start: ## Start development container environment
	@echo "🚀 Starting development environment..."
	$(DOCKER_CLI) buildx build \
		--file $(DOCKERFILE_SOURCE) \
		--tag rustfs:dev \
		--load \
		.
	$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
	$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
	$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
		-p 9010:9010 -p 9000:9000 \
		-v $(shell pwd):/workspace \
		-it rustfs:dev

.PHONY: dev-env-stop
dev-env-stop: ## Stop development container environment
	@echo "🛑 Stopping development environment..."
	$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
	$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true

.PHONY: dev-env-restart
dev-env-restart: dev-env-stop dev-env-start ## Restart development container environment
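Taken together, these targets form the local Docker loop: build a loadable single-arch image, run it as a throwaway dev container, and push multi-arch images once a registry is available. A typical session, using only the targets defined above (the registry value is a placeholder):

    make docker-dev-local                            # build rustfs:dev-latest for the current platform
    make dev-env-start                               # run it with ports 9000/9010, repo mounted at /workspace
    make dev-env-stop                                # tear the container down again
    make docker-dev-push REGISTRY=ghcr.io/your-user  # placeholder registry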
.config/make/build-docker-buildx-production.mak (new file, 41 lines)
@@ -0,0 +1,41 @@
## —— Production builds using docker buildx (for CI/CD and production) -----------------------------

.PHONY: docker-buildx
docker-buildx: ## Build production multi-arch image (no push)
	@echo "🏗️ Building multi-architecture production Docker images with buildx..."
	./docker-buildx.sh

.PHONY: docker-buildx-push
docker-buildx-push: ## Build and push production multi-arch image
	@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
	./docker-buildx.sh --push

.PHONY: docker-buildx-version
docker-buildx-version: ## Build and version production multi-arch image # e.g (make docker-buildx-version VERSION=v1.0.0)
	@if [ -z "$(VERSION)" ]; then \
		echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
		exit 1; \
	fi
	@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
	./docker-buildx.sh --release $(VERSION)

.PHONY: docker-buildx-push-version
docker-buildx-push-version: ## Build and version and push production multi-arch image # e.g (make docker-buildx-push-version VERSION=v1.0.0)
	@if [ -z "$(VERSION)" ]; then \
		echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
		exit 1; \
	fi
	@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
	./docker-buildx.sh --release $(VERSION) --push

.PHONY: docker-buildx-production-local
docker-buildx-production-local: ## Build production single-arch image locally
	@echo "🏗️ Building single-architecture production Docker image locally..."
	@echo "💡 Alternative to docker-buildx.sh for local testing"
	$(DOCKER_CLI) buildx build \
		--file $(DOCKERFILE_PRODUCTION) \
		--tag rustfs:production-latest \
		--tag rustfs:latest \
		--load \
		--build-arg RELEASE=latest \
		.
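The versioned targets are thin wrappers over docker-buildx.sh, so the make invocation and the direct script call below should be equivalent (v1.0.0 is an illustrative version):

    make docker-buildx-push-version VERSION=v1.0.0
    # expands to:
    ./docker-buildx.sh --release v1.0.0 --push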
.config/make/build-docker-production.mak (new file, 16 lines)
@@ -0,0 +1,16 @@
## —— Single Architecture Docker Builds (Traditional) ----------------------------------------------

.PHONY: docker-build-production
docker-build-production: ## Build single-arch production image
	@echo "🏗️ Building single-architecture production Docker image..."
	@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
	$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .

.PHONY: docker-build-source
docker-build-source: ## Build single-arch source image
	@echo "🏗️ Building single-architecture source Docker image..."
	@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
	DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
		--build-arg BUILDKIT_INLINE_CACHE=1 \
		-f $(DOCKERFILE_SOURCE) -t rustfs:source .
.config/make/build-docker.mak (new file, 22 lines)
@@ -0,0 +1,22 @@
## —— Docker-based build (alternative approach) ----------------------------------------------------

# Usage: make BUILD_OS=ubuntu22.04 build-docker
# Output: target/ubuntu22.04/release/rustfs

.PHONY: build-docker
build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
build-docker: ## Build using Docker container # e.g (make build-docker BUILD_OS=ubuntu22.04)
	@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
	$(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
	$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)

.PHONY: docker-inspect-multiarch
docker-inspect-multiarch: ## Check image architecture support
	@if [ -z "$(IMAGE)" ]; then \
		echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
		exit 1; \
	fi
	@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
	docker buildx imagetools inspect $(IMAGE)
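build-docker relies on GNU make target-specific variables: the three `build-docker: VAR = ...` lines set those variables only while this target and its recipe run. Example invocations, with BUILD_OS and IMAGE values as illustrations:

    make build-docker BUILD_OS=ubuntu22.04                    # binary lands in target/ubuntu22.04/release/rustfs
    make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest  # list architectures in a published manifest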
.config/make/build.mak (new file, 55 lines)
@@ -0,0 +1,55 @@
## —— Local Native Build using build-rustfs.sh script (Recommended) --------------------------------

.PHONY: build
build: ## Build RustFS binary (includes console by default)
	@echo "🔨 Building RustFS using build-rustfs.sh script..."
	./build-rustfs.sh

.PHONY: build-dev
build-dev: ## Build RustFS in Development mode
	@echo "🔨 Building RustFS in development mode..."
	./build-rustfs.sh --dev

.PHONY: build-musl
build-musl: ## Build x86_64 musl version
	@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
	./build-rustfs.sh --platform x86_64-unknown-linux-musl

.PHONY: build-gnu
build-gnu: ## Build x86_64 GNU version
	@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
	./build-rustfs.sh --platform x86_64-unknown-linux-gnu

.PHONY: build-musl-arm64
build-musl-arm64: ## Build aarch64 musl version
	@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
	./build-rustfs.sh --platform aarch64-unknown-linux-musl

.PHONY: build-gnu-arm64
build-gnu-arm64: ## Build aarch64 GNU version
	@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
	./build-rustfs.sh --platform aarch64-unknown-linux-gnu

.PHONY: build-cross-all
build-cross-all: core-deps ## Build binaries for all architectures
	@echo "🔧 Building all target architectures..."
	@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
	@echo "🔨 Generating protobuf code..."
	cargo run --bin gproto || true

	@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
	./build-rustfs.sh --platform x86_64-unknown-linux-musl

	@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
	./build-rustfs.sh --platform x86_64-unknown-linux-gnu

	@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
	./build-rustfs.sh --platform aarch64-unknown-linux-musl

	@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
	./build-rustfs.sh --platform aarch64-unknown-linux-gnu
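Every target here delegates to build-rustfs.sh with a --platform flag, so a single cross-build and the full matrix look like:

    make build-musl-arm64    # == ./build-rustfs.sh --platform aarch64-unknown-linux-musl
    make build-cross-all     # regenerates protobuf code, then builds all four Linux triples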
.config/make/check.mak (new file, 24 lines)
@@ -0,0 +1,24 @@
## —— Check and Inform Dependencies ----------------------------------------------------------------

# Fatal check
# Checks all required dependencies and exits with error if not found
# (e.g., cargo, rustfmt)
check-%:
	@command -v $* >/dev/null 2>&1 || { \
		echo >&2 "❌ '$*' is not installed."; \
		exit 1; \
	}

# Warning-only check
# Checks for optional dependencies and issues a warning if not found
# (e.g., cargo-nextest for enhanced testing)
warn-%:
	@command -v $* >/dev/null 2>&1 || { \
		echo >&2 "⚠️ '$*' is not installed."; \
	}

# For checking dependencies use check-<dep-name> or warn-<dep-name>
.PHONY: core-deps fmt-deps test-deps
core-deps: check-cargo ## Check core dependencies
fmt-deps: check-rustfmt ## Check lint and formatting dependencies
test-deps: warn-cargo-nextest ## Check tests dependencies
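check-% and warn-% are GNU make pattern rules: the stem $* becomes the tool name, so adding a new dependency check needs no new recipe. A sketch with a hypothetical tool name jq:

    # `make check-jq` matches check-% with $* = jq, so the recipe is effectively:
    command -v jq >/dev/null 2>&1 || { echo >&2 "❌ 'jq' is not installed."; exit 1; }
    # `make warn-jq` runs the same probe but only warns and keeps going.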
.config/make/deploy.mak (new file, 6 lines)
@@ -0,0 +1,6 @@
## —— Deploy using dev_deploy.sh script ------------------------------------------------------------

.PHONY: deploy-dev
deploy-dev: build-musl ## Deploy to dev server
	@echo "🚀 Deploying to dev server: $${IP}"
	./scripts/dev_deploy.sh $${IP}
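The recipe reads `$${IP}` in the shell, and the top-level Makefile sets .EXPORT_ALL_VARIABLES, so either form below should reach the script (the address is a documentation-range placeholder):

    make deploy-dev IP=192.0.2.10
    IP=192.0.2.10 make deploy-dev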
.config/make/help.mak (new file, 38 lines)
@@ -0,0 +1,38 @@
## —— Help, Help Build and Help Docker -------------------------------------------------------------

.PHONY: help
help: ## Shows This Help Menu
	echo -e "$$HEADER"
	grep -E '(^[a-zA-Z0-9_-]+:.*?## .*$$)|(^## )' $(MAKEFILE_LIST) | sed 's/^[^:]*://g' | awk 'BEGIN {FS = ":.*?## | #"} ; {printf "${cyan}%-30s${reset} ${white}%s${reset} ${green}%s${reset}\n", $$1, $$2, $$3}' | sed -e 's/\[36m##/\n[32m##/'

.PHONY: help-build
help-build: ## Shows RustFS build help
	@echo ""
	@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"
	@echo ""
	@echo "🔧 Direct usage of build-rustfs.sh script:"
	@echo ""
	@echo "  ./build-rustfs.sh --help                               # View script help"
	@echo "  ./build-rustfs.sh --no-console                         # Build without console resources"
	@echo "  ./build-rustfs.sh --force-console-update               # Force update console resources"
	@echo "  ./build-rustfs.sh --dev                                # Development mode build"
	@echo "  ./build-rustfs.sh --sign                               # Sign binary files"
	@echo "  ./build-rustfs.sh --platform x86_64-unknown-linux-gnu  # Specify target platform"
	@echo "  ./build-rustfs.sh --skip-verification                  # Skip binary verification"
	@echo ""

.PHONY: help-docker
help-docker: ## Shows docker environment and suggestion help
	@echo ""
	@echo "📋 Environment Variables:"
	@echo "  REGISTRY            Image registry address (required for push)"
	@echo "  DOCKERHUB_USERNAME  Docker Hub username"
	@echo "  DOCKERHUB_TOKEN     Docker Hub access token"
	@echo "  GITHUB_TOKEN        GitHub access token"
	@echo ""
	@echo "💡 Suggestions:"
	@echo "  Production use: Use docker-buildx* commands (based on precompiled binaries)"
	@echo "  Local development: Use docker-dev* commands (build from source)"
	@echo "  Development environment: Use dev-env-* commands to manage dev containers"
	@echo ""
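The help target is the usual self-documenting-Makefile pattern: grep collects every `target: ## description` line from $(MAKEFILE_LIST), and awk splits on the `## ` marker into columns. A stripped-down sketch of the same idea, without the color codes:

    help:
    	@grep -E '^[a-zA-Z0-9_-]+:.*?## ' $(MAKEFILE_LIST) \
    		| awk 'BEGIN {FS = ":.*?## "}; {printf "%-30s %s\n", $$1, $$2}'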
.config/make/lint-fmt.mak (new file, 22 lines)
@@ -0,0 +1,22 @@
## —— Code quality and Formatting ------------------------------------------------------------------

.PHONY: fmt
fmt: core-deps fmt-deps ## Format code
	@echo "🔧 Formatting code..."
	cargo fmt --all

.PHONY: fmt-check
fmt-check: core-deps fmt-deps ## Check code formatting
	@echo "📝 Checking code formatting..."
	cargo fmt --all --check

.PHONY: clippy-check
clippy-check: core-deps ## Run clippy checks
	@echo "🔍 Running clippy checks..."
	cargo clippy --fix --allow-dirty
	cargo clippy --all-targets --all-features -- -D warnings

.PHONY: compilation-check
compilation-check: core-deps ## Run compilation check
	@echo "🔨 Running compilation check..."
	cargo check --all-targets
.config/make/pre-commit.mak (new file, 11 lines)
@@ -0,0 +1,11 @@
## —— Pre Commit Checks ----------------------------------------------------------------------------

.PHONY: setup-hooks
setup-hooks: ## Set up git hooks
	@echo "🔧 Setting up git hooks..."
	chmod +x .git/hooks/pre-commit
	@echo "✅ Git hooks setup complete!"

.PHONY: pre-commit
pre-commit: fmt clippy-check compilation-check test ## Run pre-commit checks
	@echo "✅ All pre-commit checks passed!"
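These fragments are not standalone: the top-level Makefile (see its diff below) pulls every one of them in via `-include $(addsuffix /*.mak, $(shell find .config/make -type d))`, and the leading dash keeps make going if the directory is absent. Adding a new command group is then just dropping a file in place; a hypothetical example fragment:

    # .config/make/bench.mak (hypothetical): picked up automatically by the -include
    .PHONY: bench
    bench: core-deps ## Run benchmarks
    	cargo bench --workspace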
.config/make/tests.mak (new file, 20 lines)
@@ -0,0 +1,20 @@
## —— Tests and e2e test ---------------------------------------------------------------------------

.PHONY: test
test: core-deps test-deps ## Run all tests
	@echo "🧪 Running tests..."
	@if command -v cargo-nextest >/dev/null 2>&1; then \
		cargo nextest run --all --exclude e2e_test; \
	else \
		echo "ℹ️ cargo-nextest not found; falling back to 'cargo test'"; \
		cargo test --workspace --exclude e2e_test -- --nocapture; \
	fi
	cargo test --all --doc

.PHONY: e2e-server
e2e-server: ## Run e2e-server tests
	sh $(shell pwd)/scripts/run.sh

.PHONY: probe-e2e
probe-e2e: ## Probe e2e tests
	sh $(shell pwd)/scripts/probe.sh
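The test target prefers cargo-nextest and degrades to plain cargo test when it is missing; doc tests always run through cargo because nextest does not execute them. The same guard works standalone in a shell:

    if command -v cargo-nextest >/dev/null 2>&1; then
        cargo nextest run --all --exclude e2e_test
    else
        cargo test --workspace --exclude e2e_test -- --nocapture
    fi
    cargo test --all --doc    # doc tests still need cargo test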
.github/workflows/e2e-mint.yml (vendored, deleted: 260 lines)
@@ -1,260 +0,0 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: e2e-mint

on:
  push:
    branches: [ main ]
    paths:
      - ".github/workflows/e2e-mint.yml"
      - "Dockerfile.source"
      - "rustfs/**"
      - "crates/**"
  workflow_dispatch:
    inputs:
      run-multi:
        description: "Run multi-node Mint as well"
        required: false
        default: "false"

env:
  ACCESS_KEY: rustfsadmin
  SECRET_KEY: rustfsadmin
  RUST_LOG: info
  PLATFORM: linux/amd64

jobs:
  mint-single:
    runs-on: ubicloud-standard-2
    timeout-minutes: 40
    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Enable buildx
        uses: docker/setup-buildx-action@v3

      - name: Build RustFS image (source)
        run: |
          DOCKER_BUILDKIT=1 docker buildx build --load \
            --platform ${PLATFORM} \
            -t rustfs-ci \
            -f Dockerfile.source .

      - name: Create network
        run: |
          docker network inspect rustfs-net >/dev/null 2>&1 || docker network create rustfs-net

      - name: Remove existing rustfs-single (if any)
        run: docker rm -f rustfs-single >/dev/null 2>&1 || true

      - name: Start single RustFS
        run: |
          docker run -d --name rustfs-single \
            --network rustfs-net \
            -e RUSTFS_ADDRESS=0.0.0.0:9000 \
            -e RUSTFS_ACCESS_KEY=$ACCESS_KEY \
            -e RUSTFS_SECRET_KEY=$SECRET_KEY \
            -e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
            -v /tmp/rustfs-single:/data \
            rustfs-ci

      - name: Wait for RustFS ready
        run: |
          for i in {1..30}; do
            if docker exec rustfs-single curl -sf http://localhost:9000/health >/dev/null; then
              exit 0
            fi
            sleep 2
          done
          echo "RustFS did not become ready" >&2
          docker logs rustfs-single || true
          exit 1

      - name: Run Mint (single, S3-only)
        run: |
          mkdir -p artifacts/mint-single
          docker run --rm --network rustfs-net \
            --platform ${PLATFORM} \
            -e SERVER_ENDPOINT=rustfs-single:9000 \
            -e ACCESS_KEY=$ACCESS_KEY \
            -e SECRET_KEY=$SECRET_KEY \
            -e ENABLE_HTTPS=0 \
            -e SERVER_REGION=us-east-1 \
            -e RUN_ON_FAIL=1 \
            -e MINT_MODE=core \
            -v ${GITHUB_WORKSPACE}/artifacts/mint-single:/mint/log \
            --entrypoint /mint/mint.sh \
            minio/mint:edge \
            awscli aws-sdk-go aws-sdk-java-v2 aws-sdk-php aws-sdk-ruby s3cmd s3select

      - name: Collect RustFS logs
        run: |
          mkdir -p artifacts/rustfs-single
          docker logs rustfs-single > artifacts/rustfs-single/rustfs.log || true

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: mint-single
          path: artifacts/**

  mint-multi:
    if: github.event_name == 'workflow_dispatch' && github.event.inputs.run-multi == 'true'
    needs: mint-single
    runs-on: ubicloud-standard-2
    timeout-minutes: 60
    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Enable buildx
        uses: docker/setup-buildx-action@v3

      - name: Build RustFS image (source)
        run: |
          DOCKER_BUILDKIT=1 docker buildx build --load \
            --platform ${PLATFORM} \
            -t rustfs-ci \
            -f Dockerfile.source .

      - name: Prepare cluster compose
        run: |
          cat > compose.yml <<'EOF'
          version: '3.8'
          services:
            rustfs1:
              image: rustfs-ci
              hostname: rustfs1
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs1-data:/data
            rustfs2:
              image: rustfs-ci
              hostname: rustfs2
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs2-data:/data
            rustfs3:
              image: rustfs-ci
              hostname: rustfs3
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs3-data:/data
            rustfs4:
              image: rustfs-ci
              hostname: rustfs4
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs4-data:/data
            lb:
              image: haproxy:2.9
              hostname: lb
              networks: [rustfs-net]
              ports:
                - "9000:9000"
              volumes:
                - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
          networks:
            rustfs-net:
              name: rustfs-net
          volumes:
            rustfs1-data:
            rustfs2-data:
            rustfs3-data:
            rustfs4-data:
          EOF

          cat > haproxy.cfg <<'EOF'
          defaults
            mode http
            timeout connect 5s
            timeout client 30s
            timeout server 30s

          frontend fe_s3
            bind *:9000
            default_backend be_s3

          backend be_s3
            balance roundrobin
            server s1 rustfs1:9000 check
            server s2 rustfs2:9000 check
            server s3 rustfs3:9000 check
            server s4 rustfs4:9000 check
          EOF

      - name: Launch cluster
        run: docker compose -f compose.yml up -d

      - name: Wait for LB ready
        run: |
          for i in {1..60}; do
            if docker run --rm --network rustfs-net curlimages/curl -sf http://lb:9000/health >/dev/null; then
              exit 0
            fi
            sleep 2
          done
          echo "LB or backend not ready" >&2
          docker compose -f compose.yml logs --tail=200 || true
          exit 1

      - name: Run Mint (multi, S3-only)
        run: |
          mkdir -p artifacts/mint-multi
          docker run --rm --network rustfs-net \
            --platform ${PLATFORM} \
            -e SERVER_ENDPOINT=lb:9000 \
            -e ACCESS_KEY=$ACCESS_KEY \
            -e SECRET_KEY=$SECRET_KEY \
            -e ENABLE_HTTPS=0 \
            -e SERVER_REGION=us-east-1 \
            -e RUN_ON_FAIL=1 \
            -e MINT_MODE=core \
            -v ${GITHUB_WORKSPACE}/artifacts/mint-multi:/mint/log \
            --entrypoint /mint/mint.sh \
            minio/mint:edge \
            awscli aws-sdk-go aws-sdk-java-v2 aws-sdk-php aws-sdk-ruby s3cmd s3select

      - name: Collect logs
        run: |
          mkdir -p artifacts/cluster
          docker compose -f compose.yml logs --no-color > artifacts/cluster/cluster.log || true

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: mint-multi
          path: artifacts/**
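Although the workflow is deleted here, its single-node path is easy to replay by hand; a minimal sketch using the same image name, network, and credentials the workflow used (run from a repo checkout):

    docker buildx build --load -t rustfs-ci -f Dockerfile.source .
    docker network create rustfs-net
    docker run -d --name rustfs-single --network rustfs-net \
      -e RUSTFS_ADDRESS=0.0.0.0:9000 \
      -e RUSTFS_ACCESS_KEY=rustfsadmin -e RUSTFS_SECRET_KEY=rustfsadmin \
      -e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
      rustfs-ci
    docker run --rm --network rustfs-net \
      -e SERVER_ENDPOINT=rustfs-single:9000 -e ACCESS_KEY=rustfsadmin \
      -e SECRET_KEY=rustfsadmin -e ENABLE_HTTPS=0 -e MINT_MODE=core \
      --entrypoint /mint/mint.sh minio/mint:edge awscli s3cmd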
.gitignore (vendored, 4 lines changed)
@@ -25,7 +25,7 @@ profile.json
*.pb
*.svg
deploy/logs/*.log.*

artifacts/
# s3-tests local artifacts (root directory only)
/s3-tests/
/s3-tests-local/
@@ -33,4 +33,4 @@ deploy/logs/*.log.*
/s3tests.conf.*
*.events
*.audit
*.snappy
*.snappy
.vscode/launch.json (vendored, 34 lines changed)
@@ -99,17 +99,7 @@
      "name": "Debug executable target/debug/rustfs",
      "type": "lldb",
      "request": "launch",
      "cargo": {
        "args": [
          "run",
          "--bin",
          "rustfs",
          "-j",
          "1",
          "--profile",
          "dev"
        ]
      },
      "program": "${workspaceFolder}/target/debug/rustfs",
      "args": [],
      "cwd": "${workspaceFolder}",
      //"stopAtEntry": false,
@@ -117,7 +107,7 @@
      "env": {
        "RUSTFS_ACCESS_KEY": "rustfsadmin",
        "RUSTFS_SECRET_KEY": "rustfsadmin",
        //"RUSTFS_VOLUMES": "./target/volume/test{1...4}",
        "RUSTFS_VOLUMES": "./target/volume/test{1...4}",
        "RUSTFS_ADDRESS": ":9000",
        "RUSTFS_CONSOLE_ENABLE": "true",
        // "RUSTFS_OBS_TRACE_ENDPOINT": "http://127.0.0.1:4318/v1/traces", // jaeger otlp http endpoint
@@ -126,31 +116,11 @@
        // "RUSTFS_COMPRESS_ENABLE": "true",
        "RUSTFS_CONSOLE_ADDRESS": "127.0.0.1:9001",
        "RUSTFS_OBS_LOG_DIRECTORY": "./target/logs",
        "RUST_LOG": "rustfs=debug,ecstore=debug,s3s=debug,iam=debug",
      },
      "sourceLanguages": [
        "rust"
      ],
    },
    {
      "type": "lldb",
      "request": "launch",
      "name": "Debug test_lifecycle_transition_basic",
      "cargo": {
        "args": [
          "test",
          "-p",
          "rustfs-scanner",
          "--test",
          "lifecycle_integration_test",
          "serial_tests::test_lifecycle_transition_basic",
          "-j",
          "1"
        ]
      },
      "args": [],
      "cwd": "${workspaceFolder}"
    },
    {
      "name": "Debug executable target/debug/test",
      "type": "lldb",
Cargo.lock (generated, 210 lines changed)
@@ -24,7 +24,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03d2d54c4d9e7006f132f615a167865bff927a79ca63d8f637237575ce0a9795"
dependencies = [
 "crypto-common 0.2.0-rc.5",
 "inout 0.2.1",
 "inout 0.2.2",
]

[[package]]
@@ -1068,9 +1068,9 @@ dependencies = [

[[package]]
name = "axum-core"
version = "0.5.5"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22"
checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1"
dependencies = [
 "bytes",
 "futures-core",
@@ -1085,27 +1085,6 @@ dependencies = [
 "tracing",
]

[[package]]
name = "axum-extra"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6dfbd6109d91702d55fc56df06aae7ed85c465a7a451db6c0e54a4b9ca5983d1"
dependencies = [
 "axum",
 "axum-core",
 "bytes",
 "futures-core",
 "futures-util",
 "http 1.4.0",
 "http-body 1.0.1",
 "http-body-util",
 "mime",
 "pin-project-lite",
 "tower-layer",
 "tower-service",
 "tracing",
]

[[package]]
name = "axum-server"
version = "0.8.0"
@@ -1185,9 +1164,9 @@ checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a"

[[package]]
name = "bigdecimal"
version = "0.4.9"
version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "560f42649de9fa436b73517378a147ec21f6c997a546581df4b4b31677828934"
checksum = "4d6867f1565b3aad85681f1015055b087fcfd840d6aeee6eee7f2da317603695"
dependencies = [
 "autocfg",
 "libm",
@@ -1459,9 +1438,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

[[package]]
name = "cc"
version = "1.2.50"
version = "1.2.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f50d563227a1c37cc0a263f64eca3334388c01c5e4c4861a9def205c614383c"
checksum = "7a0aeaff4ff1a90589618835a598e545176939b97874f7abc7851caa0618f203"
dependencies = [
 "find-msvc-tools",
 "jobserver",
@@ -1574,7 +1553,7 @@ checksum = "155e4a260750fa4f7754649f049748aacc31db238a358d85fd721002f230f92f"
dependencies = [
 "block-buffer 0.11.0",
 "crypto-common 0.2.0-rc.5",
 "inout 0.2.1",
 "inout 0.2.2",
]

[[package]]
@@ -3367,9 +3346,9 @@ dependencies = [

[[package]]
name = "find-msvc-tools"
version = "0.1.5"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844"
checksum = "645cbb3a84e60b7531617d5ae4e57f7e27308f6445f5abf653209ea76dec8dff"

[[package]]
name = "findshlibs"
@@ -3473,9 +3452,9 @@ dependencies = [

[[package]]
name = "fs-err"
version = "3.2.1"
version = "3.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "824f08d01d0f496b3eca4f001a13cf17690a6ee930043d20817f547455fd98f8"
checksum = "baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7"
dependencies = [
 "autocfg",
 "tokio",
@@ -3629,6 +3608,18 @@ dependencies = [
 "wasm-bindgen",
]

[[package]]
name = "getrandom"
version = "0.4.0-rc.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b99f0d993a2b9b97b9a201193aa8ad21305cde06a3be9a7e1f8f4201e5cc27e"
dependencies = [
 "cfg-if",
 "libc",
 "r-efi",
 "wasip2",
]

[[package]]
name = "getset"
version = "0.1.6"
@@ -4546,9 +4537,9 @@ dependencies = [

[[package]]
name = "inout"
version = "0.2.1"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7357b6e7aa75618c7864ebd0634b115a7218b0615f4cb1df33ac3eca23943d4"
checksum = "4250ce6452e92010fdf7268ccc5d14faa80bb12fc741938534c58f16804e03c7"
dependencies = [
 "hybrid-array",
]
@@ -4576,9 +4567,9 @@ dependencies = [

[[package]]
name = "iri-string"
version = "0.7.9"
version = "0.7.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397"
checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a"
dependencies = [
 "memchr",
 "serde",
@@ -4627,9 +4618,9 @@ dependencies = [

[[package]]
name = "itoa"
version = "1.0.16"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010"
checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"

[[package]]
name = "jemalloc_pprof"
@@ -4817,13 +4808,13 @@ dependencies = [

[[package]]
name = "libredox"
version = "0.1.11"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df15f6eac291ed1cf25865b1ee60399f57e7c227e7f51bdbd4c5270396a9ed50"
checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616"
dependencies = [
 "bitflags 2.10.0",
 "libc",
 "redox_syscall 0.6.0",
 "redox_syscall 0.7.0",
]

[[package]]
@@ -5007,9 +4998,9 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3"

[[package]]
name = "matchit"
version = "0.9.0"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ea5f97102eb9e54ab99fb70bb175589073f554bdadfb74d9bd656482ea73e2a"
checksum = "b3eede3bdf92f3b4f9dc04072a9ce5ab557d5ec9038773bf9ffcd5588b3cc05b"

[[package]]
name = "md-5"
@@ -5517,9 +5508,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"

[[package]]
name = "openssl-probe"
version = "0.1.6"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391"

[[package]]
name = "opentelemetry"
@@ -5761,11 +5752,11 @@ dependencies = [

[[package]]
name = "password-hash"
version = "0.6.0-rc.6"
version = "0.6.0-rc.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "383d290055c99f2dd7dece082088d89494dff6d79277fbac4a7da21c1bf2ab6b"
checksum = "c351143b5ab27b1f1d24712f21ea4d0458fe74f60dd5839297dabcc2ecd24d58"
dependencies = [
 "getrandom 0.3.4",
 "getrandom 0.4.0-rc.0",
 "phc",
]

@@ -5883,12 +5874,12 @@ dependencies = [

[[package]]
name = "phc"
version = "0.6.0-rc.0"
version = "0.6.0-rc.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61f960577aaac5c259bc0866d685ba315c0ed30793c602d7287f54980913863"
checksum = "71d390c5fe8d102c2c18ff39f1e72b9ad5996de282c2d831b0312f56910f5508"
dependencies = [
 "base64ct",
 "getrandom 0.3.4",
 "getrandom 0.4.0-rc.0",
 "subtle",
]

@@ -6098,9 +6089,9 @@ dependencies = [

[[package]]
name = "portable-atomic"
version = "1.12.0"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f59e70c4aef1e55797c2e8fd94a4f2a973fc972cfde0e0b05f683667b0cd39dd"
checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950"

[[package]]
name = "potential_utf"
@@ -6233,9 +6224,9 @@ dependencies = [

[[package]]
name = "proc-macro2"
version = "1.0.103"
version = "1.0.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
checksum = "9695f8df41bb4f3d222c95a67532365f569318332d03d5f3f67f37b20e6ebdf0"
dependencies = [
 "unicode-ident",
]
@@ -6635,9 +6626,9 @@ dependencies = [

[[package]]
name = "redox_syscall"
version = "0.6.0"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec96166dafa0886eb81fe1c0a388bece180fbef2135f97c1e2cf8302e74b43b5"
checksum = "49f3fe0889e69e2ae9e41f4d6c4c0181701d00e4697b356fb1f74173a5e0ee27"
dependencies = [
 "bitflags 2.10.0",
]
@@ -6830,7 +6821,7 @@ dependencies = [
 "pastey",
 "pin-project-lite",
 "rmcp-macros",
 "schemars 1.1.0",
 "schemars 1.2.0",
 "serde",
 "serde_json",
 "thiserror 2.0.17",
@@ -7025,7 +7016,6 @@ dependencies = [
 "atoi",
 "atomic_enum",
 "axum",
 "axum-extra",
 "axum-server",
 "base64",
 "base64-simd",
@@ -7045,7 +7035,7 @@ dependencies = [
 "hyper-util",
 "jemalloc_pprof",
 "libsystemd",
 "matchit 0.9.0",
 "matchit 0.9.1",
 "md5",
 "metrics",
 "mimalloc",
@@ -7061,6 +7051,7 @@ dependencies = [
 "rustfs-audit",
 "rustfs-common",
 "rustfs-config",
 "rustfs-credentials",
 "rustfs-ecstore",
 "rustfs-filemeta",
 "rustfs-iam",
@@ -7074,11 +7065,11 @@ dependencies = [
 "rustfs-rio",
 "rustfs-s3select-api",
 "rustfs-s3select-query",
 "rustfs-scanner",
 "rustfs-targets",
 "rustfs-utils",
 "rustfs-zip",
 "rustls 0.23.35",
 "rustls-pemfile",
 "s3s",
 "serde",
 "serde_json",
@@ -7211,6 +7202,17 @@ dependencies = [
 "const-str",
]

[[package]]
name = "rustfs-credentials"
version = "0.0.5"
dependencies = [
 "base64-simd",
 "rand 0.10.0-rc.5",
 "serde",
 "serde_json",
 "time",
]

[[package]]
name = "rustfs-crypto"
version = "0.0.5"
@@ -7251,15 +7253,12 @@ dependencies = [
 "faster-hex",
 "flatbuffers",
 "futures",
 "futures-util",
 "glob",
 "google-cloud-auth",
 "google-cloud-storage",
 "hex-simd",
 "hmac 0.13.0-rc.3",
 "http 1.4.0",
 "http-body 1.0.1",
 "http-body-util",
 "hyper 1.8.1",
 "hyper-rustls 0.27.7",
 "hyper-util",
@@ -7280,6 +7279,7 @@ dependencies = [
 "rustfs-checksums",
 "rustfs-common",
 "rustfs-config",
 "rustfs-credentials",
 "rustfs-filemeta",
 "rustfs-lock",
 "rustfs-madmin",
@@ -7322,7 +7322,6 @@ dependencies = [
 "bytes",
 "crc-fast",
 "criterion",
 "lazy_static",
 "regex",
 "rmp",
 "rmp-serde",
@@ -7348,6 +7347,7 @@ dependencies = [
 "jsonwebtoken",
 "pollster",
 "rand 0.10.0-rc.5",
 "rustfs-credentials",
 "rustfs-crypto",
 "rustfs-ecstore",
 "rustfs-madmin",
@@ -7431,7 +7431,7 @@ dependencies = [
 "clap",
 "mime_guess",
 "rmcp",
 "schemars 1.1.0",
 "schemars 1.2.0",
 "serde",
 "serde_json",
 "tokio",
@@ -7509,10 +7509,10 @@ dependencies = [
 "jsonwebtoken",
 "moka",
 "pollster",
 "rand 0.10.0-rc.5",
 "regex",
 "reqwest",
 "rustfs-config",
 "rustfs-credentials",
 "rustfs-crypto",
 "serde",
 "serde_json",
@@ -7532,6 +7532,7 @@ dependencies = [
 "flatbuffers",
 "prost 0.14.1",
 "rustfs-common",
 "rustfs-credentials",
 "tonic",
 "tonic-prost",
 "tonic-prost-build",
@@ -7554,6 +7555,7 @@ dependencies = [
 "pin-project-lite",
 "rand 0.10.0-rc.5",
 "reqwest",
 "rustfs-config",
 "rustfs-utils",
 "s3s",
 "serde",
@@ -7609,41 +7611,6 @@ dependencies = [
 "tracing",
]

[[package]]
name = "rustfs-scanner"
version = "0.0.5"
dependencies = [
 "anyhow",
 "async-trait",
 "chrono",
 "futures",
 "heed",
 "http 1.4.0",
 "path-clean",
 "rand 0.10.0-rc.5",
 "rmp-serde",
 "rustfs-ahm",
 "rustfs-common",
 "rustfs-config",
 "rustfs-ecstore",
 "rustfs-filemeta",
 "rustfs-madmin",
 "rustfs-utils",
 "s3s",
 "serde",
 "serde_json",
 "serial_test",
 "tempfile",
 "thiserror 2.0.17",
 "time",
 "tokio",
 "tokio-test",
 "tokio-util",
 "tracing",
 "tracing-subscriber",
 "uuid",
]

[[package]]
name = "rustfs-signer"
version = "0.0.5"
@@ -7834,9 +7801,9 @@ dependencies = [

[[package]]
name = "rustls-native-certs"
version = "0.8.2"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923"
checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63"
dependencies = [
 "openssl-probe",
 "rustls-pki-types",
@@ -7904,14 +7871,14 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"

[[package]]
name = "ryu"
version = "1.0.21"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62049b2877bf12821e8f9ad256ee38fdc31db7387ec2d3b3f403024de2034aea"
checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984"

[[package]]
name = "s3s"
version = "0.13.0-alpha"
source = "git+https://github.com/s3s-project/s3s.git?branch=main#f6198bbf49abe60066fe47cbbefcb7078863b3e9"
source = "git+https://github.com/s3s-project/s3s.git?branch=main#9e41304ed549b89cfb03ede98e9c0d2ac7522051"
dependencies = [
 "arrayvec",
 "async-trait",
@@ -7998,9 +7965,9 @@ dependencies = [

[[package]]
name = "schemars"
version = "1.1.0"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289"
checksum = "54e910108742c57a770f492731f99be216a52fadd361b06c8fb59d74ccc267d2"
dependencies = [
 "chrono",
 "dyn-clone",
@@ -8012,9 +7979,9 @@ dependencies = [

[[package]]
name = "schemars_derive"
version = "1.1.0"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "301858a4023d78debd2353c7426dc486001bddc91ae31a76fb1f55132f7e2633"
checksum = "4908ad288c5035a8eb12cfdf0d49270def0a268ee162b75eeee0f85d155a7c45"
dependencies = [
 "proc-macro2",
 "quote",
@@ -8163,9 +8130,9 @@ dependencies = [

[[package]]
name = "serde_json"
version = "1.0.147"
version = "1.0.148"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6af14725505314343e673e9ecb7cd7e8a36aa9791eb936235a3567cc31447ae4"
checksum = "3084b546a1dd6289475996f182a22aba973866ea8e8b02c51d9f46b1336a22da"
dependencies = [
 "itoa",
 "memchr",
@@ -8218,7 +8185,7 @@ dependencies = [
 "indexmap 1.9.3",
 "indexmap 2.12.1",
 "schemars 0.9.0",
 "schemars 1.1.0",
 "schemars 1.2.0",
 "serde_core",
 "serde_json",
 "serde_with_macros",
@@ -8356,10 +8323,11 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"

[[package]]
name = "signal-hook-registry"
version = "1.4.7"
version = "1.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad"
checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b"
dependencies = [
 "errno",
 "libc",
]

@@ -10393,9 +10361,9 @@ dependencies = [

[[package]]
name = "zeroize_derive"
version = "1.4.2"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e"
dependencies = [
 "proc-macro2",
 "quote",
@@ -10471,9 +10439,9 @@ checksum = "40990edd51aae2c2b6907af74ffb635029d5788228222c4bb811e9351c0caad3"

[[package]]
name = "zmij"
version = "0.1.7"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e404bcd8afdaf006e529269d3e85a743f9480c3cef60034d77860d02964f3ba"
checksum = "e9747e91771f56fd7893e1164abd78febd14a670ceec257caad15e051de35f06"

[[package]]
name = "zopfli"
Cargo.toml (13 lines changed)
@@ -19,6 +19,7 @@ members = [
    "crates/audit",          # Audit target management system with multi-target fan-out
    "crates/common",         # Shared utilities and data structures
    "crates/config",         # Configuration management
    "crates/credentials",    # Credential management system
    "crates/crypto",         # Cryptography and security features
    "crates/ecstore",        # Erasure coding storage implementation
    "crates/e2e_test",       # End-to-end test suite
@@ -34,7 +35,6 @@ members = [
    "crates/targets",        # Target-specific configurations and utilities
    "crates/s3select-api",   # S3 Select API interface
    "crates/s3select-query", # S3 Select query engine
    "crates/scanner",        # Scanner for data integrity checks and health monitoring
    "crates/signer",         # client signer
    "crates/checksums",      # client checksums
    "crates/utils",          # Utility functions and helpers
@@ -72,6 +72,7 @@ rustfs-audit = { path = "crates/audit", version = "0.0.5" }
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
rustfs-common = { path = "crates/common", version = "0.0.5" }
rustfs-config = { path = "./crates/config", version = "0.0.5" }
rustfs-credentials = { path = "crates/credentials", version = "0.0.5" }
rustfs-crypto = { path = "crates/crypto", version = "0.0.5" }
rustfs-ecstore = { path = "crates/ecstore", version = "0.0.5" }
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
@@ -87,7 +88,6 @@ rustfs-protos = { path = "crates/protos", version = "0.0.5" }
rustfs-rio = { path = "crates/rio", version = "0.0.5" }
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
rustfs-s3select-query = { path = "crates/s3select-query", version = "0.0.5" }
rustfs-scanner = { path = "crates/scanner", version = "0.0.5" }
rustfs-signer = { path = "crates/signer", version = "0.0.5" }
rustfs-targets = { path = "crates/targets", version = "0.0.5" }
rustfs-utils = { path = "crates/utils", version = "0.0.5" }
@@ -100,7 +100,6 @@ async-compression = { version = "0.4.19" }
async-recursion = "1.1.1"
async-trait = "0.1.89"
axum = "0.8.8"
axum-extra = "0.12.3"
axum-server = { version = "0.8.0", features = ["tls-rustls-no-provider"], default-features = false }
futures = "0.3.31"
futures-core = "0.3.31"
@@ -137,9 +136,9 @@ rmcp = { version = "0.12.0" }
rmp = { version = "0.8.15" }
rmp-serde = { version = "1.3.1" }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.147", features = ["raw_value"] }
serde_json = { version = "1.0.148", features = ["raw_value"] }
serde_urlencoded = "0.7.1"
schemars = "1.1.0"
schemars = "1.2.0"

# Cryptography and Security
aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
@@ -202,7 +201,7 @@ libc = "0.2.178"
libsystemd = "0.7.2"
local-ip-address = "0.6.8"
lz4 = "1.28.1"
matchit = "0.9.0"
matchit = "0.9.1"
md-5 = "0.11.0-rc.3"
md5 = "0.8.0"
mime_guess = "2.0.5"
@@ -278,8 +277,6 @@ jemalloc_pprof = { version = "0.8.1", features = ["symbolize", "flamegraph"] }
# Used to generate CPU performance analysis data and flame diagrams
pprof = { version = "0.15.0", features = ["flamegraph", "protobuf-codec"] }

[workspace.metadata.cargo-shear]
ignored = ["rustfs", "rustfs-mcp"]
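The new rustfs-credentials entry sits in [workspace.dependencies], so member crates inherit its path and version rather than repeating them; a member's Cargo.toml would reference it roughly like this (illustrative snippet, not part of the diff):

    [dependencies]
    rustfs-credentials = { workspace = true }
    serde = { workspace = true }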
@@ -148,8 +148,8 @@ ENV RUSTFS_ADDRESS=":9000" \
    RUSTFS_OBS_LOG_DIRECTORY="/logs" \
    RUSTFS_USERNAME="rustfs" \
    RUSTFS_GROUPNAME="rustfs" \
    RUSTFS_UID="1000" \
    RUSTFS_GID="1000"
    RUSTFS_UID="10001" \
    RUSTFS_GID="10001"

# Note: We don't COPY source here because we expect it to be mounted at /app
# We rely on cargo run to build and run
@@ -187,8 +187,8 @@ RUN set -eux; \

# Create a conventional runtime user/group (final switch happens in entrypoint via chroot --userspec)
RUN set -eux; \
    groupadd -g 1000 rustfs; \
    useradd -u 1000 -g rustfs -M -s /usr/sbin/nologin rustfs
    groupadd -g 10001 rustfs; \
    useradd -u 10001 -g rustfs -M -s /usr/sbin/nologin rustfs

WORKDIR /app

@@ -212,8 +212,8 @@ ENV RUSTFS_ADDRESS=":9000" \
    RUST_LOG="warn" \
    RUSTFS_USERNAME="rustfs" \
    RUSTFS_GROUPNAME="rustfs" \
    RUSTFS_UID="1000" \
    RUSTFS_GID="1000"
    RUSTFS_UID="10001" \
    RUSTFS_GID="10001"

EXPOSE 9000
VOLUME ["/data"]
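All three hunks move the runtime identity from 1000:1000 to 10001:10001, stepping out of the UID range a host's first login user typically occupies. Per the comment above, the container still starts as root and the entrypoint drops privileges; a hypothetical sketch of that chroot --userspec pattern (the entrypoint itself is not shown in this diff, and the binary path is assumed):

    #!/bin/sh
    # hypothetical entrypoint sketch, not taken from the diff
    chown -R "${RUSTFS_UID}:${RUSTFS_GID}" /data /logs
    exec chroot --userspec="${RUSTFS_UID}:${RUSTFS_GID}" / /usr/bin/rustfs "$@"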
Makefile (430 lines changed)
@@ -2,394 +2,80 @@
|
||||
# Remote development requires VSCode with Dev Containers, Remote SSH, Remote Explorer
|
||||
# https://code.visualstudio.com/docs/remote/containers
|
||||
###########
|
||||
|
||||
.PHONY: SHELL
|
||||
|
||||
# Makefile global config
|
||||
# Use config.mak to override any of the following variables.
|
||||
# Do not make changes here.
|
||||
|
||||
.DEFAULT_GOAL := help
|
||||
.EXPORT_ALL_VARIABLES:
|
||||
.ONESHELL:
|
||||
.SILENT:
|
||||
|
||||
NUM_CORES := $(shell nproc 2>/dev/null || sysctl -n hw.ncpu)
|
||||
|
||||
MAKEFLAGS += -j$(NUM_CORES) -l$(NUM_CORES)
|
||||
MAKEFLAGS += --silent
|
||||
|
||||
SHELL:= /bin/bash
|
||||
.SHELLFLAGS = -eu -o pipefail -c
|
||||
|
||||
DOCKER_CLI ?= docker
|
||||
IMAGE_NAME ?= rustfs:v1.0.0
|
||||
CONTAINER_NAME ?= rustfs-dev
|
||||
# Docker build configurations
|
||||
DOCKERFILE_PRODUCTION = Dockerfile
|
||||
DOCKERFILE_SOURCE = Dockerfile.source
|
||||
|
||||
# Fatal check
|
||||
# Checks all required dependencies and exits with error if not found
|
||||
# (e.g., cargo, rustfmt)
|
||||
check-%:
|
||||
@command -v $* >/dev/null 2>&1 || { \
|
||||
echo >&2 "❌ '$*' is not installed."; \
|
||||
exit 1; \
|
||||
}
|
||||
|
||||
# Warning-only check
|
||||
# Checks for optional dependencies and issues a warning if not found
|
||||
# (e.g., cargo-nextest for enhanced testing)
|
||||
warn-%:
|
||||
@command -v $* >/dev/null 2>&1 || { \
|
||||
echo >&2 "⚠️ '$*' is not installed."; \
|
||||
}
|
||||
|
||||
# For checking dependencies use check-<dep-name> or warn-<dep-name>
|
||||
.PHONY: core-deps fmt-deps test-deps
|
||||
core-deps: check-cargo
|
||||
fmt-deps: check-rustfmt
|
||||
test-deps: warn-cargo-nextest
|
||||
|
||||
# Code quality and formatting targets
|
||||
.PHONY: fmt
|
||||
fmt: core-deps fmt-deps
|
||||
@echo "🔧 Formatting code..."
|
||||
cargo fmt --all
|
||||
|
||||
.PHONY: fmt-check
|
||||
fmt-check: core-deps fmt-deps
|
||||
@echo "📝 Checking code formatting..."
|
||||
cargo fmt --all --check
|
||||
|
||||
.PHONY: clippy
|
||||
clippy: core-deps
|
||||
@echo "🔍 Running clippy checks..."
|
||||
cargo clippy --fix --allow-dirty
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
.PHONY: check
|
||||
check: core-deps
|
||||
@echo "🔨 Running compilation check..."
|
||||
cargo check --all-targets
|
||||
|
||||
.PHONY: test
|
||||
test: core-deps test-deps
|
||||
@echo "🧪 Running tests..."
|
||||
@if command -v cargo-nextest >/dev/null 2>&1; then \
|
||||
cargo nextest run --all --exclude e2e_test; \
|
||||
else \
|
||||
echo "ℹ️ cargo-nextest not found; falling back to 'cargo test'"; \
|
||||
cargo test --workspace --exclude e2e_test -- --nocapture; \
|
||||
fi
|
||||
cargo test --all --doc
|
||||
|
||||
.PHONY: setup-hooks
|
||||
setup-hooks:
|
||||
@echo "🔧 Setting up git hooks..."
|
||||
chmod +x .git/hooks/pre-commit
|
||||
@echo "✅ Git hooks setup complete!"
|
||||
|
||||
.PHONY: pre-commit
|
||||
pre-commit: fmt clippy check test
|
||||
@echo "✅ All pre-commit checks passed!"
|
||||
|
||||
.PHONY: e2e-server
|
||||
e2e-server:
|
||||
sh $(shell pwd)/scripts/run.sh
|
||||
|
||||
.PHONY: probe-e2e
|
||||
probe-e2e:
|
||||
sh $(shell pwd)/scripts/probe.sh
|
||||
|
||||
# Native build using build-rustfs.sh script
|
||||
.PHONY: build
|
||||
build:
|
||||
@echo "🔨 Building RustFS using build-rustfs.sh script..."
|
||||
./build-rustfs.sh
|
||||
|
||||
.PHONY: build-dev
|
||||
build-dev:
|
||||
@echo "🔨 Building RustFS in development mode..."
|
||||
./build-rustfs.sh --dev
|
||||
|
||||
# Docker-based build (alternative approach)
|
||||
# Usage: make BUILD_OS=ubuntu22.04 build-docker
|
||||
# Output: target/ubuntu22.04/release/rustfs
|
||||
BUILD_OS ?= rockylinux9.3
|
||||
.PHONY: build-docker
|
||||
build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
|
||||
build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
|
||||
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
|
||||
build-docker:
|
||||
@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
|
||||
$(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
|
||||
$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)
|
||||
|
||||
.PHONY: build-musl
|
||||
build-musl:
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-musl
|
||||
# Makefile colors config
bold := $(shell tput bold)
normal := $(shell tput sgr0)
errorTitle := $(shell tput setab 1 && tput bold && echo '\n')
recommendation := $(shell tput setab 4)
underline := $(shell tput smul)
reset := $(shell tput -Txterm sgr0)
black := $(shell tput setaf 0)
red := $(shell tput setaf 1)
green := $(shell tput setaf 2)
yellow := $(shell tput setaf 3)
blue := $(shell tput setaf 4)
magenta := $(shell tput setaf 5)
cyan := $(shell tput setaf 6)
white := $(shell tput setaf 7)

define HEADER
How to use me:
# To get help for each target
${bold}make help${reset}

# To run and execute a target
${bold}make ${cyan}<target>${reset}

💡 For more help use 'make help', 'make help-build' or 'make help-docker'

🦀 RustFS Makefile Help:

📋 Main Command Categories:
make help-build # Show build-related help
make help-docker # Show Docker-related help

🔧 Code Quality:
make fmt # Format code
make clippy # Run clippy checks
make test # Run tests
make pre-commit # Run all pre-commit checks

🚀 Quick Start:
make build # Build RustFS binary
make docker-dev-local # Build development Docker image (local)
make dev-env-start # Start development environment
endef
export HEADER

.PHONY: build-gnu
build-gnu:
@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform x86_64-unknown-linux-gnu

.PHONY: build-musl-arm64
build-musl-arm64:
@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform aarch64-unknown-linux-musl

.PHONY: build-gnu-arm64
build-gnu-arm64:
@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform aarch64-unknown-linux-gnu

.PHONY: deploy-dev
deploy-dev: build-musl
@echo "🚀 Deploying to dev server: $${IP}"
./scripts/dev_deploy.sh $${IP}

# ========================================================================================
# Docker Multi-Architecture Builds (Primary Methods)
# ========================================================================================

# Production builds using docker-buildx.sh (for CI/CD and production)
.PHONY: docker-buildx
docker-buildx:
@echo "🏗️ Building multi-architecture production Docker images with buildx..."
./docker-buildx.sh

.PHONY: docker-buildx-push
docker-buildx-push:
@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
./docker-buildx.sh --push

.PHONY: docker-buildx-version
docker-buildx-version:
@if [ -z "$(VERSION)" ]; then \
echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
exit 1; \
fi
@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
./docker-buildx.sh --release $(VERSION)

.PHONY: docker-buildx-push-version
docker-buildx-push-version:
@if [ -z "$(VERSION)" ]; then \
echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
exit 1; \
fi
@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
./docker-buildx.sh --release $(VERSION) --push

-include $(addsuffix /*.mak, $(shell find .config/make -type d))

# Development/Source builds using direct buildx commands
.PHONY: docker-dev
docker-dev:
@echo "🏗️ Building multi-architecture development Docker images with buildx..."
@echo "💡 This builds from source code and is intended for local development and testing"
@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
$(DOCKER_CLI) buildx build \
--platform linux/amd64,linux/arm64 \
--file $(DOCKERFILE_SOURCE) \
--tag rustfs:source-latest \
--tag rustfs:dev-latest \
.

.PHONY: docker-dev-local
docker-dev-local:
@echo "🏗️ Building single-architecture development Docker image for local use..."
@echo "💡 This builds from source code for the current platform and loads locally"
$(DOCKER_CLI) buildx build \
--file $(DOCKERFILE_SOURCE) \
--tag rustfs:source-latest \
--tag rustfs:dev-latest \
--load \
.

.PHONY: docker-dev-push
docker-dev-push:
@if [ -z "$(REGISTRY)" ]; then \
echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
exit 1; \
fi
@echo "🚀 Building and pushing multi-architecture development Docker images..."
@echo "💡 Pushing to registry: $(REGISTRY)"
$(DOCKER_CLI) buildx build \
--platform linux/amd64,linux/arm64 \
--file $(DOCKERFILE_SOURCE) \
--tag $(REGISTRY)/rustfs:source-latest \
--tag $(REGISTRY)/rustfs:dev-latest \
--push \
.

# Local production builds using direct buildx (alternative to docker-buildx.sh)
.PHONY: docker-buildx-production-local
docker-buildx-production-local:
@echo "🏗️ Building single-architecture production Docker image locally..."
@echo "💡 Alternative to docker-buildx.sh for local testing"
$(DOCKER_CLI) buildx build \
--file $(DOCKERFILE_PRODUCTION) \
--tag rustfs:production-latest \
--tag rustfs:latest \
--load \
--build-arg RELEASE=latest \
.

# ========================================================================================
# Single Architecture Docker Builds (Traditional)
# ========================================================================================

.PHONY: docker-build-production
docker-build-production:
@echo "🏗️ Building single-architecture production Docker image..."
@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .

.PHONY: docker-build-source
docker-build-source:
@echo "🏗️ Building single-architecture source Docker image..."
@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
--build-arg BUILDKIT_INLINE_CACHE=1 \
-f $(DOCKERFILE_SOURCE) -t rustfs:source .

# ========================================================================================
# Development Environment
# ========================================================================================

.PHONY: dev-env-start
dev-env-start:
@echo "🚀 Starting development environment..."
$(DOCKER_CLI) buildx build \
--file $(DOCKERFILE_SOURCE) \
--tag rustfs:dev \
--load \
.
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
-p 9010:9010 -p 9000:9000 \
-v $(shell pwd):/workspace \
-it rustfs:dev

.PHONY: dev-env-stop
dev-env-stop:
@echo "🛑 Stopping development environment..."
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true

.PHONY: dev-env-restart
dev-env-restart: dev-env-stop dev-env-start

# ========================================================================================
# Build Utilities
# ========================================================================================

.PHONY: docker-inspect-multiarch
docker-inspect-multiarch:
@if [ -z "$(IMAGE)" ]; then \
echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
exit 1; \
fi
@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
$(DOCKER_CLI) buildx imagetools inspect $(IMAGE)

.PHONY: build-cross-all
build-cross-all:
@echo "🔧 Building all target architectures..."
@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
@echo "🔨 Generating protobuf code..."
cargo run --bin gproto || true
@echo "🔨 Building x86_64-unknown-linux-gnu..."
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
@echo "🔨 Building aarch64-unknown-linux-gnu..."
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
@echo "🔨 Building x86_64-unknown-linux-musl..."
./build-rustfs.sh --platform x86_64-unknown-linux-musl
@echo "🔨 Building aarch64-unknown-linux-musl..."
./build-rustfs.sh --platform aarch64-unknown-linux-musl
@echo "✅ All architectures built successfully!"

# ========================================================================================
# Help and Documentation
# ========================================================================================

.PHONY: help-build
help-build:
@echo "🔨 RustFS Build Help:"
@echo ""
@echo "🚀 Local Build (Recommended):"
@echo " make build # Build RustFS binary (includes console by default)"
@echo " make build-dev # Development mode build"
@echo " make build-musl # Build x86_64 musl version"
@echo " make build-gnu # Build x86_64 GNU version"
@echo " make build-musl-arm64 # Build aarch64 musl version"
@echo " make build-gnu-arm64 # Build aarch64 GNU version"
@echo ""
@echo "🐳 Docker Build:"
@echo " make build-docker # Build using Docker container"
@echo " make build-docker BUILD_OS=ubuntu22.04 # Specify build system"
@echo ""
@echo "🏗️ Cross-architecture Build:"
@echo " make build-cross-all # Build binaries for all architectures"
@echo ""
@echo "🔧 Direct usage of build-rustfs.sh script:"
@echo " ./build-rustfs.sh --help # View script help"
@echo " ./build-rustfs.sh --no-console # Build without console resources"
@echo " ./build-rustfs.sh --force-console-update # Force update console resources"
@echo " ./build-rustfs.sh --dev # Development mode build"
@echo " ./build-rustfs.sh --sign # Sign binary files"
@echo " ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
@echo " ./build-rustfs.sh --skip-verification # Skip binary verification"
@echo ""
@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"

.PHONY: help-docker
help-docker:
@echo "🐳 Docker Multi-architecture Build Help:"
@echo ""
@echo "🚀 Production Image Build (Recommended to use docker-buildx.sh):"
@echo " make docker-buildx # Build production multi-arch image (no push)"
@echo " make docker-buildx-push # Build and push production multi-arch image"
@echo " make docker-buildx-version VERSION=v1.0.0 # Build specific version"
@echo " make docker-buildx-push-version VERSION=v1.0.0 # Build and push specific version"
@echo ""
@echo "🔧 Development/Source Image Build (Local development testing):"
@echo " make docker-dev # Build dev multi-arch image (cannot load locally)"
@echo " make docker-dev-local # Build dev single-arch image (local load)"
@echo " make docker-dev-push REGISTRY=xxx # Build and push dev image"
@echo ""
@echo "🏗️ Local Production Image Build (Alternative):"
@echo " make docker-buildx-production-local # Build production single-arch image locally"
@echo ""
@echo "📦 Single-architecture Build (Traditional way):"
@echo " make docker-build-production # Build single-arch production image"
@echo " make docker-build-source # Build single-arch source image"
@echo ""
@echo "🚀 Development Environment Management:"
@echo " make dev-env-start # Start development container environment"
@echo " make dev-env-stop # Stop development container environment"
@echo " make dev-env-restart # Restart development container environment"
@echo ""
@echo "🔧 Auxiliary Tools:"
@echo " make build-cross-all # Build binaries for all architectures"
@echo " make docker-inspect-multiarch IMAGE=xxx # Check image architecture support"
@echo ""
@echo "📋 Environment Variables:"
@echo " REGISTRY Image registry address (required for push)"
@echo " DOCKERHUB_USERNAME Docker Hub username"
@echo " DOCKERHUB_TOKEN Docker Hub access token"
@echo " GITHUB_TOKEN GitHub access token"
@echo ""
@echo "💡 Suggestions:"
@echo " - Production use: Use docker-buildx* commands (based on precompiled binaries)"
@echo " - Local development: Use docker-dev* commands (build from source)"
@echo " - Development environment: Use dev-env-* commands to manage dev containers"

.PHONY: help
help:
@echo "🦀 RustFS Makefile Help:"
@echo ""
@echo "📋 Main Command Categories:"
@echo " make help-build # Show build-related help"
@echo " make help-docker # Show Docker-related help"
@echo ""
@echo "🔧 Code Quality:"
@echo " make fmt # Format code"
@echo " make clippy # Run clippy checks"
@echo " make test # Run tests"
@echo " make pre-commit # Run all pre-commit checks"
@echo ""
@echo "🚀 Quick Start:"
@echo " make build # Build RustFS binary"
@echo " make docker-dev-local # Build development Docker image (local)"
@echo " make dev-env-start # Start development environment"
@echo ""
@echo "💡 For more help use 'make help-build' or 'make help-docker'"

14 README.md
@@ -10,6 +10,11 @@
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
</p>

<p align="center">
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14181" alt="rustfs%2Frustfs | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</p>

<p align="center">
<a href="https://docs.rustfs.com/installation/">Getting Started</a>
· <a href="https://docs.rustfs.com/">Docs</a>
@@ -45,10 +45,10 @@ Unlike other storage systems, RustFS is released under the permissive Apache 2.
| :--- | :--- | :--- | :--- |
| **S3 Core Features** | ✅ Available | **Bitrot Protection** | ✅ Available |
| **Upload / Download** | ✅ Available | **Single Node Mode** | ✅ Available |
| **Versioning** | ✅ Available | **Bucket Replication** | ⚠️ Partial Support |
| **Versioning** | ✅ Available | **Bucket Replication** | ✅ Available |
| **Logging** | ✅ Available | **Lifecycle Management** | 🚧 Under Testing |
| **Event Notifications** | ✅ Available | **Distributed Mode** | 🚧 Under Testing |
| **K8s Helm Charts** | ✅ Available | **OPA (Open Policy Agent)** | 🚧 Under Testing |
| **K8s Helm Charts** | ✅ Available | **RustFS KMS** | 🚧 Under Testing |

@@ -215,11 +220,6 @@ RustFS is a community-driven project, and we appreciate all contributions. Check
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
</a>

## Github Trending Top

🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending top charts.

<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>

## Star History

12 README_ZH.md
@@ -10,6 +10,10 @@
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
</p>

<p align="center">
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14181" alt="rustfs%2Frustfs | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</p>

<p align="center">
<a href="https://docs.rustfs.com/installation/">Getting Started</a>
· <a href="https://docs.rustfs.com/">Docs</a>
@@ -17,6 +21,8 @@
· <a href="https://github.com/rustfs/rustfs/discussions">Community Discussions</a>
</p>

<p align="center">
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a> | 简体中文 |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
@@ -46,7 +52,7 @@ RustFS is a high-performance distributed object storage system built on Rust. Rust
| :--- | :--- | :--- | :--- |
| **S3 Core Features** | ✅ Available | **Bitrot (data-rot protection)** | ✅ Available |
| **Upload / Download** | ✅ Available | **Single Node Mode** | ✅ Available |
| **Versioning** | ✅ Available | **Bucket Replication** | ⚠️ Partial Support |
| **Versioning** | ✅ Available | **Bucket Replication** | ✅ Available |
| **Logging** | ✅ Available | **Lifecycle Management** | 🚧 Under Testing |
| **Event Notifications** | ✅ Available | **Distributed Mode** | 🚧 Under Testing |
| **K8s Helm Chart** | ✅ Available | **OPA (Policy Engine)** | 🚧 Under Testing |
@@ -200,11 +206,7 @@ RustFS is a community-driven project, and we appreciate all contributions. Please see
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
</a>

## Github Trending Top

🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, and regularly tops the GitHub Trending charts.

<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>

## Star History

@@ -36,6 +36,7 @@ clen = "clen"
datas = "datas"
bre = "bre"
abd = "abd"
mak = "mak"

[files]
extend-exclude = []
@@ -183,7 +183,7 @@ impl HealChannelProcessor {
HealType::Object {
bucket: request.bucket.clone(),
object: prefix.clone(),
version_id: request.object_version_id.clone(),
version_id: None,
}
} else {
HealType::Bucket {
@@ -366,7 +366,6 @@ mod tests {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
object_version_id: None,
disk: None,
priority: HealChannelPriority::Normal,
scan_mode: None,
@@ -395,7 +394,6 @@ mod tests {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: Some("test-object".to_string()),
object_version_id: None,
disk: None,
priority: HealChannelPriority::High,
scan_mode: Some(HealScanMode::Deep),
@@ -427,7 +425,6 @@ mod tests {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
object_version_id: None,
disk: Some("pool_0_set_1".to_string()),
priority: HealChannelPriority::Critical,
scan_mode: None,
@@ -456,7 +453,6 @@ mod tests {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
object_version_id: None,
disk: Some("invalid-disk-id".to_string()),
priority: HealChannelPriority::Normal,
scan_mode: None,
@@ -492,7 +488,6 @@ mod tests {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
object_version_id: None,
disk: None,
priority: channel_priority,
scan_mode: None,
@@ -521,7 +516,6 @@ mod tests {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
object_version_id: None,
disk: None,
priority: HealChannelPriority::Normal,
scan_mode: None,
@@ -551,7 +545,6 @@ mod tests {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: Some("".to_string()), // Empty prefix should be treated as bucket heal
object_version_id: None,
disk: None,
priority: HealChannelPriority::Normal,
scan_mode: None,

@@ -495,26 +495,6 @@ mod serial_tests {
object_name,
} = &elm.1;
println!("cache row:{ver_no} {ver_id} {mod_time} {type_:?} {object_name}");
//eval_inner(&oi.to_lifecycle_opts(), OffsetDateTime::now_utc()).await;
eval_inner(
&lifecycle::ObjectOpts {
name: oi.name.clone(),
user_tags: oi.user_tags.clone(),
version_id: oi.version_id.map(|v| v.to_string()).unwrap_or_default(),
mod_time: oi.mod_time,
size: oi.size as usize,
is_latest: oi.is_latest,
num_versions: oi.num_versions,
delete_marker: oi.delete_marker,
successor_mod_time: oi.successor_mod_time,
restore_ongoing: oi.restore_ongoing,
restore_expires: oi.restore_expires,
transition_status: oi.transitioned_object.status.clone(),
..Default::default()
},
OffsetDateTime::now_utc(),
)
.await;
}
println!("row:{row:?}");
}
@@ -526,261 +506,3 @@ mod serial_tests {
println!("Lifecycle cache test completed");
}
}

async fn eval_inner(&self, obj: &ObjectOpts, now: OffsetDateTime) -> Event {
let mut events = Vec::<Event>::new();
info!(
"eval_inner: object={}, mod_time={:?}, now={:?}, is_latest={}, delete_marker={}",
obj.name, obj.mod_time, now, obj.is_latest, obj.delete_marker
);
if obj.mod_time.expect("err").unix_timestamp() == 0 {
info!("eval_inner: mod_time is 0, returning default event");
return Event::default();
}

if let Some(restore_expires) = obj.restore_expires {
if restore_expires.unix_timestamp() != 0 && now.unix_timestamp() > restore_expires.unix_timestamp() {
let mut action = IlmAction::DeleteRestoredAction;
if !obj.is_latest {
action = IlmAction::DeleteRestoredVersionAction;
}

events.push(Event {
action,
due: Some(now),
rule_id: "".into(),
noncurrent_days: 0,
newer_noncurrent_versions: 0,
storage_class: "".into(),
});
}
}

if let Some(ref lc_rules) = self.filter_rules(obj).await {
for rule in lc_rules.iter() {
if obj.expired_object_deletemarker() {
if let Some(expiration) = rule.expiration.as_ref() {
if let Some(expired_object_delete_marker) = expiration.expired_object_delete_marker {
events.push(Event {
action: IlmAction::DeleteVersionAction,
rule_id: rule.id.clone().expect("err!"),
due: Some(now),
noncurrent_days: 0,
newer_noncurrent_versions: 0,
storage_class: "".into(),
});
break;
}

if let Some(days) = expiration.days {
let expected_expiry = expected_expiry_time(obj.mod_time.unwrap(), days /*, date*/);
if now.unix_timestamp() >= expected_expiry.unix_timestamp() {
events.push(Event {
action: IlmAction::DeleteVersionAction,
rule_id: rule.id.clone().expect("err!"),
due: Some(expected_expiry),
noncurrent_days: 0,
newer_noncurrent_versions: 0,
storage_class: "".into(),
});
break;
}
}
}
}

if obj.is_latest {
if let Some(ref expiration) = rule.expiration {
if let Some(expired_object_delete_marker) = expiration.expired_object_delete_marker {
if obj.delete_marker && expired_object_delete_marker {
let due = expiration.next_due(obj);
if let Some(due) = due {
if now.unix_timestamp() >= due.unix_timestamp() {
events.push(Event {
action: IlmAction::DelMarkerDeleteAllVersionsAction,
rule_id: rule.id.clone().expect("err!"),
due: Some(due),
noncurrent_days: 0,
newer_noncurrent_versions: 0,
storage_class: "".into(),
});
}
}
continue;
}
}
}
}

if !obj.is_latest {
if let Some(ref noncurrent_version_expiration) = rule.noncurrent_version_expiration {
if let Some(newer_noncurrent_versions) = noncurrent_version_expiration.newer_noncurrent_versions {
if newer_noncurrent_versions > 0 {
continue;
}
}
}
}

if !obj.is_latest {
if let Some(ref noncurrent_version_expiration) = rule.noncurrent_version_expiration {
if let Some(noncurrent_days) = noncurrent_version_expiration.noncurrent_days {
if noncurrent_days != 0 {
if let Some(successor_mod_time) = obj.successor_mod_time {
let expected_expiry = expected_expiry_time(successor_mod_time, noncurrent_days);
if now.unix_timestamp() >= expected_expiry.unix_timestamp() {
events.push(Event {
action: IlmAction::DeleteVersionAction,
rule_id: rule.id.clone().expect("err!"),
due: Some(expected_expiry),
noncurrent_days: 0,
newer_noncurrent_versions: 0,
storage_class: "".into(),
});
}
}
}
}
}
}

if !obj.is_latest {
if let Some(ref noncurrent_version_transitions) = rule.noncurrent_version_transitions {
if let Some(ref storage_class) = noncurrent_version_transitions[0].storage_class {
if storage_class.as_str() != "" && !obj.delete_marker && obj.transition_status != TRANSITION_COMPLETE {
let due = rule.noncurrent_version_transitions.as_ref().unwrap()[0].next_due(obj);
if let Some(due0) = due {
if now.unix_timestamp() == 0 || now.unix_timestamp() > due0.unix_timestamp() {
events.push(Event {
action: IlmAction::TransitionVersionAction,
rule_id: rule.id.clone().expect("err!"),
due,
storage_class: rule.noncurrent_version_transitions.as_ref().unwrap()[0]
.storage_class
.clone()
.unwrap()
.as_str()
.to_string(),
..Default::default()
});
}
}
}
}
}
}

info!(
"eval_inner: checking expiration condition - is_latest={}, delete_marker={}, version_id={:?}, condition_met={}",
obj.is_latest,
obj.delete_marker,
obj.version_id,
(obj.is_latest || obj.version_id.is_empty()) && !obj.delete_marker
);
// Allow expiration for latest objects OR non-versioned objects (empty version_id)
if (obj.is_latest || obj.version_id.is_empty()) && !obj.delete_marker {
info!("eval_inner: entering expiration check");
if let Some(ref expiration) = rule.expiration {
if let Some(ref date) = expiration.date {
let date0 = OffsetDateTime::from(date.clone());
if date0.unix_timestamp() != 0 && (now.unix_timestamp() >= date0.unix_timestamp()) {
info!("eval_inner: expiration by date - date0={:?}", date0);
events.push(Event {
action: IlmAction::DeleteAction,
rule_id: rule.id.clone().expect("err!"),
due: Some(date0),
noncurrent_days: 0,
newer_noncurrent_versions: 0,
storage_class: "".into(),
});
}
} else if let Some(days) = expiration.days {
let expected_expiry: OffsetDateTime = expected_expiry_time(obj.mod_time.unwrap(), days);
info!(
"eval_inner: expiration check - days={}, obj_time={:?}, expiry_time={:?}, now={:?}, should_expire={}",
days,
obj.mod_time.expect("err!"),
expected_expiry,
now,
now.unix_timestamp() > expected_expiry.unix_timestamp()
);
if now.unix_timestamp() >= expected_expiry.unix_timestamp() {
info!("eval_inner: object should expire, adding DeleteAction");
let mut event = Event {
action: IlmAction::DeleteAction,
rule_id: rule.id.clone().expect("err!"),
due: Some(expected_expiry),
noncurrent_days: 0,
newer_noncurrent_versions: 0,
storage_class: "".into(),
};
/*if rule.expiration.expect("err!").delete_all.val {
event.action = IlmAction::DeleteAllVersionsAction
}*/
events.push(event);
}
} else {
info!("eval_inner: expiration.days is None");
}
} else {
info!("eval_inner: rule.expiration is None");
}

if obj.transition_status != TRANSITION_COMPLETE {
if let Some(ref transitions) = rule.transitions {
let due = transitions[0].next_due(obj);
if let Some(due0) = due {
if now.unix_timestamp() == 0 || now.unix_timestamp() > due0.unix_timestamp() {
events.push(Event {
action: IlmAction::TransitionAction,
rule_id: rule.id.clone().expect("err!"),
due,
storage_class: transitions[0].storage_class.clone().expect("err!").as_str().to_string(),
noncurrent_days: 0,
newer_noncurrent_versions: 0,
});
}
}
}
}
}
}
}

if !events.is_empty() {
events.sort_by(|a, b| {
if now.unix_timestamp() > a.due.expect("err!").unix_timestamp()
&& now.unix_timestamp() > b.due.expect("err").unix_timestamp()
|| a.due.expect("err").unix_timestamp() == b.due.expect("err").unix_timestamp()
{
match a.action {
IlmAction::DeleteAllVersionsAction
| IlmAction::DelMarkerDeleteAllVersionsAction
| IlmAction::DeleteAction
| IlmAction::DeleteVersionAction => {
return Ordering::Less;
}
_ => (),
}
match b.action {
IlmAction::DeleteAllVersionsAction
| IlmAction::DelMarkerDeleteAllVersionsAction
| IlmAction::DeleteAction
| IlmAction::DeleteVersionAction => {
return Ordering::Greater;
}
_ => (),
}
return Ordering::Less;
}

if a.due.expect("err").unix_timestamp() < b.due.expect("err").unix_timestamp() {
return Ordering::Less;
}
return Ordering::Greater;
});
return events[0].clone();
}

Event::default()
}

@@ -263,16 +263,6 @@ async fn create_test_tier(server: u32) {
region: "".to_string(),
..Default::default()
})
} else if server == 2 {
Some(TierMinIO {
access_key: "minioadmin".to_string(),
secret_key: "minioadmin".to_string(),
bucket: "mblock2".to_string(),
endpoint: "http://m1ddns.pvtool.com:9020".to_string(),
prefix: format!("mypre{}/", uuid::Uuid::new_v4()),
region: "".to_string(),
..Default::default()
})
} else {
Some(TierMinIO {
access_key: "minioadmin".to_string(),
@@ -612,7 +602,7 @@ mod serial_tests {
async fn test_lifecycle_transition_basic() {
let (_disk_paths, ecstore) = setup_test_env().await;

create_test_tier(2).await;
create_test_tier(1).await;

// Create test bucket and object
let suffix = uuid::Uuid::new_v4().simple().to_string();
@@ -620,15 +610,8 @@ mod serial_tests {
let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
let test_data = b"Hello, this is test data for lifecycle expiry!";

create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
upload_test_object(
&ecstore,
bucket_name.as_str(),
object_name,
b"Hello, this is test data for lifecycle expiry 1111-11111111-1111 !",
)
.await;
//create_test_bucket(&ecstore, bucket_name.as_str()).await;
//create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
create_test_bucket(&ecstore, bucket_name.as_str()).await;
upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;

// Verify object exists initially

@@ -39,4 +39,4 @@ path-clean = { workspace = true }
rmp-serde = { workspace = true }
async-trait = { workspace = true }
s3s = { workspace = true }
tracing = { workspace = true }
tracing = { workspace = true }
@@ -25,18 +25,42 @@ pub static GLOBAL_RUSTFS_PORT: LazyLock<RwLock<String>> = LazyLock::new(|| RwLoc
pub static GLOBAL_RUSTFS_ADDR: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_CONN_MAP: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
pub static GLOBAL_ROOT_CERT: LazyLock<RwLock<Option<Vec<u8>>>> = LazyLock::new(|| RwLock::new(None));
pub static GLOBAL_MTLS_IDENTITY: LazyLock<RwLock<Option<MtlsIdentityPem>>> = LazyLock::new(|| RwLock::new(None));

/// Set the global RustFS address used for gRPC connections.
///
/// # Arguments
/// * `addr` - A string slice representing the RustFS address (e.g., "https://node1:9000").
pub async fn set_global_addr(addr: &str) {
*GLOBAL_RUSTFS_ADDR.write().await = addr.to_string();
}

/// Set the global root CA certificate for outbound gRPC clients.
/// This certificate is used to validate server TLS certificates.
/// When set to None, clients use the system default root CAs.
///
/// # Arguments
/// * `cert` - A vector of bytes representing the PEM-encoded root CA certificate.
pub async fn set_global_root_cert(cert: Vec<u8>) {
*GLOBAL_ROOT_CERT.write().await = Some(cert);
}

/// Set the global mTLS identity (cert+key PEM) for outbound gRPC clients.
/// When set, clients will present this identity to servers requesting/requiring mTLS.
/// When None, clients proceed with standard server-authenticated TLS.
///
/// # Arguments
/// * `identity` - An optional MtlsIdentityPem struct containing the cert and key PEM.
pub async fn set_global_mtls_identity(identity: Option<MtlsIdentityPem>) {
*GLOBAL_MTLS_IDENTITY.write().await = identity;
}

/// Evict a stale/dead connection from the global connection cache.
/// This is critical for cluster recovery when a node dies unexpectedly (e.g., power-off).
/// By removing the cached connection, subsequent requests will establish a fresh connection.
///
/// # Arguments
/// * `addr` - The address of the connection to evict.
pub async fn evict_connection(addr: &str) {
let removed = GLOBAL_CONN_MAP.write().await.remove(addr);
if removed.is_some() {
@@ -45,6 +69,12 @@ pub async fn evict_connection(addr: &str) {
}

/// Check if a connection exists in the cache for the given address.
///
/// # Arguments
/// * `addr` - The address to check.
///
/// # Returns
/// * `bool` - True if a cached connection exists, false otherwise.
pub async fn has_cached_connection(addr: &str) -> bool {
GLOBAL_CONN_MAP.read().await.contains_key(addr)
}
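
A minimal usage sketch of the cache helpers above, assuming the tonic `Endpoint`/`Channel` types stored in `GLOBAL_CONN_MAP`; `get_or_dial` is a hypothetical helper, not part of the RustFS API:

```rust
use tonic::transport::{Channel, Endpoint};

// Hypothetical helper: reuse a cached channel, dialing and caching a fresh
// one on miss. On RPC failure the caller would run `evict_connection(addr)`
// so the next call re-dials instead of reusing the dead channel.
async fn get_or_dial(addr: &str) -> Result<Channel, tonic::transport::Error> {
    if let Some(ch) = GLOBAL_CONN_MAP.read().await.get(addr) {
        return Ok(ch.clone());
    }
    let ch = Endpoint::from_shared(addr.to_string())?.connect().await?;
    GLOBAL_CONN_MAP.write().await.insert(addr.to_string(), ch.clone());
    Ok(ch)
}
```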
@@ -58,3 +88,12 @@ pub async fn clear_all_connections() {
tracing::warn!("Cleared {} cached connections from global map", count);
}
}
/// Optional client identity (cert+key PEM) for outbound mTLS.
///
/// When present, gRPC clients will present this identity to servers requesting/requiring mTLS.
/// When absent, clients proceed with standard server-authenticated TLS.
#[derive(Clone, Debug)]
pub struct MtlsIdentityPem {
pub cert_pem: Vec<u8>,
pub key_pem: Vec<u8>,
}

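A hedged startup sketch showing how the setters above might be wired together; the certificate paths are illustrative only:

```rust
// Illustrative bootstrap: load PEM material from disk (paths are examples)
// and install it for all outbound gRPC clients.
async fn init_outbound_tls() -> std::io::Result<()> {
    let ca = tokio::fs::read("/etc/rustfs/certs/ca.crt").await?;
    set_global_root_cert(ca).await;

    let cert_pem = tokio::fs::read("/etc/rustfs/certs/client_cert.pem").await?;
    let key_pem = tokio::fs::read("/etc/rustfs/certs/client_key.pem").await?;
    set_global_mtls_identity(Some(MtlsIdentityPem { cert_pem, key_pem })).await;
    Ok(())
}
```
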
@@ -212,8 +212,6 @@ pub struct HealChannelRequest {
pub bucket: String,
/// Object prefix (optional)
pub object_prefix: Option<String>,
/// Object version ID (optional)
pub object_version_id: Option<String>,
/// Force start heal
pub force_start: bool,
/// Priority
@@ -348,7 +346,6 @@ pub fn create_heal_request(
id: Uuid::new_v4().to_string(),
bucket,
object_prefix,
object_version_id: None,
force_start,
priority: priority.unwrap_or_default(),
pool_index: None,
@@ -377,7 +374,6 @@ pub fn create_heal_request_with_options(
id: Uuid::new_v4().to_string(),
bucket,
object_prefix,
object_version_id: None,
force_start,
priority: priority.unwrap_or_default(),
pool_index,
@@ -507,7 +503,6 @@ pub async fn send_heal_disk(set_disk_id: String, priority: Option<HealChannelPri
bucket: "".to_string(),
object_prefix: None,
disk: Some(set_disk_id),
object_version_id: None,
force_start: false,
priority: priority.unwrap_or_default(),
pool_index: None,

@@ -49,21 +49,6 @@ pub const SERVICE_VERSION: &str = "1.0.0";
/// Default value: production
pub const ENVIRONMENT: &str = "production";

/// Default Access Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_ACCESS_KEY
/// Command line argument: --access-key
/// Example: RUSTFS_ACCESS_KEY=rustfsadmin
/// Example: --access-key rustfsadmin
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
/// Default Secret Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_SECRET_KEY
/// Command line argument: --secret-key
/// Example: RUSTFS_SECRET_KEY=rustfsadmin
/// Example: --secret-key rustfsadmin
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";

/// Default console enable
/// This is the default value for the console server.
/// It is used to enable or disable the console server.
@@ -185,6 +170,12 @@ pub const KI_B: usize = 1024;
/// Default value: 1048576
pub const MI_B: usize = 1024 * 1024;

/// Environment variable for gRPC authentication token
/// Used to set the authentication token for gRPC communication
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
/// Default value: No default value. RUSTFS_SECRET_KEY value is recommended.
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";

#[cfg(test)]
mod tests {
use super::*;
@@ -225,20 +216,6 @@ mod tests {
);
}

#[test]
fn test_security_constants() {
// Test security related constants
assert_eq!(DEFAULT_ACCESS_KEY, "rustfsadmin");
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");

assert_eq!(DEFAULT_SECRET_KEY, "rustfsadmin");
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");

// In production environment, access key and secret key should be different
// These are default values, so being the same is acceptable, but should be warned in documentation
println!("Warning: Default access key and secret key are the same. Change them in production!");
}

#[test]
fn test_file_path_constants() {
assert_eq!(RUSTFS_TLS_KEY, "rustfs_key.pem");
@@ -300,8 +277,6 @@ mod tests {
DEFAULT_LOG_LEVEL,
SERVICE_VERSION,
ENVIRONMENT,
DEFAULT_ACCESS_KEY,
DEFAULT_SECRET_KEY,
RUSTFS_TLS_KEY,
RUSTFS_TLS_CERT,
DEFAULT_ADDRESS,
@@ -331,29 +306,6 @@ mod tests {
assert_ne!(DEFAULT_CONSOLE_PORT, 0, "Console port should not be zero");
}

#[test]
fn test_security_best_practices() {
// Test security best practices

// These are default values, should be changed in production environments
println!("Security Warning: Default credentials detected!");
println!("Access Key: {DEFAULT_ACCESS_KEY}");
println!("Secret Key: {DEFAULT_SECRET_KEY}");
println!("These should be changed in production environments!");

// Verify that key lengths meet minimum security requirements
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");

// Check if default credentials contain common insecure patterns
let _insecure_patterns = ["admin", "password", "123456", "default"];
let _access_key_lower = DEFAULT_ACCESS_KEY.to_lowercase();
let _secret_key_lower = DEFAULT_SECRET_KEY.to_lowercase();

// Note: More security check logic can be added here
// For example, check if keys contain insecure patterns
}

#[test]
fn test_configuration_consistency() {
// Test configuration consistency

@@ -21,6 +21,5 @@ pub(crate) mod heal;
pub(crate) mod object;
pub(crate) mod profiler;
pub(crate) mod runtime;
pub(crate) mod scanner;
pub(crate) mod targets;
pub(crate) mod tls;

@@ -1,28 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// Environment variable name that specifies the data scanner start delay in seconds.
/// - Purpose: Define the delay between data scanner operations.
/// - Unit: seconds (u64).
/// - Valid values: any positive integer.
/// - Semantics: This delay controls how frequently the data scanner checks for and processes data; shorter delays lead to more responsive scanning but may increase system load.
/// - Example: `export RUSTFS_DATA_SCANNER_START_DELAY_SECS=10`
/// - Note: Choose an appropriate delay that balances scanning responsiveness with overall system performance.
pub const ENV_DATA_SCANNER_START_DELAY_SECS: &str = "RUSTFS_DATA_SCANNER_START_DELAY_SECS";

/// Default data scanner start delay in seconds if not specified in the environment variable.
/// - Value: 60 seconds.
/// - Rationale: This default interval provides a reasonable balance between scanning responsiveness and system load for most deployments.
/// - Adjustments: Users may modify this value via the `RUSTFS_DATA_SCANNER_START_DELAY_SECS` environment variable based on their specific scanning requirements and system performance.
pub const DEFAULT_DATA_SCANNER_START_DELAY_SECS: u64 = 60;
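
A small sketch of how this pair of constants is typically consumed; the fall-back-on-parse-failure behavior is an assumption, not necessarily what RustFS does:

```rust
use std::env;

// Resolve the scanner start delay: environment variable if present and
// parseable, otherwise the compiled-in default of 60 seconds.
fn scanner_start_delay_secs() -> u64 {
    env::var(ENV_DATA_SCANNER_START_DELAY_SECS)
        .ok()
        .and_then(|v| v.parse::<u64>().ok())
        .unwrap_or(DEFAULT_DATA_SCANNER_START_DELAY_SECS)
}
```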
@@ -35,3 +35,52 @@ pub const ENV_TRUST_SYSTEM_CA: &str = "RUSTFS_TRUST_SYSTEM_CA";
/// By default, RustFS does not trust system CA certificates.
/// To change this behavior, set the environment variable RUSTFS_TRUST_SYSTEM_CA=1
pub const DEFAULT_TRUST_SYSTEM_CA: bool = false;

/// Environment variable to trust leaf certificates as CA
/// When set to "1", RustFS will treat leaf certificates as CA certificates for trust validation.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_TRUST_LEAF_CERT_AS_CA=1
pub const ENV_TRUST_LEAF_CERT_AS_CA: &str = "RUSTFS_TRUST_LEAF_CERT_AS_CA";

/// Default value for trusting leaf certificates as CA
/// By default, RustFS does not trust leaf certificates as CA.
/// To change this behavior, set the environment variable RUSTFS_TRUST_LEAF_CERT_AS_CA=1
pub const DEFAULT_TRUST_LEAF_CERT_AS_CA: bool = false;

/// Default filename for client CA certificate
/// client_ca.crt (CA bundle for verifying client certificates in server mTLS)
pub const RUSTFS_CLIENT_CA_CERT_FILENAME: &str = "client_ca.crt";

/// Environment variable for client certificate file path
/// RUSTFS_MTLS_CLIENT_CERT
/// Specifies the file path to the client certificate used for mTLS authentication.
/// If not set, RustFS will look for the default filename "client_cert.pem" in the current directory.
/// To set, use the environment variable RUSTFS_MTLS_CLIENT_CERT=/path/to/client_cert.pem
pub const ENV_MTLS_CLIENT_CERT: &str = "RUSTFS_MTLS_CLIENT_CERT";

/// Default filename for client certificate
/// client_cert.pem
pub const RUSTFS_CLIENT_CERT_FILENAME: &str = "client_cert.pem";

/// Environment variable for client private key file path
/// RUSTFS_MTLS_CLIENT_KEY
/// Specifies the file path to the client private key used for mTLS authentication.
/// If not set, RustFS will look for the default filename "client_key.pem" in the current directory.
/// To set, use the environment variable RUSTFS_MTLS_CLIENT_KEY=/path/to/client_key.pem
pub const ENV_MTLS_CLIENT_KEY: &str = "RUSTFS_MTLS_CLIENT_KEY";

/// Default filename for client private key
/// client_key.pem
pub const RUSTFS_CLIENT_KEY_FILENAME: &str = "client_key.pem";

/// RUSTFS_SERVER_MTLS_ENABLE
/// Environment variable to enable server mTLS
/// When set to "1", RustFS server will require client certificates for authentication.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_SERVER_MTLS_ENABLE=1
pub const ENV_SERVER_MTLS_ENABLE: &str = "RUSTFS_SERVER_MTLS_ENABLE";

/// Default value for enabling server mTLS
/// By default, RustFS server mTLS is disabled.
/// To change this behavior, set the environment variable RUSTFS_SERVER_MTLS_ENABLE=1
pub const DEFAULT_SERVER_MTLS_ENABLE: bool = false;

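A sketch of resolving the client identity from these constants; how RustFS actually combines the environment variables and default filenames is an assumption here (`certs_dir` is a hypothetical parameter):

```rust
use std::env;
use std::path::PathBuf;

// Prefer explicit env-var paths; otherwise fall back to the default
// filenames inside `certs_dir`.
fn resolve_client_identity_paths(certs_dir: &str) -> (PathBuf, PathBuf) {
    let cert = env::var(ENV_MTLS_CLIENT_CERT)
        .map(PathBuf::from)
        .unwrap_or_else(|_| PathBuf::from(certs_dir).join(RUSTFS_CLIENT_CERT_FILENAME));
    let key = env::var(ENV_MTLS_CLIENT_KEY)
        .map(PathBuf::from)
        .unwrap_or_else(|_| PathBuf::from(certs_dir).join(RUSTFS_CLIENT_KEY_FILENAME));
    (cert, key)
}
```
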
@@ -33,8 +33,6 @@ pub use constants::profiler::*;
#[cfg(feature = "constants")]
pub use constants::runtime::*;
#[cfg(feature = "constants")]
pub use constants::scanner::*;
#[cfg(feature = "constants")]
pub use constants::targets::*;
#[cfg(feature = "constants")]
pub use constants::tls::*;

21
crates/credentials/Cargo.toml
Normal file
@@ -0,0 +1,21 @@
[package]
name = "rustfs-credentials"
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Credentials management utilities for RustFS, enabling secure handling of authentication and authorization data."
keywords = ["rustfs", "Minio", "credentials", "authentication", "authorization"]
categories = ["web-programming", "development-tools", "data-structures", "security"]

[dependencies]
base64-simd = { workspace = true }
rand = { workspace = true }
serde = { workspace = true }
serde_json.workspace = true
time = { workspace = true, features = ["serde-human-readable"] }

[lints]
workspace = true
44
crates/credentials/README.md
Normal file
@@ -0,0 +1,44 @@
[](https://rustfs.com)

# RustFS Credentials - Credential Management Module

<p align="center">
<strong>A module for managing credentials within the RustFS distributed object storage system.</strong>
</p>

<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

---

This module provides a secure and efficient way to handle various types of credentials,
such as API keys, access tokens, and cryptographic keys, required for interacting with
the RustFS ecosystem and external services.

## 📖 Overview

**RustFS Credentials** is a module dedicated to managing credentials for the [RustFS](https://rustfs.com) distributed
object storage system. For the complete RustFS experience,
please visit the [main RustFS repository](https://github.com/rustfs/rustfs).

## ✨ Features

- Secure storage and retrieval of credentials
- Support for multiple credential types (API keys, tokens, etc.)
- Encryption of sensitive credential data
- Integration with external secret management systems
- Easy-to-use API for credential management
- Credential rotation and expiration handling

## 📚 Documentation

For comprehensive documentation, examples, and usage guides, please visit the
main [RustFS repository](https://github.com/rustfs/rustfs).

## 📄 License

This project is licensed under the Apache License 2.0 - see the [LICENSE](../../LICENSE) file for details.
94
crates/credentials/src/constants.rs
Normal file
@@ -0,0 +1,94 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// Default Access Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_ACCESS_KEY
/// Command line argument: --access-key
/// Example: RUSTFS_ACCESS_KEY=rustfsadmin
/// Example: --access-key rustfsadmin
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
/// Default Secret Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_SECRET_KEY
/// Command line argument: --secret-key
/// Example: RUSTFS_SECRET_KEY=rustfsadmin
/// Example: --secret-key rustfsadmin
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";

/// Environment variable for gRPC authentication token
/// Used to set the authentication token for gRPC communication
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
/// Default value: No default value. RUSTFS_SECRET_KEY value is recommended.
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";

/// IAM Policy Types
/// Used to differentiate between embedded and inherited policies
/// Example: "embedded-policy" or "inherited-policy"
/// Default value: "embedded-policy"
pub const EMBEDDED_POLICY_TYPE: &str = "embedded-policy";

/// IAM Policy Types
/// Used to differentiate between embedded and inherited policies
/// Example: "embedded-policy" or "inherited-policy"
/// Default value: "inherited-policy"
pub const INHERITED_POLICY_TYPE: &str = "inherited-policy";

/// IAM Policy Claim Name for Service Account
/// Used to identify the service account policy claim in JWT tokens
/// Example: "sa-policy"
/// Default value: "sa-policy"
pub const IAM_POLICY_CLAIM_NAME_SA: &str = "sa-policy";
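
For illustration only, a sketch of a service-account JWT claims object carrying these markers; the claim layout is an assumption, not the actual RustFS token format:

```rust
use serde_json::{json, Map, Value};

// Build a claims object whose IAM_POLICY_CLAIM_NAME_SA entry tags the
// attached policy as embedded (vs. inherited from the parent user).
fn sample_sa_claims() -> Value {
    let mut claims = Map::new();
    claims.insert(
        IAM_POLICY_CLAIM_NAME_SA.to_string(),
        json!({ "policy_type": EMBEDDED_POLICY_TYPE, "policy": { "Version": "2012-10-17" } }),
    );
    Value::Object(claims)
}
```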

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_security_constants() {
// Test security related constants
assert_eq!(DEFAULT_ACCESS_KEY, "rustfsadmin");
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");

assert_eq!(DEFAULT_SECRET_KEY, "rustfsadmin");
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");

// In production environment, access key and secret key should be different
// These are default values, so being the same is acceptable, but should be warned in documentation
println!("Warning: Default access key and secret key are the same. Change them in production!");
}

#[test]
fn test_security_best_practices() {
// Test security best practices

// These are default values, should be changed in production environments
println!("Security Warning: Default credentials detected!");
println!("Access Key: {DEFAULT_ACCESS_KEY}");
println!("Secret Key: {DEFAULT_SECRET_KEY}");
println!("These should be changed in production environments!");

// Verify that key lengths meet minimum security requirements
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");

// Check if default credentials contain common insecure patterns
let _insecure_patterns = ["admin", "password", "123456", "default"];
let _access_key_lower = DEFAULT_ACCESS_KEY.to_lowercase();
let _secret_key_lower = DEFAULT_SECRET_KEY.to_lowercase();

// Note: More security check logic can be added here
// For example, check if keys contain insecure patterns
}
}
386
crates/credentials/src/credentials.rs
Normal file
@@ -0,0 +1,386 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{DEFAULT_SECRET_KEY, ENV_GRPC_AUTH_TOKEN, IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
use rand::{Rng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::env;
use std::io::Error;
use std::sync::OnceLock;
use time::OffsetDateTime;

/// Global active credentials
static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();

/// Global gRPC authentication token
static GLOBAL_GRPC_AUTH_TOKEN: OnceLock<String> = OnceLock::new();

/// Initialize the global action credentials
///
/// # Arguments
/// * `ak` - Optional access key
/// * `sk` - Optional secret key
///
/// # Returns
/// * `Result<(), Box<Credentials>>` - Ok if successful, Err with existing credentials if already initialized
///
/// # Panics
/// This function panics if automatic credential generation fails when `ak` or `sk`
/// are `None`, for example if the random number generator fails while calling
/// `gen_access_key` or `gen_secret_key`.
pub fn init_global_action_credentials(ak: Option<String>, sk: Option<String>) -> Result<(), Box<Credentials>> {
let ak = ak.unwrap_or_else(|| gen_access_key(20).expect("Failed to generate access key"));
let sk = sk.unwrap_or_else(|| gen_secret_key(32).expect("Failed to generate secret key"));

let cred = Credentials {
access_key: ak,
secret_key: sk,
..Default::default()
};

GLOBAL_ACTIVE_CRED.set(cred).map_err(|e| {
Box::new(Credentials {
access_key: e.access_key.clone(),
..Default::default()
})
})
}
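
A hedged usage sketch for the initializer above; reading the keys from `RUSTFS_ACCESS_KEY`/`RUSTFS_SECRET_KEY` mirrors the documented environment variables, but this exact bootstrap function is illustrative:

```rust
// Call once at startup: missing env vars fall back to generated keys.
fn bootstrap_credentials() {
    if let Err(existing) = init_global_action_credentials(
        std::env::var("RUSTFS_ACCESS_KEY").ok(),
        std::env::var("RUSTFS_SECRET_KEY").ok(),
    ) {
        // Already initialized elsewhere; `existing` echoes the stored access key.
        eprintln!("credentials already initialized for {}", existing.access_key);
    }
}
```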
|
||||
/// Get the global action credentials
|
||||
pub fn get_global_action_cred() -> Option<Credentials> {
|
||||
GLOBAL_ACTIVE_CRED.get().cloned()
|
||||
}
|
||||
|
||||
/// Get the global secret key
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Option<String>` - The global secret key, if set
|
||||
///
|
||||
pub fn get_global_secret_key_opt() -> Option<String> {
|
||||
GLOBAL_ACTIVE_CRED.get().map(|cred| cred.secret_key.clone())
|
||||
}
|
||||
|
||||
/// Get the global secret key
|
||||
///
|
||||
/// # Returns
|
||||
/// * `String` - The global secret key, or empty string if not set
|
||||
///
|
||||
pub fn get_global_secret_key() -> String {
|
||||
GLOBAL_ACTIVE_CRED
|
||||
.get()
|
||||
.map(|cred| cred.secret_key.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Get the global access key
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Option<String>` - The global access key, if set
|
||||
///
|
||||
pub fn get_global_access_key_opt() -> Option<String> {
|
||||
GLOBAL_ACTIVE_CRED.get().map(|cred| cred.access_key.clone())
|
||||
}
|
||||
|
||||
/// Get the global access key
|
||||
///
|
||||
/// # Returns
|
||||
/// * `String` - The global access key, or empty string if not set
|
||||
///
|
||||
pub fn get_global_access_key() -> String {
|
||||
GLOBAL_ACTIVE_CRED
|
||||
.get()
|
||||
.map(|cred| cred.access_key.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Generates a random access key of the specified length.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `length` - The length of the access key to generate
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Result<String>` - A result containing the generated access key or an error if the length is too short
|
||||
///
|
||||
/// # Errors
|
||||
/// This function will return an error if the specified length is less than 3.
|
||||
///
|
||||
/// # Examples
/// ```no_run
/// use rustfs_credentials::gen_access_key;
///
/// let access_key = gen_access_key(16).unwrap();
/// println!("Generated access key: {}", access_key);
/// ```
///
pub fn gen_access_key(length: usize) -> std::io::Result<String> {
    const ALPHA_NUMERIC_TABLE: [char; 36] = [
        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
    ];

    if length < 3 {
        return Err(Error::other("access key length is too short"));
    }

    let mut result = String::with_capacity(length);
    let mut rng = rand::rng();

    for _ in 0..length {
        result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]);
    }

    Ok(result)
}

/// Generates a random secret key of the specified length.
///
/// # Arguments
/// * `length` - The length of the secret key to generate
///
/// # Returns
/// * `Result<String>` - A result containing the generated secret key or an error if the length is too short
///
/// # Errors
/// This function will return an error if the specified length is less than 8.
///
/// # Examples
/// ```no_run
/// use rustfs_credentials::gen_secret_key;
///
/// let secret_key = gen_secret_key(32).unwrap();
/// println!("Generated secret key: {}", secret_key);
/// ```
///
pub fn gen_secret_key(length: usize) -> std::io::Result<String> {
    use base64_simd::URL_SAFE_NO_PAD;

    if length < 8 {
        return Err(Error::other("secret key length is too short"));
    }
    let mut rng = rand::rng();

    let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)];
    rng.fill_bytes(&mut key);

    let encoded = URL_SAFE_NO_PAD.encode_to_string(&key);
    let key_str = encoded.replace("/", "+");

    Ok(key_str)
}

/// Get the gRPC authentication token from environment variable
///
/// # Returns
/// * `String` - The gRPC authentication token
///
pub fn get_grpc_token() -> String {
    GLOBAL_GRPC_AUTH_TOKEN
        .get_or_init(|| {
            env::var(ENV_GRPC_AUTH_TOKEN)
                .unwrap_or_else(|_| get_global_secret_key_opt().unwrap_or_else(|| DEFAULT_SECRET_KEY.to_string()))
        })
        .clone()
}

/// Credentials structure
///
/// Fields:
/// - access_key: Access key string
/// - secret_key: Secret key string
/// - session_token: Session token string
/// - expiration: Optional expiration time as OffsetDateTime
/// - status: Status string (e.g., "active", "off")
/// - parent_user: Parent user string
/// - groups: Optional list of groups
/// - claims: Optional map of claims
/// - name: Optional name string
/// - description: Optional description string
///
#[derive(Serialize, Deserialize, Clone, Default, Debug)]
pub struct Credentials {
    pub access_key: String,
    pub secret_key: String,
    pub session_token: String,
    pub expiration: Option<OffsetDateTime>,
    pub status: String,
    pub parent_user: String,
    pub groups: Option<Vec<String>>,
    pub claims: Option<HashMap<String, Value>>,
    pub name: Option<String>,
    pub description: Option<String>,
}

impl Credentials {
    pub fn is_expired(&self) -> bool {
        if self.expiration.is_none() {
            return false;
        }

        self.expiration
            .as_ref()
            .map(|e| OffsetDateTime::now_utc() > *e)
            .unwrap_or(false)
    }

    pub fn is_temp(&self) -> bool {
        !self.session_token.is_empty() && !self.is_expired()
    }

    pub fn is_service_account(&self) -> bool {
        self.claims
            .as_ref()
            .map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|_| !self.parent_user.is_empty()))
            .unwrap_or_default()
    }

    pub fn is_implied_policy(&self) -> bool {
        if self.is_service_account() {
            return self
                .claims
                .as_ref()
                .map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|v| v == INHERITED_POLICY_TYPE))
                .unwrap_or_default();
        }

        false
    }

    pub fn is_valid(&self) -> bool {
        if self.status == "off" {
            return false;
        }

        self.access_key.len() >= 3 && self.secret_key.len() >= 8 && !self.is_expired()
    }

    pub fn is_owner(&self) -> bool {
        false
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
    use time::Duration;

    #[test]
    fn test_credentials_is_expired() {
        let mut cred = Credentials::default();
        assert!(!cred.is_expired());

        cred.expiration = Some(OffsetDateTime::now_utc() + Duration::hours(1));
        assert!(!cred.is_expired());

        cred.expiration = Some(OffsetDateTime::now_utc() - Duration::hours(1));
        assert!(cred.is_expired());
    }

    #[test]
    fn test_credentials_is_temp() {
        let mut cred = Credentials::default();
        assert!(!cred.is_temp());

        cred.session_token = "token".to_string();
        assert!(cred.is_temp());

        cred.expiration = Some(OffsetDateTime::now_utc() - Duration::hours(1));
        assert!(!cred.is_temp());
    }

    #[test]
    fn test_credentials_is_service_account() {
        let mut cred = Credentials::default();
        assert!(!cred.is_service_account());

        let mut claims = HashMap::new();
        claims.insert(IAM_POLICY_CLAIM_NAME_SA.to_string(), Value::String("policy".to_string()));
        cred.claims = Some(claims);
        cred.parent_user = "parent".to_string();
        assert!(cred.is_service_account());
    }

    #[test]
    fn test_credentials_is_implied_policy() {
        let mut cred = Credentials::default();
        assert!(!cred.is_implied_policy());

        let mut claims = HashMap::new();
        claims.insert(IAM_POLICY_CLAIM_NAME_SA.to_string(), Value::String(INHERITED_POLICY_TYPE.to_string()));
        cred.claims = Some(claims);
        cred.parent_user = "parent".to_string();
        assert!(cred.is_implied_policy());
    }

    #[test]
    fn test_credentials_is_valid() {
        let mut cred = Credentials::default();
        assert!(!cred.is_valid());

        cred.access_key = "abc".to_string();
        cred.secret_key = "12345678".to_string();
        assert!(cred.is_valid());

        cred.status = "off".to_string();
        assert!(!cred.is_valid());
    }

    #[test]
    fn test_credentials_is_owner() {
        let cred = Credentials::default();
        assert!(!cred.is_owner());
    }

    #[test]
    fn test_global_credentials_flow() {
        // OnceLock can only be set once per process, so all tests that touch the
        // global credentials are grouped into this single flow.
        // If the state has already been initialized (possibly by another test),
        // verify the resulting values directly.
        if get_global_action_cred().is_none() {
            // Verify that the initial state is empty
            assert!(get_global_access_key_opt().is_none());
            assert_eq!(get_global_access_key(), "");
            assert!(get_global_secret_key_opt().is_none());
            assert_eq!(get_global_secret_key(), "");

            // Initialize
            let test_ak = "test_access_key".to_string();
            let test_sk = "test_secret_key_123456".to_string();
            init_global_action_credentials(Some(test_ak.clone()), Some(test_sk.clone())).ok();
        }

        // Verify the state after initialization
        let cred = get_global_action_cred().expect("Global credentials should be set");
        assert!(!cred.access_key.is_empty());
        assert!(!cred.secret_key.is_empty());

        assert!(get_global_access_key_opt().is_some());
        assert!(!get_global_access_key().is_empty());
        assert!(get_global_secret_key_opt().is_some());
        assert!(!get_global_secret_key().is_empty());
    }

    #[test]
    fn test_init_global_credentials_auto_gen() {
        // If the global state hasn't been initialized yet, exercise the automatic generation logic
        if get_global_action_cred().is_none() {
            init_global_action_credentials(None, None).ok();
            let ak = get_global_access_key();
            let sk = get_global_secret_key();
            assert_eq!(ak.len(), 20);
            assert_eq!(sk.len(), 32);
        }
    }
}
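A minimal usage sketch of the credentials API above (illustrative only, not part of the diff; the bootstrap function and its error handling are hypothetical):

use rustfs_credentials::{get_global_action_cred, get_grpc_token, init_global_action_credentials};

fn bootstrap() {
    // Passing None for both keys triggers automatic generation
    // (a 20-character access key and a 32-character secret key).
    if let Err(existing) = init_global_action_credentials(None, None) {
        // Already initialized elsewhere; the error echoes the existing access key.
        eprintln!("credentials already set for access key {}", existing.access_key);
    }

    let cred = get_global_action_cred().expect("credentials should be initialized");
    assert!(cred.is_valid());

    // Token resolution falls back from the ENV_GRPC_AUTH_TOKEN environment
    // variable to the global secret key, then to DEFAULT_SECRET_KEY.
    let _token = get_grpc_token();
}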
@@ -12,23 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![warn(
    // missing_docs,
    rustdoc::missing_crate_level_docs,
    unreachable_pub,
    rust_2018_idioms
)]
mod constants;
mod credentials;

pub mod data_usage;
pub mod data_usage_define;
pub mod error;
pub mod last_minute;
pub mod metrics;
pub mod scanner;
pub mod scanner_folder;
pub mod scanner_io;

pub use data_usage_define::*;
pub use error::ScannerError;
pub use scanner::init_data_scanner;
pub use constants::*;
pub use credentials::*;
@@ -18,6 +18,9 @@ mod reliant;
#[cfg(test)]
pub mod common;

#[cfg(test)]
mod version_id_regression_test;

// Data usage regression tests
#[cfg(test)]
mod data_usage_test;

398
crates/e2e_test/src/version_id_regression_test.rs
Normal file
@@ -0,0 +1,398 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Regression test for Issue #1066: Veeam VBR - S3 returned empty versionId
//!
//! This test verifies that:
//! 1. PutObject returns version_id when versioning is enabled
//! 2. CopyObject returns version_id when versioning is enabled
//! 3. CompleteMultipartUpload returns version_id when versioning is enabled
//! 4. Basic S3 operations still work correctly (no regression)
//! 5. Operations on non-versioned buckets work as expected

#[cfg(test)]
mod tests {
    use crate::common::{RustFSTestEnvironment, init_logging};
    use aws_sdk_s3::Client;
    use aws_sdk_s3::primitives::ByteStream;
    use aws_sdk_s3::types::{BucketVersioningStatus, CompletedMultipartUpload, CompletedPart, VersioningConfiguration};
    use serial_test::serial;
    use tracing::info;

    fn create_s3_client(env: &RustFSTestEnvironment) -> Client {
        env.create_s3_client()
    }

    async fn create_bucket(client: &Client, bucket: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        match client.create_bucket().bucket(bucket).send().await {
            Ok(_) => {
                info!("✅ Bucket {} created successfully", bucket);
                Ok(())
            }
            Err(e) => {
                if e.to_string().contains("BucketAlreadyOwnedByYou") || e.to_string().contains("BucketAlreadyExists") {
                    info!("ℹ️ Bucket {} already exists", bucket);
                    Ok(())
                } else {
                    Err(Box::new(e))
                }
            }
        }
    }

    async fn enable_versioning(client: &Client, bucket: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let versioning_config = VersioningConfiguration::builder()
            .status(BucketVersioningStatus::Enabled)
            .build();

        client
            .put_bucket_versioning()
            .bucket(bucket)
            .versioning_configuration(versioning_config)
            .send()
            .await?;

        info!("✅ Versioning enabled for bucket {}", bucket);
        Ok(())
    }

    /// Test 1: PutObject should return version_id when versioning is enabled
    /// This directly addresses the Veeam issue from #1066
    #[tokio::test]
    #[serial]
    async fn test_put_object_returns_version_id_with_versioning() {
        init_logging();
        info!("🧪 TEST: PutObject returns version_id with versioning enabled");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-put-version-id";

        create_bucket(&client, bucket).await.expect("Failed to create bucket");
        enable_versioning(&client, bucket).await.expect("Failed to enable versioning");

        let key = "test-file.txt";
        let content = b"Test content for version ID test";

        info!("📤 Uploading object with key: {}", key);
        let result = client
            .put_object()
            .bucket(bucket)
            .key(key)
            .body(ByteStream::from_static(content))
            .send()
            .await;

        assert!(result.is_ok(), "PutObject failed: {:?}", result.err());
        let output = result.unwrap();

        info!("📥 PutObject response - version_id: {:?}", output.version_id);
        assert!(
            output.version_id.is_some(),
            "❌ FAILED: version_id should be present when versioning is enabled"
        );
        assert!(
            !output.version_id.as_ref().unwrap().is_empty(),
            "❌ FAILED: version_id should not be empty"
        );

        info!("✅ PASSED: PutObject correctly returns version_id");
    }

    /// Test 2: CopyObject should return version_id when versioning is enabled
    #[tokio::test]
    #[serial]
    async fn test_copy_object_returns_version_id_with_versioning() {
        init_logging();
        info!("🧪 TEST: CopyObject returns version_id with versioning enabled");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-copy-version-id";

        create_bucket(&client, bucket).await.expect("Failed to create bucket");
        enable_versioning(&client, bucket).await.expect("Failed to enable versioning");

        let source_key = "source-file.txt";
        let dest_key = "dest-file.txt";
        let content = b"Content to copy";

        // First, create source object
        client
            .put_object()
            .bucket(bucket)
            .key(source_key)
            .body(ByteStream::from_static(content))
            .send()
            .await
            .expect("Failed to create source object");

        info!("📤 Copying object from {} to {}", source_key, dest_key);
        let copy_result = client
            .copy_object()
            .bucket(bucket)
            .key(dest_key)
            .copy_source(format!("{}/{}", bucket, source_key))
            .send()
            .await;

        assert!(copy_result.is_ok(), "CopyObject failed: {:?}", copy_result.err());
        let output = copy_result.unwrap();

        info!("📥 CopyObject response - version_id: {:?}", output.version_id);
        assert!(
            output.version_id.is_some(),
            "❌ FAILED: version_id should be present when versioning is enabled"
        );
        assert!(
            !output.version_id.as_ref().unwrap().is_empty(),
            "❌ FAILED: version_id should not be empty"
        );

        info!("✅ PASSED: CopyObject correctly returns version_id");
    }

    /// Test 3: CompleteMultipartUpload should return version_id when versioning is enabled
    #[tokio::test]
    #[serial]
    async fn test_multipart_upload_returns_version_id_with_versioning() {
        init_logging();
        info!("🧪 TEST: CompleteMultipartUpload returns version_id with versioning enabled");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-multipart-version-id";

        create_bucket(&client, bucket).await.expect("Failed to create bucket");
        enable_versioning(&client, bucket).await.expect("Failed to enable versioning");

        let key = "multipart-file.txt";
        let content = b"Part 1 content for multipart upload test";

        info!("📤 Creating multipart upload for key: {}", key);
        let create_result = client
            .create_multipart_upload()
            .bucket(bucket)
            .key(key)
            .send()
            .await
            .expect("Failed to create multipart upload");

        let upload_id = create_result.upload_id().expect("No upload_id returned");

        info!("📤 Uploading part 1");
        let upload_part_result = client
            .upload_part()
            .bucket(bucket)
            .key(key)
            .upload_id(upload_id)
            .part_number(1)
            .body(ByteStream::from_static(content))
            .send()
            .await
            .expect("Failed to upload part");

        let etag = upload_part_result.e_tag().expect("No etag returned").to_string();

        let completed_part = CompletedPart::builder().part_number(1).e_tag(etag).build();

        let completed_upload = CompletedMultipartUpload::builder().parts(completed_part).build();

        info!("📤 Completing multipart upload");
        let complete_result = client
            .complete_multipart_upload()
            .bucket(bucket)
            .key(key)
            .upload_id(upload_id)
            .multipart_upload(completed_upload)
            .send()
            .await;

        assert!(complete_result.is_ok(), "CompleteMultipartUpload failed: {:?}", complete_result.err());
        let output = complete_result.unwrap();

        info!("📥 CompleteMultipartUpload response - version_id: {:?}", output.version_id);
        assert!(
            output.version_id.is_some(),
            "❌ FAILED: version_id should be present when versioning is enabled"
        );
        assert!(
            !output.version_id.as_ref().unwrap().is_empty(),
            "❌ FAILED: version_id should not be empty"
        );

        info!("✅ PASSED: CompleteMultipartUpload correctly returns version_id");
    }

    /// Test 4: PutObject should NOT return version_id when versioning is NOT enabled
    /// This ensures we didn't break non-versioned buckets
    #[tokio::test]
    #[serial]
    async fn test_put_object_without_versioning() {
        init_logging();
        info!("🧪 TEST: PutObject behavior without versioning (no regression)");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-no-versioning";

        create_bucket(&client, bucket).await.expect("Failed to create bucket");
        // Note: NOT enabling versioning here

        let key = "test-file.txt";
        let content = b"Test content without versioning";

        info!("📤 Uploading object to non-versioned bucket");
        let result = client
            .put_object()
            .bucket(bucket)
            .key(key)
            .body(ByteStream::from_static(content))
            .send()
            .await;

        assert!(result.is_ok(), "PutObject failed: {:?}", result.err());
        let output = result.unwrap();

        info!("📥 PutObject response - version_id: {:?}", output.version_id);
        // version_id can be None or Some("null") for non-versioned buckets
        info!("✅ PASSED: PutObject works correctly without versioning");
    }

    /// Test 5: Basic S3 operations still work correctly (no regression)
    #[tokio::test]
    #[serial]
    async fn test_basic_s3_operations_no_regression() {
        init_logging();
        info!("🧪 TEST: Basic S3 operations work correctly (no regression)");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-basic-operations";

        create_bucket(&client, bucket).await.expect("Failed to create bucket");
        enable_versioning(&client, bucket).await.expect("Failed to enable versioning");

        let key = "test-basic-file.txt";
        let content = b"Basic operations test content";

        // Test PUT
        info!("📤 Testing PUT operation");
        let put_result = client
            .put_object()
            .bucket(bucket)
            .key(key)
            .body(ByteStream::from_static(content))
            .send()
            .await;
        assert!(put_result.is_ok(), "PUT operation failed");
        let _version_id = put_result.unwrap().version_id;

        // Test GET
        info!("📥 Testing GET operation");
        let get_result = client.get_object().bucket(bucket).key(key).send().await;
        assert!(get_result.is_ok(), "GET operation failed");
        let body = get_result.unwrap().body.collect().await.unwrap().to_vec();
        assert_eq!(body, content, "Content mismatch after GET");

        // Test HEAD
        info!("📋 Testing HEAD operation");
        let head_result = client.head_object().bucket(bucket).key(key).send().await;
        assert!(head_result.is_ok(), "HEAD operation failed");

        // Test LIST
        info!("📝 Testing LIST operation");
        let list_result = client.list_objects_v2().bucket(bucket).send().await;
        assert!(list_result.is_ok(), "LIST operation failed");
        let list_output = list_result.unwrap();
        let objects = list_output.contents();
        assert!(objects.iter().any(|obj| obj.key() == Some(key)), "Object not found in LIST");

        // Test DELETE
        info!("🗑️ Testing DELETE operation");
        let delete_result = client.delete_object().bucket(bucket).key(key).send().await;
        assert!(delete_result.is_ok(), "DELETE operation failed");

        // Verify object is deleted (should return NoSuchKey or version marker)
        let get_after_delete = client.get_object().bucket(bucket).key(key).send().await;
        assert!(
            get_after_delete.is_err() || get_after_delete.unwrap().delete_marker == Some(true),
            "Object should be deleted or have delete marker"
        );

        info!("✅ PASSED: All basic S3 operations work correctly");
    }

    /// Test 6: Veeam-specific scenario simulation
    /// Simulates the exact workflow that Veeam uses when backing up data
    #[tokio::test]
    #[serial]
    async fn test_veeam_backup_workflow_simulation() {
        init_logging();
        info!("🧪 TEST: Veeam VBR backup workflow simulation (Issue #1066)");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "veeam-backup-test";

        create_bucket(&client, bucket).await.expect("Failed to create bucket");
        enable_versioning(&client, bucket).await.expect("Failed to enable versioning");

        // Veeam typically creates multiple objects in a backup session
        let test_paths = vec![
            "Veeam/Backup/Clients/test-client-id/test-backup-id/CloudStg/Meta/Blocks/History/CheckpointHistory.dat",
            "Veeam/Backup/Clients/test-client-id/test-backup-id/Metadata/Lock/create.checkpoint/declare",
        ];

        for path in test_paths {
            info!("📤 Simulating Veeam upload to: {}", path);
            let content = format!("Veeam backup data for {}", path);

            let put_result = client
                .put_object()
                .bucket(bucket)
                .key(path)
                .body(ByteStream::from(content.into_bytes()))
                .send()
                .await;

            assert!(put_result.is_ok(), "Veeam upload failed for path: {}", path);
            let output = put_result.unwrap();

            info!("📥 Response version_id: {:?}", output.version_id);
            assert!(output.version_id.is_some(), "❌ FAILED: Veeam expects version_id for path: {}", path);
            assert!(
                !output.version_id.as_ref().unwrap().is_empty(),
                "❌ FAILED: version_id should not be empty for path: {}",
                path
            );

            info!("✅ Veeam upload successful with version_id for: {}", path);
        }

        info!("✅ PASSED: Veeam backup workflow simulation completed successfully");
    }
}
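Outside the test harness, the core Issue #1066 check reduces to a few SDK calls (an illustrative sketch; the bucket name and client setup are placeholders):

async fn check_version_id(client: &aws_sdk_s3::Client) -> Result<(), Box<dyn std::error::Error>> {
    let out = client
        .put_object()
        .bucket("versioned-bucket") // versioning must already be enabled on this bucket
        .key("probe.txt")
        .body(aws_sdk_s3::primitives::ByteStream::from_static(b"probe"))
        .send()
        .await?;

    // On a versioning-enabled bucket the response must carry a non-empty version id.
    assert!(out.version_id().is_some_and(|v| !v.is_empty()));
    Ok(())
}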
@@ -34,18 +34,24 @@ workspace = true
default = []

[dependencies]
rustfs-filemeta.workspace = true
rustfs-utils = { workspace = true, features = ["full"] }
rustfs-rio.workspace = true
rustfs-signer.workspace = true
rustfs-checksums.workspace = true
rustfs-config = { workspace = true, features = ["constants", "notify", "audit"] }
rustfs-credentials = { workspace = true }
rustfs-common.workspace = true
rustfs-policy.workspace = true
rustfs-protos.workspace = true
async-trait.workspace = true
bytes.workspace = true
byteorder = { workspace = true }
rustfs-common.workspace = true
rustfs-policy.workspace = true
chrono.workspace = true
glob = { workspace = true }
thiserror.workspace = true
flatbuffers.workspace = true
futures.workspace = true
futures-util.workspace = true
tracing.workspace = true
serde.workspace = true
time.workspace = true
@@ -54,8 +60,6 @@ serde_json.workspace = true
quick-xml = { workspace = true, features = ["serialize", "async-tokio"] }
s3s.workspace = true
http.workspace = true
http-body = { workspace = true }
http-body-util.workspace = true
url.workspace = true
uuid = { workspace = true, features = ["v4", "fast-rng", "serde"] }
reed-solomon-simd = { workspace = true }
@@ -63,7 +67,6 @@ lazy_static.workspace = true
rustfs-lock.workspace = true
regex = { workspace = true }
path-absolutize = { workspace = true }
rustfs-protos.workspace = true
rmp.workspace = true
rmp-serde.workspace = true
tokio-util = { workspace = true, features = ["io", "compat"] }
@@ -94,11 +97,6 @@ aws-sdk-s3 = { workspace = true }
urlencoding = { workspace = true }
smallvec = { workspace = true }
shadow-rs.workspace = true
rustfs-filemeta.workspace = true
rustfs-utils = { workspace = true, features = ["full"] }
rustfs-rio.workspace = true
rustfs-signer.workspace = true
rustfs-checksums.workspace = true
async-recursion.workspace = true
aws-credential-types = { workspace = true }
aws-smithy-types = { workspace = true }
@@ -1,19 +0,0 @@
# ECStore - Erasure Coding Storage

ECStore provides erasure coding functionality for the RustFS project, using high-performance Reed-Solomon SIMD implementation for optimal performance.

## Features

- **Reed-Solomon Implementation**: High-performance SIMD-optimized erasure coding
- **Cross-Platform Compatibility**: Support for x86_64, aarch64, and other architectures
- **Performance Optimized**: SIMD instructions for maximum throughput
- **Thread Safety**: Safe concurrent access with caching optimizations
- **Scalable**: Excellent performance for high-throughput scenarios

## Documentation

For complete documentation, examples, and usage information, please visit the main [RustFS repository](https://github.com/rustfs/rustfs).

## License

This project is licensed under the Apache License, Version 2.0.
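To make the feature list concrete, here is a small sketch of 3+2 erasure coding with the reed-solomon-simd crate referenced in the Cargo.toml above (illustrative only; it assumes the crate's top-level convenience API, and shard counts and sizes are simplified):

fn erasure_roundtrip() -> Result<(), reed_solomon_simd::Error> {
    // Three equal-length data shards (shard length must be even).
    let original: Vec<Vec<u8>> = vec![vec![1u8; 64], vec![2u8; 64], vec![3u8; 64]];

    // Two recovery shards: any two of the five shards may be lost.
    let recovery = reed_solomon_simd::encode(3, 2, &original)?;

    // Recover data shard 0 from data shards 1 and 2 plus one recovery shard.
    let restored = reed_solomon_simd::decode(
        3,
        2,
        [(1, &original[1]), (2, &original[2])],
        [(0, &recovery[0])],
    )?;
    assert_eq!(restored[&0], original[0]);
    Ok(())
}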
@@ -953,7 +953,7 @@ impl LifecycleOps for ObjectInfo {
        lifecycle::ObjectOpts {
            name: self.name.clone(),
            user_tags: self.user_tags.clone(),
            version_id: self.version_id.clone(),
            version_id: self.version_id.map(|v| v.to_string()).unwrap_or_default(),
            mod_time: self.mod_time,
            size: self.size as usize,
            is_latest: self.is_latest,
@@ -1067,7 +1067,7 @@ pub async fn eval_action_from_lifecycle(
    event
}

pub async fn apply_transition_rule(event: &lifecycle::Event, src: &LcEventSrc, oi: &ObjectInfo) -> bool {
async fn apply_transition_rule(event: &lifecycle::Event, src: &LcEventSrc, oi: &ObjectInfo) -> bool {
    if oi.delete_marker || oi.is_dir {
        return false;
    }
@@ -1161,7 +1161,7 @@ pub async fn apply_expiry_on_non_transitioned_objects(
    true
}

pub async fn apply_expiry_rule(event: &lifecycle::Event, src: &LcEventSrc, oi: &ObjectInfo) -> bool {
async fn apply_expiry_rule(event: &lifecycle::Event, src: &LcEventSrc, oi: &ObjectInfo) -> bool {
    let mut expiry_state = GLOBAL_ExpiryState.write().await;
    expiry_state.enqueue_by_days(oi, event, src).await;
    true
@@ -1,192 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use s3s::dto::{
    BucketLifecycleConfiguration, ObjectLockConfiguration, ObjectLockEnabled, ObjectLockLegalHoldStatus, ObjectLockRetentionMode,
};
use time::OffsetDateTime;
use tracing::info;

use crate::bucket::lifecycle::lifecycle::{Event, Lifecycle, ObjectOpts};
use crate::bucket::object_lock::ObjectLockStatusExt;
use crate::bucket::object_lock::objectlock::{get_object_legalhold_meta, get_object_retention_meta, utc_now_ntp};
use crate::bucket::replication::ReplicationConfig;
use rustfs_common::metrics::IlmAction;

/// Evaluator - evaluates lifecycle policy on objects for the given lifecycle
/// configuration, lock retention configuration and replication configuration.
pub struct Evaluator {
    policy: Arc<BucketLifecycleConfiguration>,
    lock_retention: Option<Arc<ObjectLockConfiguration>>,
    repl_cfg: Option<Arc<ReplicationConfig>>,
}

impl Evaluator {
    /// NewEvaluator - creates a new evaluator with the given lifecycle
    pub fn new(policy: Arc<BucketLifecycleConfiguration>) -> Self {
        Self {
            policy,
            lock_retention: None,
            repl_cfg: None,
        }
    }

    /// WithLockRetention - sets the lock retention configuration for the evaluator
    pub fn with_lock_retention(mut self, lr: Option<Arc<ObjectLockConfiguration>>) -> Self {
        self.lock_retention = lr;
        self
    }

    /// WithReplicationConfig - sets the replication configuration for the evaluator
    pub fn with_replication_config(mut self, rcfg: Option<Arc<ReplicationConfig>>) -> Self {
        self.repl_cfg = rcfg;
        self
    }

    /// IsPendingReplication checks if the object is pending replication.
    pub fn is_pending_replication(&self, obj: &ObjectOpts) -> bool {
        use crate::bucket::replication::ReplicationConfigurationExt;
        if self.repl_cfg.is_none() {
            return false;
        }
        if let Some(rcfg) = &self.repl_cfg {
            if rcfg
                .config
                .as_ref()
                .is_some_and(|config| config.has_active_rules(obj.name.as_str(), true))
                && !obj.version_purge_status.is_empty()
            {
                return true;
            }
        }
        false
    }

    /// IsObjectLocked checks if it is appropriate to remove an
    /// object according to locking configuration when this is lifecycle/ bucket quota asking.
    /// (copied over from enforceRetentionForDeletion)
    pub fn is_object_locked(&self, obj: &ObjectOpts) -> bool {
        if self.lock_retention.as_ref().is_none_or(|v| {
            v.object_lock_enabled
                .as_ref()
                .is_none_or(|v| v.as_str() != ObjectLockEnabled::ENABLED)
        }) {
            return false;
        }

        if obj.delete_marker {
            return false;
        }

        let lhold = get_object_legalhold_meta(obj.user_defined.clone());
        if lhold
            .status
            .is_some_and(|v| v.valid() && v.as_str() == ObjectLockLegalHoldStatus::ON)
        {
            return true;
        }

        let ret = get_object_retention_meta(obj.user_defined.clone());
        if ret
            .mode
            .is_some_and(|v| matches!(v.as_str(), ObjectLockRetentionMode::COMPLIANCE | ObjectLockRetentionMode::GOVERNANCE))
        {
            let t = utc_now_ntp();
            if let Some(retain_until) = ret.retain_until_date {
                if OffsetDateTime::from(retain_until).gt(&t) {
                    return true;
                }
            }
        }
        false
    }

    /// eval will return a lifecycle event for each object in objs for a given time.
    async fn eval_inner(&self, objs: &[ObjectOpts], now: OffsetDateTime) -> Vec<Event> {
        let mut events = vec![Event::default(); objs.len()];
        let mut newer_noncurrent_versions = 0;

        'top_loop: {
            for (i, obj) in objs.iter().enumerate() {
                let mut event = self.policy.eval_inner(obj, now, newer_noncurrent_versions).await;
                match event.action {
                    IlmAction::DeleteAllVersionsAction | IlmAction::DelMarkerDeleteAllVersionsAction => {
                        // Skip if bucket has object locking enabled; To prevent the
                        // possibility of violating an object retention on one of the
                        // noncurrent versions of this object.
                        if self.lock_retention.as_ref().is_some_and(|v| {
                            v.object_lock_enabled
                                .as_ref()
                                .is_some_and(|v| v.as_str() == ObjectLockEnabled::ENABLED)
                        }) {
                            event = Event::default();
                        } else {
                            // No need to evaluate remaining versions' lifecycle
                            // events after DeleteAllVersionsAction*
                            events[i] = event;

                            info!("eval_inner: skipping remaining versions' lifecycle events after DeleteAllVersionsAction*");

                            break 'top_loop;
                        }
                    }
                    IlmAction::DeleteVersionAction | IlmAction::DeleteRestoredVersionAction => {
                        // Defensive code, should never happen
                        if obj.version_id.is_none_or(|v| v.is_nil()) {
                            event.action = IlmAction::NoneAction;
                        }
                        if self.is_object_locked(obj) {
                            event = Event::default();
                        }

                        if self.is_pending_replication(obj) {
                            event = Event::default();
                        }
                    }
                    _ => {}
                }

                if !obj.is_latest {
                    match event.action {
                        IlmAction::DeleteVersionAction => {
                            // this noncurrent version will be expired, nothing to add
                        }
                        _ => {
                            // this noncurrent version will be spared
                            newer_noncurrent_versions += 1;
                        }
                    }
                }
                events[i] = event;
            }
        }
        events
    }

    /// Eval will return a lifecycle event for each object in objs
    pub async fn eval(&self, objs: &[ObjectOpts]) -> Result<Vec<Event>, std::io::Error> {
        if objs.is_empty() {
            return Ok(vec![]);
        }
        if objs.len() != objs[0].num_versions {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("number of versions mismatch, expected {}, got {}", objs[0].num_versions, objs.len()),
            ));
        }
        Ok(self.eval_inner(objs, OffsetDateTime::now_utc()).await)
    }
}
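For context, the removed Evaluator was driven roughly like this (an illustrative sketch against the deleted API above; the demo function itself is hypothetical):

async fn demo(policy: Arc<BucketLifecycleConfiguration>, objs: Vec<ObjectOpts>) -> Result<(), std::io::Error> {
    // Builder-style construction; lock retention and replication config are optional.
    let evaluator = Evaluator::new(policy)
        .with_lock_retention(None)
        .with_replication_config(None);

    // eval expects every version of a single object (it checks
    // objs.len() == objs[0].num_versions) and returns one Event per version,
    // where Event::default() means "no lifecycle action".
    let _events = evaluator.eval(&objs).await?;
    Ok(())
}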
@@ -18,23 +18,19 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]

use rustfs_filemeta::{ReplicationStatusType, VersionPurgeStatusType};
use s3s::dto::{
    BucketLifecycleConfiguration, ExpirationStatus, LifecycleExpiration, LifecycleRule, NoncurrentVersionTransition,
    ObjectLockConfiguration, ObjectLockEnabled, Prefix, RestoreRequest, Transition,
    ObjectLockConfiguration, ObjectLockEnabled, RestoreRequest, Transition,
};
use std::cmp::Ordering;
use std::collections::HashMap;
use std::env;
use std::fmt::Display;
use std::sync::Arc;
use time::macros::{datetime, offset};
use time::{self, Duration, OffsetDateTime};
use tracing::info;
use uuid::Uuid;

use crate::bucket::lifecycle::rule::TransitionOps;
use crate::store_api::ObjectInfo;

pub const TRANSITION_COMPLETE: &str = "complete";
pub const TRANSITION_PENDING: &str = "pending";
@@ -135,11 +131,11 @@ impl RuleValidate for LifecycleRule {
pub trait Lifecycle {
    async fn has_transition(&self) -> bool;
    fn has_expiry(&self) -> bool;
    fn has_active_rules(&self, prefix: &str) -> bool;
    async fn has_active_rules(&self, prefix: &str) -> bool;
    async fn validate(&self, lr: &ObjectLockConfiguration) -> Result<(), std::io::Error>;
    async fn filter_rules(&self, obj: &ObjectOpts) -> Option<Vec<LifecycleRule>>;
    async fn eval(&self, obj: &ObjectOpts) -> Event;
    async fn eval_inner(&self, obj: &ObjectOpts, now: OffsetDateTime, newer_noncurrent_versions: usize) -> Event;
    async fn eval_inner(&self, obj: &ObjectOpts, now: OffsetDateTime) -> Event;
    //fn set_prediction_headers(&self, w: http.ResponseWriter, obj: ObjectOpts);
    async fn noncurrent_versions_expiration_limit(self: Arc<Self>, obj: &ObjectOpts) -> Event;
}
@@ -164,7 +160,7 @@ impl Lifecycle for BucketLifecycleConfiguration {
        false
    }

    fn has_active_rules(&self, prefix: &str) -> bool {
    async fn has_active_rules(&self, prefix: &str) -> bool {
        if self.rules.len() == 0 {
            return false;
        }
@@ -173,51 +169,44 @@ impl Lifecycle for BucketLifecycleConfiguration {
                continue;
            }

            let rule_prefix = &rule.prefix.clone().unwrap_or_default();
            let rule_prefix = rule.prefix.as_ref().expect("err!");
            if prefix.len() > 0 && rule_prefix.len() > 0 && !prefix.starts_with(rule_prefix) && !rule_prefix.starts_with(&prefix)
            {
                continue;
            }

            if let Some(rule_noncurrent_version_expiration) = &rule.noncurrent_version_expiration {
                if let Some(noncurrent_days) = rule_noncurrent_version_expiration.noncurrent_days {
                    if noncurrent_days > 0 {
                        return true;
                    }
                }
                if let Some(newer_noncurrent_versions) = rule_noncurrent_version_expiration.newer_noncurrent_versions {
                    if newer_noncurrent_versions > 0 {
                        return true;
                    }
                }
            }
            if rule.noncurrent_version_transitions.is_some() {
                let rule_noncurrent_version_expiration = rule.noncurrent_version_expiration.as_ref().expect("err!");
                if rule_noncurrent_version_expiration.noncurrent_days.expect("err!") > 0 {
                    return true;
                }
            if let Some(rule_expiration) = &rule.expiration {
                if let Some(date1) = rule_expiration.date.clone() {
                    if OffsetDateTime::from(date1).unix_timestamp() < OffsetDateTime::now_utc().unix_timestamp() {
                        return true;
                    }
                }
                if rule_expiration.date.is_some() {
                    return true;
                }
                if let Some(expired_object_delete_marker) = rule_expiration.expired_object_delete_marker
                    && expired_object_delete_marker
                {
                    return true;
                }
                if rule_noncurrent_version_expiration.newer_noncurrent_versions.expect("err!") > 0 {
                    return true;
                }
            if let Some(rule_transitions) = &rule.transitions {
                let rule_transitions_0 = rule_transitions[0].clone();
                if let Some(date1) = rule_transitions_0.date {
                    if OffsetDateTime::from(date1).unix_timestamp() < OffsetDateTime::now_utc().unix_timestamp() {
                        return true;
                    }
                }
            if !rule.noncurrent_version_transitions.is_none() {
                return true;
            }
            if rule.transitions.is_some() {
                let rule_expiration = rule.expiration.as_ref().expect("err!");
                if !rule_expiration.date.is_none()
                    && OffsetDateTime::from(rule_expiration.date.clone().expect("err!")).unix_timestamp()
                        < OffsetDateTime::now_utc().unix_timestamp()
                {
                    return true;
                }
                if !rule_expiration.date.is_none() {
                    return true;
                }
                if rule_expiration.expired_object_delete_marker.expect("err!") {
                    return true;
                }
                let rule_transitions: &[Transition] = &rule.transitions.as_ref().expect("err!");
                let rule_transitions_0 = rule_transitions[0].clone();
                if !rule_transitions_0.date.is_none()
                    && OffsetDateTime::from(rule_transitions_0.date.expect("err!")).unix_timestamp()
                        < OffsetDateTime::now_utc().unix_timestamp()
                {
                    return true;
                }
                if !rule.transitions.is_none() {
                    return true;
                }
            }
@@ -285,10 +274,10 @@ impl Lifecycle for BucketLifecycleConfiguration {
    }

    async fn eval(&self, obj: &ObjectOpts) -> Event {
        self.eval_inner(obj, OffsetDateTime::now_utc(), 0).await
        self.eval_inner(obj, OffsetDateTime::now_utc()).await
    }

    async fn eval_inner(&self, obj: &ObjectOpts, now: OffsetDateTime, newer_noncurrent_versions: usize) -> Event {
    async fn eval_inner(&self, obj: &ObjectOpts, now: OffsetDateTime) -> Event {
        let mut events = Vec::<Event>::new();
        info!(
            "eval_inner: object={}, mod_time={:?}, now={:?}, is_latest={}, delete_marker={}",
@@ -447,10 +436,10 @@ impl Lifecycle for BucketLifecycleConfiguration {
            obj.is_latest,
            obj.delete_marker,
            obj.version_id,
            (obj.is_latest || obj.version_id.is_none_or(|v| v.is_nil())) && !obj.delete_marker
            (obj.is_latest || obj.version_id.is_empty()) && !obj.delete_marker
        );
        // Allow expiration for latest objects OR non-versioned objects (empty version_id)
        if (obj.is_latest || obj.version_id.is_none_or(|v| v.is_nil())) && !obj.delete_marker {
        if (obj.is_latest || obj.version_id.is_empty()) && !obj.delete_marker {
            info!("eval_inner: entering expiration check");
            if let Some(ref expiration) = rule.expiration {
                if let Some(ref date) = expiration.date {
@@ -670,7 +659,7 @@ pub struct ObjectOpts {
    pub user_tags: String,
    pub mod_time: Option<OffsetDateTime>,
    pub size: usize,
    pub version_id: Option<Uuid>,
    pub version_id: String,
    pub is_latest: bool,
    pub delete_marker: bool,
    pub num_versions: usize,
@@ -680,37 +669,12 @@ pub struct ObjectOpts {
    pub restore_expires: Option<OffsetDateTime>,
    pub versioned: bool,
    pub version_suspended: bool,
    pub user_defined: HashMap<String, String>,
    pub version_purge_status: VersionPurgeStatusType,
    pub replication_status: ReplicationStatusType,
}

impl ObjectOpts {
    pub fn expired_object_deletemarker(&self) -> bool {
        self.delete_marker && self.num_versions == 1
    }

    pub fn from_object_info(oi: &ObjectInfo) -> Self {
        Self {
            name: oi.name.clone(),
            user_tags: oi.user_tags.clone(),
            mod_time: oi.mod_time,
            size: oi.size as usize,
            version_id: oi.version_id.clone(),
            is_latest: oi.is_latest,
            delete_marker: oi.delete_marker,
            num_versions: oi.num_versions,
            successor_mod_time: oi.successor_mod_time,
            transition_status: oi.transitioned_object.status.clone(),
            restore_ongoing: oi.restore_ongoing,
            restore_expires: oi.restore_expires,
            versioned: false,
            version_suspended: false,
            user_defined: oi.user_defined.clone(),
            version_purge_status: oi.version_purge_status.clone(),
            replication_status: oi.replication_status.clone(),
        }
    }
}

#[derive(Debug, Clone)]
@@ -14,7 +14,6 @@

pub mod bucket_lifecycle_audit;
pub mod bucket_lifecycle_ops;
pub mod evaluator;
pub mod lifecycle;
pub mod rule;
pub mod tier_last_day_stats;

@@ -21,7 +21,6 @@
use sha2::{Digest, Sha256};
use std::any::Any;
use std::io::Write;
use uuid::Uuid;
use xxhash_rust::xxh64;

use super::bucket_lifecycle_ops::{ExpiryOp, GLOBAL_ExpiryState, TransitionedObject};
@@ -35,7 +34,7 @@ static XXHASH_SEED: u64 = 0;
struct ObjSweeper {
    object: String,
    bucket: String,
    version_id: Option<Uuid>,
    version_id: String,
    versioned: bool,
    suspended: bool,
    transition_status: String,
@@ -55,8 +54,8 @@ impl ObjSweeper {
        })
    }

    pub fn with_version(&mut self, vid: Option<Uuid>) -> &Self {
        self.version_id = vid.clone();
    pub fn with_version(&mut self, vid: String) -> &Self {
        self.version_id = vid;
        self
    }

@@ -73,8 +72,8 @@ impl ObjSweeper {
            version_suspended: self.suspended,
            ..Default::default()
        };
        if self.suspended && self.version_id.is_none_or(|v| v.is_nil()) {
            opts.version_id = None;
        if self.suspended && self.version_id == "" {
            opts.version_id = String::from("");
        }
        opts
    }
@@ -95,7 +94,7 @@ impl ObjSweeper {
        if !self.versioned || self.suspended {
            // 1, 2.a, 2.b
            del_tier = true;
        } else if self.versioned && self.version_id.is_some_and(|v| !v.is_nil()) {
        } else if self.versioned && self.version_id != "" {
            // 3.a
            del_tier = true;
        }
@@ -175,13 +175,6 @@ pub async fn created_at(bucket: &str) -> Result<OffsetDateTime> {
    bucket_meta_sys.created_at(bucket).await
}

pub async fn list_bucket_targets(bucket: &str) -> Result<BucketTargets> {
    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
    let bucket_meta_sys = bucket_meta_sys_lock.read().await;

    bucket_meta_sys.get_bucket_targets_config(bucket).await
}

#[derive(Debug)]
pub struct BucketMetadataSys {
    metadata_map: RwLock<HashMap<String, Arc<BucketMetadata>>>,

@@ -15,7 +15,7 @@
pub mod objectlock;
pub mod objectlock_sys;

use s3s::dto::{ObjectLockConfiguration, ObjectLockEnabled, ObjectLockLegalHoldStatus};
use s3s::dto::{ObjectLockConfiguration, ObjectLockEnabled};

pub trait ObjectLockApi {
    fn enabled(&self) -> bool;
@@ -28,13 +28,3 @@ impl ObjectLockApi for ObjectLockConfiguration {
            .is_some_and(|v| v.as_str() == ObjectLockEnabled::ENABLED)
    }
}

pub trait ObjectLockStatusExt {
    fn valid(&self) -> bool;
}

impl ObjectLockStatusExt for ObjectLockLegalHoldStatus {
    fn valid(&self) -> bool {
        matches!(self.as_str(), ObjectLockLegalHoldStatus::ON | ObjectLockLegalHoldStatus::OFF)
    }
}
@@ -9,11 +9,8 @@ use std::sync::Arc;
use std::sync::atomic::AtomicI32;
use std::sync::atomic::Ordering;

use crate::bucket::bucket_target_sys::BucketTargetSys;
use crate::bucket::metadata_sys;
use crate::bucket::replication::replication_resyncer::{
    BucketReplicationResyncStatus, DeletedObjectReplicationInfo, ReplicationConfig, ReplicationResyncer,
    get_heal_replicate_object_info,
    BucketReplicationResyncStatus, DeletedObjectReplicationInfo, ReplicationResyncer,
};
use crate::bucket::replication::replication_state::ReplicationStats;
use crate::config::com::read_config;
@@ -29,10 +26,8 @@ use rustfs_filemeta::ReplicationStatusType;
use rustfs_filemeta::ReplicationType;
use rustfs_filemeta::ReplicationWorkerOperation;
use rustfs_filemeta::ResyncDecision;
use rustfs_filemeta::VersionPurgeStatusType;
use rustfs_filemeta::replication_statuses_map;
use rustfs_filemeta::version_purge_statuses_map;
use rustfs_filemeta::{REPLICATE_EXISTING, REPLICATE_HEAL, REPLICATE_HEAL_DELETE};
use rustfs_utils::http::RESERVED_METADATA_PREFIX_LOWER;
use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339;
@@ -1038,152 +1033,3 @@ pub async fn schedule_replication_delete(dv: DeletedObjectReplicationInfo) {
        }
    }
}

/// QueueReplicationHeal is a wrapper for queue_replication_heal_internal
pub async fn queue_replication_heal(bucket: &str, oi: ObjectInfo, retry_count: u32) {
    // ignore modtime zero objects
    if oi.mod_time.is_none() || oi.mod_time == Some(OffsetDateTime::UNIX_EPOCH) {
        return;
    }

    let rcfg = match metadata_sys::get_replication_config(bucket).await {
        Ok((config, _)) => config,
        Err(err) => {
            warn!("Failed to get replication config for bucket {}: {}", bucket, err);

            return;
        }
    };

    let tgts = match BucketTargetSys::get().list_bucket_targets(bucket).await {
        Ok(targets) => Some(targets),
        Err(err) => {
            warn!("Failed to list bucket targets for bucket {}: {}", bucket, err);
            None
        }
    };

    let rcfg_wrapper = ReplicationConfig::new(Some(rcfg), tgts);
    queue_replication_heal_internal(bucket, oi, rcfg_wrapper, retry_count).await;
}

/// queue_replication_heal_internal enqueues objects that failed replication OR eligible for resyncing through
/// an ongoing resync operation or via existing objects replication configuration setting.
pub async fn queue_replication_heal_internal(
    _bucket: &str,
    oi: ObjectInfo,
    rcfg: ReplicationConfig,
    retry_count: u32,
) -> ReplicateObjectInfo {
    let mut roi = ReplicateObjectInfo::default();

    // ignore modtime zero objects
    if oi.mod_time.is_none() || oi.mod_time == Some(OffsetDateTime::UNIX_EPOCH) {
        return roi;
    }

    if rcfg.config.is_none() || rcfg.remotes.is_none() {
        return roi;
    }

    roi = get_heal_replicate_object_info(&oi, &rcfg).await;
    roi.retry_count = retry_count;

    if !roi.dsc.replicate_any() {
        return roi;
    }

    // early return if replication already done, otherwise we need to determine if this
    // version is an existing object that needs healing.
    if roi.replication_status == ReplicationStatusType::Completed
        && roi.version_purge_status.is_empty()
        && !roi.existing_obj_resync.must_resync()
    {
        return roi;
    }

    if roi.delete_marker || !roi.version_purge_status.is_empty() {
        let (version_id, dm_version_id) = if roi.version_purge_status.is_empty() {
            (None, roi.version_id)
        } else {
            (roi.version_id, None)
        };

        let dv = DeletedObjectReplicationInfo {
            delete_object: crate::store_api::DeletedObject {
                object_name: roi.name.clone(),
                delete_marker_version_id: dm_version_id,
                version_id,
                replication_state: roi.replication_state.clone(),
                delete_marker_mtime: roi.mod_time,
                delete_marker: roi.delete_marker,
                ..Default::default()
            },
            bucket: roi.bucket.clone(),
            op_type: ReplicationType::Heal,
            event_type: REPLICATE_HEAL_DELETE.to_string(),
            ..Default::default()
        };

        // heal delete marker replication failure or versioned delete replication failure
        if roi.replication_status == ReplicationStatusType::Pending
            || roi.replication_status == ReplicationStatusType::Failed
            || roi.version_purge_status == VersionPurgeStatusType::Failed
            || roi.version_purge_status == VersionPurgeStatusType::Pending
        {
            if let Some(pool) = GLOBAL_REPLICATION_POOL.get() {
                pool.queue_replica_delete_task(dv).await;
            }
            return roi;
        }

        // if replication status is Complete on DeleteMarker and existing object resync required
        let existing_obj_resync = roi.existing_obj_resync.clone();
        if existing_obj_resync.must_resync()
            && (roi.replication_status == ReplicationStatusType::Completed || roi.replication_status.is_empty())
        {
            queue_replicate_deletes_wrapper(dv, existing_obj_resync).await;
            return roi;
        }

        return roi;
    }

    if roi.existing_obj_resync.must_resync() {
        roi.op_type = ReplicationType::ExistingObject;
    }

    match roi.replication_status {
        ReplicationStatusType::Pending | ReplicationStatusType::Failed => {
            roi.event_type = REPLICATE_HEAL.to_string();
            if let Some(pool) = GLOBAL_REPLICATION_POOL.get() {
                pool.queue_replica_task(roi.clone()).await;
            }
            return roi;
        }
        _ => {}
    }

    if roi.existing_obj_resync.must_resync() {
        roi.event_type = REPLICATE_EXISTING.to_string();
        if let Some(pool) = GLOBAL_REPLICATION_POOL.get() {
            pool.queue_replica_task(roi.clone()).await;
        }
    }

    roi
}

/// Wrapper function for queueing replicate deletes with resync decision
async fn queue_replicate_deletes_wrapper(doi: DeletedObjectReplicationInfo, existing_obj_resync: ResyncDecision) {
    for (k, v) in existing_obj_resync.targets.iter() {
        if v.replicate {
            let mut dv = doi.clone();
            dv.reset_id = v.reset_id.clone();
            dv.target_arn = k.clone();
            if let Some(pool) = GLOBAL_REPLICATION_POOL.get() {
                pool.queue_replica_delete_task(dv).await;
            }
        }
    }
}
@@ -744,7 +744,7 @@ impl ReplicationWorkerOperation for DeletedObjectReplicationInfo {
    }
}

#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[derive(Debug, Clone, Default)]
pub struct ReplicationConfig {
    pub config: Option<ReplicationConfiguration>,
    pub remotes: Option<BucketTargets>,
@@ -18,10 +18,8 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]

use bytes::Bytes;
use http::{HeaderMap, StatusCode};
use http_body_util::BodyExt;
use hyper::body::Body;
use hyper::body::Bytes;
use std::collections::HashMap;

use crate::client::{
@@ -63,19 +61,9 @@ impl TransitionClient {

        let resp = self.execute_method(http::Method::PUT, &mut req_metadata).await?;
        //defer closeResponse(resp)

        let resp_status = resp.status();
        let h = resp.headers().clone();

        //if resp != nil {
        if resp_status != StatusCode::NO_CONTENT && resp.status() != StatusCode::OK {
            return Err(std::io::Error::other(http_resp_to_error_response(
                resp_status,
                &h,
                vec![],
                bucket_name,
                "",
            )));
        if resp.status() != StatusCode::NO_CONTENT && resp.status() != StatusCode::OK {
            return Err(std::io::Error::other(http_resp_to_error_response(&resp, vec![], bucket_name, "")));
        }
        //}
        Ok(())
@@ -109,17 +97,8 @@ impl TransitionClient {
            .await?;
        //defer closeResponse(resp)

        let resp_status = resp.status();
        let h = resp.headers().clone();

        if resp_status != StatusCode::NO_CONTENT {
            return Err(std::io::Error::other(http_resp_to_error_response(
                resp_status,
                &h,
                vec![],
                bucket_name,
                "",
            )));
        if resp.status() != StatusCode::NO_CONTENT {
            return Err(std::io::Error::other(http_resp_to_error_response(&resp, vec![], bucket_name, "")));
        }

        Ok(())
@@ -157,15 +136,7 @@ impl TransitionClient {
        )
        .await?;

        let mut body_vec = Vec::new();
        let mut body = resp.into_body();
        while let Some(frame) = body.frame().await {
            let frame = frame.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
            if let Some(data) = frame.data_ref() {
                body_vec.extend_from_slice(data);
            }
        }
        let policy = String::from_utf8_lossy(&body_vec).to_string();
        let policy = String::from_utf8_lossy(&resp.body().bytes().expect("err").to_vec()).to_string();
        Ok(policy)
    }
}
@@ -18,12 +18,12 @@
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use http::{HeaderMap, StatusCode};
|
||||
use http::StatusCode;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde::{de::Deserializer, ser::Serializer};
|
||||
use std::fmt::Display;
|
||||
|
||||
//use s3s::Body;
|
||||
use s3s::Body;
|
||||
use s3s::S3ErrorCode;
|
||||
|
||||
const _REPORT_ISSUE: &str = "Please report this issue at https://github.com/rustfs/rustfs/issues.";
|
||||
@@ -95,8 +95,7 @@ pub fn to_error_response(err: &std::io::Error) -> ErrorResponse {
|
||||
}
|
||||
|
||||
pub fn http_resp_to_error_response(
|
||||
resp_status: StatusCode,
|
||||
h: &HeaderMap,
|
||||
resp: &http::Response<Body>,
|
||||
b: Vec<u8>,
|
||||
bucket_name: &str,
|
||||
object_name: &str,
|
||||
@@ -105,11 +104,11 @@ pub fn http_resp_to_error_response(
|
||||
let err_resp_ = quick_xml::de::from_str::<ErrorResponse>(&err_body);
|
||||
let mut err_resp = ErrorResponse::default();
|
||||
if err_resp_.is_err() {
|
||||
match resp_status {
|
||||
match resp.status() {
|
||||
StatusCode::NOT_FOUND => {
|
||||
if object_name == "" {
|
||||
err_resp = ErrorResponse {
|
||||
status_code: resp_status,
|
||||
status_code: resp.status(),
|
||||
code: S3ErrorCode::NoSuchBucket,
|
||||
message: "The specified bucket does not exist.".to_string(),
|
||||
bucket_name: bucket_name.to_string(),
|
||||
@@ -117,7 +116,7 @@ pub fn http_resp_to_error_response(
|
||||
};
|
||||
} else {
|
||||
err_resp = ErrorResponse {
|
||||
status_code: resp_status,
|
||||
status_code: resp.status(),
|
||||
code: S3ErrorCode::NoSuchKey,
|
||||
message: "The specified key does not exist.".to_string(),
|
||||
bucket_name: bucket_name.to_string(),
|
||||
@@ -128,7 +127,7 @@ pub fn http_resp_to_error_response(
|
||||
}
|
||||
StatusCode::FORBIDDEN => {
|
||||
err_resp = ErrorResponse {
|
||||
status_code: resp_status,
|
||||
status_code: resp.status(),
|
||||
code: S3ErrorCode::AccessDenied,
|
||||
message: "Access Denied.".to_string(),
|
||||
bucket_name: bucket_name.to_string(),
|
||||
@@ -138,7 +137,7 @@ pub fn http_resp_to_error_response(
|
||||
}
|
||||
StatusCode::CONFLICT => {
|
||||
err_resp = ErrorResponse {
|
||||
status_code: resp_status,
|
||||
status_code: resp.status(),
|
||||
code: S3ErrorCode::BucketNotEmpty,
|
||||
message: "Bucket not empty.".to_string(),
|
||||
bucket_name: bucket_name.to_string(),
|
||||
@@ -147,7 +146,7 @@ pub fn http_resp_to_error_response(
|
||||
}
|
||||
StatusCode::PRECONDITION_FAILED => {
|
||||
err_resp = ErrorResponse {
|
||||
status_code: resp_status,
|
||||
status_code: resp.status(),
|
||||
code: S3ErrorCode::PreconditionFailed,
|
||||
message: "Pre condition failed.".to_string(),
|
||||
bucket_name: bucket_name.to_string(),
|
||||
@@ -156,13 +155,13 @@ pub fn http_resp_to_error_response(
|
||||
};
|
||||
}
|
||||
_ => {
|
||||
let mut msg = resp_status.to_string();
|
||||
let mut msg = resp.status().to_string();
|
||||
if err_body.len() > 0 {
|
||||
msg = err_body;
|
||||
}
|
||||
err_resp = ErrorResponse {
|
||||
status_code: resp_status,
|
||||
code: S3ErrorCode::Custom(resp_status.to_string().into()),
|
||||
status_code: resp.status(),
|
||||
code: S3ErrorCode::Custom(resp.status().to_string().into()),
|
||||
message: msg,
|
||||
bucket_name: bucket_name.to_string(),
|
||||
..Default::default()
|
||||
@@ -172,32 +171,32 @@ pub fn http_resp_to_error_response(
|
||||
} else {
|
||||
err_resp = err_resp_.unwrap();
|
||||
}
|
||||
err_resp.status_code = resp_status;
|
||||
if let Some(server_name) = h.get("Server") {
|
||||
err_resp.status_code = resp.status();
|
||||
if let Some(server_name) = resp.headers().get("Server") {
|
||||
err_resp.server = server_name.to_str().expect("err").to_string();
|
||||
}
|
||||
|
||||
let code = h.get("x-minio-error-code");
|
||||
let code = resp.headers().get("x-minio-error-code");
|
||||
if code.is_some() {
|
||||
err_resp.code = S3ErrorCode::Custom(code.expect("err").to_str().expect("err").into());
|
||||
}
|
||||
let desc = h.get("x-minio-error-desc");
|
||||
let desc = resp.headers().get("x-minio-error-desc");
|
||||
if desc.is_some() {
|
||||
err_resp.message = desc.expect("err").to_str().expect("err").trim_matches('"').to_string();
|
||||
}
|
||||
|
||||
if err_resp.request_id == "" {
|
||||
if let Some(x_amz_request_id) = h.get("x-amz-request-id") {
|
||||
if let Some(x_amz_request_id) = resp.headers().get("x-amz-request-id") {
|
||||
err_resp.request_id = x_amz_request_id.to_str().expect("err").to_string();
|
||||
}
|
||||
}
|
||||
if err_resp.host_id == "" {
|
||||
if let Some(x_amz_id_2) = h.get("x-amz-id-2") {
|
||||
if let Some(x_amz_id_2) = resp.headers().get("x-amz-id-2") {
|
||||
err_resp.host_id = x_amz_id_2.to_str().expect("err").to_string();
|
||||
}
|
||||
}
|
||||
if err_resp.region == "" {
|
||||
if let Some(x_amz_bucket_region) = h.get("x-amz-bucket-region") {
|
||||
if let Some(x_amz_bucket_region) = resp.headers().get("x-amz-bucket-region") {
|
||||
err_resp.region = x_amz_bucket_region.to_str().expect("err").to_string();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,26 +19,17 @@
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
//use bytes::Bytes;
|
||||
use futures_util::ready;
|
||||
use bytes::Bytes;
|
||||
use http::HeaderMap;
|
||||
use std::io::{Cursor, Error as IoError, ErrorKind as IoErrorKind, Read};
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::io::Cursor;
|
||||
use tokio::io::BufReader;
|
||||
use tokio_util::io::StreamReader;
|
||||
|
||||
use crate::client::{
|
||||
api_error_response::err_invalid_argument,
|
||||
api_get_options::GetObjectOptions,
|
||||
transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info},
|
||||
};
|
||||
use futures_util::StreamExt;
|
||||
use http_body_util::BodyExt;
|
||||
use hyper::body::Body;
|
||||
use hyper::body::Bytes;
|
||||
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;
|
||||
use tokio_util::io::ReaderStream;
|
||||
|
||||
impl TransitionClient {
|
||||
pub fn get_object(&self, bucket_name: &str, object_name: &str, opts: &GetObjectOptions) -> Result<Object, std::io::Error> {
|
||||
@@ -74,19 +65,11 @@ impl TransitionClient {
|
||||
)
|
||||
.await?;
|
||||
|
||||
let resp = &resp;
|
||||
let object_stat = to_object_info(bucket_name, object_name, resp.headers())?;
|
||||
|
||||
let h = resp.headers().clone();
|
||||
|
||||
let mut body_vec = Vec::new();
|
||||
let mut body = resp.into_body();
|
||||
while let Some(frame) = body.frame().await {
|
||||
let frame = frame.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
|
||||
if let Some(data) = frame.data_ref() {
|
||||
body_vec.extend_from_slice(data);
|
||||
}
|
||||
}
|
||||
Ok((object_stat, h, BufReader::new(Cursor::new(body_vec))))
|
||||
let b = resp.body().bytes().expect("err").to_vec();
|
||||
Ok((object_stat, resp.headers().clone(), BufReader::new(Cursor::new(b))))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -25,7 +25,6 @@ use crate::client::{
|
||||
};
|
||||
use bytes::Bytes;
|
||||
use http::{HeaderMap, HeaderValue};
|
||||
use http_body_util::BodyExt;
|
||||
use rustfs_config::MAX_S3_CLIENT_RESPONSE_SIZE;
|
||||
use rustfs_utils::EMPTY_STRING_SHA256_HASH;
|
||||
use s3s::dto::Owner;
|
||||
@@ -84,29 +83,18 @@ impl TransitionClient {
|
||||
)
|
||||
.await?;
|
||||
|
||||
let resp_status = resp.status();
|
||||
let h = resp.headers().clone();
|
||||
|
||||
let mut body_vec = Vec::new();
|
||||
let mut body = resp.into_body();
|
||||
while let Some(frame) = body.frame().await {
|
||||
let frame = frame.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
|
||||
if let Some(data) = frame.data_ref() {
|
||||
body_vec.extend_from_slice(data);
|
||||
}
|
||||
if resp.status() != http::StatusCode::OK {
|
||||
let b = resp.body().bytes().expect("err").to_vec();
|
||||
return Err(std::io::Error::other(http_resp_to_error_response(&resp, b, bucket_name, object_name)));
|
||||
}
|
||||
|
||||
if resp_status != http::StatusCode::OK {
|
||||
return Err(std::io::Error::other(http_resp_to_error_response(
|
||||
resp_status,
|
||||
&h,
|
||||
body_vec,
|
||||
bucket_name,
|
||||
object_name,
|
||||
)));
|
||||
}
|
||||
|
||||
let mut res = match quick_xml::de::from_str::<AccessControlPolicy>(&String::from_utf8(body_vec).unwrap()) {
|
||||
let b = resp
|
||||
.body_mut()
|
||||
.store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
|
||||
.await
|
||||
.unwrap()
|
||||
.to_vec();
|
||||
let mut res = match quick_xml::de::from_str::<AccessControlPolicy>(&String::from_utf8(b).unwrap()) {
|
||||
Ok(result) => result,
|
||||
Err(err) => {
|
||||
return Err(std::io::Error::other(err.to_string()));
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use bytes::Bytes;
|
||||
use http::{HeaderMap, HeaderValue};
|
||||
use std::collections::HashMap;
|
||||
use time::OffsetDateTime;
|
||||
@@ -29,11 +30,7 @@ use crate::client::{
|
||||
};
|
||||
use rustfs_config::MAX_S3_CLIENT_RESPONSE_SIZE;
|
||||
use rustfs_utils::EMPTY_STRING_SHA256_HASH;
|
||||
//use s3s::Body;
|
||||
use http_body_util::BodyExt;
|
||||
use hyper::body::Body;
|
||||
use hyper::body::Bytes;
|
||||
use hyper::body::Incoming;
|
||||
use s3s::Body;
|
||||
use s3s::header::{X_AMZ_MAX_PARTS, X_AMZ_OBJECT_ATTRIBUTES, X_AMZ_PART_NUMBER_MARKER, X_AMZ_VERSION_ID};
|
||||
|
||||
pub struct ObjectAttributesOptions {
|
||||
@@ -133,12 +130,19 @@ struct ObjectAttributePart {
|
||||
}
|
||||
|
||||
impl ObjectAttributes {
|
||||
pub async fn parse_response(&mut self, h: &HeaderMap, body_vec: Vec<u8>) -> Result<(), std::io::Error> {
|
||||
pub async fn parse_response(&mut self, resp: &mut http::Response<Body>) -> Result<(), std::io::Error> {
|
||||
let h = resp.headers();
|
||||
let mod_time = OffsetDateTime::parse(h.get("Last-Modified").unwrap().to_str().unwrap(), ISO8601_DATEFORMAT).unwrap(); //RFC7231Time
|
||||
self.last_modified = mod_time;
|
||||
self.version_id = h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap().to_string();
|
||||
|
||||
let mut response = match quick_xml::de::from_str::<ObjectAttributesResponse>(&String::from_utf8(body_vec).unwrap()) {
|
||||
let b = resp
|
||||
.body_mut()
|
||||
.store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
|
||||
.await
|
||||
.unwrap()
|
||||
.to_vec();
|
||||
let mut response = match quick_xml::de::from_str::<ObjectAttributesResponse>(&String::from_utf8(b).unwrap()) {
|
||||
Ok(result) => result,
|
||||
Err(err) => {
|
||||
return Err(std::io::Error::other(err.to_string()));
|
||||
@@ -209,8 +213,7 @@ impl TransitionClient {
|
||||
)
|
||||
.await?;
|
||||
|
||||
let resp_status = resp.status();
|
||||
let h = resp.headers().clone();
|
||||
let h = resp.headers();
|
||||
let has_etag = h.get("ETag").unwrap().to_str().unwrap();
|
||||
if !has_etag.is_empty() {
|
||||
return Err(std::io::Error::other(
|
||||
@@ -218,17 +221,14 @@ impl TransitionClient {
|
||||
));
|
||||
}
|
||||
|
||||
let mut body_vec = Vec::new();
|
||||
let mut body = resp.into_body();
|
||||
while let Some(frame) = body.frame().await {
|
||||
let frame = frame.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
|
||||
if let Some(data) = frame.data_ref() {
|
||||
body_vec.extend_from_slice(data);
|
||||
}
|
||||
}
|
||||
|
||||
if resp_status != http::StatusCode::OK {
|
||||
let err_body = String::from_utf8(body_vec).unwrap();
|
||||
if resp.status() != http::StatusCode::OK {
|
||||
let b = resp
|
||||
.body_mut()
|
||||
.store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
|
||||
.await
|
||||
.unwrap()
|
||||
.to_vec();
|
||||
let err_body = String::from_utf8(b).unwrap();
|
||||
let mut er = match quick_xml::de::from_str::<AccessControlPolicy>(&err_body) {
|
||||
Ok(result) => result,
|
||||
Err(err) => {
|
||||
@@ -240,7 +240,7 @@ impl TransitionClient {
|
||||
}
|
||||
|
||||
let mut oa = ObjectAttributes::new();
|
||||
oa.parse_response(&h, body_vec).await?;
|
||||
oa.parse_response(&mut resp).await?;
|
||||
|
||||
Ok(oa)
|
||||
}
|
||||
|
||||
@@ -27,11 +27,8 @@ use crate::client::{
    transition_api::{ReaderImpl, RequestMetadata, TransitionClient},
};
use crate::store_api::BucketInfo;
//use bytes::Bytes;
use bytes::Bytes;
use http::{HeaderMap, StatusCode};
use http_body_util::BodyExt;
use hyper::body::Body;
use hyper::body::Bytes;
use rustfs_config::MAX_S3_CLIENT_RESPONSE_SIZE;
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;
use std::collections::HashMap;
@@ -100,30 +97,18 @@ impl TransitionClient {
        },
        )
        .await?;

    let resp_status = resp.status();
    let h = resp.headers().clone();

    if resp.status() != StatusCode::OK {
        return Err(std::io::Error::other(http_resp_to_error_response(
            resp_status,
            &h,
            vec![],
            bucket_name,
            "",
        )));
        return Err(std::io::Error::other(http_resp_to_error_response(&resp, vec![], bucket_name, "")));
    }

    //let mut list_bucket_result = ListBucketV2Result::default();
    let mut body_vec = Vec::new();
    let mut body = resp.into_body();
    while let Some(frame) = body.frame().await {
        let frame = frame.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
        if let Some(data) = frame.data_ref() {
            body_vec.extend_from_slice(data);
        }
    }
    let mut list_bucket_result = match quick_xml::de::from_str::<ListBucketV2Result>(&String::from_utf8(body_vec).unwrap()) {
    let b = resp
        .body_mut()
        .store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
        .await
        .unwrap()
        .to_vec();
    let mut list_bucket_result = match quick_xml::de::from_str::<ListBucketV2Result>(&String::from_utf8(b).unwrap()) {
        Ok(result) => result,
        Err(err) => {
            return Err(std::io::Error::other(err.to_string()));

@@ -17,9 +17,8 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]

//use bytes::Bytes;
use bytes::Bytes;
use http::{HeaderMap, HeaderName, StatusCode};
use hyper::body::Bytes;
use s3s::S3ErrorCode;
use std::collections::HashMap;
use time::OffsetDateTime;
@@ -226,15 +225,10 @@ impl TransitionClient {
    };

    let resp = self.execute_method(http::Method::POST, &mut req_metadata).await?;

    let resp_status = resp.status();
    let h = resp.headers().clone();

    //if resp.is_none() {
    if resp.status() != StatusCode::OK {
        return Err(std::io::Error::other(http_resp_to_error_response(
            resp_status,
            &h,
            &resp,
            vec![],
            bucket_name,
            object_name,
@@ -293,14 +287,9 @@ impl TransitionClient {
    };

    let resp = self.execute_method(http::Method::PUT, &mut req_metadata).await?;

    let resp_status = resp.status();
    let h = resp.headers().clone();

    if resp.status() != StatusCode::OK {
        return Err(std::io::Error::other(http_resp_to_error_response(
            resp_status,
            &h,
            &resp,
            vec![],
            &p.bucket_name.clone(),
            &p.object_name,
@@ -381,8 +370,7 @@ impl TransitionClient {

    let resp = self.execute_method(http::Method::POST, &mut req_metadata).await?;

    let h = resp.headers().clone();

    let b = resp.body().bytes().expect("err").to_vec();
    let complete_multipart_upload_result: CompleteMultipartUploadResult = CompleteMultipartUploadResult::default();

    let (exp_time, rule_id) = if let Some(h_x_amz_expiration) = resp.headers().get(X_AMZ_EXPIRATION) {
@@ -394,6 +382,7 @@ impl TransitionClient {
        (OffsetDateTime::now_utc(), "".to_string())
    };

    let h = resp.headers();
    Ok(UploadInfo {
        bucket: complete_multipart_upload_result.bucket,
        key: complete_multipart_upload_result.key,

@@ -479,13 +479,9 @@ impl TransitionClient {

    let resp = self.execute_method(http::Method::PUT, &mut req_metadata).await?;

    let resp_status = resp.status();
    let h = resp.headers().clone();

    if resp.status() != StatusCode::OK {
        return Err(std::io::Error::other(http_resp_to_error_response(
            resp_status,
            &h,
            &resp,
            vec![],
            bucket_name,
            object_name,

@@ -18,10 +18,8 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]

use bytes::Bytes;
use http::{HeaderMap, HeaderValue, Method, StatusCode};
use http_body_util::BodyExt;
use hyper::body::Body;
use hyper::body::Bytes;
use rustfs_utils::HashAlgorithm;
use s3s::S3ErrorCode;
use s3s::dto::ReplicationStatus;
@@ -346,15 +344,8 @@ impl TransitionClient {
        )
        .await?;

    let mut body_vec = Vec::new();
    let mut body = resp.into_body();
    while let Some(frame) = body.frame().await {
        let frame = frame.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
        if let Some(data) = frame.data_ref() {
            body_vec.extend_from_slice(data);
        }
    }
    process_remove_multi_objects_response(ReaderImpl::Body(Bytes::from(body_vec)), result_tx.clone());
    let body_bytes: Vec<u8> = resp.body().bytes().expect("err").to_vec();
    process_remove_multi_objects_response(ReaderImpl::Body(Bytes::from(body_bytes)), result_tx.clone());
    }
    Ok(())
}
@@ -399,10 +390,6 @@ impl TransitionClient {
        },
        )
        .await?;

    let resp_status = resp.status();
    let h = resp.headers().clone();

    //if resp.is_some() {
    if resp.status() != StatusCode::NO_CONTENT {
        let error_response: ErrorResponse;
@@ -439,8 +426,7 @@ impl TransitionClient {
        }
        _ => {
            return Err(std::io::Error::other(http_resp_to_error_response(
                resp_status,
                &h,
                &resp,
                vec![],
                bucket_name,
                object_name,

@@ -24,10 +24,8 @@ use crate::client::{
    api_get_options::GetObjectOptions,
    transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info},
};
use bytes::Bytes;
use http::HeaderMap;
use http_body_util::BodyExt;
use hyper::body::Body;
use hyper::body::Bytes;
use s3s::dto::RestoreRequest;
use std::collections::HashMap;
use std::io::Cursor;
@@ -109,25 +107,9 @@ impl TransitionClient {
        )
        .await?;

    let resp_status = resp.status();
    let h = resp.headers().clone();

    let mut body_vec = Vec::new();
    let mut body = resp.into_body();
    while let Some(frame) = body.frame().await {
        let frame = frame.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
        if let Some(data) = frame.data_ref() {
            body_vec.extend_from_slice(data);
        }
    }
    if resp_status != http::StatusCode::ACCEPTED && resp_status != http::StatusCode::OK {
        return Err(std::io::Error::other(http_resp_to_error_response(
            resp_status,
            &h,
            body_vec,
            bucket_name,
            "",
        )));
    let b = resp.body().bytes().expect("err").to_vec();
    if resp.status() != http::StatusCode::ACCEPTED && resp.status() != http::StatusCode::OK {
        return Err(std::io::Error::other(http_resp_to_error_response(&resp, b, bucket_name, "")));
    }
    Ok(())
}

@@ -18,10 +18,8 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]

use bytes::Bytes;
use http::{HeaderMap, HeaderValue};
use http_body_util::BodyExt;
use hyper::body::Body;
use hyper::body::Bytes;
use rustfs_utils::EMPTY_STRING_SHA256_HASH;
use std::{collections::HashMap, str::FromStr};
use tokio::io::BufReader;
@@ -68,20 +66,10 @@ impl TransitionClient {
        return Ok(false);
    }

    let resp_status = resp.status();
    let h = resp.headers().clone();
    let b = resp.body().bytes().expect("err").to_vec();
    let resperr = http_resp_to_error_response(&resp, b, bucket_name, "");

    let mut body_vec = Vec::new();
    let mut body = resp.into_body();
    while let Some(frame) = body.frame().await {
        let frame = frame.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
        if let Some(data) = frame.data_ref() {
            body_vec.extend_from_slice(data);
        }
    }
    let resperr = http_resp_to_error_response(resp_status, &h, body_vec, bucket_name, "");

    warn!("bucket exists, resperr: {:?}", resperr);
    warn!("bucket exists, resp: {:?}, resperr: {:?}", resp, resperr);
    /*if to_error_response(resperr).code == "NoSuchBucket" {
        return Ok(false);
    }
@@ -120,20 +108,10 @@ impl TransitionClient {

    match resp {
        Ok(resp) => {
            let resp_status = resp.status();
            let h = resp.headers().clone();
            let b = resp.body().bytes().expect("get bucket versioning err").to_vec();
            let resperr = http_resp_to_error_response(&resp, b, bucket_name, "");

            let mut body_vec = Vec::new();
            let mut body = resp.into_body();
            while let Some(frame) = body.frame().await {
                let frame = frame.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
                if let Some(data) = frame.data_ref() {
                    body_vec.extend_from_slice(data);
                }
            }
            let resperr = http_resp_to_error_response(resp_status, &h, body_vec, bucket_name, "");

            warn!("get bucket versioning, resperr: {:?}", resperr);
            warn!("get bucket versioning, resp: {:?}, resperr: {:?}", resp, resperr);

            Ok(VersioningConfiguration::default())
        }

@@ -25,13 +25,10 @@ use crate::client::{
    transition_api::{CreateBucketConfiguration, LocationConstraint, TransitionClient},
};
use http::Request;
use http_body_util::BodyExt;
use hyper::StatusCode;
use hyper::body::Body;
use hyper::body::Bytes;
use hyper::body::Incoming;
use rustfs_config::MAX_S3_CLIENT_RESPONSE_SIZE;
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;
use s3s::Body;
use s3s::S3ErrorCode;
use std::collections::HashMap;

@@ -89,7 +86,7 @@ impl TransitionClient {
    Ok(location)
}

fn get_bucket_location_request(&self, bucket_name: &str) -> Result<http::Request<s3s::Body>, std::io::Error> {
fn get_bucket_location_request(&self, bucket_name: &str) -> Result<http::Request<Body>, std::io::Error> {
    let mut url_values = HashMap::new();
    url_values.insert("location".to_string(), "".to_string());

@@ -123,11 +120,7 @@ impl TransitionClient {
        url_str = target_url.to_string();
    }

    let Ok(mut req) = Request::builder()
        .method(http::Method::GET)
        .uri(url_str)
        .body(s3s::Body::empty())
    else {
    let Ok(mut req) = Request::builder().method(http::Method::GET).uri(url_str).body(Body::empty()) else {
        return Err(std::io::Error::other("create request error"));
    };

@@ -179,16 +172,13 @@ impl TransitionClient {
}

async fn process_bucket_location_response(
    mut resp: http::Response<Incoming>,
    mut resp: http::Response<Body>,
    bucket_name: &str,
    tier_type: &str,
) -> Result<String, std::io::Error> {
    //if resp != nil {
    if resp.status() != StatusCode::OK {
        let resp_status = resp.status();
        let h = resp.headers().clone();

        let err_resp = http_resp_to_error_response(resp_status, &h, vec![], bucket_name, "");
        let err_resp = http_resp_to_error_response(&resp, vec![], bucket_name, "");
        match err_resp.code {
            S3ErrorCode::NotImplemented => {
                match err_resp.server.as_str() {
@@ -218,22 +208,18 @@ async fn process_bucket_location_response(
    }
    //}

    let mut body_vec = Vec::new();
    let mut body = resp.into_body();
    while let Some(frame) = body.frame().await {
        let frame = frame.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
        if let Some(data) = frame.data_ref() {
            body_vec.extend_from_slice(data);
        }
    }
    let b = resp
        .body_mut()
        .store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
        .await
        .unwrap()
        .to_vec();
    let mut location = "".to_string();
    if tier_type == "huaweicloud" {
        let d = quick_xml::de::from_str::<CreateBucketConfiguration>(&String::from_utf8(body_vec).unwrap()).unwrap();
        let d = quick_xml::de::from_str::<CreateBucketConfiguration>(&String::from_utf8(b).unwrap()).unwrap();
        location = d.location_constraint;
    } else {
        if let Ok(LocationConstraint { field }) =
            quick_xml::de::from_str::<LocationConstraint>(&String::from_utf8(body_vec).unwrap())
        {
        if let Ok(LocationConstraint { field }) = quick_xml::de::from_str::<LocationConstraint>(&String::from_utf8(b).unwrap()) {
            location = field;
        }
    }

@@ -118,51 +118,38 @@ pub fn new_getobjectreader<'a>(
    let mut is_encrypted = false;
    let is_compressed = false; //oi.is_compressed_ok();

    let rs_;
    let mut rs_ = None;
    if rs.is_none() && opts.part_number.is_some() && opts.part_number.unwrap() > 0 {
        rs_ = part_number_to_rangespec(oi.clone(), opts.part_number.unwrap());
    } else {
        rs_ = rs.clone();
    }

    let mut get_fn: ObjReaderFn;

    if let Some(rs_) = rs_ {
        let (off, length) = match rs_.get_offset_length(oi.size) {
            Ok(x) => x,
            Err(err) => {
                return Err(ErrorResponse {
                    code: S3ErrorCode::InvalidRange,
                    message: err.to_string(),
                    key: None,
                    bucket_name: None,
                    region: None,
                    request_id: None,
                    host_id: "".to_string(),
                });
            }
    let (off, length) = match rs_.unwrap().get_offset_length(oi.size) {
        Ok(x) => x,
        Err(err) => {
            return Err(ErrorResponse {
                code: S3ErrorCode::InvalidRange,
                message: err.to_string(),
                key: None,
                bucket_name: None,
                region: None,
                request_id: None,
                host_id: "".to_string(),
            });
        }
    };
        get_fn = Arc::new(move |input_reader: BufReader<Cursor<Vec<u8>>>, _: HeaderMap| {
            //Box::pin({
            let r = GetObjectReader {
                object_info: oi.clone(),
                stream: Box::new(input_reader),
            };
    get_fn = Arc::new(move |input_reader: BufReader<Cursor<Vec<u8>>>, _: HeaderMap| {
        //Box::pin({
        let r = GetObjectReader {
            object_info: oi.clone(),
            stream: Box::new(input_reader),
        };
            r
            //})
        });
        r
        //})
    });

        return Ok((get_fn, off as i64, length as i64));
    }
    Err(ErrorResponse {
        code: S3ErrorCode::InvalidRange,
        message: "Invalid range".to_string(),
        key: Some(oi.name.clone()),
        bucket_name: Some(oi.bucket.clone()),
        region: Some("".to_string()),
        request_id: None,
        host_id: "".to_string(),
    })
    Ok((get_fn, off as i64, length as i64))
}

/// Convert a raw stored ETag into the strongly-typed `s3s::dto::ETag`.

@@ -20,7 +20,6 @@

use crate::client::bucket_cache::BucketLocationCache;
use crate::client::{
    api_error_response::ErrorResponse,
    api_error_response::{err_invalid_argument, http_resp_to_error_response, to_error_response},
    api_get_options::GetObjectOptions,
    api_put_object::PutObjectOptions,
@@ -33,17 +32,13 @@ use crate::client::{
    credentials::{CredContext, Credentials, SignatureType, Static},
};
use crate::{client::checksum::ChecksumMode, store_api::GetObjectReader};
//use bytes::Bytes;
use bytes::Bytes;
use futures::{Future, StreamExt};
use http::{HeaderMap, HeaderName};
use http::{
    HeaderValue, Response, StatusCode,
    request::{Builder, Request},
};
use http_body::Body;
use http_body_util::BodyExt;
use hyper::body::Bytes;
use hyper::body::Incoming;
use hyper_rustls::{ConfigBuilderExt, HttpsConnector};
use hyper_util::{client::legacy::Client, client::legacy::connect::HttpConnector, rt::TokioExecutor};
use md5::Digest;
@@ -59,8 +54,8 @@ use rustfs_utils::{
    },
};
use s3s::S3ErrorCode;
use s3s::dto::Owner;
use s3s::dto::ReplicationStatus;
use s3s::{Body, dto::Owner};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use std::io::Cursor;
@@ -100,7 +95,7 @@ pub struct TransitionClient {
    pub creds_provider: Arc<Mutex<Credentials<Static>>>,
    pub override_signer_type: SignatureType,
    pub secure: bool,
    pub http_client: Client<HttpsConnector<HttpConnector>, s3s::Body>,
    pub http_client: Client<HttpsConnector<HttpConnector>, Body>,
    pub bucket_loc_cache: Arc<Mutex<BucketLocationCache>>,
    pub is_trace_enabled: Arc<Mutex<bool>>,
    pub trace_errors_only: Arc<Mutex<bool>>,
@@ -137,6 +132,25 @@ pub enum BucketLookupType {
    BucketLookupPath,
}

fn load_root_store_from_tls_path() -> Option<rustls::RootCertStore> {
    // Load the root certificate bundle from the path specified by the
    // RUSTFS_TLS_PATH environment variable.
    let tp = std::env::var("RUSTFS_TLS_PATH").ok()?;
    let ca = std::path::Path::new(&tp).join(rustfs_config::RUSTFS_CA_CERT);
    if !ca.exists() {
        return None;
    }

    let der_list = rustfs_utils::load_cert_bundle_der_bytes(ca.to_str().unwrap_or_default()).ok()?;
    let mut store = rustls::RootCertStore::empty();
    for der in der_list {
        if let Err(e) = store.add(der.into()) {
            warn!("Warning: failed to add certificate from '{}' to root store: {e}", ca.display());
        }
    }
    Some(store)
}
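A minimal sketch of how this helper is meant to be wired into a client TLS config; it mirrors the hunk further below, and assumes `hyper_rustls::ConfigBuilderExt` is in scope for `with_native_roots()`:

    // Illustrative only: custom roots from RUSTFS_TLS_PATH take precedence;
    // otherwise fall back to the platform's native root certificates.
    fn client_tls_config() -> Result<rustls::ClientConfig, std::io::Error> {
        let tls = if let Some(store) = load_root_store_from_tls_path() {
            rustls::ClientConfig::builder()
                .with_root_certificates(store)
                .with_no_client_auth()
        } else {
            rustls::ClientConfig::builder().with_native_roots()?.with_no_client_auth()
        };
        Ok(tls)
    }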

impl TransitionClient {
    pub async fn new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
        let clnt = Self::private_new(endpoint, opts, tier_type).await?;
@@ -147,18 +161,22 @@ impl TransitionClient {
    async fn private_new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
        let endpoint_url = get_endpoint_url(endpoint, opts.secure)?;

        //#[cfg(feature = "ring")]
        let _ = rustls::crypto::ring::default_provider().install_default();
        //#[cfg(feature = "aws-lc-rs")]
        // let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();

        let scheme = endpoint_url.scheme();
        let client;
        let tls = rustls::ClientConfig::builder().with_native_roots()?.with_no_client_auth();
        let tls = if let Some(store) = load_root_store_from_tls_path() {
            rustls::ClientConfig::builder()
                .with_root_certificates(store)
                .with_no_client_auth()
        } else {
            rustls::ClientConfig::builder().with_native_roots()?.with_no_client_auth()
        };

        let https = hyper_rustls::HttpsConnectorBuilder::new()
            .with_tls_config(tls)
            .https_or_http()
            .enable_http1()
            .enable_http2()
            .build();
        client = Client::builder(TokioExecutor::new()).build(https);

@@ -253,7 +271,7 @@ impl TransitionClient {
        todo!();
    }

    fn dump_http(&self, req: &http::Request<s3s::Body>, resp: &http::Response<Incoming>) -> Result<(), std::io::Error> {
    fn dump_http(&self, req: &http::Request<Body>, resp: &http::Response<Body>) -> Result<(), std::io::Error> {
        let mut resp_trace: Vec<u8>;

        //info!("{}{}", self.trace_output, "---------BEGIN-HTTP---------");
@@ -262,7 +280,7 @@ impl TransitionClient {
        Ok(())
    }

    pub async fn doit(&self, req: http::Request<s3s::Body>) -> Result<http::Response<Incoming>, std::io::Error> {
    pub async fn doit(&self, req: http::Request<Body>) -> Result<http::Response<Body>, std::io::Error> {
        let req_method;
        let req_uri;
        let req_headers;
@@ -277,7 +295,9 @@ impl TransitionClient {
            debug!("endpoint_url: {}", self.endpoint_url.as_str().to_string());
            resp = http_client.request(req);
        }
        let resp = resp.await;
        let resp = resp
            .await /*.map_err(Into::into)*/
            .map(|res| res.map(Body::from));
        debug!("http_client url: {} {}", req_method, req_uri);
        debug!("http_client headers: {:?}", req_headers);
        if let Err(err) = resp {
@@ -285,7 +305,7 @@ impl TransitionClient {
            return Err(std::io::Error::other(err));
        }

        let resp = resp.unwrap();
        let mut resp = resp.unwrap();
        debug!("http_resp: {:?}", resp);

        //let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
@@ -294,26 +314,23 @@ impl TransitionClient {
        //if self.is_trace_enabled && !(self.trace_errors_only && resp.status() == StatusCode::OK) {
        if resp.status() != StatusCode::OK {
            //self.dump_http(&cloned_req, &resp)?;
            let mut body_vec = Vec::new();
            let mut body = resp.into_body();
            while let Some(frame) = body.frame().await {
                let frame = frame.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
                if let Some(data) = frame.data_ref() {
                    body_vec.extend_from_slice(data);
                }
            }
            warn!("err_body: {}", String::from_utf8(body_vec).unwrap());
            Err(std::io::Error::other("http_client call error."))
        } else {
            Ok(resp)
            let b = resp
                .body_mut()
                .store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
                .await
                .unwrap()
                .to_vec();
            warn!("err_body: {}", String::from_utf8(b).unwrap());
        }

        Ok(resp)
    }

    pub async fn execute_method(
        &self,
        method: http::Method,
        metadata: &mut RequestMetadata,
    ) -> Result<http::Response<Incoming>, std::io::Error> {
    ) -> Result<http::Response<Body>, std::io::Error> {
        if self.is_offline() {
            let mut s = self.endpoint_url.to_string();
            s.push_str(" is offline.");
@@ -323,7 +340,7 @@ impl TransitionClient {
        let retryable: bool;
        //let mut body_seeker: BufferReader;
        let mut req_retry = self.max_retries;
        let mut resp: http::Response<Incoming>;
        let mut resp: http::Response<Body>;

        //if metadata.content_body != nil {
        //body_seeker = BufferReader::new(metadata.content_body.read_all().await?);
@@ -345,19 +362,13 @@ impl TransitionClient {
            }
        }

        let resp_status = resp.status();
        let h = resp.headers().clone();

        let mut body_vec = Vec::new();
        let mut body = resp.into_body();
        while let Some(frame) = body.frame().await {
            let frame = frame.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
            if let Some(data) = frame.data_ref() {
                body_vec.extend_from_slice(data);
            }
        }
        let mut err_response =
            http_resp_to_error_response(resp_status, &h, body_vec.clone(), &metadata.bucket_name, &metadata.object_name);
        let b = resp
            .body_mut()
            .store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
            .await
            .unwrap()
            .to_vec();
        let mut err_response = http_resp_to_error_response(&resp, b.clone(), &metadata.bucket_name, &metadata.object_name);
        err_response.message = format!("remote tier error: {}", err_response.message);

        if self.region == "" {
@@ -393,7 +404,7 @@ impl TransitionClient {
            continue;
        }

        if is_http_status_retryable(&resp_status) {
        if is_http_status_retryable(&resp.status()) {
            continue;
        }

@@ -407,7 +418,7 @@ impl TransitionClient {
        &self,
        method: &http::Method,
        metadata: &mut RequestMetadata,
    ) -> Result<http::Request<s3s::Body>, std::io::Error> {
    ) -> Result<http::Request<Body>, std::io::Error> {
        let mut location = metadata.bucket_location.clone();
        if location == "" && metadata.bucket_name != "" {
            location = self.get_bucket_location(&metadata.bucket_name).await?;
@@ -427,7 +438,7 @@ impl TransitionClient {
        let Ok(mut req) = Request::builder()
            .method(method)
            .uri(target_url.to_string())
            .body(s3s::Body::empty())
            .body(Body::empty())
        else {
            return Err(std::io::Error::other("create request error"));
        };
@@ -539,10 +550,10 @@ impl TransitionClient {
        if metadata.content_length > 0 {
            match &mut metadata.content_body {
                ReaderImpl::Body(content_body) => {
                    *req.body_mut() = s3s::Body::from(content_body.clone());
                    *req.body_mut() = Body::from(content_body.clone());
                }
                ReaderImpl::ObjectBody(content_body) => {
                    *req.body_mut() = s3s::Body::from(content_body.read_all().await?);
                    *req.body_mut() = Body::from(content_body.read_all().await?);
                }
            }
        }
@@ -550,7 +561,7 @@ impl TransitionClient {
        Ok(req)
    }

    pub fn set_user_agent(&self, req: &mut Request<s3s::Body>) {
    pub fn set_user_agent(&self, req: &mut Request<Body>) {
        let headers = req.headers_mut();
        headers.insert("User-Agent", C_USER_AGENT.parse().expect("err"));
    }
@@ -988,217 +999,25 @@ impl Default for UploadInfo {
    }
}

/// Convert HTTP headers to ObjectInfo struct
/// This function parses various S3 response headers to construct an ObjectInfo struct
/// containing metadata about an S3 object.
pub fn to_object_info(bucket_name: &str, object_name: &str, h: &HeaderMap) -> Result<ObjectInfo, std::io::Error> {
    // Helper function to get header value as string
    let get_header = |name: &str| -> String { h.get(name).and_then(|val| val.to_str().ok()).unwrap_or("").to_string() };

    // Get and process the ETag
    let etag = {
        let etag_raw = get_header("ETag");
        // Remove surrounding quotes if present (trimming ETag)
        let trimmed = etag_raw.trim_start_matches('"').trim_end_matches('"');
        Some(trimmed.to_string())
    };

    // Parse content length if it exists
    let size = {
        let content_length_str = get_header("Content-Length");
        if !content_length_str.is_empty() {
            content_length_str
                .parse::<i64>()
                .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidData, "Content-Length is not an integer"))?
        } else {
            -1
        }
    };

    // Parse Last-Modified time
    let mod_time = {
        let last_modified_str = get_header("Last-Modified");
        if !last_modified_str.is_empty() {
            // Parse HTTP date format (RFC 7231)
            // Using time crate to parse HTTP dates
            let parsed_time = OffsetDateTime::parse(&last_modified_str, &time::format_description::well_known::Rfc2822)
                .or_else(|_| OffsetDateTime::parse(&last_modified_str, &time::format_description::well_known::Rfc3339))
                .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidData, "Last-Modified time format is invalid"))?;
            Some(parsed_time)
        } else {
            Some(OffsetDateTime::now_utc())
        }
    };

    // Get content type
    let content_type = {
        let content_type_raw = get_header("Content-Type");
        let content_type_trimmed = content_type_raw.trim();
        if content_type_trimmed.is_empty() {
            Some("application/octet-stream".to_string())
        } else {
            Some(content_type_trimmed.to_string())
        }
    };

    // Parse Expires time
    let expiration = {
        let expiry_str = get_header("Expires");
        if !expiry_str.is_empty() {
            OffsetDateTime::parse(&expiry_str, &time::format_description::well_known::Rfc2822)
                .or_else(|_| OffsetDateTime::parse(&expiry_str, &time::format_description::well_known::Rfc3339))
                .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidData, "'Expires' is not in supported format"))?
        } else {
            OffsetDateTime::now_utc()
        }
    };

    // Extract user metadata (headers prefixed with "X-Amz-Meta-")
    let user_metadata = {
        let mut meta = HashMap::new();
        for (name, value) in h.iter() {
            let header_name = name.as_str().to_lowercase();
            if header_name.starts_with("x-amz-meta-") {
                let key = header_name.strip_prefix("x-amz-meta-").unwrap().to_string();
                if let Ok(value_str) = value.to_str() {
                    meta.insert(key, value_str.to_string());
                }
            }
        }
        meta
    };

    let user_tag = {
        let user_tag_str = get_header("X-Amz-Tagging");
        user_tag_str
    };

    // Extract user tags count
    let user_tag_count = {
        let count_str = get_header("x-amz-tagging-count");
        if !count_str.is_empty() {
            count_str
                .parse::<usize>()
                .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidData, "x-amz-tagging-count is not an integer"))?
        } else {
            0
        }
    };

    // Handle restore info
    let restore = {
        let restore_hdr = get_header("x-amz-restore");
        if !restore_hdr.is_empty() {
            // Simplified restore header parsing - in real implementation, this would parse the specific format
            // "ongoing-request=\"true\"" or "ongoing-request=\"false\", expiry-date=\"..."
            let ongoing_restore = restore_hdr.contains("ongoing-request=\"true\"");
            RestoreInfo {
                ongoing_restore,
                expiry_time: if ongoing_restore {
                    OffsetDateTime::now_utc()
                } else {
                    // Try to extract expiry date from the header
                    // This is simplified - real parsing would be more complex
                    OffsetDateTime::now_utc()
                },
            }
        } else {
            RestoreInfo::default()
        }
    };

    // Extract version ID
    let version_id = {
        let version_id_str = get_header("x-amz-version-id");
        if !version_id_str.is_empty() {
            Some(Uuid::parse_str(&version_id_str).unwrap_or_else(|_| Uuid::nil()))
        } else {
            None
        }
    };

    // Check if it's a delete marker
    let is_delete_marker = get_header("x-amz-delete-marker") == "true";

    // Get replication status
    let replication_status = {
        let status_str = get_header("x-amz-replication-status");
        ReplicationStatus::from_static(match status_str.as_str() {
            "COMPLETE" => ReplicationStatus::COMPLETE,
            "PENDING" => ReplicationStatus::PENDING,
            "FAILED" => ReplicationStatus::FAILED,
            "REPLICA" => ReplicationStatus::REPLICA,
            _ => ReplicationStatus::PENDING,
        })
    };

    // Extract expiration rule ID and time (simplified)
    let (expiration_time, expiration_rule_id) = {
        // In a real implementation, this would parse the x-amz-expiration header
        // which typically has format: "expiry-date="Fri, 11 Dec 2020 00:00:00 GMT", rule-id="myrule""
        let exp_header = get_header("x-amz-expiration");
        if !exp_header.is_empty() {
            // Simplified parsing - real implementation would be more thorough
            (OffsetDateTime::now_utc(), exp_header) // Placeholder
        } else {
            (OffsetDateTime::now_utc(), "".to_string())
        }
    };

    // Extract checksums
    let checksum_crc32 = get_header("x-amz-checksum-crc32");
    let checksum_crc32c = get_header("x-amz-checksum-crc32c");
    let checksum_sha1 = get_header("x-amz-checksum-sha1");
    let checksum_sha256 = get_header("x-amz-checksum-sha256");
    let checksum_crc64nvme = get_header("x-amz-checksum-crc64nvme");
    let checksum_mode = get_header("x-amz-checksum-mode");

    // Build and return the ObjectInfo struct
    Ok(ObjectInfo {
        etag,
        name: object_name.to_string(),
        mod_time,
        size,
        content_type,
        metadata: h.clone(),
        user_metadata,
        user_tags: "".to_string(), // Tags would need separate parsing
        user_tag_count,
        owner: Owner::default(),
        storage_class: get_header("x-amz-storage-class"),
        is_latest: true, // Would be determined by versioning settings
        is_delete_marker,
        version_id,
        replication_status,
        replication_ready: false, // Would be computed based on status
        expiration: expiration_time,
        expiration_rule_id,
        num_versions: 1, // Would be determined by versioning
        restore,
        checksum_crc32,
        checksum_crc32c,
        checksum_sha1,
        checksum_sha256,
        checksum_crc64nvme,
        checksum_mode,
    })
    todo!()
}
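A minimal usage sketch for `to_object_info` as shown above; the bucket and object names are illustrative assumptions:

    // Hypothetical call site: derive object metadata from response headers.
    fn object_info_from_headers(h: &http::HeaderMap) -> Result<(), std::io::Error> {
        let info = to_object_info("example-bucket", "example-object", h)?;
        // A missing Content-Length yields size == -1; surrounding ETag quotes are stripped.
        println!("size={} etag={:?} type={:?}", info.size, info.etag, info.content_type);
        Ok(())
    }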

type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;

//#[derive(Clone)]
pub struct SendRequest {
    inner: hyper::client::conn::http1::SendRequest<s3s::Body>,
    inner: hyper::client::conn::http1::SendRequest<Body>,
}

impl From<hyper::client::conn::http1::SendRequest<s3s::Body>> for SendRequest {
    fn from(inner: hyper::client::conn::http1::SendRequest<s3s::Body>) -> Self {
impl From<hyper::client::conn::http1::SendRequest<Body>> for SendRequest {
    fn from(inner: hyper::client::conn::http1::SendRequest<Body>) -> Self {
        Self { inner }
    }
}

impl tower::Service<Request<s3s::Body>> for SendRequest {
    type Response = Response<Incoming>;
impl tower::Service<Request<Body>> for SendRequest {
    type Response = Response<Body>;
    type Error = std::io::Error;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

@@ -1206,13 +1025,13 @@ impl tower::Service<Request<s3s::Body>> for SendRequest {
        self.inner.poll_ready(cx).map_err(std::io::Error::other)
    }

    fn call(&mut self, req: Request<s3s::Body>) -> Self::Future {
    fn call(&mut self, req: Request<Body>) -> Self::Future {
        //let req = hyper::Request::builder().uri("/").body(http_body_util::Empty::<Bytes>::new()).unwrap();
        //let req = hyper::Request::builder().uri("/").body(Body::empty()).unwrap();

        let fut = self.inner.send_request(req);

        Box::pin(async move { fut.await.map_err(std::io::Error::other) })
        Box::pin(async move { fut.await.map_err(std::io::Error::other).map(|res| res.map(Body::from)) })
    }
}


@@ -15,8 +15,7 @@
use crate::disk::{
    CheckPartsResp, DeleteOptions, DiskAPI, DiskError, DiskInfo, DiskInfoOptions, DiskLocation, Endpoint, Error,
    FileInfoVersions, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, Result, UpdateMetadataOpts, VolumeInfo,
    WalkDirOptions,
    local::{LocalDisk, ScanGuard},
    WalkDirOptions, local::LocalDisk,
};
use bytes::Bytes;
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
@@ -260,7 +259,6 @@ impl LocalDiskWrapper {
    debug!("health check: performing health check");
    if Self::perform_health_check(disk.clone(), &TEST_BUCKET, &TEST_OBJ, &TEST_DATA, true, CHECK_TIMEOUT_DURATION).await.is_err() && health.swap_ok_to_faulty() {
        // Health check failed, disk is considered faulty
        warn!("health check: failed, disk is considered faulty");

        health.increment_waiting(); // Balance the increment from failed operation

@@ -430,7 +428,7 @@ impl LocalDiskWrapper {
    {
        // Check if disk is faulty
        if self.health.is_faulty() {
            warn!("local disk {} health is faulty, returning error", self.to_string());
            warn!("disk {} health is faulty, returning error", self.to_string());
            return Err(DiskError::FaultyDisk);
        }

@@ -477,15 +475,6 @@ impl LocalDiskWrapper {

#[async_trait::async_trait]
impl DiskAPI for LocalDiskWrapper {
    async fn read_metadata(&self, volume: &str, path: &str) -> Result<Bytes> {
        self.track_disk_health(|| async { self.disk.read_metadata(volume, path).await }, Duration::ZERO)
            .await
    }

    fn start_scan(&self) -> ScanGuard {
        self.disk.start_scan()
    }

    fn to_string(&self) -> String {
        self.disk.to_string()
    }

@@ -89,7 +89,7 @@ pub struct LocalDisk {
    pub format_info: RwLock<FormatInfo>,
    pub endpoint: Endpoint,
    pub disk_info_cache: Arc<Cache<DiskInfo>>,
    pub scanning: Arc<AtomicU32>,
    pub scanning: AtomicU32,
    pub rotational: bool,
    pub fstype: String,
    pub major: u64,
@@ -215,7 +215,7 @@ impl LocalDisk {
    format_path,
    format_info: RwLock::new(format_info),
    disk_info_cache: Arc::new(cache),
    scanning: Arc::new(AtomicU32::new(0)),
    scanning: AtomicU32::new(0),
    rotational: Default::default(),
    fstype: Default::default(),
    minor: Default::default(),
@@ -673,8 +673,6 @@ impl LocalDisk {
        return Err(DiskError::FileNotFound);
    }

    debug!("read_raw: file_path: {:?}", file_path.as_ref());

    let meta_path = file_path.as_ref().join(Path::new(STORAGE_FORMAT_FILE));

    let res = {
@@ -684,7 +682,6 @@ impl LocalDisk {
        match self.read_metadata_with_dmtime(meta_path).await {
            Ok(res) => Ok(res),
            Err(err) => {
                warn!("read_raw: error: {:?}", err);
                if err == Error::FileNotFound
                    && !skip_access_checks(volume_dir.as_ref().to_string_lossy().to_string().as_str())
                {
@@ -710,6 +707,20 @@ impl LocalDisk {
        Ok((buf, mtime))
    }

    async fn read_metadata(&self, file_path: impl AsRef<Path>) -> Result<Vec<u8>> {
        // Try to use cached file content reading for better performance, with safe fallback
        let path = file_path.as_ref().to_path_buf();

        // First, try the cache
        if let Ok(bytes) = get_global_file_cache().get_file_content(path.clone()).await {
            return Ok(bytes.to_vec());
        }

        // Fallback to direct read if cache fails
        let (data, _) = self.read_metadata_with_dmtime(file_path.as_ref()).await?;
        Ok(data)
    }
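The cache-then-fallback shape used by this function, as a standalone hedged sketch; `get_global_file_cache` and `get_file_content` come from the diff, while the function name and the direct read standing in for `read_metadata_with_dmtime` are illustrative:

    // Illustrative pattern only: consult a global file-content cache first and
    // fall back to a direct disk read when the cache misses or fails.
    async fn read_with_cache(path: std::path::PathBuf) -> std::io::Result<Vec<u8>> {
        if let Ok(bytes) = get_global_file_cache().get_file_content(path.clone()).await {
            return Ok(bytes.to_vec());
        }
        // Hypothetical direct read in place of read_metadata_with_dmtime.
        tokio::fs::read(path).await
    }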
|
||||
|
||||
async fn read_metadata_with_dmtime(&self, file_path: impl AsRef<Path>) -> Result<(Vec<u8>, Option<OffsetDateTime>)> {
|
||||
check_path_length(file_path.as_ref().to_string_lossy().as_ref())?;
|
||||
|
||||
@@ -871,7 +882,7 @@ impl LocalDisk {
|
||||
}
|
||||
|
||||
// write_all_private with check_path_length
|
||||
#[tracing::instrument(level = "debug", skip(self, buf, sync, skip_parent))]
|
||||
#[tracing::instrument(level = "debug", skip_all)]
|
||||
pub async fn write_all_private(&self, volume: &str, path: &str, buf: Bytes, sync: bool, skip_parent: &Path) -> Result<()> {
|
||||
let volume_dir = self.get_bucket_path(volume)?;
|
||||
let file_path = volume_dir.join(Path::new(&path));
|
||||
@@ -1063,7 +1074,7 @@ impl LocalDisk {
|
||||
|
||||
if entry.ends_with(STORAGE_FORMAT_FILE) {
|
||||
let metadata = self
|
||||
.read_metadata(bucket, format!("{}/{}", ¤t, &entry).as_str())
|
||||
.read_metadata(self.get_object_path(bucket, format!("{}/{}", ¤t, &entry).as_str())?)
|
||||
.await?;
|
||||
|
||||
let entry = entry.strip_suffix(STORAGE_FORMAT_FILE).unwrap_or_default().to_owned();
|
||||
@@ -1079,7 +1090,7 @@ impl LocalDisk {
|
||||
|
||||
out.write_obj(&MetaCacheEntry {
|
||||
name: name.clone(),
|
||||
metadata: metadata.to_vec(),
|
||||
metadata,
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
@@ -1146,14 +1157,14 @@ impl LocalDisk {
|
||||
|
||||
let fname = format!("{}/{}", &meta.name, STORAGE_FORMAT_FILE);
|
||||
|
||||
match self.read_metadata(&opts.bucket, fname.as_str()).await {
|
||||
match self.read_metadata(self.get_object_path(&opts.bucket, fname.as_str())?).await {
|
||||
Ok(res) => {
|
||||
if is_dir_obj {
|
||||
meta.name = meta.name.trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH).to_owned();
|
||||
meta.name.push_str(SLASH_SEPARATOR);
|
||||
}
|
||||
|
||||
meta.metadata = res.to_vec();
|
||||
meta.metadata = res;
|
||||
|
||||
out.write_obj(&meta).await?;
|
||||
|
||||
@@ -1200,14 +1211,6 @@ impl LocalDisk {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ScanGuard(pub Arc<AtomicU32>);
|
||||
|
||||
impl Drop for ScanGuard {
|
||||
fn drop(&mut self) {
|
||||
self.0.fetch_sub(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
fn is_root_path(path: impl AsRef<Path>) -> bool {
path.as_ref().components().count() == 1 && path.as_ref().has_root()
}

@@ -1855,20 +1858,19 @@ impl DiskAPI for LocalDisk {
let mut objs_returned = 0;

if opts.base_dir.ends_with(SLASH_SEPARATOR) {
if let Ok(data) = self
.read_metadata(
&opts.bucket,
path_join_buf(&[
format!("{}{}", opts.base_dir.trim_end_matches(SLASH_SEPARATOR), GLOBAL_DIR_SUFFIX).as_str(),
STORAGE_FORMAT_FILE,
])
.as_str(),
)
.await
{
let fpath = self.get_object_path(
&opts.bucket,
path_join_buf(&[
format!("{}{}", opts.base_dir.trim_end_matches(SLASH_SEPARATOR), GLOBAL_DIR_SUFFIX).as_str(),
STORAGE_FORMAT_FILE,
])
.as_str(),
)?;

if let Ok(data) = self.read_metadata(fpath).await {
let meta = MetaCacheEntry {
name: opts.base_dir.clone(),
metadata: data.to_vec(),
metadata: data,
..Default::default()
};
out.write_obj(&meta).await?;

@@ -2447,26 +2449,6 @@ impl DiskAPI for LocalDisk {

Ok(info)
}
#[tracing::instrument(skip(self))]
fn start_scan(&self) -> ScanGuard {
self.scanning.fetch_add(1, Ordering::Relaxed);
ScanGuard(Arc::clone(&self.scanning))
}

async fn read_metadata(&self, volume: &str, path: &str) -> Result<Bytes> {
// Try to use cached file content reading for better performance, with safe fallback
let file_path = self.get_object_path(volume, path)?;
// let file_path = file_path.join(Path::new(STORAGE_FORMAT_FILE));

// First, try the cache
if let Ok(bytes) = get_global_file_cache().get_file_content(file_path.clone()).await {
return Ok(bytes);
}

// Fallback to direct read if cache fails
let (data, _) = self.read_metadata_with_dmtime(file_path).await?;
Ok(data.into())
}
}
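// A minimal sketch of the cache-first read pattern used above: consult a cache,
// and on any miss or cache error fall back to the authoritative source.
// `Cache` and `read_from_disk` are illustrative stand-ins, not rustfs APIs.
use std::collections::HashMap;

struct Cache(HashMap<String, Vec<u8>>);

impl Cache {
    fn get(&self, key: &str) -> Option<Vec<u8>> {
        self.0.get(key).cloned()
    }
}

fn read_from_disk(key: &str) -> std::io::Result<Vec<u8>> {
    std::fs::read(key) // the slow, always-correct path
}

fn read(cache: &Cache, key: &str) -> std::io::Result<Vec<u8>> {
    if let Some(bytes) = cache.get(key) {
        return Ok(bytes); // fast path: cache hit
    }
    read_from_disk(key) // safe fallback: any miss degrades to a direct read
}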
async fn get_disk_info(drive_path: PathBuf) -> Result<(rustfs_utils::os::DiskInfo, bool)> {

@@ -32,7 +32,6 @@ pub const STORAGE_FORMAT_FILE: &str = "xl.meta";
pub const STORAGE_FORMAT_FILE_BACKUP: &str = "xl.meta.bkp";

use crate::disk::disk_store::LocalDiskWrapper;
use crate::disk::local::ScanGuard;
use crate::rpc::RemoteDisk;
use bytes::Bytes;
use endpoint::Endpoint;

@@ -396,20 +395,6 @@ impl DiskAPI for Disk {
Disk::Remote(remote_disk) => remote_disk.disk_info(opts).await,
}
}

fn start_scan(&self) -> ScanGuard {
match self {
Disk::Local(local_disk) => local_disk.start_scan(),
Disk::Remote(remote_disk) => remote_disk.start_scan(),
}
}

async fn read_metadata(&self, volume: &str, path: &str) -> Result<Bytes> {
match self {
Disk::Local(local_disk) => local_disk.read_metadata(volume, path).await,
Disk::Remote(remote_disk) => remote_disk.read_metadata(volume, path).await,
}
}
}

pub async fn new_disk(ep: &Endpoint, opt: &DiskOption) -> Result<DiskStore> {

@@ -473,7 +458,6 @@ pub trait DiskAPI: Debug + Send + Sync + 'static {
opts: &ReadOptions,
) -> Result<FileInfo>;
async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result<RawFileInfo>;
async fn read_metadata(&self, volume: &str, path: &str) -> Result<Bytes>;
async fn rename_data(
&self,
src_volume: &str,

@@ -505,7 +489,6 @@ pub trait DiskAPI: Debug + Send + Sync + 'static {
async fn write_all(&self, volume: &str, path: &str, data: Bytes) -> Result<()>;
async fn read_all(&self, volume: &str, path: &str) -> Result<Bytes>;
async fn disk_info(&self, opts: &DiskInfoOptions) -> Result<DiskInfo>;
fn start_scan(&self) -> ScanGuard;
}

#[derive(Debug, Default, Serialize, Deserialize)]

@@ -21,7 +21,6 @@ use crate::{
tier::tier::TierConfigMgr,
};
use lazy_static::lazy_static;
use rustfs_policy::auth::Credentials;
use std::{
collections::HashMap,
sync::{Arc, OnceLock},

@@ -61,49 +60,6 @@ lazy_static! {
/// Global cancellation token for background services (data scanner and auto heal)
static GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();

/// Global active credentials
static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();

/// Initialize the global action credentials
///
/// # Arguments
/// * `ak` - Optional access key
/// * `sk` - Optional secret key
///
/// # Returns
/// * None
///
pub fn init_global_action_credentials(ak: Option<String>, sk: Option<String>) {
let ak = {
if let Some(k) = ak {
k
} else {
rustfs_utils::string::gen_access_key(20).unwrap_or_default()
}
};

let sk = {
if let Some(k) = sk {
k
} else {
rustfs_utils::string::gen_secret_key(32).unwrap_or_default()
}
};

GLOBAL_ACTIVE_CRED
.set(Credentials {
access_key: ak,
secret_key: sk,
..Default::default()
})
.unwrap();
}

/// Get the global action credentials
pub fn get_global_action_cred() -> Option<Credentials> {
GLOBAL_ACTIVE_CRED.get().cloned()
}
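// A minimal sketch of the write-once global pattern shown above: `OnceLock`
// guarantees the value is set exactly once and is afterwards read lock-free.
// `Cred` is an illustrative type, not the rustfs `Credentials` struct.
use std::sync::OnceLock;

#[derive(Debug, Clone, Default)]
struct Cred {
    access_key: String,
    secret_key: String,
}

static GLOBAL_CRED: OnceLock<Cred> = OnceLock::new();

fn init(ak: String, sk: String) {
    // `set` fails if called twice; a second init is a programming error here.
    GLOBAL_CRED
        .set(Cred { access_key: ak, secret_key: sk })
        .expect("global credentials already initialized");
}

fn get() -> Option<Cred> {
    GLOBAL_CRED.get().cloned()
}

fn main() {
    init("AKIA_EXAMPLE".into(), "secret".into());
    assert_eq!(get().unwrap().access_key, "AKIA_EXAMPLE");
}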
/// Get the global rustfs port
///
/// # Returns

@@ -440,11 +440,11 @@ impl PoolMeta {
}
}

pub fn path2_bucket_object(name: &str) -> (String, String) {
fn path2_bucket_object(name: &str) -> (String, String) {
path2_bucket_object_with_base_path("", name)
}

pub fn path2_bucket_object_with_base_path(base_path: &str, path: &str) -> (String, String) {
fn path2_bucket_object_with_base_path(base_path: &str, path: &str) -> (String, String) {
// Trim the base path and leading slash
let trimmed_path = path
.strip_prefix(base_path)

@@ -452,11 +452,7 @@ pub fn path2_bucket_object_with_base_path(base_path: &str, path: &str) -> (Strin
.strip_prefix(SLASH_SEPARATOR)
.unwrap_or(path);
// Find the position of the first '/'
#[cfg(windows)]
let trimmed_path = trimmed_path.replace('\\', "/");
let Some(pos) = trimmed_path.find(SLASH_SEPARATOR) else {
return (trimmed_path.to_string(), "".to_string());
};
let pos = trimmed_path.find(SLASH_SEPARATOR).unwrap_or(trimmed_path.len());
// Split into bucket and prefix
let bucket = &trimmed_path[0..pos];
let prefix = &trimmed_path[pos + 1..]; // +1 to skip the '/' character if it exists
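// A small worked example of the bucket/prefix split performed above: given a
// slash-separated object path, everything before the first '/' is the bucket
// and the remainder is the object prefix. Illustrative helper, same logic.
fn split_bucket_object(path: &str) -> (String, String) {
    let trimmed = path.strip_prefix('/').unwrap_or(path);
    match trimmed.find('/') {
        Some(pos) => (trimmed[..pos].to_string(), trimmed[pos + 1..].to_string()),
        None => (trimmed.to_string(), String::new()),
    }
}

fn main() {
    assert_eq!(split_bucket_object("/photos/2024/cat.jpg"), ("photos".into(), "2024/cat.jpg".into()));
    assert_eq!(split_bucket_object("photos"), ("photos".into(), String::new()));
}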
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::global::get_global_action_cred;
use base64::Engine as _;
use base64::engine::general_purpose;
use hmac::{Hmac, KeyInit, Mac};

@@ -20,6 +19,7 @@ use http::HeaderMap;
use http::HeaderValue;
use http::Method;
use http::Uri;
use rustfs_credentials::get_global_action_cred;
use sha2::Sha256;
use time::OffsetDateTime;
use tracing::error;

@@ -12,6 +12,29 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{
path::PathBuf,
sync::{Arc, atomic::Ordering},
time::Duration,
};

use bytes::Bytes;
use futures::lock::Mutex;
use http::{HeaderMap, HeaderValue, Method, header::CONTENT_TYPE};
use rustfs_protos::{
node_service_time_out_client,
proto_gen::node_service::{
CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest,
ReadMultipleRequest, ReadPartsRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest, RenameFileRequest,
StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
},
};
use rustfs_utils::string::parse_bool_with_default;
use tokio::time;
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};

use crate::disk::{
CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions,
ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions,

@@ -19,7 +42,6 @@ use crate::disk::{
CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE, get_max_timeout_duration,
},
endpoint::Endpoint,
local::ScanGuard,
};
use crate::disk::{FileReader, FileWriter};
use crate::disk::{disk_store::DiskHealthTracker, error::DiskError};

@@ -27,35 +49,11 @@ use crate::{
disk::error::{Error, Result},
rpc::build_auth_headers,
};
use bytes::Bytes;
use futures::lock::Mutex;
use http::{HeaderMap, HeaderValue, Method, header::CONTENT_TYPE};
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
use rustfs_protos::proto_gen::node_service::RenamePartRequest;
use rustfs_protos::{
node_service_time_out_client,
proto_gen::node_service::{
CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest,
ReadMetadataRequest, ReadMultipleRequest, ReadPartsRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest,
RenameFileRequest, StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
},
};
use rustfs_rio::{HttpReader, HttpWriter};
use rustfs_utils::string::parse_bool_with_default;
use std::{
path::PathBuf,
sync::{
Arc,
atomic::{AtomicU32, Ordering},
},
time::Duration,
};
use tokio::time;
use tokio::{io::AsyncWrite, net::TcpStream, time::timeout};
use tokio_util::sync::CancellationToken;
use tonic::Request;
use tracing::{debug, info, warn};
use uuid::Uuid;

#[derive(Debug)]

@@ -65,7 +63,6 @@ pub struct RemoteDisk {
pub url: url::Url,
pub root: PathBuf,
endpoint: Endpoint,
pub scanning: Arc<AtomicU32>,
/// Whether health checking is enabled
health_check: bool,
/// Health tracker for connection monitoring

@@ -94,7 +91,6 @@ impl RemoteDisk {
url: ep.url.clone(),
root,
endpoint: ep.clone(),
scanning: Arc::new(AtomicU32::new(0)),
health_check: opt.health_check && env_health_check,
health: Arc::new(DiskHealthTracker::new()),
cancel_token: CancellationToken::new(),

@@ -231,7 +227,7 @@ impl RemoteDisk {
{
// Check if disk is faulty
if self.health.is_faulty() {
warn!("remote disk {} health is faulty, returning error", self.to_string());
warn!("disk {} health is faulty, returning error", self.to_string());
return Err(DiskError::FaultyDisk);
}

@@ -730,25 +726,6 @@ impl DiskAPI for RemoteDisk {
.await
}

async fn read_metadata(&self, volume: &str, path: &str) -> Result<Bytes> {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ReadMetadataRequest {
volume: volume.to_string(),
path: path.to_string(),
disk: self.endpoint.to_string(),
});

let response = client.read_metadata(request).await?.into_inner();

if !response.success {
return Err(response.error.unwrap_or_default().into());
}

Ok(response.data)
}
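// A minimal sketch of the fail-fast health gate used above: every remote call
// first checks a shared health tracker and returns an error without touching
// the network when the disk is already marked faulty. Types are illustrative,
// not rustfs APIs.
use std::sync::atomic::{AtomicBool, Ordering};

struct HealthTracker {
    faulty: AtomicBool,
}

impl HealthTracker {
    fn is_faulty(&self) -> bool {
        self.faulty.load(Ordering::Relaxed)
    }
}

fn guarded_call(health: &HealthTracker) -> Result<&'static str, &'static str> {
    if health.is_faulty() {
        return Err("disk is faulty"); // skip the expensive RPC entirely
    }
    Ok("payload") // ... perform the real remote call here
}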
#[tracing::instrument(skip(self))]
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()> {
info!("update_metadata");

@@ -1354,12 +1331,6 @@ impl DiskAPI for RemoteDisk {

Ok(disk_info)
}

#[tracing::instrument(skip(self))]
fn start_scan(&self) -> ScanGuard {
self.scanning.fetch_add(1, Ordering::Relaxed);
ScanGuard(Arc::clone(&self.scanning))
}
}

#[cfg(test)]

@@ -73,7 +73,6 @@ use rustfs_filemeta::{
FileInfo, FileMeta, FileMetaShallowVersion, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, ObjectPartInfo,
RawFileInfo, ReplicationStatusType, VersionPurgeStatusType, file_info_from_raw, merge_file_meta_versions,
};
use rustfs_lock::FastLockGuard;
use rustfs_lock::fast_lock::types::LockResult;
use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
use rustfs_rio::{EtagResolvable, HashReader, HashReaderMut, TryGetIndex as _, WarpReader};

@@ -1487,8 +1486,20 @@ impl SetDisks {
let object = object.clone();
let version_id = version_id.clone();
tokio::spawn(async move {
if let Some(disk) = disk {
disk.read_version(&org_bucket, &bucket, &object, &version_id, &opts).await
if let Some(disk) = disk
&& disk.is_online().await
{
if version_id.is_empty() {
match disk.read_xl(&bucket, &object, read_data).await {
Ok(info) => {
let fi = file_info_from_raw(info, &bucket, &object, read_data).await?;
Ok(fi)
}
Err(err) => Err(err),
}
} else {
disk.read_version(&org_bucket, &bucket, &object, &version_id, &opts).await
}
} else {
Err(DiskError::DiskNotFound)
}
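// A minimal sketch of the `if let ... &&` let-chain used above (stabilized for
// the Rust 2024 edition): a pattern match and a boolean guard combine in one
// condition instead of nested `if let` blocks. Illustrative values only.
fn main() {
    let disk: Option<&str> = Some("disk-1");
    let online = true;
    if let Some(d) = disk
        && online
    {
        println!("reading from {d}"); // both the match and the guard passed
    } else {
        println!("disk missing or offline");
    }
}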
@@ -2682,7 +2693,7 @@ impl SetDisks {
let (mut parts_metadata, errs) = {
let mut retry_count = 0;
loop {
let (parts, errs) = Self::read_all_fileinfo(&disks, "", bucket, object, version_id, false, false).await?;
let (parts, errs) = Self::read_all_fileinfo(&disks, "", bucket, object, version_id, true, true).await?;

// Check if we have enough valid metadata to proceed
// If we have too many errors, and we haven't exhausted retries, try again

@@ -2709,14 +2720,7 @@ impl SetDisks {
retry_count += 1;
}
};
info!(
parts_count = parts_metadata.len(),
bucket = bucket,
object = object,
version_id = version_id,
?errs,
"File info read complete"
);
info!(parts_count = parts_metadata.len(), ?errs, "File info read complete");
if DiskError::is_all_not_found(&errs) {
warn!(
"heal_object failed, all obj part not found, bucket: {}, obj: {}, version_id: {}",

@@ -4076,14 +4080,6 @@ impl ObjectIO for SetDisks {

#[async_trait::async_trait]
impl StorageAPI for SetDisks {
#[tracing::instrument(skip(self))]
async fn new_ns_lock(&self, bucket: &str, object: &str) -> Result<FastLockGuard> {
self.fast_lock_manager
.acquire_write_lock(bucket, object, self.locker_owner.as_str())
.await
.map_err(|e| Error::other(self.format_lock_error(bucket, object, "write", &e)))
}

#[tracing::instrument(skip(self))]
async fn backend_info(&self) -> rustfs_madmin::BackendInfo {
unimplemented!()

@@ -4852,18 +4848,10 @@ impl StorageAPI for SetDisks {
// Normalize ETags by removing quotes before comparison (PR #592 compatibility)
let transition_etag = rustfs_utils::path::trim_etag(&opts.transition.etag);
let stored_etag = rustfs_utils::path::trim_etag(&get_raw_etag(&fi.metadata));
if let Some(mod_time1) = opts.mod_time {
if let Some(mod_time2) = fi.mod_time.as_ref() {
if mod_time1.unix_timestamp() != mod_time2.unix_timestamp()
/*|| transition_etag != stored_etag*/
{
return Err(to_object_err(Error::other(DiskError::FileNotFound), vec![bucket, object]));
}
} else {
return Err(Error::other("mod_time 2 error.".to_string()));
}
} else {
return Err(Error::other("mod_time 1 error.".to_string()));
if opts.mod_time.expect("err").unix_timestamp() != fi.mod_time.as_ref().expect("err").unix_timestamp()
|| transition_etag != stored_etag
{
return Err(to_object_err(Error::other(DiskError::FileNotFound), vec![bucket, object]));
}
if fi.transition_status == TRANSITION_COMPLETE {
return Ok(());

@@ -4993,10 +4981,8 @@ impl StorageAPI for SetDisks {
oi = ObjectInfo::from_file_info(&actual_fi, bucket, object, opts.versioned || opts.version_suspended);
let ropts = put_restore_opts(bucket, object, &opts.transition.restore_request, &oi).await?;
if oi.parts.len() == 1 {
let mut opts = opts.clone();
opts.part_number = Some(1);
let rs: Option<HTTPRangeSpec> = None;
let gr = get_transitioned_object_reader(bucket, object, &rs, &HeaderMap::new(), &oi, &opts).await;
let gr = get_transitioned_object_reader(bucket, object, &rs, &HeaderMap::new(), &oi, opts).await;
if let Err(err) = gr {
return set_restore_header_fn(&mut oi, Some(to_object_err(err.into(), vec![bucket, object]))).await;
}

@@ -6548,10 +6534,6 @@ async fn disks_with_all_parts(
let corrupted = !meta.mod_time.eq(&latest_meta.mod_time) || !meta.data_dir.eq(&latest_meta.data_dir);

if corrupted {
warn!(
"disks_with_all_partsv2: metadata is corrupted, object_name={}, index: {index}",
object_name
);
meta_errs[index] = Some(DiskError::FileCorrupt);
parts_metadata[index] = FileInfo::default();
continue;

@@ -6559,10 +6541,6 @@ async fn disks_with_all_parts(

if erasure_distribution_reliable {
if !meta.is_valid() {
warn!(
"disks_with_all_partsv2: metadata is not valid, object_name={}, index: {index}",
object_name
);
parts_metadata[index] = FileInfo::default();
meta_errs[index] = Some(DiskError::FileCorrupt);
continue;

@@ -6573,10 +6551,6 @@ async fn disks_with_all_parts(
// Erasure distribution is not the same as onlineDisks
// attempt a fix if possible, assuming other entries
// might have the right erasure distribution.
warn!(
"disks_with_all_partsv2: erasure distribution is not the same as onlineDisks, object_name={}, index: {index}",
object_name
);
parts_metadata[index] = FileInfo::default();
meta_errs[index] = Some(DiskError::FileCorrupt);
continue;

@@ -45,7 +45,6 @@ use rustfs_common::{
};
use rustfs_filemeta::FileInfo;

use rustfs_lock::FastLockGuard;
use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
use rustfs_utils::{crc_hash, path::path_join_buf, sip_hash};
use tokio::sync::RwLock;

@@ -367,10 +366,6 @@ impl ObjectIO for Sets {

#[async_trait::async_trait]
impl StorageAPI for Sets {
#[tracing::instrument(skip(self))]
async fn new_ns_lock(&self, bucket: &str, object: &str) -> Result<FastLockGuard> {
self.disk_set[0].new_ns_lock(bucket, object).await
}
#[tracing::instrument(skip(self))]
async fn backend_info(&self) -> rustfs_madmin::BackendInfo {
unimplemented!()

@@ -58,7 +58,6 @@ use rand::Rng as _;
use rustfs_common::heal_channel::{HealItemType, HealOpts};
use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, GLOBAL_RUSTFS_HOST, GLOBAL_RUSTFS_PORT};
use rustfs_filemeta::FileInfo;
use rustfs_lock::FastLockGuard;
use rustfs_madmin::heal_commands::HealResultItem;
use rustfs_utils::path::{SLASH_SEPARATOR, decode_dir_object, encode_dir_object, path_join_buf};
use s3s::dto::{BucketVersioningStatus, ObjectLockConfiguration, ObjectLockEnabled, VersioningConfiguration};

@@ -1152,10 +1151,6 @@ lazy_static! {

#[async_trait::async_trait]
impl StorageAPI for ECStore {
#[instrument(skip(self))]
async fn new_ns_lock(&self, bucket: &str, object: &str) -> Result<FastLockGuard> {
self.pools[0].new_ns_lock(bucket, object).await
}
#[instrument(skip(self))]
async fn backend_info(&self) -> rustfs_madmin::BackendInfo {
let (standard_sc_parity, rr_sc_parity) = {

@@ -30,7 +30,6 @@ use rustfs_filemeta::{
FileInfo, MetaCacheEntriesSorted, ObjectPartInfo, REPLICATION_RESET, REPLICATION_STATUS, ReplicateDecision, ReplicationState,
ReplicationStatusType, VersionPurgeStatusType, replication_statuses_map, version_purge_statuses_map,
};
use rustfs_lock::FastLockGuard;
use rustfs_madmin::heal_commands::HealResultItem;
use rustfs_rio::Checksum;
use rustfs_rio::{DecompressReader, HashReader, LimitReader, WarpReader};

@@ -1300,7 +1299,6 @@ pub trait ObjectIO: Send + Sync + Debug + 'static {
#[allow(clippy::too_many_arguments)]
pub trait StorageAPI: ObjectIO + Debug {
// NewNSLock TODO:
async fn new_ns_lock(&self, bucket: &str, object: &str) -> Result<FastLockGuard>;
// Shutdown TODO:
// NSScanner TODO:

@@ -15,8 +15,7 @@
use crate::config::storageclass::STANDARD;
use crate::disk::RUSTFS_META_BUCKET;
use regex::Regex;
use rustfs_utils::http::headers::AMZ_OBJECT_TAGGING;
use rustfs_utils::http::headers::AMZ_STORAGE_CLASS;
use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, AMZ_STORAGE_CLASS};
use std::collections::HashMap;
use std::io::{Error, Result};

@@ -35,12 +35,11 @@ uuid = { workspace = true, features = ["v4", "fast-rng", "serde"] }
tokio = { workspace = true, features = ["io-util", "macros", "sync"] }
xxhash-rust = { workspace = true, features = ["xxh64"] }
bytes.workspace = true
rustfs-utils = { workspace = true, features = ["hash","http"] }
rustfs-utils = { workspace = true, features = ["hash", "http"] }
byteorder = { workspace = true }
tracing.workspace = true
thiserror.workspace = true
s3s.workspace = true
lazy_static.workspace = true
regex.workspace = true

[dev-dependencies]

@@ -942,41 +942,6 @@ impl FileMeta {
}
}

pub fn get_file_info_versions(&self, volume: &str, path: &str, include_free_versions: bool) -> Result<FileInfoVersions> {
let mut versions = self.into_file_info_versions(volume, path, true)?;

let mut n = 0;

let mut versions_vec = Vec::new();

for fi in versions.versions.iter() {
if fi.tier_free_version() {
if !include_free_versions {
versions.free_versions.push(fi.clone());
}
} else {
if !include_free_versions {
versions_vec.push(fi.clone());
}
n += 1;
}
}

if !include_free_versions {
versions.versions = versions_vec;
}

for fi in versions.free_versions.iter_mut() {
fi.num_versions = n;
}

Ok(versions)
}
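// A minimal sketch of the version-partitioning logic above: walk the list once,
// route "free" tier versions into one bucket and real versions into another,
// then record the real-version count on each free version. `V` is an
// illustrative stand-in for FileInfo.
struct V {
    free: bool,
    num_versions: usize,
}

fn partition(mut all: Vec<V>) -> (Vec<V>, Vec<V>) {
    let mut real = Vec::new();
    let mut free = Vec::new();
    for v in all.drain(..) {
        if v.free { free.push(v) } else { real.push(v) }
    }
    let n = real.len();
    for f in free.iter_mut() {
        f.num_versions = n; // free versions remember how many real versions exist
    }
    (real, free)
}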
pub fn get_all_file_info_versions(&self, volume: &str, path: &str, all_parts: bool) -> Result<FileInfoVersions> {
self.into_file_info_versions(volume, path, all_parts)
}

pub fn into_file_info_versions(&self, volume: &str, path: &str, all_parts: bool) -> Result<FileInfoVersions> {
let mut versions = Vec::new();
for version in self.versions.iter() {

@@ -19,6 +19,7 @@ use rustfs_utils::http::RESERVED_METADATA_PREFIX_LOWER;
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::collections::HashMap;
use std::sync::LazyLock;
use std::time::Duration;
use time::OffsetDateTime;
use uuid::Uuid;

@@ -709,7 +710,7 @@ pub fn parse_replicate_decision(_bucket: &str, s: &str) -> std::io::Result<Repli
// }
}

#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplicateObjectInfo {
pub name: String,
pub size: i64,

@@ -773,9 +774,7 @@ impl ReplicationWorkerOperation for ReplicateObjectInfo {
}
}

lazy_static::lazy_static! {
static ref REPL_STATUS_REGEX: Regex = Regex::new(r"([^=].*?)=([^,].*?);").unwrap();
}
static REPL_STATUS_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"([^=].*?)=([^,].*?);").unwrap());
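// A minimal sketch of the lazy_static -> std::sync::LazyLock migration shown
// above: LazyLock gives the same compute-once-on-first-use semantics with no
// external crate or macro. The regex below is illustrative, not the rustfs one.
use regex::Regex;
use std::sync::LazyLock;

static KEY_VALUE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"(\w+)=(\w+);").unwrap());

fn main() {
    // First access runs the closure; later accesses reuse the compiled regex.
    let caps = KEY_VALUE.captures("arn=COMPLETED;").unwrap();
    assert_eq!(&caps[1], "arn");
    assert_eq!(&caps[2], "COMPLETED");
}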
impl ReplicateObjectInfo {
/// Returns replication status of a target

@@ -29,6 +29,7 @@ documentation = "https://docs.rs/rustfs-iam/latest/rustfs_iam/"
workspace = true

[dependencies]
rustfs-credentials = { workspace = true }
tokio.workspace = true
time = { workspace = true, features = ["serde-human-readable"] }
serde = { workspace = true, features = ["derive", "rc"] }

@@ -24,15 +24,13 @@ use crate::{
},
};
use futures::future::join_all;
use rustfs_ecstore::global::get_global_action_cred;
use rustfs_credentials::{Credentials, EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, get_global_action_cred};
use rustfs_madmin::{AccountStatus, AddOrUpdateUserReq, GroupDesc};
use rustfs_policy::{
arn::ARN,
auth::{self, Credentials, UserIdentity, is_secret_key_valid, jwt_sign},
auth::{self, UserIdentity, is_secret_key_valid, jwt_sign},
format::Format,
policy::{
EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, Policy, PolicyDoc, default::DEFAULT_POLICIES, iam_policy_claim_name_sa,
},
policy::{Policy, PolicyDoc, default::DEFAULT_POLICIES, iam_policy_claim_name_sa},
};
use rustfs_utils::path::path_join_buf;
use serde::{Deserialize, Serialize};

@@ -20,6 +20,7 @@ use crate::{
manager::{extract_jwt_claims, get_default_policyes},
};
use futures::future::join_all;
use rustfs_credentials::get_global_action_cred;
use rustfs_ecstore::StorageAPI as _;
use rustfs_ecstore::store_api::{ObjectInfoOrErr, WalkOptions};
use rustfs_ecstore::{

@@ -27,7 +28,6 @@ use rustfs_ecstore::{
RUSTFS_CONFIG_PREFIX,
com::{delete_config, read_config, read_config_with_metadata, save_config},
},
global::get_global_action_cred,
store::ECStore,
store_api::{ObjectInfo, ObjectOptions},
};

@@ -410,9 +410,8 @@ impl Store for ObjectStore {
data = match Self::decrypt_data(&data) {
Ok(v) => v,
Err(err) => {
warn!("delete the config file when decrypt failed failed: {}, path: {}", err, path.as_ref());
// delete the config file when decrypt failed
let _ = self.delete_iam_config(path.as_ref()).await;
warn!("config decrypt failed, keeping file: {}, path: {}", err, path.as_ref());
// keep the config file when decrypt failed - do not delete
return Err(Error::ConfigNotFound);
}
};

@@ -24,19 +24,18 @@ use crate::store::MappedPolicy;
use crate::store::Store;
use crate::store::UserType;
use crate::utils::extract_claims;
use rustfs_ecstore::global::get_global_action_cred;
use rustfs_credentials::{Credentials, EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, get_global_action_cred};
use rustfs_ecstore::notification_sys::get_global_notification_sys;
use rustfs_madmin::AddOrUpdateUserReq;
use rustfs_madmin::GroupDesc;
use rustfs_policy::arn::ARN;
use rustfs_policy::auth::Credentials;
use rustfs_policy::auth::{
ACCOUNT_ON, UserIdentity, contains_reserved_chars, create_new_credentials_with_metadata, generate_credentials,
is_access_key_valid, is_secret_key_valid,
};
use rustfs_policy::policy::Args;
use rustfs_policy::policy::opa;
use rustfs_policy::policy::{EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, Policy, PolicyDoc, iam_policy_claim_name_sa};
use rustfs_policy::policy::{Policy, PolicyDoc, iam_policy_claim_name_sa};
use serde_json::Value;
use serde_json::json;
use std::collections::HashMap;

@@ -29,7 +29,8 @@ documentation = "https://docs.rs/rustfs-policy/latest/rustfs_policy/"
workspace = true

[dependencies]
rustfs-config = { workspace = true, features = ["constants","opa"] }
rustfs-credentials = { workspace = true }
rustfs-config = { workspace = true, features = ["constants", "opa"] }
tokio = { workspace = true, features = ["full"] }
time = { workspace = true, features = ["serde-human-readable"] }
serde = { workspace = true, features = ["derive", "rc"] }

@@ -38,7 +39,6 @@ thiserror.workspace = true
strum = { workspace = true, features = ["derive"] }
rustfs-crypto = { workspace = true }
ipnetwork = { workspace = true, features = ["serde"] }
rand.workspace = true
base64-simd = { workspace = true }
jsonwebtoken = { workspace = true }
regex = { workspace = true }
@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::Error as IamError;
use crate::error::{Error, Result};
use crate::policy::{INHERITED_POLICY_TYPE, Policy, Validator, iam_policy_claim_name_sa};
use crate::policy::{Policy, Validator};
use crate::utils;
use serde::{Deserialize, Serialize};
use rustfs_credentials::Credentials;
use serde::Serialize;
use serde_json::{Value, json};
use std::collections::HashMap;
use std::convert::TryFrom;
use time::OffsetDateTime;
use tracing::warn;

@@ -32,178 +33,82 @@ pub const ACCOUNT_OFF: &str = "off";

const RESERVED_CHARS: &str = "=,";

// ContainsReservedChars - returns whether the input string contains reserved characters.
/// ContainsReservedChars - returns whether the input string contains reserved characters.
///
/// # Arguments
/// * `s` - input string to check.
///
/// # Returns
/// * `bool` - true if contains reserved characters, false otherwise.
///
pub fn contains_reserved_chars(s: &str) -> bool {
s.contains(RESERVED_CHARS)
}

// IsAccessKeyValid - validate access key for right length.
/// IsAccessKeyValid - validate access key for right length.
///
/// # Arguments
/// * `access_key` - access key to validate.
///
/// # Returns
/// * `bool` - true if valid, false otherwise.
///
pub fn is_access_key_valid(access_key: &str) -> bool {
access_key.len() >= ACCESS_KEY_MIN_LEN
}

// IsSecretKeyValid - validate secret key for right length.
/// IsSecretKeyValid - validate secret key for right length.
///
/// # Arguments
/// * `secret_key` - secret key to validate.
///
/// # Returns
/// * `bool` - true if valid, false otherwise.
///
pub fn is_secret_key_valid(secret_key: &str) -> bool {
secret_key.len() >= SECRET_KEY_MIN_LEN
}

// #[cfg_attr(test, derive(PartialEq, Eq, Debug))]
// struct CredentialHeader {
// access_key: String,
// scop: CredentialHeaderScope,
// }

// #[cfg_attr(test, derive(PartialEq, Eq, Debug))]
// struct CredentialHeaderScope {
// date: Date,
// region: String,
// service: ServiceType,
// request: String,
// }

// impl TryFrom<&str> for CredentialHeader {
// type Error = Error;
// fn try_from(value: &str) -> Result<Self, Self::Error> {
// let mut elem = value.trim().splitn(2, '=');
// let (Some(h), Some(cred_elems)) = (elem.next(), elem.next()) else {
// return Err(IamError::ErrCredMalformed));
// };

// if h != "Credential" {
// return Err(IamError::ErrCredMalformed));
// }

// let mut cred_elems = cred_elems.trim().rsplitn(5, '/');

// let Some(request) = cred_elems.next() else {
// return Err(IamError::ErrCredMalformed));
// };

// let Some(service) = cred_elems.next() else {
// return Err(IamError::ErrCredMalformed));
// };

// let Some(region) = cred_elems.next() else {
// return Err(IamError::ErrCredMalformed));
// };

// let Some(date) = cred_elems.next() else {
// return Err(IamError::ErrCredMalformed));
// };

// let Some(ak) = cred_elems.next() else {
// return Err(IamError::ErrCredMalformed));
// };

// if ak.len() < 3 {
// return Err(IamError::ErrCredMalformed));
// }

// if request != "aws4_request" {
// return Err(IamError::ErrCredMalformed));
// }

// Ok(CredentialHeader {
// access_key: ak.to_owned(),
// scop: CredentialHeaderScope {
// date: {
// const FORMATTER: LazyCell<Vec<BorrowedFormatItem<'static>>> =
// LazyCell::new(|| time::format_description::parse("[year][month][day]").unwrap());

// Date::parse(date, &FORMATTER).map_err(|_| IamError::ErrCredMalformed))?
// },
// region: region.to_owned(),
// service: service.try_into()?,
// request: request.to_owned(),
// },
// })
// }
// }

#[derive(Serialize, Deserialize, Clone, Default, Debug)]
pub struct Credentials {
pub access_key: String,
pub secret_key: String,
pub session_token: String,
pub expiration: Option<OffsetDateTime>,
pub status: String,
pub parent_user: String,
pub groups: Option<Vec<String>>,
pub claims: Option<HashMap<String, Value>>,
pub name: Option<String>,
pub description: Option<String>,
}

impl Credentials {
// pub fn new(elem: &str) -> Result<Self> {
// let header: CredentialHeader = elem.try_into()?;
// Self::check_key_value(header)
// }

// pub fn check_key_value(_header: CredentialHeader) -> Result<Self> {
// todo!()
// }

pub fn is_expired(&self) -> bool {
if self.expiration.is_none() {
return false;
}

self.expiration
.as_ref()
.map(|e| time::OffsetDateTime::now_utc() > *e)
.unwrap_or(false)
}

pub fn is_temp(&self) -> bool {
!self.session_token.is_empty() && !self.is_expired()
}

pub fn is_service_account(&self) -> bool {
const IAM_POLICY_CLAIM_NAME_SA: &str = "sa-policy";
self.claims
.as_ref()
.map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|_| !self.parent_user.is_empty()))
.unwrap_or_default()
}

pub fn is_implied_policy(&self) -> bool {
if self.is_service_account() {
return self
.claims
.as_ref()
.map(|x| x.get(&iam_policy_claim_name_sa()).is_some_and(|v| v == INHERITED_POLICY_TYPE))
.unwrap_or_default();
}

false
}

pub fn is_valid(&self) -> bool {
if self.status == "off" {
return false;
}

self.access_key.len() >= 3 && self.secret_key.len() >= 8 && !self.is_expired()
}

pub fn is_owner(&self) -> bool {
false
}
}

/// GenerateCredentials - generate a new access key and secret key pair.
///
/// # Returns
/// * `Ok((String, String))` - access key and secret key pair.
/// * `Err(Error)` - if an error occurs during generation.
///
pub fn generate_credentials() -> Result<(String, String)> {
let ak = utils::gen_access_key(20)?;
let sk = utils::gen_secret_key(40)?;
let ak = rustfs_credentials::gen_access_key(20)?;
let sk = rustfs_credentials::gen_secret_key(40)?;
Ok((ak, sk))
}

/// GetNewCredentialsWithMetadata - generate new credentials with metadata claims and token secret.
///
/// # Arguments
/// * `claims` - metadata claims to be included in the token.
/// * `token_secret` - secret used to sign the token.
///
/// # Returns
/// * `Ok(Credentials)` - newly generated credentials.
/// * `Err(Error)` - if an error occurs during generation.
///
pub fn get_new_credentials_with_metadata(claims: &HashMap<String, Value>, token_secret: &str) -> Result<Credentials> {
let (ak, sk) = generate_credentials()?;

create_new_credentials_with_metadata(&ak, &sk, claims, token_secret)
}

/// CreateNewCredentialsWithMetadata - create new credentials with provided access key, secret key, metadata claims, and token secret.
///
/// # Arguments
/// * `ak` - access key.
/// * `sk` - secret key.
/// * `claims` - metadata claims to be included in the token.
/// * `token_secret` - secret used to sign the token.
///
/// # Returns
/// * `Ok(Credentials)` - newly created credentials.
/// * `Err(Error)` - if an error occurs during creation.
///
pub fn create_new_credentials_with_metadata(
ak: &str,
sk: &str,

@@ -211,11 +116,11 @@ pub fn create_new_credentials_with_metadata(
token_secret: &str,
) -> Result<Credentials> {
if ak.len() < ACCESS_KEY_MIN_LEN || ak.len() > ACCESS_KEY_MAX_LEN {
return Err(IamError::InvalidAccessKeyLength);
return Err(Error::InvalidAccessKeyLength);
}

if sk.len() < SECRET_KEY_MIN_LEN || sk.len() > SECRET_KEY_MAX_LEN {
return Err(IamError::InvalidAccessKeyLength);
return Err(Error::InvalidAccessKeyLength);
}

if token_secret.is_empty() {

@@ -253,6 +158,16 @@ pub fn create_new_credentials_with_metadata(
})
}

/// JWTSign - sign the provided claims with the given token secret to generate a JWT token.
///
/// # Arguments
/// * `claims` - claims to be included in the token.
/// * `token_secret` - secret used to sign the token.
///
/// # Returns
/// * `Ok(String)` - generated JWT token.
/// * `Err(Error)` - if an error occurs during signing.
///
pub fn jwt_sign<T: Serialize>(claims: &T, token_secret: &str) -> Result<String> {
let token = utils::generate_jwt(claims, token_secret)?;
Ok(token)

@@ -267,16 +182,29 @@ pub struct CredentialsBuilder {
description: Option<String>,
expiration: Option<OffsetDateTime>,
allow_site_replicator_account: bool,
claims: Option<serde_json::Value>,
claims: Option<Value>,
parent_user: String,
groups: Option<Vec<String>>,
}

impl CredentialsBuilder {
/// Create a new CredentialsBuilder instance.
///
/// # Returns
/// * `CredentialsBuilder` - a new instance of CredentialsBuilder.
///
pub fn new() -> Self {
Self::default()
}

/// Set the session policy for the credentials.
///
/// # Arguments
/// * `policy` - an optional Policy to set as the session policy.
///
/// # Returns
/// * `Self` - the updated CredentialsBuilder instance.
///
pub fn session_policy(mut self, policy: Option<Policy>) -> Self {
self.session_policy = policy;
self

@@ -312,7 +240,7 @@ impl CredentialsBuilder {
self
}

pub fn claims(mut self, claims: serde_json::Value) -> Self {
pub fn claims(mut self, claims: Value) -> Self {
self.claims = Some(claims);
self
}

@@ -336,7 +264,7 @@ impl TryFrom<CredentialsBuilder> for Credentials {
type Error = Error;
fn try_from(mut value: CredentialsBuilder) -> std::result::Result<Self, Self::Error> {
if value.parent_user.is_empty() {
return Err(IamError::InvalidArgument);
return Err(Error::InvalidArgument);
}

if (value.access_key.is_empty() && !value.secret_key.is_empty())

@@ -346,27 +274,27 @@ impl TryFrom<CredentialsBuilder> for Credentials {
}

if value.parent_user == value.access_key.as_str() {
return Err(IamError::InvalidArgument);
return Err(Error::InvalidArgument);
}

if value.access_key == "site-replicator-0" && !value.allow_site_replicator_account {
return Err(IamError::InvalidArgument);
return Err(Error::InvalidArgument);
}

let mut claim = serde_json::json!({
let mut claim = json!({
"parent": value.parent_user
});

if let Some(p) = value.session_policy {
p.is_valid()?;
let policy_buf = serde_json::to_vec(&p).map_err(|_| IamError::InvalidArgument)?;
let policy_buf = serde_json::to_vec(&p).map_err(|_| Error::InvalidArgument)?;
if policy_buf.len() > 4096 {
return Err(Error::other("session policy is too large"));
}
claim["sessionPolicy"] = serde_json::json!(base64_simd::STANDARD.encode_to_string(&policy_buf));
claim["sa-policy"] = serde_json::json!("embedded-policy");
claim["sessionPolicy"] = json!(base64_simd::STANDARD.encode_to_string(&policy_buf));
claim[rustfs_credentials::IAM_POLICY_CLAIM_NAME_SA] = json!(rustfs_credentials::EMBEDDED_POLICY_TYPE);
} else {
claim["sa-policy"] = serde_json::json!("inherited-policy");
claim[rustfs_credentials::IAM_POLICY_CLAIM_NAME_SA] = json!(rustfs_credentials::INHERITED_POLICY_TYPE);
}

if let Some(Value::Object(obj)) = value.claims {

@@ -379,11 +307,11 @@ impl TryFrom<CredentialsBuilder> for Credentials {
}

if value.access_key.is_empty() {
value.access_key = utils::gen_access_key(20)?;
value.access_key = rustfs_credentials::gen_access_key(20)?;
}

if value.secret_key.is_empty() {
value.access_key = utils::gen_secret_key(40)?;
value.secret_key = rustfs_credentials::gen_secret_key(40)?;
}

claim["accessKey"] = json!(&value.access_key);
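// A minimal, self-contained sketch of the validate-in-TryFrom builder pattern
// used above (illustrative types, not the rustfs structs): setters collect
// fields, and the conversion enforces every invariant in one place.
#[derive(Default)]
struct Builder {
    parent_user: String,
    access_key: String,
}

struct Creds {
    access_key: String,
}

impl TryFrom<Builder> for Creds {
    type Error = String;
    fn try_from(b: Builder) -> Result<Self, Self::Error> {
        if b.parent_user.is_empty() {
            return Err("parent_user is required".into()); // invariant checked once, here
        }
        if b.parent_user == b.access_key {
            return Err("access key must differ from parent user".into());
        }
        Ok(Creds { access_key: b.access_key })
    }
}

fn main() {
    let creds = Creds::try_from(Builder {
        parent_user: "alice".into(),
        access_key: "AKIA_EXAMPLE".into(),
    })
    .unwrap();
    assert_eq!(creds.access_key, "AKIA_EXAMPLE");
}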
@@ -14,8 +14,9 @@

mod credentials;

pub use credentials::Credentials;
pub use credentials::*;

use rustfs_credentials::Credentials;
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;

@@ -27,6 +28,13 @@ pub struct UserIdentity {
}

impl UserIdentity {
/// Create a new UserIdentity
///
/// # Arguments
/// * `credentials` - Credentials object
///
/// # Returns
/// * UserIdentity
pub fn new(credentials: Credentials) -> Self {
UserIdentity {
version: 1,

@@ -28,7 +28,6 @@ pub mod variables;

pub use action::ActionSet;
pub use doc::PolicyDoc;

pub use effect::Effect;
pub use function::Functions;
pub use id::ID;

@@ -37,9 +36,6 @@ pub use principal::Principal;
pub use resource::ResourceSet;
pub use statement::Statement;

pub const EMBEDDED_POLICY_TYPE: &str = "embedded-policy";
pub const INHERITED_POLICY_TYPE: &str = "inherited-policy";

#[derive(thiserror::Error, Debug)]
#[cfg_attr(test, derive(Eq, PartialEq))]
pub enum Error {

@@ -258,7 +258,7 @@ pub fn get_policies_from_claims(claims: &HashMap<String, Value>, policy_claim_na
}

pub fn iam_policy_claim_name_sa() -> String {
"sa-policy".to_string()
rustfs_credentials::IAM_POLICY_CLAIM_NAME_SA.to_string()
}

pub mod default {

@@ -13,46 +13,7 @@
// limitations under the License.

use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey, Header};
use rand::{Rng, RngCore};
use serde::{Serialize, de::DeserializeOwned};
use std::io::{Error, Result};

pub fn gen_access_key(length: usize) -> Result<String> {
const ALPHA_NUMERIC_TABLE: [char; 36] = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
];

if length < 3 {
return Err(Error::other("access key length is too short"));
}

let mut result = String::with_capacity(length);
let mut rng = rand::rng();

for _ in 0..length {
result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]);
}

Ok(result)
}

pub fn gen_secret_key(length: usize) -> Result<String> {
use base64_simd::URL_SAFE_NO_PAD;

if length < 8 {
return Err(Error::other("secret key length is too short"));
}
let mut rng = rand::rng();

let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)];
rng.fill_bytes(&mut key);

let encoded = URL_SAFE_NO_PAD.encode_to_string(&key);
let key_str = encoded.replace("/", "+");

Ok(key_str)
}
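// A brief usage sketch of the generators above, which this commit relocates
// into the rustfs_credentials crate (the call sites elsewhere in this diff
// invoke them as rustfs_credentials::gen_access_key / gen_secret_key):
fn provision_keys() -> std::io::Result<(String, String)> {
    let ak = rustfs_credentials::gen_access_key(20)?; // alphanumeric, length 20
    let sk = rustfs_credentials::gen_secret_key(40)?; // URL-safe base64 material
    Ok((ak, sk))
}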
pub fn generate_jwt<T: Serialize>(claims: &T, secret: &str) -> std::result::Result<String, jsonwebtoken::errors::Error> {
let header = Header::new(Algorithm::HS512);

@@ -72,26 +33,9 @@ pub fn extract_claims<T: DeserializeOwned + Clone>(

#[cfg(test)]
mod tests {
use super::{gen_access_key, gen_secret_key, generate_jwt};
use super::generate_jwt;
use serde::{Deserialize, Serialize};

#[test]
fn test_gen_access_key() {
let a = gen_access_key(10).unwrap();
let b = gen_access_key(10).unwrap();

assert_eq!(a.len(), 10);
assert_eq!(b.len(), 10);
assert_ne!(a, b);
}

#[test]
fn test_gen_secret_key() {
let a = gen_secret_key(10).unwrap();
let b = gen_secret_key(10).unwrap();
assert_ne!(a, b);
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct Claims {
sub: String,

@@ -34,6 +34,7 @@ path = "src/main.rs"

[dependencies]
rustfs-common.workspace = true
rustfs-credentials = { workspace = true }
flatbuffers = { workspace = true }
prost = { workspace = true }
tonic = { workspace = true, features = ["transport"] }

@@ -438,24 +438,6 @@ pub struct DeletePathsResponse {
pub error: ::core::option::Option<Error>,
}
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct ReadMetadataRequest {
#[prost(string, tag = "1")]
pub disk: ::prost::alloc::string::String,
#[prost(string, tag = "2")]
pub volume: ::prost::alloc::string::String,
#[prost(string, tag = "3")]
pub path: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct ReadMetadataResponse {
#[prost(bool, tag = "1")]
pub success: bool,
#[prost(message, optional, tag = "2")]
pub error: ::core::option::Option<Error>,
#[prost(bytes = "bytes", tag = "3")]
pub data: ::prost::bytes::Bytes,
}
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct UpdateMetadataRequest {
#[prost(string, tag = "1")]
pub disk: ::prost::alloc::string::String,

@@ -1542,21 +1524,6 @@ pub mod node_service_client {
.insert(GrpcMethod::new("node_service.NodeService", "UpdateMetadata"));
self.inner.unary(req, path, codec).await
}
pub async fn read_metadata(
&mut self,
request: impl tonic::IntoRequest<super::ReadMetadataRequest>,
) -> std::result::Result<tonic::Response<super::ReadMetadataResponse>, tonic::Status> {
self.inner
.ready()
.await
.map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?;
let codec = tonic_prost::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadMetadata");
let mut req = request.into_request();
req.extensions_mut()
.insert(GrpcMethod::new("node_service.NodeService", "ReadMetadata"));
self.inner.unary(req, path, codec).await
}
pub async fn write_metadata(
&mut self,
request: impl tonic::IntoRequest<super::WriteMetadataRequest>,

@@ -2436,10 +2403,6 @@ pub mod node_service_server {
&self,
request: tonic::Request<super::UpdateMetadataRequest>,
) -> std::result::Result<tonic::Response<super::UpdateMetadataResponse>, tonic::Status>;
async fn read_metadata(
&self,
request: tonic::Request<super::ReadMetadataRequest>,
) -> std::result::Result<tonic::Response<super::ReadMetadataResponse>, tonic::Status>;
async fn write_metadata(
&self,
request: tonic::Request<super::WriteMetadataRequest>,

@@ -3444,34 +3407,6 @@ pub mod node_service_server {
};
Box::pin(fut)
}
"/node_service.NodeService/ReadMetadata" => {
#[allow(non_camel_case_types)]
struct ReadMetadataSvc<T: NodeService>(pub Arc<T>);
impl<T: NodeService> tonic::server::UnaryService<super::ReadMetadataRequest> for ReadMetadataSvc<T> {
type Response = super::ReadMetadataResponse;
type Future = BoxFuture<tonic::Response<Self::Response>, tonic::Status>;
fn call(&mut self, request: tonic::Request<super::ReadMetadataRequest>) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move { <T as NodeService>::read_metadata(&inner, request).await };
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let method = ReadMetadataSvc(inner);
let codec = tonic_prost::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(accept_compression_encodings, send_compression_encodings)
.apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/node_service.NodeService/WriteMetadata" => {
#[allow(non_camel_case_types)]
struct WriteMetadataSvc<T: NodeService>(pub Arc<T>);

@@ -16,7 +16,7 @@
mod generated;

use proto_gen::node_service::node_service_client::NodeServiceClient;
use rustfs_common::{GLOBAL_CONN_MAP, GLOBAL_ROOT_CERT, evict_connection};
use rustfs_common::{GLOBAL_CONN_MAP, GLOBAL_MTLS_IDENTITY, GLOBAL_ROOT_CERT, evict_connection};
use std::{error::Error, time::Duration};
use tonic::{
Request, Status,

@@ -24,7 +24,7 @@ use tonic::{
service::interceptor::InterceptedService,
transport::{Certificate, Channel, ClientTlsConfig, Endpoint},
};
use tracing::{debug, warn};
use tracing::{debug, error, warn};

// Type alias for the complex client type
pub type NodeServiceClientType = NodeServiceClient<

@@ -83,6 +83,11 @@ async fn create_new_channel(addr: &str) -> Result<Channel, Box<dyn Error>> {

let root_cert = GLOBAL_ROOT_CERT.read().await;
if addr.starts_with(RUSTFS_HTTPS_PREFIX) {
if root_cert.is_none() {
debug!("No custom root certificate configured; using system roots for TLS: {}", addr);
// If no custom root cert is configured, try to use system roots.
connector = connector.tls_config(ClientTlsConfig::new())?;
}
if let Some(cert_pem) = root_cert.as_ref() {
let ca = Certificate::from_pem(cert_pem);
// Derive the hostname from the HTTPS URL for TLS hostname verification.

@@ -95,7 +100,13 @@ async fn create_new_channel(addr: &str) -> Result<Channel, Box<dyn Error>> {
.next()
.unwrap_or("");
let tls = if !domain.is_empty() {
ClientTlsConfig::new().ca_certificate(ca).domain_name(domain)
let mut cfg = ClientTlsConfig::new().ca_certificate(ca).domain_name(domain);
let mtls_identity = GLOBAL_MTLS_IDENTITY.read().await;
if let Some(id) = mtls_identity.as_ref() {
let identity = tonic::transport::Identity::from_pem(id.cert_pem.clone(), id.key_pem.clone());
cfg = cfg.identity(identity);
}
cfg
} else {
// Fallback: configure TLS without explicit domain if parsing fails.
ClientTlsConfig::new().ca_certificate(ca)

@@ -103,12 +114,9 @@ async fn create_new_channel(addr: &str) -> Result<Channel, Box<dyn Error>> {
connector = connector.tls_config(tls)?;
debug!("Configured TLS with custom root certificate for: {}", addr);
} else {
debug!("Using system root certificates for TLS: {}", addr);
}
} else {
// Custom root certificates are configured but will be ignored for non-HTTPS addresses.
if root_cert.is_some() {
warn!("Custom root certificates are configured but not used because the address does not use HTTPS: {addr}");
return Err(std::io::Error::other(
"HTTPS requested but no trusted roots are configured. Provide tls/ca.crt (or enable system roots via RUSTFS_TRUST_SYSTEM_CA=true)."
).into());
}
}

@@ -150,7 +158,18 @@ pub async fn node_service_time_out_client(
>,
Box<dyn Error>,
> {
let token: MetadataValue<_> = "rustfs rpc".parse()?;
debug!("Obtaining gRPC client for NodeService at: {}", addr);
let token_str = rustfs_credentials::get_grpc_token();
let token: MetadataValue<_> = token_str.parse().map_err(|e| {
error!(
"Failed to parse gRPC auth token into MetadataValue: {:?}; env={} token_len={} token_prefix={}",
e,
rustfs_credentials::ENV_GRPC_AUTH_TOKEN,
token_str.len(),
token_str.chars().take(2).collect::<String>(),
);
e
})?;
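// A minimal sketch of how a parsed MetadataValue token is attached to every
// outgoing request via a tonic interceptor (illustrative; rustfs wires the
// equivalent closure through InterceptedService, per the type alias above).
use tonic::metadata::{Ascii, MetadataValue};
use tonic::{Request, Status};

fn attach_token(mut req: Request<()>, token: &MetadataValue<Ascii>) -> Result<Request<()>, Status> {
    req.metadata_mut().insert("authorization", token.clone()); // sent with each RPC
    Ok(req)
}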
// Try to get cached channel
|
||||
let cached_channel = { GLOBAL_CONN_MAP.read().await.get(addr).cloned() };
|
||||
|
||||
@@ -313,18 +313,6 @@ message DeletePathsResponse {
|
||||
optional Error error = 2;
|
||||
}
|
||||
|
||||
message ReadMetadataRequest {
|
||||
string disk = 1;
|
||||
string volume = 2;
|
||||
string path = 3;
|
||||
}
|
||||
|
||||
message ReadMetadataResponse {
|
||||
bool success = 1;
|
||||
optional Error error = 2;
|
||||
bytes data = 3;
|
||||
}
|
||||
|
||||
message UpdateMetadataRequest {
|
||||
string disk = 1;
|
||||
string volume = 2;
|
||||
@@ -798,7 +786,6 @@ service NodeService {
|
||||
rpc StatVolume(StatVolumeRequest) returns (StatVolumeResponse) {};
|
||||
rpc DeletePaths(DeletePathsRequest) returns (DeletePathsResponse) {};
|
||||
rpc UpdateMetadata(UpdateMetadataRequest) returns (UpdateMetadataResponse) {};
|
||||
rpc ReadMetadata(ReadMetadataRequest) returns (ReadMetadataResponse) {};
|
||||
rpc WriteMetadata(WriteMetadataRequest) returns (WriteMetadataResponse) {};
|
||||
rpc ReadVersion(ReadVersionRequest) returns (ReadVersionResponse) {};
|
||||
rpc ReadXL(ReadXLRequest) returns (ReadXLResponse) {};
|
||||
@@ -807,7 +794,6 @@ service NodeService {
|
||||
rpc ReadMultiple(ReadMultipleRequest) returns (ReadMultipleResponse) {};
|
||||
rpc DeleteVolume(DeleteVolumeRequest) returns (DeleteVolumeResponse) {};
|
||||
rpc DiskInfo(DiskInfoRequest) returns (DiskInfoResponse) {};
|
||||
|
||||
|
||||
/* -------------------------------lock service-------------------------- */
|
||||
|
||||
|
||||
@@ -41,7 +41,8 @@ reqwest.workspace = true
|
||||
tokio-util.workspace = true
|
||||
faster-hex.workspace = true
|
||||
futures.workspace = true
|
||||
rustfs-utils = { workspace = true, features = ["io", "hash", "compress"] }
|
||||
rustfs-config = { workspace = true, features = ["constants"] }
|
||||
rustfs-utils = { workspace = true, features = ["io", "hash", "compress", "tls"] }
|
||||
serde_json.workspace = true
|
||||
md-5 = { workspace = true }
|
||||
tracing.workspace = true
|
||||
|
||||
@@ -12,11 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use crate::{EtagResolvable, HashReaderDetector, HashReaderMut};
 use bytes::Bytes;
 use futures::{Stream, TryStreamExt as _};
 use http::HeaderMap;
 use pin_project_lite::pin_project;
-use reqwest::{Client, Method, RequestBuilder};
+use reqwest::{Certificate, Client, Identity, Method, RequestBuilder};
 use std::error::Error as _;
 use std::io::{self, Error};
 use std::ops::Not as _;
@@ -26,21 +27,88 @@ use std::task::{Context, Poll};
 use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
 use tokio::sync::mpsc;
 use tokio_util::io::StreamReader;
+use tracing::error;
-
-use crate::{EtagResolvable, HashReaderDetector, HashReaderMut};
+/// Get the TLS path from the RUSTFS_TLS_PATH environment variable.
+/// If the variable is not set, return None.
+fn tls_path() -> Option<&'static std::path::PathBuf> {
+    static TLS_PATH: LazyLock<Option<std::path::PathBuf>> = LazyLock::new(|| {
+        std::env::var("RUSTFS_TLS_PATH")
+            .ok()
+            .and_then(|s| if s.is_empty() { None } else { Some(s.into()) })
+    });
+    TLS_PATH.as_ref()
+}
+
+/// Load CA root certificates from the RUSTFS_TLS_PATH directory.
+/// The CA certificates should be in PEM format and stored in the file
+/// specified by the RUSTFS_CA_CERT constant.
+/// If the file does not exist or cannot be read, return the builder unchanged.
+fn load_ca_roots_from_tls_path(builder: reqwest::ClientBuilder) -> reqwest::ClientBuilder {
+    let Some(tp) = tls_path() else {
+        return builder;
+    };
+    let ca_path = tp.join(rustfs_config::RUSTFS_CA_CERT);
+    if !ca_path.exists() {
+        return builder;
+    }
+
+    let Ok(certs_der) = rustfs_utils::load_cert_bundle_der_bytes(ca_path.to_str().unwrap_or_default()) else {
+        return builder;
+    };
+
+    let mut b = builder;
+    for der in certs_der {
+        if let Ok(cert) = Certificate::from_der(&der) {
+            b = b.add_root_certificate(cert);
+        }
+    }
+    b
+}
+
+/// Load optional mTLS identity from the RUSTFS_TLS_PATH directory.
+/// The client certificate and private key should be in PEM format and stored in the files
+/// specified by RUSTFS_CLIENT_CERT_FILENAME and RUSTFS_CLIENT_KEY_FILENAME constants.
+/// If the files do not exist or cannot be read, return None.
+fn load_optional_mtls_identity_from_tls_path() -> Option<Identity> {
+    let tp = tls_path()?;
+    let cert = std::fs::read(tp.join(rustfs_config::RUSTFS_CLIENT_CERT_FILENAME)).ok()?;
+    let key = std::fs::read(tp.join(rustfs_config::RUSTFS_CLIENT_KEY_FILENAME)).ok()?;
+
+    let mut pem = Vec::with_capacity(cert.len() + key.len() + 1);
+    pem.extend_from_slice(&cert);
+    if !pem.ends_with(b"\n") {
+        pem.push(b'\n');
+    }
+    pem.extend_from_slice(&key);
+
+    match Identity::from_pem(&pem) {
+        Ok(id) => Some(id),
+        Err(e) => {
+            error!("Failed to load mTLS identity from PEM: {e}");
+            None
+        }
+    }
+}
+
 fn get_http_client() -> Client {
     // Reuse the HTTP connection pool in the global `reqwest::Client` instance
     // TODO: interact with load balancing?
     static CLIENT: LazyLock<Client> = LazyLock::new(|| {
-        Client::builder()
+        let mut builder = Client::builder()
             .connect_timeout(std::time::Duration::from_secs(5))
             .tcp_keepalive(std::time::Duration::from_secs(10))
             .http2_keep_alive_interval(std::time::Duration::from_secs(5))
             .http2_keep_alive_timeout(std::time::Duration::from_secs(3))
-            .http2_keep_alive_while_idle(true)
-            .build()
-            .expect("Failed to create global HTTP client")
+            .http2_keep_alive_while_idle(true);
+
+        // HTTPS root trust + optional mTLS identity from RUSTFS_TLS_PATH
+        builder = load_ca_roots_from_tls_path(builder);
+        if let Some(id) = load_optional_mtls_identity_from_tls_path() {
+            builder = builder.identity(id);
+        }
+
+        builder.build().expect("Failed to create global HTTP client")
     });
     CLIENT.clone()
 }
 
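A compact standalone sketch of the same reqwest pattern: trust a custom CA bundle and present an mTLS identity. File names here are illustrative (not the crate's constants), and `Identity::from_pem` on a cert+key bundle assumes a rustls-backed reqwest build:

```rust
use std::{fs, path::Path};

// Sketch only: mirrors load_ca_roots_from_tls_path and the identity loader above.
fn build_tls_client(tls_dir: &Path) -> Result<reqwest::Client, Box<dyn std::error::Error>> {
    // Trust the certificate(s) in the CA bundle.
    let ca_pem = fs::read(tls_dir.join("ca.crt"))?;
    let ca = reqwest::Certificate::from_pem(&ca_pem)?;

    // Client certificate and key concatenated into one PEM blob, as above.
    let mut id_pem = fs::read(tls_dir.join("client.crt"))?;
    id_pem.push(b'\n');
    id_pem.extend_from_slice(&fs::read(tls_dir.join("client.key"))?);
    let identity = reqwest::Identity::from_pem(&id_pem)?;

    Ok(reqwest::Client::builder()
        .add_root_certificate(ca)
        .identity(identity)
        .build()?)
}
```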
@@ -1,63 +0,0 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[package]
name = "rustfs-scanner"
version.workspace = true
edition.workspace = true
authors = ["RustFS Team"]
license.workspace = true
repository.workspace = true
rust-version.workspace = true
homepage.workspace = true
description = "RustFS Scanner provides scanning capabilities for data integrity checks, health monitoring, and storage analysis."
keywords = ["RustFS", "scanner", "health-monitoring", "data-integrity", "storage-analysis", "Minio"]
categories = ["web-programming", "development-tools", "filesystem"]
documentation = "https://docs.rs/rustfs-scanner/latest/rustfs_scanner/"

[lints]
workspace = true

[dependencies]
rustfs-config = { workspace = true }
rustfs-common = { workspace = true }
rustfs-utils = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tracing = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
thiserror = { workspace = true }
uuid = { workspace = true, features = ["v4", "serde"] }
anyhow = { workspace = true }
async-trait = { workspace = true }
futures = { workspace = true }
time = { workspace = true }
chrono = { workspace = true }
path-clean = { workspace = true }
rmp-serde = { workspace = true }
rustfs-filemeta = { workspace = true }
rustfs-madmin = { workspace = true }
tokio-util = { workspace = true }
rustfs-ecstore = { workspace = true }
rustfs-ahm = { workspace = true }
http = { workspace = true }
rand = { workspace = true }
s3s = { workspace = true }

[dev-dependencies]
tokio-test = { workspace = true }
tracing-subscriber = { workspace = true }
tempfile = { workspace = true }
serial_test = { workspace = true }
heed = { workspace = true }
@@ -1,36 +0,0 @@
# RustFS Scanner

RustFS Scanner provides scanning capabilities such as data integrity checks, health monitoring, and storage analysis.

## Features

- Data integrity scanning
- Health monitoring
- Storage analysis
- Extensible scanning framework

## Usage Example

```rust
use rustfs_scanner::ScannerError;

// TODO: add usage examples
```

## Development

### Build

```bash
cargo build --package rustfs-scanner
```

### Test

```bash
cargo test --package rustfs-scanner
```

## License

Apache License 2.0
@@ -1,39 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use rustfs_ecstore::disk::{BUCKET_META_PREFIX, RUSTFS_META_BUCKET};
use rustfs_utils::path::SLASH_SEPARATOR;
use std::sync::LazyLock;

// Data usage constants
pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR;

const DATA_USAGE_OBJ_NAME: &str = ".usage.json";

const DATA_USAGE_BLOOM_NAME: &str = ".bloomcycle.bin";

pub const DATA_USAGE_CACHE_NAME: &str = ".usage-cache.bin";

// Data usage paths (computed at runtime)
pub static DATA_USAGE_BUCKET: LazyLock<String> =
    LazyLock::new(|| format!("{RUSTFS_META_BUCKET}{SLASH_SEPARATOR}{BUCKET_META_PREFIX}"));

pub static DATA_USAGE_OBJ_NAME_PATH: LazyLock<String> =
    LazyLock::new(|| format!("{BUCKET_META_PREFIX}{SLASH_SEPARATOR}{DATA_USAGE_OBJ_NAME}"));

pub static DATA_USAGE_BLOOM_NAME_PATH: LazyLock<String> =
    LazyLock::new(|| format!("{BUCKET_META_PREFIX}{SLASH_SEPARATOR}{DATA_USAGE_BLOOM_NAME}"));

pub static BACKGROUND_HEAL_INFO_PATH: LazyLock<String> =
    LazyLock::new(|| format!("{BUCKET_META_PREFIX}{SLASH_SEPARATOR}.background-heal.json"));
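As an aside, the LazyLock pattern in the deleted constants file boils down to this self-contained sketch; the constant values here are made up for illustration:

```rust
// A path assembled once, on first access, from compile-time pieces.
use std::sync::LazyLock;

const SLASH_SEPARATOR: &str = "/";
const BUCKET_META_PREFIX: &str = "buckets";

static DATA_USAGE_CACHE_PATH: LazyLock<String> =
    LazyLock::new(|| format!("{BUCKET_META_PREFIX}{SLASH_SEPARATOR}.usage-cache.bin"));

fn main() {
    // The first deref runs the closure; later derefs reuse the cached String.
    assert_eq!(DATA_USAGE_CACHE_PATH.as_str(), "buckets/.usage-cache.bin");
}
```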
File diff suppressed because it is too large
@@ -1,886 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::time::{Duration, SystemTime, UNIX_EPOCH};

#[allow(dead_code)]
#[derive(Debug, Default)]
pub struct TimedAction {
    count: u64,
    acc_time: u64,
    min_time: Option<u64>,
    max_time: Option<u64>,
    bytes: u64,
}

#[allow(dead_code)]
impl TimedAction {
    // Avg returns the average time spent on the action.
    pub fn avg(&self) -> Option<Duration> {
        if self.count == 0 {
            return None;
        }
        Some(Duration::from_nanos(self.acc_time / self.count))
    }

    // AvgBytes returns the average bytes processed.
    pub fn avg_bytes(&self) -> u64 {
        if self.count == 0 {
            return 0;
        }
        self.bytes / self.count
    }

    // Merge other into t.
    pub fn merge(&mut self, other: TimedAction) {
        self.count += other.count;
        self.acc_time += other.acc_time;
        self.bytes += other.bytes;

        if self.count == 0 {
            self.min_time = other.min_time;
        }
        if let Some(other_min) = other.min_time {
            self.min_time = self.min_time.map_or(Some(other_min), |min| Some(min.min(other_min)));
        }

        self.max_time = self
            .max_time
            .map_or(other.max_time, |max| Some(max.max(other.max_time.unwrap_or(0))));
    }
}

#[allow(dead_code)]
#[derive(Debug)]
enum SizeCategory {
    SizeLessThan1KiB = 0,
    SizeLessThan1MiB,
    SizeLessThan10MiB,
    SizeLessThan100MiB,
    SizeLessThan1GiB,
    SizeGreaterThan1GiB,
    // Add new entries here
    SizeLastElemMarker,
}

impl std::fmt::Display for SizeCategory {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let s = match *self {
            SizeCategory::SizeLessThan1KiB => "SizeLessThan1KiB",
            SizeCategory::SizeLessThan1MiB => "SizeLessThan1MiB",
            SizeCategory::SizeLessThan10MiB => "SizeLessThan10MiB",
            SizeCategory::SizeLessThan100MiB => "SizeLessThan100MiB",
            SizeCategory::SizeLessThan1GiB => "SizeLessThan1GiB",
            SizeCategory::SizeGreaterThan1GiB => "SizeGreaterThan1GiB",
            SizeCategory::SizeLastElemMarker => "SizeLastElemMarker",
        };
        write!(f, "{s}")
    }
}

#[derive(Clone, Debug, Default, Copy)]
pub struct AccElem {
    pub total: u64,
    pub size: u64,
    pub n: u64,
}

impl AccElem {
    pub fn add(&mut self, dur: &Duration) {
        let dur = dur.as_secs();
        self.total = self.total.wrapping_add(dur);
        self.n = self.n.wrapping_add(1);
    }

    pub fn merge(&mut self, b: &AccElem) {
        self.n = self.n.wrapping_add(b.n);
        self.total = self.total.wrapping_add(b.total);
        self.size = self.size.wrapping_add(b.size);
    }

    pub fn avg(&self) -> Duration {
        if self.n >= 1 && self.total > 0 {
            return Duration::from_secs(self.total / self.n);
        }
        Duration::from_secs(0)
    }
}

#[derive(Clone, Debug)]
pub struct LastMinuteLatency {
    pub totals: Vec<AccElem>,
    pub last_sec: u64,
}

impl Default for LastMinuteLatency {
    fn default() -> Self {
        Self {
            totals: vec![AccElem::default(); 60],
            last_sec: Default::default(),
        }
    }
}

impl LastMinuteLatency {
    pub fn merge(&mut self, o: &LastMinuteLatency) -> LastMinuteLatency {
        let mut merged = LastMinuteLatency::default();
        let mut x = o.clone();
        if self.last_sec > o.last_sec {
            x.forward_to(self.last_sec);
            merged.last_sec = self.last_sec;
        } else {
            self.forward_to(o.last_sec);
            merged.last_sec = o.last_sec;
        }

        for i in 0..merged.totals.len() {
            merged.totals[i] = AccElem {
                total: self.totals[i].total + o.totals[i].total,
                n: self.totals[i].n + o.totals[i].n,
                size: self.totals[i].size + o.totals[i].size,
            }
        }
        merged
    }

    pub fn add(&mut self, t: &Duration) {
        let sec = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();
        self.forward_to(sec);
        let win_idx = sec % 60;
        self.totals[win_idx as usize].add(t);
        self.last_sec = sec;
    }

    pub fn add_all(&mut self, sec: u64, a: &AccElem) {
        self.forward_to(sec);
        let win_idx = sec % 60;
        self.totals[win_idx as usize].merge(a);
        self.last_sec = sec;
    }

    pub fn get_total(&mut self) -> AccElem {
        let mut res = AccElem::default();
        let sec = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();
        self.forward_to(sec);
        for elem in self.totals.iter() {
            res.merge(elem);
        }
        res
    }

    pub fn forward_to(&mut self, t: u64) {
        if self.last_sec >= t {
            return;
        }
        if t - self.last_sec >= 60 {
            self.totals = vec![AccElem::default(); 60];
            self.last_sec = t;
            return;
        }
        while self.last_sec != t {
            let idx = (self.last_sec + 1) % 60;
            self.totals[idx as usize] = AccElem::default();
            self.last_sec += 1;
        }
    }
}
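Before the test module, it may help to see the 60-slot ring semantics of forward_to in isolation; this sketch re-implements just the index arithmetic with made-up values:

```rust
// Each second maps to slot `sec % 60`; advancing the clock clears the
// slots it passes over, so stale samples never leak into a new minute.
fn main() {
    let mut ring = vec![0u64; 60];
    let mut last_sec = 100u64;
    ring[(last_sec % 60) as usize] = 7; // sample recorded at t=100 (slot 40)

    // Advance to t=102: slots 41 and 42 are re-zeroed before reuse.
    let target = 102u64;
    while last_sec != target {
        let idx = ((last_sec + 1) % 60) as usize;
        ring[idx] = 0;
        last_sec += 1;
    }

    assert_eq!(ring[40], 7); // still inside the one-minute window
    assert_eq!(ring[41], 0);
    assert_eq!(ring[42], 0);
}
```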
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    #[test]
    fn test_acc_elem_default() {
        let elem = AccElem::default();
        assert_eq!(elem.total, 0);
        assert_eq!(elem.size, 0);
        assert_eq!(elem.n, 0);
    }

    #[test]
    fn test_acc_elem_add_single_duration() {
        let mut elem = AccElem::default();
        let duration = Duration::from_secs(5);

        elem.add(&duration);

        assert_eq!(elem.total, 5);
        assert_eq!(elem.n, 1);
        assert_eq!(elem.size, 0); // size is not modified by add
    }

    #[test]
    fn test_acc_elem_add_multiple_durations() {
        let mut elem = AccElem::default();

        elem.add(&Duration::from_secs(3));
        elem.add(&Duration::from_secs(7));
        elem.add(&Duration::from_secs(2));

        assert_eq!(elem.total, 12);
        assert_eq!(elem.n, 3);
        assert_eq!(elem.size, 0);
    }

    #[test]
    fn test_acc_elem_add_zero_duration() {
        let mut elem = AccElem::default();
        let duration = Duration::from_secs(0);

        elem.add(&duration);

        assert_eq!(elem.total, 0);
        assert_eq!(elem.n, 1);
    }

    #[test]
    fn test_acc_elem_add_subsecond_duration() {
        let mut elem = AccElem::default();
        // Duration less than 1 second should be truncated to 0
        let duration = Duration::from_millis(500);

        elem.add(&duration);

        assert_eq!(elem.total, 0); // as_secs() truncates subsecond values
        assert_eq!(elem.n, 1);
    }

    #[test]
    fn test_acc_elem_merge_empty_elements() {
        let mut elem1 = AccElem::default();
        let elem2 = AccElem::default();

        elem1.merge(&elem2);

        assert_eq!(elem1.total, 0);
        assert_eq!(elem1.size, 0);
        assert_eq!(elem1.n, 0);
    }

    #[test]
    fn test_acc_elem_merge_with_data() {
        let mut elem1 = AccElem {
            total: 10,
            size: 100,
            n: 2,
        };
        let elem2 = AccElem {
            total: 15,
            size: 200,
            n: 3,
        };

        elem1.merge(&elem2);

        assert_eq!(elem1.total, 25);
        assert_eq!(elem1.size, 300);
        assert_eq!(elem1.n, 5);
    }

    #[test]
    fn test_acc_elem_merge_one_empty() {
        let mut elem1 = AccElem {
            total: 10,
            size: 100,
            n: 2,
        };
        let elem2 = AccElem::default();

        elem1.merge(&elem2);

        assert_eq!(elem1.total, 10);
        assert_eq!(elem1.size, 100);
        assert_eq!(elem1.n, 2);
    }

    #[test]
    fn test_acc_elem_avg_with_data() {
        let elem = AccElem {
            total: 15,
            size: 0,
            n: 3,
        };

        let avg = elem.avg();
        assert_eq!(avg, Duration::from_secs(5)); // 15 / 3 = 5
    }

    #[test]
    fn test_acc_elem_avg_zero_count() {
        let elem = AccElem {
            total: 10,
            size: 0,
            n: 0,
        };

        let avg = elem.avg();
        assert_eq!(avg, Duration::from_secs(0));
    }

    #[test]
    fn test_acc_elem_avg_zero_total() {
        let elem = AccElem { total: 0, size: 0, n: 5 };

        let avg = elem.avg();
        assert_eq!(avg, Duration::from_secs(0));
    }

    #[test]
    fn test_acc_elem_avg_rounding() {
        let elem = AccElem {
            total: 10,
            size: 0,
            n: 3,
        };

        let avg = elem.avg();
        assert_eq!(avg, Duration::from_secs(3)); // 10 / 3 = 3 (integer division)
    }

    #[test]
    fn test_last_minute_latency_default() {
        let latency = LastMinuteLatency::default();

        assert_eq!(latency.totals.len(), 60);
        assert_eq!(latency.last_sec, 0);

        // All elements should be default (empty)
        for elem in &latency.totals {
            assert_eq!(elem.total, 0);
            assert_eq!(elem.size, 0);
            assert_eq!(elem.n, 0);
        }
    }

    #[test]
    fn test_last_minute_latency_forward_to_same_time() {
        let mut latency = LastMinuteLatency {
            last_sec: 100,
            ..Default::default()
        };

        // Add some data to verify it's not cleared
        latency.totals[0].total = 10;
        latency.totals[0].n = 1;

        latency.forward_to(100); // Same time

        assert_eq!(latency.last_sec, 100);
        assert_eq!(latency.totals[0].total, 10); // Data should remain
        assert_eq!(latency.totals[0].n, 1);
    }

    #[test]
    fn test_last_minute_latency_forward_to_past_time() {
        let mut latency = LastMinuteLatency {
            last_sec: 100,
            ..Default::default()
        };

        // Add some data to verify it's not cleared
        latency.totals[0].total = 10;
        latency.totals[0].n = 1;

        latency.forward_to(50); // Past time

        assert_eq!(latency.last_sec, 100); // Should not change
        assert_eq!(latency.totals[0].total, 10); // Data should remain
        assert_eq!(latency.totals[0].n, 1);
    }

    #[test]
    fn test_last_minute_latency_forward_to_large_gap() {
        let mut latency = LastMinuteLatency {
            last_sec: 100,
            ..Default::default()
        };

        // Add some data to verify it's cleared
        latency.totals[0].total = 10;
        latency.totals[0].n = 1;

        latency.forward_to(200); // Gap >= 60 seconds

        assert_eq!(latency.last_sec, 200); // last_sec should be updated to target time

        // All data should be cleared
        for elem in &latency.totals {
            assert_eq!(elem.total, 0);
            assert_eq!(elem.size, 0);
            assert_eq!(elem.n, 0);
        }
    }

    #[test]
    fn test_last_minute_latency_forward_to_small_gap() {
        let mut latency = LastMinuteLatency {
            last_sec: 100,
            ..Default::default()
        };

        // Add data at specific indices
        latency.totals[41].total = 10; // (100 + 1) % 60 = 41
        latency.totals[42].total = 20; // (100 + 2) % 60 = 42

        latency.forward_to(102); // Forward by 2 seconds

        assert_eq!(latency.last_sec, 102);

        // The slots that were advanced should be cleared
        assert_eq!(latency.totals[41].total, 0); // Cleared during forward
        assert_eq!(latency.totals[42].total, 0); // Cleared during forward
    }

    #[test]
    fn test_last_minute_latency_add_all() {
        let mut latency = LastMinuteLatency::default();
        let acc_elem = AccElem {
            total: 15,
            size: 100,
            n: 3,
        };

        latency.add_all(1000, &acc_elem);

        assert_eq!(latency.last_sec, 1000);
        let idx = 1000 % 60; // Should be 40
        assert_eq!(latency.totals[idx as usize].total, 15);
        assert_eq!(latency.totals[idx as usize].size, 100);
        assert_eq!(latency.totals[idx as usize].n, 3);
    }

    #[test]
    fn test_last_minute_latency_add_all_multiple() {
        let mut latency = LastMinuteLatency::default();

        let acc_elem1 = AccElem {
            total: 10,
            size: 50,
            n: 2,
        };
        let acc_elem2 = AccElem {
            total: 20,
            size: 100,
            n: 4,
        };

        latency.add_all(1000, &acc_elem1);
        latency.add_all(1000, &acc_elem2); // Same second

        let idx = 1000 % 60;
        assert_eq!(latency.totals[idx as usize].total, 30); // 10 + 20
        assert_eq!(latency.totals[idx as usize].size, 150); // 50 + 100
        assert_eq!(latency.totals[idx as usize].n, 6); // 2 + 4
    }

    #[test]
    fn test_last_minute_latency_merge_same_time() {
        let mut latency1 = LastMinuteLatency::default();
        let mut latency2 = LastMinuteLatency::default();

        latency1.last_sec = 1000;
        latency2.last_sec = 1000;

        // Add data to both
        latency1.totals[0].total = 10;
        latency1.totals[0].n = 2;
        latency2.totals[0].total = 20;
        latency2.totals[0].n = 3;

        let merged = latency1.merge(&latency2);

        assert_eq!(merged.last_sec, 1000);
        assert_eq!(merged.totals[0].total, 30); // 10 + 20
        assert_eq!(merged.totals[0].n, 5); // 2 + 3
    }

    #[test]
    fn test_last_minute_latency_merge_different_times() {
        let mut latency1 = LastMinuteLatency::default();
        let mut latency2 = LastMinuteLatency::default();

        latency1.last_sec = 1000;
        latency2.last_sec = 1010; // 10 seconds later

        // Add data to both
        latency1.totals[0].total = 10;
        latency2.totals[0].total = 20;

        let merged = latency1.merge(&latency2);

        assert_eq!(merged.last_sec, 1010); // Should use the later time
        assert_eq!(merged.totals[0].total, 30);
    }

    #[test]
    fn test_last_minute_latency_merge_empty() {
        let mut latency1 = LastMinuteLatency::default();
        let latency2 = LastMinuteLatency::default();

        let merged = latency1.merge(&latency2);

        assert_eq!(merged.last_sec, 0);
        for elem in &merged.totals {
            assert_eq!(elem.total, 0);
            assert_eq!(elem.size, 0);
            assert_eq!(elem.n, 0);
        }
    }

    #[test]
    fn test_last_minute_latency_window_wraparound() {
        let mut latency = LastMinuteLatency::default();

        // Test that indices wrap around correctly
        for sec in 0..120 {
            // Test for 2 minutes
            let acc_elem = AccElem {
                total: sec,
                size: 0,
                n: 1,
            };
            latency.add_all(sec, &acc_elem);

            let expected_idx = sec % 60;
            assert_eq!(latency.totals[expected_idx as usize].total, sec);
        }
    }

    #[test]
    fn test_last_minute_latency_time_progression() {
        let mut latency = LastMinuteLatency::default();

        // Add data at time 1000
        latency.add_all(
            1000,
            &AccElem {
                total: 10,
                size: 0,
                n: 1,
            },
        );

        // Forward to time 1030 (30 seconds later)
        latency.forward_to(1030);

        // Original data should still be there
        let idx_1000 = 1000 % 60;
        assert_eq!(latency.totals[idx_1000 as usize].total, 10);

        // Forward to time 1070 (70 seconds from original, > 60 seconds)
        latency.forward_to(1070);

        // All data should be cleared due to large gap
        for elem in &latency.totals {
            assert_eq!(elem.total, 0);
            assert_eq!(elem.n, 0);
        }
    }

    #[test]
    fn test_last_minute_latency_realistic_scenario() {
        let mut latency = LastMinuteLatency::default();
        let base_time = 1000u64;

        // Add data for exactly 60 seconds to fill the window
        for i in 0..60 {
            let current_time = base_time + i;
            let duration_secs = i % 10 + 1; // Varying durations 1-10 seconds
            let acc_elem = AccElem {
                total: duration_secs,
                size: 1024 * (i % 5 + 1), // Varying sizes
                n: 1,
            };

            latency.add_all(current_time, &acc_elem);
        }

        // Count non-empty slots after filling the window
        let mut non_empty_count = 0;
        let mut total_n = 0;
        let mut total_sum = 0;

        for elem in &latency.totals {
            if elem.n > 0 {
                non_empty_count += 1;
                total_n += elem.n;
                total_sum += elem.total;
            }
        }

        // We should have exactly 60 non-empty slots (one for each second in the window)
        assert_eq!(non_empty_count, 60);
        assert_eq!(total_n, 60); // 60 data points total
        assert!(total_sum > 0);

        // Test manual total calculation (get_total uses system time which interferes with test)
        let mut manual_total = AccElem::default();
        for elem in &latency.totals {
            manual_total.merge(elem);
        }
        assert_eq!(manual_total.n, 60);
        assert_eq!(manual_total.total, total_sum);
    }

    #[test]
    fn test_acc_elem_clone_and_debug() {
        let elem = AccElem {
            total: 100,
            size: 200,
            n: 5,
        };

        let cloned = elem;
        assert_eq!(elem.total, cloned.total);
        assert_eq!(elem.size, cloned.size);
        assert_eq!(elem.n, cloned.n);

        // Test Debug trait
        let debug_str = format!("{elem:?}");
        assert!(debug_str.contains("100"));
        assert!(debug_str.contains("200"));
        assert!(debug_str.contains("5"));
    }

    #[test]
    fn test_last_minute_latency_clone() {
        let mut latency = LastMinuteLatency {
            last_sec: 1000,
            ..Default::default()
        };
        latency.totals[0].total = 100;
        latency.totals[0].n = 5;

        let cloned = latency.clone();
        assert_eq!(latency.last_sec, cloned.last_sec);
        assert_eq!(latency.totals[0].total, cloned.totals[0].total);
        assert_eq!(latency.totals[0].n, cloned.totals[0].n);
    }

    #[test]
    fn test_edge_case_max_values() {
        let mut elem = AccElem {
            total: u64::MAX - 50,
            size: u64::MAX - 50,
            n: u64::MAX - 50,
        };

        let other = AccElem {
            total: 100,
            size: 100,
            n: 100,
        };

        // This should not panic due to overflow, values will wrap around
        elem.merge(&other);

        // Values should wrap around due to overflow (wrapping_add behavior)
        assert_eq!(elem.total, 49); // (u64::MAX - 50) + 100 wraps to 49
        assert_eq!(elem.size, 49);
        assert_eq!(elem.n, 49);
    }

    #[test]
    fn test_forward_to_boundary_conditions() {
        let mut latency = LastMinuteLatency {
            last_sec: 59,
            ..Default::default()
        };

        // Add data at the last slot
        latency.totals[59].total = 100;
        latency.totals[59].n = 1;

        // Forward exactly 60 seconds (boundary case)
        latency.forward_to(119);

        // All data should be cleared
        for elem in &latency.totals {
            assert_eq!(elem.total, 0);
            assert_eq!(elem.n, 0);
        }
    }

    #[test]
    fn test_get_total_with_data() {
        let mut latency = LastMinuteLatency::default();

        // Set a recent timestamp to avoid forward_to clearing data
        let current_time = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();
        latency.last_sec = current_time;

        // Add data to multiple slots
        latency.totals[0] = AccElem {
            total: 10,
            size: 100,
            n: 1,
        };
        latency.totals[1] = AccElem {
            total: 20,
            size: 200,
            n: 2,
        };
        latency.totals[59] = AccElem {
            total: 30,
            size: 300,
            n: 3,
        };

        let total = latency.get_total();

        assert_eq!(total.total, 60);
        assert_eq!(total.size, 600);
        assert_eq!(total.n, 6);
    }

    #[test]
    fn test_window_index_calculation() {
        // Test that window index calculation works correctly
        let _latency = LastMinuteLatency::default();

        let acc_elem = AccElem { total: 1, size: 1, n: 1 };

        // Test various timestamps
        let test_cases = [(0, 0), (1, 1), (59, 59), (60, 0), (61, 1), (119, 59), (120, 0)];

        for (timestamp, expected_idx) in test_cases {
            let mut test_latency = LastMinuteLatency::default();
            test_latency.add_all(timestamp, &acc_elem);

            assert_eq!(
                test_latency.totals[expected_idx].n, 1,
                "Failed for timestamp {timestamp} (expected index {expected_idx})"
            );
        }
    }

    #[test]
    fn test_concurrent_safety_simulation() {
        // Simulate concurrent access patterns
        let mut latency = LastMinuteLatency::default();

        // Use current time to ensure data doesn't get cleared by get_total
        let current_time = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();

        // Simulate rapid additions within a 60-second window
        for i in 0..1000 {
            let acc_elem = AccElem {
                total: (i % 10) + 1, // Ensure non-zero values
                size: (i % 100) + 1,
                n: 1,
            };
            // Keep all timestamps within the current minute window
            latency.add_all(current_time - (i % 60), &acc_elem);
        }

        let total = latency.get_total();
        assert!(total.n > 0, "Total count should be greater than 0");
        assert!(total.total > 0, "Total time should be greater than 0");
    }

    #[test]
    fn test_acc_elem_debug_format() {
        let elem = AccElem {
            total: 123,
            size: 456,
            n: 789,
        };

        let debug_str = format!("{elem:?}");
        assert!(debug_str.contains("123"));
        assert!(debug_str.contains("456"));
        assert!(debug_str.contains("789"));
    }

    #[test]
    fn test_large_values() {
        let mut elem = AccElem::default();

        // Test with large duration values
        let large_duration = Duration::from_secs(u64::MAX / 2);
        elem.add(&large_duration);

        assert_eq!(elem.total, u64::MAX / 2);
        assert_eq!(elem.n, 1);

        // Test average calculation with large values
        let avg = elem.avg();
        assert_eq!(avg, Duration::from_secs(u64::MAX / 2));
    }

    #[test]
    fn test_zero_duration_handling() {
        let mut elem = AccElem::default();

        let zero_duration = Duration::from_secs(0);
        elem.add(&zero_duration);

        assert_eq!(elem.total, 0);
        assert_eq!(elem.n, 1);
        assert_eq!(elem.avg(), Duration::from_secs(0));
    }
}

const SIZE_LAST_ELEM_MARKER: usize = 10; // Assumed marker size is 10, modify according to actual situation

#[allow(dead_code)]
#[derive(Debug, Default)]
pub struct LastMinuteHistogram {
    histogram: Vec<LastMinuteLatency>,
    size: u32,
}

impl LastMinuteHistogram {
    pub fn merge(&mut self, other: &LastMinuteHistogram) {
        for i in 0..self.histogram.len() {
            self.histogram[i].merge(&other.histogram[i]);
        }
    }

    pub fn add(&mut self, size: i64, t: Duration) {
        let index = size_to_tag(size);
        self.histogram[index].add(&t);
    }

    pub fn get_avg_data(&mut self) -> [AccElem; SIZE_LAST_ELEM_MARKER] {
        let mut res = [AccElem::default(); SIZE_LAST_ELEM_MARKER];
        for (i, elem) in self.histogram.iter_mut().enumerate() {
            res[i] = elem.get_total();
        }
        res
    }
}

fn size_to_tag(size: i64) -> usize {
    match size {
        _ if size < 1024 => 0,               // sizeLessThan1KiB
        _ if size < 1024 * 1024 => 1,        // sizeLessThan1MiB
        _ if size < 10 * 1024 * 1024 => 2,   // sizeLessThan10MiB
        _ if size < 100 * 1024 * 1024 => 3,  // sizeLessThan100MiB
        _ if size < 1024 * 1024 * 1024 => 4, // sizeLessThan1GiB
        _ => 5,                              // sizeGreaterThan1GiB
    }
}
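To close out the deleted module, a self-contained check of the size_to_tag bucketing rule: thresholds are copied from above, expected tags follow from first-match-wins, and the assertions are illustrative:

```rust
fn size_to_tag(size: i64) -> usize {
    match size {
        _ if size < 1024 => 0,
        _ if size < 1024 * 1024 => 1,
        _ if size < 10 * 1024 * 1024 => 2,
        _ if size < 100 * 1024 * 1024 => 3,
        _ if size < 1024 * 1024 * 1024 => 4,
        _ => 5,
    }
}

fn main() {
    assert_eq!(size_to_tag(512), 0);                    // < 1 KiB
    assert_eq!(size_to_tag(512 * 1024), 1);             // < 1 MiB
    assert_eq!(size_to_tag(5 * 1024 * 1024), 2);        // < 10 MiB
    assert_eq!(size_to_tag(2 * 1024 * 1024 * 1024), 5); // > 1 GiB
}
```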
Some files were not shown because too many files have changed in this diff