mirror of
https://github.com/rustfs/rustfs.git
synced 2026-01-17 01:30:33 +00:00
Compare commits
50 Commits
1.0.0-alph
...
1.0.0-alph
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3c14947878 | ||
|
|
2924b4e463 | ||
|
|
b4ba62fa33 | ||
|
|
a5b3522880 | ||
|
|
056a0ee62b | ||
|
|
4603ece708 | ||
|
|
eb33e82b56 | ||
|
|
c7e2b4d8e7 | ||
|
|
71c59d1187 | ||
|
|
e3a0a07495 | ||
|
|
136db7e0c9 | ||
|
|
2e3c5f695a | ||
|
|
fe9609fd17 | ||
|
|
f2d79b485e | ||
|
|
3d6681c9e5 | ||
|
|
07a26fadad | ||
|
|
a083fca17a | ||
|
|
89c3ae77a4 | ||
|
|
82a6e78845 | ||
|
|
7e75c9b1f5 | ||
|
|
8bdff3fbcb | ||
|
|
65d32e693f | ||
|
|
1ff28b3157 | ||
|
|
2186f46ea3 | ||
|
|
add6453aea | ||
|
|
4418c882ad | ||
|
|
00c607b5ce | ||
|
|
79585f98e0 | ||
|
|
2a3517f1d5 | ||
|
|
3942e07487 | ||
|
|
04811c0006 | ||
|
|
73c15d6be1 | ||
|
|
af5c0b13ef | ||
|
|
f17990f746 | ||
|
|
80cfb4feab | ||
|
|
08f1a31f3f | ||
|
|
1c51e204ab | ||
|
|
958f054123 | ||
|
|
3e2252e4bb | ||
|
|
f3a1431fa5 | ||
|
|
3bd96bcf10 | ||
|
|
20ea591049 | ||
|
|
cc31e88c91 | ||
|
|
b5535083de | ||
|
|
1e35edf079 | ||
|
|
8dd3e8b534 | ||
|
|
8e0aeb4fdc | ||
|
|
abe8a50b5a | ||
|
|
61f4d307b5 | ||
|
|
3eafeb0ff0 |
64
.config/make/build-docker-buildx-dev.mak
Normal file
64
.config/make/build-docker-buildx-dev.mak
Normal file
@@ -0,0 +1,64 @@
|
||||
## —— Development/Source builds using direct buildx commands ---------------------------------------
|
||||
|
||||
.PHONY: docker-dev
|
||||
docker-dev: ## Build dev multi-arch image (cannot load locally)
|
||||
@echo "🏗️ Building multi-architecture development Docker images with buildx..."
|
||||
@echo "💡 This builds from source code and is intended for local development and testing"
|
||||
@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:source-latest \
|
||||
--tag rustfs:dev-latest \
|
||||
.
|
||||
|
||||
.PHONY: docker-dev-local
|
||||
docker-dev-local: ## Build dev single-arch image (local load)
|
||||
@echo "🏗️ Building single-architecture development Docker image for local use..."
|
||||
@echo "💡 This builds from source code for the current platform and loads locally"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:source-latest \
|
||||
--tag rustfs:dev-latest \
|
||||
--load \
|
||||
.
|
||||
|
||||
.PHONY: docker-dev-push
|
||||
docker-dev-push: ## Build and push multi-arch development image # e.g (make docker-dev-push REGISTRY=xxx)
|
||||
@if [ -z "$(REGISTRY)" ]; then \
|
||||
echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture development Docker images..."
|
||||
@echo "💡 Pushing to registry: $(REGISTRY)"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag $(REGISTRY)/rustfs:source-latest \
|
||||
--tag $(REGISTRY)/rustfs:dev-latest \
|
||||
--push \
|
||||
.
|
||||
|
||||
.PHONY: dev-env-start
|
||||
dev-env-start: ## Start development container environment
|
||||
@echo "🚀 Starting development environment..."
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:dev \
|
||||
--load \
|
||||
.
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
|
||||
-p 9010:9010 -p 9000:9000 \
|
||||
-v $(shell pwd):/workspace \
|
||||
-it rustfs:dev
|
||||
|
||||
.PHONY: dev-env-stop
|
||||
dev-env-stop: ## Stop development container environment
|
||||
@echo "🛑 Stopping development environment..."
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
|
||||
|
||||
.PHONY: dev-env-restart
|
||||
dev-env-restart: dev-env-stop dev-env-start ## Restart development container environment
|
||||
41
.config/make/build-docker-buildx-production.mak
Normal file
41
.config/make/build-docker-buildx-production.mak
Normal file
@@ -0,0 +1,41 @@
|
||||
## —— Production builds using docker buildx (for CI/CD and production) -----------------------------
|
||||
|
||||
.PHONY: docker-buildx
|
||||
docker-buildx: ## Build production multi-arch image (no push)
|
||||
@echo "🏗️ Building multi-architecture production Docker images with buildx..."
|
||||
./docker-buildx.sh
|
||||
|
||||
.PHONY: docker-buildx-push
|
||||
docker-buildx-push: ## Build and push production multi-arch image
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
|
||||
./docker-buildx.sh --push
|
||||
|
||||
.PHONY: docker-buildx-version
|
||||
docker-buildx-version: ## Build and version production multi-arch image # e.g (make docker-buildx-version VERSION=v1.0.0)
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
|
||||
./docker-buildx.sh --release $(VERSION)
|
||||
|
||||
.PHONY: docker-buildx-push-version
|
||||
docker-buildx-push-version: ## Build and version and push production multi-arch image # e.g (make docker-buildx-push-version VERSION=v1.0.0)
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
|
||||
./docker-buildx.sh --release $(VERSION) --push
|
||||
|
||||
.PHONY: docker-buildx-production-local
|
||||
docker-buildx-production-local: ## Build production single-arch image locally
|
||||
@echo "🏗️ Building single-architecture production Docker image locally..."
|
||||
@echo "💡 Alternative to docker-buildx.sh for local testing"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_PRODUCTION) \
|
||||
--tag rustfs:production-latest \
|
||||
--tag rustfs:latest \
|
||||
--load \
|
||||
--build-arg RELEASE=latest \
|
||||
.
|
||||
16
.config/make/build-docker-production.mak
Normal file
16
.config/make/build-docker-production.mak
Normal file
@@ -0,0 +1,16 @@
|
||||
## —— Single Architecture Docker Builds (Traditional) ----------------------------------------------
|
||||
|
||||
.PHONY: docker-build-production
|
||||
docker-build-production: ## Build single-arch production image
|
||||
@echo "🏗️ Building single-architecture production Docker image..."
|
||||
@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
|
||||
$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .
|
||||
|
||||
.PHONY: docker-build-source
|
||||
docker-build-source: ## Build single-arch source image
|
||||
@echo "🏗️ Building single-architecture source Docker image..."
|
||||
@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
|
||||
DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
-f $(DOCKERFILE_SOURCE) -t rustfs:source .
|
||||
|
||||
22
.config/make/build-docker.mak
Normal file
22
.config/make/build-docker.mak
Normal file
@@ -0,0 +1,22 @@
|
||||
## —— Docker-based build (alternative approach) ----------------------------------------------------
|
||||
|
||||
# Usage: make BUILD_OS=ubuntu22.04 build-docker
|
||||
# Output: target/ubuntu22.04/release/rustfs
|
||||
|
||||
.PHONY: build-docker
|
||||
build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
|
||||
build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
|
||||
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
|
||||
build-docker: ## Build using Docker container # e.g (make build-docker BUILD_OS=ubuntu22.04)
|
||||
@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
|
||||
$(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
|
||||
$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)
|
||||
|
||||
.PHONY: docker-inspect-multiarch
|
||||
docker-inspect-multiarch: ## Check image architecture support
|
||||
@if [ -z "$(IMAGE)" ]; then \
|
||||
echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
|
||||
docker buildx imagetools inspect $(IMAGE)
|
||||
55
.config/make/build.mak
Normal file
55
.config/make/build.mak
Normal file
@@ -0,0 +1,55 @@
|
||||
## —— Local Native Build using build-rustfs.sh script (Recommended) --------------------------------
|
||||
|
||||
.PHONY: build
|
||||
build: ## Build RustFS binary (includes console by default)
|
||||
@echo "🔨 Building RustFS using build-rustfs.sh script..."
|
||||
./build-rustfs.sh
|
||||
|
||||
.PHONY: build-dev
|
||||
build-dev: ## Build RustFS in Development mode
|
||||
@echo "🔨 Building RustFS in development mode..."
|
||||
./build-rustfs.sh --dev
|
||||
|
||||
.PHONY: build-musl
|
||||
build-musl: ## Build x86_64 musl version
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-musl
|
||||
|
||||
.PHONY: build-gnu
|
||||
build-gnu: ## Build x86_64 GNU version
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
|
||||
|
||||
.PHONY: build-musl-arm64
|
||||
build-musl-arm64: ## Build aarch64 musl version
|
||||
@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-musl
|
||||
|
||||
.PHONY: build-gnu-arm64
|
||||
build-gnu-arm64: ## Build aarch64 GNU version
|
||||
@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
|
||||
|
||||
|
||||
.PHONY: build-cross-all
|
||||
build-cross-all: core-deps ## Build binaries for all architectures
|
||||
@echo "🔧 Building all target architectures..."
|
||||
@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
|
||||
@echo "🔨 Generating protobuf code..."
|
||||
cargo run --bin gproto || true
|
||||
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-musl
|
||||
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
|
||||
|
||||
@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-musl
|
||||
|
||||
@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
|
||||
24
.config/make/check.mak
Normal file
24
.config/make/check.mak
Normal file
@@ -0,0 +1,24 @@
|
||||
## —— Check and Inform Dependencies ----------------------------------------------------------------
|
||||
|
||||
# Fatal check
|
||||
# Checks all required dependencies and exits with error if not found
|
||||
# (e.g., cargo, rustfmt)
|
||||
check-%:
|
||||
@command -v $* >/dev/null 2>&1 || { \
|
||||
echo >&2 "❌ '$*' is not installed."; \
|
||||
exit 1; \
|
||||
}
|
||||
|
||||
# Warning-only check
|
||||
# Checks for optional dependencies and issues a warning if not found
|
||||
# (e.g., cargo-nextest for enhanced testing)
|
||||
warn-%:
|
||||
@command -v $* >/dev/null 2>&1 || { \
|
||||
echo >&2 "⚠️ '$*' is not installed."; \
|
||||
}
|
||||
|
||||
# For checking dependencies use check-<dep-name> or warn-<dep-name>
|
||||
.PHONY: core-deps fmt-deps test-deps
|
||||
core-deps: check-cargo ## Check core dependencies
|
||||
fmt-deps: check-rustfmt ## Check lint and formatting dependencies
|
||||
test-deps: warn-cargo-nextest ## Check tests dependencies
|
||||
6
.config/make/deploy.mak
Normal file
6
.config/make/deploy.mak
Normal file
@@ -0,0 +1,6 @@
|
||||
## —— Deploy using dev_deploy.sh script ------------------------------------------------------------
|
||||
|
||||
.PHONY: deploy-dev
|
||||
deploy-dev: build-musl ## Deploy to dev server
|
||||
@echo "🚀 Deploying to dev server: $${IP}"
|
||||
./scripts/dev_deploy.sh $${IP}
|
||||
38
.config/make/help.mak
Normal file
38
.config/make/help.mak
Normal file
@@ -0,0 +1,38 @@
|
||||
## —— Help, Help Build and Help Docker -------------------------------------------------------------
|
||||
|
||||
|
||||
.PHONY: help
|
||||
help: ## Shows This Help Menu
|
||||
echo -e "$$HEADER"
|
||||
grep -E '(^[a-zA-Z0-9_-]+:.*?## .*$$)|(^## )' $(MAKEFILE_LIST) | sed 's/^[^:]*://g' | awk 'BEGIN {FS = ":.*?## | #"} ; {printf "${cyan}%-30s${reset} ${white}%s${reset} ${green}%s${reset}\n", $$1, $$2, $$3}' | sed -e 's/\[36m##/\n[32m##/'
|
||||
|
||||
.PHONY: help-build
|
||||
help-build: ## Shows RustFS build help
|
||||
@echo ""
|
||||
@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"
|
||||
@echo ""
|
||||
@echo "🔧 Direct usage of build-rustfs.sh script:"
|
||||
@echo ""
|
||||
@echo " ./build-rustfs.sh --help # View script help"
|
||||
@echo " ./build-rustfs.sh --no-console # Build without console resources"
|
||||
@echo " ./build-rustfs.sh --force-console-update # Force update console resources"
|
||||
@echo " ./build-rustfs.sh --dev # Development mode build"
|
||||
@echo " ./build-rustfs.sh --sign # Sign binary files"
|
||||
@echo " ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
|
||||
@echo " ./build-rustfs.sh --skip-verification # Skip binary verification"
|
||||
@echo ""
|
||||
|
||||
.PHONY: help-docker
|
||||
help-docker: ## Shows docker environment and suggestion help
|
||||
@echo ""
|
||||
@echo "📋 Environment Variables:"
|
||||
@echo " REGISTRY Image registry address (required for push)"
|
||||
@echo " DOCKERHUB_USERNAME Docker Hub username"
|
||||
@echo " DOCKERHUB_TOKEN Docker Hub access token"
|
||||
@echo " GITHUB_TOKEN GitHub access token"
|
||||
@echo ""
|
||||
@echo "💡 Suggestions:"
|
||||
@echo " Production use: Use docker-buildx* commands (based on precompiled binaries)"
|
||||
@echo " Local development: Use docker-dev* commands (build from source)"
|
||||
@echo " Development environment: Use dev-env-* commands to manage dev containers"
|
||||
@echo ""
|
||||
22
.config/make/lint-fmt.mak
Normal file
22
.config/make/lint-fmt.mak
Normal file
@@ -0,0 +1,22 @@
|
||||
## —— Code quality and Formatting ------------------------------------------------------------------
|
||||
|
||||
.PHONY: fmt
|
||||
fmt: core-deps fmt-deps ## Format code
|
||||
@echo "🔧 Formatting code..."
|
||||
cargo fmt --all
|
||||
|
||||
.PHONY: fmt-check
|
||||
fmt-check: core-deps fmt-deps ## Check code formatting
|
||||
@echo "📝 Checking code formatting..."
|
||||
cargo fmt --all --check
|
||||
|
||||
.PHONY: clippy-check
|
||||
clippy-check: core-deps ## Run clippy checks
|
||||
@echo "🔍 Running clippy checks..."
|
||||
cargo clippy --fix --allow-dirty
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
.PHONY: compilation-check
|
||||
compilation-check: core-deps ## Run compilation check
|
||||
@echo "🔨 Running compilation check..."
|
||||
cargo check --all-targets
|
||||
11
.config/make/pre-commit.mak
Normal file
11
.config/make/pre-commit.mak
Normal file
@@ -0,0 +1,11 @@
|
||||
## —— Pre Commit Checks ----------------------------------------------------------------------------
|
||||
|
||||
.PHONY: setup-hooks
|
||||
setup-hooks: ## Set up git hooks
|
||||
@echo "🔧 Setting up git hooks..."
|
||||
chmod +x .git/hooks/pre-commit
|
||||
@echo "✅ Git hooks setup complete!"
|
||||
|
||||
.PHONY: pre-commit
|
||||
pre-commit: fmt clippy-check compilation-check test ## Run pre-commit checks
|
||||
@echo "✅ All pre-commit checks passed!"
|
||||
20
.config/make/tests.mak
Normal file
20
.config/make/tests.mak
Normal file
@@ -0,0 +1,20 @@
|
||||
## —— Tests and e2e test ---------------------------------------------------------------------------
|
||||
|
||||
.PHONY: test
|
||||
test: core-deps test-deps ## Run all tests
|
||||
@echo "🧪 Running tests..."
|
||||
@if command -v cargo-nextest >/dev/null 2>&1; then \
|
||||
cargo nextest run --all --exclude e2e_test; \
|
||||
else \
|
||||
echo "ℹ️ cargo-nextest not found; falling back to 'cargo test'"; \
|
||||
cargo test --workspace --exclude e2e_test -- --nocapture; \
|
||||
fi
|
||||
cargo test --all --doc
|
||||
|
||||
.PHONY: e2e-server
|
||||
e2e-server: ## Run e2e-server tests
|
||||
sh $(shell pwd)/scripts/run.sh
|
||||
|
||||
.PHONY: probe-e2e
|
||||
probe-e2e: ## Probe e2e tests
|
||||
sh $(shell pwd)/scripts/probe.sh
|
||||
4
.github/workflows/audit.yml
vendored
4
.github/workflows/audit.yml
vendored
@@ -40,7 +40,7 @@ env:
|
||||
jobs:
|
||||
security-audit:
|
||||
name: Security Audit
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -65,7 +65,7 @@ jobs:
|
||||
|
||||
dependency-review:
|
||||
name: Dependency Review
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
if: github.event_name == 'pull_request'
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
22
.github/workflows/build.yml
vendored
22
.github/workflows/build.yml
vendored
@@ -83,7 +83,7 @@ jobs:
|
||||
# Build strategy check - determine build type based on trigger
|
||||
build-check:
|
||||
name: Build Strategy Check
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
outputs:
|
||||
should_build: ${{ steps.check.outputs.should_build }}
|
||||
build_type: ${{ steps.check.outputs.build_type }}
|
||||
@@ -167,19 +167,19 @@ jobs:
|
||||
matrix:
|
||||
include:
|
||||
# Linux builds
|
||||
- os: ubicloud-standard-4
|
||||
- os: ubicloud-standard-2
|
||||
target: x86_64-unknown-linux-musl
|
||||
cross: false
|
||||
platform: linux
|
||||
- os: ubicloud-standard-4
|
||||
- os: ubicloud-standard-2
|
||||
target: aarch64-unknown-linux-musl
|
||||
cross: true
|
||||
platform: linux
|
||||
- os: ubicloud-standard-4
|
||||
- os: ubicloud-standard-2
|
||||
target: x86_64-unknown-linux-gnu
|
||||
cross: false
|
||||
platform: linux
|
||||
- os: ubicloud-standard-4
|
||||
- os: ubicloud-standard-2
|
||||
target: aarch64-unknown-linux-gnu
|
||||
cross: true
|
||||
platform: linux
|
||||
@@ -454,7 +454,7 @@ jobs:
|
||||
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
OSS_REGION: cn-beijing
|
||||
OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
|
||||
OSS_ENDPOINT: https://oss-accelerate.aliyuncs.com
|
||||
shell: bash
|
||||
run: |
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
@@ -532,7 +532,7 @@ jobs:
|
||||
name: Build Summary
|
||||
needs: [ build-check, build-rustfs ]
|
||||
if: always() && needs.build-check.outputs.should_build == 'true'
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
steps:
|
||||
- name: Build completion summary
|
||||
shell: bash
|
||||
@@ -584,7 +584,7 @@ jobs:
|
||||
name: Create GitHub Release
|
||||
needs: [ build-check, build-rustfs ]
|
||||
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
permissions:
|
||||
contents: write
|
||||
outputs:
|
||||
@@ -670,7 +670,7 @@ jobs:
|
||||
name: Upload Release Assets
|
||||
needs: [ build-check, build-rustfs, create-release ]
|
||||
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
permissions:
|
||||
contents: write
|
||||
actions: read
|
||||
@@ -751,7 +751,7 @@ jobs:
|
||||
name: Update Latest Version
|
||||
needs: [ build-check, upload-release-assets ]
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
steps:
|
||||
- name: Update latest.json
|
||||
env:
|
||||
@@ -801,7 +801,7 @@ jobs:
|
||||
name: Publish Release
|
||||
needs: [ build-check, create-release, upload-release-assets ]
|
||||
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
|
||||
10
.github/workflows/ci.yml
vendored
10
.github/workflows/ci.yml
vendored
@@ -69,7 +69,7 @@ concurrency:
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUST_BACKTRACE: 1
|
||||
CARGO_BUILD_JOBS: 8
|
||||
CARGO_BUILD_JOBS: 2
|
||||
|
||||
jobs:
|
||||
|
||||
@@ -78,7 +78,7 @@ jobs:
|
||||
permissions:
|
||||
actions: write
|
||||
contents: read
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
outputs:
|
||||
should_skip: ${{ steps.skip_check.outputs.should_skip }}
|
||||
steps:
|
||||
@@ -93,7 +93,7 @@ jobs:
|
||||
|
||||
typos:
|
||||
name: Typos
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
@@ -136,7 +136,7 @@ jobs:
|
||||
name: End-to-End Tests
|
||||
needs: skip-check
|
||||
if: needs.skip-check.outputs.should_skip != 'true'
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -166,7 +166,7 @@ jobs:
|
||||
run: |
|
||||
touch rustfs/build.rs
|
||||
# Limit concurrency to prevent OOM
|
||||
cargo build -p rustfs --bins --jobs 4
|
||||
cargo build -p rustfs --bins --jobs 2
|
||||
|
||||
- name: Run end-to-end tests
|
||||
run: |
|
||||
|
||||
6
.github/workflows/docker.yml
vendored
6
.github/workflows/docker.yml
vendored
@@ -72,7 +72,7 @@ jobs:
|
||||
# Check if we should build Docker images
|
||||
build-check:
|
||||
name: Docker Build Check
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
outputs:
|
||||
should_build: ${{ steps.check.outputs.should_build }}
|
||||
should_push: ${{ steps.check.outputs.should_push }}
|
||||
@@ -264,7 +264,7 @@ jobs:
|
||||
name: Build Docker Images
|
||||
needs: build-check
|
||||
if: needs.build-check.outputs.should_build == 'true'
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -404,7 +404,7 @@ jobs:
|
||||
name: Docker Build Summary
|
||||
needs: [ build-check, build-docker ]
|
||||
if: always() && needs.build-check.outputs.should_build == 'true'
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
steps:
|
||||
- name: Docker build completion summary
|
||||
run: |
|
||||
|
||||
260
.github/workflows/e2e-mint.yml
vendored
260
.github/workflows/e2e-mint.yml
vendored
@@ -1,260 +0,0 @@
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: e2e-mint
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
paths:
|
||||
- ".github/workflows/e2e-mint.yml"
|
||||
- "Dockerfile.source"
|
||||
- "rustfs/**"
|
||||
- "crates/**"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
run-multi:
|
||||
description: "Run multi-node Mint as well"
|
||||
required: false
|
||||
default: "false"
|
||||
|
||||
env:
|
||||
ACCESS_KEY: rustfsadmin
|
||||
SECRET_KEY: rustfsadmin
|
||||
RUST_LOG: info
|
||||
PLATFORM: linux/amd64
|
||||
|
||||
jobs:
|
||||
mint-single:
|
||||
runs-on: ubicloud-standard-4
|
||||
timeout-minutes: 40
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Enable buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build RustFS image (source)
|
||||
run: |
|
||||
DOCKER_BUILDKIT=1 docker buildx build --load \
|
||||
--platform ${PLATFORM} \
|
||||
-t rustfs-ci \
|
||||
-f Dockerfile.source .
|
||||
|
||||
- name: Create network
|
||||
run: |
|
||||
docker network inspect rustfs-net >/dev/null 2>&1 || docker network create rustfs-net
|
||||
|
||||
- name: Remove existing rustfs-single (if any)
|
||||
run: docker rm -f rustfs-single >/dev/null 2>&1 || true
|
||||
|
||||
- name: Start single RustFS
|
||||
run: |
|
||||
docker run -d --name rustfs-single \
|
||||
--network rustfs-net \
|
||||
-e RUSTFS_ADDRESS=0.0.0.0:9000 \
|
||||
-e RUSTFS_ACCESS_KEY=$ACCESS_KEY \
|
||||
-e RUSTFS_SECRET_KEY=$SECRET_KEY \
|
||||
-e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
|
||||
-v /tmp/rustfs-single:/data \
|
||||
rustfs-ci
|
||||
|
||||
- name: Wait for RustFS ready
|
||||
run: |
|
||||
for i in {1..30}; do
|
||||
if docker exec rustfs-single curl -sf http://localhost:9000/health >/dev/null; then
|
||||
exit 0
|
||||
fi
|
||||
sleep 2
|
||||
done
|
||||
echo "RustFS did not become ready" >&2
|
||||
docker logs rustfs-single || true
|
||||
exit 1
|
||||
|
||||
- name: Run Mint (single, S3-only)
|
||||
run: |
|
||||
mkdir -p artifacts/mint-single
|
||||
docker run --rm --network rustfs-net \
|
||||
--platform ${PLATFORM} \
|
||||
-e SERVER_ENDPOINT=rustfs-single:9000 \
|
||||
-e ACCESS_KEY=$ACCESS_KEY \
|
||||
-e SECRET_KEY=$SECRET_KEY \
|
||||
-e ENABLE_HTTPS=0 \
|
||||
-e SERVER_REGION=us-east-1 \
|
||||
-e RUN_ON_FAIL=1 \
|
||||
-e MINT_MODE=core \
|
||||
-v ${GITHUB_WORKSPACE}/artifacts/mint-single:/mint/log \
|
||||
--entrypoint /mint/mint.sh \
|
||||
minio/mint:edge \
|
||||
awscli aws-sdk-go aws-sdk-java-v2 aws-sdk-php aws-sdk-ruby s3cmd s3select
|
||||
|
||||
- name: Collect RustFS logs
|
||||
run: |
|
||||
mkdir -p artifacts/rustfs-single
|
||||
docker logs rustfs-single > artifacts/rustfs-single/rustfs.log || true
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: mint-single
|
||||
path: artifacts/**
|
||||
|
||||
mint-multi:
|
||||
if: github.event_name == 'workflow_dispatch' && github.event.inputs.run-multi == 'true'
|
||||
needs: mint-single
|
||||
runs-on: ubicloud-standard-4
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Enable buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build RustFS image (source)
|
||||
run: |
|
||||
DOCKER_BUILDKIT=1 docker buildx build --load \
|
||||
--platform ${PLATFORM} \
|
||||
-t rustfs-ci \
|
||||
-f Dockerfile.source .
|
||||
|
||||
- name: Prepare cluster compose
|
||||
run: |
|
||||
cat > compose.yml <<'EOF'
|
||||
version: '3.8'
|
||||
services:
|
||||
rustfs1:
|
||||
image: rustfs-ci
|
||||
hostname: rustfs1
|
||||
networks: [rustfs-net]
|
||||
environment:
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_ACCESS_KEY=${ACCESS_KEY}
|
||||
- RUSTFS_SECRET_KEY=${SECRET_KEY}
|
||||
- RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
|
||||
volumes:
|
||||
- rustfs1-data:/data
|
||||
rustfs2:
|
||||
image: rustfs-ci
|
||||
hostname: rustfs2
|
||||
networks: [rustfs-net]
|
||||
environment:
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_ACCESS_KEY=${ACCESS_KEY}
|
||||
- RUSTFS_SECRET_KEY=${SECRET_KEY}
|
||||
- RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
|
||||
volumes:
|
||||
- rustfs2-data:/data
|
||||
rustfs3:
|
||||
image: rustfs-ci
|
||||
hostname: rustfs3
|
||||
networks: [rustfs-net]
|
||||
environment:
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_ACCESS_KEY=${ACCESS_KEY}
|
||||
- RUSTFS_SECRET_KEY=${SECRET_KEY}
|
||||
- RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
|
||||
volumes:
|
||||
- rustfs3-data:/data
|
||||
rustfs4:
|
||||
image: rustfs-ci
|
||||
hostname: rustfs4
|
||||
networks: [rustfs-net]
|
||||
environment:
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_ACCESS_KEY=${ACCESS_KEY}
|
||||
- RUSTFS_SECRET_KEY=${SECRET_KEY}
|
||||
- RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
|
||||
volumes:
|
||||
- rustfs4-data:/data
|
||||
lb:
|
||||
image: haproxy:2.9
|
||||
hostname: lb
|
||||
networks: [rustfs-net]
|
||||
ports:
|
||||
- "9000:9000"
|
||||
volumes:
|
||||
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
|
||||
networks:
|
||||
rustfs-net:
|
||||
name: rustfs-net
|
||||
volumes:
|
||||
rustfs1-data:
|
||||
rustfs2-data:
|
||||
rustfs3-data:
|
||||
rustfs4-data:
|
||||
EOF
|
||||
|
||||
cat > haproxy.cfg <<'EOF'
|
||||
defaults
|
||||
mode http
|
||||
timeout connect 5s
|
||||
timeout client 30s
|
||||
timeout server 30s
|
||||
|
||||
frontend fe_s3
|
||||
bind *:9000
|
||||
default_backend be_s3
|
||||
|
||||
backend be_s3
|
||||
balance roundrobin
|
||||
server s1 rustfs1:9000 check
|
||||
server s2 rustfs2:9000 check
|
||||
server s3 rustfs3:9000 check
|
||||
server s4 rustfs4:9000 check
|
||||
EOF
|
||||
|
||||
- name: Launch cluster
|
||||
run: docker compose -f compose.yml up -d
|
||||
|
||||
- name: Wait for LB ready
|
||||
run: |
|
||||
for i in {1..60}; do
|
||||
if docker run --rm --network rustfs-net curlimages/curl -sf http://lb:9000/health >/dev/null; then
|
||||
exit 0
|
||||
fi
|
||||
sleep 2
|
||||
done
|
||||
echo "LB or backend not ready" >&2
|
||||
docker compose -f compose.yml logs --tail=200 || true
|
||||
exit 1
|
||||
|
||||
- name: Run Mint (multi, S3-only)
|
||||
run: |
|
||||
mkdir -p artifacts/mint-multi
|
||||
docker run --rm --network rustfs-net \
|
||||
--platform ${PLATFORM} \
|
||||
-e SERVER_ENDPOINT=lb:9000 \
|
||||
-e ACCESS_KEY=$ACCESS_KEY \
|
||||
-e SECRET_KEY=$SECRET_KEY \
|
||||
-e ENABLE_HTTPS=0 \
|
||||
-e SERVER_REGION=us-east-1 \
|
||||
-e RUN_ON_FAIL=1 \
|
||||
-e MINT_MODE=core \
|
||||
-v ${GITHUB_WORKSPACE}/artifacts/mint-multi:/mint/log \
|
||||
--entrypoint /mint/mint.sh \
|
||||
minio/mint:edge \
|
||||
awscli aws-sdk-go aws-sdk-java-v2 aws-sdk-php aws-sdk-ruby s3cmd s3select
|
||||
|
||||
- name: Collect logs
|
||||
run: |
|
||||
mkdir -p artifacts/cluster
|
||||
docker compose -f compose.yml logs --no-color > artifacts/cluster/cluster.log || true
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: mint-multi
|
||||
path: artifacts/**
|
||||
4
.github/workflows/e2e-s3tests.yml
vendored
4
.github/workflows/e2e-s3tests.yml
vendored
@@ -58,7 +58,7 @@ defaults:
|
||||
jobs:
|
||||
s3tests-single:
|
||||
if: github.event.inputs.test-mode == 'single'
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
timeout-minutes: 120
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
@@ -212,7 +212,7 @@ jobs:
|
||||
|
||||
s3tests-multi:
|
||||
if: github.event_name == 'workflow_dispatch' && github.event.inputs.test-mode == 'multi'
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
timeout-minutes: 150
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
4
.github/workflows/helm-package.yml
vendored
4
.github/workflows/helm-package.yml
vendored
@@ -27,7 +27,7 @@ env:
|
||||
|
||||
jobs:
|
||||
build-helm-package:
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
# Only run on successful builds triggered by tag pushes (version format: x.y.z or x.y.z-suffix)
|
||||
if: |
|
||||
github.event.workflow_run.conclusion == 'success' &&
|
||||
@@ -63,7 +63,7 @@ jobs:
|
||||
retention-days: 1
|
||||
|
||||
publish-helm-package:
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
needs: [ build-helm-package ]
|
||||
|
||||
steps:
|
||||
|
||||
4
.github/workflows/performance.yml
vendored
4
.github/workflows/performance.yml
vendored
@@ -40,7 +40,7 @@ env:
|
||||
jobs:
|
||||
performance-profile:
|
||||
name: Performance Profiling
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -115,7 +115,7 @@ jobs:
|
||||
|
||||
benchmark:
|
||||
name: Benchmark Tests
|
||||
runs-on: ubicloud-standard-4
|
||||
runs-on: ubicloud-standard-2
|
||||
timeout-minutes: 45
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -25,7 +25,7 @@ profile.json
|
||||
*.pb
|
||||
*.svg
|
||||
deploy/logs/*.log.*
|
||||
|
||||
artifacts/
|
||||
# s3-tests local artifacts (root directory only)
|
||||
/s3-tests/
|
||||
/s3-tests-local/
|
||||
@@ -33,4 +33,4 @@ deploy/logs/*.log.*
|
||||
/s3tests.conf.*
|
||||
*.events
|
||||
*.audit
|
||||
*.snappy
|
||||
*.snappy
|
||||
|
||||
324
Cargo.lock
generated
324
Cargo.lock
generated
@@ -24,7 +24,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "03d2d54c4d9e7006f132f615a167865bff927a79ca63d8f637237575ce0a9795"
|
||||
dependencies = [
|
||||
"crypto-common 0.2.0-rc.5",
|
||||
"inout 0.2.1",
|
||||
"inout 0.2.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -216,9 +216,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "arc-swap"
|
||||
version = "1.7.1"
|
||||
version = "1.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
|
||||
checksum = "51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e"
|
||||
dependencies = [
|
||||
"rustversion",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "argon2"
|
||||
@@ -515,9 +518,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "async-lock"
|
||||
version = "3.4.1"
|
||||
version = "3.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc"
|
||||
checksum = "290f7f2596bd5b78a9fec8088ccd89180d7f9f55b94b0576823bbbdc72ee8311"
|
||||
dependencies = [
|
||||
"event-listener",
|
||||
"event-listener-strategy",
|
||||
@@ -691,9 +694,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-sdk-s3"
|
||||
version = "1.118.0"
|
||||
version = "1.119.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d3e6b7079f85d9ea9a70643c9f89f50db70f5ada868fa9cfe08c1ffdf51abc13"
|
||||
checksum = "1d65fddc3844f902dfe1864acb8494db5f9342015ee3ab7890270d36fbd2e01c"
|
||||
dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-runtime",
|
||||
@@ -942,9 +945,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-smithy-runtime"
|
||||
version = "1.9.6"
|
||||
version = "1.9.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "65fda37911905ea4d3141a01364bc5509a0f32ae3f3b22d6e330c0abfb62d247"
|
||||
checksum = "a392db6c583ea4a912538afb86b7be7c5d8887d91604f50eb55c262ee1b4a5f5"
|
||||
dependencies = [
|
||||
"aws-smithy-async",
|
||||
"aws-smithy-http",
|
||||
@@ -1032,9 +1035,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "axum"
|
||||
version = "0.8.7"
|
||||
version = "0.8.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425"
|
||||
checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8"
|
||||
dependencies = [
|
||||
"axum-core",
|
||||
"bytes",
|
||||
@@ -1065,9 +1068,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "axum-core"
|
||||
version = "0.5.5"
|
||||
version = "0.5.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22"
|
||||
checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"futures-core",
|
||||
@@ -1082,27 +1085,6 @@ dependencies = [
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "axum-extra"
|
||||
version = "0.12.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dbfe9f610fe4e99cf0cfcd03ccf8c63c28c616fe714d80475ef731f3b13dd21b"
|
||||
dependencies = [
|
||||
"axum",
|
||||
"axum-core",
|
||||
"bytes",
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"http 1.4.0",
|
||||
"http-body 1.0.1",
|
||||
"http-body-util",
|
||||
"mime",
|
||||
"pin-project-lite",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "axum-server"
|
||||
version = "0.8.0"
|
||||
@@ -1182,9 +1164,9 @@ checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a"
|
||||
|
||||
[[package]]
|
||||
name = "bigdecimal"
|
||||
version = "0.4.9"
|
||||
version = "0.4.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "560f42649de9fa436b73517378a147ec21f6c997a546581df4b4b31677828934"
|
||||
checksum = "4d6867f1565b3aad85681f1015055b087fcfd840d6aeee6eee7f2da317603695"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"libm",
|
||||
@@ -1434,31 +1416,14 @@ dependencies = [
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cargo-util-schemas"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7dc1a6f7b5651af85774ae5a34b4e8be397d9cf4bc063b7e6dbd99a841837830"
|
||||
dependencies = [
|
||||
"semver",
|
||||
"serde",
|
||||
"serde-untagged",
|
||||
"serde-value",
|
||||
"thiserror 2.0.17",
|
||||
"toml",
|
||||
"unicode-xid",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cargo_metadata"
|
||||
version = "0.22.0"
|
||||
version = "0.23.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0c3f56c207c76c07652489840ff98687dcf213de178ac0974660d6fefeaf5ec6"
|
||||
checksum = "ef987d17b0a113becdd19d3d0022d04d7ef41f9efe4f3fb63ac44ba61df3ade9"
|
||||
dependencies = [
|
||||
"camino",
|
||||
"cargo-platform",
|
||||
"cargo-util-schemas",
|
||||
"semver",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -1473,9 +1438,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.49"
|
||||
version = "1.2.51"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215"
|
||||
checksum = "7a0aeaff4ff1a90589618835a598e545176939b97874f7abc7851caa0618f203"
|
||||
dependencies = [
|
||||
"find-msvc-tools",
|
||||
"jobserver",
|
||||
@@ -1576,7 +1541,7 @@ version = "0.4.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
|
||||
dependencies = [
|
||||
"crypto-common 0.1.6",
|
||||
"crypto-common 0.1.7",
|
||||
"inout 0.1.4",
|
||||
]
|
||||
|
||||
@@ -1588,7 +1553,7 @@ checksum = "155e4a260750fa4f7754649f049748aacc31db238a358d85fd721002f230f92f"
|
||||
dependencies = [
|
||||
"block-buffer 0.11.0",
|
||||
"crypto-common 0.2.0-rc.5",
|
||||
"inout 0.2.1",
|
||||
"inout 0.2.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1798,9 +1763,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "crc"
|
||||
version = "3.4.0"
|
||||
version = "3.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5eb8a2a1cd12ab0d987a5d5e825195d372001a4094a0376319d5a0ad71c1ba0d"
|
||||
checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675"
|
||||
dependencies = [
|
||||
"crc-catalog",
|
||||
]
|
||||
@@ -1965,9 +1930,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "crypto-common"
|
||||
version = "0.1.6"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
|
||||
checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a"
|
||||
dependencies = [
|
||||
"generic-array",
|
||||
"typenum",
|
||||
@@ -2997,7 +2962,7 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
|
||||
dependencies = [
|
||||
"block-buffer 0.10.4",
|
||||
"const-oid 0.9.6",
|
||||
"crypto-common 0.1.6",
|
||||
"crypto-common 0.1.7",
|
||||
"subtle",
|
||||
]
|
||||
|
||||
@@ -3381,9 +3346,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "find-msvc-tools"
|
||||
version = "0.1.5"
|
||||
version = "0.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844"
|
||||
checksum = "645cbb3a84e60b7531617d5ae4e57f7e27308f6445f5abf653209ea76dec8dff"
|
||||
|
||||
[[package]]
|
||||
name = "findshlibs"
|
||||
@@ -3405,9 +3370,9 @@ checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99"
|
||||
|
||||
[[package]]
|
||||
name = "flatbuffers"
|
||||
version = "25.9.23"
|
||||
version = "25.12.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09b6620799e7340ebd9968d2e0708eb82cf1971e9a16821e2091b6d6e475eed5"
|
||||
checksum = "35f6839d7b3b98adde531effaf34f0c2badc6f4735d26fe74709d8e513a96ef3"
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"rustc_version",
|
||||
@@ -3487,9 +3452,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "fs-err"
|
||||
version = "3.2.1"
|
||||
version = "3.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "824f08d01d0f496b3eca4f001a13cf17690a6ee930043d20817f547455fd98f8"
|
||||
checksum = "baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"tokio",
|
||||
@@ -3607,9 +3572,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "generic-array"
|
||||
version = "0.14.9"
|
||||
version = "0.14.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2"
|
||||
checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
|
||||
dependencies = [
|
||||
"typenum",
|
||||
"version_check",
|
||||
@@ -3643,6 +3608,18 @@ dependencies = [
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "getrandom"
|
||||
version = "0.4.0-rc.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b99f0d993a2b9b97b9a201193aa8ad21305cde06a3be9a7e1f8f4201e5cc27e"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"r-efi",
|
||||
"wasip2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "getset"
|
||||
version = "0.1.6"
|
||||
@@ -4560,9 +4537,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "inout"
|
||||
version = "0.2.1"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c7357b6e7aa75618c7864ebd0634b115a7218b0615f4cb1df33ac3eca23943d4"
|
||||
checksum = "4250ce6452e92010fdf7268ccc5d14faa80bb12fc741938534c58f16804e03c7"
|
||||
dependencies = [
|
||||
"hybrid-array",
|
||||
]
|
||||
@@ -4590,9 +4567,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "iri-string"
|
||||
version = "0.7.9"
|
||||
version = "0.7.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397"
|
||||
checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
"serde",
|
||||
@@ -4641,9 +4618,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "1.0.15"
|
||||
version = "1.0.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
|
||||
checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
|
||||
|
||||
[[package]]
|
||||
name = "jemalloc_pprof"
|
||||
@@ -4831,13 +4808,13 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "libredox"
|
||||
version = "0.1.11"
|
||||
version = "0.1.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "df15f6eac291ed1cf25865b1ee60399f57e7c227e7f51bdbd4c5270396a9ed50"
|
||||
checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616"
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"libc",
|
||||
"redox_syscall 0.6.0",
|
||||
"redox_syscall 0.7.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4860,9 +4837,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "libz-rs-sys"
|
||||
version = "0.5.4"
|
||||
version = "0.5.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "15413ef615ad868d4d65dce091cb233b229419c7c0c4bcaa746c0901c49ff39c"
|
||||
checksum = "c10501e7805cee23da17c7790e59df2870c0d4043ec6d03f67d31e2b53e77415"
|
||||
dependencies = [
|
||||
"zlib-rs",
|
||||
]
|
||||
@@ -4972,9 +4949,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "lzma-rust2"
|
||||
version = "0.13.0"
|
||||
version = "0.15.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c60a23ffb90d527e23192f1246b14746e2f7f071cb84476dd879071696c18a4a"
|
||||
checksum = "48172246aa7c3ea28e423295dd1ca2589a24617cc4e588bb8cfe177cb2c54d95"
|
||||
dependencies = [
|
||||
"crc",
|
||||
"sha2 0.10.9",
|
||||
@@ -5021,9 +4998,9 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3"
|
||||
|
||||
[[package]]
|
||||
name = "matchit"
|
||||
version = "0.9.0"
|
||||
version = "0.9.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9ea5f97102eb9e54ab99fb70bb175589073f554bdadfb74d9bd656482ea73e2a"
|
||||
checksum = "b3eede3bdf92f3b4f9dc04072a9ce5ab557d5ec9038773bf9ffcd5588b3cc05b"
|
||||
|
||||
[[package]]
|
||||
name = "md-5"
|
||||
@@ -5134,9 +5111,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "moka"
|
||||
version = "0.12.11"
|
||||
version = "0.12.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077"
|
||||
checksum = "a3dec6bd31b08944e08b58fd99373893a6c17054d6f3ea5006cc894f4f4eee2a"
|
||||
dependencies = [
|
||||
"async-lock",
|
||||
"crossbeam-channel",
|
||||
@@ -5147,7 +5124,6 @@ dependencies = [
|
||||
"futures-util",
|
||||
"parking_lot",
|
||||
"portable-atomic",
|
||||
"rustc_version",
|
||||
"smallvec",
|
||||
"tagptr",
|
||||
"uuid",
|
||||
@@ -5281,9 +5257,9 @@ checksum = "5e0826a989adedc2a244799e823aece04662b66609d96af8dff7ac6df9a8925d"
|
||||
|
||||
[[package]]
|
||||
name = "ntapi"
|
||||
version = "0.4.1"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4"
|
||||
checksum = "c70f219e21142367c70c0b30c6a9e3a14d55b4d12a204d897fbec83a0363f081"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
@@ -5532,9 +5508,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
|
||||
|
||||
[[package]]
|
||||
name = "openssl-probe"
|
||||
version = "0.1.6"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
|
||||
checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391"
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry"
|
||||
@@ -5776,11 +5752,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "password-hash"
|
||||
version = "0.6.0-rc.6"
|
||||
version = "0.6.0-rc.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "383d290055c99f2dd7dece082088d89494dff6d79277fbac4a7da21c1bf2ab6b"
|
||||
checksum = "c351143b5ab27b1f1d24712f21ea4d0458fe74f60dd5839297dabcc2ecd24d58"
|
||||
dependencies = [
|
||||
"getrandom 0.3.4",
|
||||
"getrandom 0.4.0-rc.0",
|
||||
"phc",
|
||||
]
|
||||
|
||||
@@ -5898,12 +5874,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "phc"
|
||||
version = "0.6.0-rc.0"
|
||||
version = "0.6.0-rc.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c61f960577aaac5c259bc0866d685ba315c0ed30793c602d7287f54980913863"
|
||||
checksum = "71d390c5fe8d102c2c18ff39f1e72b9ad5996de282c2d831b0312f56910f5508"
|
||||
dependencies = [
|
||||
"base64ct",
|
||||
"getrandom 0.3.4",
|
||||
"getrandom 0.4.0-rc.0",
|
||||
"subtle",
|
||||
]
|
||||
|
||||
@@ -6113,9 +6089,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "portable-atomic"
|
||||
version = "1.11.1"
|
||||
version = "1.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483"
|
||||
checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950"
|
||||
|
||||
[[package]]
|
||||
name = "potential_utf"
|
||||
@@ -6248,9 +6224,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.103"
|
||||
version = "1.0.104"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
|
||||
checksum = "9695f8df41bb4f3d222c95a67532365f569318332d03d5f3f67f37b20e6ebdf0"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
@@ -6650,9 +6626,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "redox_syscall"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ec96166dafa0886eb81fe1c0a388bece180fbef2135f97c1e2cf8302e74b43b5"
|
||||
checksum = "49f3fe0889e69e2ae9e41f4d6c4c0181701d00e4697b356fb1f74173a5e0ee27"
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
]
|
||||
@@ -6743,9 +6719,9 @@ checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2"
|
||||
|
||||
[[package]]
|
||||
name = "reqwest"
|
||||
version = "0.12.26"
|
||||
version = "0.12.28"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b4c14b2d9afca6a60277086b0cc6a6ae0b568f6f7916c943a8cdc79f8be240f"
|
||||
checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"bytes",
|
||||
@@ -6845,7 +6821,7 @@ dependencies = [
|
||||
"pastey",
|
||||
"pin-project-lite",
|
||||
"rmcp-macros",
|
||||
"schemars 1.1.0",
|
||||
"schemars 1.2.0",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"thiserror 2.0.17",
|
||||
@@ -6869,22 +6845,19 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "rmp"
|
||||
version = "0.8.14"
|
||||
version = "0.8.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "228ed7c16fa39782c3b3468e974aec2795e9089153cd08ee2e9aefb3613334c4"
|
||||
checksum = "4ba8be72d372b2c9b35542551678538b562e7cf86c3315773cae48dfbfe7790c"
|
||||
dependencies = [
|
||||
"byteorder",
|
||||
"num-traits",
|
||||
"paste",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rmp-serde"
|
||||
version = "1.3.0"
|
||||
version = "1.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "52e599a477cf9840e92f2cde9a7189e67b42c57532749bf90aea6ec10facd4db"
|
||||
checksum = "72f81bee8c8ef9b577d1681a70ebbc962c232461e397b22c208c43c04b67a155"
|
||||
dependencies = [
|
||||
"byteorder",
|
||||
"rmp",
|
||||
"serde",
|
||||
]
|
||||
@@ -7043,7 +7016,6 @@ dependencies = [
|
||||
"atoi",
|
||||
"atomic_enum",
|
||||
"axum",
|
||||
"axum-extra",
|
||||
"axum-server",
|
||||
"base64",
|
||||
"base64-simd",
|
||||
@@ -7058,11 +7030,12 @@ dependencies = [
|
||||
"hex-simd",
|
||||
"http 1.4.0",
|
||||
"http-body 1.0.1",
|
||||
"http-body-util",
|
||||
"hyper 1.8.1",
|
||||
"hyper-util",
|
||||
"jemalloc_pprof",
|
||||
"libsystemd",
|
||||
"matchit 0.9.0",
|
||||
"matchit 0.9.1",
|
||||
"md5",
|
||||
"metrics",
|
||||
"mimalloc",
|
||||
@@ -7078,6 +7051,7 @@ dependencies = [
|
||||
"rustfs-audit",
|
||||
"rustfs-common",
|
||||
"rustfs-config",
|
||||
"rustfs-credentials",
|
||||
"rustfs-ecstore",
|
||||
"rustfs-filemeta",
|
||||
"rustfs-iam",
|
||||
@@ -7095,10 +7069,12 @@ dependencies = [
|
||||
"rustfs-utils",
|
||||
"rustfs-zip",
|
||||
"rustls 0.23.35",
|
||||
"rustls-pemfile",
|
||||
"s3s",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_urlencoded",
|
||||
"serial_test",
|
||||
"shadow-rs",
|
||||
"socket2 0.6.1",
|
||||
"subtle",
|
||||
@@ -7226,6 +7202,17 @@ dependencies = [
|
||||
"const-str",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustfs-credentials"
|
||||
version = "0.0.5"
|
||||
dependencies = [
|
||||
"base64-simd",
|
||||
"rand 0.10.0-rc.5",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"time",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustfs-crypto"
|
||||
version = "0.0.5"
|
||||
@@ -7292,6 +7279,7 @@ dependencies = [
|
||||
"rustfs-checksums",
|
||||
"rustfs-common",
|
||||
"rustfs-config",
|
||||
"rustfs-credentials",
|
||||
"rustfs-filemeta",
|
||||
"rustfs-lock",
|
||||
"rustfs-madmin",
|
||||
@@ -7319,6 +7307,7 @@ dependencies = [
|
||||
"tonic",
|
||||
"tower",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
"url",
|
||||
"urlencoding",
|
||||
"uuid",
|
||||
@@ -7333,7 +7322,6 @@ dependencies = [
|
||||
"bytes",
|
||||
"crc-fast",
|
||||
"criterion",
|
||||
"lazy_static",
|
||||
"regex",
|
||||
"rmp",
|
||||
"rmp-serde",
|
||||
@@ -7359,6 +7347,7 @@ dependencies = [
|
||||
"jsonwebtoken",
|
||||
"pollster",
|
||||
"rand 0.10.0-rc.5",
|
||||
"rustfs-credentials",
|
||||
"rustfs-crypto",
|
||||
"rustfs-ecstore",
|
||||
"rustfs-madmin",
|
||||
@@ -7442,7 +7431,7 @@ dependencies = [
|
||||
"clap",
|
||||
"mime_guess",
|
||||
"rmcp",
|
||||
"schemars 1.1.0",
|
||||
"schemars 1.2.0",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tokio",
|
||||
@@ -7454,6 +7443,7 @@ dependencies = [
|
||||
name = "rustfs-notify"
|
||||
version = "0.0.5"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"async-trait",
|
||||
"axum",
|
||||
"chrono",
|
||||
@@ -7519,10 +7509,10 @@ dependencies = [
|
||||
"jsonwebtoken",
|
||||
"moka",
|
||||
"pollster",
|
||||
"rand 0.10.0-rc.5",
|
||||
"regex",
|
||||
"reqwest",
|
||||
"rustfs-config",
|
||||
"rustfs-credentials",
|
||||
"rustfs-crypto",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -7542,6 +7532,7 @@ dependencies = [
|
||||
"flatbuffers",
|
||||
"prost 0.14.1",
|
||||
"rustfs-common",
|
||||
"rustfs-credentials",
|
||||
"tonic",
|
||||
"tonic-prost",
|
||||
"tonic-prost-build",
|
||||
@@ -7564,6 +7555,7 @@ dependencies = [
|
||||
"pin-project-lite",
|
||||
"rand 0.10.0-rc.5",
|
||||
"reqwest",
|
||||
"rustfs-config",
|
||||
"rustfs-utils",
|
||||
"s3s",
|
||||
"serde",
|
||||
@@ -7768,9 +7760,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "rustix"
|
||||
version = "1.1.2"
|
||||
version = "1.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e"
|
||||
checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34"
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"errno",
|
||||
@@ -7809,9 +7801,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "rustls-native-certs"
|
||||
version = "0.8.2"
|
||||
version = "0.8.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923"
|
||||
checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63"
|
||||
dependencies = [
|
||||
"openssl-probe",
|
||||
"rustls-pki-types",
|
||||
@@ -7879,14 +7871,14 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
|
||||
|
||||
[[package]]
|
||||
name = "ryu"
|
||||
version = "1.0.20"
|
||||
version = "1.0.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
|
||||
checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984"
|
||||
|
||||
[[package]]
|
||||
name = "s3s"
|
||||
version = "0.12.0-rc.6"
|
||||
source = "git+https://github.com/s3s-project/s3s.git?branch=main#df2434d7ad2f0b774e68f25cae90c053dcb84f24"
|
||||
version = "0.13.0-alpha"
|
||||
source = "git+https://github.com/s3s-project/s3s.git?branch=main#9e41304ed549b89cfb03ede98e9c0d2ac7522051"
|
||||
dependencies = [
|
||||
"arrayvec",
|
||||
"async-trait",
|
||||
@@ -7973,9 +7965,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "schemars"
|
||||
version = "1.1.0"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289"
|
||||
checksum = "54e910108742c57a770f492731f99be216a52fadd361b06c8fb59d74ccc267d2"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"dyn-clone",
|
||||
@@ -7987,9 +7979,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "schemars_derive"
|
||||
version = "1.1.0"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "301858a4023d78debd2353c7426dc486001bddc91ae31a76fb1f55132f7e2633"
|
||||
checksum = "4908ad288c5035a8eb12cfdf0d49270def0a268ee162b75eeee0f85d155a7c45"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -8096,28 +8088,6 @@ dependencies = [
|
||||
"serde_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde-untagged"
|
||||
version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f9faf48a4a2d2693be24c6289dbe26552776eb7737074e6722891fadbe6c5058"
|
||||
dependencies = [
|
||||
"erased-serde",
|
||||
"serde",
|
||||
"serde_core",
|
||||
"typeid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde-value"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c"
|
||||
dependencies = [
|
||||
"ordered-float",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_core"
|
||||
version = "1.0.228"
|
||||
@@ -8160,15 +8130,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.145"
|
||||
version = "1.0.148"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
|
||||
checksum = "3084b546a1dd6289475996f182a22aba973866ea8e8b02c51d9f46b1336a22da"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"memchr",
|
||||
"ryu",
|
||||
"serde",
|
||||
"serde_core",
|
||||
"zmij",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -8215,7 +8185,7 @@ dependencies = [
|
||||
"indexmap 1.9.3",
|
||||
"indexmap 2.12.1",
|
||||
"schemars 0.9.0",
|
||||
"schemars 1.1.0",
|
||||
"schemars 1.2.0",
|
||||
"serde_core",
|
||||
"serde_json",
|
||||
"serde_with_macros",
|
||||
@@ -8315,9 +8285,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "shadow-rs"
|
||||
version = "1.4.0"
|
||||
version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "72d18183cef626bce22836103349c7050d73db799be0171386b80947d157ae32"
|
||||
checksum = "ff351910f271e7065781b6b4f0f43cb515d474d812f31176a0246d9058e47d5d"
|
||||
dependencies = [
|
||||
"cargo_metadata",
|
||||
"const_format",
|
||||
@@ -8353,10 +8323,11 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
|
||||
|
||||
[[package]]
|
||||
name = "signal-hook-registry"
|
||||
version = "1.4.7"
|
||||
version = "1.4.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad"
|
||||
checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b"
|
||||
dependencies = [
|
||||
"errno",
|
||||
"libc",
|
||||
]
|
||||
|
||||
@@ -8888,14 +8859,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tempfile"
|
||||
version = "3.23.0"
|
||||
version = "3.24.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16"
|
||||
checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c"
|
||||
dependencies = [
|
||||
"fastrand",
|
||||
"getrandom 0.3.4",
|
||||
"once_cell",
|
||||
"rustix 1.1.2",
|
||||
"rustix 1.1.3",
|
||||
"windows-sys 0.61.2",
|
||||
]
|
||||
|
||||
@@ -10285,7 +10256,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"rustix 1.1.2",
|
||||
"rustix 1.1.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -10390,9 +10361,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "zeroize_derive"
|
||||
version = "1.4.2"
|
||||
version = "1.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
|
||||
checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -10434,9 +10405,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "zip"
|
||||
version = "6.0.0"
|
||||
version = "7.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eb2a05c7c36fde6c09b08576c9f7fb4cda705990f73b58fe011abf7dfb24168b"
|
||||
checksum = "bdd8a47718a4ee5fe78e07667cd36f3de80e7c2bfe727c7074245ffc7303c037"
|
||||
dependencies = [
|
||||
"aes 0.8.4",
|
||||
"arbitrary",
|
||||
@@ -10445,6 +10416,7 @@ dependencies = [
|
||||
"crc32fast",
|
||||
"deflate64",
|
||||
"flate2",
|
||||
"generic-array",
|
||||
"getrandom 0.3.4",
|
||||
"hmac 0.12.1",
|
||||
"indexmap 2.12.1",
|
||||
@@ -10461,9 +10433,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "zlib-rs"
|
||||
version = "0.5.4"
|
||||
version = "0.5.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "51f936044d677be1a1168fae1d03b583a285a5dd9d8cbf7b24c23aa1fc775235"
|
||||
checksum = "40990edd51aae2c2b6907af74ffb635029d5788228222c4bb811e9351c0caad3"
|
||||
|
||||
[[package]]
|
||||
name = "zmij"
|
||||
version = "1.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e9747e91771f56fd7893e1164abd78febd14a670ceec257caad15e051de35f06"
|
||||
|
||||
[[package]]
|
||||
name = "zopfli"
|
||||
|
||||
36
Cargo.toml
@@ -19,6 +19,7 @@ members = [
|
||||
"crates/audit", # Audit target management system with multi-target fan-out
|
||||
"crates/common", # Shared utilities and data structures
|
||||
"crates/config", # Configuration management
|
||||
"crates/credentials", # Credential management system
|
||||
"crates/crypto", # Cryptography and security features
|
||||
"crates/ecstore", # Erasure coding storage implementation
|
||||
"crates/e2e_test", # End-to-end test suite
|
||||
@@ -71,6 +72,7 @@ rustfs-audit = { path = "crates/audit", version = "0.0.5" }
|
||||
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
|
||||
rustfs-common = { path = "crates/common", version = "0.0.5" }
|
||||
rustfs-config = { path = "./crates/config", version = "0.0.5" }
|
||||
rustfs-credentials = { path = "crates/credentials", version = "0.0.5" }
|
||||
rustfs-crypto = { path = "crates/crypto", version = "0.0.5" }
|
||||
rustfs-ecstore = { path = "crates/ecstore", version = "0.0.5" }
|
||||
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
|
||||
@@ -97,8 +99,7 @@ async-channel = "2.5.0"
|
||||
async-compression = { version = "0.4.19" }
|
||||
async-recursion = "1.1.1"
|
||||
async-trait = "0.1.89"
|
||||
axum = "0.8.7"
|
||||
axum-extra = "0.12.2"
|
||||
axum = "0.8.8"
|
||||
axum-server = { version = "0.8.0", features = ["tls-rustls-no-provider"], default-features = false }
|
||||
futures = "0.3.31"
|
||||
futures-core = "0.3.31"
|
||||
@@ -109,7 +110,8 @@ hyper-rustls = { version = "0.27.7", default-features = false, features = ["nati
|
||||
hyper-util = { version = "0.1.19", features = ["tokio", "server-auto", "server-graceful"] }
|
||||
http = "1.4.0"
|
||||
http-body = "1.0.1"
|
||||
reqwest = { version = "0.12.26", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
|
||||
http-body-util = "0.1.3"
|
||||
reqwest = { version = "0.12.28", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
|
||||
socket2 = "0.6.1"
|
||||
tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
|
||||
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
|
||||
@@ -126,17 +128,17 @@ tower-http = { version = "0.6.8", features = ["cors"] }
|
||||
bytes = { version = "1.11.0", features = ["serde"] }
|
||||
bytesize = "2.3.1"
|
||||
byteorder = "1.5.0"
|
||||
flatbuffers = "25.9.23"
|
||||
flatbuffers = "25.12.19"
|
||||
form_urlencoded = "1.2.2"
|
||||
prost = "0.14.1"
|
||||
quick-xml = "0.38.4"
|
||||
rmcp = { version = "0.12.0" }
|
||||
rmp = { version = "0.8.14" }
|
||||
rmp-serde = { version = "1.3.0" }
|
||||
rmp = { version = "0.8.15" }
|
||||
rmp-serde = { version = "1.3.1" }
|
||||
serde = { version = "1.0.228", features = ["derive"] }
|
||||
serde_json = { version = "1.0.145", features = ["raw_value"] }
|
||||
serde_json = { version = "1.0.148", features = ["raw_value"] }
|
||||
serde_urlencoded = "0.7.1"
|
||||
schemars = "1.1.0"
|
||||
schemars = "1.2.0"
|
||||
|
||||
# Cryptography and Security
|
||||
aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
|
||||
@@ -163,13 +165,13 @@ time = { version = "0.3.44", features = ["std", "parsing", "formatting", "macros
|
||||
|
||||
# Utilities and Tools
|
||||
anyhow = "1.0.100"
|
||||
arc-swap = "1.7.1"
|
||||
arc-swap = "1.8.0"
|
||||
astral-tokio-tar = "0.5.6"
|
||||
atoi = "2.0.0"
|
||||
atomic_enum = "0.3.0"
|
||||
aws-config = { version = "1.8.12" }
|
||||
aws-credential-types = { version = "1.2.11" }
|
||||
aws-sdk-s3 = { version = "1.117.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
|
||||
aws-sdk-s3 = { version = "1.119.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
|
||||
aws-smithy-types = { version = "1.3.5" }
|
||||
base64 = "0.22.1"
|
||||
base64-simd = "0.8.0"
|
||||
@@ -199,11 +201,11 @@ libc = "0.2.178"
|
||||
libsystemd = "0.7.2"
|
||||
local-ip-address = "0.6.8"
|
||||
lz4 = "1.28.1"
|
||||
matchit = "0.9.0"
|
||||
matchit = "0.9.1"
|
||||
md-5 = "0.11.0-rc.3"
|
||||
md5 = "0.8.0"
|
||||
mime_guess = "2.0.5"
|
||||
moka = { version = "0.12.11", features = ["future"] }
|
||||
moka = { version = "0.12.12", features = ["future"] }
|
||||
netif = "0.1.6"
|
||||
nix = { version = "0.30.1", features = ["fs"] }
|
||||
nu-ansi-term = "0.50.3"
|
||||
@@ -222,9 +224,9 @@ regex = { version = "1.12.2" }
|
||||
rumqttc = { version = "0.25.1" }
|
||||
rust-embed = { version = "8.9.0" }
|
||||
rustc-hash = { version = "2.1.1" }
|
||||
s3s = { version = "0.12.0-rc.6", features = ["minio"], git = "https://github.com/s3s-project/s3s.git", branch = "main" }
|
||||
s3s = { version = "0.13.0-alpha", features = ["minio"], git = "https://github.com/s3s-project/s3s.git", branch = "main" }
|
||||
serial_test = "3.2.0"
|
||||
shadow-rs = { version = "1.4.0", default-features = false }
|
||||
shadow-rs = { version = "1.5.0", default-features = false }
|
||||
siphasher = "1.0.1"
|
||||
smallvec = { version = "1.15.1", features = ["serde"] }
|
||||
smartstring = "1.0.1"
|
||||
@@ -235,7 +237,7 @@ strum = { version = "0.27.2", features = ["derive"] }
|
||||
sysctl = "0.7.1"
|
||||
sysinfo = "0.37.2"
|
||||
temp-env = "0.3.6"
|
||||
tempfile = "3.23.0"
|
||||
tempfile = "3.24.0"
|
||||
test-case = "3.3.1"
|
||||
thiserror = "2.0.17"
|
||||
tracing = { version = "0.1.44" }
|
||||
@@ -252,7 +254,7 @@ walkdir = "2.5.0"
|
||||
wildmatch = { version = "2.6.1", features = ["serde"] }
|
||||
winapi = { version = "0.3.9" }
|
||||
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
|
||||
zip = "6.0.0"
|
||||
zip = "7.0.0"
|
||||
zstd = "0.13.3"
|
||||
|
||||
# Observability and Metrics
|
||||
@@ -275,8 +277,6 @@ jemalloc_pprof = { version = "0.8.1", features = ["symbolize", "flamegraph"] }
|
||||
# Used to generate CPU performance analysis data and flame diagrams
|
||||
pprof = { version = "0.15.0", features = ["flamegraph", "protobuf-codec"] }
|
||||
|
||||
|
||||
|
||||
[workspace.metadata.cargo-shear]
|
||||
ignored = ["rustfs", "rustfs-mcp"]
|
||||
|
||||
|
||||
@@ -148,8 +148,8 @@ ENV RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_OBS_LOG_DIRECTORY="/logs" \
|
||||
RUSTFS_USERNAME="rustfs" \
|
||||
RUSTFS_GROUPNAME="rustfs" \
|
||||
RUSTFS_UID="1000" \
|
||||
RUSTFS_GID="1000"
|
||||
RUSTFS_UID="10001" \
|
||||
RUSTFS_GID="10001"
|
||||
|
||||
# Note: We don't COPY source here because we expect it to be mounted at /app
|
||||
# We rely on cargo run to build and run
|
||||
@@ -187,8 +187,8 @@ RUN set -eux; \
|
||||
|
||||
# Create a conventional runtime user/group (final switch happens in entrypoint via chroot --userspec)
|
||||
RUN set -eux; \
|
||||
groupadd -g 1000 rustfs; \
|
||||
useradd -u 1000 -g rustfs -M -s /usr/sbin/nologin rustfs
|
||||
groupadd -g 10001 rustfs; \
|
||||
useradd -u 10001 -g rustfs -M -s /usr/sbin/nologin rustfs
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
@@ -212,8 +212,8 @@ ENV RUSTFS_ADDRESS=":9000" \
|
||||
RUST_LOG="warn" \
|
||||
RUSTFS_USERNAME="rustfs" \
|
||||
RUSTFS_GROUPNAME="rustfs" \
|
||||
RUSTFS_UID="1000" \
|
||||
RUSTFS_GID="1000"
|
||||
RUSTFS_UID="10001" \
|
||||
RUSTFS_GID="10001"
|
||||
|
||||
EXPOSE 9000
|
||||
VOLUME ["/data"]
|
||||
|
||||
411
Makefile
@@ -2,375 +2,80 @@
|
||||
# Remote development requires VSCode with Dev Containers, Remote SSH, Remote Explorer
|
||||
# https://code.visualstudio.com/docs/remote/containers
|
||||
###########
|
||||
|
||||
.PHONY: SHELL
|
||||
|
||||
# Makefile global config
|
||||
# Use config.mak to override any of the following variables.
|
||||
# Do not make changes here.
|
||||
|
||||
.DEFAULT_GOAL := help
|
||||
.EXPORT_ALL_VARIABLES:
|
||||
.ONESHELL:
|
||||
.SILENT:
|
||||
|
||||
NUM_CORES := $(shell nproc 2>/dev/null || sysctl -n hw.ncpu)
|
||||
|
||||
MAKEFLAGS += -j$(NUM_CORES) -l$(NUM_CORES)
|
||||
MAKEFLAGS += --silent
|
||||
|
||||
SHELL:= /bin/bash
|
||||
.SHELLFLAGS = -eu -o pipefail -c
|
||||
|
||||
DOCKER_CLI ?= docker
|
||||
IMAGE_NAME ?= rustfs:v1.0.0
|
||||
CONTAINER_NAME ?= rustfs-dev
|
||||
# Docker build configurations
|
||||
DOCKERFILE_PRODUCTION = Dockerfile
|
||||
DOCKERFILE_SOURCE = Dockerfile.source
|
||||
|
||||
# Code quality and formatting targets
|
||||
.PHONY: fmt
|
||||
fmt:
|
||||
@echo "🔧 Formatting code..."
|
||||
cargo fmt --all
|
||||
|
||||
.PHONY: fmt-check
|
||||
fmt-check:
|
||||
@echo "📝 Checking code formatting..."
|
||||
cargo fmt --all --check
|
||||
|
||||
.PHONY: clippy
|
||||
clippy:
|
||||
@echo "🔍 Running clippy checks..."
|
||||
cargo clippy --fix --allow-dirty
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
.PHONY: check
|
||||
check:
|
||||
@echo "🔨 Running compilation check..."
|
||||
cargo check --all-targets
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
@echo "🧪 Running tests..."
|
||||
@if command -v cargo-nextest >/dev/null 2>&1; then \
|
||||
cargo nextest run --all --exclude e2e_test; \
|
||||
else \
|
||||
echo "ℹ️ cargo-nextest not found; falling back to 'cargo test'"; \
|
||||
cargo test --workspace --exclude e2e_test -- --nocapture; \
|
||||
fi
|
||||
cargo test --all --doc
|
||||
|
||||
.PHONY: pre-commit
|
||||
pre-commit: fmt clippy check test
|
||||
@echo "✅ All pre-commit checks passed!"
|
||||
|
||||
.PHONY: setup-hooks
|
||||
setup-hooks:
|
||||
@echo "🔧 Setting up git hooks..."
|
||||
chmod +x .git/hooks/pre-commit
|
||||
@echo "✅ Git hooks setup complete!"
|
||||
|
||||
.PHONY: e2e-server
|
||||
e2e-server:
|
||||
sh $(shell pwd)/scripts/run.sh
|
||||
|
||||
.PHONY: probe-e2e
|
||||
probe-e2e:
|
||||
sh $(shell pwd)/scripts/probe.sh
|
||||
|
||||
# Native build using build-rustfs.sh script
|
||||
.PHONY: build
|
||||
build:
|
||||
@echo "🔨 Building RustFS using build-rustfs.sh script..."
|
||||
./build-rustfs.sh
|
||||
|
||||
.PHONY: build-dev
|
||||
build-dev:
|
||||
@echo "🔨 Building RustFS in development mode..."
|
||||
./build-rustfs.sh --dev
|
||||
|
||||
# Docker-based build (alternative approach)
|
||||
# Usage: make BUILD_OS=ubuntu22.04 build-docker
|
||||
# Output: target/ubuntu22.04/release/rustfs
|
||||
BUILD_OS ?= rockylinux9.3
|
||||
.PHONY: build-docker
|
||||
build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
|
||||
build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
|
||||
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
|
||||
build-docker:
|
||||
@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
|
||||
$(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
|
||||
$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)
|
||||
|
||||
.PHONY: build-musl
|
||||
build-musl:
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-musl
|
||||
# Makefile colors config
|
||||
bold := $(shell tput bold)
|
||||
normal := $(shell tput sgr0)
|
||||
errorTitle := $(shell tput setab 1 && tput bold && echo '\n')
|
||||
recommendation := $(shell tput setab 4)
|
||||
underline := $(shell tput smul)
|
||||
reset := $(shell tput -Txterm sgr0)
|
||||
black := $(shell tput setaf 0)
|
||||
red := $(shell tput setaf 1)
|
||||
green := $(shell tput setaf 2)
|
||||
yellow := $(shell tput setaf 3)
|
||||
blue := $(shell tput setaf 4)
|
||||
magenta := $(shell tput setaf 5)
|
||||
cyan := $(shell tput setaf 6)
|
||||
white := $(shell tput setaf 7)
|
||||
|
||||
.PHONY: build-gnu
|
||||
build-gnu:
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
|
||||
define HEADER
|
||||
How to use me:
|
||||
# To get help for each target
|
||||
${bold}make help${reset}
|
||||
|
||||
.PHONY: build-musl-arm64
|
||||
build-musl-arm64:
|
||||
@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-musl
|
||||
# To run and execute a target
|
||||
${bold}make ${cyan}<target>${reset}
|
||||
|
||||
.PHONY: build-gnu-arm64
|
||||
build-gnu-arm64:
|
||||
@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
|
||||
💡 For more help use 'make help', 'make help-build' or 'make help-docker'
|
||||
|
||||
.PHONY: deploy-dev
|
||||
deploy-dev: build-musl
|
||||
@echo "🚀 Deploying to dev server: $${IP}"
|
||||
./scripts/dev_deploy.sh $${IP}
|
||||
🦀 RustFS Makefile Help:
|
||||
|
||||
# ========================================================================================
|
||||
# Docker Multi-Architecture Builds (Primary Methods)
|
||||
# ========================================================================================
|
||||
📋 Main Command Categories:
|
||||
make help-build # Show build-related help
|
||||
make help-docker # Show Docker-related help
|
||||
|
||||
# Production builds using docker-buildx.sh (for CI/CD and production)
|
||||
.PHONY: docker-buildx
|
||||
docker-buildx:
|
||||
@echo "🏗️ Building multi-architecture production Docker images with buildx..."
|
||||
./docker-buildx.sh
|
||||
🔧 Code Quality:
|
||||
make fmt # Format code
|
||||
make clippy # Run clippy checks
|
||||
make test # Run tests
|
||||
make pre-commit # Run all pre-commit checks
|
||||
|
||||
.PHONY: docker-buildx-push
|
||||
docker-buildx-push:
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
|
||||
./docker-buildx.sh --push
|
||||
|
||||
.PHONY: docker-buildx-version
|
||||
docker-buildx-version:
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
|
||||
./docker-buildx.sh --release $(VERSION)
|
||||
|
||||
.PHONY: docker-buildx-push-version
|
||||
docker-buildx-push-version:
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
|
||||
./docker-buildx.sh --release $(VERSION) --push
|
||||
|
||||
# Development/Source builds using direct buildx commands
|
||||
.PHONY: docker-dev
|
||||
docker-dev:
|
||||
@echo "🏗️ Building multi-architecture development Docker images with buildx..."
|
||||
@echo "💡 This builds from source code and is intended for local development and testing"
|
||||
@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:source-latest \
|
||||
--tag rustfs:dev-latest \
|
||||
.
|
||||
|
||||
.PHONY: docker-dev-local
|
||||
docker-dev-local:
|
||||
@echo "🏗️ Building single-architecture development Docker image for local use..."
|
||||
@echo "💡 This builds from source code for the current platform and loads locally"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:source-latest \
|
||||
--tag rustfs:dev-latest \
|
||||
--load \
|
||||
.
|
||||
|
||||
.PHONY: docker-dev-push
|
||||
docker-dev-push:
|
||||
@if [ -z "$(REGISTRY)" ]; then \
|
||||
echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture development Docker images..."
|
||||
@echo "💡 Pushing to registry: $(REGISTRY)"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag $(REGISTRY)/rustfs:source-latest \
|
||||
--tag $(REGISTRY)/rustfs:dev-latest \
|
||||
--push \
|
||||
.
|
||||
🚀 Quick Start:
|
||||
make build # Build RustFS binary
|
||||
make docker-dev-local # Build development Docker image (local)
|
||||
make dev-env-start # Start development environment
|
||||
|
||||
|
||||
endef
|
||||
export HEADER
|
||||
|
||||
# Local production builds using direct buildx (alternative to docker-buildx.sh)
|
||||
.PHONY: docker-buildx-production-local
|
||||
docker-buildx-production-local:
|
||||
@echo "🏗️ Building single-architecture production Docker image locally..."
|
||||
@echo "💡 Alternative to docker-buildx.sh for local testing"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_PRODUCTION) \
|
||||
--tag rustfs:production-latest \
|
||||
--tag rustfs:latest \
|
||||
--load \
|
||||
--build-arg RELEASE=latest \
|
||||
.
|
||||
-include $(addsuffix /*.mak, $(shell find .config/make -type d))
|
||||
|
||||
# ========================================================================================
|
||||
# Single Architecture Docker Builds (Traditional)
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: docker-build-production
|
||||
docker-build-production:
|
||||
@echo "🏗️ Building single-architecture production Docker image..."
|
||||
@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
|
||||
$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .
|
||||
|
||||
.PHONY: docker-build-source
|
||||
docker-build-source:
|
||||
@echo "🏗️ Building single-architecture source Docker image..."
|
||||
@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
|
||||
DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
-f $(DOCKERFILE_SOURCE) -t rustfs:source .
|
||||
|
||||
# ========================================================================================
|
||||
# Development Environment
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: dev-env-start
|
||||
dev-env-start:
|
||||
@echo "🚀 Starting development environment..."
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:dev \
|
||||
--load \
|
||||
.
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
|
||||
-p 9010:9010 -p 9000:9000 \
|
||||
-v $(shell pwd):/workspace \
|
||||
-it rustfs:dev
|
||||
|
||||
.PHONY: dev-env-stop
|
||||
dev-env-stop:
|
||||
@echo "🛑 Stopping development environment..."
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
|
||||
|
||||
.PHONY: dev-env-restart
|
||||
dev-env-restart: dev-env-stop dev-env-start
|
||||
|
||||
|
||||
|
||||
# ========================================================================================
|
||||
# Build Utilities
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: docker-inspect-multiarch
|
||||
docker-inspect-multiarch:
|
||||
@if [ -z "$(IMAGE)" ]; then \
|
||||
echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
|
||||
docker buildx imagetools inspect $(IMAGE)
|
||||
|
||||
.PHONY: build-cross-all
|
||||
build-cross-all:
|
||||
@echo "🔧 Building all target architectures..."
|
||||
@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
|
||||
@echo "🔨 Generating protobuf code..."
|
||||
cargo run --bin gproto || true
|
||||
@echo "🔨 Building x86_64-unknown-linux-gnu..."
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
|
||||
@echo "🔨 Building aarch64-unknown-linux-gnu..."
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
|
||||
@echo "🔨 Building x86_64-unknown-linux-musl..."
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-musl
|
||||
@echo "🔨 Building aarch64-unknown-linux-musl..."
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-musl
|
||||
@echo "✅ All architectures built successfully!"
|
||||
|
||||
# ========================================================================================
|
||||
# Help and Documentation
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: help-build
|
||||
help-build:
|
||||
@echo "🔨 RustFS Build Help:"
|
||||
@echo ""
|
||||
@echo "🚀 Local Build (Recommended):"
|
||||
@echo " make build # Build RustFS binary (includes console by default)"
|
||||
@echo " make build-dev # Development mode build"
|
||||
@echo " make build-musl # Build x86_64 musl version"
|
||||
@echo " make build-gnu # Build x86_64 GNU version"
|
||||
@echo " make build-musl-arm64 # Build aarch64 musl version"
|
||||
@echo " make build-gnu-arm64 # Build aarch64 GNU version"
|
||||
@echo ""
|
||||
@echo "🐳 Docker Build:"
|
||||
@echo " make build-docker # Build using Docker container"
|
||||
@echo " make build-docker BUILD_OS=ubuntu22.04 # Specify build system"
|
||||
@echo ""
|
||||
@echo "🏗️ Cross-architecture Build:"
|
||||
@echo " make build-cross-all # Build binaries for all architectures"
|
||||
@echo ""
|
||||
@echo "🔧 Direct usage of build-rustfs.sh script:"
|
||||
@echo " ./build-rustfs.sh --help # View script help"
|
||||
@echo " ./build-rustfs.sh --no-console # Build without console resources"
|
||||
@echo " ./build-rustfs.sh --force-console-update # Force update console resources"
|
||||
@echo " ./build-rustfs.sh --dev # Development mode build"
|
||||
@echo " ./build-rustfs.sh --sign # Sign binary files"
|
||||
@echo " ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
|
||||
@echo " ./build-rustfs.sh --skip-verification # Skip binary verification"
|
||||
@echo ""
|
||||
@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"
|
||||
|
||||
.PHONY: help-docker
|
||||
help-docker:
|
||||
@echo "🐳 Docker Multi-architecture Build Help:"
|
||||
@echo ""
|
||||
@echo "🚀 Production Image Build (Recommended to use docker-buildx.sh):"
|
||||
@echo " make docker-buildx # Build production multi-arch image (no push)"
|
||||
@echo " make docker-buildx-push # Build and push production multi-arch image"
|
||||
@echo " make docker-buildx-version VERSION=v1.0.0 # Build specific version"
|
||||
@echo " make docker-buildx-push-version VERSION=v1.0.0 # Build and push specific version"
|
||||
@echo ""
|
||||
@echo "🔧 Development/Source Image Build (Local development testing):"
|
||||
@echo " make docker-dev # Build dev multi-arch image (cannot load locally)"
|
||||
@echo " make docker-dev-local # Build dev single-arch image (local load)"
|
||||
@echo " make docker-dev-push REGISTRY=xxx # Build and push dev image"
|
||||
@echo ""
|
||||
@echo "🏗️ Local Production Image Build (Alternative):"
|
||||
@echo " make docker-buildx-production-local # Build production single-arch image locally"
|
||||
@echo ""
|
||||
@echo "📦 Single-architecture Build (Traditional way):"
|
||||
@echo " make docker-build-production # Build single-arch production image"
|
||||
@echo " make docker-build-source # Build single-arch source image"
|
||||
@echo ""
|
||||
@echo "🚀 Development Environment Management:"
|
||||
@echo " make dev-env-start # Start development container environment"
|
||||
@echo " make dev-env-stop # Stop development container environment"
|
||||
@echo " make dev-env-restart # Restart development container environment"
|
||||
@echo ""
|
||||
@echo "🔧 Auxiliary Tools:"
|
||||
@echo " make build-cross-all # Build binaries for all architectures"
|
||||
@echo " make docker-inspect-multiarch IMAGE=xxx # Check image architecture support"
|
||||
@echo ""
|
||||
@echo "📋 Environment Variables:"
|
||||
@echo " REGISTRY Image registry address (required for push)"
|
||||
@echo " DOCKERHUB_USERNAME Docker Hub username"
|
||||
@echo " DOCKERHUB_TOKEN Docker Hub access token"
|
||||
@echo " GITHUB_TOKEN GitHub access token"
|
||||
@echo ""
|
||||
@echo "💡 Suggestions:"
|
||||
@echo " - Production use: Use docker-buildx* commands (based on precompiled binaries)"
|
||||
@echo " - Local development: Use docker-dev* commands (build from source)"
|
||||
@echo " - Development environment: Use dev-env-* commands to manage dev containers"
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo "🦀 RustFS Makefile Help:"
|
||||
@echo ""
|
||||
@echo "📋 Main Command Categories:"
|
||||
@echo " make help-build # Show build-related help"
|
||||
@echo " make help-docker # Show Docker-related help"
|
||||
@echo ""
|
||||
@echo "🔧 Code Quality:"
|
||||
@echo " make fmt # Format code"
|
||||
@echo " make clippy # Run clippy checks"
|
||||
@echo " make test # Run tests"
|
||||
@echo " make pre-commit # Run all pre-commit checks"
|
||||
@echo ""
|
||||
@echo "🚀 Quick Start:"
|
||||
@echo " make build # Build RustFS binary"
|
||||
@echo " make docker-dev-local # Build development Docker image (local)"
|
||||
@echo " make dev-env-start # Start development environment"
|
||||
@echo ""
|
||||
@echo "💡 For more help use 'make help-build' or 'make help-docker'"
|
||||
|
||||
18
README.md
@@ -10,6 +10,11 @@
|
||||
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14181" alt="rustfs%2Frustfs | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
</p>
|
||||
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.rustfs.com/installation/">Getting Started</a>
|
||||
· <a href="https://docs.rustfs.com/">Docs</a>
|
||||
@@ -45,10 +50,10 @@ Unlike other storage systems, RustFS is released under the permissive Apache 2.
|
||||
| :--- | :--- | :--- | :--- |
|
||||
| **S3 Core Features** | ✅ Available | **Bitrot Protection** | ✅ Available |
|
||||
| **Upload / Download** | ✅ Available | **Single Node Mode** | ✅ Available |
|
||||
| **Versioning** | ✅ Available | **Bucket Replication** | ⚠️ Partial Support |
|
||||
| **Versioning** | ✅ Available | **Bucket Replication** | ✅ Available |
|
||||
| **Logging** | ✅ Available | **Lifecycle Management** | 🚧 Under Testing |
|
||||
| **Event Notifications** | ✅ Available | **Distributed Mode** | 🚧 Under Testing |
|
||||
| **K8s Helm Charts** | ✅ Available | **OPA (Open Policy Agent)** | 🚧 Under Testing |
|
||||
| **K8s Helm Charts** | ✅ Available | **RustFS KMS** | 🚧 Under Testing |
|
||||
|
||||
|
||||
|
||||
@@ -103,7 +108,7 @@ The RustFS container runs as a non-root user `rustfs` (UID `10001`). If you run
|
||||
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest
|
||||
|
||||
# Using specific version
|
||||
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.68
|
||||
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0-alpha.76
|
||||
```
|
||||
|
||||
You can also use Docker Compose. Using the `docker-compose.yml` file in the root directory:
|
||||
@@ -174,7 +179,7 @@ nix run
|
||||
|
||||
### Accessing RustFS
|
||||
|
||||
5. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console.
|
||||
5. **Access the Console**: Open your web browser and navigate to `http://localhost:9001` to access the RustFS console.
|
||||
* Default credentials: `rustfsadmin` / `rustfsadmin`
|
||||
6. **Create a Bucket**: Use the console to create a new bucket for your objects.
|
||||
7. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs/clients to interact with your RustFS instance.
|
||||
@@ -215,11 +220,6 @@ RustFS is a community-driven project, and we appreciate all contributions. Check
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
|
||||
</a>
|
||||
|
||||
## Github Trending Top
|
||||
|
||||
🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending top charts.
|
||||
|
||||
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
|
||||
|
||||
## Star History
|
||||
|
||||
|
||||
12
README_ZH.md
@@ -10,6 +10,10 @@
|
||||
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14181" alt="rustfs%2Frustfs | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.rustfs.com/installation/">快速开始</a>
|
||||
· <a href="https://docs.rustfs.com/">文档</a>
|
||||
@@ -17,6 +21,8 @@
|
||||
· <a href="https://github.com/rustfs/rustfs/discussions">社区讨论</a>
|
||||
</p>
|
||||
|
||||
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a> | 简体中文 |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
|
||||
@@ -46,7 +52,7 @@ RustFS 是一个基于 Rust 构建的高性能分布式对象存储系统。Rust
|
||||
| :--- | :--- | :--- | :--- |
|
||||
| **S3 核心功能** | ✅ 可用 | **Bitrot (防数据腐烂)** | ✅ 可用 |
|
||||
| **上传 / 下载** | ✅ 可用 | **单机模式** | ✅ 可用 |
|
||||
| **版本控制** | ✅ 可用 | **存储桶复制** | ⚠️ 部分可用 |
|
||||
| **版本控制** | ✅ 可用 | **存储桶复制** | ✅ 可用 |
|
||||
| **日志功能** | ✅ 可用 | **生命周期管理** | 🚧 测试中 |
|
||||
| **事件通知** | ✅ 可用 | **分布式模式** | 🚧 测试中 |
|
||||
| **K8s Helm Chart** | ✅ 可用 | **OPA (策略引擎)** | 🚧 测试中 |
|
||||
@@ -200,11 +206,7 @@ RustFS 是一个社区驱动的项目,我们感谢所有的贡献。请查看
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
|
||||
</a>
|
||||
|
||||
## Github Trending Top
|
||||
|
||||
🚀 RustFS 深受全球开源爱好者和企业用户的喜爱,经常荣登 GitHub Trending 榜单。
|
||||
|
||||
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
|
||||
|
||||
## Star 历史
|
||||
|
||||
|
||||
@@ -36,6 +36,7 @@ clen = "clen"
|
||||
datas = "datas"
|
||||
bre = "bre"
|
||||
abd = "abd"
|
||||
mak = "mak"
|
||||
|
||||
[files]
|
||||
extend-exclude = []
|
||||
@@ -468,14 +468,17 @@ impl HealManager {
|
||||
let active_heals = self.active_heals.clone();
|
||||
let cancel_token = self.cancel_token.clone();
|
||||
let storage = self.storage.clone();
|
||||
|
||||
info!(
|
||||
"start_auto_disk_scanner: Starting auto disk scanner with interval: {:?}",
|
||||
config.read().await.heal_interval
|
||||
);
|
||||
let mut duration = {
|
||||
let config = config.read().await;
|
||||
config.heal_interval
|
||||
};
|
||||
if duration < Duration::from_secs(1) {
|
||||
duration = Duration::from_secs(1);
|
||||
}
|
||||
info!("start_auto_disk_scanner: Starting auto disk scanner with interval: {:?}", duration);
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut interval = interval(config.read().await.heal_interval);
|
||||
let mut interval = interval(duration);
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
|
||||
@@ -30,7 +30,7 @@ use rustfs_ecstore::{
|
||||
bucket::versioning::VersioningApi,
|
||||
bucket::versioning_sys::BucketVersioningSys,
|
||||
data_usage::{aggregate_local_snapshots, compute_bucket_usage, store_data_usage_in_backend},
|
||||
disk::{Disk, DiskAPI, DiskStore, RUSTFS_META_BUCKET, WalkDirOptions},
|
||||
disk::{DiskAPI, DiskStore, RUSTFS_META_BUCKET, WalkDirOptions},
|
||||
set_disk::SetDisks,
|
||||
store_api::ObjectInfo,
|
||||
};
|
||||
@@ -1977,7 +1977,7 @@ impl Scanner {
|
||||
} else {
|
||||
// Apply lifecycle actions
|
||||
if let Some(lifecycle_config) = &lifecycle_config {
|
||||
if let Disk::Local(_local_disk) = &**disk {
|
||||
if disk.is_local() {
|
||||
let vcfg = BucketVersioningSys::get(bucket).await.ok();
|
||||
|
||||
let mut scanner_item = ScannerItem {
|
||||
|
||||
@@ -21,10 +21,11 @@ use rustfs_ecstore::bucket::metadata_sys::{BucketMetadataSys, GLOBAL_BucketMetad
|
||||
use rustfs_ecstore::endpoints::EndpointServerPools;
|
||||
use rustfs_ecstore::store::ECStore;
|
||||
use rustfs_ecstore::store_api::{ObjectIO, PutObjReader, StorageAPI};
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc, Once};
|
||||
use tempfile::TempDir;
|
||||
use tokio::sync::RwLock;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::Level;
|
||||
|
||||
/// Build a minimal single-node ECStore over a temp directory and populate objects.
|
||||
async fn create_store_with_objects(count: usize) -> (TempDir, std::sync::Arc<ECStore>) {
|
||||
@@ -74,8 +75,22 @@ async fn create_store_with_objects(count: usize) -> (TempDir, std::sync::Arc<ECS
|
||||
(temp_dir, store)
|
||||
}
|
||||
|
||||
static INIT: Once = Once::new();
|
||||
|
||||
fn init_tracing(filter_level: Level) {
|
||||
INIT.call_once(|| {
|
||||
let _ = tracing_subscriber::fmt()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.with_max_level(filter_level)
|
||||
.with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339())
|
||||
.with_thread_names(true)
|
||||
.try_init();
|
||||
});
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn fallback_builds_full_counts_over_100_objects() {
|
||||
init_tracing(Level::ERROR);
|
||||
let (_tmp, store) = create_store_with_objects(1000).await;
|
||||
let scanner = Scanner::new(None, None);
|
||||
|
||||
|
||||
@@ -38,9 +38,13 @@ use walkdir::WalkDir;
|
||||
static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage>)> = OnceLock::new();
|
||||
static INIT: Once = Once::new();
|
||||
|
||||
fn init_tracing() {
|
||||
pub fn init_tracing() {
|
||||
INIT.call_once(|| {
|
||||
let _ = tracing_subscriber::fmt::try_init();
|
||||
let _ = tracing_subscriber::fmt()
|
||||
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
|
||||
.with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339())
|
||||
.with_thread_names(true)
|
||||
.try_init();
|
||||
});
|
||||
}
|
||||
|
||||
@@ -356,7 +360,7 @@ mod serial_tests {
|
||||
|
||||
// Create heal manager with faster interval
|
||||
let cfg = HealConfig {
|
||||
heal_interval: Duration::from_secs(2),
|
||||
heal_interval: Duration::from_secs(1),
|
||||
..Default::default()
|
||||
};
|
||||
let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
|
||||
|
||||
@@ -60,8 +60,9 @@ impl TargetFactory for WebhookTargetFactory {
|
||||
let endpoint = config
|
||||
.lookup(WEBHOOK_ENDPOINT)
|
||||
.ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
|
||||
let endpoint_url = Url::parse(&endpoint)
|
||||
.map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{endpoint}')")))?;
|
||||
let parsed_endpoint = endpoint.trim();
|
||||
let endpoint_url = Url::parse(parsed_endpoint)
|
||||
.map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{parsed_endpoint}')")))?;
|
||||
|
||||
let args = WebhookArgs {
|
||||
enable: true, // If we are here, it's already enabled.
|
||||
|
||||
@@ -39,4 +39,4 @@ path-clean = { workspace = true }
|
||||
rmp-serde = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
s3s = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
@@ -24,14 +24,43 @@ pub static GLOBAL_RUSTFS_HOST: LazyLock<RwLock<String>> = LazyLock::new(|| RwLoc
|
||||
pub static GLOBAL_RUSTFS_PORT: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
|
||||
pub static GLOBAL_RUSTFS_ADDR: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
|
||||
pub static GLOBAL_CONN_MAP: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
|
||||
pub static GLOBAL_ROOT_CERT: LazyLock<RwLock<Option<Vec<u8>>>> = LazyLock::new(|| RwLock::new(None));
|
||||
pub static GLOBAL_MTLS_IDENTITY: LazyLock<RwLock<Option<MtlsIdentityPem>>> = LazyLock::new(|| RwLock::new(None));
|
||||
|
||||
/// Set the global RustFS address used for gRPC connections.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `addr` - A string slice representing the RustFS address (e.g., "https://node1:9000").
|
||||
pub async fn set_global_addr(addr: &str) {
|
||||
*GLOBAL_RUSTFS_ADDR.write().await = addr.to_string();
|
||||
}
|
||||
|
||||
/// Set the global root CA certificate for outbound gRPC clients.
|
||||
/// This certificate is used to validate server TLS certificates.
|
||||
/// When set to None, clients use the system default root CAs.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `cert` - A vector of bytes representing the PEM-encoded root CA certificate.
|
||||
pub async fn set_global_root_cert(cert: Vec<u8>) {
|
||||
*GLOBAL_ROOT_CERT.write().await = Some(cert);
|
||||
}
|
||||
|
||||
/// Set the global mTLS identity (cert+key PEM) for outbound gRPC clients.
|
||||
/// When set, clients will present this identity to servers requesting/requiring mTLS.
|
||||
/// When None, clients proceed with standard server-authenticated TLS.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `identity` - An optional MtlsIdentityPem struct containing the cert and key PEM.
|
||||
pub async fn set_global_mtls_identity(identity: Option<MtlsIdentityPem>) {
|
||||
*GLOBAL_MTLS_IDENTITY.write().await = identity;
|
||||
}
|
||||
|
||||
/// Evict a stale/dead connection from the global connection cache.
|
||||
/// This is critical for cluster recovery when a node dies unexpectedly (e.g., power-off).
|
||||
/// By removing the cached connection, subsequent requests will establish a fresh connection.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `addr` - The address of the connection to evict.
|
||||
pub async fn evict_connection(addr: &str) {
|
||||
let removed = GLOBAL_CONN_MAP.write().await.remove(addr);
|
||||
if removed.is_some() {
|
||||
@@ -40,6 +69,12 @@ pub async fn evict_connection(addr: &str) {
|
||||
}
|
||||
|
||||
/// Check if a connection exists in the cache for the given address.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `addr` - The address to check.
|
||||
///
|
||||
/// # Returns
|
||||
/// * `bool` - True if a cached connection exists, false otherwise.
|
||||
pub async fn has_cached_connection(addr: &str) -> bool {
|
||||
GLOBAL_CONN_MAP.read().await.contains_key(addr)
|
||||
}
|
||||
@@ -53,3 +88,12 @@ pub async fn clear_all_connections() {
|
||||
tracing::warn!("Cleared {} cached connections from global map", count);
|
||||
}
|
||||
}
|
||||
/// Optional client identity (cert+key PEM) for outbound mTLS.
|
||||
///
|
||||
/// When present, gRPC clients will present this identity to servers requesting/requiring mTLS.
|
||||
/// When absent, clients proceed with standard server-authenticated TLS.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct MtlsIdentityPem {
|
||||
pub cert_pem: Vec<u8>,
|
||||
pub key_pem: Vec<u8>,
|
||||
}
|
||||
|
||||
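The hunk above adds async setters for the outbound gRPC address, root CA, and optional client identity. A minimal sketch of how a bootstrap path might wire them together, assuming the `pub use globals::*;` re-export shown in the lib.rs hunk that follows; the function name and surrounding startup code are illustrative, not part of this change:

```rust
// Hypothetical startup wiring (not part of this diff); the types and setters
// come from rustfs_common::globals as added above.
use rustfs_common::{MtlsIdentityPem, set_global_addr, set_global_mtls_identity, set_global_root_cert};

/// Configure outbound gRPC TLS state once during boot.
pub async fn configure_grpc_tls(addr: &str, root_ca_pem: Option<Vec<u8>>, identity: Option<(Vec<u8>, Vec<u8>)>) {
    // Address used by gRPC clients for this node.
    set_global_addr(addr).await;
    // Custom root CA for validating server certificates; None keeps the system roots.
    if let Some(ca) = root_ca_pem {
        set_global_root_cert(ca).await;
    }
    // Client identity presented when a peer requests or requires mTLS.
    set_global_mtls_identity(identity.map(|(cert_pem, key_pem)| MtlsIdentityPem { cert_pem, key_pem })).await;
}
```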
@@ -19,6 +19,10 @@ pub mod globals;
|
||||
pub mod heal_channel;
|
||||
pub mod last_minute;
|
||||
pub mod metrics;
|
||||
mod readiness;
|
||||
|
||||
pub use globals::*;
|
||||
pub use readiness::{GlobalReadiness, SystemStage};
|
||||
|
||||
// is ','
|
||||
pub static DEFAULT_DELIMITER: u8 = 44;
|
||||
|
||||
136
crates/common/src/readiness.rs
Normal file
@@ -0,0 +1,136 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::atomic::{AtomicU8, Ordering};
|
||||
|
||||
/// Represents the various stages of system startup
|
||||
#[repr(u8)]
|
||||
pub enum SystemStage {
|
||||
Booting = 0,
|
||||
StorageReady = 1, // Disks online, Quorum met
|
||||
IamReady = 2, // Users and Policies loaded into cache
|
||||
FullReady = 3, // System ready to serve all traffic
|
||||
}
|
||||
|
||||
/// Global readiness tracker for the service
|
||||
/// This struct uses atomic operations to track the readiness status of various components
|
||||
/// of the service in a thread-safe manner.
|
||||
pub struct GlobalReadiness {
|
||||
status: AtomicU8,
|
||||
}
|
||||
|
||||
impl Default for GlobalReadiness {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl GlobalReadiness {
|
||||
/// Create a new GlobalReadiness instance with initial status as Starting
|
||||
/// # Returns
|
||||
/// A new instance of GlobalReadiness
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
status: AtomicU8::new(SystemStage::Booting as u8),
|
||||
}
|
||||
}
|
||||
|
||||
/// Update the system to a new stage
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `step` - The SystemStage step to mark as ready
|
||||
pub fn mark_stage(&self, step: SystemStage) {
|
||||
self.status.fetch_max(step as u8, Ordering::SeqCst);
|
||||
}
|
||||
|
||||
/// Check if the service is fully ready
|
||||
/// # Returns
|
||||
/// `true` if the service is fully ready, `false` otherwise
|
||||
pub fn is_ready(&self) -> bool {
|
||||
self.status.load(Ordering::SeqCst) == SystemStage::FullReady as u8
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
|
||||
#[test]
|
||||
fn test_initial_state() {
|
||||
let readiness = GlobalReadiness::new();
|
||||
assert!(!readiness.is_ready());
|
||||
assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::Booting as u8);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mark_stage_progression() {
|
||||
let readiness = GlobalReadiness::new();
|
||||
readiness.mark_stage(SystemStage::StorageReady);
|
||||
assert!(!readiness.is_ready());
|
||||
assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::StorageReady as u8);
|
||||
|
||||
readiness.mark_stage(SystemStage::IamReady);
|
||||
assert!(!readiness.is_ready());
|
||||
assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::IamReady as u8);
|
||||
|
||||
readiness.mark_stage(SystemStage::FullReady);
|
||||
assert!(readiness.is_ready());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_no_regression() {
|
||||
let readiness = GlobalReadiness::new();
|
||||
readiness.mark_stage(SystemStage::FullReady);
|
||||
readiness.mark_stage(SystemStage::IamReady); // Should not regress
|
||||
assert!(readiness.is_ready());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_concurrent_marking() {
|
||||
let readiness = Arc::new(GlobalReadiness::new());
|
||||
let mut handles = vec![];
|
||||
|
||||
for _ in 0..10 {
|
||||
let r = Arc::clone(&readiness);
|
||||
handles.push(thread::spawn(move || {
|
||||
r.mark_stage(SystemStage::StorageReady);
|
||||
r.mark_stage(SystemStage::IamReady);
|
||||
r.mark_stage(SystemStage::FullReady);
|
||||
}));
|
||||
}
|
||||
|
||||
for h in handles {
|
||||
h.join().unwrap();
|
||||
}
|
||||
|
||||
assert!(readiness.is_ready());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_ready_only_at_full_ready() {
|
||||
let readiness = GlobalReadiness::new();
|
||||
assert!(!readiness.is_ready());
|
||||
|
||||
readiness.mark_stage(SystemStage::StorageReady);
|
||||
assert!(!readiness.is_ready());
|
||||
|
||||
readiness.mark_stage(SystemStage::IamReady);
|
||||
assert!(!readiness.is_ready());
|
||||
|
||||
readiness.mark_stage(SystemStage::FullReady);
|
||||
assert!(readiness.is_ready());
|
||||
}
|
||||
}
|
||||
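`is_ready()` only reports true once `FullReady` is reached, so a readiness probe can be built directly on top of this tracker. A minimal sketch, assuming the `pub use readiness::{GlobalReadiness, SystemStage}` re-export added in lib.rs above; the probe function and its return shape are illustrative, not part of this file:

```rust
// Hypothetical /readyz style probe (not part of this diff).
use rustfs_common::{GlobalReadiness, SystemStage};
use std::sync::Arc;

/// Map readiness onto an HTTP-style (status, body) pair.
fn readyz(readiness: &Arc<GlobalReadiness>) -> (u16, &'static str) {
    if readiness.is_ready() {
        (200, "ready")
    } else {
        // Still booting, or storage/IAM not yet initialized.
        (503, "starting")
    }
}

fn main() {
    let readiness = Arc::new(GlobalReadiness::new());
    assert_eq!(readyz(&readiness).0, 503);

    // Each subsystem marks its stage as it comes online; fetch_max keeps the
    // progression monotonic, so out-of-order calls cannot regress the state.
    readiness.mark_stage(SystemStage::StorageReady);
    readiness.mark_stage(SystemStage::IamReady);
    readiness.mark_stage(SystemStage::FullReady);
    assert_eq!(readyz(&readiness).0, 200);
}
```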
@@ -49,21 +49,6 @@ pub const SERVICE_VERSION: &str = "1.0.0";
|
||||
/// Default value: production
|
||||
pub const ENVIRONMENT: &str = "production";
|
||||
|
||||
/// Default Access Key
|
||||
/// Default value: rustfsadmin
|
||||
/// Environment variable: RUSTFS_ACCESS_KEY
|
||||
/// Command line argument: --access-key
|
||||
/// Example: RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
/// Example: --access-key rustfsadmin
|
||||
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
|
||||
/// Default Secret Key
|
||||
/// Default value: rustfsadmin
|
||||
/// Environment variable: RUSTFS_SECRET_KEY
|
||||
/// Command line argument: --secret-key
|
||||
/// Example: RUSTFS_SECRET_KEY=rustfsadmin
|
||||
/// Example: --secret-key rustfsadmin
|
||||
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";
|
||||
|
||||
/// Default console enable
|
||||
/// This is the default value for the console server.
|
||||
/// It is used to enable or disable the console server.
|
||||
@@ -89,6 +74,30 @@ pub const RUSTFS_TLS_KEY: &str = "rustfs_key.pem";
|
||||
/// This is the default cert for TLS.
|
||||
pub const RUSTFS_TLS_CERT: &str = "rustfs_cert.pem";
|
||||
|
||||
/// Default public certificate filename for rustfs
|
||||
/// This is the default public certificate filename for rustfs.
|
||||
/// It is used to store the public certificate of the application.
|
||||
/// Default value: public.crt
|
||||
pub const RUSTFS_PUBLIC_CERT: &str = "public.crt";
|
||||
|
||||
/// Default CA certificate filename for rustfs
|
||||
/// This is the default CA certificate filename for rustfs.
|
||||
/// It is used to store the CA certificate of the application.
|
||||
/// Default value: ca.crt
|
||||
pub const RUSTFS_CA_CERT: &str = "ca.crt";
|
||||
|
||||
/// Default HTTP prefix for rustfs
|
||||
/// This is the default HTTP prefix for rustfs.
|
||||
/// It is used to identify HTTP URLs.
|
||||
/// Default value: http://
|
||||
pub const RUSTFS_HTTP_PREFIX: &str = "http://";
|
||||
|
||||
/// Default HTTPS prefix for rustfs
|
||||
/// This is the default HTTPS prefix for rustfs.
|
||||
/// It is used to identify HTTPS URLs.
|
||||
/// Default value: https://
|
||||
pub const RUSTFS_HTTPS_PREFIX: &str = "https://";
|
||||
|
||||
/// Default port for rustfs
|
||||
/// This is the default port for rustfs.
|
||||
/// This is used to bind the server to a specific port.
|
||||
@@ -161,6 +170,12 @@ pub const KI_B: usize = 1024;
|
||||
/// Default value: 1048576
|
||||
pub const MI_B: usize = 1024 * 1024;
|
||||
|
||||
/// Environment variable for gRPC authentication token
|
||||
/// Used to set the authentication token for gRPC communication
|
||||
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
|
||||
/// Default value: No default value. RUSTFS_SECRET_KEY value is recommended.
|
||||
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -201,20 +216,6 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_security_constants() {
|
||||
// Test security related constants
|
||||
assert_eq!(DEFAULT_ACCESS_KEY, "rustfsadmin");
|
||||
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
|
||||
|
||||
assert_eq!(DEFAULT_SECRET_KEY, "rustfsadmin");
|
||||
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");
|
||||
|
||||
// In production environment, access key and secret key should be different
|
||||
// These are default values, so being the same is acceptable, but should be warned in documentation
|
||||
println!("Warning: Default access key and secret key are the same. Change them in production!");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_file_path_constants() {
|
||||
assert_eq!(RUSTFS_TLS_KEY, "rustfs_key.pem");
|
||||
@@ -276,8 +277,6 @@ mod tests {
|
||||
DEFAULT_LOG_LEVEL,
|
||||
SERVICE_VERSION,
|
||||
ENVIRONMENT,
|
||||
DEFAULT_ACCESS_KEY,
|
||||
DEFAULT_SECRET_KEY,
|
||||
RUSTFS_TLS_KEY,
|
||||
RUSTFS_TLS_CERT,
|
||||
DEFAULT_ADDRESS,
|
||||
@@ -307,29 +306,6 @@ mod tests {
|
||||
assert_ne!(DEFAULT_CONSOLE_PORT, 0, "Console port should not be zero");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_security_best_practices() {
|
||||
// Test security best practices
|
||||
|
||||
// These are default values, should be changed in production environments
|
||||
println!("Security Warning: Default credentials detected!");
|
||||
println!("Access Key: {DEFAULT_ACCESS_KEY}");
|
||||
println!("Secret Key: {DEFAULT_SECRET_KEY}");
|
||||
println!("These should be changed in production environments!");
|
||||
|
||||
// Verify that key lengths meet minimum security requirements
|
||||
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
|
||||
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");
|
||||
|
||||
// Check if default credentials contain common insecure patterns
|
||||
let _insecure_patterns = ["admin", "password", "123456", "default"];
|
||||
let _access_key_lower = DEFAULT_ACCESS_KEY.to_lowercase();
|
||||
let _secret_key_lower = DEFAULT_SECRET_KEY.to_lowercase();
|
||||
|
||||
// Note: More security check logic can be added here
|
||||
// For example, check if keys contain insecure patterns
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_configuration_consistency() {
|
||||
// Test configuration consistency
|
||||
|
||||
@@ -39,3 +39,10 @@ pub const DEFAULT_MAX_IO_EVENTS_PER_TICK: usize = 1024;
|
||||
/// Event polling default (Tokio default 61)
|
||||
pub const DEFAULT_EVENT_INTERVAL: u32 = 61;
|
||||
pub const DEFAULT_RNG_SEED: Option<u64> = None; // None means random
|
||||
|
||||
/// Threshold for small object seek support in megabytes.
|
||||
///
|
||||
/// When an object is smaller than this size, rustfs will provide seek support.
|
||||
///
|
||||
/// Default is set to 10MB.
|
||||
pub const DEFAULT_OBJECT_SEEK_SUPPORT_THRESHOLD: usize = 10 * 1024 * 1024;
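// A minimal sketch (hypothetical helper, not part of this changeset) of how the
// threshold above might be consulted when deciding whether to offer seek support.
pub fn supports_small_object_seek(object_size: usize) -> bool {
    object_size < DEFAULT_OBJECT_SEEK_SUPPORT_THRESHOLD
}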
|
||||
|
||||
@@ -12,4 +12,75 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// TLS related environment variable names and default values
|
||||
/// Environment variable to enable TLS key logging
|
||||
/// When set to "1", RustFS will log TLS keys to the specified file for debugging purposes.
|
||||
/// By default, this is disabled.
|
||||
/// To enable, set the environment variable RUSTFS_TLS_KEYLOG=1
|
||||
pub const ENV_TLS_KEYLOG: &str = "RUSTFS_TLS_KEYLOG";
|
||||
|
||||
/// Default value for TLS key logging
|
||||
/// By default, RustFS does not log TLS keys.
|
||||
/// To change this behavior, set the environment variable RUSTFS_TLS_KEYLOG=1
|
||||
pub const DEFAULT_TLS_KEYLOG: bool = false;
|
||||
|
||||
/// Environment variable to trust system CA certificates
|
||||
/// When set to "1", RustFS will trust system CA certificates in addition to any
|
||||
/// custom CA certificates provided in the configuration.
|
||||
/// By default, this is disabled.
|
||||
/// To enable, set the environment variable RUSTFS_TRUST_SYSTEM_CA=1
|
||||
pub const ENV_TRUST_SYSTEM_CA: &str = "RUSTFS_TRUST_SYSTEM_CA";
|
||||
|
||||
/// Default value for trusting system CA certificates
|
||||
/// By default, RustFS does not trust system CA certificates.
|
||||
/// To change this behavior, set the environment variable RUSTFS_TRUST_SYSTEM_CA=1
|
||||
pub const DEFAULT_TRUST_SYSTEM_CA: bool = false;
|
||||
|
||||
/// Environment variable to trust leaf certificates as CA
|
||||
/// When set to "1", RustFS will treat leaf certificates as CA certificates for trust validation.
|
||||
/// By default, this is disabled.
|
||||
/// To enable, set the environment variable RUSTFS_TRUST_LEAF_CERT_AS_CA=1
|
||||
pub const ENV_TRUST_LEAF_CERT_AS_CA: &str = "RUSTFS_TRUST_LEAF_CERT_AS_CA";
|
||||
|
||||
/// Default value for trusting leaf certificates as CA
|
||||
/// By default, RustFS does not trust leaf certificates as CA.
|
||||
/// To change this behavior, set the environment variable RUSTFS_TRUST_LEAF_CERT_AS_CA=1
|
||||
pub const DEFAULT_TRUST_LEAF_CERT_AS_CA: bool = false;
|
||||
|
||||
/// Default filename for client CA certificate
|
||||
/// client_ca.crt (CA bundle for verifying client certificates in server mTLS)
|
||||
pub const RUSTFS_CLIENT_CA_CERT_FILENAME: &str = "client_ca.crt";
|
||||
|
||||
/// Environment variable for client certificate file path
|
||||
/// RUSTFS_MTLS_CLIENT_CERT
|
||||
/// Specifies the file path to the client certificate used for mTLS authentication.
|
||||
/// If not set, RustFS will look for the default filename "client_cert.pem" in the current directory.
|
||||
/// To set, use the environment variable RUSTFS_MTLS_CLIENT_CERT=/path/to/client_cert.pem
|
||||
pub const ENV_MTLS_CLIENT_CERT: &str = "RUSTFS_MTLS_CLIENT_CERT";
|
||||
|
||||
/// Default filename for client certificate
|
||||
/// client_cert.pem
|
||||
pub const RUSTFS_CLIENT_CERT_FILENAME: &str = "client_cert.pem";
|
||||
|
||||
/// Environment variable for client private key file path
|
||||
/// RUSTFS_MTLS_CLIENT_KEY
|
||||
/// Specifies the file path to the client private key used for mTLS authentication.
|
||||
/// If not set, RustFS will look for the default filename "client_key.pem" in the current directory.
|
||||
/// To set, use the environment variable RUSTFS_MTLS_CLIENT_KEY=/path/to/client_key.pem
|
||||
pub const ENV_MTLS_CLIENT_KEY: &str = "RUSTFS_MTLS_CLIENT_KEY";
|
||||
|
||||
/// Default filename for client private key
|
||||
/// client_key.pem
|
||||
pub const RUSTFS_CLIENT_KEY_FILENAME: &str = "client_key.pem";
|
||||
|
||||
/// RUSTFS_SERVER_MTLS_ENABLE
|
||||
/// Environment variable to enable server mTLS
|
||||
/// When set to "1", RustFS server will require client certificates for authentication.
|
||||
/// By default, this is disabled.
|
||||
/// To enable, set the environment variable RUSTFS_SERVER_MTLS_ENABLE=1
|
||||
pub const ENV_SERVER_MTLS_ENABLE: &str = "RUSTFS_SERVER_MTLS_ENABLE";
|
||||
|
||||
/// Default value for enabling server mTLS
|
||||
/// By default, RustFS server mTLS is disabled.
|
||||
/// To change this behavior, set the environment variable RUSTFS_SERVER_MTLS_ENABLE=1
|
||||
pub const DEFAULT_SERVER_MTLS_ENABLE: bool = false;
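// A minimal sketch (hypothetical helper, not part of this changeset) showing how one
// of the boolean flags above, e.g. RUSTFS_TLS_KEYLOG, might be resolved: the variable
// counts as enabled only when set to "1", otherwise the documented default applies.
pub fn tls_keylog_enabled() -> bool {
    std::env::var(ENV_TLS_KEYLOG).map(|v| v == "1").unwrap_or(DEFAULT_TLS_KEYLOG)
}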
|
||||
|
||||
@@ -51,6 +51,18 @@ pub const ENV_NOTIFY_TARGET_STREAM_CONCURRENCY: &str = "RUSTFS_NOTIFY_TARGET_STR
|
||||
/// Adjust this value based on your system's capabilities and expected load.
|
||||
pub const DEFAULT_NOTIFY_TARGET_STREAM_CONCURRENCY: usize = 20;
|
||||
|
||||
/// Name of the environment variable that configures send concurrency.
|
||||
/// Controls how many send operations are processed in parallel by the notification system.
|
||||
/// Defaults to [`DEFAULT_NOTIFY_SEND_CONCURRENCY`] if not set.
|
||||
/// Example: `RUSTFS_NOTIFY_SEND_CONCURRENCY=64`.
|
||||
pub const ENV_NOTIFY_SEND_CONCURRENCY: &str = "RUSTFS_NOTIFY_SEND_CONCURRENCY";
|
||||
|
||||
/// Default concurrency for send operations in the notification system
|
||||
/// This value is used if the environment variable `RUSTFS_NOTIFY_SEND_CONCURRENCY` is not set.
|
||||
/// It defines how many send operations can be processed in parallel by the notification system at any given time.
|
||||
/// Adjust this value based on your system's capabilities and expected load.
|
||||
pub const DEFAULT_NOTIFY_SEND_CONCURRENCY: usize = 64;
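// A minimal sketch (hypothetical helper, not part of this changeset) of resolving the
// send concurrency from RUSTFS_NOTIFY_SEND_CONCURRENCY with the documented fallback.
pub fn notify_send_concurrency() -> usize {
    std::env::var(ENV_NOTIFY_SEND_CONCURRENCY)
        .ok()
        .and_then(|v| v.parse::<usize>().ok())
        .unwrap_or(DEFAULT_NOTIFY_SEND_CONCURRENCY)
}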
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub const NOTIFY_SUB_SYSTEMS: &[&str] = &[NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS];
|
||||
|
||||
|
||||
crates/credentials/Cargo.toml (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
[package]
|
||||
name = "rustfs-credentials"
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
version.workspace = true
|
||||
homepage.workspace = true
|
||||
description = "Credentials management utilities for RustFS, enabling secure handling of authentication and authorization data."
|
||||
keywords = ["rustfs", "Minio", "credentials", "authentication", "authorization"]
|
||||
categories = ["web-programming", "development-tools", "data-structures", "security"]
|
||||
|
||||
[dependencies]
|
||||
base64-simd = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json.workspace = true
|
||||
time = { workspace = true, features = ["serde-human-readable"] }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
crates/credentials/README.md (new file, 44 lines)
@@ -0,0 +1,44 @@
|
||||
[](https://rustfs.com)
|
||||
|
||||
# RustFS Credentials - Credential Management Module
|
||||
|
||||
<p align="center">
|
||||
<strong>A module for managing credentials within the RustFS distributed object storage system.</strong>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
|
||||
<a href="https://docs.rustfs.com/">📖 Documentation</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
This module provides a secure and efficient way to handle various types of credentials,
|
||||
such as API keys, access tokens, and cryptographic keys, required for interacting with
|
||||
the RustFS ecosystem and external services.
|
||||
|
||||
## 📖 Overview
|
||||
|
||||
**RustFS Credentials** is a module dedicated to managing credentials for the [RustFS](https://rustfs.com) distributed
|
||||
object storage system. For the complete RustFS experience,
|
||||
please visit the [main RustFS repository](https://github.com/rustfs/rustfs).
|
||||
|
||||
## ✨ Features
|
||||
|
||||
- Secure storage and retrieval of credentials
|
||||
- Support for multiple credential types (API keys, tokens, etc.)
|
||||
- Encryption of sensitive credential data
|
||||
- Integration with external secret management systems
|
||||
- Easy-to-use API for credential management (see the sketch below)
|
||||
- Credential rotation and expiration handling
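
## 🚀 Quick Start

A minimal usage sketch based on the APIs introduced in this crate (`gen_access_key`, `gen_secret_key`, and the `Credentials` struct); it is illustrative rather than an authoritative example.

```rust
use rustfs_credentials::{gen_access_key, gen_secret_key, Credentials};

fn main() -> std::io::Result<()> {
    // Generate a random access/secret key pair (lengths mirror the crate's own tests).
    let access_key = gen_access_key(20)?;
    let secret_key = gen_secret_key(32)?;

    // Build a credential with default values for the remaining fields
    // and check that it passes the basic validity rules.
    let cred = Credentials {
        access_key,
        secret_key,
        ..Default::default()
    };
    assert!(cred.is_valid());

    Ok(())
}
```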
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
For comprehensive documentation, examples, and usage guides, please visit the
|
||||
main [RustFS repository](https://github.com/rustfs/rustfs).
|
||||
|
||||
## 📄 License
|
||||
|
||||
This project is licensed under the Apache License 2.0 - see the [LICENSE](../../LICENSE) file for details.
|
||||
crates/credentials/src/constants.rs (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// Default Access Key
|
||||
/// Default value: rustfsadmin
|
||||
/// Environment variable: RUSTFS_ACCESS_KEY
|
||||
/// Command line argument: --access-key
|
||||
/// Example: RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
/// Example: --access-key rustfsadmin
|
||||
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
|
||||
/// Default Secret Key
|
||||
/// Default value: rustfsadmin
|
||||
/// Environment variable: RUSTFS_SECRET_KEY
|
||||
/// Command line argument: --secret-key
|
||||
/// Example: RUSTFS_SECRET_KEY=rustfsadmin
|
||||
/// Example: --secret-key rustfsadmin
|
||||
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";
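// A minimal sketch (hypothetical helper, not part of this changeset) of how the
// RUSTFS_ACCESS_KEY / RUSTFS_SECRET_KEY environment variables documented above could
// override these defaults.
pub fn resolve_default_credentials() -> (String, String) {
    let ak = std::env::var("RUSTFS_ACCESS_KEY").unwrap_or_else(|_| DEFAULT_ACCESS_KEY.to_string());
    let sk = std::env::var("RUSTFS_SECRET_KEY").unwrap_or_else(|_| DEFAULT_SECRET_KEY.to_string());
    (ak, sk)
}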
|
||||
|
||||
/// Environment variable for gRPC authentication token
|
||||
/// Used to set the authentication token for gRPC communication
|
||||
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
|
||||
/// Default value: none. Falling back to the RUSTFS_SECRET_KEY value is recommended.
|
||||
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";
|
||||
|
||||
/// IAM Policy Types
|
||||
/// Used to differentiate between embedded and inherited policies
|
||||
/// Example: "embedded-policy" or "inherited-policy"
|
||||
/// Default value: "embedded-policy"
|
||||
pub const EMBEDDED_POLICY_TYPE: &str = "embedded-policy";
|
||||
|
||||
/// IAM Policy Types
|
||||
/// Used to differentiate between embedded and inherited policies
|
||||
/// Example: "embedded-policy" or "inherited-policy"
|
||||
/// Default value: "inherited-policy"
|
||||
pub const INHERITED_POLICY_TYPE: &str = "inherited-policy";
|
||||
|
||||
/// IAM Policy Claim Name for Service Account
|
||||
/// Used to identify the service account policy claim in JWT tokens
|
||||
/// Example: "sa-policy"
|
||||
/// Default value: "sa-policy"
|
||||
pub const IAM_POLICY_CLAIM_NAME_SA: &str = "sa-policy";
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_security_constants() {
|
||||
// Test security related constants
|
||||
assert_eq!(DEFAULT_ACCESS_KEY, "rustfsadmin");
|
||||
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
|
||||
|
||||
assert_eq!(DEFAULT_SECRET_KEY, "rustfsadmin");
|
||||
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");
|
||||
|
||||
// In production environments, the access key and secret key should be different
|
||||
// These are default values, so their being identical is acceptable here, but the documentation should warn about it
|
||||
println!("Warning: Default access key and secret key are the same. Change them in production!");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_security_best_practices() {
|
||||
// Test security best practices
|
||||
|
||||
// These are default values and should be changed in production environments
|
||||
println!("Security Warning: Default credentials detected!");
|
||||
println!("Access Key: {DEFAULT_ACCESS_KEY}");
|
||||
println!("Secret Key: {DEFAULT_SECRET_KEY}");
|
||||
println!("These should be changed in production environments!");
|
||||
|
||||
// Verify that key lengths meet minimum security requirements
|
||||
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
|
||||
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");
|
||||
|
||||
// Check if default credentials contain common insecure patterns
|
||||
let _insecure_patterns = ["admin", "password", "123456", "default"];
|
||||
let _access_key_lower = DEFAULT_ACCESS_KEY.to_lowercase();
|
||||
let _secret_key_lower = DEFAULT_SECRET_KEY.to_lowercase();
|
||||
|
||||
// Note: More security check logic can be added here
|
||||
// For example, check if keys contain insecure patterns
|
||||
}
|
||||
}
|
||||
crates/credentials/src/credentials.rs (new file, 386 lines)
@@ -0,0 +1,386 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{DEFAULT_SECRET_KEY, ENV_GRPC_AUTH_TOKEN, IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
|
||||
use rand::{Rng, RngCore};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
use std::io::Error;
|
||||
use std::sync::OnceLock;
|
||||
use time::OffsetDateTime;
|
||||
|
||||
/// Global active credentials
|
||||
static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();
|
||||
|
||||
/// Global gRPC authentication token
|
||||
static GLOBAL_GRPC_AUTH_TOKEN: OnceLock<String> = OnceLock::new();
|
||||
|
||||
/// Initialize the global action credentials
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `ak` - Optional access key
|
||||
/// * `sk` - Optional secret key
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Result<(), Box<Credentials>>` - Ok if successful, Err with existing credentials if already initialized
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if automatic credential generation fails when `ak` or `sk`
|
||||
/// are `None`, for example if the random number generator fails while calling
|
||||
/// `gen_access_key` or `gen_secret_key`.
|
||||
pub fn init_global_action_credentials(ak: Option<String>, sk: Option<String>) -> Result<(), Box<Credentials>> {
|
||||
let ak = ak.unwrap_or_else(|| gen_access_key(20).expect("Failed to generate access key"));
|
||||
let sk = sk.unwrap_or_else(|| gen_secret_key(32).expect("Failed to generate secret key"));
|
||||
|
||||
let cred = Credentials {
|
||||
access_key: ak,
|
||||
secret_key: sk,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
GLOBAL_ACTIVE_CRED.set(cred).map_err(|e| {
|
||||
Box::new(Credentials {
|
||||
access_key: e.access_key.clone(),
|
||||
..Default::default()
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the global action credentials
|
||||
pub fn get_global_action_cred() -> Option<Credentials> {
|
||||
GLOBAL_ACTIVE_CRED.get().cloned()
|
||||
}
|
||||
|
||||
/// Get the global secret key
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Option<String>` - The global secret key, if set
|
||||
///
|
||||
pub fn get_global_secret_key_opt() -> Option<String> {
|
||||
GLOBAL_ACTIVE_CRED.get().map(|cred| cred.secret_key.clone())
|
||||
}
|
||||
|
||||
/// Get the global secret key
|
||||
///
|
||||
/// # Returns
|
||||
/// * `String` - The global secret key, or empty string if not set
|
||||
///
|
||||
pub fn get_global_secret_key() -> String {
|
||||
GLOBAL_ACTIVE_CRED
|
||||
.get()
|
||||
.map(|cred| cred.secret_key.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Get the global access key
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Option<String>` - The global access key, if set
|
||||
///
|
||||
pub fn get_global_access_key_opt() -> Option<String> {
|
||||
GLOBAL_ACTIVE_CRED.get().map(|cred| cred.access_key.clone())
|
||||
}
|
||||
|
||||
/// Get the global access key
|
||||
///
|
||||
/// # Returns
|
||||
/// * `String` - The global access key, or empty string if not set
|
||||
///
|
||||
pub fn get_global_access_key() -> String {
|
||||
GLOBAL_ACTIVE_CRED
|
||||
.get()
|
||||
.map(|cred| cred.access_key.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Generates a random access key of the specified length.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `length` - The length of the access key to generate
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Result<String>` - A result containing the generated access key or an error if the length is too short
|
||||
///
|
||||
/// # Errors
|
||||
/// This function will return an error if the specified length is less than 3.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```no_run
|
||||
/// use rustfs_credentials::gen_access_key;
|
||||
///
|
||||
/// let access_key = gen_access_key(16).unwrap();
|
||||
/// println!("Generated access key: {}", access_key);
|
||||
/// ```
|
||||
///
|
||||
pub fn gen_access_key(length: usize) -> std::io::Result<String> {
|
||||
const ALPHA_NUMERIC_TABLE: [char; 36] = [
|
||||
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
|
||||
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
|
||||
];
|
||||
|
||||
if length < 3 {
|
||||
return Err(Error::other("access key length is too short"));
|
||||
}
|
||||
|
||||
let mut result = String::with_capacity(length);
|
||||
let mut rng = rand::rng();
|
||||
|
||||
for _ in 0..length {
|
||||
result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]);
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Generates a random secret key of the specified length.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `length` - The length of the secret key to generate
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Result<String>` - A result containing the generated secret key or an error if the length is too short
|
||||
///
|
||||
/// # Errors
|
||||
/// This function will return an error if the specified length is less than 8.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```no_run
|
||||
/// use rustfs_credentials::gen_secret_key;
|
||||
///
|
||||
/// let secret_key = gen_secret_key(32).unwrap();
|
||||
/// println!("Generated secret key: {}", secret_key);
|
||||
/// ```
|
||||
///
|
||||
pub fn gen_secret_key(length: usize) -> std::io::Result<String> {
|
||||
use base64_simd::URL_SAFE_NO_PAD;
|
||||
|
||||
if length < 8 {
|
||||
return Err(Error::other("secret key length is too short"));
|
||||
}
|
||||
let mut rng = rand::rng();
|
||||
|
||||
let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)];
|
||||
rng.fill_bytes(&mut key);
|
||||
|
||||
let encoded = URL_SAFE_NO_PAD.encode_to_string(&key);
|
||||
let key_str = encoded.replace("/", "+");
|
||||
|
||||
Ok(key_str)
|
||||
}
|
||||
|
||||
/// Get the gRPC authentication token from environment variable
|
||||
///
|
||||
/// # Returns
|
||||
/// * `String` - The gRPC authentication token
|
||||
///
|
||||
pub fn get_grpc_token() -> String {
|
||||
GLOBAL_GRPC_AUTH_TOKEN
|
||||
.get_or_init(|| {
|
||||
env::var(ENV_GRPC_AUTH_TOKEN)
|
||||
.unwrap_or_else(|_| get_global_secret_key_opt().unwrap_or_else(|| DEFAULT_SECRET_KEY.to_string()))
|
||||
})
|
||||
.clone()
|
||||
}
|
||||
|
||||
/// Credentials structure
|
||||
///
|
||||
/// Fields:
|
||||
/// - access_key: Access key string
|
||||
/// - secret_key: Secret key string
|
||||
/// - session_token: Session token string
|
||||
/// - expiration: Optional expiration time as OffsetDateTime
|
||||
/// - status: Status string (e.g., "active", "off")
|
||||
/// - parent_user: Parent user string
|
||||
/// - groups: Optional list of groups
|
||||
/// - claims: Optional map of claims
|
||||
/// - name: Optional name string
|
||||
/// - description: Optional description string
|
||||
///
|
||||
#[derive(Serialize, Deserialize, Clone, Default, Debug)]
|
||||
pub struct Credentials {
|
||||
pub access_key: String,
|
||||
pub secret_key: String,
|
||||
pub session_token: String,
|
||||
pub expiration: Option<OffsetDateTime>,
|
||||
pub status: String,
|
||||
pub parent_user: String,
|
||||
pub groups: Option<Vec<String>>,
|
||||
pub claims: Option<HashMap<String, Value>>,
|
||||
pub name: Option<String>,
|
||||
pub description: Option<String>,
|
||||
}
|
||||
|
||||
impl Credentials {
|
||||
pub fn is_expired(&self) -> bool {
|
||||
if self.expiration.is_none() {
|
||||
return false;
|
||||
}
|
||||
|
||||
self.expiration
|
||||
.as_ref()
|
||||
.map(|e| OffsetDateTime::now_utc() > *e)
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
pub fn is_temp(&self) -> bool {
|
||||
!self.session_token.is_empty() && !self.is_expired()
|
||||
}
|
||||
|
||||
pub fn is_service_account(&self) -> bool {
|
||||
self.claims
|
||||
.as_ref()
|
||||
.map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|_| !self.parent_user.is_empty()))
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
pub fn is_implied_policy(&self) -> bool {
|
||||
if self.is_service_account() {
|
||||
return self
|
||||
.claims
|
||||
.as_ref()
|
||||
.map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|v| v == INHERITED_POLICY_TYPE))
|
||||
.unwrap_or_default();
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
pub fn is_valid(&self) -> bool {
|
||||
if self.status == "off" {
|
||||
return false;
|
||||
}
|
||||
|
||||
self.access_key.len() >= 3 && self.secret_key.len() >= 8 && !self.is_expired()
|
||||
}
|
||||
|
||||
pub fn is_owner(&self) -> bool {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
|
||||
use time::Duration;
|
||||
|
||||
#[test]
|
||||
fn test_credentials_is_expired() {
|
||||
let mut cred = Credentials::default();
|
||||
assert!(!cred.is_expired());
|
||||
|
||||
cred.expiration = Some(OffsetDateTime::now_utc() + Duration::hours(1));
|
||||
assert!(!cred.is_expired());
|
||||
|
||||
cred.expiration = Some(OffsetDateTime::now_utc() - Duration::hours(1));
|
||||
assert!(cred.is_expired());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_credentials_is_temp() {
|
||||
let mut cred = Credentials::default();
|
||||
assert!(!cred.is_temp());
|
||||
|
||||
cred.session_token = "token".to_string();
|
||||
assert!(cred.is_temp());
|
||||
|
||||
cred.expiration = Some(OffsetDateTime::now_utc() - Duration::hours(1));
|
||||
assert!(!cred.is_temp());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_credentials_is_service_account() {
|
||||
let mut cred = Credentials::default();
|
||||
assert!(!cred.is_service_account());
|
||||
|
||||
let mut claims = HashMap::new();
|
||||
claims.insert(IAM_POLICY_CLAIM_NAME_SA.to_string(), Value::String("policy".to_string()));
|
||||
cred.claims = Some(claims);
|
||||
cred.parent_user = "parent".to_string();
|
||||
assert!(cred.is_service_account());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_credentials_is_implied_policy() {
|
||||
let mut cred = Credentials::default();
|
||||
assert!(!cred.is_implied_policy());
|
||||
|
||||
let mut claims = HashMap::new();
|
||||
claims.insert(IAM_POLICY_CLAIM_NAME_SA.to_string(), Value::String(INHERITED_POLICY_TYPE.to_string()));
|
||||
cred.claims = Some(claims);
|
||||
cred.parent_user = "parent".to_string();
|
||||
assert!(cred.is_implied_policy());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_credentials_is_valid() {
|
||||
let mut cred = Credentials::default();
|
||||
assert!(!cred.is_valid());
|
||||
|
||||
cred.access_key = "abc".to_string();
|
||||
cred.secret_key = "12345678".to_string();
|
||||
assert!(cred.is_valid());
|
||||
|
||||
cred.status = "off".to_string();
|
||||
assert!(!cred.is_valid());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_credentials_is_owner() {
|
||||
let cred = Credentials::default();
|
||||
assert!(!cred.is_owner());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_global_credentials_flow() {
|
||||
// Since OnceLock can only be set once, we group all tests that touch the global credentials into this one test
|
||||
// If it has already been initialized (possibly from other tests), we verify the results directly
|
||||
if get_global_action_cred().is_none() {
|
||||
// Verify that the initial state is empty
|
||||
assert!(get_global_access_key_opt().is_none());
|
||||
assert_eq!(get_global_access_key(), "");
|
||||
assert!(get_global_secret_key_opt().is_none());
|
||||
assert_eq!(get_global_secret_key(), "");
|
||||
|
||||
// Initialize
|
||||
let test_ak = "test_access_key".to_string();
|
||||
let test_sk = "test_secret_key_123456".to_string();
|
||||
init_global_action_credentials(Some(test_ak.clone()), Some(test_sk.clone())).ok();
|
||||
}
|
||||
|
||||
// Verify the state after initialization
|
||||
let cred = get_global_action_cred().expect("Global credentials should be set");
|
||||
assert!(!cred.access_key.is_empty());
|
||||
assert!(!cred.secret_key.is_empty());
|
||||
|
||||
assert!(get_global_access_key_opt().is_some());
|
||||
assert!(!get_global_access_key().is_empty());
|
||||
assert!(get_global_secret_key_opt().is_some());
|
||||
assert!(!get_global_secret_key().is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_init_global_credentials_auto_gen() {
|
||||
// If the global state hasn't already been initialized, exercise the automatic generation logic
|
||||
if get_global_action_cred().is_none() {
|
||||
init_global_action_credentials(None, None).ok();
|
||||
let ak = get_global_access_key();
|
||||
let sk = get_global_secret_key();
|
||||
assert_eq!(ak.len(), 20);
|
||||
assert_eq!(sk.len(), 32);
|
||||
}
|
||||
}
|
||||
}
|
||||
crates/credentials/src/lib.rs (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod constants;
|
||||
mod credentials;
|
||||
|
||||
pub use constants::*;
|
||||
pub use credentials::*;
|
||||
@@ -18,6 +18,9 @@ mod reliant;
|
||||
#[cfg(test)]
|
||||
pub mod common;
|
||||
|
||||
#[cfg(test)]
|
||||
mod version_id_regression_test;
|
||||
|
||||
// Data usage regression tests
|
||||
#[cfg(test)]
|
||||
mod data_usage_test;
|
||||
|
||||
@@ -0,0 +1,138 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Test for HeadObject on deleted objects with versioning enabled
|
||||
//!
|
||||
//! This test reproduces the issue where getting a deleted object returns
|
||||
//! 200 OK instead of 404 NoSuchKey when versioning is enabled.
|
||||
|
||||
#![cfg(test)]
|
||||
|
||||
use aws_config::meta::region::RegionProviderChain;
|
||||
use aws_sdk_s3::Client;
|
||||
use aws_sdk_s3::config::{Credentials, Region};
|
||||
use aws_sdk_s3::error::SdkError;
|
||||
use aws_sdk_s3::types::{BucketVersioningStatus, VersioningConfiguration};
|
||||
use bytes::Bytes;
|
||||
use serial_test::serial;
|
||||
use std::error::Error;
|
||||
use tracing::info;
|
||||
|
||||
const ENDPOINT: &str = "http://localhost:9000";
|
||||
const ACCESS_KEY: &str = "rustfsadmin";
|
||||
const SECRET_KEY: &str = "rustfsadmin";
|
||||
const BUCKET: &str = "test-head-deleted-versioning-bucket";
|
||||
|
||||
async fn create_aws_s3_client() -> Result<Client, Box<dyn Error>> {
|
||||
let region_provider = RegionProviderChain::default_provider().or_else(Region::new("us-east-1"));
|
||||
let shared_config = aws_config::defaults(aws_config::BehaviorVersion::latest())
|
||||
.region(region_provider)
|
||||
.credentials_provider(Credentials::new(ACCESS_KEY, SECRET_KEY, None, None, "static"))
|
||||
.endpoint_url(ENDPOINT)
|
||||
.load()
|
||||
.await;
|
||||
|
||||
let client = Client::from_conf(
|
||||
aws_sdk_s3::Config::from(&shared_config)
|
||||
.to_builder()
|
||||
.force_path_style(true)
|
||||
.build(),
|
||||
);
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
/// Set up the test bucket, creating it if it doesn't exist, and enable versioning
|
||||
async fn setup_test_bucket(client: &Client) -> Result<(), Box<dyn Error>> {
|
||||
match client.create_bucket().bucket(BUCKET).send().await {
|
||||
Ok(_) => {}
|
||||
Err(SdkError::ServiceError(e)) => {
|
||||
let e = e.into_err();
|
||||
let error_code = e.meta().code().unwrap_or("");
|
||||
if !error_code.eq("BucketAlreadyExists") && !error_code.eq("BucketAlreadyOwnedByYou") {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
|
||||
// Enable versioning
|
||||
client
|
||||
.put_bucket_versioning()
|
||||
.bucket(BUCKET)
|
||||
.versioning_configuration(
|
||||
VersioningConfiguration::builder()
|
||||
.status(BucketVersioningStatus::Enabled)
|
||||
.build(),
|
||||
)
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Test that HeadObject on a deleted object returns NoSuchKey when versioning is enabled
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
#[ignore = "requires running RustFS server at localhost:9000"]
|
||||
async fn test_head_deleted_object_versioning_returns_nosuchkey() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let _ = tracing_subscriber::fmt()
|
||||
.with_max_level(tracing::Level::INFO)
|
||||
.with_test_writer()
|
||||
.try_init();
|
||||
|
||||
info!("🧪 Starting test_head_deleted_object_versioning_returns_nosuchkey");
|
||||
|
||||
let client = create_aws_s3_client().await?;
|
||||
setup_test_bucket(&client).await?;
|
||||
|
||||
let key = "test-head-deleted-versioning.txt";
|
||||
let content = b"Test content for HeadObject with versioning";
|
||||
|
||||
// Upload and verify
|
||||
client
|
||||
.put_object()
|
||||
.bucket(BUCKET)
|
||||
.key(key)
|
||||
.body(Bytes::from_static(content).into())
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
// Delete the object (creates a delete marker)
|
||||
client.delete_object().bucket(BUCKET).key(key).send().await?;
|
||||
|
||||
// Try to head the deleted object (latest version is delete marker)
|
||||
let head_result = client.head_object().bucket(BUCKET).key(key).send().await;
|
||||
|
||||
assert!(head_result.is_err(), "HeadObject on deleted object should return an error");
|
||||
|
||||
match head_result.unwrap_err() {
|
||||
SdkError::ServiceError(service_err) => {
|
||||
let s3_err = service_err.into_err();
|
||||
assert!(
|
||||
s3_err.meta().code() == Some("NoSuchKey")
|
||||
|| s3_err.meta().code() == Some("NotFound")
|
||||
|| s3_err.meta().code() == Some("404"),
|
||||
"Error should be NoSuchKey or NotFound, got: {s3_err:?}"
|
||||
);
|
||||
info!("✅ HeadObject correctly returns NoSuchKey/NotFound");
|
||||
}
|
||||
other_err => {
|
||||
panic!("Expected ServiceError but got: {other_err:?}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
mod conditional_writes;
|
||||
mod get_deleted_object_test;
|
||||
mod head_deleted_object_versioning_test;
|
||||
mod lifecycle;
|
||||
mod lock;
|
||||
mod node_interact_test;
|
||||
|
||||
crates/e2e_test/src/version_id_regression_test.rs (new file, 398 lines)
@@ -0,0 +1,398 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Regression test for Issue #1066: Veeam VBR - S3 returned empty versionId
|
||||
//!
|
||||
//! This test verifies that:
|
||||
//! 1. PutObject returns version_id when versioning is enabled
|
||||
//! 2. CopyObject returns version_id when versioning is enabled
|
||||
//! 3. CompleteMultipartUpload returns version_id when versioning is enabled
|
||||
//! 4. Basic S3 operations still work correctly (no regression)
|
||||
//! 5. Operations on non-versioned buckets work as expected
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::common::{RustFSTestEnvironment, init_logging};
|
||||
use aws_sdk_s3::Client;
|
||||
use aws_sdk_s3::primitives::ByteStream;
|
||||
use aws_sdk_s3::types::{BucketVersioningStatus, CompletedMultipartUpload, CompletedPart, VersioningConfiguration};
|
||||
use serial_test::serial;
|
||||
use tracing::info;
|
||||
|
||||
fn create_s3_client(env: &RustFSTestEnvironment) -> Client {
|
||||
env.create_s3_client()
|
||||
}
|
||||
|
||||
async fn create_bucket(client: &Client, bucket: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
match client.create_bucket().bucket(bucket).send().await {
|
||||
Ok(_) => {
|
||||
info!("✅ Bucket {} created successfully", bucket);
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
if e.to_string().contains("BucketAlreadyOwnedByYou") || e.to_string().contains("BucketAlreadyExists") {
|
||||
info!("ℹ️ Bucket {} already exists", bucket);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Box::new(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn enable_versioning(client: &Client, bucket: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
let versioning_config = VersioningConfiguration::builder()
|
||||
.status(BucketVersioningStatus::Enabled)
|
||||
.build();
|
||||
|
||||
client
|
||||
.put_bucket_versioning()
|
||||
.bucket(bucket)
|
||||
.versioning_configuration(versioning_config)
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
info!("✅ Versioning enabled for bucket {}", bucket);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Test 1: PutObject should return version_id when versioning is enabled
|
||||
/// This directly addresses the Veeam issue from #1066
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_put_object_returns_version_id_with_versioning() {
|
||||
init_logging();
|
||||
info!("🧪 TEST: PutObject returns version_id with versioning enabled");
|
||||
|
||||
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
|
||||
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
|
||||
|
||||
let client = create_s3_client(&env);
|
||||
let bucket = "test-put-version-id";
|
||||
|
||||
create_bucket(&client, bucket).await.expect("Failed to create bucket");
|
||||
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
|
||||
|
||||
let key = "test-file.txt";
|
||||
let content = b"Test content for version ID test";
|
||||
|
||||
info!("📤 Uploading object with key: {}", key);
|
||||
let result = client
|
||||
.put_object()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.body(ByteStream::from_static(content))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(result.is_ok(), "PutObject failed: {:?}", result.err());
|
||||
let output = result.unwrap();
|
||||
|
||||
info!("📥 PutObject response - version_id: {:?}", output.version_id);
|
||||
assert!(
|
||||
output.version_id.is_some(),
|
||||
"❌ FAILED: version_id should be present when versioning is enabled"
|
||||
);
|
||||
assert!(
|
||||
!output.version_id.as_ref().unwrap().is_empty(),
|
||||
"❌ FAILED: version_id should not be empty"
|
||||
);
|
||||
|
||||
info!("✅ PASSED: PutObject correctly returns version_id");
|
||||
}
|
||||
|
||||
/// Test 2: CopyObject should return version_id when versioning is enabled
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_copy_object_returns_version_id_with_versioning() {
|
||||
init_logging();
|
||||
info!("🧪 TEST: CopyObject returns version_id with versioning enabled");
|
||||
|
||||
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
|
||||
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
|
||||
|
||||
let client = create_s3_client(&env);
|
||||
let bucket = "test-copy-version-id";
|
||||
|
||||
create_bucket(&client, bucket).await.expect("Failed to create bucket");
|
||||
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
|
||||
|
||||
let source_key = "source-file.txt";
|
||||
let dest_key = "dest-file.txt";
|
||||
let content = b"Content to copy";
|
||||
|
||||
// First, create source object
|
||||
client
|
||||
.put_object()
|
||||
.bucket(bucket)
|
||||
.key(source_key)
|
||||
.body(ByteStream::from_static(content))
|
||||
.send()
|
||||
.await
|
||||
.expect("Failed to create source object");
|
||||
|
||||
info!("📤 Copying object from {} to {}", source_key, dest_key);
|
||||
let copy_result = client
|
||||
.copy_object()
|
||||
.bucket(bucket)
|
||||
.key(dest_key)
|
||||
.copy_source(format!("{}/{}", bucket, source_key))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(copy_result.is_ok(), "CopyObject failed: {:?}", copy_result.err());
|
||||
let output = copy_result.unwrap();
|
||||
|
||||
info!("📥 CopyObject response - version_id: {:?}", output.version_id);
|
||||
assert!(
|
||||
output.version_id.is_some(),
|
||||
"❌ FAILED: version_id should be present when versioning is enabled"
|
||||
);
|
||||
assert!(
|
||||
!output.version_id.as_ref().unwrap().is_empty(),
|
||||
"❌ FAILED: version_id should not be empty"
|
||||
);
|
||||
|
||||
info!("✅ PASSED: CopyObject correctly returns version_id");
|
||||
}
|
||||
|
||||
/// Test 3: CompleteMultipartUpload should return version_id when versioning is enabled
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_multipart_upload_returns_version_id_with_versioning() {
|
||||
init_logging();
|
||||
info!("🧪 TEST: CompleteMultipartUpload returns version_id with versioning enabled");
|
||||
|
||||
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
|
||||
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
|
||||
|
||||
let client = create_s3_client(&env);
|
||||
let bucket = "test-multipart-version-id";
|
||||
|
||||
create_bucket(&client, bucket).await.expect("Failed to create bucket");
|
||||
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
|
||||
|
||||
let key = "multipart-file.txt";
|
||||
let content = b"Part 1 content for multipart upload test";
|
||||
|
||||
info!("📤 Creating multipart upload for key: {}", key);
|
||||
let create_result = client
|
||||
.create_multipart_upload()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.send()
|
||||
.await
|
||||
.expect("Failed to create multipart upload");
|
||||
|
||||
let upload_id = create_result.upload_id().expect("No upload_id returned");
|
||||
|
||||
info!("📤 Uploading part 1");
|
||||
let upload_part_result = client
|
||||
.upload_part()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.upload_id(upload_id)
|
||||
.part_number(1)
|
||||
.body(ByteStream::from_static(content))
|
||||
.send()
|
||||
.await
|
||||
.expect("Failed to upload part");
|
||||
|
||||
let etag = upload_part_result.e_tag().expect("No etag returned").to_string();
|
||||
|
||||
let completed_part = CompletedPart::builder().part_number(1).e_tag(etag).build();
|
||||
|
||||
let completed_upload = CompletedMultipartUpload::builder().parts(completed_part).build();
|
||||
|
||||
info!("📤 Completing multipart upload");
|
||||
let complete_result = client
|
||||
.complete_multipart_upload()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.upload_id(upload_id)
|
||||
.multipart_upload(completed_upload)
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(complete_result.is_ok(), "CompleteMultipartUpload failed: {:?}", complete_result.err());
|
||||
let output = complete_result.unwrap();
|
||||
|
||||
info!("📥 CompleteMultipartUpload response - version_id: {:?}", output.version_id);
|
||||
assert!(
|
||||
output.version_id.is_some(),
|
||||
"❌ FAILED: version_id should be present when versioning is enabled"
|
||||
);
|
||||
assert!(
|
||||
!output.version_id.as_ref().unwrap().is_empty(),
|
||||
"❌ FAILED: version_id should not be empty"
|
||||
);
|
||||
|
||||
info!("✅ PASSED: CompleteMultipartUpload correctly returns version_id");
|
||||
}
|
||||
|
||||
/// Test 4: PutObject should NOT return version_id when versioning is NOT enabled
|
||||
/// This ensures we didn't break non-versioned buckets
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_put_object_without_versioning() {
|
||||
init_logging();
|
||||
info!("🧪 TEST: PutObject behavior without versioning (no regression)");
|
||||
|
||||
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
|
||||
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
|
||||
|
||||
let client = create_s3_client(&env);
|
||||
let bucket = "test-no-versioning";
|
||||
|
||||
create_bucket(&client, bucket).await.expect("Failed to create bucket");
|
||||
// Note: NOT enabling versioning here
|
||||
|
||||
let key = "test-file.txt";
|
||||
let content = b"Test content without versioning";
|
||||
|
||||
info!("📤 Uploading object to non-versioned bucket");
|
||||
let result = client
|
||||
.put_object()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.body(ByteStream::from_static(content))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(result.is_ok(), "PutObject failed: {:?}", result.err());
|
||||
let output = result.unwrap();
|
||||
|
||||
info!("📥 PutObject response - version_id: {:?}", output.version_id);
|
||||
// version_id can be None or Some("null") for non-versioned buckets
|
||||
info!("✅ PASSED: PutObject works correctly without versioning");
|
||||
}
|
||||
|
||||
/// Test 5: Basic S3 operations still work correctly (no regression)
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_basic_s3_operations_no_regression() {
|
||||
init_logging();
|
||||
info!("🧪 TEST: Basic S3 operations work correctly (no regression)");
|
||||
|
||||
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
|
||||
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
|
||||
|
||||
let client = create_s3_client(&env);
|
||||
let bucket = "test-basic-operations";
|
||||
|
||||
create_bucket(&client, bucket).await.expect("Failed to create bucket");
|
||||
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
|
||||
|
||||
let key = "test-basic-file.txt";
|
||||
let content = b"Basic operations test content";
|
||||
|
||||
// Test PUT
|
||||
info!("📤 Testing PUT operation");
|
||||
let put_result = client
|
||||
.put_object()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.body(ByteStream::from_static(content))
|
||||
.send()
|
||||
.await;
|
||||
assert!(put_result.is_ok(), "PUT operation failed");
|
||||
let _version_id = put_result.unwrap().version_id;
|
||||
|
||||
// Test GET
|
||||
info!("📥 Testing GET operation");
|
||||
let get_result = client.get_object().bucket(bucket).key(key).send().await;
|
||||
assert!(get_result.is_ok(), "GET operation failed");
|
||||
let body = get_result.unwrap().body.collect().await.unwrap().to_vec();
|
||||
assert_eq!(body, content, "Content mismatch after GET");
|
||||
|
||||
// Test HEAD
|
||||
info!("📋 Testing HEAD operation");
|
||||
let head_result = client.head_object().bucket(bucket).key(key).send().await;
|
||||
assert!(head_result.is_ok(), "HEAD operation failed");
|
||||
|
||||
// Test LIST
|
||||
info!("📝 Testing LIST operation");
|
||||
let list_result = client.list_objects_v2().bucket(bucket).send().await;
|
||||
assert!(list_result.is_ok(), "LIST operation failed");
|
||||
let list_output = list_result.unwrap();
|
||||
let objects = list_output.contents();
|
||||
assert!(objects.iter().any(|obj| obj.key() == Some(key)), "Object not found in LIST");
|
||||
|
||||
// Test DELETE
|
||||
info!("🗑️ Testing DELETE operation");
|
||||
let delete_result = client.delete_object().bucket(bucket).key(key).send().await;
|
||||
assert!(delete_result.is_ok(), "DELETE operation failed");
|
||||
|
||||
// Verify the object is deleted (should return NoSuchKey or a delete marker)
|
||||
let get_after_delete = client.get_object().bucket(bucket).key(key).send().await;
|
||||
assert!(
|
||||
get_after_delete.is_err() || get_after_delete.unwrap().delete_marker == Some(true),
|
||||
"Object should be deleted or have delete marker"
|
||||
);
|
||||
|
||||
info!("✅ PASSED: All basic S3 operations work correctly");
|
||||
}
|
||||
|
||||
/// Test 6: Veeam-specific scenario simulation
|
||||
/// Simulates the exact workflow that Veeam uses when backing up data
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_veeam_backup_workflow_simulation() {
|
||||
init_logging();
|
||||
info!("🧪 TEST: Veeam VBR backup workflow simulation (Issue #1066)");
|
||||
|
||||
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
|
||||
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
|
||||
|
||||
let client = create_s3_client(&env);
|
||||
let bucket = "veeam-backup-test";
|
||||
|
||||
create_bucket(&client, bucket).await.expect("Failed to create bucket");
|
||||
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
|
||||
|
||||
// Veeam typically creates multiple objects in a backup session
|
||||
let test_paths = vec![
|
||||
"Veeam/Backup/Clients/test-client-id/test-backup-id/CloudStg/Meta/Blocks/History/CheckpointHistory.dat",
|
||||
"Veeam/Backup/Clients/test-client-id/test-backup-id/Metadata/Lock/create.checkpoint/declare",
|
||||
];
|
||||
|
||||
for path in test_paths {
|
||||
info!("📤 Simulating Veeam upload to: {}", path);
|
||||
let content = format!("Veeam backup data for {}", path);
|
||||
|
||||
let put_result = client
|
||||
.put_object()
|
||||
.bucket(bucket)
|
||||
.key(path)
|
||||
.body(ByteStream::from(content.into_bytes()))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(put_result.is_ok(), "Veeam upload failed for path: {}", path);
|
||||
let output = put_result.unwrap();
|
||||
|
||||
info!("📥 Response version_id: {:?}", output.version_id);
|
||||
assert!(output.version_id.is_some(), "❌ FAILED: Veeam expects version_id for path: {}", path);
|
||||
assert!(
|
||||
!output.version_id.as_ref().unwrap().is_empty(),
|
||||
"❌ FAILED: version_id should not be empty for path: {}",
|
||||
path
|
||||
);
|
||||
|
||||
info!("✅ Veeam upload successful with version_id for: {}", path);
|
||||
}
|
||||
|
||||
info!("✅ PASSED: Veeam backup workflow simulation completed successfully");
|
||||
}
|
||||
}
|
||||
@@ -34,12 +34,19 @@ workspace = true
|
||||
default = []
|
||||
|
||||
[dependencies]
|
||||
rustfs-filemeta.workspace = true
|
||||
rustfs-utils = { workspace = true, features = ["full"] }
|
||||
rustfs-rio.workspace = true
|
||||
rustfs-signer.workspace = true
|
||||
rustfs-checksums.workspace = true
|
||||
rustfs-config = { workspace = true, features = ["constants", "notify", "audit"] }
|
||||
rustfs-credentials = { workspace = true }
|
||||
rustfs-common.workspace = true
|
||||
rustfs-policy.workspace = true
|
||||
rustfs-protos.workspace = true
|
||||
async-trait.workspace = true
|
||||
bytes.workspace = true
|
||||
byteorder = { workspace = true }
|
||||
rustfs-common.workspace = true
|
||||
rustfs-policy.workspace = true
|
||||
chrono.workspace = true
|
||||
glob = { workspace = true }
|
||||
thiserror.workspace = true
|
||||
@@ -60,7 +67,6 @@ lazy_static.workspace = true
|
||||
rustfs-lock.workspace = true
|
||||
regex = { workspace = true }
|
||||
path-absolutize = { workspace = true }
|
||||
rustfs-protos.workspace = true
|
||||
rmp.workspace = true
|
||||
rmp-serde.workspace = true
|
||||
tokio-util = { workspace = true, features = ["io", "compat"] }
|
||||
@@ -91,11 +97,6 @@ aws-sdk-s3 = { workspace = true }
|
||||
urlencoding = { workspace = true }
|
||||
smallvec = { workspace = true }
|
||||
shadow-rs.workspace = true
|
||||
rustfs-filemeta.workspace = true
|
||||
rustfs-utils = { workspace = true, features = ["full"] }
|
||||
rustfs-rio.workspace = true
|
||||
rustfs-signer.workspace = true
|
||||
rustfs-checksums.workspace = true
|
||||
async-recursion.workspace = true
|
||||
aws-credential-types = { workspace = true }
|
||||
aws-smithy-types = { workspace = true }
|
||||
@@ -113,6 +114,7 @@ faster-hex = { workspace = true }
|
||||
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
|
||||
criterion = { workspace = true, features = ["html_reports"] }
|
||||
temp-env = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
|
||||
[build-dependencies]
|
||||
shadow-rs = { workspace = true, features = ["build", "metadata"] }
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
# ECStore - Erasure Coding Storage
|
||||
|
||||
ECStore provides erasure coding functionality for the RustFS project, using high-performance Reed-Solomon SIMD implementation for optimal performance.
|
||||
|
||||
## Features
|
||||
|
||||
- **Reed-Solomon Implementation**: High-performance SIMD-optimized erasure coding
|
||||
- **Cross-Platform Compatibility**: Support for x86_64, aarch64, and other architectures
|
||||
- **Performance Optimized**: SIMD instructions for maximum throughput
|
||||
- **Thread Safety**: Safe concurrent access with caching optimizations
|
||||
- **Scalable**: Excellent performance for high-throughput scenarios
|
||||
|
||||
## Documentation
|
||||
|
||||
For complete documentation, examples, and usage information, please visit the main [RustFS repository](https://github.com/rustfs/rustfs).
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under the Apache License, Version 2.0.
|
||||
@@ -23,7 +23,7 @@ use crate::{
|
||||
};
|
||||
|
||||
use crate::data_usage::load_data_usage_cache;
|
||||
use rustfs_common::{globals::GLOBAL_LOCAL_NODE_NAME, heal_channel::DriveState};
|
||||
use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, heal_channel::DriveState};
|
||||
use rustfs_madmin::{
|
||||
BackendDisks, Disk, ErasureSetInfo, ITEM_INITIALIZING, ITEM_OFFLINE, ITEM_ONLINE, InfoMessage, ServerProperties,
|
||||
};
|
||||
|
||||
@@ -16,7 +16,7 @@ use crate::disk::error::DiskError;
|
||||
use crate::disk::{self, DiskAPI, DiskStore, WalkDirOptions};
|
||||
use futures::future::join_all;
|
||||
use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetacacheReader, is_io_eof};
|
||||
use std::{future::Future, pin::Pin, sync::Arc};
|
||||
use std::{future::Future, pin::Pin};
|
||||
use tokio::spawn;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{error, info, warn};
|
||||
@@ -71,14 +71,14 @@ pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> d
|
||||
|
||||
let mut jobs: Vec<tokio::task::JoinHandle<std::result::Result<(), DiskError>>> = Vec::new();
|
||||
let mut readers = Vec::with_capacity(opts.disks.len());
|
||||
let fds = Arc::new(opts.fallback_disks.clone());
|
||||
let fds = opts.fallback_disks.iter().flatten().cloned().collect::<Vec<_>>();
|
||||
|
||||
let cancel_rx = CancellationToken::new();
|
||||
|
||||
for disk in opts.disks.iter() {
|
||||
let opdisk = disk.clone();
|
||||
let opts_clone = opts.clone();
|
||||
let fds_clone = fds.clone();
|
||||
let mut fds_clone = fds.clone();
|
||||
let cancel_rx_clone = cancel_rx.clone();
|
||||
let (rd, mut wr) = tokio::io::duplex(64);
|
||||
readers.push(MetacacheReader::new(rd));
|
||||
@@ -113,21 +113,20 @@ pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> d
|
||||
}
|
||||
|
||||
while need_fallback {
|
||||
// warn!("list_path_raw: while need_fallback start");
|
||||
let disk = match fds_clone.iter().find(|d| d.is_some()) {
|
||||
Some(d) => {
|
||||
if let Some(disk) = d.clone() {
|
||||
disk
|
||||
} else {
|
||||
warn!("list_path_raw: fallback disk is none");
|
||||
break;
|
||||
}
|
||||
}
|
||||
None => {
|
||||
warn!("list_path_raw: fallback disk is none2");
|
||||
break;
|
||||
let disk_op = {
|
||||
if fds_clone.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let disk = fds_clone.remove(0);
|
||||
if disk.is_online().await { Some(disk.clone()) } else { None }
|
||||
}
|
||||
};
|
||||
|
||||
let Some(disk) = disk_op else {
|
||||
warn!("list_path_raw: fallback disk is none");
|
||||
break;
|
||||
};
|
||||
|
||||
match disk
|
||||
.as_ref()
|
||||
.walk_dir(
|
||||
|
||||
@@ -1,350 +0,0 @@
|
||||
#![allow(clippy::map_entry)]
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_variables)]
|
||||
#![allow(unused_mut)]
|
||||
#![allow(unused_assignments)]
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use rustfs_checksums::ChecksumAlgorithm;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::client::{api_put_object::PutObjectOptions, api_s3_datatypes::ObjectPart};
|
||||
use crate::{disk::DiskAPI, store_api::GetObjectReader};
|
||||
use rustfs_utils::crypto::{base64_decode, base64_encode};
|
||||
use s3s::header::{
|
||||
X_AMZ_CHECKSUM_ALGORITHM, X_AMZ_CHECKSUM_CRC32, X_AMZ_CHECKSUM_CRC32C, X_AMZ_CHECKSUM_SHA1, X_AMZ_CHECKSUM_SHA256,
|
||||
};
|
||||
|
||||
use enumset::{EnumSet, EnumSetType, enum_set};
|
||||
|
||||
#[derive(Debug, EnumSetType, Default)]
|
||||
#[enumset(repr = "u8")]
|
||||
pub enum ChecksumMode {
|
||||
#[default]
|
||||
ChecksumNone,
|
||||
ChecksumSHA256,
|
||||
ChecksumSHA1,
|
||||
ChecksumCRC32,
|
||||
ChecksumCRC32C,
|
||||
ChecksumCRC64NVME,
|
||||
ChecksumFullObject,
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref C_ChecksumMask: EnumSet<ChecksumMode> = {
|
||||
let mut s = EnumSet::all();
|
||||
s.remove(ChecksumMode::ChecksumFullObject);
|
||||
s
|
||||
};
|
||||
static ref C_ChecksumFullObjectCRC32: EnumSet<ChecksumMode> =
|
||||
enum_set!(ChecksumMode::ChecksumCRC32 | ChecksumMode::ChecksumFullObject);
|
||||
static ref C_ChecksumFullObjectCRC32C: EnumSet<ChecksumMode> =
|
||||
enum_set!(ChecksumMode::ChecksumCRC32C | ChecksumMode::ChecksumFullObject);
|
||||
}
|
||||
const AMZ_CHECKSUM_CRC64NVME: &str = "x-amz-checksum-crc64nvme";
|
||||
|
||||
impl ChecksumMode {
|
||||
//pub const CRC64_NVME_POLYNOMIAL: i64 = 0xad93d23594c93659;
|
||||
|
||||
pub fn base(&self) -> ChecksumMode {
|
||||
let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
|
||||
match s.as_u8() {
|
||||
1_u8 => ChecksumMode::ChecksumNone,
|
||||
2_u8 => ChecksumMode::ChecksumSHA256,
|
||||
4_u8 => ChecksumMode::ChecksumSHA1,
|
||||
8_u8 => ChecksumMode::ChecksumCRC32,
|
||||
16_u8 => ChecksumMode::ChecksumCRC32C,
|
||||
32_u8 => ChecksumMode::ChecksumCRC64NVME,
|
||||
_ => panic!("enum err."),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is(&self, t: ChecksumMode) -> bool {
|
||||
*self & t == t
|
||||
}
|
||||
|
||||
pub fn key(&self) -> String {
|
||||
//match c & checksumMask {
|
||||
match self {
|
||||
ChecksumMode::ChecksumCRC32 => {
|
||||
return X_AMZ_CHECKSUM_CRC32.to_string();
|
||||
}
|
||||
ChecksumMode::ChecksumCRC32C => {
|
||||
return X_AMZ_CHECKSUM_CRC32C.to_string();
|
||||
}
|
||||
ChecksumMode::ChecksumSHA1 => {
|
||||
return X_AMZ_CHECKSUM_SHA1.to_string();
|
||||
}
|
||||
ChecksumMode::ChecksumSHA256 => {
|
||||
return X_AMZ_CHECKSUM_SHA256.to_string();
|
||||
}
|
||||
ChecksumMode::ChecksumCRC64NVME => {
|
||||
return AMZ_CHECKSUM_CRC64NVME.to_string();
|
||||
}
|
||||
_ => {
|
||||
return "".to_string();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn can_composite(&self) -> bool {
|
||||
let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
|
||||
match s.as_u8() {
|
||||
2_u8 => true,
|
||||
4_u8 => true,
|
||||
8_u8 => true,
|
||||
16_u8 => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn can_merge_crc(&self) -> bool {
|
||||
let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
|
||||
match s.as_u8() {
|
||||
8_u8 => true,
|
||||
16_u8 => true,
|
||||
32_u8 => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn full_object_requested(&self) -> bool {
|
||||
let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
|
||||
match s.as_u8() {
|
||||
//C_ChecksumFullObjectCRC32 as u8 => true,
|
||||
//C_ChecksumFullObjectCRC32C as u8 => true,
|
||||
32_u8 => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn key_capitalized(&self) -> String {
|
||||
self.key()
|
||||
}
|
||||
|
||||
pub fn raw_byte_len(&self) -> usize {
|
||||
let u = EnumSet::from(*self).intersection(*C_ChecksumMask).as_u8();
|
||||
if u == ChecksumMode::ChecksumCRC32 as u8 || u == ChecksumMode::ChecksumCRC32C as u8 {
|
||||
4
|
||||
} else if u == ChecksumMode::ChecksumSHA1 as u8 {
|
||||
use sha1::Digest;
|
||||
sha1::Sha1::output_size() as usize
|
||||
} else if u == ChecksumMode::ChecksumSHA256 as u8 {
|
||||
use sha2::Digest;
|
||||
sha2::Sha256::output_size() as usize
|
||||
} else if u == ChecksumMode::ChecksumCRC64NVME as u8 {
|
||||
8
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hasher(&self) -> Result<Box<dyn rustfs_checksums::http::HttpChecksum>, std::io::Error> {
|
||||
match /*C_ChecksumMask & **/self {
|
||||
ChecksumMode::ChecksumCRC32 => {
|
||||
return Ok(ChecksumAlgorithm::Crc32.into_impl());
|
||||
}
|
||||
ChecksumMode::ChecksumCRC32C => {
|
||||
return Ok(ChecksumAlgorithm::Crc32c.into_impl());
|
||||
}
|
||||
ChecksumMode::ChecksumSHA1 => {
|
||||
return Ok(ChecksumAlgorithm::Sha1.into_impl());
|
||||
}
|
||||
ChecksumMode::ChecksumSHA256 => {
|
||||
return Ok(ChecksumAlgorithm::Sha256.into_impl());
|
||||
}
|
||||
ChecksumMode::ChecksumCRC64NVME => {
|
||||
return Ok(ChecksumAlgorithm::Crc64Nvme.into_impl());
|
||||
}
|
||||
_ => return Err(std::io::Error::other("unsupported checksum type")),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_set(&self) -> bool {
|
||||
let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
|
||||
s.len() == 1
|
||||
}
|
||||
|
||||
pub fn set_default(&mut self, t: ChecksumMode) {
|
||||
if !self.is_set() {
|
||||
*self = t;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn encode_to_string(&self, b: &[u8]) -> Result<String, std::io::Error> {
|
||||
if !self.is_set() {
|
||||
return Ok("".to_string());
|
||||
}
|
||||
let mut h = self.hasher()?;
|
||||
h.update(b);
|
||||
let hash = h.finalize();
|
||||
Ok(base64_encode(hash.as_ref()))
|
||||
}
|
||||
|
||||
pub fn to_string(&self) -> String {
|
||||
//match c & checksumMask {
|
||||
match self {
|
||||
ChecksumMode::ChecksumCRC32 => {
|
||||
return "CRC32".to_string();
|
||||
}
|
||||
ChecksumMode::ChecksumCRC32C => {
|
||||
return "CRC32C".to_string();
|
||||
}
|
||||
ChecksumMode::ChecksumSHA1 => {
|
||||
return "SHA1".to_string();
|
||||
}
|
||||
ChecksumMode::ChecksumSHA256 => {
|
||||
return "SHA256".to_string();
|
||||
}
|
||||
ChecksumMode::ChecksumNone => {
|
||||
return "".to_string();
|
||||
}
|
||||
ChecksumMode::ChecksumCRC64NVME => {
|
||||
return "CRC64NVME".to_string();
|
||||
}
|
||||
_ => {
|
||||
return "<invalid>".to_string();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// pub fn check_sum_reader(&self, r: GetObjectReader) -> Result<Checksum, std::io::Error> {
|
||||
// let mut h = self.hasher()?;
|
||||
// Ok(Checksum::new(self.clone(), h.sum().as_bytes()))
|
||||
// }
|
||||
|
||||
// pub fn check_sum_bytes(&self, b: &[u8]) -> Result<Checksum, std::io::Error> {
|
||||
// let mut h = self.hasher()?;
|
||||
// Ok(Checksum::new(self.clone(), h.sum().as_bytes()))
|
||||
// }
|
||||
|
||||
pub fn composite_checksum(&self, p: &mut [ObjectPart]) -> Result<Checksum, std::io::Error> {
|
||||
if !self.can_composite() {
|
||||
return Err(std::io::Error::other("cannot do composite checksum"));
|
||||
}
|
||||
p.sort_by(|i, j| {
|
||||
if i.part_num < j.part_num {
|
||||
std::cmp::Ordering::Less
|
||||
} else if i.part_num > j.part_num {
|
||||
std::cmp::Ordering::Greater
|
||||
} else {
|
||||
std::cmp::Ordering::Equal
|
||||
}
|
||||
});
|
||||
let c = self.base();
|
||||
let crc_bytes = Vec::<u8>::with_capacity(p.len() * self.raw_byte_len() as usize);
|
||||
let mut h = self.hasher()?;
|
||||
h.update(crc_bytes.as_ref());
|
||||
let hash = h.finalize();
|
||||
Ok(Checksum {
|
||||
checksum_type: self.clone(),
|
||||
r: hash.as_ref().to_vec(),
|
||||
computed: false,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn full_object_checksum(&self, p: &mut [ObjectPart]) -> Result<Checksum, std::io::Error> {
|
||||
todo!();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Checksum {
|
||||
checksum_type: ChecksumMode,
|
||||
r: Vec<u8>,
|
||||
computed: bool,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl Checksum {
|
||||
fn new(t: ChecksumMode, b: &[u8]) -> Checksum {
|
||||
if t.is_set() && b.len() == t.raw_byte_len() {
|
||||
return Checksum {
|
||||
checksum_type: t,
|
||||
r: b.to_vec(),
|
||||
computed: false,
|
||||
};
|
||||
}
|
||||
Checksum::default()
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn new_checksum_string(t: ChecksumMode, s: &str) -> Result<Checksum, std::io::Error> {
|
||||
let b = match base64_decode(s.as_bytes()) {
|
||||
Ok(b) => b,
|
||||
Err(err) => return Err(std::io::Error::other(err.to_string())),
|
||||
};
|
||||
if t.is_set() && b.len() == t.raw_byte_len() {
|
||||
return Ok(Checksum {
|
||||
checksum_type: t,
|
||||
r: b,
|
||||
computed: false,
|
||||
});
|
||||
}
|
||||
Ok(Checksum::default())
|
||||
}
|
||||
|
||||
fn is_set(&self) -> bool {
|
||||
self.checksum_type.is_set() && self.r.len() == self.checksum_type.raw_byte_len()
|
||||
}
|
||||
|
||||
fn encoded(&self) -> String {
|
||||
if !self.is_set() {
|
||||
return "".to_string();
|
||||
}
|
||||
base64_encode(&self.r)
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn raw(&self) -> Option<Vec<u8>> {
|
||||
if !self.is_set() {
|
||||
return None;
|
||||
}
|
||||
Some(self.r.clone())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_auto_checksum_headers(opts: &mut PutObjectOptions) {
|
||||
opts.user_metadata
|
||||
.insert("X-Amz-Checksum-Algorithm".to_string(), opts.auto_checksum.to_string());
|
||||
if opts.auto_checksum.full_object_requested() {
|
||||
opts.user_metadata
|
||||
.insert("X-Amz-Checksum-Type".to_string(), "FULL_OBJECT".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
pub fn apply_auto_checksum(opts: &mut PutObjectOptions, all_parts: &mut [ObjectPart]) -> Result<(), std::io::Error> {
|
||||
if opts.auto_checksum.can_composite() && !opts.auto_checksum.is(ChecksumMode::ChecksumFullObject) {
|
||||
let crc = opts.auto_checksum.composite_checksum(all_parts)?;
|
||||
opts.user_metadata = {
|
||||
let mut hm = HashMap::new();
|
||||
hm.insert(opts.auto_checksum.key(), crc.encoded());
|
||||
hm
|
||||
}
|
||||
} else if opts.auto_checksum.can_merge_crc() {
|
||||
let crc = opts.auto_checksum.full_object_checksum(all_parts)?;
|
||||
opts.user_metadata = {
|
||||
let mut hm = HashMap::new();
|
||||
hm.insert(opts.auto_checksum.key_capitalized(), crc.encoded());
|
||||
hm.insert("X-Amz-Checksum-Type".to_string(), "FULL_OBJECT".to_string());
|
||||
hm
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
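For orientation, a minimal sketch of how the checksum helpers above fit together; this module is removed in this range, the names come from the code above and the call flow itself is illustrative only.

// Illustrative only: pick a default algorithm, then derive the header key and an
// encoded digest for a payload. set_default() is a no-op when a mode is already set.
let mut mode = ChecksumMode::ChecksumNone;
mode.set_default(ChecksumMode::ChecksumCRC32C);
assert!(mode.is_set());
let header_key = mode.key();                       // the matching x-amz-checksum-* header name
let header_val = mode.encode_to_string(b"hello")?; // base64 of the 4-byte CRC32C digest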
@@ -1,270 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// use crate::error::StdError;
|
||||
// use bytes::Bytes;
|
||||
// use futures::pin_mut;
|
||||
// use futures::stream::{Stream, StreamExt};
|
||||
// use std::future::Future;
|
||||
// use std::pin::Pin;
|
||||
// use std::task::{Context, Poll};
|
||||
// use transform_stream::AsyncTryStream;
|
||||
|
||||
// pub type SyncBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + Sync + 'a>>;
|
||||
|
||||
// pub struct ChunkedStream<'a> {
|
||||
// /// inner
|
||||
// inner: AsyncTryStream<Bytes, StdError, SyncBoxFuture<'a, Result<(), StdError>>>,
|
||||
|
||||
// remaining_length: usize,
|
||||
// }
|
||||
|
||||
// impl<'a> ChunkedStream<'a> {
|
||||
// pub fn new<S>(body: S, content_length: usize, chunk_size: usize, need_padding: bool) -> Self
|
||||
// where
|
||||
// S: Stream<Item = Result<Bytes, StdError>> + Send + Sync + 'a,
|
||||
// {
|
||||
// let inner = AsyncTryStream::<_, _, SyncBoxFuture<'a, Result<(), StdError>>>::new(|mut y| {
|
||||
// #[allow(clippy::shadow_same)] // necessary for `pin_mut!`
|
||||
// Box::pin(async move {
|
||||
// pin_mut!(body);
|
||||
// // Data left over from the previous call
|
||||
// let mut prev_bytes = Bytes::new();
|
||||
// let mut read_size = 0;
|
||||
|
||||
// loop {
|
||||
// let data: Vec<Bytes> = {
|
||||
// // Read a fixed-size chunk
|
||||
// match Self::read_data(body.as_mut(), prev_bytes, chunk_size).await {
|
||||
// None => break,
|
||||
// Some(Err(e)) => return Err(e),
|
||||
// Some(Ok((data, remaining_bytes))) => {
|
||||
// // debug!(
|
||||
// // "content_length:{},read_size:{}, read_data data:{}, remaining_bytes: {} ",
|
||||
// // content_length,
|
||||
// // read_size,
|
||||
// // data.len(),
|
||||
// // remaining_bytes.len()
|
||||
// // );
|
||||
|
||||
// prev_bytes = remaining_bytes;
|
||||
// data
|
||||
// }
|
||||
// }
|
||||
// };
|
||||
|
||||
// for bytes in data {
|
||||
// read_size += bytes.len();
|
||||
// // debug!("read_size {}, content_length {}", read_size, content_length,);
|
||||
// y.yield_ok(bytes).await;
|
||||
// }
|
||||
|
||||
// if read_size + prev_bytes.len() >= content_length {
|
||||
// // debug!(
|
||||
// // "Finished reading: read_size:{} + prev_bytes.len({}) == content_length {}",
|
||||
// // read_size,
|
||||
// // prev_bytes.len(),
|
||||
// // content_length,
|
||||
// // );
|
||||
|
||||
// // Pad with zeros?
|
||||
// if !need_padding {
|
||||
// y.yield_ok(prev_bytes).await;
|
||||
// break;
|
||||
// }
|
||||
|
||||
// let mut bytes = vec![0u8; chunk_size];
|
||||
// let (left, _) = bytes.split_at_mut(prev_bytes.len());
|
||||
// left.copy_from_slice(&prev_bytes);
|
||||
|
||||
// y.yield_ok(Bytes::from(bytes)).await;
|
||||
|
||||
// break;
|
||||
// }
|
||||
// }
|
||||
|
||||
// // debug!("chunked stream exit");
|
||||
|
||||
// Ok(())
|
||||
// })
|
||||
// });
|
||||
// Self {
|
||||
// inner,
|
||||
// remaining_length: content_length,
|
||||
// }
|
||||
// }
|
||||
// /// read data and return remaining bytes
|
||||
// async fn read_data<S>(
|
||||
// mut body: Pin<&mut S>,
|
||||
// prev_bytes: Bytes,
|
||||
// data_size: usize,
|
||||
// ) -> Option<Result<(Vec<Bytes>, Bytes), StdError>>
|
||||
// where
|
||||
// S: Stream<Item = Result<Bytes, StdError>> + Send,
|
||||
// {
|
||||
// let mut bytes_buffer = Vec::new();
|
||||
|
||||
// // Run only once
|
||||
// let mut push_data_bytes = |mut bytes: Bytes| {
|
||||
// // debug!("read from body {} split per {}, prev_bytes: {}", bytes.len(), data_size, prev_bytes.len());
|
||||
|
||||
// if bytes.is_empty() {
|
||||
// return None;
|
||||
// }
|
||||
|
||||
// if data_size == 0 {
|
||||
// return Some(bytes);
|
||||
// }
|
||||
|
||||
// // Merge with the previous data
|
||||
// if !prev_bytes.is_empty() {
|
||||
// let need_size = data_size.wrapping_sub(prev_bytes.len());
|
||||
// // debug!(
|
||||
// // "Previous leftover {}, take {} now, total: {}",
|
||||
// // prev_bytes.len(),
|
||||
// // need_size,
|
||||
// // prev_bytes.len() + need_size
|
||||
// // );
|
||||
// if bytes.len() >= need_size {
|
||||
// let data = bytes.split_to(need_size);
|
||||
// let mut combined = Vec::new();
|
||||
// combined.extend_from_slice(&prev_bytes);
|
||||
// combined.extend_from_slice(&data);
|
||||
|
||||
// // debug!(
|
||||
// // "Fetched more bytes than needed: {}, merged result {}, remaining bytes {}",
|
||||
// // need_size,
|
||||
// // combined.len(),
|
||||
// // bytes.len(),
|
||||
// // );
|
||||
|
||||
// bytes_buffer.push(Bytes::from(combined));
|
||||
// } else {
|
||||
// let mut combined = Vec::new();
|
||||
// combined.extend_from_slice(&prev_bytes);
|
||||
// combined.extend_from_slice(&bytes);
|
||||
|
||||
// // debug!(
|
||||
// // "Fetched fewer bytes than needed: {}, merged result {}, remaining bytes {}, return immediately",
|
||||
// // need_size,
|
||||
// // combined.len(),
|
||||
// // bytes.len(),
|
||||
// // );
|
||||
|
||||
// return Some(Bytes::from(combined));
|
||||
// }
|
||||
// }
|
||||
|
||||
// // If the fetched data exceeds the chunk, slice the required size
|
||||
// if data_size <= bytes.len() {
|
||||
// let n = bytes.len() / data_size;
|
||||
|
||||
// for _ in 0..n {
|
||||
// let data = bytes.split_to(data_size);
|
||||
|
||||
// // println!("bytes_buffer.push: {}, remaining: {}", data.len(), bytes.len());
|
||||
// bytes_buffer.push(data);
|
||||
// }
|
||||
|
||||
// Some(bytes)
|
||||
// } else {
|
||||
// // Insufficient data
|
||||
// Some(bytes)
|
||||
// }
|
||||
// };
|
||||
|
||||
// // Remaining data
|
||||
// let remaining_bytes = 'outer: {
|
||||
// // // Exit if the previous data was sufficient
|
||||
// // if let Some(remaining_bytes) = push_data_bytes(prev_bytes) {
|
||||
// // println!("Consuming leftovers");
|
||||
// // break 'outer remaining_bytes;
|
||||
// // }
|
||||
|
||||
// loop {
|
||||
// match body.next().await? {
|
||||
// Err(e) => return Some(Err(e)),
|
||||
// Ok(bytes) => {
|
||||
// if let Some(remaining_bytes) = push_data_bytes(bytes) {
|
||||
// break 'outer remaining_bytes;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// };
|
||||
|
||||
// Some(Ok((bytes_buffer, remaining_bytes)))
|
||||
// }
|
||||
|
||||
// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, StdError>>> {
|
||||
// let ans = Pin::new(&mut self.inner).poll_next(cx);
|
||||
// if let Poll::Ready(Some(Ok(ref bytes))) = ans {
|
||||
// self.remaining_length = self.remaining_length.saturating_sub(bytes.len());
|
||||
// }
|
||||
// ans
|
||||
// }
|
||||
|
||||
// // pub fn exact_remaining_length(&self) -> usize {
|
||||
// // self.remaining_length
|
||||
// // }
|
||||
// }
|
||||
|
||||
// impl Stream for ChunkedStream<'_> {
|
||||
// type Item = Result<Bytes, StdError>;
|
||||
|
||||
// fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
// self.poll(cx)
|
||||
// }
|
||||
|
||||
// fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
// (0, None)
|
||||
// }
|
||||
// }
|
||||
|
||||
// #[cfg(test)]
|
||||
// mod test {
|
||||
|
||||
// use super::*;
|
||||
|
||||
// #[tokio::test]
|
||||
// async fn test_chunked_stream() {
|
||||
// let chunk_size = 4;
|
||||
|
||||
// let data1 = vec![1u8; 7777]; // 65536
|
||||
// let data2 = vec![1u8; 7777]; // 65536
|
||||
|
||||
// let content_length = data1.len() + data2.len();
|
||||
|
||||
// let chunk1 = Bytes::from(data1);
|
||||
// let chunk2 = Bytes::from(data2);
|
||||
|
||||
// let chunk_results: Vec<Result<Bytes, _>> = vec![Ok(chunk1), Ok(chunk2)];
|
||||
|
||||
// let stream = futures::stream::iter(chunk_results);
|
||||
|
||||
// let mut chunked_stream = ChunkedStream::new(stream, content_length, chunk_size, true);
|
||||
|
||||
// loop {
|
||||
// let ans1 = chunked_stream.next().await;
|
||||
// if ans1.is_none() {
|
||||
// break;
|
||||
// }
|
||||
|
||||
// let bytes = ans1.unwrap().unwrap();
|
||||
// assert!(bytes.len() == chunk_size)
|
||||
// }
|
||||
|
||||
// // assert_eq!(ans1.unwrap(), chunk1_data.as_slice());
|
||||
// }
|
||||
// }
|
||||
@@ -1,59 +0,0 @@
|
||||
#![allow(clippy::map_entry)]
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
use crate::{
|
||||
disk::{
|
||||
error::{is_unformatted_disk, DiskError},
|
||||
format::{DistributionAlgoVersion, FormatV3},
|
||||
new_disk, DiskAPI, DiskInfo, DiskOption, DiskStore,
|
||||
},
|
||||
store_api::{
|
||||
BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec,
|
||||
ListMultipartsInfo, ListObjectVersionsInfo, ListObjectsV2Info, MakeBucketOptions, MultipartInfo, MultipartUploadResult,
|
||||
ObjectIO, ObjectInfo, ObjectOptions, ObjectToDelete, PartInfo, PutObjReader, StorageAPI,
|
||||
},
|
||||
credentials::{Credentials, SignatureType,},
|
||||
api_put_object_multipart::UploadPartParams,
|
||||
};
|
||||
|
||||
use http::HeaderMap;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::warn;
|
||||
use tracing::{error, info};
|
||||
use url::Url;
|
||||
|
||||
struct HookReader {
|
||||
source: GetObjectReader,
|
||||
hook: GetObjectReader,
|
||||
}
|
||||
|
||||
impl HookReader {
|
||||
pub fn new(source: GetObjectReader, hook: GetObjectReader) -> HookReader {
|
||||
HookReader {
|
||||
source,
|
||||
hook,
|
||||
}
|
||||
}
|
||||
|
||||
fn seek(&self, offset: i64, whence: i64) -> Result<i64> {
|
||||
todo!();
|
||||
}
|
||||
|
||||
fn read(&self, b: &[u8]) -> Result<i64> {
|
||||
todo!();
|
||||
}
|
||||
}
|
||||
@@ -132,6 +132,25 @@ pub enum BucketLookupType {
|
||||
BucketLookupPath,
|
||||
}
|
||||
|
||||
fn load_root_store_from_tls_path() -> Option<rustls::RootCertStore> {
|
||||
// Load the root certificate bundle from the path specified by the
|
||||
// RUSTFS_TLS_PATH environment variable.
|
||||
let tp = std::env::var("RUSTFS_TLS_PATH").ok()?;
|
||||
let ca = std::path::Path::new(&tp).join(rustfs_config::RUSTFS_CA_CERT);
|
||||
if !ca.exists() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let der_list = rustfs_utils::load_cert_bundle_der_bytes(ca.to_str().unwrap_or_default()).ok()?;
|
||||
let mut store = rustls::RootCertStore::empty();
|
||||
for der in der_list {
|
||||
if let Err(e) = store.add(der.into()) {
|
||||
warn!("Warning: failed to add certificate from '{}' to root store: {e}", ca.display());
|
||||
}
|
||||
}
|
||||
Some(store)
|
||||
}
|
||||
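A short, hedged sketch of the expected runtime behaviour of the helper above; the concrete directory path is an assumption, only the environment variable and the fallback logic come from this change.

// Assumed layout for illustration: RUSTFS_TLS_PATH=/etc/rustfs/tls with the CA bundle at
// <RUSTFS_TLS_PATH>/<rustfs_config::RUSTFS_CA_CERT>. When the variable is unset or the
// bundle is missing, None is returned and the caller falls back to the native roots.
match load_root_store_from_tls_path() {
    Some(store) => println!("using custom root store with {} certificates", store.len()),
    None => println!("no custom CA bundle found; falling back to native roots"),
}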
|
||||
impl TransitionClient {
|
||||
pub async fn new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
|
||||
let clnt = Self::private_new(endpoint, opts, tier_type).await?;
|
||||
@@ -142,18 +161,22 @@ impl TransitionClient {
|
||||
async fn private_new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
|
||||
let endpoint_url = get_endpoint_url(endpoint, opts.secure)?;
|
||||
|
||||
//#[cfg(feature = "ring")]
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
//#[cfg(feature = "aws-lc-rs")]
|
||||
// let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
|
||||
|
||||
let scheme = endpoint_url.scheme();
|
||||
let client;
|
||||
let tls = rustls::ClientConfig::builder().with_native_roots()?.with_no_client_auth();
|
||||
let tls = if let Some(store) = load_root_store_from_tls_path() {
|
||||
rustls::ClientConfig::builder()
|
||||
.with_root_certificates(store)
|
||||
.with_no_client_auth()
|
||||
} else {
|
||||
rustls::ClientConfig::builder().with_native_roots()?.with_no_client_auth()
|
||||
};
|
||||
|
||||
let https = hyper_rustls::HttpsConnectorBuilder::new()
|
||||
.with_tls_config(tls)
|
||||
.https_or_http()
|
||||
.enable_http1()
|
||||
.enable_http2()
|
||||
.build();
|
||||
client = Client::builder(TokioExecutor::new()).build(https);
|
||||
|
||||
|
||||
770
crates/ecstore/src/disk/disk_store.rs
Normal file
@@ -0,0 +1,770 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::disk::{
|
||||
CheckPartsResp, DeleteOptions, DiskAPI, DiskError, DiskInfo, DiskInfoOptions, DiskLocation, Endpoint, Error,
|
||||
FileInfoVersions, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, Result, UpdateMetadataOpts, VolumeInfo,
|
||||
WalkDirOptions, local::LocalDisk,
|
||||
};
|
||||
use bytes::Bytes;
|
||||
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
|
||||
use rustfs_utils::string::parse_bool_with_default;
|
||||
use std::{
|
||||
path::PathBuf,
|
||||
sync::{
|
||||
Arc,
|
||||
atomic::{AtomicI64, AtomicU32, Ordering},
|
||||
},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::{sync::RwLock, time};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{debug, info, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Disk health status constants
|
||||
const DISK_HEALTH_OK: u32 = 0;
|
||||
const DISK_HEALTH_FAULTY: u32 = 1;
|
||||
|
||||
pub const ENV_RUSTFS_DRIVE_ACTIVE_MONITORING: &str = "RUSTFS_DRIVE_ACTIVE_MONITORING";
|
||||
pub const ENV_RUSTFS_DRIVE_MAX_TIMEOUT_DURATION: &str = "RUSTFS_DRIVE_MAX_TIMEOUT_DURATION";
|
||||
pub const CHECK_EVERY: Duration = Duration::from_secs(15);
|
||||
pub const SKIP_IF_SUCCESS_BEFORE: Duration = Duration::from_secs(5);
|
||||
pub const CHECK_TIMEOUT_DURATION: Duration = Duration::from_secs(5);
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
static ref TEST_OBJ: String = format!("health-check-{}", Uuid::new_v4());
|
||||
static ref TEST_DATA: Bytes = Bytes::from(vec![42u8; 2048]);
|
||||
static ref TEST_BUCKET: String = ".rustfs.sys/tmp".to_string();
|
||||
}
|
||||
|
||||
pub fn get_max_timeout_duration() -> Duration {
|
||||
std::env::var(ENV_RUSTFS_DRIVE_MAX_TIMEOUT_DURATION)
|
||||
.map(|v| Duration::from_secs(v.parse::<u64>().unwrap_or(30)))
|
||||
.unwrap_or(Duration::from_secs(30))
|
||||
}
|
||||
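The two environment variables above drive the health-check machinery introduced below; a brief sketch of the intended semantics, with the values shown being examples rather than documented defaults.

// RUSTFS_DRIVE_ACTIVE_MONITORING ("true"/"false") gates the background writability probe;
// RUSTFS_DRIVE_MAX_TIMEOUT_DURATION is the per-operation timeout in whole seconds.
// With the timeout variable unset or unparsable, the fallback is 30 seconds.
let timeout = get_max_timeout_duration();
assert_eq!(timeout, Duration::from_secs(30)); // holds only when the variable is unset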
|
||||
/// DiskHealthTracker tracks the health status of a disk.
|
||||
/// Similar to Go's diskHealthTracker.
|
||||
#[derive(Debug)]
|
||||
pub struct DiskHealthTracker {
|
||||
/// Atomic timestamp of last successful operation
|
||||
pub last_success: AtomicI64,
|
||||
/// Atomic timestamp of last operation start
|
||||
pub last_started: AtomicI64,
|
||||
/// Atomic disk status (OK or Faulty)
|
||||
pub status: AtomicU32,
|
||||
/// Atomic number of waiting operations
|
||||
pub waiting: AtomicU32,
|
||||
}
|
||||
|
||||
impl DiskHealthTracker {
|
||||
/// Create a new disk health tracker
|
||||
pub fn new() -> Self {
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos() as i64;
|
||||
|
||||
Self {
|
||||
last_success: AtomicI64::new(now),
|
||||
last_started: AtomicI64::new(now),
|
||||
status: AtomicU32::new(DISK_HEALTH_OK),
|
||||
waiting: AtomicU32::new(0),
|
||||
}
|
||||
}
|
||||
|
||||
/// Log a successful operation
|
||||
pub fn log_success(&self) {
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos() as i64;
|
||||
self.last_success.store(now, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Check if disk is faulty
|
||||
pub fn is_faulty(&self) -> bool {
|
||||
self.status.load(Ordering::Relaxed) == DISK_HEALTH_FAULTY
|
||||
}
|
||||
|
||||
/// Set disk as faulty
|
||||
pub fn set_faulty(&self) {
|
||||
self.status.store(DISK_HEALTH_FAULTY, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Set disk as OK
|
||||
pub fn set_ok(&self) {
|
||||
self.status.store(DISK_HEALTH_OK, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub fn swap_ok_to_faulty(&self) -> bool {
|
||||
self.status
|
||||
.compare_exchange(DISK_HEALTH_OK, DISK_HEALTH_FAULTY, Ordering::Relaxed, Ordering::Relaxed)
|
||||
.is_ok()
|
||||
}
|
||||
|
||||
/// Increment waiting operations counter
|
||||
pub fn increment_waiting(&self) {
|
||||
self.waiting.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Decrement waiting operations counter
|
||||
pub fn decrement_waiting(&self) {
|
||||
self.waiting.fetch_sub(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Get waiting operations count
|
||||
pub fn waiting_count(&self) -> u32 {
|
||||
self.waiting.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Get last success timestamp
|
||||
pub fn last_success(&self) -> i64 {
|
||||
self.last_success.load(Ordering::Relaxed)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for DiskHealthTracker {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
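A minimal sketch of the tracker's intended use, built only from the methods defined above; the surrounding scenario is hypothetical.

let health = DiskHealthTracker::new();

// Bracket an I/O attempt with the waiting counter and record the outcome.
health.increment_waiting();
let ok = true; // stand-in for the result of a real disk operation
if ok {
    health.log_success();
} else if health.swap_ok_to_faulty() {
    // first failure flips the status; later callers observe is_faulty() == true
}
health.decrement_waiting();

assert!(!health.is_faulty());
assert_eq!(health.waiting_count(), 0);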
|
||||
/// Health check context key for tracking disk operations
|
||||
#[derive(Debug, Clone)]
|
||||
struct HealthDiskCtxKey;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct HealthDiskCtxValue {
|
||||
last_success: Arc<AtomicI64>,
|
||||
}
|
||||
|
||||
impl HealthDiskCtxValue {
|
||||
fn log_success(&self) {
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos() as i64;
|
||||
self.last_success.store(now, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
/// LocalDiskWrapper wraps a DiskStore with health tracking capabilities.
|
||||
/// This is similar to Go's xlStorageDiskIDCheck.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct LocalDiskWrapper {
|
||||
/// The underlying disk store
|
||||
disk: Arc<LocalDisk>,
|
||||
/// Health tracker
|
||||
health: Arc<DiskHealthTracker>,
|
||||
/// Whether health checking is enabled
|
||||
health_check: bool,
|
||||
/// Cancellation token for monitoring tasks
|
||||
cancel_token: CancellationToken,
|
||||
/// Disk ID for stale checking
|
||||
disk_id: Arc<RwLock<Option<Uuid>>>,
|
||||
}
|
||||
|
||||
impl LocalDiskWrapper {
|
||||
/// Create a new LocalDiskWrapper
|
||||
pub fn new(disk: Arc<LocalDisk>, health_check: bool) -> Self {
|
||||
// Check environment variable for health check override
|
||||
// Default to true if not set, but only enable if both param and env are true
|
||||
let env_health_check = std::env::var(ENV_RUSTFS_DRIVE_ACTIVE_MONITORING)
|
||||
.map(|v| parse_bool_with_default(&v, true))
|
||||
.unwrap_or(true);
|
||||
|
||||
let ret = Self {
|
||||
disk,
|
||||
health: Arc::new(DiskHealthTracker::new()),
|
||||
health_check: health_check && env_health_check,
|
||||
cancel_token: CancellationToken::new(),
|
||||
disk_id: Arc::new(RwLock::new(None)),
|
||||
};
|
||||
|
||||
ret.start_monitoring();
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
pub fn get_disk(&self) -> Arc<LocalDisk> {
|
||||
self.disk.clone()
|
||||
}
|
||||
|
||||
/// Start the disk monitoring if health_check is enabled
|
||||
pub fn start_monitoring(&self) {
|
||||
if self.health_check {
|
||||
let health = Arc::clone(&self.health);
|
||||
let cancel_token = self.cancel_token.clone();
|
||||
let disk = Arc::clone(&self.disk);
|
||||
|
||||
tokio::spawn(async move {
|
||||
Self::monitor_disk_writable(disk, health, cancel_token).await;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop the disk monitoring
|
||||
pub async fn stop_monitoring(&self) {
|
||||
self.cancel_token.cancel();
|
||||
}
|
||||
|
||||
/// Monitor disk writability periodically
|
||||
async fn monitor_disk_writable(disk: Arc<LocalDisk>, health: Arc<DiskHealthTracker>, cancel_token: CancellationToken) {
|
||||
// TODO: config interval
|
||||
|
||||
let mut interval = time::interval(CHECK_EVERY);
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = cancel_token.cancelled() => {
|
||||
return;
|
||||
}
|
||||
_ = interval.tick() => {
|
||||
if cancel_token.is_cancelled() {
|
||||
return;
|
||||
}
|
||||
|
||||
if health.status.load(Ordering::Relaxed) != DISK_HEALTH_OK {
|
||||
continue;
|
||||
}
|
||||
|
||||
let last_success_nanos = health.last_success.load(Ordering::Relaxed);
|
||||
let elapsed = Duration::from_nanos(
|
||||
(std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos() as i64 - last_success_nanos) as u64
|
||||
);
|
||||
|
||||
if elapsed < SKIP_IF_SUCCESS_BEFORE {
|
||||
continue;
|
||||
}
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
|
||||
|
||||
debug!("health check: performing health check");
|
||||
if Self::perform_health_check(disk.clone(), &TEST_BUCKET, &TEST_OBJ, &TEST_DATA, true, CHECK_TIMEOUT_DURATION).await.is_err() && health.swap_ok_to_faulty() {
|
||||
// Health check failed, disk is considered faulty
|
||||
|
||||
health.increment_waiting(); // pairs with the decrement_waiting() in monitor_disk_status once the disk recovers
|
||||
|
||||
let health_clone = Arc::clone(&health);
|
||||
let disk_clone = disk.clone();
|
||||
let cancel_clone = cancel_token.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
Self::monitor_disk_status(disk_clone, health_clone, cancel_clone).await;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform a health check by writing and reading a test file
|
||||
async fn perform_health_check(
|
||||
disk: Arc<LocalDisk>,
|
||||
test_bucket: &str,
|
||||
test_filename: &str,
|
||||
test_data: &Bytes,
|
||||
check_faulty_only: bool,
|
||||
timeout_duration: Duration,
|
||||
) -> Result<()> {
|
||||
// Perform health check with timeout
|
||||
let health_check_result = tokio::time::timeout(timeout_duration, async {
|
||||
// Try to write test data
|
||||
disk.write_all(test_bucket, test_filename, test_data.clone()).await?;
|
||||
|
||||
// Try to read back the data
|
||||
let read_data = disk.read_all(test_bucket, test_filename).await?;
|
||||
|
||||
// Verify data integrity
|
||||
if read_data.len() != test_data.len() {
|
||||
warn!(
|
||||
"health check: test file data length mismatch: expected {} bytes, got {}",
|
||||
test_data.len(),
|
||||
read_data.len()
|
||||
);
|
||||
if check_faulty_only {
|
||||
return Ok(());
|
||||
}
|
||||
return Err(DiskError::FaultyDisk);
|
||||
}
|
||||
|
||||
// Clean up
|
||||
disk.delete(
|
||||
test_bucket,
|
||||
test_filename,
|
||||
DeleteOptions {
|
||||
recursive: false,
|
||||
immediate: false,
|
||||
undo_write: false,
|
||||
old_data_dir: None,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.await;
|
||||
|
||||
match health_check_result {
|
||||
Ok(result) => match result {
|
||||
Ok(()) => Ok(()),
|
||||
Err(e) => {
|
||||
debug!("health check: failed: {:?}", e);
|
||||
|
||||
if e == DiskError::FaultyDisk {
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
if check_faulty_only { Ok(()) } else { Err(e) }
|
||||
}
|
||||
},
|
||||
Err(_) => {
|
||||
// Timeout occurred
|
||||
warn!("health check: timeout after {:?}", timeout_duration);
|
||||
Err(DiskError::FaultyDisk)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Monitor disk status and try to bring it back online
|
||||
async fn monitor_disk_status(disk: Arc<LocalDisk>, health: Arc<DiskHealthTracker>, cancel_token: CancellationToken) {
|
||||
const CHECK_EVERY: Duration = Duration::from_secs(5);
|
||||
|
||||
let mut interval = time::interval(CHECK_EVERY);
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = cancel_token.cancelled() => {
|
||||
return;
|
||||
}
|
||||
_ = interval.tick() => {
|
||||
if cancel_token.is_cancelled() {
|
||||
return;
|
||||
}
|
||||
|
||||
match Self::perform_health_check(disk.clone(), &TEST_BUCKET, &TEST_OBJ, &TEST_DATA, false, CHECK_TIMEOUT_DURATION).await {
|
||||
Ok(_) => {
|
||||
info!("Disk {} is back online", disk.to_string());
|
||||
health.set_ok();
|
||||
health.decrement_waiting();
|
||||
return;
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Disk {} still faulty: {:?}", disk.to_string(), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn check_id(&self, want_id: Option<Uuid>) -> Result<()> {
|
||||
if want_id.is_none() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let stored_disk_id = self.disk.get_disk_id().await?;
|
||||
|
||||
if stored_disk_id != want_id {
|
||||
return Err(Error::other(format!("Disk ID mismatch wanted {:?}, got {:?}", want_id, stored_disk_id)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if disk ID is stale
|
||||
async fn check_disk_stale(&self) -> Result<()> {
|
||||
let Some(current_disk_id) = *self.disk_id.read().await else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let stored_disk_id = match self.disk.get_disk_id().await? {
|
||||
Some(id) => id,
|
||||
None => return Ok(()), // Empty disk ID is allowed during initialization
|
||||
};
|
||||
|
||||
if current_disk_id != stored_disk_id {
|
||||
return Err(DiskError::DiskNotFound);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Set the disk ID
|
||||
pub async fn set_disk_id_internal(&self, id: Option<Uuid>) -> Result<()> {
|
||||
let mut disk_id = self.disk_id.write().await;
|
||||
*disk_id = id;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the current disk ID
|
||||
pub async fn get_current_disk_id(&self) -> Option<Uuid> {
|
||||
*self.disk_id.read().await
|
||||
}
|
||||
|
||||
/// Track disk health for an operation.
|
||||
/// This method should wrap disk operations to ensure health checking.
|
||||
pub async fn track_disk_health<T, F, Fut>(&self, operation: F, timeout_duration: Duration) -> Result<T>
|
||||
where
|
||||
F: FnOnce() -> Fut,
|
||||
Fut: std::future::Future<Output = Result<T>>,
|
||||
{
|
||||
// Check if disk is faulty
|
||||
if self.health.is_faulty() {
|
||||
warn!("disk {} health is faulty, returning error", self.to_string());
|
||||
return Err(DiskError::FaultyDisk);
|
||||
}
|
||||
|
||||
// Check if disk is stale
|
||||
self.check_disk_stale().await?;
|
||||
|
||||
// Record operation start
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos() as i64;
|
||||
self.health.last_started.store(now, Ordering::Relaxed);
|
||||
self.health.increment_waiting();
|
||||
|
||||
if timeout_duration == Duration::ZERO {
|
||||
let result = operation().await;
|
||||
self.health.decrement_waiting();
|
||||
if result.is_ok() {
|
||||
self.health.log_success();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
// Execute the operation with timeout
|
||||
let result = tokio::time::timeout(timeout_duration, operation()).await;
|
||||
|
||||
match result {
|
||||
Ok(operation_result) => {
|
||||
// Log success and decrement waiting counter
|
||||
if operation_result.is_ok() {
|
||||
self.health.log_success();
|
||||
}
|
||||
self.health.decrement_waiting();
|
||||
operation_result
|
||||
}
|
||||
Err(_) => {
|
||||
// Timeout occurred, mark disk as potentially faulty and decrement waiting counter
|
||||
self.health.decrement_waiting();
|
||||
warn!("disk operation timeout after {:?}", timeout_duration);
|
||||
Err(DiskError::other(format!("disk operation timeout after {:?}", timeout_duration)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
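A hedged sketch of how callers are expected to route operations through the wrapper; it mirrors the DiskAPI implementation below, and the setup itself is hypothetical.

// Wrap an existing LocalDisk; `true` requests the background writability probe unless
// RUSTFS_DRIVE_ACTIVE_MONITORING disables it.
let wrapper = LocalDiskWrapper::new(Arc::new(local_disk), true);

// Every tracked call checks is_faulty() and the cached disk ID first, then bounds the
// operation with a timeout; Duration::ZERO means "no timeout" for streaming-style calls.
let volumes = wrapper
    .track_disk_health(|| async { wrapper.get_disk().list_volumes().await }, get_max_timeout_duration())
    .await?;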
|
||||
#[async_trait::async_trait]
|
||||
impl DiskAPI for LocalDiskWrapper {
|
||||
fn to_string(&self) -> String {
|
||||
self.disk.to_string()
|
||||
}
|
||||
|
||||
async fn is_online(&self) -> bool {
|
||||
let Ok(Some(disk_id)) = self.disk.get_disk_id().await else {
|
||||
return false;
|
||||
};
|
||||
|
||||
let Some(current_disk_id) = *self.disk_id.read().await else {
|
||||
return false;
|
||||
};
|
||||
|
||||
current_disk_id == disk_id
|
||||
}
|
||||
|
||||
fn is_local(&self) -> bool {
|
||||
self.disk.is_local()
|
||||
}
|
||||
|
||||
fn host_name(&self) -> String {
|
||||
self.disk.host_name()
|
||||
}
|
||||
|
||||
fn endpoint(&self) -> Endpoint {
|
||||
self.disk.endpoint()
|
||||
}
|
||||
|
||||
async fn close(&self) -> Result<()> {
|
||||
self.stop_monitoring().await;
|
||||
self.disk.close().await
|
||||
}
|
||||
|
||||
async fn get_disk_id(&self) -> Result<Option<Uuid>> {
|
||||
self.disk.get_disk_id().await
|
||||
}
|
||||
|
||||
async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()> {
|
||||
self.set_disk_id_internal(id).await
|
||||
}
|
||||
|
||||
fn path(&self) -> PathBuf {
|
||||
self.disk.path()
|
||||
}
|
||||
|
||||
fn get_disk_location(&self) -> DiskLocation {
|
||||
self.disk.get_disk_location()
|
||||
}
|
||||
|
||||
async fn disk_info(&self, opts: &DiskInfoOptions) -> Result<DiskInfo> {
|
||||
if opts.noop && opts.metrics {
|
||||
let mut info = DiskInfo::default();
|
||||
// Add health metrics
|
||||
info.metrics.total_waiting = self.health.waiting_count();
|
||||
if self.health.is_faulty() {
|
||||
return Err(DiskError::FaultyDisk);
|
||||
}
|
||||
return Ok(info);
|
||||
}
|
||||
|
||||
if self.health.is_faulty() {
|
||||
return Err(DiskError::FaultyDisk);
|
||||
}
|
||||
|
||||
let result = self.disk.disk_info(opts).await?;
|
||||
|
||||
if let Some(current_disk_id) = *self.disk_id.read().await
|
||||
&& Some(current_disk_id) != result.id
|
||||
{
|
||||
return Err(DiskError::DiskNotFound);
|
||||
};
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn make_volume(&self, volume: &str) -> Result<()> {
|
||||
self.track_disk_health(|| async { self.disk.make_volume(volume).await }, get_max_timeout_duration())
|
||||
.await
|
||||
}
|
||||
|
||||
async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()> {
|
||||
self.track_disk_health(|| async { self.disk.make_volumes(volumes).await }, get_max_timeout_duration())
|
||||
.await
|
||||
}
|
||||
|
||||
async fn list_volumes(&self) -> Result<Vec<VolumeInfo>> {
|
||||
self.track_disk_health(|| async { self.disk.list_volumes().await }, Duration::ZERO)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn stat_volume(&self, volume: &str) -> Result<VolumeInfo> {
|
||||
self.track_disk_health(|| async { self.disk.stat_volume(volume).await }, get_max_timeout_duration())
|
||||
.await
|
||||
}
|
||||
|
||||
async fn delete_volume(&self, volume: &str) -> Result<()> {
|
||||
self.track_disk_health(|| async { self.disk.delete_volume(volume).await }, Duration::ZERO)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn walk_dir<W: tokio::io::AsyncWrite + Unpin + Send>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()> {
|
||||
self.track_disk_health(|| async { self.disk.walk_dir(opts, wr).await }, Duration::ZERO)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn delete_version(
|
||||
&self,
|
||||
volume: &str,
|
||||
path: &str,
|
||||
fi: FileInfo,
|
||||
force_del_marker: bool,
|
||||
opts: DeleteOptions,
|
||||
) -> Result<()> {
|
||||
self.track_disk_health(
|
||||
|| async { self.disk.delete_version(volume, path, fi, force_del_marker, opts).await },
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, opts: DeleteOptions) -> Vec<Option<Error>> {
|
||||
// Check if disk is faulty before proceeding
|
||||
if self.health.is_faulty() {
|
||||
return vec![Some(DiskError::FaultyDisk); versions.len()];
|
||||
}
|
||||
|
||||
// Check if disk is stale
|
||||
if let Err(e) = self.check_disk_stale().await {
|
||||
return vec![Some(e); versions.len()];
|
||||
}
|
||||
|
||||
// Record operation start
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos() as i64;
|
||||
self.health.last_started.store(now, Ordering::Relaxed);
|
||||
self.health.increment_waiting();
|
||||
|
||||
// Execute the operation
|
||||
let result = self.disk.delete_versions(volume, versions, opts).await;
|
||||
|
||||
self.health.decrement_waiting();
|
||||
let has_err = result.iter().any(|e| e.is_some());
|
||||
if !has_err {
|
||||
// Log success (the waiting counter was already decremented above)
|
||||
self.health.log_success();
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
|
||||
self.track_disk_health(|| async { self.disk.delete_paths(volume, paths).await }, get_max_timeout_duration())
|
||||
.await
|
||||
}
|
||||
|
||||
async fn write_metadata(&self, org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()> {
|
||||
self.track_disk_health(
|
||||
|| async { self.disk.write_metadata(org_volume, volume, path, fi).await },
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()> {
|
||||
self.track_disk_health(
|
||||
|| async { self.disk.update_metadata(volume, path, fi, opts).await },
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn read_version(
|
||||
&self,
|
||||
org_volume: &str,
|
||||
volume: &str,
|
||||
path: &str,
|
||||
version_id: &str,
|
||||
opts: &ReadOptions,
|
||||
) -> Result<FileInfo> {
|
||||
self.track_disk_health(
|
||||
|| async { self.disk.read_version(org_volume, volume, path, version_id, opts).await },
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result<RawFileInfo> {
|
||||
self.track_disk_health(|| async { self.disk.read_xl(volume, path, read_data).await }, get_max_timeout_duration())
|
||||
.await
|
||||
}
|
||||
|
||||
async fn rename_data(
|
||||
&self,
|
||||
src_volume: &str,
|
||||
src_path: &str,
|
||||
fi: FileInfo,
|
||||
dst_volume: &str,
|
||||
dst_path: &str,
|
||||
) -> Result<RenameDataResp> {
|
||||
self.track_disk_health(
|
||||
|| async { self.disk.rename_data(src_volume, src_path, fi, dst_volume, dst_path).await },
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result<Vec<String>> {
|
||||
self.track_disk_health(
|
||||
|| async { self.disk.list_dir(origvolume, volume, dir_path, count).await },
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn read_file(&self, volume: &str, path: &str) -> Result<crate::disk::FileReader> {
|
||||
self.track_disk_health(|| async { self.disk.read_file(volume, path).await }, get_max_timeout_duration())
|
||||
.await
|
||||
}
|
||||
|
||||
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<crate::disk::FileReader> {
|
||||
self.track_disk_health(
|
||||
|| async { self.disk.read_file_stream(volume, path, offset, length).await },
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn append_file(&self, volume: &str, path: &str) -> Result<crate::disk::FileWriter> {
|
||||
self.track_disk_health(|| async { self.disk.append_file(volume, path).await }, Duration::ZERO)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn create_file(&self, origvolume: &str, volume: &str, path: &str, file_size: i64) -> Result<crate::disk::FileWriter> {
|
||||
self.track_disk_health(
|
||||
|| async { self.disk.create_file(origvolume, volume, path, file_size).await },
|
||||
Duration::ZERO,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn rename_file(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str) -> Result<()> {
|
||||
self.track_disk_health(
|
||||
|| async { self.disk.rename_file(src_volume, src_path, dst_volume, dst_path).await },
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Bytes) -> Result<()> {
|
||||
self.track_disk_health(
|
||||
|| async { self.disk.rename_part(src_volume, src_path, dst_volume, dst_path, meta).await },
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()> {
|
||||
self.track_disk_health(|| async { self.disk.delete(volume, path, opt).await }, get_max_timeout_duration())
|
||||
.await
|
||||
}
|
||||
|
||||
async fn verify_file(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
|
||||
self.track_disk_health(|| async { self.disk.verify_file(volume, path, fi).await }, Duration::ZERO)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn check_parts(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
|
||||
self.track_disk_health(|| async { self.disk.check_parts(volume, path, fi).await }, Duration::ZERO)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn read_parts(&self, bucket: &str, paths: &[String]) -> Result<Vec<ObjectPartInfo>> {
|
||||
self.track_disk_health(|| async { self.disk.read_parts(bucket, paths).await }, Duration::ZERO)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn read_multiple(&self, req: ReadMultipleReq) -> Result<Vec<ReadMultipleResp>> {
|
||||
self.track_disk_health(|| async { self.disk.read_multiple(req).await }, Duration::ZERO)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn write_all(&self, volume: &str, path: &str, data: Bytes) -> Result<()> {
|
||||
self.track_disk_health(|| async { self.disk.write_all(volume, path, data).await }, get_max_timeout_duration())
|
||||
.await
|
||||
}
|
||||
|
||||
async fn read_all(&self, volume: &str, path: &str) -> Result<Bytes> {
|
||||
self.track_disk_health(|| async { self.disk.read_all(volume, path).await }, get_max_timeout_duration())
|
||||
.await
|
||||
}
|
||||
}
|
||||
@@ -69,7 +69,7 @@ use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, info, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct FormatInfo {
|
||||
pub id: Option<Uuid>,
|
||||
pub data: Bytes,
|
||||
@@ -77,16 +77,6 @@ pub struct FormatInfo {
|
||||
pub last_check: Option<OffsetDateTime>,
|
||||
}
|
||||
|
||||
impl FormatInfo {
|
||||
pub fn last_check_valid(&self) -> bool {
|
||||
let now = OffsetDateTime::now_utc();
|
||||
self.file_info.is_some()
|
||||
&& self.id.is_some()
|
||||
&& self.last_check.is_some()
|
||||
&& (now.unix_timestamp() - self.last_check.unwrap().unix_timestamp() <= 1)
|
||||
}
|
||||
}
|
||||
|
||||
/// A helper enum to handle internal buffer types for writing data.
|
||||
pub enum InternalBuf<'a> {
|
||||
Ref(&'a [u8]),
|
||||
@@ -185,7 +175,7 @@ impl LocalDisk {
|
||||
};
|
||||
let root_clone = root.clone();
|
||||
let update_fn: UpdateFn<DiskInfo> = Box::new(move || {
|
||||
let disk_id = id.map_or("".to_string(), |id| id.to_string());
|
||||
let disk_id = id;
|
||||
let root = root_clone.clone();
|
||||
Box::pin(async move {
|
||||
match get_disk_info(root.clone()).await {
|
||||
@@ -200,7 +190,7 @@ impl LocalDisk {
|
||||
minor: info.minor,
|
||||
fs_type: info.fstype,
|
||||
root_disk: root,
|
||||
id: disk_id.to_string(),
|
||||
id: disk_id,
|
||||
..Default::default()
|
||||
};
|
||||
// if root {
|
||||
@@ -1295,7 +1285,7 @@ impl DiskAPI for LocalDisk {
|
||||
}
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn is_online(&self) -> bool {
|
||||
self.check_format_json().await.is_ok()
|
||||
true
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
@@ -1342,24 +1332,40 @@ impl DiskAPI for LocalDisk {
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(self))]
|
||||
async fn get_disk_id(&self) -> Result<Option<Uuid>> {
|
||||
let mut format_info = self.format_info.write().await;
|
||||
let format_info = {
|
||||
let format_info = self.format_info.read().await;
|
||||
format_info.clone()
|
||||
};
|
||||
|
||||
let id = format_info.id;
|
||||
|
||||
if format_info.last_check_valid() {
|
||||
return Ok(id);
|
||||
// if format_info.last_check_valid() {
|
||||
// return Ok(id);
|
||||
// }
|
||||
|
||||
if format_info.file_info.is_some() && id.is_some() {
|
||||
// check last check time
|
||||
if let Some(last_check) = format_info.last_check {
|
||||
// Trust the cached id only while the last check is still fresh (within the last second).
if last_check.unix_timestamp() + 1 >= OffsetDateTime::now_utc().unix_timestamp() {
|
||||
return Ok(id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let file_meta = self.check_format_json().await?;
|
||||
|
||||
if let Some(file_info) = &format_info.file_info {
|
||||
if super::fs::same_file(&file_meta, file_info) {
|
||||
let mut format_info = self.format_info.write().await;
|
||||
format_info.last_check = Some(OffsetDateTime::now_utc());
|
||||
drop(format_info);
|
||||
|
||||
return Ok(id);
|
||||
}
|
||||
}
|
||||
|
||||
debug!("get_disk_id: read format.json");
|
||||
|
||||
let b = fs::read(&self.format_path).await.map_err(to_unformatted_disk_error)?;
|
||||
|
||||
let fm = FormatV3::try_from(b.as_slice()).map_err(|e| {
|
||||
@@ -1375,20 +1381,19 @@ impl DiskAPI for LocalDisk {
|
||||
return Err(DiskError::InconsistentDisk);
|
||||
}
|
||||
|
||||
let mut format_info = self.format_info.write().await;
|
||||
format_info.id = Some(disk_id);
|
||||
format_info.file_info = Some(file_meta);
|
||||
format_info.data = b.into();
|
||||
format_info.last_check = Some(OffsetDateTime::now_utc());
|
||||
drop(format_info);
|
||||
|
||||
Ok(Some(disk_id))
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()> {
|
||||
async fn set_disk_id(&self, _id: Option<Uuid>) -> Result<()> {
|
||||
// No setup is required locally
|
||||
// TODO: add check_id_store
|
||||
let mut format_info = self.format_info.write().await;
|
||||
format_info.id = id;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -2438,6 +2443,10 @@ impl DiskAPI for LocalDisk {
|
||||
info.endpoint = self.endpoint.to_string();
|
||||
info.scanning = self.scanning.load(Ordering::SeqCst) == 1;
|
||||
|
||||
if info.id.is_none() {
|
||||
info.id = self.get_disk_id().await.unwrap_or(None);
|
||||
}
|
||||
|
||||
Ok(info)
|
||||
}
|
||||
}
|
||||
@@ -2705,39 +2714,6 @@ mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_format_info_last_check_valid() {
|
||||
let now = OffsetDateTime::now_utc();
|
||||
|
||||
// Valid format info
|
||||
let valid_format_info = FormatInfo {
|
||||
id: Some(Uuid::new_v4()),
|
||||
data: vec![1, 2, 3].into(),
|
||||
file_info: Some(fs::metadata("../../../..").await.unwrap()),
|
||||
last_check: Some(now),
|
||||
};
|
||||
assert!(valid_format_info.last_check_valid());
|
||||
|
||||
// Invalid format info (missing id)
|
||||
let invalid_format_info = FormatInfo {
|
||||
id: None,
|
||||
data: vec![1, 2, 3].into(),
|
||||
file_info: Some(fs::metadata("../../../..").await.unwrap()),
|
||||
last_check: Some(now),
|
||||
};
|
||||
assert!(!invalid_format_info.last_check_valid());
|
||||
|
||||
// Invalid format info (old timestamp)
|
||||
let old_time = OffsetDateTime::now_utc() - time::Duration::seconds(10);
|
||||
let old_format_info = FormatInfo {
|
||||
id: Some(Uuid::new_v4()),
|
||||
data: vec![1, 2, 3].into(),
|
||||
file_info: Some(fs::metadata("../../../..").await.unwrap()),
|
||||
last_check: Some(old_time),
|
||||
};
|
||||
assert!(!old_format_info.last_check_valid());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_read_file_exists() {
|
||||
let test_file = "./test_read_exists.txt";
|
||||
|
||||
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod disk_store;
pub mod endpoint;
pub mod error;
pub mod error_conv;
@@ -30,6 +31,7 @@ pub const FORMAT_CONFIG_FILE: &str = "format.json";
pub const STORAGE_FORMAT_FILE: &str = "xl.meta";
pub const STORAGE_FORMAT_FILE_BACKUP: &str = "xl.meta.bkp";

use crate::disk::disk_store::LocalDiskWrapper;
use crate::rpc::RemoteDisk;
use bytes::Bytes;
use endpoint::Endpoint;
@@ -51,7 +53,7 @@ pub type FileWriter = Box<dyn AsyncWrite + Send + Sync + Unpin>;

#[derive(Debug)]
pub enum Disk {
    Local(Box<LocalDisk>),
    Local(Box<LocalDiskWrapper>),
    Remote(Box<RemoteDisk>),
}

@@ -398,7 +400,7 @@ impl DiskAPI for Disk {
pub async fn new_disk(ep: &Endpoint, opt: &DiskOption) -> Result<DiskStore> {
    if ep.is_local {
        let s = LocalDisk::new(ep, opt.cleanup).await?;
        Ok(Arc::new(Disk::Local(Box::new(s))))
        Ok(Arc::new(Disk::Local(Box::new(LocalDiskWrapper::new(Arc::new(s), opt.health_check)))))
    } else {
        let remote_disk = RemoteDisk::new(ep, opt).await?;
        Ok(Arc::new(Disk::Remote(Box::new(remote_disk))))
@@ -534,7 +536,7 @@ pub struct DiskInfo {
    pub scanning: bool,
    pub endpoint: String,
    pub mount_path: String,
    pub id: String,
    pub id: Option<Uuid>,
    pub rotational: bool,
    pub metrics: DiskMetrics,
    pub error: String,
@@ -1015,7 +1017,7 @@ mod tests {

    let endpoint = Endpoint::try_from(test_dir).unwrap();
    let local_disk = LocalDisk::new(&endpoint, false).await.unwrap();
    let disk = Disk::Local(Box::new(local_disk));
    let disk = Disk::Local(Box::new(LocalDiskWrapper::new(Arc::new(local_disk), false)));

    // Test basic methods
    assert!(disk.is_local());

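Editor's note: the mod.rs hunk above swaps the plain `LocalDisk` variant for a `LocalDiskWrapper`, so health checking can be layered onto a disk without changing `LocalDisk` itself. The sketch below is only an illustration of that decorator shape; the `LocalDiskWrapper::new(Arc<LocalDisk>, bool)` call mirrors the diff, while every other name and field here is hypothetical and not the crate's real API.

// Hypothetical sketch, not part of the commit diff.
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

struct LocalDisk; // stand-in for the real disk type

struct LocalDiskWrapper {
    inner: Arc<LocalDisk>, // wrapped disk, still reachable for real I/O
    health_check: bool,    // mirrors the `opt.health_check` flag in the diff
    online: AtomicBool,    // would be updated by a background health monitor (not shown)
}

impl LocalDiskWrapper {
    fn new(inner: Arc<LocalDisk>, health_check: bool) -> Self {
        Self { inner, health_check, online: AtomicBool::new(true) }
    }

    fn is_online(&self) -> bool {
        // With health checking disabled the wrapper is transparent.
        !self.health_check || self.online.load(Ordering::Relaxed)
    }
}

fn main() {
    let disk = LocalDiskWrapper::new(Arc::new(LocalDisk), true);
    println!("online: {}", disk.is_online());
}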
@@ -1,586 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::bitrot::{BitrotReader, BitrotWriter};
use crate::disk::error::{Error, Result};
use crate::disk::error_reduce::{reduce_write_quorum_errs, OBJECT_OP_IGNORED_ERRS};
use crate::io::Etag;
use bytes::{Bytes, BytesMut};
use futures::future::join_all;
use reed_solomon_erasure::galois_8::ReedSolomon;
use smallvec::SmallVec;
use std::any::Any;
use std::io::ErrorKind;
use std::sync::{mpsc, Arc};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::sync::mpsc;
use tracing::warn;
use tracing::{error, info};
use uuid::Uuid;

use crate::disk::error::DiskError;

#[derive(Default)]
pub struct Erasure {
    data_shards: usize,
    parity_shards: usize,
    encoder: Option<ReedSolomon>,
    pub block_size: usize,
    _id: Uuid,
    _buf: Vec<u8>,
}

impl Erasure {
|
||||
pub fn new(data_shards: usize, parity_shards: usize, block_size: usize) -> Self {
|
||||
// debug!(
|
||||
// "Erasure new data_shards {},parity_shards {} block_size {} ",
|
||||
// data_shards, parity_shards, block_size
|
||||
// );
|
||||
let mut encoder = None;
|
||||
if parity_shards > 0 {
|
||||
encoder = Some(ReedSolomon::new(data_shards, parity_shards).unwrap());
|
||||
}
|
||||
|
||||
Erasure {
|
||||
data_shards,
|
||||
parity_shards,
|
||||
block_size,
|
||||
encoder,
|
||||
_id: Uuid::new_v4(),
|
||||
_buf: vec![0u8; block_size],
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "info", skip(self, reader, writers))]
|
||||
pub async fn encode<S>(
|
||||
self: Arc<Self>,
|
||||
mut reader: S,
|
||||
writers: &mut [Option<BitrotWriter>],
|
||||
// block_size: usize,
|
||||
total_size: usize,
|
||||
write_quorum: usize,
|
||||
) -> Result<(usize, String)>
|
||||
where
|
||||
S: AsyncRead + Etag + Unpin + Send + 'static,
|
||||
{
|
||||
let (tx, mut rx) = mpsc::channel(5);
|
||||
let task = tokio::spawn(async move {
|
||||
let mut buf = vec![0u8; self.block_size];
|
||||
let mut total: usize = 0;
|
||||
loop {
|
||||
if total_size > 0 {
|
||||
let new_len = {
|
||||
let remain = total_size - total;
|
||||
if remain > self.block_size { self.block_size } else { remain }
|
||||
};
|
||||
|
||||
if new_len == 0 && total > 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
buf.resize(new_len, 0u8);
|
||||
match reader.read_exact(&mut buf).await {
|
||||
Ok(res) => res,
|
||||
Err(e) => {
|
||||
if let ErrorKind::UnexpectedEof = e.kind() {
|
||||
break;
|
||||
} else {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
};
|
||||
total += buf.len();
|
||||
}
|
||||
let blocks = Arc::new(Box::pin(self.clone().encode_data(&buf)?));
|
||||
let _ = tx.send(blocks).await;
|
||||
if total_size == 0 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
let etag = reader.etag().await;
|
||||
Ok((total, etag))
|
||||
});
|
||||
|
||||
while let Some(blocks) = rx.recv().await {
|
||||
let write_futures = writers.iter_mut().enumerate().map(|(i, w_op)| {
|
||||
let i_inner = i;
|
||||
let blocks_inner = blocks.clone();
|
||||
async move {
|
||||
if let Some(w) = w_op {
|
||||
w.write(blocks_inner[i_inner].clone()).await.err()
|
||||
} else {
|
||||
Some(DiskError::DiskNotFound)
|
||||
}
|
||||
}
|
||||
});
|
||||
let errs = join_all(write_futures).await;
|
||||
let none_count = errs.iter().filter(|&x| x.is_none()).count();
|
||||
if none_count >= write_quorum {
|
||||
if total_size == 0 {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(err) = reduce_write_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, write_quorum) {
|
||||
warn!("Erasure encode errs {:?}", &errs);
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
task.await?
|
||||
}
|
||||
|
||||
pub async fn decode<W>(
|
||||
&self,
|
||||
writer: &mut W,
|
||||
readers: Vec<Option<BitrotReader>>,
|
||||
offset: usize,
|
||||
length: usize,
|
||||
total_length: usize,
|
||||
) -> (usize, Option<Error>)
|
||||
where
|
||||
W: AsyncWriteExt + Send + Unpin + 'static,
|
||||
{
|
||||
if length == 0 {
|
||||
return (0, None);
|
||||
}
|
||||
|
||||
let mut reader = ShardReader::new(readers, self, offset, total_length);
|
||||
|
||||
// debug!("ShardReader {:?}", &reader);
|
||||
|
||||
let start_block = offset / self.block_size;
|
||||
let end_block = (offset + length) / self.block_size;
|
||||
|
||||
// debug!("decode block from {} to {}", start_block, end_block);
|
||||
|
||||
let mut bytes_written = 0;
|
||||
|
||||
for block_idx in start_block..=end_block {
|
||||
let (block_offset, block_length) = if start_block == end_block {
|
||||
(offset % self.block_size, length)
|
||||
} else if block_idx == start_block {
|
||||
let block_offset = offset % self.block_size;
|
||||
(block_offset, self.block_size - block_offset)
|
||||
} else if block_idx == end_block {
|
||||
(0, (offset + length) % self.block_size)
|
||||
} else {
|
||||
(0, self.block_size)
|
||||
};
|
||||
|
||||
if block_length == 0 {
|
||||
// debug!("block_length == 0 break");
|
||||
break;
|
||||
}
|
||||
|
||||
// debug!("decode {} block_offset {},block_length {} ", block_idx, block_offset, block_length);
|
||||
|
||||
let mut bufs = match reader.read().await {
|
||||
Ok(bufs) => bufs,
|
||||
Err(err) => return (bytes_written, Some(err)),
|
||||
};
|
||||
|
||||
if self.parity_shards > 0 {
|
||||
if let Err(err) = self.decode_data(&mut bufs) {
|
||||
return (bytes_written, Some(err));
|
||||
}
|
||||
}
|
||||
|
||||
let written_n = match self
|
||||
.write_data_blocks(writer, bufs, self.data_shards, block_offset, block_length)
|
||||
.await
|
||||
{
|
||||
Ok(n) => n,
|
||||
Err(err) => {
|
||||
error!("write_data_blocks err {:?}", &err);
|
||||
return (bytes_written, Some(err));
|
||||
}
|
||||
};
|
||||
|
||||
bytes_written += written_n;
|
||||
|
||||
// debug!("decode {} written_n {}, total_written: {} ", block_idx, written_n, bytes_written);
|
||||
}
|
||||
|
||||
if bytes_written != length {
|
||||
// debug!("bytes_written != length: {} != {} ", bytes_written, length);
|
||||
return (bytes_written, Some(Error::other("erasure decode less data")));
|
||||
}
|
||||
|
||||
(bytes_written, None)
|
||||
}
|
||||
|
||||
async fn write_data_blocks<W>(
|
||||
&self,
|
||||
writer: &mut W,
|
||||
bufs: Vec<Option<Vec<u8>>>,
|
||||
data_blocks: usize,
|
||||
offset: usize,
|
||||
length: usize,
|
||||
) -> Result<usize>
|
||||
where
|
||||
W: AsyncWrite + Send + Unpin + 'static,
|
||||
{
|
||||
if bufs.len() < data_blocks {
|
||||
return Err(Error::other("read bufs not match data_blocks"));
|
||||
}
|
||||
|
||||
let data_len: usize = bufs
|
||||
.iter()
|
||||
.take(data_blocks)
|
||||
.filter(|v| v.is_some())
|
||||
.map(|v| v.as_ref().unwrap().len())
|
||||
.sum();
|
||||
if data_len < length {
|
||||
return Err(Error::other(format!("write_data_blocks data_len < length {} < {}", data_len, length)));
|
||||
}
|
||||
|
||||
let mut offset = offset;
|
||||
|
||||
// debug!("write_data_blocks offset {}, length {}", offset, length);
|
||||
|
||||
let mut write = length;
|
||||
let mut total_written = 0;
|
||||
|
||||
for opt_buf in bufs.iter().take(data_blocks) {
|
||||
let buf = opt_buf.as_ref().unwrap();
|
||||
|
||||
if offset >= buf.len() {
|
||||
offset -= buf.len();
|
||||
continue;
|
||||
}
|
||||
|
||||
let buf = &buf[offset..];
|
||||
|
||||
offset = 0;
|
||||
|
||||
// debug!("write_data_blocks write buf len {}", buf.len());
|
||||
|
||||
if write < buf.len() {
|
||||
let buf = &buf[..write];
|
||||
|
||||
// debug!("write_data_blocks write buf less len {}", buf.len());
|
||||
writer.write_all(buf).await?;
|
||||
// debug!("write_data_blocks write done len {}", buf.len());
|
||||
total_written += buf.len();
|
||||
break;
|
||||
}
|
||||
|
||||
writer.write_all(buf).await?;
|
||||
let n = buf.len();
|
||||
|
||||
// debug!("write_data_blocks write done len {}", n);
|
||||
write -= n;
|
||||
total_written += n;
|
||||
}
|
||||
|
||||
Ok(total_written)
|
||||
}
|
||||
|
||||
pub fn total_shard_count(&self) -> usize {
|
||||
self.data_shards + self.parity_shards
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "info", skip_all, fields(data_len=data.len()))]
|
||||
pub fn encode_data(self: Arc<Self>, data: &[u8]) -> Result<Vec<Bytes>> {
|
||||
let (shard_size, total_size) = self.need_size(data.len());
|
||||
|
||||
// Generate the total length required for all shards
|
||||
let mut data_buffer = BytesMut::with_capacity(total_size);
|
||||
|
||||
// Copy the source data
|
||||
data_buffer.extend_from_slice(data);
|
||||
data_buffer.resize(total_size, 0u8);
|
||||
|
||||
{
|
||||
// Perform EC encoding; the results go into data_buffer
|
||||
let data_slices: SmallVec<[&mut [u8]; 16]> = data_buffer.chunks_exact_mut(shard_size).collect();
|
||||
|
||||
// Only perform EC encoding when parity shards are present
|
||||
if self.parity_shards > 0 {
|
||||
self.encoder.as_ref().unwrap().encode(data_slices).map_err(Error::other)?;
|
||||
}
|
||||
}
|
||||
|
||||
// Zero-copy shards: every shard references data_buffer
|
||||
let mut data_buffer = data_buffer.freeze();
|
||||
let mut shards = Vec::with_capacity(self.total_shard_count());
|
||||
for _ in 0..self.total_shard_count() {
|
||||
let shard = data_buffer.split_to(shard_size);
|
||||
shards.push(shard);
|
||||
}
|
||||
|
||||
Ok(shards)
|
||||
}
|
||||
|
||||
pub fn decode_data(&self, shards: &mut [Option<Vec<u8>>]) -> Result<()> {
|
||||
if self.parity_shards > 0 {
|
||||
self.encoder.as_ref().unwrap().reconstruct(shards).map_err(Error::other)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// The length per shard and the total required length
|
||||
fn need_size(&self, data_size: usize) -> (usize, usize) {
|
||||
let shard_size = self.shard_size(data_size);
|
||||
(shard_size, shard_size * (self.total_shard_count()))
|
||||
}
|
||||
|
||||
// Compute each shard size
|
||||
pub fn shard_size(&self, data_size: usize) -> usize {
|
||||
data_size.div_ceil(self.data_shards)
|
||||
}
|
||||
// returns final erasure size from original size.
|
||||
pub fn shard_file_size(&self, total_size: usize) -> usize {
|
||||
if total_size == 0 {
|
||||
return 0;
|
||||
}
|
||||
|
||||
let num_shards = total_size / self.block_size;
|
||||
let last_block_size = total_size % self.block_size;
|
||||
let last_shard_size = last_block_size.div_ceil(self.data_shards);
|
||||
num_shards * self.shard_size(self.block_size) + last_shard_size
|
||||
|
||||
// When writing, EC pads the data so the last shard length should match
|
||||
// if last_block_size != 0 {
|
||||
// num_shards += 1
|
||||
// }
|
||||
// num_shards * self.shard_size(self.block_size)
|
||||
}
|
||||
|
||||
// where erasure reading begins.
|
||||
pub fn shard_file_offset(&self, start_offset: usize, length: usize, total_length: usize) -> usize {
|
||||
let shard_size = self.shard_size(self.block_size);
|
||||
let shard_file_size = self.shard_file_size(total_length);
|
||||
let end_shard = (start_offset + length) / self.block_size;
|
||||
let mut till_offset = end_shard * shard_size + shard_size;
|
||||
if till_offset > shard_file_size {
|
||||
till_offset = shard_file_size;
|
||||
}
|
||||
|
||||
till_offset
|
||||
}
|
||||
|
||||
pub async fn heal(
|
||||
&self,
|
||||
writers: &mut [Option<BitrotWriter>],
|
||||
readers: Vec<Option<BitrotReader>>,
|
||||
total_length: usize,
|
||||
_prefer: &[bool],
|
||||
) -> Result<()> {
|
||||
info!(
|
||||
"Erasure heal, writers len: {}, readers len: {}, total_length: {}",
|
||||
writers.len(),
|
||||
readers.len(),
|
||||
total_length
|
||||
);
|
||||
if writers.len() != self.parity_shards + self.data_shards {
|
||||
return Err(Error::other("invalid argument"));
|
||||
}
|
||||
let mut reader = ShardReader::new(readers, self, 0, total_length);
|
||||
|
||||
let start_block = 0;
|
||||
let mut end_block = total_length / self.block_size;
|
||||
if total_length % self.block_size != 0 {
|
||||
end_block += 1;
|
||||
}
|
||||
|
||||
let mut errs = Vec::new();
|
||||
for _ in start_block..end_block {
|
||||
let mut bufs = reader.read().await?;
|
||||
|
||||
if self.parity_shards > 0 {
|
||||
self.encoder.as_ref().unwrap().reconstruct(&mut bufs).map_err(Error::other)?;
|
||||
}
|
||||
|
||||
let shards = bufs.into_iter().flatten().map(Bytes::from).collect::<Vec<_>>();
|
||||
if shards.len() != self.parity_shards + self.data_shards {
|
||||
return Err(Error::other("can not reconstruct data"));
|
||||
}
|
||||
|
||||
for (i, w) in writers.iter_mut().enumerate() {
|
||||
if w.is_none() {
|
||||
continue;
|
||||
}
|
||||
match w.as_mut().unwrap().write(shards[i].clone()).await {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
info!("write failed, err: {:?}", e);
|
||||
errs.push(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if !errs.is_empty() {
|
||||
return Err(errs[0].clone().into());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait Writer {
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
async fn write(&mut self, buf: Bytes) -> Result<()>;
|
||||
async fn close(&mut self) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait ReadAt {
|
||||
async fn read_at(&mut self, offset: usize, length: usize) -> Result<(Vec<u8>, usize)>;
|
||||
}
|
||||
|
||||
pub struct ShardReader {
|
||||
readers: Vec<Option<BitrotReader>>, // Disk readers
|
||||
data_block_count: usize, // Total number of shards
|
||||
parity_block_count: usize,
|
||||
shard_size: usize, // Block size per shard (read one block at a time)
|
||||
shard_file_size: usize, // Total size of the shard file
|
||||
offset: usize, // Offset within the shard
|
||||
}
|
||||
|
||||
impl ShardReader {
|
||||
pub fn new(readers: Vec<Option<BitrotReader>>, ec: &Erasure, offset: usize, total_length: usize) -> Self {
|
||||
Self {
|
||||
readers,
|
||||
data_block_count: ec.data_shards,
|
||||
parity_block_count: ec.parity_shards,
|
||||
shard_size: ec.shard_size(ec.block_size),
|
||||
shard_file_size: ec.shard_file_size(total_length),
|
||||
offset: (offset / ec.block_size) * ec.shard_size(ec.block_size),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn read(&mut self) -> Result<Vec<Option<Vec<u8>>>> {
|
||||
// let mut disks = self.readers;
|
||||
let reader_length = self.readers.len();
|
||||
// Length of the block to read
|
||||
let mut read_length = self.shard_size;
|
||||
if self.offset + read_length > self.shard_file_size {
|
||||
read_length = self.shard_file_size - self.offset
|
||||
}
|
||||
|
||||
if read_length == 0 {
|
||||
return Ok(vec![None; reader_length]);
|
||||
}
|
||||
|
||||
// debug!("shard reader read offset {}, shard_size {}", self.offset, read_length);
|
||||
|
||||
let mut futures = Vec::with_capacity(reader_length);
|
||||
let mut errors = Vec::with_capacity(reader_length);
|
||||
|
||||
let mut ress = Vec::with_capacity(reader_length);
|
||||
|
||||
for disk in self.readers.iter_mut() {
|
||||
// if disk.is_none() {
|
||||
// ress.push(None);
|
||||
// errors.push(Some(Error::new(DiskError::DiskNotFound)));
|
||||
// continue;
|
||||
// }
|
||||
|
||||
// let disk: &mut BitrotReader = disk.as_mut().unwrap();
|
||||
let offset = self.offset;
|
||||
futures.push(async move {
|
||||
if let Some(disk) = disk {
|
||||
disk.read_at(offset, read_length).await
|
||||
} else {
|
||||
Err(DiskError::DiskNotFound)
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
let results = join_all(futures).await;
|
||||
for result in results {
|
||||
match result {
|
||||
Ok((res, _)) => {
|
||||
ress.push(Some(res));
|
||||
errors.push(None);
|
||||
}
|
||||
Err(e) => {
|
||||
ress.push(None);
|
||||
errors.push(Some(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !self.can_decode(&ress) {
|
||||
warn!("ec decode read ress {:?}", &ress);
|
||||
warn!("ec decode read errors {:?}", &errors);
|
||||
|
||||
return Err(Error::other("shard reader read failed"));
|
||||
}
|
||||
|
||||
self.offset += self.shard_size;
|
||||
|
||||
Ok(ress)
|
||||
}
|
||||
|
||||
fn can_decode(&self, bufs: &[Option<Vec<u8>>]) -> bool {
|
||||
let c = bufs.iter().filter(|v| v.is_some()).count();
|
||||
if self.parity_block_count > 0 {
|
||||
c >= self.data_block_count
|
||||
} else {
|
||||
c == self.data_block_count
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fn shards_to_option_shards<T: Clone>(shards: &[Vec<T>]) -> Vec<Option<Vec<T>>> {
|
||||
// let mut result = Vec::with_capacity(shards.len());
|
||||
|
||||
// for v in shards.iter() {
|
||||
// let inner: Vec<T> = v.clone();
|
||||
// result.push(Some(inner));
|
||||
// }
|
||||
// result
|
||||
// }
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_erasure() {
|
||||
let data_shards = 3;
|
||||
let parity_shards = 2;
|
||||
let data: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
|
||||
let ec = Erasure::new(data_shards, parity_shards, 1);
|
||||
let shards = Arc::new(ec).encode_data(data).unwrap();
|
||||
println!("shards:{:?}", shards);
|
||||
|
||||
let mut s: Vec<_> = shards
|
||||
.iter()
|
||||
.map(|d| if d.is_empty() { None } else { Some(d.to_vec()) })
|
||||
.collect();
|
||||
|
||||
// let mut s = shards_to_option_shards(&shards);
|
||||
|
||||
// s[0] = None;
|
||||
s[4] = None;
|
||||
s[3] = None;
|
||||
|
||||
println!("sss:{:?}", &s);
|
||||
|
||||
let ec = Erasure::new(data_shards, parity_shards, 1);
|
||||
ec.decode_data(&mut s).unwrap();
|
||||
// ec.encoder.reconstruct(&mut s).unwrap();
|
||||
|
||||
println!("sss:{:?}", &s);
|
||||
}
|
||||
}
|
||||
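Editor's note: the deleted erasure module above sizes each per-disk shard from the block size and the data-shard count. The standalone sketch below restates that arithmetic for illustration only; the 1 MiB block size and 4+2 layout are example values, not project defaults.

// Illustrative re-statement of the `shard_size` / `shard_file_size` arithmetic above.
fn shard_size(block_size: usize, data_shards: usize) -> usize {
    block_size.div_ceil(data_shards)
}

fn shard_file_size(total_size: usize, block_size: usize, data_shards: usize) -> usize {
    if total_size == 0 {
        return 0;
    }
    let full_blocks = total_size / block_size;
    let last_block = total_size % block_size;
    full_blocks * shard_size(block_size, data_shards) + last_block.div_ceil(data_shards)
}

fn main() {
    let (block, data_shards) = (1usize << 20, 4); // 1 MiB blocks, 4 data shards (example)
    let object = 3 * (1 << 20) + (1 << 19); // a 3.5 MiB object
    // Three full 256 KiB shard blocks plus a 128 KiB tail land on every disk.
    assert_eq!(shard_file_size(object, block, data_shards), 3 * (1 << 18) + (1 << 17));
    println!("bytes per shard file: {}", shard_file_size(object, block, data_shards));
}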
@@ -21,7 +21,6 @@ use crate::{
|
||||
tier::tier::TierConfigMgr,
|
||||
};
|
||||
use lazy_static::lazy_static;
|
||||
use rustfs_policy::auth::Credentials;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{Arc, OnceLock},
|
||||
@@ -61,49 +60,6 @@ lazy_static! {
|
||||
/// Global cancellation token for background services (data scanner and auto heal)
|
||||
static GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();
|
||||
|
||||
/// Global active credentials
|
||||
static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();
|
||||
|
||||
/// Initialize the global action credentials
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `ak` - Optional access key
|
||||
/// * `sk` - Optional secret key
|
||||
///
|
||||
/// # Returns
|
||||
/// * None
|
||||
///
|
||||
pub fn init_global_action_credentials(ak: Option<String>, sk: Option<String>) {
|
||||
let ak = {
|
||||
if let Some(k) = ak {
|
||||
k
|
||||
} else {
|
||||
rustfs_utils::string::gen_access_key(20).unwrap_or_default()
|
||||
}
|
||||
};
|
||||
|
||||
let sk = {
|
||||
if let Some(k) = sk {
|
||||
k
|
||||
} else {
|
||||
rustfs_utils::string::gen_secret_key(32).unwrap_or_default()
|
||||
}
|
||||
};
|
||||
|
||||
GLOBAL_ACTIVE_CRED
|
||||
.set(Credentials {
|
||||
access_key: ak,
|
||||
secret_key: sk,
|
||||
..Default::default()
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
/// Get the global action credentials
|
||||
pub fn get_global_action_cred() -> Option<Credentials> {
|
||||
GLOBAL_ACTIVE_CRED.get().cloned()
|
||||
}
|
||||
|
||||
/// Get the global rustfs port
|
||||
///
|
||||
/// # Returns
|
||||
|
||||
@@ -20,7 +20,6 @@ pub mod batch_processor;
|
||||
pub mod bitrot;
|
||||
pub mod bucket;
|
||||
pub mod cache_value;
|
||||
mod chunk_stream;
|
||||
pub mod compress;
|
||||
pub mod config;
|
||||
pub mod data_usage;
|
||||
|
||||
@@ -19,11 +19,7 @@ use crate::{
|
||||
// utils::os::get_drive_stats,
|
||||
};
|
||||
use chrono::Utc;
|
||||
use rustfs_common::{
|
||||
globals::{GLOBAL_LOCAL_NODE_NAME, GLOBAL_RUSTFS_ADDR},
|
||||
heal_channel::DriveState,
|
||||
metrics::global_metrics,
|
||||
};
|
||||
use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, GLOBAL_RUSTFS_ADDR, heal_channel::DriveState, metrics::global_metrics};
|
||||
use rustfs_madmin::metrics::{DiskIOStats, DiskMetric, RealtimeMetrics};
|
||||
use rustfs_utils::os::get_drive_stats;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::global::get_global_action_cred;
|
||||
use base64::Engine as _;
|
||||
use base64::engine::general_purpose;
|
||||
use hmac::{Hmac, KeyInit, Mac};
|
||||
@@ -20,6 +19,7 @@ use http::HeaderMap;
|
||||
use http::HeaderValue;
|
||||
use http::Method;
|
||||
use http::Uri;
|
||||
use rustfs_credentials::get_global_action_cred;
|
||||
use sha2::Sha256;
|
||||
use time::OffsetDateTime;
|
||||
use tracing::error;
|
||||
|
||||
@@ -13,14 +13,18 @@
|
||||
// limitations under the License.
|
||||
|
||||
use crate::bucket::metadata_sys;
|
||||
use crate::disk::error::DiskError;
|
||||
use crate::disk::error::{Error, Result};
|
||||
use crate::disk::error_reduce::{BUCKET_OP_IGNORED_ERRS, is_all_buckets_not_found, reduce_write_quorum_errs};
|
||||
use crate::disk::{DiskAPI, DiskStore};
|
||||
use crate::disk::{DiskAPI, DiskStore, disk_store::get_max_timeout_duration};
|
||||
use crate::global::GLOBAL_LOCAL_DISK_MAP;
|
||||
use crate::store::all_local_disk;
|
||||
use crate::store_utils::is_reserved_or_invalid_bucket;
|
||||
use crate::{
|
||||
disk::{self, VolumeInfo},
|
||||
disk::{
|
||||
self, VolumeInfo,
|
||||
disk_store::{CHECK_EVERY, CHECK_TIMEOUT_DURATION, DiskHealthTracker},
|
||||
},
|
||||
endpoints::{EndpointServerPools, Node},
|
||||
store_api::{BucketInfo, BucketOptions, DeleteBucketOptions, MakeBucketOptions},
|
||||
};
|
||||
@@ -32,10 +36,11 @@ use rustfs_protos::node_service_time_out_client;
|
||||
use rustfs_protos::proto_gen::node_service::{
|
||||
DeleteBucketRequest, GetBucketInfoRequest, HealBucketRequest, ListBucketRequest, MakeBucketRequest,
|
||||
};
|
||||
use std::{collections::HashMap, fmt::Debug, sync::Arc};
|
||||
use tokio::sync::RwLock;
|
||||
use std::{collections::HashMap, fmt::Debug, sync::Arc, time::Duration};
|
||||
use tokio::{net::TcpStream, sync::RwLock, time};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tonic::Request;
|
||||
use tracing::info;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
type Client = Arc<Box<dyn PeerS3Client>>;
|
||||
|
||||
@@ -559,16 +564,160 @@ pub struct RemotePeerS3Client {
|
||||
pub node: Option<Node>,
|
||||
pub pools: Option<Vec<usize>>,
|
||||
addr: String,
|
||||
/// Health tracker for connection monitoring
|
||||
health: Arc<DiskHealthTracker>,
|
||||
/// Cancellation token for monitoring tasks
|
||||
cancel_token: CancellationToken,
|
||||
}
|
||||
|
||||
impl RemotePeerS3Client {
|
||||
pub fn new(node: Option<Node>, pools: Option<Vec<usize>>) -> Self {
|
||||
let addr = node.as_ref().map(|v| v.url.to_string()).unwrap_or_default().to_string();
|
||||
Self { node, pools, addr }
|
||||
let client = Self {
|
||||
node,
|
||||
pools,
|
||||
addr,
|
||||
health: Arc::new(DiskHealthTracker::new()),
|
||||
cancel_token: CancellationToken::new(),
|
||||
};
|
||||
|
||||
// Start health monitoring
|
||||
client.start_health_monitoring();
|
||||
|
||||
client
|
||||
}
|
||||
|
||||
pub fn get_addr(&self) -> String {
|
||||
self.addr.clone()
|
||||
}
|
||||
|
||||
/// Start health monitoring for the remote peer
|
||||
fn start_health_monitoring(&self) {
|
||||
let health = Arc::clone(&self.health);
|
||||
let cancel_token = self.cancel_token.clone();
|
||||
let addr = self.addr.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
Self::monitor_remote_peer_health(addr, health, cancel_token).await;
|
||||
});
|
||||
}
|
||||
|
||||
/// Monitor remote peer health periodically
|
||||
async fn monitor_remote_peer_health(addr: String, health: Arc<DiskHealthTracker>, cancel_token: CancellationToken) {
|
||||
let mut interval = time::interval(CHECK_EVERY);
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = cancel_token.cancelled() => {
|
||||
debug!("Health monitoring cancelled for remote peer: {}", addr);
|
||||
return;
|
||||
}
|
||||
_ = interval.tick() => {
|
||||
if cancel_token.is_cancelled() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Skip health check if peer is already marked as faulty
|
||||
if health.is_faulty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Perform basic connectivity check
|
||||
if Self::perform_connectivity_check(&addr).await.is_err() && health.swap_ok_to_faulty() {
|
||||
warn!("Remote peer health check failed for {}: marking as faulty", addr);
|
||||
|
||||
// Start recovery monitoring
|
||||
let health_clone = Arc::clone(&health);
|
||||
let addr_clone = addr.clone();
|
||||
let cancel_clone = cancel_token.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
Self::monitor_remote_peer_recovery(addr_clone, health_clone, cancel_clone).await;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Monitor remote peer recovery and mark as healthy when recovered
|
||||
async fn monitor_remote_peer_recovery(addr: String, health: Arc<DiskHealthTracker>, cancel_token: CancellationToken) {
|
||||
let mut interval = time::interval(Duration::from_secs(5)); // Check every 5 seconds
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = cancel_token.cancelled() => {
|
||||
return;
|
||||
}
|
||||
_ = interval.tick() => {
|
||||
if Self::perform_connectivity_check(&addr).await.is_ok() {
|
||||
info!("Remote peer recovered: {}", addr);
|
||||
health.set_ok();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform basic connectivity check for remote peer
|
||||
async fn perform_connectivity_check(addr: &str) -> Result<()> {
|
||||
use tokio::time::timeout;
|
||||
|
||||
let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {}", e)))?;
|
||||
|
||||
let Some(host) = url.host_str() else {
|
||||
return Err(Error::other("No host in URL".to_string()));
|
||||
};
|
||||
|
||||
let port = url.port_or_known_default().unwrap_or(80);
|
||||
|
||||
// Try to establish TCP connection
|
||||
match timeout(CHECK_TIMEOUT_DURATION, TcpStream::connect((host, port))).await {
|
||||
Ok(Ok(_)) => Ok(()),
|
||||
_ => Err(Error::other(format!("Cannot connect to {}:{}", host, port))),
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute operation with timeout and health tracking
|
||||
async fn execute_with_timeout<T, F, Fut>(&self, operation: F, timeout_duration: Duration) -> Result<T>
|
||||
where
|
||||
F: FnOnce() -> Fut,
|
||||
Fut: std::future::Future<Output = Result<T>>,
|
||||
{
|
||||
// Check if peer is faulty
|
||||
if self.health.is_faulty() {
|
||||
return Err(DiskError::FaultyDisk);
|
||||
}
|
||||
|
||||
// Record operation start
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos() as i64;
|
||||
self.health.last_started.store(now, std::sync::atomic::Ordering::Relaxed);
|
||||
self.health.increment_waiting();
|
||||
|
||||
// Execute operation with timeout
|
||||
let result = time::timeout(timeout_duration, operation()).await;
|
||||
|
||||
match result {
|
||||
Ok(operation_result) => {
|
||||
// Log success and decrement waiting counter
|
||||
if operation_result.is_ok() {
|
||||
self.health.log_success();
|
||||
}
|
||||
self.health.decrement_waiting();
|
||||
operation_result
|
||||
}
|
||||
Err(_) => {
|
||||
// Timeout occurred, mark peer as potentially faulty
|
||||
self.health.decrement_waiting();
|
||||
warn!("Remote peer operation timeout after {:?}", timeout_duration);
|
||||
Err(Error::other(format!("Remote peer operation timeout after {:?}", timeout_duration)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -578,115 +727,145 @@ impl PeerS3Client for RemotePeerS3Client {
|
||||
}
|
||||
|
||||
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem> {
|
||||
let options: String = serde_json::to_string(opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(HealBucketRequest {
|
||||
bucket: bucket.to_string(),
|
||||
options,
|
||||
});
|
||||
let response = client.heal_bucket(request).await?.into_inner();
|
||||
if !response.success {
|
||||
return if let Some(err) = response.error {
|
||||
Err(err.into())
|
||||
} else {
|
||||
Err(Error::other(""))
|
||||
};
|
||||
}
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let options: String = serde_json::to_string(opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(HealBucketRequest {
|
||||
bucket: bucket.to_string(),
|
||||
options,
|
||||
});
|
||||
let response = client.heal_bucket(request).await?.into_inner();
|
||||
if !response.success {
|
||||
return if let Some(err) = response.error {
|
||||
Err(err.into())
|
||||
} else {
|
||||
Err(Error::other(""))
|
||||
};
|
||||
}
|
||||
|
||||
Ok(HealResultItem {
|
||||
heal_item_type: HealItemType::Bucket.to_string(),
|
||||
bucket: bucket.to_string(),
|
||||
set_count: 0,
|
||||
..Default::default()
|
||||
})
|
||||
Ok(HealResultItem {
|
||||
heal_item_type: HealItemType::Bucket.to_string(),
|
||||
bucket: bucket.to_string(),
|
||||
set_count: 0,
|
||||
..Default::default()
|
||||
})
|
||||
},
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn list_bucket(&self, opts: &BucketOptions) -> Result<Vec<BucketInfo>> {
|
||||
let options = serde_json::to_string(opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(ListBucketRequest { options });
|
||||
let response = client.list_bucket(request).await?.into_inner();
|
||||
if !response.success {
|
||||
return if let Some(err) = response.error {
|
||||
Err(err.into())
|
||||
} else {
|
||||
Err(Error::other(""))
|
||||
};
|
||||
}
|
||||
let bucket_infos = response
|
||||
.bucket_infos
|
||||
.into_iter()
|
||||
.filter_map(|json_str| serde_json::from_str::<BucketInfo>(&json_str).ok())
|
||||
.collect();
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let options = serde_json::to_string(opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(ListBucketRequest { options });
|
||||
let response = client.list_bucket(request).await?.into_inner();
|
||||
if !response.success {
|
||||
return if let Some(err) = response.error {
|
||||
Err(err.into())
|
||||
} else {
|
||||
Err(Error::other(""))
|
||||
};
|
||||
}
|
||||
let bucket_infos = response
|
||||
.bucket_infos
|
||||
.into_iter()
|
||||
.filter_map(|json_str| serde_json::from_str::<BucketInfo>(&json_str).ok())
|
||||
.collect();
|
||||
|
||||
Ok(bucket_infos)
|
||||
Ok(bucket_infos)
|
||||
},
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> {
|
||||
let options = serde_json::to_string(opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(MakeBucketRequest {
|
||||
name: bucket.to_string(),
|
||||
options,
|
||||
});
|
||||
let response = client.make_bucket(request).await?.into_inner();
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let options = serde_json::to_string(opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(MakeBucketRequest {
|
||||
name: bucket.to_string(),
|
||||
options,
|
||||
});
|
||||
let response = client.make_bucket(request).await?.into_inner();
|
||||
|
||||
// TODO: deal with error
|
||||
if !response.success {
|
||||
return if let Some(err) = response.error {
|
||||
Err(err.into())
|
||||
} else {
|
||||
Err(Error::other(""))
|
||||
};
|
||||
}
|
||||
// TODO: deal with error
|
||||
if !response.success {
|
||||
return if let Some(err) = response.error {
|
||||
Err(err.into())
|
||||
} else {
|
||||
Err(Error::other(""))
|
||||
};
|
||||
}
|
||||
|
||||
Ok(())
|
||||
Ok(())
|
||||
},
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
async fn get_bucket_info(&self, bucket: &str, opts: &BucketOptions) -> Result<BucketInfo> {
|
||||
let options = serde_json::to_string(opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(GetBucketInfoRequest {
|
||||
bucket: bucket.to_string(),
|
||||
options,
|
||||
});
|
||||
let response = client.get_bucket_info(request).await?.into_inner();
|
||||
if !response.success {
|
||||
return if let Some(err) = response.error {
|
||||
Err(err.into())
|
||||
} else {
|
||||
Err(Error::other(""))
|
||||
};
|
||||
}
|
||||
let bucket_info = serde_json::from_str::<BucketInfo>(&response.bucket_info)?;
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let options = serde_json::to_string(opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(GetBucketInfoRequest {
|
||||
bucket: bucket.to_string(),
|
||||
options,
|
||||
});
|
||||
let response = client.get_bucket_info(request).await?.into_inner();
|
||||
if !response.success {
|
||||
return if let Some(err) = response.error {
|
||||
Err(err.into())
|
||||
} else {
|
||||
Err(Error::other(""))
|
||||
};
|
||||
}
|
||||
let bucket_info = serde_json::from_str::<BucketInfo>(&response.bucket_info)?;
|
||||
|
||||
Ok(bucket_info)
|
||||
Ok(bucket_info)
|
||||
},
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn delete_bucket(&self, bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
|
||||
let request = Request::new(DeleteBucketRequest {
|
||||
bucket: bucket.to_string(),
|
||||
});
|
||||
let response = client.delete_bucket(request).await?.into_inner();
|
||||
if !response.success {
|
||||
return if let Some(err) = response.error {
|
||||
Err(err.into())
|
||||
} else {
|
||||
Err(Error::other(""))
|
||||
};
|
||||
}
|
||||
let request = Request::new(DeleteBucketRequest {
|
||||
bucket: bucket.to_string(),
|
||||
});
|
||||
let response = client.delete_bucket(request).await?.into_inner();
|
||||
if !response.success {
|
||||
return if let Some(err) = response.error {
|
||||
Err(err.into())
|
||||
} else {
|
||||
Err(Error::other(""))
|
||||
};
|
||||
}
|
||||
|
||||
Ok(())
|
||||
Ok(())
|
||||
},
|
||||
get_max_timeout_duration(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
File diff suppressed because it is too large
@@ -174,56 +174,56 @@ impl SetDisks {
|
||||
})
|
||||
}
|
||||
|
||||
async fn cached_disk_health(&self, index: usize) -> Option<bool> {
|
||||
let cache = self.disk_health_cache.read().await;
|
||||
cache
|
||||
.get(index)
|
||||
.and_then(|entry| entry.as_ref().and_then(|state| state.cached_value()))
|
||||
}
|
||||
// async fn cached_disk_health(&self, index: usize) -> Option<bool> {
|
||||
// let cache = self.disk_health_cache.read().await;
|
||||
// cache
|
||||
// .get(index)
|
||||
// .and_then(|entry| entry.as_ref().and_then(|state| state.cached_value()))
|
||||
// }
|
||||
|
||||
async fn update_disk_health(&self, index: usize, online: bool) {
|
||||
let mut cache = self.disk_health_cache.write().await;
|
||||
if cache.len() <= index {
|
||||
cache.resize(index + 1, None);
|
||||
}
|
||||
cache[index] = Some(DiskHealthEntry {
|
||||
last_check: Instant::now(),
|
||||
online,
|
||||
});
|
||||
}
|
||||
// async fn update_disk_health(&self, index: usize, online: bool) {
|
||||
// let mut cache = self.disk_health_cache.write().await;
|
||||
// if cache.len() <= index {
|
||||
// cache.resize(index + 1, None);
|
||||
// }
|
||||
// cache[index] = Some(DiskHealthEntry {
|
||||
// last_check: Instant::now(),
|
||||
// online,
|
||||
// });
|
||||
// }
|
||||
|
||||
async fn is_disk_online_cached(&self, index: usize, disk: &DiskStore) -> bool {
|
||||
if let Some(online) = self.cached_disk_health(index).await {
|
||||
return online;
|
||||
}
|
||||
// async fn is_disk_online_cached(&self, index: usize, disk: &DiskStore) -> bool {
|
||||
// if let Some(online) = self.cached_disk_health(index).await {
|
||||
// return online;
|
||||
// }
|
||||
|
||||
let disk_clone = disk.clone();
|
||||
let online = timeout(DISK_ONLINE_TIMEOUT, async move { disk_clone.is_online().await })
|
||||
.await
|
||||
.unwrap_or(false);
|
||||
self.update_disk_health(index, online).await;
|
||||
online
|
||||
}
|
||||
// let disk_clone = disk.clone();
|
||||
// let online = timeout(DISK_ONLINE_TIMEOUT, async move { disk_clone.is_online().await })
|
||||
// .await
|
||||
// .unwrap_or(false);
|
||||
// self.update_disk_health(index, online).await;
|
||||
// online
|
||||
// }
|
||||
|
||||
async fn filter_online_disks(&self, disks: Vec<Option<DiskStore>>) -> (Vec<Option<DiskStore>>, usize) {
|
||||
let mut filtered = Vec::with_capacity(disks.len());
|
||||
let mut online_count = 0;
|
||||
// async fn filter_online_disks(&self, disks: Vec<Option<DiskStore>>) -> (Vec<Option<DiskStore>>, usize) {
|
||||
// let mut filtered = Vec::with_capacity(disks.len());
|
||||
// let mut online_count = 0;
|
||||
|
||||
for (idx, disk) in disks.into_iter().enumerate() {
|
||||
if let Some(disk_store) = disk {
|
||||
if self.is_disk_online_cached(idx, &disk_store).await {
|
||||
filtered.push(Some(disk_store));
|
||||
online_count += 1;
|
||||
} else {
|
||||
filtered.push(None);
|
||||
}
|
||||
} else {
|
||||
filtered.push(None);
|
||||
}
|
||||
}
|
||||
// for (idx, disk) in disks.into_iter().enumerate() {
|
||||
// if let Some(disk_store) = disk {
|
||||
// if self.is_disk_online_cached(idx, &disk_store).await {
|
||||
// filtered.push(Some(disk_store));
|
||||
// online_count += 1;
|
||||
// } else {
|
||||
// filtered.push(None);
|
||||
// }
|
||||
// } else {
|
||||
// filtered.push(None);
|
||||
// }
|
||||
// }
|
||||
|
||||
(filtered, online_count)
|
||||
}
|
||||
// (filtered, online_count)
|
||||
// }
|
||||
fn format_lock_error(&self, bucket: &str, object: &str, mode: &str, err: &LockResult) -> String {
|
||||
match err {
|
||||
LockResult::Timeout => {
|
||||
@@ -259,9 +259,28 @@ impl SetDisks {
|
||||
}
|
||||
|
||||
async fn get_online_disks(&self) -> Vec<Option<DiskStore>> {
|
||||
let disks = self.get_disks_internal().await;
|
||||
let (filtered, _) = self.filter_online_disks(disks).await;
|
||||
filtered.into_iter().filter(|disk| disk.is_some()).collect()
|
||||
let mut disks = self.get_disks_internal().await;
|
||||
|
||||
// TODO: diskinfo filter online
|
||||
|
||||
let mut new_disk = Vec::with_capacity(disks.len());
|
||||
|
||||
for disk in disks.iter() {
|
||||
if let Some(d) = disk {
|
||||
if d.is_online().await {
|
||||
new_disk.push(disk.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut rng = rand::rng();
|
||||
|
||||
disks.shuffle(&mut rng);
|
||||
|
||||
new_disk
|
||||
// let disks = self.get_disks_internal().await;
|
||||
// let (filtered, _) = self.filter_online_disks(disks).await;
|
||||
// filtered.into_iter().filter(|disk| disk.is_some()).collect()
|
||||
}
|
||||
async fn get_online_local_disks(&self) -> Vec<Option<DiskStore>> {
|
||||
let mut disks = self.get_online_disks().await;
|
||||
@@ -1467,7 +1486,9 @@ impl SetDisks {
|
||||
let object = object.clone();
|
||||
let version_id = version_id.clone();
|
||||
tokio::spawn(async move {
|
||||
if let Some(disk) = disk {
|
||||
if let Some(disk) = disk
|
||||
&& disk.is_online().await
|
||||
{
|
||||
if version_id.is_empty() {
|
||||
match disk.read_xl(&bucket, &object, read_data).await {
|
||||
Ok(info) => {
|
||||
@@ -1799,14 +1820,14 @@ impl SetDisks {
|
||||
}
|
||||
|
||||
pub async fn renew_disk(&self, ep: &Endpoint) {
|
||||
debug!("renew_disk start {:?}", ep);
|
||||
debug!("renew_disk: start {:?}", ep);
|
||||
|
||||
let (new_disk, fm) = match Self::connect_endpoint(ep).await {
|
||||
Ok(res) => res,
|
||||
Err(e) => {
|
||||
warn!("connect_endpoint err {:?}", &e);
|
||||
warn!("renew_disk: connect_endpoint err {:?}", &e);
|
||||
if ep.is_local && e == DiskError::UnformattedDisk {
|
||||
info!("unformatteddisk will trigger heal_disk, {:?}", ep);
|
||||
info!("renew_disk unformatteddisk will trigger heal_disk, {:?}", ep);
|
||||
let set_disk_id = format!("pool_{}_set_{}", ep.pool_idx, ep.set_idx);
|
||||
let _ = send_heal_disk(set_disk_id, Some(HealChannelPriority::Normal)).await;
|
||||
}
|
||||
@@ -1817,7 +1838,7 @@ impl SetDisks {
|
||||
let (set_idx, disk_idx) = match self.find_disk_index(&fm) {
|
||||
Ok(res) => res,
|
||||
Err(e) => {
|
||||
warn!("find_disk_index err {:?}", e);
|
||||
warn!("renew_disk: find_disk_index err {:?}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
@@ -1837,7 +1858,7 @@ impl SetDisks {
|
||||
}
|
||||
}
|
||||
|
||||
debug!("renew_disk update {:?}", fm.erasure.this);
|
||||
debug!("renew_disk: update {:?}", fm.erasure.this);
|
||||
|
||||
let mut disk_lock = self.disks.write().await;
|
||||
disk_lock[disk_idx] = Some(new_disk);
|
||||
@@ -3051,7 +3072,7 @@ impl SetDisks {
|
||||
for (index, disk) in latest_disks.iter().enumerate() {
|
||||
if let Some(outdated_disk) = &out_dated_disks[index] {
|
||||
info!(disk_index = index, "Creating writer for outdated disk");
|
||||
let writer = create_bitrot_writer(
|
||||
let writer = match create_bitrot_writer(
|
||||
is_inline_buffer,
|
||||
Some(outdated_disk),
|
||||
RUSTFS_META_TMP_BUCKET,
|
||||
@@ -3060,7 +3081,19 @@ impl SetDisks {
|
||||
erasure.shard_size(),
|
||||
HashAlgorithm::HighwayHash256,
|
||||
)
|
||||
.await?;
|
||||
.await
|
||||
{
|
||||
Ok(writer) => writer,
|
||||
Err(err) => {
|
||||
warn!(
|
||||
"create_bitrot_writer disk {}, err {:?}, skipping operation",
|
||||
outdated_disk.to_string(),
|
||||
err
|
||||
);
|
||||
writers.push(None);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
writers.push(Some(writer));
|
||||
} else {
|
||||
info!(disk_index = index, "Skipping writer (disk not outdated)");
|
||||
@@ -3790,8 +3823,8 @@ impl ObjectIO for SetDisks {
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(self, data,))]
|
||||
async fn put_object(&self, bucket: &str, object: &str, data: &mut PutObjReader, opts: &ObjectOptions) -> Result<ObjectInfo> {
|
||||
let disks_snapshot = self.get_disks_internal().await;
|
||||
let (disks, filtered_online) = self.filter_online_disks(disks_snapshot).await;
|
||||
let disks = self.get_disks_internal().await;
|
||||
// let (disks, filtered_online) = self.filter_online_disks(disks_snapshot).await;
|
||||
|
||||
// Acquire per-object exclusive lock via RAII guard. It auto-releases asynchronously on drop.
|
||||
let _object_lock_guard = if !opts.no_lock {
|
||||
@@ -3832,13 +3865,13 @@ impl ObjectIO for SetDisks {
|
||||
write_quorum += 1
|
||||
}
|
||||
|
||||
if filtered_online < write_quorum {
|
||||
warn!(
|
||||
"online disk snapshot {} below write quorum {} for {}/{}; returning erasure write quorum error",
|
||||
filtered_online, write_quorum, bucket, object
|
||||
);
|
||||
return Err(to_object_err(Error::ErasureWriteQuorum, vec![bucket, object]));
|
||||
}
|
||||
// if filtered_online < write_quorum {
|
||||
// warn!(
|
||||
// "online disk snapshot {} below write quorum {} for {}/{}; returning erasure write quorum error",
|
||||
// filtered_online, write_quorum, bucket, object
|
||||
// );
|
||||
// return Err(to_object_err(Error::ErasureWriteQuorum, vec![bucket, object]));
|
||||
// }
|
||||
|
||||
let mut fi = FileInfo::new([bucket, object].join("/").as_str(), data_drives, parity_drives);
|
||||
|
||||
@@ -3877,8 +3910,10 @@ impl ObjectIO for SetDisks {
|
||||
let mut writers = Vec::with_capacity(shuffle_disks.len());
|
||||
let mut errors = Vec::with_capacity(shuffle_disks.len());
|
||||
for disk_op in shuffle_disks.iter() {
|
||||
if let Some(disk) = disk_op {
|
||||
let writer = create_bitrot_writer(
|
||||
if let Some(disk) = disk_op
|
||||
&& disk.is_online().await
|
||||
{
|
||||
let writer = match create_bitrot_writer(
|
||||
is_inline_buffer,
|
||||
Some(disk),
|
||||
RUSTFS_META_TMP_BUCKET,
|
||||
@@ -3887,29 +3922,16 @@ impl ObjectIO for SetDisks {
|
||||
erasure.shard_size(),
|
||||
HashAlgorithm::HighwayHash256,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// let writer = if is_inline_buffer {
|
||||
// BitrotWriter::new(
|
||||
// Writer::from_cursor(Cursor::new(Vec::new())),
|
||||
// erasure.shard_size(),
|
||||
// HashAlgorithm::HighwayHash256,
|
||||
// )
|
||||
// } else {
|
||||
// let f = match disk
|
||||
// .create_file("", RUSTFS_META_TMP_BUCKET, &tmp_object, erasure.shard_file_size(data.content_length))
|
||||
// .await
|
||||
// {
|
||||
// Ok(f) => f,
|
||||
// Err(e) => {
|
||||
// errors.push(Some(e));
|
||||
// writers.push(None);
|
||||
// continue;
|
||||
// }
|
||||
// };
|
||||
|
||||
// BitrotWriter::new(Writer::from_tokio_writer(f), erasure.shard_size(), HashAlgorithm::HighwayHash256)
|
||||
// };
|
||||
.await
|
||||
{
|
||||
Ok(writer) => writer,
|
||||
Err(err) => {
|
||||
warn!("create_bitrot_writer disk {}, err {:?}, skipping operation", disk.to_string(), err);
|
||||
errors.push(Some(err));
|
||||
writers.push(None);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
writers.push(Some(writer));
|
||||
errors.push(None);
|
||||
@@ -4072,7 +4094,7 @@ impl StorageAPI for SetDisks {
|
||||
async fn local_storage_info(&self) -> rustfs_madmin::StorageInfo {
|
||||
let disks = self.get_disks_internal().await;
|
||||
|
||||
let mut local_disks: Vec<Option<Arc<disk::Disk>>> = Vec::new();
|
||||
let mut local_disks: Vec<Option<DiskStore>> = Vec::new();
|
||||
let mut local_endpoints = Vec::new();
|
||||
|
||||
for (i, ep) in self.set_endpoints.iter().enumerate() {
|
||||
@@ -4908,9 +4930,7 @@ impl StorageAPI for SetDisks {
|
||||
|
||||
for disk in disks.iter() {
|
||||
if let Some(disk) = disk {
|
||||
if disk.is_online().await {
|
||||
continue;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
let _ = self.add_partial(bucket, object, opts.version_id.as_ref().expect("err")).await;
|
||||
break;
|
||||
@@ -5129,16 +5149,16 @@ impl StorageAPI for SetDisks {
|
||||
return Err(Error::other(format!("checksum mismatch: {checksum}")));
|
||||
}
|
||||
|
||||
let disks_snapshot = self.get_disks_internal().await;
|
||||
let (disks, filtered_online) = self.filter_online_disks(disks_snapshot).await;
|
||||
let disks = self.get_disks_internal().await;
|
||||
// let (disks, filtered_online) = self.filter_online_disks(disks_snapshot).await;
|
||||
|
||||
if filtered_online < write_quorum {
|
||||
warn!(
|
||||
"online disk snapshot {} below write quorum {} for multipart {}/{}; returning erasure write quorum error",
|
||||
filtered_online, write_quorum, bucket, object
|
||||
);
|
||||
return Err(to_object_err(Error::ErasureWriteQuorum, vec![bucket, object]));
|
||||
}
|
||||
// if filtered_online < write_quorum {
|
||||
// warn!(
|
||||
// "online disk snapshot {} below write quorum {} for multipart {}/{}; returning erasure write quorum error",
|
||||
// filtered_online, write_quorum, bucket, object
|
||||
// );
|
||||
// return Err(to_object_err(Error::ErasureWriteQuorum, vec![bucket, object]));
|
||||
// }
|
||||
|
||||
let shuffle_disks = Self::shuffle_disks(&disks, &fi.erasure.distribution);
|
||||
|
||||
@@ -5152,7 +5172,7 @@ impl StorageAPI for SetDisks {
|
||||
let mut errors = Vec::with_capacity(shuffle_disks.len());
|
||||
for disk_op in shuffle_disks.iter() {
|
||||
if let Some(disk) = disk_op {
|
||||
let writer = create_bitrot_writer(
|
||||
let writer = match create_bitrot_writer(
|
||||
false,
|
||||
Some(disk),
|
||||
RUSTFS_META_TMP_BUCKET,
|
||||
@@ -5161,23 +5181,16 @@ impl StorageAPI for SetDisks {
|
||||
erasure.shard_size(),
|
||||
HashAlgorithm::HighwayHash256,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// let writer = {
|
||||
// let f = match disk
|
||||
// .create_file("", RUSTFS_META_TMP_BUCKET, &tmp_part_path, erasure.shard_file_size(data.content_length))
|
||||
// .await
|
||||
// {
|
||||
// Ok(f) => f,
|
||||
// Err(e) => {
|
||||
// errors.push(Some(e));
|
||||
// writers.push(None);
|
||||
// continue;
|
||||
// }
|
||||
// };
|
||||
|
||||
// BitrotWriter::new(Writer::from_tokio_writer(f), erasure.shard_size(), HashAlgorithm::HighwayHash256)
|
||||
// };
|
||||
.await
|
||||
{
|
||||
Ok(writer) => writer,
|
||||
Err(err) => {
|
||||
warn!("create_bitrot_writer disk {}, err {:?}, skipping operation", disk.to_string(), err);
|
||||
errors.push(Some(err));
|
||||
writers.push(None);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
writers.push(Some(writer));
|
||||
errors.push(None);
|
||||
@@ -6769,7 +6782,7 @@ async fn get_disks_info(disks: &[Option<DiskStore>], eps: &[Endpoint]) -> Vec<ru
        healing: res.healing,
        scanning: res.scanning,

        uuid: res.id.clone(),
        uuid: res.id.map_or("".to_string(), |id| id.to_string()),
        major: res.major as u32,
        minor: res.minor as u32,
        model: None,

@@ -40,7 +40,7 @@ use futures::future::join_all;
|
||||
use http::HeaderMap;
|
||||
use rustfs_common::heal_channel::HealOpts;
|
||||
use rustfs_common::{
|
||||
globals::GLOBAL_LOCAL_NODE_NAME,
|
||||
GLOBAL_LOCAL_NODE_NAME,
|
||||
heal_channel::{DriveState, HealItemType},
|
||||
};
|
||||
use rustfs_filemeta::FileInfo;
|
||||
@@ -255,7 +255,7 @@ impl Sets {
        self.connect_disks().await;

        // TODO: config interval
        let mut interval = tokio::time::interval(Duration::from_secs(15 * 3));
        let mut interval = tokio::time::interval(Duration::from_secs(15));
        loop {
            tokio::select! {
                _= interval.tick()=>{

@@ -55,8 +55,8 @@ use futures::future::join_all;
|
||||
use http::HeaderMap;
|
||||
use lazy_static::lazy_static;
|
||||
use rand::Rng as _;
|
||||
use rustfs_common::globals::{GLOBAL_LOCAL_NODE_NAME, GLOBAL_RUSTFS_HOST, GLOBAL_RUSTFS_PORT};
|
||||
use rustfs_common::heal_channel::{HealItemType, HealOpts};
|
||||
use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, GLOBAL_RUSTFS_HOST, GLOBAL_RUSTFS_PORT};
|
||||
use rustfs_filemeta::FileInfo;
|
||||
use rustfs_madmin::heal_commands::HealResultItem;
|
||||
use rustfs_utils::path::{SLASH_SEPARATOR, decode_dir_object, encode_dir_object, path_join_buf};
|
||||
|
||||
@@ -265,7 +265,10 @@ pub async fn load_format_erasure(disk: &DiskStore, heal: bool) -> disk::error::R
|
||||
.map_err(|e| match e {
|
||||
DiskError::FileNotFound => DiskError::UnformattedDisk,
|
||||
DiskError::DiskNotFound => DiskError::UnformattedDisk,
|
||||
_ => e,
|
||||
_ => {
|
||||
warn!("load_format_erasure err: {:?} {:?}", disk.to_string(), e);
|
||||
e
|
||||
}
|
||||
})?;
|
||||
|
||||
let mut fm = FormatV3::try_from(data.as_ref())?;
|
||||
@@ -312,17 +315,18 @@ async fn save_format_file_all(disks: &[Option<DiskStore>], formats: &[Option<For
}

pub async fn save_format_file(disk: &Option<DiskStore>, format: &Option<FormatV3>) -> disk::error::Result<()> {
    if disk.is_none() {
    let Some(disk) = disk else {
        return Err(DiskError::DiskNotFound);
    }
    };

    let format = format.as_ref().unwrap();
    let Some(format) = format else {
        return Err(DiskError::other("format is none"));
    };

    let json_data = format.to_json()?;

    let tmpfile = Uuid::new_v4().to_string();

    let disk = disk.as_ref().unwrap();
    disk.write_all(RUSTFS_META_BUCKET, tmpfile.as_str(), json_data.into_bytes().into())
        .await?;

@@ -15,8 +15,7 @@
|
||||
use crate::config::storageclass::STANDARD;
|
||||
use crate::disk::RUSTFS_META_BUCKET;
|
||||
use regex::Regex;
|
||||
use rustfs_utils::http::headers::AMZ_OBJECT_TAGGING;
|
||||
use rustfs_utils::http::headers::AMZ_STORAGE_CLASS;
|
||||
use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, AMZ_STORAGE_CLASS};
|
||||
use std::collections::HashMap;
|
||||
use std::io::{Error, Result};
|
||||
|
||||
|
||||
@@ -1,231 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_variables)]
|
||||
#![allow(unused_mut)]
|
||||
#![allow(unused_assignments)]
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use azure_core::http::{Body, ClientOptions, RequestContent};
|
||||
use azure_storage::StorageCredentials;
|
||||
use azure_storage_blobs::prelude::*;
|
||||
|
||||
use crate::client::{
|
||||
admin_handler_utils::AdminError,
|
||||
api_put_object::PutObjectOptions,
|
||||
transition_api::{Options, ReadCloser, ReaderImpl},
|
||||
};
|
||||
use crate::tier::{
|
||||
tier_config::TierAzure,
|
||||
warm_backend::{WarmBackend, WarmBackendGetOpts},
|
||||
};
|
||||
use tracing::warn;
|
||||
|
||||
const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
|
||||
const MAX_PARTS_COUNT: i64 = 10000;
|
||||
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
|
||||
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;
|
||||
|
||||
pub struct WarmBackendAzure {
|
||||
pub client: Arc<BlobServiceClient>,
|
||||
pub bucket: String,
|
||||
pub prefix: String,
|
||||
pub storage_class: String,
|
||||
}
|
||||
|
||||
impl WarmBackendAzure {
|
||||
pub async fn new(conf: &TierAzure, tier: &str) -> Result<Self, std::io::Error> {
|
||||
if conf.access_key == "" || conf.secret_key == "" {
|
||||
return Err(std::io::Error::other("both access and secret keys are required"));
|
||||
}
|
||||
|
||||
if conf.bucket == "" {
|
||||
return Err(std::io::Error::other("no bucket name was provided"));
|
||||
}
|
||||
|
||||
let creds = StorageCredentials::access_key(conf.access_key.clone(), conf.secret_key.clone());
|
||||
let client = ClientBuilder::new(conf.access_key.clone(), creds)
|
||||
//.endpoint(conf.endpoint)
|
||||
.blob_service_client();
|
||||
let client = Arc::new(client);
|
||||
Ok(Self {
|
||||
client,
|
||||
bucket: conf.bucket.clone(),
|
||||
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
|
||||
storage_class: "".to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
/*pub fn tier(&self) -> *blob.AccessTier {
|
||||
if self.storage_class == "" {
|
||||
return None;
|
||||
}
|
||||
for t in blob.PossibleAccessTierValues() {
|
||||
if strings.EqualFold(self.storage_class, t) {
|
||||
return &t
|
||||
}
|
||||
}
|
||||
None
|
||||
}*/
|
||||
|
||||
pub fn get_dest(&self, object: &str) -> String {
|
||||
let mut dest_obj = object.to_string();
|
||||
if self.prefix != "" {
|
||||
dest_obj = format!("{}/{}", &self.prefix, object);
|
||||
}
|
||||
return dest_obj;
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WarmBackend for WarmBackendAzure {
|
||||
async fn put_with_meta(
|
||||
&self,
|
||||
object: &str,
|
||||
r: ReaderImpl,
|
||||
length: i64,
|
||||
meta: HashMap<String, String>,
|
||||
) -> Result<String, std::io::Error> {
|
||||
let part_size = length;
|
||||
let client = self.client.clone();
|
||||
let container_client = client.container_client(self.bucket.clone());
|
||||
let blob_client = container_client.blob_client(self.get_dest(object));
|
||||
/*let res = blob_client
|
||||
.upload(
|
||||
RequestContent::from(match r {
|
||||
ReaderImpl::Body(content_body) => content_body.to_vec(),
|
||||
ReaderImpl::ObjectBody(mut content_body) => content_body.read_all().await?,
|
||||
}),
|
||||
false,
|
||||
length as u64,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
else {
|
||||
return Err(std::io::Error::other("upload error"));
|
||||
};*/
|
||||
|
||||
let Ok(res) = blob_client
|
||||
.put_block_blob(match r {
|
||||
ReaderImpl::Body(content_body) => content_body.to_vec(),
|
||||
ReaderImpl::ObjectBody(mut content_body) => content_body.read_all().await?,
|
||||
})
|
||||
.content_type("text/plain")
|
||||
.into_future()
|
||||
.await
|
||||
else {
|
||||
return Err(std::io::Error::other("put_block_blob error"));
|
||||
};
|
||||
|
||||
//self.ToObjectError(err, object)
|
||||
Ok(res.request_id.to_string())
|
||||
}
|
||||
|
||||
async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
|
||||
self.put_with_meta(object, r, length, HashMap::new()).await
|
||||
}
|
||||
|
||||
async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
|
||||
let client = self.client.clone();
|
||||
let container_client = client.container_client(self.bucket.clone());
|
||||
let blob_client = container_client.blob_client(self.get_dest(object));
|
||||
blob_client.get();
|
||||
todo!();
|
||||
}
|
||||
|
||||
async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
|
||||
let client = self.client.clone();
|
||||
let container_client = client.container_client(self.bucket.clone());
|
||||
let blob_client = container_client.blob_client(self.get_dest(object));
|
||||
blob_client.delete();
|
||||
todo!();
|
||||
}
|
||||
|
||||
async fn in_use(&self) -> Result<bool, std::io::Error> {
|
||||
/*let result = self.client
|
||||
.list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR, 1)
|
||||
.await?;
|
||||
|
||||
Ok(result.common_prefixes.len() > 0 || result.contents.len() > 0)*/
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
/*fn azure_to_object_error(err: Error, params: Vec<String>) -> Option<error> {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
bucket := ""
|
||||
object := ""
|
||||
if len(params) >= 1 {
|
||||
bucket = params[0]
|
||||
}
|
||||
if len(params) == 2 {
|
||||
object = params[1]
|
||||
}
|
||||
|
||||
azureErr, ok := err.(*azcore.ResponseError)
|
||||
if !ok {
|
||||
// We don't interpret non Azure errors. As azure errors will
|
||||
// have StatusCode to help to convert to object errors.
|
||||
return err
|
||||
}
|
||||
|
||||
serviceCode := azureErr.ErrorCode
|
||||
statusCode := azureErr.StatusCode
|
||||
|
||||
azureCodesToObjectError(err, serviceCode, statusCode, bucket, object)
|
||||
}*/
|
||||
|
||||
/*fn azure_codes_to_object_error(err: Error, service_code: String, status_code: i32, bucket: String, object: String) -> Option<Error> {
|
||||
switch serviceCode {
|
||||
case "ContainerNotFound", "ContainerBeingDeleted":
|
||||
err = BucketNotFound{Bucket: bucket}
|
||||
case "ContainerAlreadyExists":
|
||||
err = BucketExists{Bucket: bucket}
|
||||
case "InvalidResourceName":
|
||||
err = BucketNameInvalid{Bucket: bucket}
|
||||
case "RequestBodyTooLarge":
|
||||
err = PartTooBig{}
|
||||
case "InvalidMetadata":
|
||||
err = UnsupportedMetadata{}
|
||||
case "BlobAccessTierNotSupportedForAccountType":
|
||||
err = NotImplemented{}
|
||||
case "OutOfRangeInput":
|
||||
err = ObjectNameInvalid{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
default:
|
||||
switch statusCode {
|
||||
case http.StatusNotFound:
|
||||
if object != "" {
|
||||
err = ObjectNotFound{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
} else {
|
||||
err = BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
case http.StatusBadRequest:
|
||||
err = BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}*/
|
||||
@@ -35,12 +35,11 @@ uuid = { workspace = true, features = ["v4", "fast-rng", "serde"] }
tokio = { workspace = true, features = ["io-util", "macros", "sync"] }
xxhash-rust = { workspace = true, features = ["xxh64"] }
bytes.workspace = true
rustfs-utils = { workspace = true, features = ["hash","http"] }
rustfs-utils = { workspace = true, features = ["hash", "http"] }
byteorder = { workspace = true }
tracing.workspace = true
thiserror.workspace = true
s3s.workspace = true
lazy_static.workspace = true
regex.workspace = true

[dev-dependencies]

@@ -1,52 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub const AMZ_META_UNENCRYPTED_CONTENT_LENGTH: &str = "X-Amz-Meta-X-Amz-Unencrypted-Content-Length";
|
||||
pub const AMZ_META_UNENCRYPTED_CONTENT_MD5: &str = "X-Amz-Meta-X-Amz-Unencrypted-Content-Md5";
|
||||
|
||||
pub const AMZ_STORAGE_CLASS: &str = "x-amz-storage-class";
|
||||
|
||||
pub const RESERVED_METADATA_PREFIX: &str = "X-RustFS-Internal-";
|
||||
pub const RESERVED_METADATA_PREFIX_LOWER: &str = "x-rustfs-internal-";
|
||||
|
||||
pub const RUSTFS_HEALING: &str = "X-Rustfs-Internal-healing";
|
||||
// pub const RUSTFS_DATA_MOVE: &str = "X-Rustfs-Internal-data-mov";
|
||||
|
||||
// pub const X_RUSTFS_INLINE_DATA: &str = "x-rustfs-inline-data";
|
||||
|
||||
pub const VERSION_PURGE_STATUS_KEY: &str = "X-Rustfs-Internal-purgestatus";
|
||||
|
||||
pub const X_RUSTFS_HEALING: &str = "X-Rustfs-Internal-healing";
|
||||
pub const X_RUSTFS_DATA_MOV: &str = "X-Rustfs-Internal-data-mov";
|
||||
|
||||
pub const AMZ_OBJECT_TAGGING: &str = "X-Amz-Tagging";
|
||||
pub const AMZ_BUCKET_REPLICATION_STATUS: &str = "X-Amz-Replication-Status";
|
||||
pub const AMZ_DECODED_CONTENT_LENGTH: &str = "X-Amz-Decoded-Content-Length";
|
||||
|
||||
pub const RUSTFS_DATA_MOVE: &str = "X-Rustfs-Internal-data-mov";
|
||||
|
||||
// Server-side encryption headers
|
||||
pub const AMZ_SERVER_SIDE_ENCRYPTION: &str = "x-amz-server-side-encryption";
|
||||
pub const AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID: &str = "x-amz-server-side-encryption-aws-kms-key-id";
|
||||
pub const AMZ_SERVER_SIDE_ENCRYPTION_CONTEXT: &str = "x-amz-server-side-encryption-context";
|
||||
pub const AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: &str = "x-amz-server-side-encryption-customer-algorithm";
|
||||
pub const AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: &str = "x-amz-server-side-encryption-customer-key";
|
||||
pub const AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: &str = "x-amz-server-side-encryption-customer-key-md5";
|
||||
|
||||
// SSE-C copy source headers
|
||||
pub const AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: &str =
|
||||
"x-amz-copy-source-server-side-encryption-customer-algorithm";
|
||||
pub const AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: &str = "x-amz-copy-source-server-side-encryption-customer-key";
|
||||
pub const AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: &str =
|
||||
"x-amz-copy-source-server-side-encryption-customer-key-md5";
|
||||
@@ -19,6 +19,7 @@ use rustfs_utils::http::RESERVED_METADATA_PREFIX_LOWER;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::LazyLock;
|
||||
use std::time::Duration;
|
||||
use time::OffsetDateTime;
|
||||
use uuid::Uuid;
|
||||
@@ -773,9 +774,7 @@ impl ReplicationWorkerOperation for ReplicateObjectInfo {
|
||||
}
|
||||
}
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
static ref REPL_STATUS_REGEX: Regex = Regex::new(r"([^=].*?)=([^,].*?);").unwrap();
|
||||
}
|
||||
static REPL_STATUS_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"([^=].*?)=([^,].*?);").unwrap());
|
||||
|
||||
impl ReplicateObjectInfo {
|
||||
/// Returns replication status of a target
|
||||
|
||||
@@ -29,6 +29,7 @@ documentation = "https://docs.rs/rustfs-iam/latest/rustfs_iam/"
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
rustfs-credentials = { workspace = true }
|
||||
tokio.workspace = true
|
||||
time = { workspace = true, features = ["serde-human-readable"] }
|
||||
serde = { workspace = true, features = ["derive", "rc"] }
|
||||
|
||||
@@ -109,6 +109,9 @@ pub enum Error {
|
||||
|
||||
#[error("io error: {0}")]
|
||||
Io(std::io::Error),
|
||||
|
||||
#[error("system already initialized")]
|
||||
IamSysAlreadyInitialized,
|
||||
}
|
||||
|
||||
impl PartialEq for Error {
|
||||
@@ -162,6 +165,7 @@ impl Clone for Error {
|
||||
Error::PolicyTooLarge => Error::PolicyTooLarge,
|
||||
Error::ConfigNotFound => Error::ConfigNotFound,
|
||||
Error::Io(e) => Error::Io(std::io::Error::new(e.kind(), e.to_string())),
|
||||
Error::IamSysAlreadyInitialized => Error::IamSysAlreadyInitialized,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -226,6 +230,7 @@ impl From<rustfs_policy::error::Error> for Error {
|
||||
rustfs_policy::error::Error::StringError(s) => Error::StringError(s),
|
||||
rustfs_policy::error::Error::CryptoError(e) => Error::CryptoError(e),
|
||||
rustfs_policy::error::Error::ErrCredMalformed => Error::ErrCredMalformed,
|
||||
rustfs_policy::error::Error::IamSysAlreadyInitialized => Error::IamSysAlreadyInitialized,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,30 +18,58 @@ use rustfs_ecstore::store::ECStore;
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use store::object::ObjectStore;
|
||||
use sys::IamSys;
|
||||
use tracing::{debug, instrument};
|
||||
use tracing::{error, info, instrument};
|
||||
|
||||
pub mod cache;
|
||||
pub mod error;
|
||||
pub mod manager;
|
||||
pub mod store;
|
||||
pub mod utils;
|
||||
|
||||
pub mod sys;
|
||||
pub mod utils;
|
||||
|
||||
static IAM_SYS: OnceLock<Arc<IamSys<ObjectStore>>> = OnceLock::new();
|
||||
|
||||
#[instrument(skip(ecstore))]
|
||||
pub async fn init_iam_sys(ecstore: Arc<ECStore>) -> Result<()> {
|
||||
debug!("init iam system");
|
||||
let s = IamCache::new(ObjectStore::new(ecstore)).await;
|
||||
if IAM_SYS.get().is_some() {
|
||||
info!("IAM system already initialized, skipping.");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
IAM_SYS.get_or_init(move || IamSys::new(s).into());
|
||||
info!("Starting IAM system initialization sequence...");
|
||||
|
||||
// 1. Create the persistent storage adapter
|
||||
let storage_adapter = ObjectStore::new(ecstore);
|
||||
|
||||
// 2. Create the cache manager.
|
||||
// The `new` method now performs a blocking initial load from disk.
|
||||
let cache_manager = IamCache::new(storage_adapter).await;
|
||||
|
||||
// 3. Construct the system interface
|
||||
let iam_instance = Arc::new(IamSys::new(cache_manager));
|
||||
|
||||
// 4. Securely set the global singleton
|
||||
if IAM_SYS.set(iam_instance).is_err() {
|
||||
error!("Critical: Race condition detected during IAM initialization!");
|
||||
return Err(Error::IamSysAlreadyInitialized);
|
||||
}
|
||||
|
||||
info!("IAM system initialization completed successfully.");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get() -> Result<Arc<IamSys<ObjectStore>>> {
|
||||
IAM_SYS.get().map(Arc::clone).ok_or(Error::IamSysNotInitialized)
|
||||
let sys = IAM_SYS.get().map(Arc::clone).ok_or(Error::IamSysNotInitialized)?;
|
||||
|
||||
// Double-check the internal readiness state. The OnceLock is only set
|
||||
// after initialization and data loading complete, so this is a defensive
|
||||
// guard to ensure callers never operate on a partially initialized system.
|
||||
if !sys.is_ready() {
|
||||
return Err(Error::IamSysNotInitialized);
|
||||
}
|
||||
|
||||
Ok(sys)
|
||||
}
|
||||
|
||||
pub fn get_global_iam_sys() -> Option<Arc<IamSys<ObjectStore>>> {
|
||||
|
||||
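The hardened get() above only returns a handle once the readiness flag is set, so any caller that can race with startup has to treat an Err as transient. A minimal sketch of such a caller, assuming the crate is consumed as rustfs_iam and treating every Err from get() as "not ready yet"; the polling loop is illustrative and not part of this change:

use std::time::Duration;

// Illustrative helper: poll rustfs_iam::get() until it yields a ready IamSys handle.
async fn wait_for_iam_ready(max_attempts: usize) -> bool {
    for _ in 0..max_attempts {
        match rustfs_iam::get() {
            // get() only succeeds after IAM_SYS is set and is_ready() passes.
            Ok(_sys) => return true,
            // Treat IamSysNotInitialized (or any other error) as "try again shortly".
            Err(_) => tokio::time::sleep(Duration::from_millis(100)).await,
        }
    }
    false
}
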
@@ -24,19 +24,18 @@ use crate::{
|
||||
},
|
||||
};
|
||||
use futures::future::join_all;
|
||||
use rustfs_ecstore::global::get_global_action_cred;
|
||||
use rustfs_credentials::{Credentials, EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, get_global_action_cred};
|
||||
use rustfs_madmin::{AccountStatus, AddOrUpdateUserReq, GroupDesc};
|
||||
use rustfs_policy::{
|
||||
arn::ARN,
|
||||
auth::{self, Credentials, UserIdentity, is_secret_key_valid, jwt_sign},
|
||||
auth::{self, UserIdentity, is_secret_key_valid, jwt_sign},
|
||||
format::Format,
|
||||
policy::{
|
||||
EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, Policy, PolicyDoc, default::DEFAULT_POLICIES, iam_policy_claim_name_sa,
|
||||
},
|
||||
policy::{Policy, PolicyDoc, default::DEFAULT_POLICIES, iam_policy_claim_name_sa},
|
||||
};
|
||||
use rustfs_utils::path::path_join_buf;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::sync::atomic::AtomicU8;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{
|
||||
@@ -76,9 +75,19 @@ fn get_iam_format_file_path() -> String {
|
||||
path_join_buf(&[&IAM_CONFIG_PREFIX, IAM_FORMAT_FILE])
|
||||
}
|
||||
|
||||
#[repr(u8)]
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum IamState {
|
||||
Uninitialized = 0,
|
||||
Loading = 1,
|
||||
Ready = 2,
|
||||
Error = 3,
|
||||
}
|
||||
|
||||
pub struct IamCache<T> {
|
||||
pub cache: Cache,
|
||||
pub api: T,
|
||||
pub state: Arc<AtomicU8>,
|
||||
pub loading: Arc<AtomicBool>,
|
||||
pub roles: HashMap<ARN, Vec<String>>,
|
||||
pub send_chan: Sender<i64>,
|
||||
@@ -89,12 +98,19 @@ impl<T> IamCache<T>
|
||||
where
|
||||
T: Store,
|
||||
{
|
||||
/// Create a new IAM system instance
|
||||
/// # Arguments
|
||||
/// * `api` - The storage backend implementing the Store trait
|
||||
///
|
||||
/// # Returns
|
||||
/// An Arc-wrapped instance of IamSystem
|
||||
pub(crate) async fn new(api: T) -> Arc<Self> {
|
||||
let (sender, receiver) = mpsc::channel::<i64>(100);
|
||||
|
||||
let sys = Arc::new(Self {
|
||||
api,
|
||||
cache: Cache::default(),
|
||||
state: Arc::new(AtomicU8::new(IamState::Uninitialized as u8)),
|
||||
loading: Arc::new(AtomicBool::new(false)),
|
||||
send_chan: sender,
|
||||
roles: HashMap::new(),
|
||||
@@ -105,10 +121,32 @@ where
|
||||
sys
|
||||
}
|
||||
|
||||
/// Initialize the IAM system
|
||||
async fn init(self: Arc<Self>, receiver: Receiver<i64>) -> Result<()> {
|
||||
self.state.store(IamState::Loading as u8, Ordering::SeqCst);
|
||||
// Ensure the IAM format file is persisted first
|
||||
self.clone().save_iam_formatter().await?;
|
||||
self.clone().load().await?;
|
||||
|
||||
// Critical: Load all existing users/policies into memory cache
|
||||
const MAX_RETRIES: usize = 3;
|
||||
for attempt in 0..MAX_RETRIES {
|
||||
if let Err(e) = self.clone().load().await {
|
||||
if attempt == MAX_RETRIES - 1 {
|
||||
self.state.store(IamState::Error as u8, Ordering::SeqCst);
|
||||
error!("IAM fail to load initial data after {} attempts: {:?}", MAX_RETRIES, e);
|
||||
return Err(e);
|
||||
} else {
|
||||
warn!("IAM load failed, retrying... attempt {}", attempt + 1);
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
self.state.store(IamState::Ready as u8, Ordering::SeqCst);
|
||||
info!("IAM System successfully initialized and marked as READY");
|
||||
|
||||
// Background ticker for synchronization
|
||||
// Check if environment variable is set
|
||||
let skip_background_task = std::env::var("RUSTFS_SKIP_BACKGROUND_TASK").is_ok();
|
||||
|
||||
@@ -152,6 +190,11 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if IAM system is ready
|
||||
pub fn is_ready(&self) -> bool {
|
||||
self.state.load(Ordering::SeqCst) == IamState::Ready as u8
|
||||
}
|
||||
|
||||
async fn _notify(&self) {
|
||||
self.send_chan.send(OffsetDateTime::now_utc().unix_timestamp()).await.unwrap();
|
||||
}
|
||||
|
||||
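The IamState enum plus AtomicU8 above is a small lock-free state machine; this standalone sketch (names are illustrative, not the crate's) shows the store/load round trip that is_ready() relies on:

use std::sync::atomic::{AtomicU8, Ordering};

#[repr(u8)]
#[derive(Clone, Copy)]
enum State { Uninitialized = 0, Loading = 1, Ready = 2, Error = 3 }

struct Lifecycle { state: AtomicU8 }

impl Lifecycle {
    fn new() -> Self { Self { state: AtomicU8::new(State::Uninitialized as u8) } }
    fn set(&self, s: State) {
        // SeqCst, as in the diff: readers must not observe Ready before the store lands.
        self.state.store(s as u8, Ordering::SeqCst);
    }
    fn is_ready(&self) -> bool { self.state.load(Ordering::SeqCst) == State::Ready as u8 }
}
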
@@ -20,6 +20,7 @@ use crate::{
|
||||
manager::{extract_jwt_claims, get_default_policyes},
|
||||
};
|
||||
use futures::future::join_all;
|
||||
use rustfs_credentials::get_global_action_cred;
|
||||
use rustfs_ecstore::StorageAPI as _;
|
||||
use rustfs_ecstore::store_api::{ObjectInfoOrErr, WalkOptions};
|
||||
use rustfs_ecstore::{
|
||||
@@ -27,7 +28,6 @@ use rustfs_ecstore::{
|
||||
RUSTFS_CONFIG_PREFIX,
|
||||
com::{delete_config, read_config, read_config_with_metadata, save_config},
|
||||
},
|
||||
global::get_global_action_cred,
|
||||
store::ECStore,
|
||||
store_api::{ObjectInfo, ObjectOptions},
|
||||
};
|
||||
@@ -38,7 +38,7 @@ use std::sync::LazyLock;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use tokio::sync::mpsc::{self, Sender};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{info, warn};
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
pub static IAM_CONFIG_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam"));
|
||||
pub static IAM_CONFIG_USERS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/users/"));
|
||||
@@ -341,6 +341,27 @@ impl ObjectStore {
|
||||
Ok(policies)
|
||||
}
|
||||
|
||||
/// Checks if the underlying ECStore is ready for metadata operations.
|
||||
/// This prevents silent failures during the storage boot-up phase.
|
||||
///
|
||||
/// Performs a lightweight probe by attempting to read a known configuration object.
|
||||
/// If the object is not found, it indicates the storage metadata is not ready.
|
||||
/// The upper-level caller should handle retries if needed.
|
||||
async fn check_storage_readiness(&self) -> Result<()> {
|
||||
// Probe path for a fixed object under the IAM root prefix.
|
||||
// If it doesn't exist, the system bucket or metadata is not ready.
|
||||
let probe_path = format!("{}/format.json", *IAM_CONFIG_PREFIX);
|
||||
|
||||
match read_config(self.object_api.clone(), &probe_path).await {
|
||||
Ok(_) => Ok(()),
|
||||
Err(rustfs_ecstore::error::StorageError::ConfigNotFound) => Err(Error::other(format!(
|
||||
"Storage metadata not ready: probe object '{}' not found (expected IAM config to be initialized)",
|
||||
probe_path
|
||||
))),
|
||||
Err(e) => Err(e.into()),
|
||||
}
|
||||
}
|
||||
|
||||
// async fn load_policy(&self, name: &str) -> Result<PolicyDoc> {
|
||||
// let mut policy = self
|
||||
// .load_iam_config::<PolicyDoc>(&format!("config/iam/policies/{name}/policy.json"))
|
||||
@@ -389,22 +410,58 @@ impl Store for ObjectStore {
|
||||
data = match Self::decrypt_data(&data) {
|
||||
Ok(v) => v,
|
||||
Err(err) => {
|
||||
warn!("delete the config file when decrypt failed failed: {}, path: {}", err, path.as_ref());
|
||||
// delete the config file when decrypt failed
|
||||
let _ = self.delete_iam_config(path.as_ref()).await;
|
||||
warn!("config decrypt failed, keeping file: {}, path: {}", err, path.as_ref());
|
||||
// keep the config file when decrypt failed - do not delete
|
||||
return Err(Error::ConfigNotFound);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(serde_json::from_slice(&data)?)
|
||||
}
|
||||
/// Saves IAM configuration with a retry mechanism on failure.
|
||||
///
|
||||
/// Attempts to save the IAM configuration up to 5 times if the storage layer is not ready,
|
||||
/// using exponential backoff between attempts (starting at 200ms, doubling each retry).
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `item` - The IAM configuration item to save, must implement `Serialize` and `Send`.
|
||||
/// * `path` - The path where the configuration will be saved.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<()>` - `Ok(())` on success, or an `Error` if all attempts fail.
|
||||
#[tracing::instrument(level = "debug", skip(self, item, path))]
|
||||
async fn save_iam_config<Item: Serialize + Send>(&self, item: Item, path: impl AsRef<str> + Send) -> Result<()> {
|
||||
let mut data = serde_json::to_vec(&item)?;
|
||||
data = Self::encrypt_data(&data)?;
|
||||
|
||||
save_config(self.object_api.clone(), path.as_ref(), data).await?;
|
||||
Ok(())
|
||||
let mut attempts = 0;
|
||||
let max_attempts = 5;
|
||||
let path_ref = path.as_ref();
|
||||
|
||||
loop {
|
||||
match save_config(self.object_api.clone(), path_ref, data.clone()).await {
|
||||
Ok(_) => {
|
||||
debug!("Successfully saved IAM config to {}", path_ref);
|
||||
return Ok(());
|
||||
}
|
||||
Err(e) if attempts < max_attempts => {
|
||||
attempts += 1;
|
||||
// Exponential backoff: 200ms, 400ms, 800ms...
|
||||
let wait_ms = 200 * (1 << attempts);
|
||||
warn!(
|
||||
"Storage layer not ready for IAM write (attempt {}/{}). Retrying in {}ms. Path: {}, Error: {:?}",
|
||||
attempts, max_attempts, wait_ms, path_ref, e
|
||||
);
|
||||
tokio::time::sleep(std::time::Duration::from_millis(wait_ms)).await;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Final failure saving IAM config to {}: {:?}", path_ref, e);
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
async fn delete_iam_config(&self, path: impl AsRef<str> + Send) -> Result<()> {
|
||||
delete_config(self.object_api.clone(), path.as_ref()).await?;
|
||||
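One detail of the retry added to save_iam_config above: attempts is incremented before the shift, so 200 * (1 << attempts) produces delays of 400 ms, 800 ms, 1600 ms and so on rather than starting at 200 ms. A tiny standalone check of that schedule (illustrative only, not part of the diff):

fn backoff_ms(failures_so_far: u64) -> u64 {
    // Mirrors the diff: attempts has already been bumped to failures_so_far + 1 before the shift.
    200 * (1u64 << (failures_so_far + 1))
}

fn main() {
    for n in 0..4 {
        // Prints 400, 800, 1600, 3200.
        println!("retry {} waits {} ms", n + 1, backoff_ms(n));
    }
}
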
@@ -418,8 +475,16 @@ impl Store for ObjectStore {
|
||||
user_identity: UserIdentity,
|
||||
_ttl: Option<usize>,
|
||||
) -> Result<()> {
|
||||
self.save_iam_config(user_identity, get_user_identity_path(name, user_type))
|
||||
.await
|
||||
// Pre-check storage health
|
||||
self.check_storage_readiness().await?;
|
||||
|
||||
let path = get_user_identity_path(name, user_type);
|
||||
debug!("Saving IAM identity to path: {}", path);
|
||||
|
||||
self.save_iam_config(user_identity, path).await.map_err(|e| {
|
||||
error!("ObjectStore save failure for {}: {:?}", name, e);
|
||||
e
|
||||
})
|
||||
}
|
||||
async fn delete_user_identity(&self, name: &str, user_type: UserType) -> Result<()> {
|
||||
self.delete_iam_config(get_user_identity_path(name, user_type))
|
||||
|
||||
@@ -24,19 +24,18 @@ use crate::store::MappedPolicy;
|
||||
use crate::store::Store;
|
||||
use crate::store::UserType;
|
||||
use crate::utils::extract_claims;
|
||||
use rustfs_ecstore::global::get_global_action_cred;
|
||||
use rustfs_credentials::{Credentials, EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, get_global_action_cred};
|
||||
use rustfs_ecstore::notification_sys::get_global_notification_sys;
|
||||
use rustfs_madmin::AddOrUpdateUserReq;
|
||||
use rustfs_madmin::GroupDesc;
|
||||
use rustfs_policy::arn::ARN;
|
||||
use rustfs_policy::auth::Credentials;
|
||||
use rustfs_policy::auth::{
|
||||
ACCOUNT_ON, UserIdentity, contains_reserved_chars, create_new_credentials_with_metadata, generate_credentials,
|
||||
is_access_key_valid, is_secret_key_valid,
|
||||
};
|
||||
use rustfs_policy::policy::Args;
|
||||
use rustfs_policy::policy::opa;
|
||||
use rustfs_policy::policy::{EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, Policy, PolicyDoc, iam_policy_claim_name_sa};
|
||||
use rustfs_policy::policy::{Policy, PolicyDoc, iam_policy_claim_name_sa};
|
||||
use serde_json::Value;
|
||||
use serde_json::json;
|
||||
use std::collections::HashMap;
|
||||
@@ -67,6 +66,13 @@ pub struct IamSys<T> {
|
||||
}
|
||||
|
||||
impl<T: Store> IamSys<T> {
|
||||
/// Create a new IamSys instance with the given IamCache store
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `store` - An Arc to the IamCache instance
|
||||
///
|
||||
/// # Returns
|
||||
/// A new instance of IamSys
|
||||
pub fn new(store: Arc<IamCache<T>>) -> Self {
|
||||
tokio::spawn(async move {
|
||||
match opa::lookup_config().await {
|
||||
@@ -87,6 +93,11 @@ impl<T: Store> IamSys<T> {
|
||||
roles_map: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if the IamSys has a watcher configured
|
||||
///
|
||||
/// # Returns
|
||||
/// `true` if a watcher is configured, `false` otherwise
|
||||
pub fn has_watcher(&self) -> bool {
|
||||
self.store.api.has_watcher()
|
||||
}
|
||||
@@ -859,6 +870,11 @@ impl<T: Store> IamSys<T> {
|
||||
|
||||
self.get_combined_policy(&policies).await.is_allowed(args).await
|
||||
}
|
||||
|
||||
/// Check if the underlying store is ready
|
||||
pub fn is_ready(&self) -> bool {
|
||||
self.store.is_ready()
|
||||
}
|
||||
}
|
||||
|
||||
fn is_allowed_by_session_policy(args: &Args<'_>) -> (bool, bool) {
|
||||
|
||||
@@ -1,325 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Benchmarks comparing fast lock vs old lock performance
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(dead_code)] // Temporarily disable benchmark tests
|
||||
mod benchmarks {
|
||||
use super::super::*;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::task;
|
||||
|
||||
/// Benchmark single-threaded lock operations
|
||||
#[tokio::test]
|
||||
async fn bench_single_threaded_fast_locks() {
|
||||
let manager = Arc::new(FastObjectLockManager::new());
|
||||
let iterations = 10000;
|
||||
|
||||
// Warm up
|
||||
for i in 0..100 {
|
||||
let _guard = manager
|
||||
.acquire_write_lock("bucket", &format!("warm_{}", i), "owner")
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
// Benchmark write locks
|
||||
let start = Instant::now();
|
||||
for i in 0..iterations {
|
||||
let _guard = manager
|
||||
.acquire_write_lock("bucket", &format!("object_{}", i), "owner")
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
let duration = start.elapsed();
|
||||
|
||||
println!("Fast locks: {} write locks in {:?}", iterations, duration);
|
||||
println!("Average: {:?} per lock", duration / iterations);
|
||||
|
||||
let metrics = manager.get_metrics();
|
||||
println!("Fast path rate: {:.2}%", metrics.shard_metrics.fast_path_rate() * 100.0);
|
||||
|
||||
// Should be much faster than old implementation
|
||||
assert!(duration.as_millis() < 1000, "Should complete 10k locks in <1s");
|
||||
assert!(metrics.shard_metrics.fast_path_rate() > 0.95, "Should have >95% fast path rate");
|
||||
}
|
||||
|
||||
/// Benchmark concurrent lock operations
|
||||
#[tokio::test]
|
||||
async fn bench_concurrent_fast_locks() {
|
||||
let manager = Arc::new(FastObjectLockManager::new());
|
||||
let concurrent_tasks = 100;
|
||||
let iterations_per_task = 100;
|
||||
|
||||
let start = Instant::now();
|
||||
|
||||
let mut handles = Vec::new();
|
||||
for task_id in 0..concurrent_tasks {
|
||||
let manager_clone = manager.clone();
|
||||
let handle = task::spawn(async move {
|
||||
for i in 0..iterations_per_task {
|
||||
let object_name = format!("obj_{}_{}", task_id, i);
|
||||
let _guard = manager_clone
|
||||
.acquire_write_lock("bucket", &object_name, &format!("owner_{}", task_id))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Simulate some work
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
});
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
// Wait for all tasks
|
||||
for handle in handles {
|
||||
handle.await.unwrap();
|
||||
}
|
||||
|
||||
let duration = start.elapsed();
|
||||
let total_ops = concurrent_tasks * iterations_per_task;
|
||||
|
||||
println!("Concurrent fast locks: {} operations across {} tasks in {:?}",
|
||||
total_ops, concurrent_tasks, duration);
|
||||
println!("Throughput: {:.2} ops/sec", total_ops as f64 / duration.as_secs_f64());
|
||||
|
||||
let metrics = manager.get_metrics();
|
||||
println!("Fast path rate: {:.2}%", metrics.shard_metrics.fast_path_rate() * 100.0);
|
||||
println!("Contention events: {}", metrics.shard_metrics.contention_events);
|
||||
|
||||
// Should maintain high throughput even with concurrency
|
||||
assert!(duration.as_millis() < 5000, "Should complete concurrent ops in <5s");
|
||||
}
|
||||
|
||||
/// Benchmark contended lock operations
|
||||
#[tokio::test]
|
||||
async fn bench_contended_locks() {
|
||||
let manager = Arc::new(FastObjectLockManager::new());
|
||||
let concurrent_tasks = 50;
|
||||
let shared_objects = 10; // High contention on few objects
|
||||
let iterations_per_task = 50;
|
||||
|
||||
let start = Instant::now();
|
||||
|
||||
let mut handles = Vec::new();
|
||||
for task_id in 0..concurrent_tasks {
|
||||
let manager_clone = manager.clone();
|
||||
let handle = task::spawn(async move {
|
||||
for i in 0..iterations_per_task {
|
||||
let object_name = format!("shared_{}", i % shared_objects);
|
||||
|
||||
// Mix of read and write operations
|
||||
if i % 3 == 0 {
|
||||
// Write operation
|
||||
if let Ok(_guard) = manager_clone
|
||||
.acquire_write_lock("bucket", &object_name, &format!("owner_{}", task_id))
|
||||
.await
|
||||
{
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
} else {
|
||||
// Read operation
|
||||
if let Ok(_guard) = manager_clone
|
||||
.acquire_read_lock("bucket", &object_name, &format!("owner_{}", task_id))
|
||||
.await
|
||||
{
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
// Wait for all tasks
|
||||
for handle in handles {
|
||||
handle.await.unwrap();
|
||||
}
|
||||
|
||||
let duration = start.elapsed();
|
||||
|
||||
println!("Contended locks: {} tasks on {} objects in {:?}",
|
||||
concurrent_tasks, shared_objects, duration);
|
||||
|
||||
let metrics = manager.get_metrics();
|
||||
println!("Total acquisitions: {}", metrics.shard_metrics.total_acquisitions());
|
||||
println!("Fast path rate: {:.2}%", metrics.shard_metrics.fast_path_rate() * 100.0);
|
||||
println!("Average wait time: {:?}", metrics.shard_metrics.avg_wait_time());
|
||||
println!("Timeout rate: {:.2}%", metrics.shard_metrics.timeout_rate() * 100.0);
|
||||
|
||||
// Even with contention, should maintain reasonable performance
|
||||
assert!(metrics.shard_metrics.timeout_rate() < 0.1, "Should have <10% timeout rate");
|
||||
assert!(metrics.shard_metrics.avg_wait_time() < Duration::from_millis(100), "Avg wait should be <100ms");
|
||||
}
|
||||
|
||||
/// Benchmark batch operations
|
||||
#[tokio::test]
|
||||
async fn bench_batch_operations() {
|
||||
let manager = FastObjectLockManager::new();
|
||||
let batch_sizes = vec![10, 50, 100, 500];
|
||||
|
||||
for batch_size in batch_sizes {
|
||||
// Create batch request
|
||||
let mut batch = BatchLockRequest::new("batch_owner");
|
||||
for i in 0..batch_size {
|
||||
batch = batch.add_write_lock("bucket", &format!("batch_obj_{}", i));
|
||||
}
|
||||
|
||||
let start = Instant::now();
|
||||
let result = manager.acquire_locks_batch(batch).await;
|
||||
let duration = start.elapsed();
|
||||
|
||||
assert!(result.all_acquired, "Batch should succeed");
|
||||
println!("Batch size {}: {:?} ({:.2} μs per lock)",
|
||||
batch_size,
|
||||
duration,
|
||||
duration.as_micros() as f64 / batch_size as f64);
|
||||
|
||||
// Batch should be much faster than individual acquisitions
|
||||
assert!(duration.as_millis() < batch_size as u128 / 10,
|
||||
"Batch should be 10x+ faster than individual locks");
|
||||
}
|
||||
}
|
||||
|
||||
/// Benchmark version-specific locks
|
||||
#[tokio::test]
|
||||
async fn bench_versioned_locks() {
|
||||
let manager = Arc::new(FastObjectLockManager::new());
|
||||
let objects = 100;
|
||||
let versions_per_object = 10;
|
||||
|
||||
let start = Instant::now();
|
||||
|
||||
let mut handles = Vec::new();
|
||||
for obj_id in 0..objects {
|
||||
let manager_clone = manager.clone();
|
||||
let handle = task::spawn(async move {
|
||||
for version in 0..versions_per_object {
|
||||
let _guard = manager_clone
|
||||
.acquire_write_lock_versioned(
|
||||
"bucket",
|
||||
&format!("obj_{}", obj_id),
|
||||
&format!("v{}", version),
|
||||
"version_owner"
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
});
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
for handle in handles {
|
||||
handle.await.unwrap();
|
||||
}
|
||||
|
||||
let duration = start.elapsed();
|
||||
let total_ops = objects * versions_per_object;
|
||||
|
||||
println!("Versioned locks: {} version locks in {:?}", total_ops, duration);
|
||||
println!("Throughput: {:.2} locks/sec", total_ops as f64 / duration.as_secs_f64());
|
||||
|
||||
let metrics = manager.get_metrics();
|
||||
println!("Fast path rate: {:.2}%", metrics.shard_metrics.fast_path_rate() * 100.0);
|
||||
|
||||
// Versioned locks should not interfere with each other
|
||||
assert!(metrics.shard_metrics.fast_path_rate() > 0.9, "Should maintain high fast path rate");
|
||||
}
|
||||
|
||||
/// Compare with theoretical maximum performance
|
||||
#[tokio::test]
|
||||
async fn bench_theoretical_maximum() {
|
||||
let manager = Arc::new(FastObjectLockManager::new());
|
||||
let iterations = 100000;
|
||||
|
||||
// Measure pure fast path performance (no contention)
|
||||
let start = Instant::now();
|
||||
for i in 0..iterations {
|
||||
let _guard = manager
|
||||
.acquire_write_lock("bucket", &format!("unique_{}", i), "owner")
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
let duration = start.elapsed();
|
||||
|
||||
println!("Theoretical maximum: {} unique locks in {:?}", iterations, duration);
|
||||
println!("Rate: {:.2} locks/sec", iterations as f64 / duration.as_secs_f64());
|
||||
println!("Latency: {:?} per lock", duration / iterations);
|
||||
|
||||
let metrics = manager.get_metrics();
|
||||
println!("Fast path rate: {:.2}%", metrics.shard_metrics.fast_path_rate() * 100.0);
|
||||
|
||||
// Should achieve very high performance with no contention
|
||||
assert!(metrics.shard_metrics.fast_path_rate() > 0.99, "Should be nearly 100% fast path");
|
||||
assert!(duration.as_secs_f64() / (iterations as f64) < 0.0001, "Should be <100μs per lock");
|
||||
}
|
||||
|
||||
/// Performance regression test
|
||||
#[tokio::test]
|
||||
async fn performance_regression_test() {
|
||||
let manager = Arc::new(FastObjectLockManager::new());
|
||||
|
||||
// This test ensures we maintain performance targets
|
||||
let test_cases = vec![
|
||||
("single_thread", 1, 10000),
|
||||
("low_contention", 10, 1000),
|
||||
("high_contention", 100, 100),
|
||||
];
|
||||
|
||||
for (test_name, threads, ops_per_thread) in test_cases {
|
||||
let start = Instant::now();
|
||||
|
||||
let mut handles = Vec::new();
|
||||
for thread_id in 0..threads {
|
||||
let manager_clone = manager.clone();
|
||||
let handle = task::spawn(async move {
|
||||
for op_id in 0..ops_per_thread {
|
||||
let object = if threads == 1 {
|
||||
format!("obj_{}_{}", thread_id, op_id)
|
||||
} else {
|
||||
format!("obj_{}", op_id % 100) // Create contention
|
||||
};
|
||||
|
||||
let owner = format!("owner_{}", thread_id);
|
||||
let _guard = manager_clone
|
||||
.acquire_write_lock("bucket", object, owner)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
});
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
for handle in handles {
|
||||
handle.await.unwrap();
|
||||
}
|
||||
|
||||
let duration = start.elapsed();
|
||||
let total_ops = threads * ops_per_thread;
|
||||
let ops_per_sec = total_ops as f64 / duration.as_secs_f64();
|
||||
|
||||
println!("{}: {:.2} ops/sec", test_name, ops_per_sec);
|
||||
|
||||
// Performance targets (adjust based on requirements)
|
||||
match test_name {
|
||||
"single_thread" => assert!(ops_per_sec > 50000.0, "Single thread should exceed 50k ops/sec"),
|
||||
"low_contention" => assert!(ops_per_sec > 20000.0, "Low contention should exceed 20k ops/sec"),
|
||||
"high_contention" => assert!(ops_per_sec > 5000.0, "High contention should exceed 5k ops/sec"),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -37,9 +37,6 @@ pub mod shard;
|
||||
pub mod state;
|
||||
pub mod types;
|
||||
|
||||
// #[cfg(test)]
|
||||
// pub mod benchmarks; // Temporarily disabled due to compilation issues
|
||||
|
||||
// Re-export main types
|
||||
pub use disabled_manager::DisabledLockManager;
|
||||
pub use guard::FastLockGuard;
|
||||
|
||||
@@ -12,4 +12,6 @@ WORKDIR /app
|
||||
|
||||
COPY --from=builder /build/target/release/rustfs-mcp /app/
|
||||
|
||||
ENTRYPOINT ["/app/rustfs-mcp"]
|
||||
RUN apt-get update && apt-get install -y ca-certificates && update-ca-certificates
|
||||
|
||||
ENTRYPOINT ["/app/rustfs-mcp"]
|
||||
|
||||
@@ -30,6 +30,7 @@ rustfs-config = { workspace = true, features = ["notify", "constants"] }
|
||||
rustfs-ecstore = { workspace = true }
|
||||
rustfs-targets = { workspace = true }
|
||||
rustfs-utils = { workspace = true }
|
||||
arc-swap = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
chrono = { workspace = true, features = ["serde"] }
|
||||
futures = { workspace = true }
|
||||
|
||||
@@ -60,8 +60,9 @@ impl TargetFactory for WebhookTargetFactory {
|
||||
let endpoint = config
|
||||
.lookup(WEBHOOK_ENDPOINT)
|
||||
.ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
|
||||
let endpoint_url = Url::parse(&endpoint)
|
||||
.map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{endpoint}')")))?;
|
||||
let parsed_endpoint = endpoint.trim();
|
||||
let endpoint_url = Url::parse(parsed_endpoint)
|
||||
.map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{parsed_endpoint}')")))?;
|
||||
|
||||
let args = WebhookArgs {
|
||||
enable: true, // If we are here, it's already enabled.
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::notification_system_subscriber::NotificationSystemSubscriberView;
|
||||
use crate::{
|
||||
Event, error::NotificationError, notifier::EventNotifier, registry::TargetRegistry, rules::BucketNotificationConfig, stream,
|
||||
};
|
||||
@@ -104,6 +105,8 @@ pub struct NotificationSystem {
|
||||
concurrency_limiter: Arc<Semaphore>,
|
||||
/// Monitoring indicators
|
||||
metrics: Arc<NotificationMetrics>,
|
||||
/// Subscriber view
|
||||
subscriber_view: NotificationSystemSubscriberView,
|
||||
}
|
||||
|
||||
impl NotificationSystem {
|
||||
@@ -112,6 +115,7 @@ impl NotificationSystem {
|
||||
let concurrency_limiter =
|
||||
rustfs_utils::get_env_usize(ENV_NOTIFY_TARGET_STREAM_CONCURRENCY, DEFAULT_NOTIFY_TARGET_STREAM_CONCURRENCY);
|
||||
NotificationSystem {
|
||||
subscriber_view: NotificationSystemSubscriberView::new(),
|
||||
notifier: Arc::new(EventNotifier::new()),
|
||||
registry: Arc::new(TargetRegistry::new()),
|
||||
config: Arc::new(RwLock::new(config)),
|
||||
@@ -188,8 +192,11 @@ impl NotificationSystem {
|
||||
}
|
||||
|
||||
/// Checks if there are active subscribers for the given bucket and event name.
|
||||
pub async fn has_subscriber(&self, bucket: &str, event_name: &EventName) -> bool {
|
||||
self.notifier.has_subscriber(bucket, event_name).await
|
||||
pub async fn has_subscriber(&self, bucket: &str, event: &EventName) -> bool {
|
||||
if !self.subscriber_view.has_subscriber(bucket, event) {
|
||||
return false;
|
||||
}
|
||||
self.notifier.has_subscriber(bucket, event).await
|
||||
}
|
||||
|
||||
async fn update_config_and_reload<F>(&self, mut modifier: F) -> Result<(), NotificationError>
|
||||
@@ -212,6 +219,11 @@ impl NotificationSystem {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Save the modified configuration to storage
|
||||
rustfs_ecstore::config::com::save_server_config(store, &new_config)
|
||||
.await
|
||||
.map_err(|e| NotificationError::SaveConfig(e.to_string()))?;
|
||||
|
||||
info!("Configuration updated. Reloading system...");
|
||||
self.reload_config(new_config).await
|
||||
}
|
||||
@@ -231,15 +243,18 @@ impl NotificationSystem {
|
||||
pub async fn remove_target(&self, target_id: &TargetID, target_type: &str) -> Result<(), NotificationError> {
|
||||
info!("Attempting to remove target: {}", target_id);
|
||||
|
||||
let ttype = target_type.to_lowercase();
|
||||
let tname = target_id.name.to_lowercase();
|
||||
|
||||
self.update_config_and_reload(|config| {
|
||||
let mut changed = false;
|
||||
if let Some(targets_of_type) = config.0.get_mut(target_type) {
|
||||
if targets_of_type.remove(&target_id.name).is_some() {
|
||||
if let Some(targets_of_type) = config.0.get_mut(&ttype) {
|
||||
if targets_of_type.remove(&tname).is_some() {
|
||||
info!("Removed target {} from configuration", target_id);
|
||||
changed = true;
|
||||
}
|
||||
if targets_of_type.is_empty() {
|
||||
config.0.remove(target_type);
|
||||
config.0.remove(&ttype);
|
||||
}
|
||||
}
|
||||
if !changed {
|
||||
@@ -264,20 +279,24 @@ impl NotificationSystem {
|
||||
/// If the target configuration is invalid, it returns Err(NotificationError::Configuration).
|
||||
pub async fn set_target_config(&self, target_type: &str, target_name: &str, kvs: KVS) -> Result<(), NotificationError> {
|
||||
info!("Setting config for target {} of type {}", target_name, target_type);
|
||||
let ttype = target_type.to_lowercase();
|
||||
let tname = target_name.to_lowercase();
|
||||
self.update_config_and_reload(|config| {
|
||||
config
|
||||
.0
|
||||
.entry(target_type.to_lowercase())
|
||||
.or_default()
|
||||
.insert(target_name.to_lowercase(), kvs.clone());
|
||||
config.0.entry(ttype.clone()).or_default().insert(tname.clone(), kvs.clone());
|
||||
true // The configuration is always modified
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
/// Removes all notification configurations for a bucket.
|
||||
pub async fn remove_bucket_notification_config(&self, bucket_name: &str) {
|
||||
self.notifier.remove_rules_map(bucket_name).await;
|
||||
/// If the configuration is successfully removed, the entire notification system will be automatically reloaded.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket` - The name of the bucket whose notification configuration is to be removed.
|
||||
///
|
||||
pub async fn remove_bucket_notification_config(&self, bucket: &str) {
|
||||
self.subscriber_view.clear_bucket(bucket);
|
||||
self.notifier.remove_rules_map(bucket).await;
|
||||
}
|
||||
|
||||
/// Removes a Target configuration.
|
||||
@@ -294,23 +313,50 @@ impl NotificationSystem {
|
||||
/// If the target configuration does not exist, it returns Ok(()) without making any changes.
|
||||
pub async fn remove_target_config(&self, target_type: &str, target_name: &str) -> Result<(), NotificationError> {
|
||||
info!("Removing config for target {} of type {}", target_name, target_type);
|
||||
self.update_config_and_reload(|config| {
|
||||
let mut changed = false;
|
||||
if let Some(targets) = config.0.get_mut(&target_type.to_lowercase()) {
|
||||
if targets.remove(&target_name.to_lowercase()).is_some() {
|
||||
changed = true;
|
||||
|
||||
let ttype = target_type.to_lowercase();
|
||||
let tname = target_name.to_lowercase();
|
||||
|
||||
let target_id = TargetID {
|
||||
id: tname.clone(),
|
||||
name: ttype.clone(),
|
||||
};
|
||||
|
||||
// Deletion is prohibited if bucket rules refer to it
|
||||
if self.notifier.is_target_bound_to_any_bucket(&target_id).await {
|
||||
return Err(NotificationError::Configuration(format!(
|
||||
"Target is still bound to bucket rules and deletion is prohibited: type={} name={}",
|
||||
ttype, tname
|
||||
)));
|
||||
}
|
||||
|
||||
let config_result = self
|
||||
.update_config_and_reload(|config| {
|
||||
let mut changed = false;
|
||||
if let Some(targets) = config.0.get_mut(&ttype) {
|
||||
if targets.remove(&tname).is_some() {
|
||||
changed = true;
|
||||
}
|
||||
if targets.is_empty() {
|
||||
config.0.remove(target_type);
|
||||
}
|
||||
}
|
||||
if targets.is_empty() {
|
||||
config.0.remove(target_type);
|
||||
if !changed {
|
||||
info!("Target {} of type {} not found, no changes made.", target_name, target_type);
|
||||
}
|
||||
}
|
||||
if !changed {
|
||||
info!("Target {} of type {} not found, no changes made.", target_name, target_type);
|
||||
}
|
||||
debug!("Config after remove: {:?}", config);
|
||||
changed
|
||||
})
|
||||
.await
|
||||
debug!("Config after remove: {:?}", config);
|
||||
changed
|
||||
})
|
||||
.await;
|
||||
|
||||
if config_result.is_ok() {
|
||||
// Remove from target list
|
||||
let target_list = self.notifier.target_list();
|
||||
let mut target_list_guard = target_list.write().await;
|
||||
let _ = target_list_guard.remove_target_only(&target_id).await;
|
||||
}
|
||||
|
||||
config_result
|
||||
}
|
||||
|
||||
/// Enhanced event stream startup function, including monitoring and concurrency control
|
||||
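remove_target_config now rejects removal while bucket rules still reference the target, and only prunes the in-memory target list after the persisted config update succeeds. A hedged sketch of how an admin-side caller might surface that new error; the target type and name here are placeholders, not values from this change:

// Illustrative only: "webhook"/"primary" are made-up identifiers.
async fn delete_target(sys: &NotificationSystem) -> Result<(), NotificationError> {
    match sys.remove_target_config("webhook", "primary").await {
        Err(NotificationError::Configuration(msg)) if msg.contains("still bound") => {
            // A bucket rule still points at this target; detach the bucket's
            // notification config first, then retry the removal.
            Err(NotificationError::Configuration(msg))
        }
        other => other,
    }
}
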
@@ -341,6 +387,9 @@ impl NotificationSystem {
|
||||
let _ = cancel_tx.send(()).await;
|
||||
}
|
||||
|
||||
// Clear the target_list so that reload performs a full rebuild rather than an in-place update (avoids stale/residual entries and an unchanged target_list length)
|
||||
self.notifier.remove_all_bucket_targets().await;
|
||||
|
||||
// Update the config
|
||||
self.update_config(new_config.clone()).await;
|
||||
|
||||
@@ -371,15 +420,16 @@ impl NotificationSystem {
|
||||
|
||||
// The storage of the cloned target and the target itself
|
||||
let store_clone = store.boxed_clone();
|
||||
let target_box = target.clone_dyn();
|
||||
let target_arc = Arc::from(target_box);
|
||||
|
||||
// Add a reference to the monitoring metrics
|
||||
let metrics = self.metrics.clone();
|
||||
let semaphore = self.concurrency_limiter.clone();
|
||||
// let target_box = target.clone_dyn();
|
||||
let target_arc = Arc::from(target.clone_dyn());
|
||||
|
||||
// Encapsulated enhanced version of start_event_stream
|
||||
let cancel_tx = self.enhanced_start_event_stream(store_clone, target_arc, metrics, semaphore);
|
||||
let cancel_tx = self.enhanced_start_event_stream(
|
||||
store_clone,
|
||||
target_arc,
|
||||
self.metrics.clone(),
|
||||
self.concurrency_limiter.clone(),
|
||||
);
|
||||
|
||||
// Start event stream processing and save cancel sender
|
||||
// let cancel_tx = start_event_stream(store_clone, target_clone);
|
||||
@@ -406,17 +456,18 @@ impl NotificationSystem {
|
||||
/// Loads the bucket notification configuration
|
||||
pub async fn load_bucket_notification_config(
|
||||
&self,
|
||||
bucket_name: &str,
|
||||
config: &BucketNotificationConfig,
|
||||
bucket: &str,
|
||||
cfg: &BucketNotificationConfig,
|
||||
) -> Result<(), NotificationError> {
|
||||
let arn_list = self.notifier.get_arn_list(&config.region).await;
|
||||
self.subscriber_view.apply_bucket_config(bucket, cfg);
|
||||
let arn_list = self.notifier.get_arn_list(&cfg.region).await;
|
||||
if arn_list.is_empty() {
|
||||
return Err(NotificationError::Configuration("No targets configured".to_string()));
|
||||
}
|
||||
info!("Available ARNs: {:?}", arn_list);
|
||||
// Validate the configuration against the available ARNs
|
||||
if let Err(e) = config.validate(&config.region, &arn_list) {
|
||||
debug!("Bucket notification config validation region:{} failed: {}", &config.region, e);
|
||||
if let Err(e) = cfg.validate(&cfg.region, &arn_list) {
|
||||
debug!("Bucket notification config validation region:{} failed: {}", &cfg.region, e);
|
||||
if !e.to_string().contains("ARN not found") {
|
||||
return Err(NotificationError::BucketNotification(e.to_string()));
|
||||
} else {
|
||||
@@ -424,9 +475,9 @@ impl NotificationSystem {
|
||||
}
|
||||
}
|
||||
|
||||
let rules_map = config.get_rules_map();
|
||||
self.notifier.add_rules_map(bucket_name, rules_map.clone()).await;
|
||||
info!("Loaded notification config for bucket: {}", bucket_name);
|
||||
let rules_map = cfg.get_rules_map();
|
||||
self.notifier.add_rules_map(bucket, rules_map.clone()).await;
|
||||
info!("Loaded notification config for bucket: {}", bucket);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -23,6 +23,7 @@ mod event;
|
||||
pub mod factory;
|
||||
mod global;
|
||||
pub mod integration;
|
||||
mod notification_system_subscriber;
|
||||
pub mod notifier;
|
||||
pub mod registry;
|
||||
pub mod rules;
|
||||
|
||||
74
crates/notify/src/notification_system_subscriber.rs
Normal file
@@ -0,0 +1,74 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::BucketNotificationConfig;
|
||||
use crate::rules::{BucketRulesSnapshot, DynRulesContainer, SubscriberIndex};
|
||||
use rustfs_targets::EventName;
|
||||
|
||||
/// NotificationSystemSubscriberView - Provides an interface to manage and query
|
||||
/// the subscription status of buckets in the notification system.
|
||||
#[derive(Debug)]
|
||||
pub struct NotificationSystemSubscriberView {
|
||||
index: SubscriberIndex,
|
||||
}
|
||||
|
||||
impl NotificationSystemSubscriberView {
|
||||
/// Creates a new NotificationSystemSubscriberView with an empty SubscriberIndex.
|
||||
///
|
||||
/// Returns a new instance of NotificationSystemSubscriberView.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
index: SubscriberIndex::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks if a bucket has any subscribers for a specific event.
|
||||
/// This is a quick check using the event mask in the snapshot.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket` - The name of the bucket to check.
|
||||
/// * `event` - The event name to check for subscriptions.
|
||||
///
|
||||
/// Returns `true` if there are subscribers for the event, `false` otherwise.
|
||||
#[inline]
|
||||
pub fn has_subscriber(&self, bucket: &str, event: &EventName) -> bool {
|
||||
self.index.has_subscriber(bucket, event)
|
||||
}
|
||||
|
||||
/// Builds and atomically replaces a bucket's subscription snapshot from the configuration.
|
||||
///
|
||||
/// Core principle: masks and rules are calculated and stored together in the same update.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket` - The name of the bucket to update.
|
||||
/// * `cfg` - The bucket notification configuration to compile into a snapshot.
|
||||
pub fn apply_bucket_config(&self, bucket: &str, cfg: &BucketNotificationConfig) {
|
||||
// Note: mask and rules are compiled by a single function so that both come from the same source of truth.
|
||||
let snapshot: BucketRulesSnapshot<DynRulesContainer> = cfg.compile_snapshot();
|
||||
|
||||
// Debug assertion: guards against future changes to the compile logic introducing mask/rule inconsistencies.
|
||||
snapshot.debug_assert_mask_consistent();
|
||||
|
||||
self.index.store_snapshot(bucket, snapshot);
|
||||
}
|
||||
|
||||
/// Clears a bucket's subscription snapshot.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket` - The name of the bucket to clear.
|
||||
#[inline]
|
||||
pub fn clear_bucket(&self, bucket: &str) {
|
||||
self.index.clear_bucket(bucket);
|
||||
}
|
||||
}
|
||||
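Taken together, the new view is written when a bucket's notification config changes and read on every event. A compressed sketch of that lifecycle; cfg is assumed to be a BucketNotificationConfig obtained elsewhere, the bucket name is made up, and within the crate the two types come from crate::BucketNotificationConfig and this new module:

use rustfs_targets::EventName;

fn wire_subscriber_view(cfg: &BucketNotificationConfig, event: &EventName) -> bool {
    let view = NotificationSystemSubscriberView::new();
    view.apply_bucket_config("photos", cfg);                // write path: compile + store snapshot
    let interested = view.has_subscriber("photos", event);  // read path: mask check, no await
    view.clear_bucket("photos");                            // teardown when the config is removed
    interested
}
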
@@ -14,19 +14,21 @@
|
||||
|
||||
use crate::{error::NotificationError, event::Event, rules::RulesMap};
|
||||
use hashbrown::HashMap;
|
||||
use rustfs_config::notify::{DEFAULT_NOTIFY_SEND_CONCURRENCY, ENV_NOTIFY_SEND_CONCURRENCY};
|
||||
use rustfs_targets::EventName;
|
||||
use rustfs_targets::Target;
|
||||
use rustfs_targets::arn::TargetID;
|
||||
use rustfs_targets::target::EntityTarget;
|
||||
use starshard::AsyncShardedHashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
use tokio::sync::{RwLock, Semaphore};
|
||||
use tracing::{debug, error, info, instrument, warn};
|
||||
|
||||
/// Manages event notification to targets based on rules
|
||||
pub struct EventNotifier {
|
||||
target_list: Arc<RwLock<TargetList>>,
|
||||
bucket_rules_map: Arc<AsyncShardedHashMap<String, RulesMap, rustc_hash::FxBuildHasher>>,
|
||||
send_limiter: Arc<Semaphore>,
|
||||
}
|
||||
|
||||
impl Default for EventNotifier {
|
||||
@@ -37,16 +39,41 @@ impl Default for EventNotifier {
|
||||
|
||||
impl EventNotifier {
|
||||
/// Creates a new EventNotifier
|
||||
///
|
||||
/// # Returns
|
||||
/// Returns a new instance of EventNotifier.
|
||||
pub fn new() -> Self {
|
||||
let max_inflight = rustfs_utils::get_env_usize(ENV_NOTIFY_SEND_CONCURRENCY, DEFAULT_NOTIFY_SEND_CONCURRENCY);
|
||||
EventNotifier {
|
||||
target_list: Arc::new(RwLock::new(TargetList::new())),
|
||||
bucket_rules_map: Arc::new(AsyncShardedHashMap::new(0)),
|
||||
send_limiter: Arc::new(Semaphore::new(max_inflight)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks whether a TargetID is still referenced by any bucket's rules.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `target_id` - The TargetID to check.
|
||||
///
|
||||
/// # Returns
|
||||
/// Returns `true` if the TargetID is bound to any bucket, otherwise `false`.
|
||||
pub async fn is_target_bound_to_any_bucket(&self, target_id: &TargetID) -> bool {
|
||||
// `AsyncShardedHashMap::iter()`: Traverse (bucket_name, rules_map)
|
||||
let items = self.bucket_rules_map.iter().await;
|
||||
for (_bucket, rules_map) in items {
|
||||
if rules_map.contains_target_id(target_id) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
/// Returns a reference to the target list
|
||||
/// This method provides access to the target list for external use.
|
||||
///
|
||||
/// # Returns
|
||||
/// Returns an `Arc<RwLock<TargetList>>` representing the target list.
|
||||
pub fn target_list(&self) -> Arc<RwLock<TargetList>> {
|
||||
Arc::clone(&self.target_list)
|
||||
}
|
||||
@@ -54,17 +81,23 @@ impl EventNotifier {
|
||||
/// Removes all notification rules for a bucket
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket_name` - The name of the bucket for which to remove rules
|
||||
/// * `bucket` - The name of the bucket for which to remove rules
|
||||
///
|
||||
/// This method removes all rules associated with the specified bucket name.
|
||||
/// It will log a message indicating the removal of rules.
|
||||
pub async fn remove_rules_map(&self, bucket_name: &str) {
|
||||
if self.bucket_rules_map.remove(&bucket_name.to_string()).await.is_some() {
|
||||
info!("Removed all notification rules for bucket: {}", bucket_name);
|
||||
pub async fn remove_rules_map(&self, bucket: &str) {
|
||||
if self.bucket_rules_map.remove(&bucket.to_string()).await.is_some() {
|
||||
info!("Removed all notification rules for bucket: {}", bucket);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a list of ARNs for the registered targets
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `region` - The region to use for generating the ARNs
|
||||
///
|
||||
/// # Returns
|
||||
/// Returns a vector of strings representing the ARNs of the registered targets
|
||||
pub async fn get_arn_list(&self, region: &str) -> Vec<String> {
|
||||
let target_list_guard = self.target_list.read().await;
|
||||
target_list_guard
|
||||
@@ -75,24 +108,37 @@ impl EventNotifier {
|
||||
}
|
||||
|
||||
/// Adds a rules map for a bucket
|
||||
pub async fn add_rules_map(&self, bucket_name: &str, rules_map: RulesMap) {
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket` - The name of the bucket for which to add the rules map
|
||||
/// * `rules_map` - The rules map to add for the bucket
|
||||
pub async fn add_rules_map(&self, bucket: &str, rules_map: RulesMap) {
|
||||
if rules_map.is_empty() {
|
||||
self.bucket_rules_map.remove(&bucket_name.to_string()).await;
|
||||
self.bucket_rules_map.remove(&bucket.to_string()).await;
|
||||
} else {
|
||||
self.bucket_rules_map.insert(bucket_name.to_string(), rules_map).await;
|
||||
self.bucket_rules_map.insert(bucket.to_string(), rules_map).await;
|
||||
}
|
||||
info!("Added rules for bucket: {}", bucket_name);
|
||||
info!("Added rules for bucket: {}", bucket);
|
||||
}
|
||||
|
||||
/// Gets the rules map for a specific bucket.
|
||||
pub async fn get_rules_map(&self, bucket_name: &str) -> Option<RulesMap> {
|
||||
self.bucket_rules_map.get(&bucket_name.to_string()).await
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket` - The name of the bucket for which to get the rules map
|
||||
///
|
||||
/// # Returns
|
||||
/// Returns `Some(RulesMap)` if rules exist for the bucket, otherwise returns `None`.
|
||||
pub async fn get_rules_map(&self, bucket: &str) -> Option<RulesMap> {
|
||||
self.bucket_rules_map.get(&bucket.to_string()).await
|
||||
}
|
||||
|
||||
/// Removes notification rules for a bucket
|
||||
pub async fn remove_notification(&self, bucket_name: &str) {
|
||||
self.bucket_rules_map.remove(&bucket_name.to_string()).await;
|
||||
info!("Removed notification rules for bucket: {}", bucket_name);
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket` - The name of the bucket for which to remove notification rules
|
||||
pub async fn remove_notification(&self, bucket: &str) {
|
||||
self.bucket_rules_map.remove(&bucket.to_string()).await;
|
||||
info!("Removed notification rules for bucket: {}", bucket);
|
||||
}
|
||||
|
||||
/// Removes all targets
|
||||
@@ -125,69 +171,87 @@ impl EventNotifier {
|
||||
}
|
||||
|
||||
/// Sends an event to the appropriate targets based on the bucket rules
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `event` - The event to send
|
||||
#[instrument(skip_all)]
|
||||
pub async fn send(&self, event: Arc<Event>) {
|
||||
let bucket_name = &event.s3.bucket.name;
|
||||
let object_key = &event.s3.object.key;
|
||||
let event_name = event.event_name;
|
||||
if let Some(rules) = self.bucket_rules_map.get(bucket_name).await {
|
||||
let target_ids = rules.match_rules(event_name, object_key);
|
||||
if target_ids.is_empty() {
|
||||
debug!("No matching targets for event in bucket: {}", bucket_name);
|
||||
return;
|
||||
}
|
||||
let target_ids_len = target_ids.len();
|
||||
let mut handles = vec![];
|
||||
|
||||
// Use scope to limit the borrow scope of target_list
|
||||
{
|
||||
let target_list_guard = self.target_list.read().await;
|
||||
info!("Sending event to targets: {:?}", target_ids);
|
||||
for target_id in target_ids {
|
||||
// `get` now returns Option<Arc<dyn Target + Send + Sync>>
|
||||
if let Some(target_arc) = target_list_guard.get(&target_id) {
|
||||
// Clone the Arc<dyn Target> (the form stored in target_list) to move into the asynchronous task
|
||||
// target_arc is already Arc, clone it for the async task
|
||||
let cloned_target_for_task = target_arc.clone();
|
||||
let event_clone = event.clone();
|
||||
let target_name_for_task = cloned_target_for_task.name(); // Get the name before generating the task
|
||||
debug!("Preparing to send event to target: {}", target_name_for_task);
|
||||
// Use cloned data in closures to avoid borrowing conflicts
|
||||
// Create an EntityTarget from the event
|
||||
let entity_target: Arc<EntityTarget<Event>> = Arc::new(EntityTarget {
|
||||
object_name: object_key.to_string(),
|
||||
bucket_name: bucket_name.to_string(),
|
||||
event_name,
|
||||
data: event_clone.clone().as_ref().clone(),
|
||||
});
|
||||
let handle = tokio::spawn(async move {
|
||||
if let Err(e) = cloned_target_for_task.save(entity_target.clone()).await {
|
||||
error!("Failed to send event to target {}: {}", target_name_for_task, e);
|
||||
} else {
|
||||
debug!("Successfully saved event to target {}", target_name_for_task);
|
||||
}
|
||||
});
|
||||
handles.push(handle);
|
||||
} else {
|
||||
warn!("Target ID {:?} found in rules but not in target list.", target_id);
|
||||
}
|
||||
}
|
||||
// target_list is automatically released here
|
||||
}
|
||||
|
||||
// Wait for all tasks to be completed
|
||||
for handle in handles {
|
||||
if let Err(e) = handle.await {
|
||||
error!("Task for sending/saving event failed: {}", e);
|
||||
}
|
||||
}
|
||||
info!("Event processing initiated for {} targets for bucket: {}", target_ids_len, bucket_name);
|
||||
} else {
|
||||
let Some(rules) = self.bucket_rules_map.get(bucket_name).await else {
|
||||
debug!("No rules found for bucket: {}", bucket_name);
|
||||
return;
|
||||
};
|
||||
|
||||
let target_ids = rules.match_rules(event_name, object_key);
|
||||
if target_ids.is_empty() {
|
||||
debug!("No matching targets for event in bucket: {}", bucket_name);
|
||||
return;
|
||||
}
|
||||
let target_ids_len = target_ids.len();
|
||||
let mut handles = vec![];
|
||||
|
||||
// Use scope to limit the borrow scope of target_list
|
||||
let target_list_guard = self.target_list.read().await;
|
||||
info!("Sending event to targets: {:?}", target_ids);
|
||||
for target_id in target_ids {
|
||||
// `get` now returns Option<Arc<dyn Target + Send + Sync>>
|
||||
if let Some(target_arc) = target_list_guard.get(&target_id) {
|
||||
// Clone the Arc<dyn Target> (the form stored in target_list) to move into the asynchronous task
|
||||
// target_arc is already Arc, clone it for the async task
|
||||
let target_for_task = target_arc.clone();
|
||||
let limiter = self.send_limiter.clone();
|
||||
let event_clone = event.clone();
|
||||
let target_name_for_task = target_for_task.name(); // Get the name before generating the task
|
||||
debug!("Preparing to send event to target: {}", target_name_for_task);
|
||||
// Use cloned data in closures to avoid borrowing conflicts
|
||||
// Create an EntityTarget from the event
|
||||
let entity_target: Arc<EntityTarget<Event>> = Arc::new(EntityTarget {
|
||||
object_name: object_key.to_string(),
|
||||
bucket_name: bucket_name.to_string(),
|
||||
event_name,
|
||||
data: event_clone.as_ref().clone(),
|
||||
});
|
||||
let handle = tokio::spawn(async move {
|
||||
let _permit = match limiter.acquire_owned().await {
|
||||
Ok(p) => p,
|
||||
Err(e) => {
|
||||
error!("Failed to acquire send permit for target {}: {}", target_name_for_task, e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
if let Err(e) = target_for_task.save(entity_target.clone()).await {
|
||||
error!("Failed to send event to target {}: {}", target_name_for_task, e);
|
||||
} else {
|
||||
debug!("Successfully saved event to target {}", target_name_for_task);
|
||||
}
|
||||
});
|
||||
handles.push(handle);
|
||||
} else {
|
||||
warn!("Target ID {:?} found in rules but not in target list.", target_id);
|
||||
}
|
||||
}
|
||||
// target_list is automatically released here
|
||||
drop(target_list_guard);
|
||||
|
||||
// Wait for all tasks to be completed
|
||||
for handle in handles {
|
||||
if let Err(e) = handle.await {
|
||||
error!("Task for sending/saving event failed: {}", e);
|
||||
}
|
||||
}
|
||||
info!("Event processing initiated for {} targets for bucket: {}", target_ids_len, bucket_name);
|
||||
}
|
||||
|
||||
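The new send path above fans an event out to every matching target but caps the number of in-flight deliveries with the semaphore created in EventNotifier::new. A minimal, self-contained sketch of that bounded fan-out pattern follows; it assumes only tokio, and deliver() is a hypothetical stand-in for Target::save rather than the crate's API.

use std::sync::Arc;
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    // Cap concurrent deliveries, as the notifier does with send_limiter.
    let limiter = Arc::new(Semaphore::new(8));
    let targets: Vec<String> = (0..32).map(|i| format!("target-{i}")).collect();

    let mut handles = Vec::new();
    for name in targets {
        let limiter = limiter.clone();
        handles.push(tokio::spawn(async move {
            // acquire_owned keeps the permit alive for the whole task.
            let _permit = match limiter.acquire_owned().await {
                Ok(p) => p,
                Err(_) => return, // semaphore closed, nothing to do
            };
            deliver(&name).await;
        }));
    }
    for h in handles {
        let _ = h.await;
    }
}

// Hypothetical delivery stand-in for Target::save.
async fn deliver(name: &str) {
    println!("delivered event to {name}");
}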
/// Initializes the targets for buckets
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `targets_to_init` - A vector of boxed targets to initialize
|
||||
///
|
||||
/// # Returns
|
||||
/// Returns `Ok(())` if initialization is successful, otherwise returns a `NotificationError`.
|
||||
#[instrument(skip(self, targets_to_init))]
|
||||
pub async fn init_bucket_targets(
|
||||
&self,
|
||||
@@ -195,6 +259,10 @@ impl EventNotifier {
|
||||
) -> Result<(), NotificationError> {
|
||||
// Currently active, simpler logic
|
||||
let mut target_list_guard = self.target_list.write().await; // Gets a write lock for the TargetList
|
||||
|
||||
// Clear existing targets first - rebuild from scratch to ensure consistency with new configuration
|
||||
target_list_guard.clear();
|
||||
|
||||
for target_boxed in targets_to_init {
|
||||
// Traverse the incoming Box<dyn Target>
|
||||
debug!("init bucket target: {}", target_boxed.name());
|
||||
@@ -214,6 +282,7 @@ impl EventNotifier {
|
||||
|
||||
/// A thread-safe list of targets
|
||||
pub struct TargetList {
|
||||
/// Map of TargetID to Target
|
||||
targets: HashMap<TargetID, Arc<dyn Target<Event> + Send + Sync>>,
|
||||
}
|
||||
|
||||
@@ -230,6 +299,12 @@ impl TargetList {
|
||||
}
|
||||
|
||||
/// Adds a target to the list
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `target` - The target to add
|
||||
///
|
||||
/// # Returns
|
||||
/// Returns `Ok(())` if the target was added successfully, or a `NotificationError` if an error occurred.
|
||||
pub fn add(&mut self, target: Arc<dyn Target<Event> + Send + Sync>) -> Result<(), NotificationError> {
|
||||
let id = target.id();
|
||||
if self.targets.contains_key(&id) {
|
||||
@@ -240,8 +315,19 @@ impl TargetList {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Clears all targets from the list
|
||||
pub fn clear(&mut self) {
|
||||
self.targets.clear();
|
||||
}
|
||||
|
||||
/// Removes a target by ID. Note: This does not stop its associated event stream.
|
||||
/// Stream cancellation should be handled by EventNotifier.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `id` - The ID of the target to remove
|
||||
///
|
||||
/// # Returns
|
||||
/// Returns the removed target if it existed, otherwise `None`.
|
||||
pub async fn remove_target_only(&mut self, id: &TargetID) -> Option<Arc<dyn Target<Event> + Send + Sync>> {
|
||||
if let Some(target_arc) = self.targets.remove(id) {
|
||||
if let Err(e) = target_arc.close().await {
|
||||
@@ -269,6 +355,12 @@ impl TargetList {
|
||||
}
|
||||
|
||||
/// Returns a target by ID
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `id` - The ID of the target to retrieve
|
||||
///
|
||||
/// # Returns
|
||||
/// Returns the target if it exists, otherwise `None`.
|
||||
pub fn get(&self, id: &TargetID) -> Option<Arc<dyn Target<Event> + Send + Sync>> {
|
||||
self.targets.get(id).cloned()
|
||||
}
|
||||
@@ -283,7 +375,7 @@ impl TargetList {
|
||||
self.targets.len()
|
||||
}
|
||||
|
||||
// is_empty can be derived from len()
|
||||
/// is_empty can be derived from len()
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.targets.is_empty()
|
||||
}
|
||||
|
||||
@@ -15,13 +15,60 @@
|
||||
use super::rules_map::RulesMap;
|
||||
use super::xml_config::ParseConfigError as BucketNotificationConfigError;
|
||||
use crate::rules::NotificationConfiguration;
|
||||
use crate::rules::pattern_rules;
|
||||
use crate::rules::target_id_set;
|
||||
use hashbrown::HashMap;
|
||||
use crate::rules::subscriber_snapshot::{BucketRulesSnapshot, DynRulesContainer, RuleEvents, RulesContainer};
|
||||
use rustfs_targets::EventName;
|
||||
use rustfs_targets::arn::TargetID;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::io::Read;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// A "rule view", only used for snapshot mask/consistency verification.
|
||||
/// The view is generated per single event so that the event_mask calculation stays simple and reliable.
|
||||
#[derive(Debug)]
|
||||
struct RuleView {
|
||||
events: Vec<EventName>,
|
||||
}
|
||||
|
||||
impl RuleEvents for RuleView {
|
||||
fn subscribed_events(&self) -> &[EventName] {
|
||||
&self.events
|
||||
}
|
||||
}
|
||||
|
||||
/// Adapt RulesMap to RulesContainer.
|
||||
/// Key point: The items returned by iter_rules are &dyn RuleEvents, so a RuleView list is cached in the container.
|
||||
#[derive(Debug)]
|
||||
struct CompiledRules {
|
||||
// Keep RulesMap (can be used later if you want to make more complex judgments during the snapshot reading phase)
|
||||
#[allow(dead_code)]
|
||||
rules_map: RulesMap,
|
||||
// for RulesContainer::iter_rules
|
||||
rule_views: Vec<RuleView>,
|
||||
}
|
||||
|
||||
impl CompiledRules {
|
||||
fn from_rules_map(rules_map: &RulesMap) -> Self {
|
||||
let mut rule_views = Vec::new();
|
||||
|
||||
for ev in rules_map.iter_events() {
|
||||
rule_views.push(RuleView { events: vec![ev] });
|
||||
}
|
||||
|
||||
Self {
|
||||
rules_map: rules_map.clone(),
|
||||
rule_views,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RulesContainer for CompiledRules {
|
||||
type Rule = dyn RuleEvents;
|
||||
|
||||
fn iter_rules<'a>(&'a self) -> Box<dyn Iterator<Item = &'a Self::Rule> + 'a> {
|
||||
// Key: Convert &RuleView into &dyn RuleEvents
|
||||
Box::new(self.rule_views.iter().map(|v| v as &dyn RuleEvents))
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for bucket notifications.
|
||||
/// This struct now holds the parsed and validated rules in the new RulesMap format.
|
||||
@@ -119,11 +166,26 @@ impl BucketNotificationConfig {
|
||||
pub fn set_region(&mut self, region: &str) {
|
||||
self.region = region.to_string();
|
||||
}
|
||||
}
|
||||
|
||||
// Add a helper to PatternRules if not already present
|
||||
impl pattern_rules::PatternRules {
|
||||
pub fn inner(&self) -> &HashMap<String, target_id_set::TargetIdSet> {
|
||||
&self.rules
|
||||
/// Compiles the current BucketNotificationConfig into a BucketRulesSnapshot.
|
||||
/// This involves transforming the rules into a format suitable for runtime use,
|
||||
/// and calculating the event mask based on the subscribed events of the rules.
|
||||
///
|
||||
/// # Returns
|
||||
/// A BucketRulesSnapshot containing the compiled rules and event mask.
|
||||
pub fn compile_snapshot(&self) -> BucketRulesSnapshot<DynRulesContainer> {
|
||||
// 1) Generate container from RulesMap
|
||||
let compiled = CompiledRules::from_rules_map(self.get_rules_map());
|
||||
let rules: Arc<DynRulesContainer> = Arc::new(compiled) as Arc<DynRulesContainer>;
|
||||
|
||||
// 2) Calculate event_mask
|
||||
let mut mask = 0u64;
|
||||
for rule in rules.iter_rules() {
|
||||
for ev in rule.subscribed_events() {
|
||||
mask |= ev.mask();
|
||||
}
|
||||
}
|
||||
|
||||
BucketRulesSnapshot { event_mask: mask, rules }
|
||||
}
|
||||
}
|
||||
|
||||
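compile_snapshot folds every event subscribed by any rule into a single u64 mask, so the hot path can reject an unsubscribed event with one bitwise AND before any pattern matching happens. A toy illustration of that bitmask scheme follows; the bit assignments are invented for the example, while the real values come from EventName::mask().

// Hypothetical bit assignments; EventName::mask() defines the real ones.
const OBJECT_CREATED_PUT: u64 = 1 << 0;
const OBJECT_CREATED_COPY: u64 = 1 << 1;
const OBJECT_REMOVED_DELETE: u64 = 1 << 8;

fn main() {
    // Compile phase: OR the masks of every subscribed event.
    let subscribed = [OBJECT_CREATED_PUT, OBJECT_CREATED_COPY];
    let event_mask = subscribed.iter().fold(0u64, |m, ev| m | ev);

    // Hot path: one AND decides whether rule matching is worth doing at all.
    assert!(event_mask & OBJECT_CREATED_PUT != 0);     // subscribed
    assert!(event_mask & OBJECT_REMOVED_DELETE == 0);  // not subscribed, skip early
    println!("event_mask = {event_mask:#b}");
}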
@@ -12,22 +12,24 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod config;
|
||||
pub mod pattern;
|
||||
pub mod pattern_rules;
|
||||
pub mod rules_map;
|
||||
pub mod target_id_set;
|
||||
mod pattern_rules;
|
||||
mod rules_map;
|
||||
mod subscriber_index;
|
||||
mod subscriber_snapshot;
|
||||
mod target_id_set;
|
||||
pub mod xml_config; // For XML structure definition and parsing
|
||||
|
||||
pub mod config; // Definition and parsing for BucketNotificationConfig
|
||||
// Definition and parsing for BucketNotificationConfig
|
||||
|
||||
// Re-export key types from submodules for easy access to `crate::rules::TypeName`
|
||||
// Re-export key types from submodules for external use
|
||||
pub use config::BucketNotificationConfig;
|
||||
// Assume that BucketNotificationConfigError is also defined in config.rs
|
||||
// Or if it is still an alias for xml_config::ParseConfigError , adjust accordingly
|
||||
pub use xml_config::ParseConfigError as BucketNotificationConfigError;
|
||||
|
||||
pub use pattern_rules::PatternRules;
|
||||
pub use rules_map::RulesMap;
|
||||
pub use subscriber_index::*;
|
||||
pub use subscriber_snapshot::*;
|
||||
pub use target_id_set::TargetIdSet;
|
||||
pub use xml_config::{NotificationConfiguration, ParseConfigError};
|
||||
pub use xml_config::{NotificationConfiguration, ParseConfigError, ParseConfigError as BucketNotificationConfigError};
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use super::pattern;
|
||||
use super::target_id_set::TargetIdSet;
|
||||
use crate::rules::TargetIdSet;
|
||||
use crate::rules::pattern;
|
||||
use hashbrown::HashMap;
|
||||
use rayon::prelude::*;
|
||||
use rustfs_targets::arn::TargetID;
|
||||
@@ -27,31 +27,69 @@ pub struct PatternRules {
|
||||
}
|
||||
|
||||
impl PatternRules {
|
||||
/// Create a new, empty PatternRules.
|
||||
pub fn new() -> Self {
|
||||
Default::default()
|
||||
}
|
||||
|
||||
/// Add rules: Pattern and Target ID.
|
||||
/// If the pattern already exists, add target_id to the existing TargetIdSet.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `pattern` - The object name pattern.
|
||||
/// * `target_id` - The TargetID to associate with the pattern.
|
||||
pub fn add(&mut self, pattern: String, target_id: TargetID) {
|
||||
self.rules.entry(pattern).or_default().insert(target_id);
|
||||
}
|
||||
|
||||
/// Checks if there are any rules that match the given object name.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `object_name` - The object name to match against the patterns.
|
||||
///
|
||||
/// # Returns
|
||||
/// `true` if any pattern matches the object name, otherwise `false`.
|
||||
pub fn match_simple(&self, object_name: &str) -> bool {
|
||||
self.rules.keys().any(|p| pattern::match_simple(p, object_name))
|
||||
}
|
||||
|
||||
/// Returns all TargetIDs that match the object name.
|
||||
///
|
||||
/// Performance optimization points:
|
||||
/// 1) Small collections are processed serially to avoid rayon scheduling/merging overhead
|
||||
/// 2) On a match, no temporary TargetIdSet is allocated per rule; the result set is extended directly
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `object_name` - The object name to match against the patterns.
|
||||
///
|
||||
/// # Returns
|
||||
/// A TargetIdSet containing all TargetIDs that match the object name.
|
||||
pub fn match_targets(&self, object_name: &str) -> TargetIdSet {
|
||||
let n = self.rules.len();
|
||||
if n == 0 {
|
||||
return TargetIdSet::new();
|
||||
}
|
||||
|
||||
// Empirical threshold: the serial path is usually faster below this value (tune after benchmarking)
|
||||
const PAR_THRESHOLD: usize = 128;
|
||||
|
||||
if n < PAR_THRESHOLD {
|
||||
let mut out = TargetIdSet::new();
|
||||
for (pattern_str, target_set) in self.rules.iter() {
|
||||
if pattern::match_simple(pattern_str, object_name) {
|
||||
out.extend(target_set.iter().cloned());
|
||||
}
|
||||
}
|
||||
return out;
|
||||
}
|
||||
// Parallel path: Each thread accumulates a local set and finally merges it to reduce frequent allocations
|
||||
self.rules
|
||||
.par_iter()
|
||||
.filter_map(|(pattern_str, target_set)| {
|
||||
.fold(TargetIdSet::new, |mut local, (pattern_str, target_set)| {
|
||||
if pattern::match_simple(pattern_str, object_name) {
|
||||
Some(target_set.iter().cloned().collect::<TargetIdSet>())
|
||||
} else {
|
||||
None
|
||||
local.extend(target_set.iter().cloned());
|
||||
}
|
||||
local
|
||||
})
|
||||
.reduce(TargetIdSet::new, |mut acc, set| {
|
||||
acc.extend(set);
|
||||
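match_targets switches between a plain loop and a rayon fold/reduce depending on the number of patterns, so small rule sets avoid scheduler and merge overhead. A self-contained sketch of the same shape over a toy pattern-to-targets map; the threshold value and the prefix-based match predicate are placeholders, not the crate's pattern semantics.

use rayon::prelude::*;
use std::collections::{HashMap, HashSet};

const PAR_THRESHOLD: usize = 128; // tune after benchmarking

fn matching_targets(rules: &HashMap<String, HashSet<String>>, object: &str) -> HashSet<String> {
    if rules.len() < PAR_THRESHOLD {
        // Serial path: cheapest for small rule sets.
        let mut out = HashSet::new();
        for (pattern, targets) in rules {
            if object.starts_with(pattern) {
                out.extend(targets.iter().cloned());
            }
        }
        return out;
    }
    // Parallel path: each worker accumulates a local set, merged once at the end.
    rules
        .par_iter()
        .fold(HashSet::new, |mut local, (pattern, targets)| {
            if object.starts_with(pattern) {
                local.extend(targets.iter().cloned());
            }
            local
        })
        .reduce(HashSet::new, |mut acc, set| {
            acc.extend(set);
            acc
        })
}

fn main() {
    let mut rules = HashMap::new();
    rules.insert("logs/".to_string(), HashSet::from(["webhook-1".to_string()]));
    rules.insert("images/".to_string(), HashSet::from(["queue-1".to_string()]));
    println!("{:?}", matching_targets(&rules, "logs/2024/app.log"));
}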
@@ -65,6 +103,11 @@ impl PatternRules {
|
||||
|
||||
/// Merge another PatternRules.
|
||||
/// Corresponding to Go's `Rules.Union`.
|
||||
/// # Arguments
|
||||
/// * `other` - The PatternRules to merge with.
|
||||
///
|
||||
/// # Returns
|
||||
/// A new PatternRules containing the union of both.
|
||||
pub fn union(&self, other: &Self) -> Self {
|
||||
let mut new_rules = self.clone();
|
||||
for (pattern, their_targets) in &other.rules {
|
||||
@@ -76,6 +119,13 @@ impl PatternRules {
|
||||
|
||||
/// Calculate the difference from another PatternRules.
|
||||
/// Corresponding to Go's `Rules.Difference`.
|
||||
/// The result contains only the patterns and TargetIDs that are in `self` but not in `other`.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `other` - The PatternRules to compare against.
|
||||
///
|
||||
/// # Returns
|
||||
/// A new PatternRules containing the difference.
|
||||
pub fn difference(&self, other: &Self) -> Self {
|
||||
let mut result_rules = HashMap::new();
|
||||
for (pattern, self_targets) in &self.rules {
|
||||
@@ -94,4 +144,59 @@ impl PatternRules {
|
||||
}
|
||||
PatternRules { rules: result_rules }
|
||||
}
|
||||
|
||||
/// Merge another PatternRules into self in place.
|
||||
/// Corresponding to Go's `Rules.UnionInPlace`.
|
||||
/// # Arguments
|
||||
/// * `other` - The PatternRules to merge with.
|
||||
pub fn union_in_place(&mut self, other: &Self) {
|
||||
for (pattern, their_targets) in &other.rules {
|
||||
self.rules
|
||||
.entry(pattern.clone())
|
||||
.or_default()
|
||||
.extend(their_targets.iter().cloned());
|
||||
}
|
||||
}
|
||||
|
||||
/// Calculate the difference from another PatternRules in place.
|
||||
/// Corresponding to Go's `Rules.DifferenceInPlace`.
|
||||
/// The result contains only the patterns and TargetIDs that are in `self` but not in `other`.
|
||||
/// # Arguments
|
||||
/// * `other` - The PatternRules to compare against.
|
||||
pub fn difference_in_place(&mut self, other: &Self) {
|
||||
self.rules.retain(|pattern, self_targets| {
|
||||
if let Some(other_targets) = other.rules.get(pattern) {
|
||||
// Remove other_targets from self_targets
|
||||
self_targets.retain(|tid| !other_targets.contains(tid));
|
||||
}
|
||||
!self_targets.is_empty()
|
||||
});
|
||||
}
|
||||
|
||||
/// Remove a pattern and its associated TargetID set from the PatternRules.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `pattern` - The pattern to remove.
|
||||
pub fn remove_pattern(&mut self, pattern: &str) -> bool {
|
||||
self.rules.remove(pattern).is_some()
|
||||
}
|
||||
|
||||
/// Determine whether the current PatternRules contains the specified TargetID (referenced by any pattern).
|
||||
///
|
||||
/// # Parameters
|
||||
/// * `target_id` - The TargetID to check for existence within the PatternRules
|
||||
///
|
||||
/// # Returns
|
||||
/// * `true` if the TargetID exists in any of the patterns; `false` otherwise.
|
||||
pub fn contains_target_id(&self, target_id: &TargetID) -> bool {
|
||||
self.rules.values().any(|set| set.contains(target_id))
|
||||
}
|
||||
|
||||
/// Expose the internal rules for use in scenarios such as BucketNotificationConfig::validate.
|
||||
///
|
||||
/// # Returns
|
||||
/// A reference to the internal HashMap of patterns to TargetIdSets.
|
||||
pub fn inner(&self) -> &HashMap<String, TargetIdSet> {
|
||||
&self.rules
|
||||
}
|
||||
}
|
||||
|
||||
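union_in_place and difference_in_place above mutate the receiving map instead of cloning and rebuilding it, which is what add_map and remove_map now rely on. The same semantics on a toy map, assuming std collections rather than the crate's hashbrown types.

use std::collections::{HashMap, HashSet};

type Rules = HashMap<String, HashSet<&'static str>>;

fn union_in_place(rules: &mut Rules, other: &Rules) {
    for (pattern, theirs) in other {
        rules.entry(pattern.clone()).or_default().extend(theirs.iter().copied());
    }
}

fn difference_in_place(rules: &mut Rules, other: &Rules) {
    rules.retain(|pattern, ours| {
        if let Some(theirs) = other.get(pattern) {
            ours.retain(|t| !theirs.contains(t));
        }
        !ours.is_empty() // drop patterns whose target set became empty
    });
}

fn main() {
    let mut a: Rules = HashMap::from([("*.jpg".into(), HashSet::from(["webhook-1"]))]);
    let b: Rules = HashMap::from([
        ("*.jpg".into(), HashSet::from(["queue-1"])),
        ("*.png".into(), HashSet::from(["queue-1"])),
    ]);
    union_in_place(&mut a, &b);      // *.jpg now routes to both targets, *.png to queue-1
    difference_in_place(&mut a, &b); // removes b's contributions again
    println!("{a:?}");               // {"*.jpg": {"webhook-1"}}
}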
@@ -12,8 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use super::pattern_rules::PatternRules;
|
||||
use super::target_id_set::TargetIdSet;
|
||||
use crate::rules::{PatternRules, TargetIdSet};
|
||||
use hashbrown::HashMap;
|
||||
use rustfs_targets::EventName;
|
||||
use rustfs_targets::arn::TargetID;
|
||||
@@ -31,6 +30,9 @@ pub struct RulesMap {
|
||||
|
||||
impl RulesMap {
|
||||
/// Create a new, empty RulesMap.
|
||||
///
|
||||
/// # Returns
|
||||
/// A new instance of RulesMap with an empty map and a total_events_mask set to 0.
|
||||
pub fn new() -> Self {
|
||||
Default::default()
|
||||
}
|
||||
@@ -67,12 +69,12 @@ impl RulesMap {
|
||||
|
||||
/// Merge another RulesMap.
|
||||
/// Corresponds to Go's `RulesMap.Add(rulesMap2 RulesMap)`.
|
||||
///
|
||||
/// # Parameters
|
||||
/// * `other_map` - The other RulesMap to be merged into the current one.
|
||||
pub fn add_map(&mut self, other_map: &Self) {
|
||||
for (event_name, other_pattern_rules) in &other_map.map {
|
||||
let self_pattern_rules = self.map.entry(*event_name).or_default();
|
||||
// PatternRules::union Returns the new PatternRules, we need to modify the existing ones
|
||||
let merged_rules = self_pattern_rules.union(other_pattern_rules);
|
||||
*self_pattern_rules = merged_rules;
|
||||
self.map.entry(*event_name).or_default().union_in_place(other_pattern_rules);
|
||||
}
|
||||
// Directly merge two masks.
|
||||
self.total_events_mask |= other_map.total_events_mask;
|
||||
@@ -81,11 +83,14 @@ impl RulesMap {
|
||||
/// Remove another rule defined in the RulesMap from the current RulesMap.
|
||||
///
|
||||
/// After the rule is removed, `total_events_mask` is recalculated to ensure its accuracy.
|
||||
///
|
||||
/// # Parameters
|
||||
/// * `other_map` - The other RulesMap containing rules to be removed from the current one.
|
||||
pub fn remove_map(&mut self, other_map: &Self) {
|
||||
let mut events_to_remove = Vec::new();
|
||||
for (event_name, self_pattern_rules) in &mut self.map {
|
||||
if let Some(other_pattern_rules) = other_map.map.get(event_name) {
|
||||
*self_pattern_rules = self_pattern_rules.difference(other_pattern_rules);
|
||||
self_pattern_rules.difference_in_place(other_pattern_rules);
|
||||
if self_pattern_rules.is_empty() {
|
||||
events_to_remove.push(*event_name);
|
||||
}
|
||||
@@ -102,6 +107,9 @@ impl RulesMap {
|
||||
///
|
||||
/// This method uses a bitmask for a quick check of O(1) complexity.
|
||||
/// `event_name` can be a compound type, such as `ObjectCreatedAll`.
|
||||
///
|
||||
/// # Parameters
|
||||
/// * `event_name` - The event name to check for subscribers.
|
||||
pub fn has_subscriber(&self, event_name: &EventName) -> bool {
|
||||
// event_name.mask() will handle compound events correctly
|
||||
(self.total_events_mask & event_name.mask()) != 0
|
||||
@@ -112,39 +120,54 @@ impl RulesMap {
|
||||
/// # Notice
|
||||
/// The `event_name` parameter should be a specific, non-compound event type.
|
||||
/// Because this is taken from the `Event` object that actually occurs.
|
||||
///
|
||||
/// # Parameters
|
||||
/// * `event_name` - The specific event name to match against.
|
||||
/// * `object_key` - The object key to match against the patterns in the rules.
|
||||
///
|
||||
/// # Returns
|
||||
/// * A set of TargetIDs that match the given event and object key.
|
||||
pub fn match_rules(&self, event_name: EventName, object_key: &str) -> TargetIdSet {
|
||||
// Use bitmask to quickly determine whether there is a matching rule
|
||||
if (self.total_events_mask & event_name.mask()) == 0 {
|
||||
return TargetIdSet::new(); // No matching rules
|
||||
}
|
||||
|
||||
// First try to directly match the event name
|
||||
if let Some(pattern_rules) = self.map.get(&event_name) {
|
||||
let targets = pattern_rules.match_targets(object_key);
|
||||
if !targets.is_empty() {
|
||||
return targets;
|
||||
}
|
||||
}
|
||||
// Go's RulesMap[eventName] is directly retrieved, and if it does not exist, it is empty Rules.
|
||||
// Rust's HashMap::get returns Option. If the event name does not exist, there is no rule.
|
||||
// Compound events (such as ObjectCreatedAll) have been expanded as a single event when add_rule_config.
|
||||
// Therefore, a single event name should be used when querying.
|
||||
// If event_name itself is a single type, look it up directly.
|
||||
// If event_name is a compound type, Go's logic is expanded when added.
|
||||
// Here match_rules should receive events that may already be single.
|
||||
// If the caller passes in a compound event, it should expand itself or handle this function first.
|
||||
// Assume that event_name is already a specific event that can be used for searching.
|
||||
// In Go, RulesMap[eventName] returns empty rules if the key doesn't exist.
|
||||
// Rust's HashMap::get returns Option, so missing key means no rules.
|
||||
// Compound events like ObjectCreatedAll are expanded into specific events during add_rule_config.
|
||||
// Thus, queries should use specific event names.
|
||||
// If event_name is compound, expansion happens at addition time.
|
||||
// match_rules assumes event_name is already a specific event for lookup.
|
||||
// Callers should expand compound events before calling this method.
|
||||
self.map
|
||||
.get(&event_name)
|
||||
.map_or_else(TargetIdSet::new, |pr| pr.match_targets(object_key))
|
||||
}
|
||||
|
||||
/// Check if RulesMap is empty.
|
||||
///
|
||||
/// # Returns
|
||||
/// * `true` if there are no rules in the map; `false` otherwise
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.map.is_empty()
|
||||
}
|
||||
|
||||
/// Determine whether the current RulesMap contains the specified TargetID (referenced by any event / pattern).
|
||||
///
|
||||
/// # Parameters
|
||||
/// * `target_id` - The TargetID to check for existence within the RulesMap
|
||||
///
|
||||
/// # Returns
|
||||
/// * `true` if the TargetID exists in any of the PatternRules; `false` otherwise.
|
||||
pub fn contains_target_id(&self, target_id: &TargetID) -> bool {
|
||||
self.map.values().any(|pr| pr.contains_target_id(target_id))
|
||||
}
|
||||
|
||||
/// Returns a clone of internal rules for use in scenarios such as BucketNotificationConfig::validate.
|
||||
///
|
||||
/// # Returns
|
||||
/// A reference to the internal HashMap of EventName to PatternRules.
|
||||
pub fn inner(&self) -> &HashMap<EventName, PatternRules> {
|
||||
&self.map
|
||||
}
|
||||
@@ -160,18 +183,32 @@ impl RulesMap {
|
||||
}
|
||||
|
||||
/// Remove rules and optimize performance
|
||||
///
|
||||
/// # Parameters
|
||||
/// * `event_name` - The EventName from which to remove the rule.
|
||||
/// * `pattern` - The pattern of the rule to be removed.
|
||||
#[allow(dead_code)]
|
||||
pub fn remove_rule(&mut self, event_name: &EventName, pattern: &str) {
|
||||
let mut remove_event = false;
|
||||
|
||||
if let Some(pattern_rules) = self.map.get_mut(event_name) {
|
||||
pattern_rules.rules.remove(pattern);
|
||||
pattern_rules.remove_pattern(pattern);
|
||||
if pattern_rules.is_empty() {
|
||||
self.map.remove(event_name);
|
||||
remove_event = true;
|
||||
}
|
||||
}
|
||||
|
||||
if remove_event {
|
||||
self.map.remove(event_name);
|
||||
}
|
||||
|
||||
self.recalculate_mask(); // Delay calculation mask
|
||||
}
|
||||
|
||||
/// Batch Delete Rules
|
||||
/// Batch Delete Rules and Optimize Performance
|
||||
///
|
||||
/// # Parameters
|
||||
/// * `event_names` - A slice of EventNames to be removed.
|
||||
#[allow(dead_code)]
|
||||
pub fn remove_rules(&mut self, event_names: &[EventName]) {
|
||||
for event_name in event_names {
|
||||
@@ -181,9 +218,27 @@ impl RulesMap {
|
||||
}
|
||||
|
||||
/// Update rules and optimize performance
|
||||
///
|
||||
/// # Parameters
|
||||
/// * `event_name` - The EventName to update.
|
||||
/// * `pattern` - The pattern of the rule to be updated.
|
||||
/// * `target_id` - The TargetID to be added.
|
||||
#[allow(dead_code)]
|
||||
pub fn update_rule(&mut self, event_name: EventName, pattern: String, target_id: TargetID) {
|
||||
self.map.entry(event_name).or_default().add(pattern, target_id);
|
||||
self.total_events_mask |= event_name.mask(); // Update only the relevant bitmask
|
||||
}
|
||||
|
||||
/// Iterate all EventName keys contained in this RulesMap.
|
||||
///
|
||||
/// Used by snapshot compilation to compute bucket event_mask.
|
||||
///
|
||||
/// # Returns
|
||||
/// An iterator over all EventName keys in the RulesMap.
|
||||
#[inline]
|
||||
pub fn iter_events(&self) -> impl Iterator<Item = EventName> + '_ {
|
||||
// `inner()` is already used by config.rs, so we reuse it here.
|
||||
// Since the key type `EventName` is `Copy`, `.copied()` is the cheapest way to return owned values.
|
||||
self.inner().keys().copied()
|
||||
}
|
||||
}
|
||||
|
||||
131
crates/notify/src/rules/subscriber_index.rs
Normal file
@@ -0,0 +1,131 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::rules::{BucketRulesSnapshot, BucketSnapshotRef, DynRulesContainer};
|
||||
use arc_swap::ArcSwap;
|
||||
use rustfs_targets::EventName;
|
||||
use starshard::ShardedHashMap;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// A global bucket -> snapshot index.
|
||||
///
|
||||
/// Read path: lock-free load (ArcSwap)
|
||||
/// Write path: atomic replacement after building a new snapshot
|
||||
pub struct SubscriberIndex {
|
||||
// Use starshard for sharding to reduce lock contention when the number of buckets is large
|
||||
inner: ShardedHashMap<String, Arc<ArcSwap<BucketRulesSnapshot<DynRulesContainer>>>>,
|
||||
// Cache an "empty rule container" for empty snapshots (avoids building every time)
|
||||
empty_rules: Arc<DynRulesContainer>,
|
||||
}
|
||||
|
||||
/// Manual Debug impl: avoids deriving over fields that do not implement Debug
|
||||
impl fmt::Debug for SubscriberIndex {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("SubscriberIndex").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl SubscriberIndex {
|
||||
/// Create a new SubscriberIndex.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `empty_rules` - An Arc to an empty rules container used for empty snapshots
|
||||
///
|
||||
/// Returns a new instance of SubscriberIndex.
|
||||
pub fn new(empty_rules: Arc<DynRulesContainer>) -> Self {
|
||||
Self {
|
||||
inner: ShardedHashMap::new(64),
|
||||
empty_rules,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the current snapshot of a bucket.
|
||||
/// If it does not exist, return empty snapshot.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket` - The name of the bucket to load.
|
||||
///
|
||||
/// Returns the snapshot reference for the specified bucket.
|
||||
pub fn load_snapshot(&self, bucket: &str) -> BucketSnapshotRef {
|
||||
match self.inner.get(&bucket.to_string()) {
|
||||
Some(cell) => cell.load_full(),
|
||||
None => Arc::new(BucketRulesSnapshot::empty(self.empty_rules.clone())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Quickly determine whether the bucket has a subscription to an event.
|
||||
/// When the same snapshot is read, this check stays consistent with the subsequent rule matching.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket` - The name of the bucket to check.
|
||||
/// * `event` - The event name to check for subscriptions.
|
||||
///
|
||||
/// Returns `true` if there are subscribers for the event, `false` otherwise.
|
||||
#[inline]
|
||||
pub fn has_subscriber(&self, bucket: &str, event: &EventName) -> bool {
|
||||
let snap = self.load_snapshot(bucket);
|
||||
if snap.event_mask == 0 {
|
||||
return false;
|
||||
}
|
||||
snap.has_event(event)
|
||||
}
|
||||
|
||||
/// Atomically update a bucket's snapshot (whole package replacement).
|
||||
///
|
||||
/// - The caller first builds the complete `BucketRulesSnapshot` (including event_mask and rules).
|
||||
/// - This method ensures that the read path will not observe intermediate states.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket` - The name of the bucket to update.
|
||||
/// * `new_snapshot` - The new snapshot to store for the bucket.
|
||||
pub fn store_snapshot(&self, bucket: &str, new_snapshot: BucketRulesSnapshot<DynRulesContainer>) {
|
||||
let key = bucket.to_string();
|
||||
|
||||
let cell = self.inner.get(&key).unwrap_or_else(|| {
|
||||
// Insert a default cell (empty snapshot)
|
||||
let init = Arc::new(ArcSwap::from_pointee(BucketRulesSnapshot::empty(self.empty_rules.clone())));
|
||||
self.inner.insert(key.clone(), init.clone());
|
||||
init
|
||||
});
|
||||
|
||||
cell.store(Arc::new(new_snapshot));
|
||||
}
|
||||
|
||||
/// Delete the bucket's subscription view (make it empty).
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket` - The name of the bucket to clear.
|
||||
pub fn clear_bucket(&self, bucket: &str) {
|
||||
if let Some(cell) = self.inner.get(&bucket.to_string()) {
|
||||
cell.store(Arc::new(BucketRulesSnapshot::empty(self.empty_rules.clone())));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for SubscriberIndex {
|
||||
fn default() -> Self {
|
||||
// An empty rules container is required; a minimal empty implementation is provided inline here
|
||||
#[derive(Debug)]
|
||||
struct EmptyRules;
|
||||
impl crate::rules::subscriber_snapshot::RulesContainer for EmptyRules {
|
||||
type Rule = dyn crate::rules::subscriber_snapshot::RuleEvents;
|
||||
fn iter_rules<'a>(&'a self) -> Box<dyn Iterator<Item = &'a Self::Rule> + 'a> {
|
||||
Box::new(std::iter::empty())
|
||||
}
|
||||
}
|
||||
|
||||
Self::new(Arc::new(EmptyRules) as Arc<DynRulesContainer>)
|
||||
}
|
||||
}
|
||||
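SubscriberIndex keeps one ArcSwap cell per bucket, so readers load a complete, immutable snapshot without taking a lock while writers swap in a freshly built one. A reduced sketch of that read/replace pattern using only the arc_swap crate; the Snapshot struct below is a stand-in, not the crate's BucketRulesSnapshot.

use arc_swap::ArcSwap;
use std::sync::Arc;

// Stand-in for BucketRulesSnapshot: immutable once published.
#[derive(Debug)]
struct Snapshot {
    event_mask: u64,
}

fn main() {
    // One cell per bucket; start with an empty snapshot.
    let cell = Arc::new(ArcSwap::from_pointee(Snapshot { event_mask: 0 }));

    // Read path: lock-free, always observes a complete snapshot.
    let snap = cell.load_full();
    assert_eq!(snap.event_mask, 0);

    // Write path: build the new snapshot off to the side, then swap atomically.
    cell.store(Arc::new(Snapshot { event_mask: 0b1011 }));

    // Readers holding the old Arc keep a consistent view; new loads see the update.
    assert_eq!(snap.event_mask, 0);
    assert_eq!(cell.load_full().event_mask, 0b1011);
}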
117
crates/notify/src/rules/subscriber_snapshot.rs
Normal file
@@ -0,0 +1,117 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use rustfs_targets::EventName;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Let the rules structure provide "what events it is subscribed to".
|
||||
/// This way BucketRulesSnapshot does not need to know the internal shape of rules.
|
||||
pub trait RuleEvents {
|
||||
fn subscribed_events(&self) -> &[EventName];
|
||||
}
|
||||
|
||||
/// Let the rules container provide the ability to iterate over all rules (abstracting only to the minimum necessary).
|
||||
pub trait RulesContainer {
|
||||
type Rule: RuleEvents + ?Sized;
|
||||
fn iter_rules<'a>(&'a self) -> Box<dyn Iterator<Item = &'a Self::Rule> + 'a>;
|
||||
|
||||
/// Fast emptiness check for snapshots (default impl so containers get `is_empty()` for free)
|
||||
fn is_empty(&self) -> bool {
|
||||
self.iter_rules().next().is_none()
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents a bucket's notification subscription view snapshot (immutable).
|
||||
///
|
||||
/// - `event_mask`: Quickly determine whether there is a subscription to a certain type of event (bitset/flags).
|
||||
/// - `rules`: precise rule mapping (prefix/suffix/pattern -> targets).
|
||||
///
|
||||
/// The read path only reads this snapshot to ensure consistency.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct BucketRulesSnapshot<R>
|
||||
where
|
||||
R: RulesContainer + ?Sized,
|
||||
{
|
||||
pub event_mask: u64,
|
||||
pub rules: Arc<R>,
|
||||
}
|
||||
|
||||
impl<R> BucketRulesSnapshot<R>
|
||||
where
|
||||
R: RulesContainer + ?Sized,
|
||||
{
|
||||
/// Create an empty snapshot with no subscribed events and no rules.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `rules` - An Arc to a rules container (can be an empty container).
|
||||
///
|
||||
/// # Returns
|
||||
/// An instance of `BucketRulesSnapshot` with an empty event mask.
|
||||
#[inline]
|
||||
pub fn empty(rules: Arc<R>) -> Self {
|
||||
Self { event_mask: 0, rules }
|
||||
}
|
||||
|
||||
/// Check if the snapshot has any subscribers for the specified event.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `event` - The event name to check for subscriptions.
|
||||
///
|
||||
/// # Returns
|
||||
/// `true` if there are subscribers for the event, `false` otherwise.
|
||||
#[inline]
|
||||
pub fn has_event(&self, event: &EventName) -> bool {
|
||||
(self.event_mask & event.mask()) != 0
|
||||
}
|
||||
|
||||
/// Check if the snapshot is empty (no subscribed events or rules).
|
||||
///
|
||||
/// # Returns
|
||||
/// `true` if the snapshot is empty, `false` otherwise.
|
||||
#[inline]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.event_mask == 0 || self.rules.is_empty()
|
||||
}
|
||||
|
||||
/// [debug] Assert that `event_mask` is consistent with the event declared in `rules`.
|
||||
///
|
||||
/// Constraints:
|
||||
/// - only runs in debug builds (release incurs no cost).
|
||||
/// - If the rule contains compound events (*All / Everything), rely on `EventName::mask()` to automatically expand.
|
||||
#[inline]
|
||||
pub fn debug_assert_mask_consistent(&self) {
|
||||
#[cfg(debug_assertions)]
|
||||
{
|
||||
let mut recomputed = 0u64;
|
||||
for rule in self.rules.iter_rules() {
|
||||
for ev in rule.subscribed_events() {
|
||||
recomputed |= ev.mask();
|
||||
}
|
||||
}
|
||||
|
||||
debug_assert!(
|
||||
recomputed == self.event_mask,
|
||||
"BucketRulesSnapshot.event_mask inconsistent: stored={:#x}, recomputed={:#x}",
|
||||
self.event_mask,
|
||||
recomputed
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Unified trait-object snapshot type (avoids `Sized` bounds and repeated generic arguments)
|
||||
pub type DynRulesContainer = dyn RulesContainer<Rule = dyn RuleEvents> + Send + Sync;
|
||||
|
||||
/// Expose Arc form to facilitate sharing.
|
||||
pub type BucketSnapshotRef = Arc<BucketRulesSnapshot<DynRulesContainer>>;
|
||||
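DynRulesContainer erases the concrete rules type behind a trait object whose associated Rule is itself unsized, which is what lets CompiledRules and the empty default share one snapshot type. A compact sketch of that shape, independent of the notify crate; the traits are re-stated here with plain u64 events purely for illustration.

// Minimal re-statement of the two traits, for illustration only.
trait RuleEvents {
    fn subscribed_events(&self) -> &[u64];
}

trait RulesContainer {
    type Rule: RuleEvents + ?Sized;
    fn iter_rules<'a>(&'a self) -> Box<dyn Iterator<Item = &'a Self::Rule> + 'a>;
}

// Unified trait-object alias, like DynRulesContainer.
type DynContainer = dyn RulesContainer<Rule = dyn RuleEvents> + Send + Sync;

struct OneRule(Vec<u64>);
impl RuleEvents for OneRule {
    fn subscribed_events(&self) -> &[u64] {
        &self.0
    }
}

struct SimpleContainer(Vec<OneRule>);
impl RulesContainer for SimpleContainer {
    type Rule = dyn RuleEvents;
    fn iter_rules<'a>(&'a self) -> Box<dyn Iterator<Item = &'a Self::Rule> + 'a> {
        // Coerce each &OneRule to &dyn RuleEvents, as CompiledRules does with RuleView.
        Box::new(self.0.iter().map(|r| r as &dyn RuleEvents))
    }
}

fn main() {
    use std::sync::Arc;
    let container: Arc<DynContainer> = Arc::new(SimpleContainer(vec![OneRule(vec![1, 4])]));
    let mask = container
        .iter_rules()
        .flat_map(|r| r.subscribed_events().iter().copied())
        .fold(0u64, |m, ev| m | ev);
    assert_eq!(mask, 5);
}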
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use super::pattern;
|
||||
use crate::rules::pattern;
|
||||
use hashbrown::HashSet;
|
||||
use rustfs_targets::EventName;
|
||||
use rustfs_targets::arn::{ARN, ArnError, TargetIDError};
|
||||
|
||||
@@ -13,18 +13,23 @@
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{Event, integration::NotificationMetrics};
|
||||
use rustfs_targets::StoreError;
|
||||
use rustfs_targets::Target;
|
||||
use rustfs_targets::TargetError;
|
||||
use rustfs_targets::store::{Key, Store};
|
||||
use rustfs_targets::target::EntityTarget;
|
||||
use rustfs_targets::{
|
||||
StoreError, Target, TargetError,
|
||||
store::{Key, Store},
|
||||
target::EntityTarget,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::sync::{Semaphore, mpsc};
|
||||
use tokio::time::sleep;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// Streams events from the store to the target
|
||||
/// Streams events from the store to the target with retry logic
|
||||
///
|
||||
/// # Arguments
|
||||
/// - `store`: The event store
|
||||
/// - `target`: The target to send events to
|
||||
/// - `cancel_rx`: Receiver to listen for cancellation signals
|
||||
pub async fn stream_events(
|
||||
store: &mut (dyn Store<Event, Error = StoreError, Key = Key> + Send),
|
||||
target: &dyn Target<Event>,
|
||||
@@ -67,6 +72,7 @@ pub async fn stream_events(
|
||||
match target.send_from_store(key.clone()).await {
|
||||
Ok(_) => {
|
||||
info!("Successfully sent event for target: {}", target.name());
|
||||
// send_from_store deletes the event from store on success
|
||||
success = true;
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -104,6 +110,13 @@ pub async fn stream_events(
|
||||
}
|
||||
|
||||
/// Starts the event streaming process for a target
|
||||
///
|
||||
/// # Arguments
|
||||
/// - `store`: The event store
|
||||
/// - `target`: The target to send events to
|
||||
///
|
||||
/// # Returns
|
||||
/// A sender to signal cancellation of the event stream
|
||||
pub fn start_event_stream(
|
||||
mut store: Box<dyn Store<Event, Error = StoreError, Key = Key> + Send>,
|
||||
target: Arc<dyn Target<Event> + Send + Sync>,
|
||||
@@ -119,6 +132,15 @@ pub fn start_event_stream(
|
||||
}
|
||||
|
||||
/// Start event stream with batch processing
|
||||
///
|
||||
/// # Arguments
|
||||
/// - `store`: The event store
|
||||
/// - `target`: The target to send events to
|
||||
/// - `metrics`: Metrics for monitoring
|
||||
/// - `semaphore`: Semaphore to limit concurrency
|
||||
///
|
||||
/// # Returns
|
||||
/// A sender to signal cancellation of the event stream
|
||||
pub fn start_event_stream_with_batching(
|
||||
mut store: Box<dyn Store<EntityTarget<Event>, Error = StoreError, Key = Key> + Send>,
|
||||
target: Arc<dyn Target<Event> + Send + Sync>,
|
||||
@@ -136,6 +158,16 @@ pub fn start_event_stream_with_batching(
|
||||
}
|
||||
|
||||
/// Event stream processing with batch processing
|
||||
///
|
||||
/// # Arguments
|
||||
/// - `store`: The event store
|
||||
/// - `target`: The target to send events to
|
||||
/// - `cancel_rx`: Receiver to listen for cancellation signals
|
||||
/// - `metrics`: Metrics for monitoring
|
||||
/// - `semaphore`: Semaphore to limit concurrency
|
||||
///
|
||||
/// # Notes
|
||||
/// This function processes events in batches to improve efficiency.
|
||||
pub async fn stream_events_with_batching(
|
||||
store: &mut (dyn Store<EntityTarget<Event>, Error = StoreError, Key = Key> + Send),
|
||||
target: &dyn Target<Event>,
|
||||
@@ -231,7 +263,17 @@ pub async fn stream_events_with_batching(
|
||||
}
|
||||
}
|
||||
|
||||
/// Processing event batches
|
||||
/// Processes a batch of events for a target
|
||||
/// # Arguments
|
||||
/// - `batch`: The batch of events to process
|
||||
/// - `batch_keys`: The corresponding keys of the events in the batch
|
||||
/// - `target`: The target to send events to clients
|
||||
/// - `max_retries`: Maximum number of retries for sending an event
|
||||
/// - `base_delay`: Base delay duration for retries
|
||||
/// - `metrics`: Metrics for monitoring
|
||||
/// - `semaphore`: Semaphore to limit concurrency
|
||||
/// # Notes
|
||||
/// This function processes a batch of events, sending each event to the target with retry logic.
|
||||
async fn process_batch(
|
||||
batch: &mut Vec<EntityTarget<Event>>,
|
||||
batch_keys: &mut Vec<Key>,
|
||||
@@ -262,6 +304,7 @@ async fn process_batch(
|
||||
|
||||
// Retry logic
|
||||
while retry_count < max_retries && !success {
|
||||
// After sending successfully, the event in the storage is deleted synchronously.
|
||||
match target.send_from_store(key.clone()).await {
|
||||
Ok(_) => {
|
||||
info!("Successfully sent event for target: {}, Key: {}", target.name(), key.to_string());
|
||||
|
||||
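Both stream_events and process_batch retry a failed send a bounded number of times and sleep between attempts before giving up. A minimal standalone sketch of that loop; max_retries, base_delay and the fallible try_send are placeholders for the real store/target plumbing.

use std::time::Duration;
use tokio::time::sleep;

// Hypothetical fallible delivery; stands in for Target::send_from_store.
async fn try_send(attempt: u32) -> Result<(), String> {
    if attempt < 2 {
        Err(format!("transient failure on attempt {attempt}"))
    } else {
        Ok(())
    }
}

#[tokio::main]
async fn main() {
    let max_retries: u32 = 3;
    let base_delay = Duration::from_millis(100);

    let mut attempt = 0;
    loop {
        match try_send(attempt).await {
            Ok(()) => {
                println!("sent after {attempt} retries");
                break;
            }
            Err(e) if attempt + 1 < max_retries => {
                attempt += 1;
                // Back off before the next attempt; the real code scales the delay similarly.
                sleep(base_delay * attempt).await;
                eprintln!("retrying after error: {e}");
            }
            Err(e) => {
                eprintln!("giving up after {max_retries} attempts: {e}");
                break;
            }
        }
    }
}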
@@ -39,9 +39,9 @@ use rustfs_config::{
|
||||
ENV_OBS_LOG_DIRECTORY, ENV_OBS_LOG_FLUSH_MS, ENV_OBS_LOG_MESSAGE_CAPA, ENV_OBS_LOG_POOL_CAPA,
|
||||
},
|
||||
};
|
||||
use rustfs_utils::{get_env_u64, get_env_usize, get_local_ip_with_default};
|
||||
use rustfs_utils::{get_env_opt_str, get_env_u64, get_env_usize, get_local_ip_with_default};
|
||||
use smallvec::SmallVec;
|
||||
use std::{borrow::Cow, env, fs, io::IsTerminal, time::Duration};
|
||||
use std::{borrow::Cow, fs, io::IsTerminal, time::Duration};
|
||||
use tracing::info;
|
||||
use tracing_error::ErrorLayer;
|
||||
use tracing_opentelemetry::{MetricsLayer, OpenTelemetryLayer};
|
||||
@@ -574,8 +574,8 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> Result<OtelGuard, Telemetry
|
||||
}
|
||||
|
||||
// Rule 2: The user has explicitly customized the log directory (determined by whether ENV_OBS_LOG_DIRECTORY is set)
|
||||
let user_set_log_dir = env::var(ENV_OBS_LOG_DIRECTORY).is_ok();
|
||||
if user_set_log_dir {
|
||||
let user_set_log_dir = get_env_opt_str(ENV_OBS_LOG_DIRECTORY);
|
||||
if user_set_log_dir.filter(|d| !d.is_empty()).is_some() {
|
||||
return init_file_logging(config, logger_level, is_production);
|
||||
}
|
||||
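The logging change above stops treating an exported-but-empty log-directory variable as a request for file logging: the value must be present and non-empty before init_file_logging is chosen. The same guard expressed with only std; the variable name here is illustrative.

use std::env;

fn main() {
    // Presence alone is not enough; `export MY_LOG_DIR=` should behave like "unset".
    let user_set_log_dir = env::var("MY_LOG_DIR")
        .ok()
        .filter(|d| !d.trim().is_empty());

    match user_set_log_dir {
        Some(dir) => println!("file logging into {dir}"),
        None => println!("no usable log directory configured"),
    }
}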
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.