Mirror of https://github.com/rustfs/rustfs.git, synced 2026-01-17 09:40:32 +00:00

Compare commits: 1.0.0-alph...1.0.0-alph (160 commits)
Commit SHA1s in this comparison (the author and date columns were not captured in this view):

```text
a95e549430 00f3275603 359c9d2d26 3ce99939a3 02f809312b 356dc7e0c2 e4ad86ada6 b95bee64b2
18fb920fa4 5f19eef945 40ad2a6ea9 e7a3129be4 b142563127 5660208e89 0b6f3302ce 60103f0f72
ab752458ce 1d6c8750e7 9c44f71a0a 9c432fc963 f86761fae9 377ed507c5 e063306ac3 8009ad5692
fb89a16086 666c0a9a38 486a4b58e6 f5f6ea4a5c 38c2d74d36 ffbcd3852f 75b144b7d4 d06397cf4a
f995943832 de4a3fa766 4d0045ff18 d96e04a579 cc916926ff 134e7e237c cf53a9d84a 8d7cd4cb1b
61b3100260 b19e8070a2 b8aa8214e2 3c14947878 2924b4e463 b4ba62fa33 a5b3522880 056a0ee62b
4603ece708 eb33e82b56 c7e2b4d8e7 71c59d1187 e3a0a07495 136db7e0c9 2e3c5f695a fe9609fd17
f2d79b485e 3d6681c9e5 07a26fadad a083fca17a 89c3ae77a4 82a6e78845 7e75c9b1f5 8bdff3fbcb
65d32e693f 1ff28b3157 2186f46ea3 add6453aea 4418c882ad 00c607b5ce 79585f98e0 2a3517f1d5
3942e07487 04811c0006 73c15d6be1 af5c0b13ef f17990f746 80cfb4feab 08f1a31f3f 1c51e204ab
958f054123 3e2252e4bb f3a1431fa5 3bd96bcf10 20ea591049 cc31e88c91 b5535083de 1e35edf079
8dd3e8b534 8e0aeb4fdc abe8a50b5a 61f4d307b5 3eafeb0ff0 4abfc9f554 1057953052 889c67f359
1d111464f9 a0b2f5a232 46557cddd1 443947e1ac 8821fcc1e7 17828ec2a8 94d5b1c1e4 0bca1fbd56
52c2d15a4b 352035a06f fe4fabb195 07c5e7997a 0007b541cd 0f2e4d124c 2e4ce6921b 7178a94792
e8fe9731fd 3ba415740e aeccd14d99 89a155a35d 67095c05f9 1229fddb5d 08be8f5472 0bf25fdefa
9e2fa148ee cb3e496b17 997f54e700 1a4e95e940 a3006ab407 e197486c8c 0da943a6a4 fba201df3d
ccbab3232b 421f66ea18 ede2fa9d0b 978845b555 53c126d678 9f12a7678c 2c86fe30ec ac0c34e734
ae46ea4bd3 8b3d4ea59b ef261deef6 20961d7c91 8de8172833 7c98c62d60 15c75b9d36 af650716da
552e95e368 619cc69512 76d25d9a20 834025d9e3 e2d8e9e3d3 cd6a26bc3a 5f256249f4 b10d80cbb6
7c6cbaf837 72930b1e30 6ca8945ca7 0d0edc22be 030d3c9426 b8b905be86 ace58fea0d 3a79242133
```
.config/make/build-docker-buildx-dev.mak — new file (64 lines)

```makefile
## —— Development/Source builds using direct buildx commands ---------------------------------------

.PHONY: docker-dev
docker-dev: ## Build dev multi-arch image (cannot load locally)
	@echo "🏗️ Building multi-architecture development Docker images with buildx..."
	@echo "💡 This builds from source code and is intended for local development and testing"
	@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
	$(DOCKER_CLI) buildx build \
		--platform linux/amd64,linux/arm64 \
		--file $(DOCKERFILE_SOURCE) \
		--tag rustfs:source-latest \
		--tag rustfs:dev-latest \
		.

.PHONY: docker-dev-local
docker-dev-local: ## Build dev single-arch image (local load)
	@echo "🏗️ Building single-architecture development Docker image for local use..."
	@echo "💡 This builds from source code for the current platform and loads locally"
	$(DOCKER_CLI) buildx build \
		--file $(DOCKERFILE_SOURCE) \
		--tag rustfs:source-latest \
		--tag rustfs:dev-latest \
		--load \
		.

.PHONY: docker-dev-push
docker-dev-push: ## Build and push multi-arch development image # e.g (make docker-dev-push REGISTRY=xxx)
	@if [ -z "$(REGISTRY)" ]; then \
		echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
		exit 1; \
	fi
	@echo "🚀 Building and pushing multi-architecture development Docker images..."
	@echo "💡 Pushing to registry: $(REGISTRY)"
	$(DOCKER_CLI) buildx build \
		--platform linux/amd64,linux/arm64 \
		--file $(DOCKERFILE_SOURCE) \
		--tag $(REGISTRY)/rustfs:source-latest \
		--tag $(REGISTRY)/rustfs:dev-latest \
		--push \
		.

.PHONY: dev-env-start
dev-env-start: ## Start development container environment
	@echo "🚀 Starting development environment..."
	$(DOCKER_CLI) buildx build \
		--file $(DOCKERFILE_SOURCE) \
		--tag rustfs:dev \
		--load \
		.
	$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
	$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
	$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
		-p 9010:9010 -p 9000:9000 \
		-v $(shell pwd):/workspace \
		-it rustfs:dev

.PHONY: dev-env-stop
dev-env-stop: ## Stop development container environment
	@echo "🛑 Stopping development environment..."
	$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
	$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true

.PHONY: dev-env-restart
dev-env-restart: dev-env-stop dev-env-start ## Restart development container environment
```
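For reference, the dev targets above are typically driven like this (a usage sketch; the registry value is only an example):

```bash
# Build a dev image for the current platform and load it into the local Docker daemon
make docker-dev-local

# Build the multi-arch dev image and push it (REGISTRY is required)
make docker-dev-push REGISTRY=ghcr.io/username
```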
.config/make/build-docker-buildx-production.mak — new file (41 lines)

```makefile
## —— Production builds using docker buildx (for CI/CD and production) -----------------------------

.PHONY: docker-buildx
docker-buildx: ## Build production multi-arch image (no push)
	@echo "🏗️ Building multi-architecture production Docker images with buildx..."
	./docker-buildx.sh

.PHONY: docker-buildx-push
docker-buildx-push: ## Build and push production multi-arch image
	@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
	./docker-buildx.sh --push

.PHONY: docker-buildx-version
docker-buildx-version: ## Build and version production multi-arch image # e.g (make docker-buildx-version VERSION=v1.0.0)
	@if [ -z "$(VERSION)" ]; then \
		echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
		exit 1; \
	fi
	@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
	./docker-buildx.sh --release $(VERSION)

.PHONY: docker-buildx-push-version
docker-buildx-push-version: ## Build, version, and push production multi-arch image # e.g (make docker-buildx-push-version VERSION=v1.0.0)
	@if [ -z "$(VERSION)" ]; then \
		echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
		exit 1; \
	fi
	@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
	./docker-buildx.sh --release $(VERSION) --push

.PHONY: docker-buildx-production-local
docker-buildx-production-local: ## Build production single-arch image locally
	@echo "🏗️ Building single-architecture production Docker image locally..."
	@echo "💡 Alternative to docker-buildx.sh for local testing"
	$(DOCKER_CLI) buildx build \
		--file $(DOCKERFILE_PRODUCTION) \
		--tag rustfs:production-latest \
		--tag rustfs:latest \
		--load \
		--build-arg RELEASE=latest \
		.
```
.config/make/build-docker-production.mak — new file (16 lines)

```makefile
## —— Single Architecture Docker Builds (Traditional) ----------------------------------------------

.PHONY: docker-build-production
docker-build-production: ## Build single-arch production image
	@echo "🏗️ Building single-architecture production Docker image..."
	@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
	$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .

.PHONY: docker-build-source
docker-build-source: ## Build single-arch source image
	@echo "🏗️ Building single-architecture source Docker image..."
	@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
	DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
		--build-arg BUILDKIT_INLINE_CACHE=1 \
		-f $(DOCKERFILE_SOURCE) -t rustfs:source .
```
.config/make/build-docker.mak — new file (22 lines)

```makefile
## —— Docker-based build (alternative approach) ----------------------------------------------------

# Usage:  make BUILD_OS=ubuntu22.04 build-docker
# Output: target/ubuntu22.04/release/rustfs

.PHONY: build-docker
build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
build-docker: ## Build using Docker container # e.g (make build-docker BUILD_OS=ubuntu22.04)
	@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
	$(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
	$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)

.PHONY: docker-inspect-multiarch
docker-inspect-multiarch: ## Check image architecture support
	@if [ -z "$(IMAGE)" ]; then \
		echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
		exit 1; \
	fi
	@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
	docker buildx imagetools inspect $(IMAGE)
```
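A usage sketch for the two targets above (the variable values are examples taken from the inline help):

```bash
# Compile the release binary inside a container; the output lands in
# target/ubuntu22.04/release/rustfs
make build-docker BUILD_OS=ubuntu22.04

# List the architectures covered by a published image manifest
make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest
```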
.config/make/build.mak — new file (55 lines)

```makefile
## —— Local Native Build using build-rustfs.sh script (Recommended) --------------------------------

.PHONY: build
build: ## Build RustFS binary (includes console by default)
	@echo "🔨 Building RustFS using build-rustfs.sh script..."
	./build-rustfs.sh

.PHONY: build-dev
build-dev: ## Build RustFS in development mode
	@echo "🔨 Building RustFS in development mode..."
	./build-rustfs.sh --dev

.PHONY: build-musl
build-musl: ## Build x86_64 musl version
	@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
	./build-rustfs.sh --platform x86_64-unknown-linux-musl

.PHONY: build-gnu
build-gnu: ## Build x86_64 GNU version
	@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
	./build-rustfs.sh --platform x86_64-unknown-linux-gnu

.PHONY: build-musl-arm64
build-musl-arm64: ## Build aarch64 musl version
	@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
	./build-rustfs.sh --platform aarch64-unknown-linux-musl

.PHONY: build-gnu-arm64
build-gnu-arm64: ## Build aarch64 GNU version
	@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
	./build-rustfs.sh --platform aarch64-unknown-linux-gnu


.PHONY: build-cross-all
build-cross-all: core-deps ## Build binaries for all architectures
	@echo "🔧 Building all target architectures..."
	@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
	@echo "🔨 Generating protobuf code..."
	cargo run --bin gproto || true

	@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
	./build-rustfs.sh --platform x86_64-unknown-linux-musl

	@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
	./build-rustfs.sh --platform x86_64-unknown-linux-gnu

	@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
	./build-rustfs.sh --platform aarch64-unknown-linux-musl

	@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
	./build-rustfs.sh --platform aarch64-unknown-linux-gnu
```
.config/make/check.mak — new file (24 lines)

```makefile
## —— Check and Inform Dependencies ----------------------------------------------------------------

# Fatal check
# Checks all required dependencies and exits with error if not found
# (e.g., cargo, rustfmt)
check-%:
	@command -v $* >/dev/null 2>&1 || { \
		echo >&2 "❌ '$*' is not installed."; \
		exit 1; \
	}

# Warning-only check
# Checks for optional dependencies and issues a warning if not found
# (e.g., cargo-nextest for enhanced testing)
warn-%:
	@command -v $* >/dev/null 2>&1 || { \
		echo >&2 "⚠️ '$*' is not installed."; \
	}

# For checking dependencies use check-<dep-name> or warn-<dep-name>
.PHONY: core-deps fmt-deps test-deps
core-deps: check-cargo ## Check core dependencies
fmt-deps: check-rustfmt ## Check lint and formatting dependencies
test-deps: warn-cargo-nextest ## Check tests dependencies
```
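Since `check-%` and `warn-%` are pattern rules, any tool name can be substituted for the stem; a quick sketch of the two behaviors:

```bash
# Fatal: prints "❌ 'cargo' is not installed." and exits non-zero if cargo is missing
make check-cargo

# Non-fatal: prints "⚠️ 'cargo-nextest' is not installed." and keeps going
make warn-cargo-nextest
```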
.config/make/deploy.mak — new file (6 lines)

```makefile
## —— Deploy using dev_deploy.sh script ------------------------------------------------------------

.PHONY: deploy-dev
deploy-dev: build-musl ## Deploy to dev server
	@echo "🚀 Deploying to dev server: $${IP}"
	./scripts/dev_deploy.sh $${IP}
```
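The recipe reads the target server from the `IP` environment variable; a usage sketch (the address is an example):

```bash
# Builds the musl binary first (deploy-dev depends on build-musl), then deploys
IP=192.168.1.100 make deploy-dev
```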
.config/make/help.mak — new file (38 lines)

```makefile
## —— Help, Help Build and Help Docker -------------------------------------------------------------


.PHONY: help
help: ## Shows this help menu
	echo -e "$$HEADER"
	grep -E '(^[a-zA-Z0-9_-]+:.*?## .*$$)|(^## )' $(MAKEFILE_LIST) | sed 's/^[^:]*://g' | awk 'BEGIN {FS = ":.*?## | #"} ; {printf "${cyan}%-30s${reset} ${white}%s${reset} ${green}%s${reset}\n", $$1, $$2, $$3}' | sed -e 's/\[36m##/\n[32m##/'

.PHONY: help-build
help-build: ## Shows RustFS build help
	@echo ""
	@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"
	@echo ""
	@echo "🔧 Direct usage of build-rustfs.sh script:"
	@echo ""
	@echo "   ./build-rustfs.sh --help                              # View script help"
	@echo "   ./build-rustfs.sh --no-console                        # Build without console resources"
	@echo "   ./build-rustfs.sh --force-console-update              # Force update console resources"
	@echo "   ./build-rustfs.sh --dev                               # Development mode build"
	@echo "   ./build-rustfs.sh --sign                              # Sign binary files"
	@echo "   ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
	@echo "   ./build-rustfs.sh --skip-verification                 # Skip binary verification"
	@echo ""

.PHONY: help-docker
help-docker: ## Shows docker environment and suggestion help
	@echo ""
	@echo "📋 Environment Variables:"
	@echo "   REGISTRY              Image registry address (required for push)"
	@echo "   DOCKERHUB_USERNAME    Docker Hub username"
	@echo "   DOCKERHUB_TOKEN       Docker Hub access token"
	@echo "   GITHUB_TOKEN          GitHub access token"
	@echo ""
	@echo "💡 Suggestions:"
	@echo "   Production use:          Use docker-buildx* commands (based on precompiled binaries)"
	@echo "   Local development:       Use docker-dev* commands (build from source)"
	@echo "   Development environment: Use dev-env-* commands to manage dev containers"
	@echo ""
```
.config/make/lint-fmt.mak — new file (22 lines)

```makefile
## —— Code quality and Formatting ------------------------------------------------------------------

.PHONY: fmt
fmt: core-deps fmt-deps ## Format code
	@echo "🔧 Formatting code..."
	cargo fmt --all

.PHONY: fmt-check
fmt-check: core-deps fmt-deps ## Check code formatting
	@echo "📝 Checking code formatting..."
	cargo fmt --all --check

.PHONY: clippy-check
clippy-check: core-deps ## Run clippy checks
	@echo "🔍 Running clippy checks..."
	cargo clippy --fix --allow-dirty
	cargo clippy --all-targets --all-features -- -D warnings

.PHONY: compilation-check
compilation-check: core-deps ## Run compilation check
	@echo "🔨 Running compilation check..."
	cargo check --all-targets
```
.config/make/pre-commit.mak — new file (11 lines)

```makefile
## —— Pre Commit Checks ----------------------------------------------------------------------------

.PHONY: setup-hooks
setup-hooks: ## Set up git hooks
	@echo "🔧 Setting up git hooks..."
	chmod +x .git/hooks/pre-commit
	@echo "✅ Git hooks setup complete!"

.PHONY: pre-commit
pre-commit: fmt clippy-check compilation-check test ## Run pre-commit checks
	@echo "✅ All pre-commit checks passed!"
```
.config/make/tests.mak — new file (20 lines)

```makefile
## —— Tests and e2e test ---------------------------------------------------------------------------

.PHONY: test
test: core-deps test-deps ## Run all tests
	@echo "🧪 Running tests..."
	@if command -v cargo-nextest >/dev/null 2>&1; then \
		cargo nextest run --all --exclude e2e_test; \
	else \
		echo "ℹ️ cargo-nextest not found; falling back to 'cargo test'"; \
		cargo test --workspace --exclude e2e_test -- --nocapture; \
	fi
	cargo test --all --doc

.PHONY: e2e-server
e2e-server: ## Run e2e-server tests
	sh $(shell pwd)/scripts/run.sh

.PHONY: probe-e2e
probe-e2e: ## Probe e2e tests
	sh $(shell pwd)/scripts/probe.sh
```
.docker/observability/prometheus-data/.gitignore — vendored, mode changed: Normal file → Executable file (0 lines changed)
.github/s3tests/README.md — vendored, new file (103 lines)

# S3 Compatibility Tests Configuration

This directory contains the configuration for running [Ceph S3 compatibility tests](https://github.com/ceph/s3-tests) against RustFS.

## Configuration File

The `s3tests.conf` file is based on the official `s3tests.conf.SAMPLE` from the ceph/s3-tests repository. It uses environment variable substitution via `envsubst` to configure the endpoint and credentials.

### Key Configuration Points

- **Host**: Set via `${S3_HOST}` environment variable (e.g., `rustfs-single` for single-node, `lb` for multi-node)
- **Port**: 9000 (standard RustFS port)
- **Credentials**: Uses `${S3_ACCESS_KEY}` and `${S3_SECRET_KEY}` from workflow environment
- **TLS**: Disabled (`is_secure = False`)
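For example, the final config is rendered by exporting the variables and piping the template through `envsubst`, mirroring the usage notes at the top of `s3tests.conf`:

```bash
export S3_ACCESS_KEY=rustfsadmin
export S3_SECRET_KEY=rustfsadmin

# Single-node run: tests talk to the rustfs-single container
S3_HOST=rustfs-single envsubst < s3tests.conf > /tmp/s3tests.conf

# Multi-node run: tests talk to the load balancer instead
S3_HOST=lb envsubst < s3tests.conf > /tmp/s3tests.conf
```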
## Test Execution Strategy

### Network Connectivity Fix

Tests run inside a Docker container on the `rustfs-net` network, which allows them to resolve and connect to the RustFS container hostnames. This fixes the "Temporary failure in name resolution" error that occurred when tests ran on the GitHub runner host.

### Performance Optimizations

1. **Parallel Execution**: Uses `pytest-xdist` with `-n 4` to run tests in parallel across 4 workers
2. **Load Distribution**: Uses `--dist=loadgroup` to distribute test groups across workers
3. **Fail-Fast**: Uses `--maxfail=50` to stop after 50 failures, saving time on catastrophic failures
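Taken together, these options amount to an invocation along these lines (a sketch; the config path is an example):

```bash
# 4 workers, grouped scheduling, stop after 50 failures
S3TEST_CONF=/tmp/s3tests.conf pytest -n 4 --dist=loadgroup --maxfail=50 \
  s3tests/functional/test_s3.py
```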
### Feature Filtering

Tests are filtered using pytest markers (`-m`) to skip features not yet supported by RustFS:

- `lifecycle` - Bucket lifecycle policies
- `versioning` - Object versioning
- `s3website` - Static website hosting
- `bucket_logging` - Bucket logging
- `encryption` / `sse_s3` - Server-side encryption
- `cloud_transition` / `cloud_restore` - Cloud storage transitions
- `lifecycle_expiration` / `lifecycle_transition` - Lifecycle operations

This filtering:

1. Reduces test execution time significantly (from 1+ hour to ~10-15 minutes)
2. Focuses on features RustFS currently supports
3. Avoids hundreds of expected failures

## Running Tests Locally

### Single-Node Test

```bash
# Set credentials
export S3_ACCESS_KEY=rustfsadmin
export S3_SECRET_KEY=rustfsadmin

# Start RustFS container
docker run -d --name rustfs-single \
  --network rustfs-net \
  -e RUSTFS_ADDRESS=0.0.0.0:9000 \
  -e RUSTFS_ACCESS_KEY=$S3_ACCESS_KEY \
  -e RUSTFS_SECRET_KEY=$S3_SECRET_KEY \
  -e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
  rustfs-ci

# Generate config
export S3_HOST=rustfs-single
envsubst < .github/s3tests/s3tests.conf > /tmp/s3tests.conf

# Run tests
docker run --rm \
  --network rustfs-net \
  -v /tmp/s3tests.conf:/etc/s3tests.conf:ro \
  python:3.12-slim \
  bash -c '
    apt-get update -qq && apt-get install -y -qq git
    git clone --depth 1 https://github.com/ceph/s3-tests.git /s3-tests
    cd /s3-tests
    pip install -q -r requirements.txt pytest-xdist
    S3TEST_CONF=/etc/s3tests.conf pytest -v -n 4 \
      s3tests/functional/test_s3.py \
      -m "not lifecycle and not versioning and not s3website and not bucket_logging and not encryption and not sse_s3"
  '
```

## Test Results Interpretation

- **PASSED**: Test succeeded, feature works correctly
- **FAILED**: Test failed, indicates a potential bug or incompatibility
- **ERROR**: Test setup failed (e.g., network issues, missing dependencies)
- **SKIPPED**: Test skipped due to marker filtering

## Adding New Feature Support

When adding support for a new S3 feature to RustFS:

1. Remove the corresponding marker from the filter in `.github/workflows/e2e-s3tests.yml`
2. Run the tests to verify compatibility
3. Fix any failing tests
4. Update this README to reflect the newly supported feature

## References

- [Ceph S3 Tests Repository](https://github.com/ceph/s3-tests)
- [S3 API Compatibility](https://docs.aws.amazon.com/AmazonS3/latest/API/)
- [pytest-xdist Documentation](https://pytest-xdist.readthedocs.io/)
.github/s3tests/s3tests.conf — vendored, new file (185 lines)

```ini
# RustFS s3-tests configuration
# Based on: https://github.com/ceph/s3-tests/blob/master/s3tests.conf.SAMPLE
#
# Usage:
#   Single-node: S3_HOST=rustfs-single envsubst < s3tests.conf > /tmp/s3tests.conf
#   Multi-node:  S3_HOST=lb envsubst < s3tests.conf > /tmp/s3tests.conf

[DEFAULT]
## this section is just used for host, port and bucket_prefix

# host set for RustFS - will be substituted via envsubst
host = ${S3_HOST}

# port for RustFS
port = 9000

## say "False" to disable TLS
is_secure = False

## say "False" to disable SSL Verify
ssl_verify = False

[fixtures]
## all the buckets created will start with this prefix;
## {random} will be filled with random characters to pad
## the prefix to 30 characters long, and avoid collisions
bucket prefix = rustfs-{random}-

# all the iam account resources (users, roles, etc) created
# will start with this name prefix
iam name prefix = s3-tests-

# all the iam account resources (users, roles, etc) created
# will start with this path prefix
iam path prefix = /s3-tests/

[s3 main]
# main display_name
display_name = RustFS Tester

# main user_id
user_id = rustfsadmin

# main email
email = tester@rustfs.local

# zonegroup api_name for bucket location
api_name = default

## main AWS access key
access_key = ${S3_ACCESS_KEY}

## main AWS secret key
secret_key = ${S3_SECRET_KEY}

## replace with key id obtained when secret is created, or delete if KMS not tested
#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef

## Storage classes
#storage_classes = "LUKEWARM, FROZEN"

## Lifecycle debug interval (default: 10)
#lc_debug_interval = 20

## Restore debug interval (default: 100)
#rgw_restore_debug_interval = 60
#rgw_restore_processor_period = 60

[s3 alt]
# alt display_name
display_name = RustFS Alt Tester

## alt email
email = alt@rustfs.local

# alt user_id
user_id = rustfsalt

# alt AWS access key (must be different from s3 main for many tests)
access_key = ${S3_ALT_ACCESS_KEY}

# alt AWS secret key
secret_key = ${S3_ALT_SECRET_KEY}

#[s3 cloud]
## to run the testcases with "cloud_transition" for transition
## and "cloud_restore" for restore attribute.
## Note: the waiting time may have to be tweaked depending on
## the I/O latency to the cloud endpoint.

## host set for cloud endpoint
# host = localhost

## port set for cloud endpoint
# port = 8001

## say "False" to disable TLS
# is_secure = False

## cloud endpoint credentials
# access_key = 0555b35654ad1656d804
# secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==

## storage class configured as cloud tier on local rgw server
# cloud_storage_class = CLOUDTIER

## Below are optional -

## Above configured cloud storage class config options
# retain_head_object = false
# allow_read_through = false # change it to enable read_through
# read_through_restore_days = 2
# target_storage_class = Target_SC
# target_path = cloud-bucket

## another regular storage class to test multiple transition rules,
# storage_class = S1

[s3 tenant]
# tenant display_name
display_name = RustFS Tenant Tester

# tenant user_id
user_id = rustfstenant

# tenant AWS access key
access_key = ${S3_ACCESS_KEY}

# tenant AWS secret key
secret_key = ${S3_SECRET_KEY}

# tenant email
email = tenant@rustfs.local

# tenant name
tenant = testx

# following section needs to be added for all sts-tests
[iam]
# used for iam operations in sts-tests
# email
email = s3@rustfs.local

# user_id
user_id = rustfsiam

# access_key
access_key = ${S3_ACCESS_KEY}

# secret_key
secret_key = ${S3_SECRET_KEY}

# display_name
display_name = RustFS IAM User

# iam account root user for iam_account tests
[iam root]
access_key = ${S3_ACCESS_KEY}
secret_key = ${S3_SECRET_KEY}
user_id = RGW11111111111111111
email = account1@rustfs.local

# iam account root user in a different account than [iam root]
[iam alt root]
access_key = ${S3_ACCESS_KEY}
secret_key = ${S3_SECRET_KEY}
user_id = RGW22222222222222222
email = account2@rustfs.local

# following section needs to be added when you want to run Assume Role With Webidentity test
[webidentity]
# used for assume role with web identity test in sts-tests
# all parameters will be obtained from ceph/qa/tasks/keycloak.py
#token=<access_token>

#aud=<obtained after introspecting token>

#sub=<obtained after introspecting token>

#azp=<obtained after introspecting token>

#user_token=<access token for a user, with attribute Department=[Engineering, Marketing>]

#thumbprint=<obtained from x509 certificate>

#KC_REALM=<name of the realm>
```
.github/workflows/audit.yml — vendored, modified (10 lines changed)

```diff
@@ -40,11 +40,11 @@ env:
 jobs:
   security-audit:
     name: Security Audit
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     timeout-minutes: 15
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Install cargo-audit
         uses: taiki-e/install-action@v2
@@ -57,7 +57,7 @@ jobs:

       - name: Upload audit results
         if: always()
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: security-audit-results-${{ github.run_number }}
           path: audit-results.json
@@ -65,14 +65,14 @@ jobs:

   dependency-review:
     name: Dependency Review
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     if: github.event_name == 'pull_request'
     permissions:
       contents: read
       pull-requests: write
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Dependency Review
         uses: actions/dependency-review-action@v4
```
.github/workflows/build.yml — vendored, modified (36 lines changed)

```diff
@@ -83,7 +83,7 @@ jobs:
   # Build strategy check - determine build type based on trigger
   build-check:
     name: Build Strategy Check
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     outputs:
       should_build: ${{ steps.check.outputs.should_build }}
       build_type: ${{ steps.check.outputs.build_type }}
@@ -92,7 +92,7 @@ jobs:
       is_prerelease: ${{ steps.check.outputs.is_prerelease }}
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
        with:
           fetch-depth: 0

@@ -167,19 +167,19 @@ jobs:
       matrix:
         include:
           # Linux builds
-          - os: ubuntu-latest
+          - os: ubicloud-standard-2
             target: x86_64-unknown-linux-musl
             cross: false
             platform: linux
-          - os: ubuntu-latest
+          - os: ubicloud-standard-2
             target: aarch64-unknown-linux-musl
             cross: true
             platform: linux
-          - os: ubuntu-latest
+          - os: ubicloud-standard-2
             target: x86_64-unknown-linux-gnu
             cross: false
             platform: linux
-          - os: ubuntu-latest
+          - os: ubicloud-standard-2
             target: aarch64-unknown-linux-gnu
             cross: true
             platform: linux
@@ -203,7 +203,7 @@ jobs:
           #   platform: windows
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
        with:
           fetch-depth: 0

@@ -442,7 +442,7 @@ jobs:
           echo "📊 Version: ${VERSION}"

       - name: Upload to GitHub artifacts
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: ${{ steps.package.outputs.package_name }}
           path: "rustfs-*.zip"
@@ -454,7 +454,7 @@ jobs:
           OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
           OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
           OSS_REGION: cn-beijing
-          OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
+          OSS_ENDPOINT: https://oss-accelerate.aliyuncs.com
         shell: bash
         run: |
           BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
@@ -532,7 +532,7 @@ jobs:
     name: Build Summary
     needs: [ build-check, build-rustfs ]
     if: always() && needs.build-check.outputs.should_build == 'true'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     steps:
       - name: Build completion summary
         shell: bash
@@ -584,7 +584,7 @@ jobs:
     name: Create GitHub Release
     needs: [ build-check, build-rustfs ]
     if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     permissions:
       contents: write
     outputs:
@@ -592,7 +592,7 @@ jobs:
       release_url: ${{ steps.create.outputs.release_url }}
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
        with:
           fetch-depth: 0

@@ -670,16 +670,16 @@ jobs:
     name: Upload Release Assets
     needs: [ build-check, build-rustfs, create-release ]
     if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     permissions:
       contents: write
       actions: read
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Download all build artifacts
-        uses: actions/download-artifact@v5
+        uses: actions/download-artifact@v7
         with:
           path: ./artifacts
           pattern: rustfs-*
@@ -751,7 +751,7 @@ jobs:
     name: Update Latest Version
     needs: [ build-check, upload-release-assets ]
     if: startsWith(github.ref, 'refs/tags/')
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     steps:
       - name: Update latest.json
         env:
@@ -801,12 +801,12 @@ jobs:
     name: Publish Release
     needs: [ build-check, create-release, upload-release-assets ]
     if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     permissions:
       contents: write
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Update release notes and publish
         env:
```
.github/workflows/ci.yml — vendored, modified (41 lines changed)

```diff
@@ -4,7 +4,7 @@
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -62,17 +62,23 @@ on:
 permissions:
   contents: read

+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 env:
   CARGO_TERM_COLOR: always
   RUST_BACKTRACE: 1
+  CARGO_BUILD_JOBS: 2

 jobs:

   skip-check:
     name: Skip Duplicate Actions
     permissions:
       actions: write
       contents: read
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     outputs:
       should_skip: ${{ steps.skip_check.outputs.should_skip }}
     steps:
@@ -83,15 +89,13 @@ jobs:
           concurrent_skipping: "same_content_newer"
           cancel_others: true
           paths_ignore: '["*.md", "docs/**", "deploy/**"]'
           # Never skip release events and tag pushes
           do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'

   typos:
     name: Typos
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6
       - uses: dtolnay/rust-toolchain@stable
       - name: Typos check with custom config file
         uses: crate-ci/typos@master
@@ -100,13 +104,11 @@
     name: Test and Lint
     needs: skip-check
     if: needs.skip-check.outputs.should_skip != 'true'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-4
     timeout-minutes: 60
     steps:
-      - name: Delete huge unnecessary tools folder
-        run: rm -rf /opt/hostedtoolcache
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Setup Rust environment
         uses: ./.github/actions/setup
@@ -116,6 +118,9 @@
           github-token: ${{ secrets.GITHUB_TOKEN }}
           cache-save-if: ${{ github.ref == 'refs/heads/main' }}

+      - name: Install cargo-nextest
+        uses: taiki-e/install-action@nextest
+
       - name: Run tests
         run: |
           cargo nextest run --all --exclude e2e_test
@@ -131,11 +136,16 @@
     name: End-to-End Tests
     needs: skip-check
     if: needs.skip-check.outputs.should_skip != 'true'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     timeout-minutes: 30
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

+      - name: Clean up previous test run
+        run: |
+          rm -rf /tmp/rustfs
+          rm -f /tmp/rustfs.log
+
       - name: Setup Rust environment
         uses: ./.github/actions/setup
@@ -150,12 +160,13 @@
         with:
           tool: s3s-e2e
           git: https://github.com/Nugine/s3s.git
-          rev: b7714bfaa17ddfa9b23ea01774a1e7bbdbfc2ca3
+          rev: 9e41304ed549b89cfb03ede98e9c0d2ac7522051

       - name: Build debug binary
         run: |
           touch rustfs/build.rs
-          cargo build -p rustfs --bins
+          # Limit concurrency to prevent OOM
+          cargo build -p rustfs --bins --jobs 2

       - name: Run end-to-end tests
         run: |
@@ -164,7 +175,7 @@

       - name: Upload test logs
         if: failure()
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: e2e-test-logs-${{ github.run_number }}
           path: /tmp/rustfs.log
```
.github/workflows/docker.yml — vendored, modified (34 lines changed)

```diff
@@ -72,7 +72,7 @@ jobs:
   # Check if we should build Docker images
   build-check:
     name: Docker Build Check
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     outputs:
       should_build: ${{ steps.check.outputs.should_build }}
       should_push: ${{ steps.check.outputs.should_push }}
@@ -83,7 +83,7 @@ jobs:
       create_latest: ${{ steps.check.outputs.create_latest }}
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
           # For workflow_run events, checkout the specific commit that triggered the workflow
@@ -162,11 +162,11 @@ jobs:
           if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
             build_type="prerelease"
             is_prerelease=true
-            # TODO: 临时修改 - 当前允许 alpha 版本也创建 latest 标签
-            # 等版本稳定后,需要移除下面这行,恢复原有逻辑(只有稳定版本才创建 latest)
+            # TODO: Temporary change - currently allows alpha versions to also create latest tags
+            # After the version is stable, you need to remove the following line and restore the original logic (latest is created only for stable versions)
             if [[ "$version" == *"alpha"* ]]; then
               create_latest=true
-              echo "🧪 Building Docker image for prerelease: $version (临时允许创建 latest 标签)"
+              echo "🧪 Building Docker image for prerelease: $version (temporarily allowing creation of latest tag)"
             else
               echo "🧪 Building Docker image for prerelease: $version"
             fi
@@ -215,11 +215,11 @@ jobs:
           v*alpha*|v*beta*|v*rc*|*alpha*|*beta*|*rc*)
             build_type="prerelease"
             is_prerelease=true
-            # TODO: 临时修改 - 当前允许 alpha 版本也创建 latest 标签
-            # 等版本稳定后,需要移除下面的 if 块,恢复原有逻辑
+            # TODO: Temporary change - currently allows alpha versions to also create latest tags
+            # After the version is stable, you need to remove the if block below and restore the original logic.
             if [[ "$input_version" == *"alpha"* ]]; then
               create_latest=true
-              echo "🧪 Building with prerelease version: $input_version (临时允许创建 latest 标签)"
+              echo "🧪 Building with prerelease version: $input_version (temporarily allowing creation of latest tag)"
             else
               echo "🧪 Building with prerelease version: $input_version"
             fi
@@ -264,11 +264,11 @@ jobs:
     name: Build Docker Images
     needs: build-check
     if: needs.build-check.outputs.should_build == 'true'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     timeout-minutes: 60
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Login to Docker Hub
         uses: docker/login-action@v3
@@ -330,9 +330,9 @@ jobs:

           # Add channel tags for prereleases and latest for stable
           if [[ "$CREATE_LATEST" == "true" ]]; then
-            # TODO: 临时修改 - 当前 alpha 版本也会创建 latest 标签
-            # 等版本稳定后,这里的逻辑保持不变,但上游的 CREATE_LATEST 设置需要恢复
-            # Stable release (以及临时的 alpha 版本)
+            # TODO: Temporary change - the current alpha version will also create the latest tag
+            # After the version is stabilized, the logic here remains unchanged, but the upstream CREATE_LATEST setting needs to be restored.
+            # Stable release (and temporary alpha versions)
             TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest"
           elif [[ "$BUILD_TYPE" == "prerelease" ]]; then
             # Prerelease channel tags (alpha, beta, rc)
@@ -404,7 +404,7 @@ jobs:
     name: Docker Build Summary
     needs: [ build-check, build-docker ]
     if: always() && needs.build-check.outputs.should_build == 'true'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     steps:
       - name: Docker build completion summary
         run: |
@@ -429,10 +429,10 @@ jobs:
           "prerelease")
             echo "🧪 Prerelease Docker image has been built with ${VERSION} tags"
             echo "⚠️ This is a prerelease image - use with caution"
-            # TODO: 临时修改 - alpha 版本当前会创建 latest 标签
-            # 等版本稳定后,需要恢复下面的提示信息
+            # TODO: Temporary change - alpha versions currently create the latest tag
+            # After the version is stable, you need to restore the following prompt information
             if [[ "$VERSION" == *"alpha"* ]] && [[ "$CREATE_LATEST" == "true" ]]; then
-              echo "🏷️ Latest tag has been created for alpha version (临时措施)"
+              echo "🏷️ Latest tag has been created for alpha version (temporary measures)"
             else
               echo "🚫 Latest tag NOT created for prerelease"
             fi
```
422
.github/workflows/e2e-s3tests.yml
vendored
Normal file
422
.github/workflows/e2e-s3tests.yml
vendored
Normal file
@@ -0,0 +1,422 @@
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: e2e-s3tests
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
test-mode:
|
||||
description: "Test mode to run"
|
||||
required: true
|
||||
type: choice
|
||||
default: "single"
|
||||
options:
|
||||
- single
|
||||
- multi
|
||||
xdist:
|
||||
description: "Enable pytest-xdist (parallel). '0' to disable."
|
||||
required: false
|
||||
default: "0"
|
||||
maxfail:
|
||||
description: "Stop after N failures (debug friendly)"
|
||||
required: false
|
||||
default: "1"
|
||||
markexpr:
|
||||
description: "pytest -m expression (feature filters)"
|
||||
required: false
|
||||
default: "not lifecycle and not versioning and not s3website and not bucket_logging and not encryption"
|
||||
|
||||
env:
|
||||
# main user
|
||||
S3_ACCESS_KEY: rustfsadmin
|
||||
S3_SECRET_KEY: rustfsadmin
|
||||
# alt user (must be different from main for many s3-tests)
|
||||
S3_ALT_ACCESS_KEY: rustfsalt
|
||||
S3_ALT_SECRET_KEY: rustfsalt
|
||||
|
||||
S3_REGION: us-east-1
|
||||
|
||||
RUST_LOG: info
|
||||
PLATFORM: linux/amd64
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
s3tests-single:
|
||||
if: github.event.inputs.test-mode == 'single'
|
||||
runs-on: ubicloud-standard-2
|
||||
timeout-minutes: 120
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Enable buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build RustFS image (source, cached)
|
||||
run: |
|
||||
DOCKER_BUILDKIT=1 docker buildx build --load \
|
||||
--platform ${PLATFORM} \
|
||||
--cache-from type=gha \
|
||||
--cache-to type=gha,mode=max \
|
||||
-t rustfs-ci \
|
||||
-f Dockerfile.source .
|
||||
|
||||
- name: Create network
|
||||
run: docker network inspect rustfs-net >/dev/null 2>&1 || docker network create rustfs-net
|
||||
|
||||
- name: Remove existing rustfs-single (if any)
|
||||
run: docker rm -f rustfs-single >/dev/null 2>&1 || true
|
||||
|
||||
- name: Start single RustFS
|
||||
run: |
|
||||
docker run -d --name rustfs-single \
|
||||
--network rustfs-net \
|
||||
-p 9000:9000 \
|
||||
-e RUSTFS_ADDRESS=0.0.0.0:9000 \
|
||||
-e RUSTFS_ACCESS_KEY=$S3_ACCESS_KEY \
|
||||
-e RUSTFS_SECRET_KEY=$S3_SECRET_KEY \
|
||||
-e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
|
||||
-v /tmp/rustfs-single:/data \
|
||||
rustfs-ci
|
||||
|
||||
- name: Wait for RustFS ready
|
||||
run: |
|
||||
for i in {1..60}; do
|
||||
if curl -sf http://127.0.0.1:9000/health >/dev/null 2>&1; then
|
||||
echo "RustFS is ready"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$(docker inspect -f '{{.State.Running}}' rustfs-single 2>/dev/null)" != "true" ]; then
|
||||
echo "RustFS container not running" >&2
|
||||
docker logs rustfs-single || true
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sleep 2
|
||||
done
|
||||
|
||||
echo "Health check timed out" >&2
|
||||
docker logs rustfs-single || true
|
||||
exit 1
|
||||
|
||||
- name: Generate s3tests config
|
||||
run: |
|
||||
export S3_HOST=127.0.0.1
|
||||
envsubst < .github/s3tests/s3tests.conf > s3tests.conf
|
||||
|
||||
- name: Provision s3-tests alt user (required by suite)
|
||||
run: |
|
||||
python3 -m pip install --user --upgrade pip awscurl
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
|
||||
# Admin API requires AWS SigV4 signing. awscurl is used by RustFS codebase as well.
|
||||
awscurl \
|
||||
--service s3 \
|
||||
--region "${S3_REGION}" \
|
||||
--access_key "${S3_ACCESS_KEY}" \
|
||||
--secret_key "${S3_SECRET_KEY}" \
|
||||
-X PUT \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"secretKey":"'"${S3_ALT_SECRET_KEY}"'","status":"enabled","policy":"readwrite"}' \
|
||||
"http://127.0.0.1:9000/rustfs/admin/v3/add-user?accessKey=${S3_ALT_ACCESS_KEY}"
|
||||
|
||||
# Explicitly attach built-in policy via policy mapping.
|
||||
# s3-tests relies on alt client being able to ListBuckets during setup cleanup.
|
||||
awscurl \
|
||||
--service s3 \
|
||||
--region "${S3_REGION}" \
|
||||
--access_key "${S3_ACCESS_KEY}" \
|
||||
--secret_key "${S3_SECRET_KEY}" \
|
||||
-X PUT \
|
||||
"http://127.0.0.1:9000/rustfs/admin/v3/set-user-or-group-policy?policyName=readwrite&userOrGroup=${S3_ALT_ACCESS_KEY}&isGroup=false"
|
||||
|
||||
# Sanity check: alt user can list buckets (should not be AccessDenied).
|
||||
awscurl \
|
||||
--service s3 \
|
||||
--region "${S3_REGION}" \
|
||||
--access_key "${S3_ALT_ACCESS_KEY}" \
|
||||
--secret_key "${S3_ALT_SECRET_KEY}" \
|
||||
-X GET \
|
||||
"http://127.0.0.1:9000/" >/dev/null
|
||||
|
||||
- name: Prepare s3-tests
|
||||
run: |
|
||||
python3 -m pip install --user --upgrade pip tox
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
git clone --depth 1 https://github.com/ceph/s3-tests.git s3-tests
|
||||
|
||||
- name: Run ceph s3-tests (debug friendly)
|
||||
run: |
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
mkdir -p artifacts/s3tests-single
|
||||
|
||||
cd s3-tests
|
||||
|
||||
set -o pipefail
|
||||
|
||||
MAXFAIL="${{ github.event.inputs.maxfail }}"
|
||||
if [ -z "$MAXFAIL" ]; then MAXFAIL="1"; fi
|
||||
|
||||
MARKEXPR="${{ github.event.inputs.markexpr }}"
|
||||
if [ -z "$MARKEXPR" ]; then MARKEXPR="not lifecycle and not versioning and not s3website and not bucket_logging and not encryption"; fi
|
||||
|
||||
XDIST="${{ github.event.inputs.xdist }}"
|
||||
if [ -z "$XDIST" ]; then XDIST="0"; fi
|
||||
XDIST_ARGS=""
|
||||
if [ "$XDIST" != "0" ]; then
|
||||
# Add pytest-xdist to requirements.txt so tox installs it inside
|
||||
# its virtualenv. Installing outside tox does NOT work.
|
||||
echo "pytest-xdist" >> requirements.txt
|
||||
XDIST_ARGS="-n $XDIST --dist=loadgroup"
|
||||
fi
|
||||
|
||||
# Run tests from s3tests/functional (boto2+boto3 combined directory).
|
||||
S3TEST_CONF=${GITHUB_WORKSPACE}/s3tests.conf \
|
||||
tox -- \
|
||||
-vv -ra --showlocals --tb=long \
|
||||
--maxfail="$MAXFAIL" \
|
||||
--junitxml=${GITHUB_WORKSPACE}/artifacts/s3tests-single/junit.xml \
|
||||
$XDIST_ARGS \
|
||||
s3tests/functional/test_s3.py \
|
||||
-m "$MARKEXPR" \
|
||||
2>&1 | tee ${GITHUB_WORKSPACE}/artifacts/s3tests-single/pytest.log
|
||||
|
||||
- name: Collect RustFS logs
|
||||
if: always()
|
||||
run: |
|
||||
mkdir -p artifacts/rustfs-single
|
||||
docker logs rustfs-single > artifacts/rustfs-single/rustfs.log 2>&1 || true
|
||||
docker inspect rustfs-single > artifacts/rustfs-single/inspect.json || true
|
||||
|
||||
- name: Upload artifacts
|
||||
if: always() && env.ACT != 'true'
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: s3tests-single
|
||||
path: artifacts/**
|
||||
|
||||
s3tests-multi:
|
||||
if: github.event_name == 'workflow_dispatch' && github.event.inputs.test-mode == 'multi'
|
||||
runs-on: ubicloud-standard-2
|
||||
timeout-minutes: 150
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Enable buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build RustFS image (source, cached)
|
||||
run: |
|
||||
DOCKER_BUILDKIT=1 docker buildx build --load \
|
||||
--platform ${PLATFORM} \
|
||||
--cache-from type=gha \
|
||||
--cache-to type=gha,mode=max \
|
||||
-t rustfs-ci \
|
||||
-f Dockerfile.source .
|
||||
|
||||
- name: Prepare cluster compose
|
||||
run: |
|
||||
cat > compose.yml <<'EOF'
|
||||
services:
|
||||
rustfs1:
|
||||
image: rustfs-ci
|
||||
hostname: rustfs1
|
||||
networks: [rustfs-net]
|
||||
environment:
|
||||
RUSTFS_ADDRESS: "0.0.0.0:9000"
|
||||
RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
|
||||
RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
|
||||
RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
|
||||
volumes:
|
||||
- rustfs1-data:/data
|
||||
rustfs2:
|
||||
image: rustfs-ci
|
||||
hostname: rustfs2
|
||||
    networks: [rustfs-net]
    environment:
      RUSTFS_ADDRESS: "0.0.0.0:9000"
      RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
      RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
      RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
    volumes:
      - rustfs2-data:/data

  rustfs3:
    image: rustfs-ci
    hostname: rustfs3
    networks: [rustfs-net]
    environment:
      RUSTFS_ADDRESS: "0.0.0.0:9000"
      RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
      RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
      RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
    volumes:
      - rustfs3-data:/data

  rustfs4:
    image: rustfs-ci
    hostname: rustfs4
    networks: [rustfs-net]
    environment:
      RUSTFS_ADDRESS: "0.0.0.0:9000"
      RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
      RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
      RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
    volumes:
      - rustfs4-data:/data

  lb:
    image: haproxy:2.9
    hostname: lb
    networks: [rustfs-net]
    ports:
      - "9000:9000"
    volumes:
      - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro

networks:
  rustfs-net:
    name: rustfs-net

volumes:
  rustfs1-data:
  rustfs2-data:
  rustfs3-data:
  rustfs4-data:
EOF

cat > haproxy.cfg <<'EOF'
defaults
    mode http
    timeout connect 5s
    timeout client 30s
    timeout server 30s

frontend fe_s3
    bind *:9000
    default_backend be_s3

backend be_s3
    balance roundrobin
    server s1 rustfs1:9000 check
    server s2 rustfs2:9000 check
    server s3 rustfs3:9000 check
    server s4 rustfs4:9000 check
EOF

- name: Launch cluster
  run: docker compose -f compose.yml up -d

- name: Wait for LB ready
  run: |
    for i in {1..90}; do
      if curl -sf http://127.0.0.1:9000/health >/dev/null 2>&1; then
        echo "Load balancer is ready"
        exit 0
      fi
      sleep 2
    done
    echo "LB or backend not ready" >&2
    docker compose -f compose.yml logs --tail=200 || true
    exit 1

- name: Generate s3tests config
  run: |
    export S3_HOST=127.0.0.1
    envsubst < .github/s3tests/s3tests.conf > s3tests.conf

- name: Provision s3-tests alt user (required by suite)
  run: |
    python3 -m pip install --user --upgrade pip awscurl
    export PATH="$HOME/.local/bin:$PATH"

    awscurl \
      --service s3 \
      --region "${S3_REGION}" \
      --access_key "${S3_ACCESS_KEY}" \
      --secret_key "${S3_SECRET_KEY}" \
      -X PUT \
      -H 'Content-Type: application/json' \
      -d '{"secretKey":"'"${S3_ALT_SECRET_KEY}"'","status":"enabled","policy":"readwrite"}' \
      "http://127.0.0.1:9000/rustfs/admin/v3/add-user?accessKey=${S3_ALT_ACCESS_KEY}"

    awscurl \
      --service s3 \
      --region "${S3_REGION}" \
      --access_key "${S3_ACCESS_KEY}" \
      --secret_key "${S3_SECRET_KEY}" \
      -X PUT \
      "http://127.0.0.1:9000/rustfs/admin/v3/set-user-or-group-policy?policyName=readwrite&userOrGroup=${S3_ALT_ACCESS_KEY}&isGroup=false"

    awscurl \
      --service s3 \
      --region "${S3_REGION}" \
      --access_key "${S3_ALT_ACCESS_KEY}" \
      --secret_key "${S3_ALT_SECRET_KEY}" \
      -X GET \
      "http://127.0.0.1:9000/" >/dev/null

- name: Prepare s3-tests
  run: |
    python3 -m pip install --user --upgrade pip tox
    export PATH="$HOME/.local/bin:$PATH"
    git clone --depth 1 https://github.com/ceph/s3-tests.git s3-tests

- name: Run ceph s3-tests (multi, debug friendly)
  run: |
    export PATH="$HOME/.local/bin:$PATH"
    mkdir -p artifacts/s3tests-multi

    cd s3-tests

    set -o pipefail

    MAXFAIL="${{ github.event.inputs.maxfail }}"
    if [ -z "$MAXFAIL" ]; then MAXFAIL="1"; fi

    MARKEXPR="${{ github.event.inputs.markexpr }}"
    if [ -z "$MARKEXPR" ]; then MARKEXPR="not lifecycle and not versioning and not s3website and not bucket_logging and not encryption"; fi

    XDIST="${{ github.event.inputs.xdist }}"
    if [ -z "$XDIST" ]; then XDIST="0"; fi
    XDIST_ARGS=""
    if [ "$XDIST" != "0" ]; then
      # Add pytest-xdist to requirements.txt so tox installs it inside
      # its virtualenv. Installing outside tox does NOT work.
      echo "pytest-xdist" >> requirements.txt
      XDIST_ARGS="-n $XDIST --dist=loadgroup"
    fi

    # Run tests from s3tests/functional (boto2+boto3 combined directory).
    S3TEST_CONF=${GITHUB_WORKSPACE}/s3tests.conf \
    tox -- \
      -vv -ra --showlocals --tb=long \
      --maxfail="$MAXFAIL" \
      --junitxml=${GITHUB_WORKSPACE}/artifacts/s3tests-multi/junit.xml \
      $XDIST_ARGS \
      s3tests/functional/test_s3.py \
      -m "$MARKEXPR" \
      2>&1 | tee ${GITHUB_WORKSPACE}/artifacts/s3tests-multi/pytest.log

- name: Collect logs
  if: always()
  run: |
    mkdir -p artifacts/cluster
    docker compose -f compose.yml logs --no-color > artifacts/cluster/cluster.log 2>&1 || true

- name: Upload artifacts
  if: always() && env.ACT != 'true'
  uses: actions/upload-artifact@v6
  with:
    name: s3tests-multi
    path: artifacts/**
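Note: the same tox invocation can be reproduced outside CI. The sketch below mirrors the workflow steps above; the worker count and the marker expression are illustrative, and `S3TEST_CONF` must point at a generated s3tests.conf:

```bash
# Local sketch of the CI run above (assumes s3tests.conf was generated one directory up)
git clone --depth 1 https://github.com/ceph/s3-tests.git s3-tests
cd s3-tests
echo "pytest-xdist" >> requirements.txt   # tox only installs deps listed here
S3TEST_CONF="$PWD/../s3tests.conf" tox -- \
  -vv -ra --maxfail=1 -n 4 --dist=loadgroup \
  -m "not lifecycle and not versioning" \
  s3tests/functional/test_s3.py
```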
95  .github/workflows/helm-package.yml  (vendored, new file)
@@ -0,0 +1,95 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: Publish helm chart to artifacthub

on:
  workflow_run:
    workflows: [ "Build and Release" ]
    types: [ completed ]

permissions:
  contents: read

env:
  new_version: ${{ github.event.workflow_run.head_branch }}

jobs:
  build-helm-package:
    runs-on: ubicloud-standard-2
    # Only run on successful builds triggered by tag pushes (version format: x.y.z or x.y.z-suffix)
    if: |
      github.event.workflow_run.conclusion == 'success' &&
      github.event.workflow_run.event == 'push' &&
      contains(github.event.workflow_run.head_branch, '.')

    steps:
      - name: Checkout helm chart repo
        uses: actions/checkout@v6

      - name: Replace chart app version
        run: |
          set -e
          set -x
          old_version=$(grep "^appVersion:" helm/rustfs/Chart.yaml | awk '{print $2}')
          sed -i "s/$old_version/$new_version/g" helm/rustfs/Chart.yaml
          sed -i "/^image:/,/^[^ ]/ s/tag:.*/tag: "$new_version"/" helm/rustfs/values.yaml

      - name: Set up Helm
        uses: azure/setup-helm@v4.3.0

      - name: Package Helm Chart
        run: |
          cp helm/README.md helm/rustfs/
          package_version=$(echo $new_version | awk -F '-' '{print $2}' | awk -F '.' '{print $NF}')
          helm package ./helm/rustfs --destination helm/rustfs/ --version "0.0.$package_version"

      - name: Upload helm package as artifact
        uses: actions/upload-artifact@v6
        with:
          name: helm-package
          path: helm/rustfs/*.tgz
          retention-days: 1

  publish-helm-package:
    runs-on: ubicloud-standard-2
    needs: [ build-helm-package ]

    steps:
      - name: Checkout helm package repo
        uses: actions/checkout@v6
        with:
          repository: rustfs/helm
          token: ${{ secrets.RUSTFS_HELM_PACKAGE }}

      - name: Download helm package
        uses: actions/download-artifact@v7
        with:
          name: helm-package
          path: ./

      - name: Set up helm
        uses: azure/setup-helm@v4.3.0

      - name: Generate index
        run: helm repo index . --url https://charts.rustfs.com

      - name: Push helm package and index file
        run: |
          git config --global user.name "${{ secrets.USERNAME }}"
          git config --global user.email "${{ secrets.EMAIL_ADDRESS }}"
          git status .
          git add .
          git commit -m "Update rustfs helm package with $new_version."
          git push origin main
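Note: the packaging step above derives the chart version from the tag's trailing number, e.g. a tag of `1.0.0-alpha.76` yields chart version `0.0.76`. Once the index is pushed, consuming the published chart would look roughly like this (a hedged sketch; the repository URL comes from the workflow above, while the chart name `rustfs/rustfs` is an assumption):

```bash
# Add the chart repository published by the workflow above
helm repo add rustfs https://charts.rustfs.com
helm repo update
# Install the chart (release and chart names are assumptions)
helm install my-rustfs rustfs/rustfs
```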
2  .github/workflows/issue-translator.yml  (vendored)
@@ -25,7 +25,7 @@ permissions:

jobs:
  build:
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-4
    steps:
      - uses: usthe/issues-translate-action@v2.7
        with:

12  .github/workflows/performance.yml  (vendored)
@@ -40,11 +40,11 @@ env:
jobs:
  performance-profile:
    name: Performance Profiling
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
    timeout-minutes: 30
    steps:
      - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

      - name: Setup Rust environment
        uses: ./.github/actions/setup
@@ -107,7 +107,7 @@ jobs:

      - name: Upload profile data
        if: steps.profiling.outputs.profile_generated == 'true'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: performance-profile-${{ github.run_number }}
          path: samply-profile.json
@@ -115,11 +115,11 @@

  benchmark:
    name: Benchmark Tests
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
    timeout-minutes: 45
    steps:
      - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

      - name: Setup Rust environment
        uses: ./.github/actions/setup
@@ -135,7 +135,7 @@
        tee benchmark-results.json

      - name: Upload benchmark results
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
        with:
          name: benchmark-results-${{ github.run_number }}
          path: benchmark-results.json
20  .github/workflows/stale.yml  (vendored, new file)
@@ -0,0 +1,20 @@
name: "Mark stale issues"
on:
  schedule:
    - cron: "30 1 * * *"

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v9
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs.'
          stale-issue-label: 'stale'
          # Mark an issue as stale after more than 7 days without activity
          days-before-stale: 7
          # Close the stale issue if no one responds within a further 3 days
          days-before-close: 3
          # Issues with these labels are exempt and are never closed automatically
          exempt-issue-labels: 'pinned,security'
12  .gitignore  (vendored)
@@ -2,6 +2,7 @@
.DS_Store
.idea
.vscode
+.direnv/
/test
/logs
/data
@@ -23,4 +24,13 @@ profile.json
*.go
*.pb
*.svg
deploy/logs/*.log.*
+artifacts/
+# s3-tests local artifacts (root directory only)
+/s3-tests/
+/s3-tests-local/
+/s3tests.conf
+/s3tests.conf.*
+*.events
+*.audit
+*.snappy
32  .pre-commit-config.yaml  (new file)
@@ -0,0 +1,32 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: local
    hooks:
      - id: cargo-fmt
        name: cargo fmt
        entry: cargo fmt --all --check
        language: system
        types: [rust]
        pass_filenames: false

      - id: cargo-clippy
        name: cargo clippy
        entry: cargo clippy --all-targets --all-features -- -D warnings
        language: system
        types: [rust]
        pass_filenames: false

      - id: cargo-check
        name: cargo check
        entry: cargo check --all-targets
        language: system
        types: [rust]
        pass_filenames: false

      - id: cargo-test
        name: cargo test
        entry: bash -c 'cargo test --workspace --exclude e2e_test && cargo test --all --doc'
        language: system
        types: [rust]
        pass_filenames: false
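Note: activating these hooks locally uses the standard pre-commit CLI (a hedged sketch; the tool itself is assumed and is not shipped by this repo):

```bash
# Install the pre-commit tool and register the hooks defined above
python3 -m pip install --user pre-commit
pre-commit install            # writes .git/hooks/pre-commit
pre-commit run --all-files    # one-off run of every hook against the whole tree
```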
41  .vscode/launch.json  (vendored)
@@ -1,9 +1,31 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "type": "lldb",
            "request": "launch",
            "name": "Debug(only) executable 'rustfs'",
            "env": {
                "RUST_LOG": "rustfs=info,ecstore=info,s3s=info,iam=info",
                "RUSTFS_SKIP_BACKGROUND_TASK": "on"
                //"RUSTFS_OBS_LOG_DIRECTORY": "./deploy/logs",
                // "RUSTFS_POLICY_PLUGIN_URL":"http://localhost:8181/v1/data/rustfs/authz/allow",
                // "RUSTFS_POLICY_PLUGIN_AUTH_TOKEN":"your-opa-token"
            },
            "program": "${workspaceFolder}/target/debug/rustfs",
            "args": [
                "--access-key",
                "rustfsadmin",
                "--secret-key",
                "rustfsadmin",
                "--address",
                "0.0.0.0:9010",
                "--server-domains",
                "127.0.0.1:9010",
                "./target/volume/test{1...4}"
            ],
            "cwd": "${workspaceFolder}"
        },
        {
            "type": "lldb",
            "request": "launch",
@@ -22,6 +44,7 @@
            "env": {
                "RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug,iam=debug",
                "RUSTFS_SKIP_BACKGROUND_TASK": "on",
                //"RUSTFS_OBS_LOG_DIRECTORY": "./deploy/logs",
                // "RUSTFS_POLICY_PLUGIN_URL":"http://localhost:8181/v1/data/rustfs/authz/allow",
                // "RUSTFS_POLICY_PLUGIN_AUTH_TOKEN":"your-opa-token"
            },
@@ -66,12 +89,8 @@
                "test",
                "--no-run",
                "--lib",
-                "--package=ecstore"
-            ],
-            "filter": {
-                "name": "ecstore",
-                "kind": "lib"
-            }
+                "--package=rustfs-ecstore"
+            ]
            },
            "args": [],
            "cwd": "${workspaceFolder}"
@@ -91,6 +110,10 @@
            "RUSTFS_VOLUMES": "./target/volume/test{1...4}",
            "RUSTFS_ADDRESS": ":9000",
            "RUSTFS_CONSOLE_ENABLE": "true",
            // "RUSTFS_OBS_TRACE_ENDPOINT": "http://127.0.0.1:4318/v1/traces", // Jaeger OTLP HTTP endpoint
            // "RUSTFS_OBS_METRIC_ENDPOINT": "http://127.0.0.1:4318/v1/metrics", // default OTLP HTTP endpoint
            // "RUSTFS_OBS_LOG_ENDPOINT": "http://127.0.0.1:4318/v1/logs", // default OTLP HTTP endpoint
            // "RUSTFS_COMPRESS_ENABLE": "true",
            "RUSTFS_CONSOLE_ADDRESS": "127.0.0.1:9001",
            "RUSTFS_OBS_LOG_DIRECTORY": "./target/logs",
            },
19  AGENTS.md
@@ -1,7 +1,18 @@
# Repository Guidelines

## ⚠️ Pre-Commit Checklist (MANDATORY)
**Before EVERY commit, you MUST run and pass ALL of the following:**
```bash
cargo fmt --all --check                                    # Code formatting
cargo clippy --all-targets --all-features -- -D warnings   # Lints
cargo test --workspace --exclude e2e_test                  # Unit tests
```
Or simply run `make pre-commit`, which covers all checks. **DO NOT commit if any check fails.**

## Communication Rules
- Respond to the user in Chinese; use English in all other contexts.
- Code and documentation must be written in English only. Chinese text is allowed solely as test data/fixtures when a case explicitly requires Chinese-language content for validation.
- **Pull Request titles and descriptions must be written in English** to ensure consistency and accessibility for all contributors.

## Project Structure & Module Organization
The workspace root hosts shared dependencies in `Cargo.toml`. The service binary lives under `rustfs/src/main.rs`, while reusable crates sit in `crates/` (`crypto`, `iam`, `kms`, and `e2e_test`). Local fixtures for standalone flows reside in `test_standalone/`, deployment manifests are under `deploy/`, Docker assets sit at the root, and automation lives in `scripts/`. Skim each crate's README or module docs before contributing changes.
@@ -18,7 +29,13 @@ Co-locate unit tests with their modules and give behavior-led names such as `han
When fixing bugs or adding features, include regression tests that capture the new behavior so future changes cannot silently break it.

## Commit & Pull Request Guidelines
-Work on feature branches (e.g., `feat/...`) after syncing `main`. Follow Conventional Commits under 72 characters (e.g., `feat: add kms key rotation`). Each commit must compile, format cleanly, and pass `make pre-commit`. Open PRs with a concise summary, note verification commands, link relevant issues, and wait for reviewer approval.
+Work on feature branches (e.g., `feat/...`) after syncing `main`. Follow Conventional Commits under 72 characters (e.g., `feat: add kms key rotation`). Each commit must compile, format cleanly, and pass `make pre-commit`.
+
+**Pull Request Requirements:**
+- PR titles and descriptions **MUST be written in English**
+- Open PRs with a concise summary, note verification commands, link relevant issues
+- Follow the PR template format and fill in all required sections
+- Wait for reviewer approval before merging

## Security & Configuration Tips
Do not commit secrets or cloud credentials; prefer environment variables or vault tooling. Review IAM- and KMS-related changes with a second maintainer. Confirm proxy settings before running sensitive tests to avoid leaking traffic outside localhost.
3  CLA.md
@@ -83,6 +83,3 @@ that body of laws known as conflict of laws. The parties expressly agree that th
for the International Sale of Goods will not apply. Any legal action or proceeding arising under this Agreement will be
brought exclusively in the courts located in Beijing, China, and the parties hereby irrevocably consent to the personal
jurisdiction and venue therein.
-
-For your reading convenience, this Agreement is written in parallel English and Chinese sections. To the extent there is
-a conflict between the English and Chinese sections, the English sections shall govern.

@@ -2,6 +2,8 @@

## 📋 Code Quality Requirements

+For instructions on setting up and running the local development environment, please see [Development Guide](docs/DEVELOPMENT.md).
+
### 🔧 Code Formatting Rules

**MANDATORY**: All code must be properly formatted before committing. This project enforces strict formatting standards to maintain code consistency and readability.
@@ -184,6 +186,39 @@ cargo clippy --all-targets --all-features -- -D warnings
cargo clippy --fix --all-targets --all-features
```

+## 📝 Pull Request Guidelines
+
+### Language Requirements
+
+**All Pull Request titles and descriptions MUST be written in English.**
+
+This ensures:
+- Consistency across all contributions
+- Accessibility for international contributors
+- Better integration with automated tools and CI/CD systems
+- Clear communication in a globally understood language
+
+#### PR Description Requirements
+
+When creating a Pull Request, ensure:
+
+1. **Title**: Use English and follow Conventional Commits format (e.g., `fix: improve s3-tests readiness detection`)
+2. **Description**: Write in English, following the PR template format
+3. **Code Comments**: Must be in English (as per coding standards)
+4. **Commit Messages**: Must be in English (as per commit guidelines)
+
+#### PR Template
+
+Always use the PR template (`.github/pull_request_template.md`) and fill in all sections:
+- Type of Change
+- Related Issues
+- Summary of Changes
+- Checklist
+- Impact
+- Additional Notes
+
+**Note**: While you may communicate with reviewers in Chinese during discussions, the PR itself (title, description, and all formal documentation) must be in English.
+
+---

Following these guidelines ensures high code quality and smooth collaboration across the RustFS project! 🚀
2591  Cargo.lock  (generated): diff suppressed because it is too large
106  Cargo.toml
@@ -19,6 +19,7 @@ members = [
    "crates/audit",        # Audit target management system with multi-target fan-out
    "crates/common",       # Shared utilities and data structures
    "crates/config",       # Configuration management
+   "crates/credentials",  # Credential management system
    "crates/crypto",       # Cryptography and security features
    "crates/ecstore",      # Erasure coding storage implementation
    "crates/e2e_test",     # End-to-end test suite
@@ -49,7 +50,7 @@ resolver = "2"
edition = "2024"
license = "Apache-2.0"
repository = "https://github.com/rustfs/rustfs"
-rust-version = "1.85"
+rust-version = "1.90"
version = "0.0.5"
homepage = "https://rustfs.com"
description = "RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages worldwide. "
@@ -71,6 +72,7 @@ rustfs-audit = { path = "crates/audit", version = "0.0.5" }
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
rustfs-common = { path = "crates/common", version = "0.0.5" }
rustfs-config = { path = "./crates/config", version = "0.0.5" }
+rustfs-credentials = { path = "crates/credentials", version = "0.0.5" }
rustfs-crypto = { path = "crates/crypto", version = "0.0.5" }
rustfs-ecstore = { path = "crates/ecstore", version = "0.0.5" }
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
@@ -97,61 +99,63 @@ async-channel = "2.5.0"
async-compression = { version = "0.4.19" }
async-recursion = "1.1.1"
async-trait = "0.1.89"
-axum = "0.8.7"
-axum-extra = "0.12.2"
-axum-server = { version = "0.7.3", features = ["tls-rustls-no-provider"], default-features = false }
+axum = "0.8.8"
+axum-server = { version = "0.8.0", features = ["tls-rustls"], default-features = false }
futures = "0.3.31"
futures-core = "0.3.31"
futures-util = "0.3.31"
pollster = "0.4.0"
hyper = { version = "1.8.1", features = ["http2", "http1", "server"] }
-hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "ring", "webpki-roots"] }
+hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "aws-lc-rs", "webpki-roots"] }
hyper-util = { version = "0.1.19", features = ["tokio", "server-auto", "server-graceful"] }
http = "1.4.0"
http-body = "1.0.1"
-reqwest = { version = "0.12.24", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
+http-body-util = "0.1.3"
+reqwest = { version = "0.12.28", default-features = false, features = ["rustls-tls-no-provider", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
socket2 = "0.6.1"
-tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
-tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
-tokio-stream = { version = "0.1.17" }
-tokio-test = "0.4.4"
-tokio-util = { version = "0.7.17", features = ["io", "compat"] }
+tokio = { version = "1.49.0", features = ["fs", "rt-multi-thread"] }
+tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "aws-lc-rs"] }
+tokio-stream = { version = "0.1.18" }
+tokio-test = "0.4.5"
+tokio-util = { version = "0.7.18", features = ["io", "compat"] }
tonic = { version = "0.14.2", features = ["gzip"] }
tonic-prost = { version = "0.14.2" }
tonic-prost-build = { version = "0.14.2" }
tower = { version = "0.5.2", features = ["timeout"] }
-tower-http = { version = "0.6.7", features = ["cors"] }
+tower-http = { version = "0.6.8", features = ["cors"] }

# Serialization and Data Formats
bytes = { version = "1.11.0", features = ["serde"] }
bytesize = "2.3.1"
byteorder = "1.5.0"
-flatbuffers = "25.9.23"
+flatbuffers = "25.12.19"
form_urlencoded = "1.2.2"
prost = "0.14.1"
quick-xml = "0.38.4"
-rmcp = { version = "0.10.0" }
-rmp = { version = "0.8.14" }
-rmp-serde = { version = "1.3.0" }
+rmcp = { version = "0.12.0" }
+rmp = { version = "0.8.15" }
+rmp-serde = { version = "1.3.1" }
serde = { version = "1.0.228", features = ["derive"] }
-serde_json = { version = "1.0.145", features = ["raw_value"] }
+serde_json = { version = "1.0.149", features = ["raw_value"] }
serde_urlencoded = "0.7.1"
-schemars = "1.1.0"
+schemars = "1.2.0"

# Cryptography and Security
aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
-argon2 = { version = "0.6.0-rc.2", features = ["std"] }
+argon2 = { version = "0.6.0-rc.5" }
blake3 = { version = "1.8.2", features = ["rayon", "mmap"] }
chacha20poly1305 = { version = "0.11.0-rc.2" }
crc-fast = "1.6.0"
hmac = { version = "0.13.0-rc.3" }
jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] }
-pbkdf2 = "0.13.0-rc.2"
-rsa = { version = "0.10.0-rc.10" }
-rustls = { version = "0.23.35", features = ["ring", "logging", "std", "tls12"], default-features = false }
+pbkdf2 = "0.13.0-rc.5"
+rsa = { version = "0.10.0-rc.11" }
+rustls = { version = "0.23.36", default-features = false, features = ["aws-lc-rs", "logging", "tls12", "prefer-post-quantum", "std"] }
rustls-pemfile = "2.2.0"
-rustls-pki-types = "1.13.1"
+rustls-pki-types = "1.13.2"
sha1 = "0.11.0-rc.3"
sha2 = "0.11.0-rc.3"
subtle = "2.6"
zeroize = { version = "1.8.2", features = ["derive"] }

# Time and Date
@@ -161,20 +165,20 @@ time = { version = "0.3.44", features = ["std", "parsing", "formatting", "macros

# Utilities and Tools
anyhow = "1.0.100"
-arc-swap = "1.7.1"
+arc-swap = "1.8.0"
astral-tokio-tar = "0.5.6"
atoi = "2.0.0"
atomic_enum = "0.3.0"
-aws-config = { version = "1.8.11" }
-aws-credential-types = { version = "1.2.10" }
-aws-sdk-s3 = { version = "1.116.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
-aws-smithy-types = { version = "1.3.4" }
+aws-config = { version = "1.8.12" }
+aws-credential-types = { version = "1.2.11" }
+aws-sdk-s3 = { version = "1.119.0", default-features = false, features = ["sigv4a", "default-https-client", "rt-tokio"] }
+aws-smithy-types = { version = "1.3.5" }
base64 = "0.22.1"
base64-simd = "0.8.0"
brotli = "8.0.2"
cfg-if = "1.0.4"
-clap = { version = "4.5.53", features = ["derive", "env"] }
-const-str = { version = "0.7.0", features = ["std", "proc"] }
+clap = { version = "4.5.54", features = ["derive", "env"] }
+const-str = { version = "1.0.0", features = ["std", "proc"] }
convert_case = "0.10.0"
criterion = { version = "0.8", features = ["html_reports"] }
crossbeam-queue = "0.3.12"
@@ -185,23 +189,23 @@ faster-hex = "0.10.0"
flate2 = "1.1.5"
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv", "json"] }
glob = "0.3.3"
-google-cloud-storage = "1.4.0"
-google-cloud-auth = "1.2.0"
+google-cloud-storage = "1.5.0"
+google-cloud-auth = "1.3.0"
hashbrown = { version = "0.16.1", features = ["serde", "rayon"] }
heed = { version = "0.22.0" }
hex-simd = "0.8.0"
highway = { version = "1.3.0" }
ipnetwork = { version = "0.21.1", features = ["serde"] }
lazy_static = "1.5.0"
-libc = "0.2.178"
+libc = "0.2.179"
libsystemd = "0.7.2"
-local-ip-address = "0.6.5"
+local-ip-address = "0.6.8"
lz4 = "1.28.1"
-matchit = "0.9.0"
+matchit = "0.9.1"
md-5 = "0.11.0-rc.3"
md5 = "0.8.0"
mime_guess = "2.0.5"
-moka = { version = "0.12.11", features = ["future"] }
+moka = { version = "0.12.12", features = ["future"] }
netif = "0.1.6"
nix = { version = "0.30.1", features = ["fs"] }
nu-ansi-term = "0.50.3"
@@ -213,16 +217,16 @@ path-absolutize = "3.1.1"
path-clean = "1.0.1"
pin-project-lite = "0.2.16"
pretty_assertions = "1.4.1"
-rand = { version = "0.10.0-rc.5", features = ["serde"] }
+rand = { version = "0.10.0-rc.6", features = ["serde"] }
rayon = "1.11.0"
reed-solomon-simd = { version = "3.1.0" }
regex = { version = "1.12.2" }
rumqttc = { version = "0.25.1" }
rust-embed = { version = "8.9.0" }
rustc-hash = { version = "2.1.1" }
-s3s = { version = "0.12.0-rc.4", features = ["minio"] }
-serial_test = "3.2.0"
-shadow-rs = { version = "1.4.0", default-features = false }
+s3s = { version = "0.13.0-alpha", features = ["minio"], git = "https://github.com/s3s-project/s3s.git", branch = "main" }
+serial_test = "3.3.1"
+shadow-rs = { version = "1.5.0", default-features = false }
siphasher = "1.0.1"
smallvec = { version = "1.15.1", features = ["serde"] }
smartstring = "1.0.1"
@@ -230,19 +234,18 @@ snafu = "0.8.9"
snap = "1.1.1"
starshard = { version = "0.6.0", features = ["rayon", "async", "serde"] }
strum = { version = "0.27.2", features = ["derive"] }
sysctl = "0.7.1"
sysinfo = "0.37.2"
temp-env = "0.3.6"
-tempfile = "3.23.0"
+tempfile = "3.24.0"
test-case = "3.3.1"
thiserror = "2.0.17"
-tracing = { version = "0.1.43" }
+tracing = { version = "0.1.44" }
tracing-appender = "0.2.4"
tracing-error = "0.2.1"
tracing-opentelemetry = "0.32.0"
tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] }
transform-stream = "0.3.1"
-url = "2.5.7"
+url = "2.5.8"
urlencoding = "2.1.3"
uuid = { version = "1.19.0", features = ["v4", "fast-rng", "macro-diagnostics"] }
vaultrs = { version = "0.7.4" }
@@ -250,7 +253,7 @@ walkdir = "2.5.0"
wildmatch = { version = "2.6.1", features = ["serde"] }
winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
-zip = "6.0.0"
+zip = "7.0.0"
zstd = "0.13.3"

# Observability and Metrics
@@ -262,7 +265,16 @@ opentelemetry_sdk = { version = "0.31.0" }
opentelemetry-semantic-conventions = { version = "0.31.0", features = ["semconv_experimental"] }
opentelemetry-stdout = { version = "0.31.0" }

+# FTP and SFTP
+libunftp = "0.21.0"
+russh = { version = "0.56.0", features = ["aws-lc-rs", "rsa"], default-features = false }
+russh-sftp = "2.1.1"
+ssh-key = { version = "0.7.0-rc.4", features = ["std", "rsa", "ed25519"] }
+suppaftp = { version = "7.0.7", features = ["tokio", "tokio-rustls", "rustls"] }
+rcgen = "0.14.6"
+
# Performance Analysis and Memory Profiling
+mimalloc = "0.1"
# Use tikv-jemallocator as memory allocator and enable performance analysis
tikv-jemallocator = { version = "0.6", features = ["profiling", "stats", "unprefixed_malloc_on_supported_platforms", "background_threads"] }
# Used to control and obtain statistics for jemalloc at runtime
@@ -271,11 +283,9 @@ tikv-jemalloc-ctl = { version = "0.6", features = ["use_std", "stats", "profilin
jemalloc_pprof = { version = "0.8.1", features = ["symbolize", "flamegraph"] }
# Used to generate CPU performance analysis data and flame diagrams
pprof = { version = "0.15.0", features = ["flamegraph", "protobuf-codec"] }
-mimalloc = "0.1"

[workspace.metadata.cargo-shear]
-ignored = ["rustfs", "rustfs-mcp", "tokio-test"]
+ignored = ["rustfs", "rustfs-mcp"]

[profile.release]
opt-level = 3
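Note: the `[workspace.dependencies]` table above is consumed by member crates through Cargo's workspace inheritance; a member manifest would pull these entries in roughly like this (a hedged sketch; the member name and extra feature are illustrative, not taken from the repo):

```toml
# crates/example/Cargo.toml (hypothetical member crate)
[package]
name = "rustfs-example"
version.workspace = true    # inherits 0.0.5 from the root manifest
edition.workspace = true    # inherits edition 2024
license.workspace = true

[dependencies]
rustfs-common = { workspace = true }
serde = { workspace = true }
tokio = { workspace = true, features = ["macros"] }  # extra features add to the workspace set
```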
@@ -1,4 +1,4 @@
-FROM alpine:3.22 AS build
+FROM alpine:3.23 AS build

ARG TARGETARCH
ARG RELEASE=latest
@@ -40,7 +40,7 @@ RUN set -eux; \
    rm -rf rustfs.zip /build/.tmp || true


-FROM alpine:3.22
+FROM alpine:3.23

ARG RELEASE=latest
ARG BUILD_DATE
@@ -81,12 +81,11 @@ ENV RUSTFS_ADDRESS=":9000" \
    RUSTFS_CORS_ALLOWED_ORIGINS="*" \
    RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="*" \
    RUSTFS_VOLUMES="/data" \
-    RUST_LOG="warn" \
-    RUSTFS_OBS_LOG_DIRECTORY="/logs"
+    RUST_LOG="warn"

EXPOSE 9000 9001

-VOLUME ["/data", "/logs"]
+VOLUME ["/data"]

USER rustfs


@@ -16,7 +16,7 @@ ARG BUILDPLATFORM
# -----------------------------
# Build stage
# -----------------------------
-FROM rust:1.88-bookworm AS builder
+FROM rust:1.91-trixie AS builder

# Re-declare args after FROM
ARG TARGETPLATFORM
@@ -39,7 +39,9 @@ RUN set -eux; \
        libssl-dev \
        lld \
        protobuf-compiler \
-        flatbuffers-compiler; \
+        flatbuffers-compiler \
+        gcc-aarch64-linux-gnu \
+        gcc-x86-64-linux-gnu; \
    rm -rf /var/lib/apt/lists/*

# Optional: cross toolchain for aarch64 (only when targeting linux/arm64)
@@ -51,18 +53,18 @@
        rm -rf /var/lib/apt/lists/*; \
    fi

-# Add Rust targets based on TARGETPLATFORM
+# Add Rust targets for both arches (to support cross-builds on multi-arch runners)
RUN set -eux; \
-    case "${TARGETPLATFORM:-linux/amd64}" in \
-        linux/amd64) rustup target add x86_64-unknown-linux-gnu ;; \
-        linux/arm64) rustup target add aarch64-unknown-linux-gnu ;; \
-        *) echo "Unsupported TARGETPLATFORM=${TARGETPLATFORM}" >&2; exit 1 ;; \
-    esac
+    rustup target add x86_64-unknown-linux-gnu aarch64-unknown-linux-gnu; \
+    rustup component add rust-std-x86_64-unknown-linux-gnu rust-std-aarch64-unknown-linux-gnu

# Cross-compilation environment (used only when targeting aarch64)
ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc
ENV CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
+ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=x86_64-linux-gnu-gcc
+ENV CC_x86_64_unknown_linux_gnu=x86_64-linux-gnu-gcc
+ENV CXX_x86_64_unknown_linux_gnu=x86_64-linux-gnu-g++

WORKDIR /usr/src/rustfs

@@ -72,7 +74,6 @@ COPY Cargo.toml Cargo.lock ./
# 2) workspace member manifests (adjust if workspace layout changes)
COPY rustfs/Cargo.toml rustfs/Cargo.toml
COPY crates/*/Cargo.toml crates/
-COPY cli/rustfs-gui/Cargo.toml cli/rustfs-gui/Cargo.toml

# Pre-fetch dependencies for better caching
RUN --mount=type=cache,target=/usr/local/cargo/registry \
@@ -117,6 +118,49 @@
        ;; \
    esac

+# -----------------------------
+# Development stage (keeps toolchain)
+# -----------------------------
+FROM builder AS dev
+
+ARG BUILD_DATE
+ARG VCS_REF
+
+LABEL name="RustFS (dev-source)" \
+    maintainer="RustFS Team" \
+    build-date="${BUILD_DATE}" \
+    vcs-ref="${VCS_REF}" \
+    description="RustFS - local development with Rust toolchain."
+
+# Install runtime dependencies that might be missing in partial builder
+# (builder already has build-essential, lld, etc.)
+WORKDIR /app
+
+ENV CARGO_INCREMENTAL=1
+
+# Ensure we have the same default env vars available
+ENV RUSTFS_ADDRESS=":9000" \
+    RUSTFS_ACCESS_KEY="rustfsadmin" \
+    RUSTFS_SECRET_KEY="rustfsadmin" \
+    RUSTFS_CONSOLE_ENABLE="true" \
+    RUSTFS_VOLUMES="/data" \
+    RUST_LOG="warn" \
+    RUSTFS_OBS_LOG_DIRECTORY="/logs" \
+    RUSTFS_USERNAME="rustfs" \
+    RUSTFS_GROUPNAME="rustfs" \
+    RUSTFS_UID="10001" \
+    RUSTFS_GID="10001"
+
+# Note: we don't COPY source here because we expect it to be mounted at /app;
+# we rely on cargo run to build and run.
+EXPOSE 9000 9001
+
+COPY entrypoint.sh /entrypoint.sh
+RUN chmod +x /entrypoint.sh
+
+ENTRYPOINT ["/entrypoint.sh"]
+CMD ["cargo", "run", "--bin", "rustfs", "--"]
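Note: since the dev stage expects the source tree to be mounted at `/app` and falls through to `cargo run`, a typical local invocation might look like this (a hedged sketch; the tag and port mapping are illustrative):

```bash
# Build only the dev stage from the source Dockerfile
docker build --target dev -f Dockerfile.source -t rustfs:dev-source .
# Run it with the checkout mounted at /app so `cargo run` sees the sources
docker run --rm -it -p 9000:9000 -p 9001:9001 -v "$(pwd)":/app rustfs:dev-source
```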

# -----------------------------
# Runtime stage (Ubuntu minimal)
# -----------------------------
@@ -143,8 +187,8 @@ RUN set -eux; \

# Create a conventional runtime user/group (final switch happens in entrypoint via chroot --userspec)
RUN set -eux; \
-    groupadd -g 1000 rustfs; \
-    useradd -u 1000 -g rustfs -M -s /usr/sbin/nologin rustfs
+    groupadd -g 10001 rustfs; \
+    useradd -u 10001 -g rustfs -M -s /usr/sbin/nologin rustfs

WORKDIR /app

@@ -166,14 +210,13 @@ ENV RUSTFS_ADDRESS=":9000" \
    RUSTFS_CONSOLE_ENABLE="true" \
    RUSTFS_VOLUMES="/data" \
    RUST_LOG="warn" \
    RUSTFS_OBS_LOG_DIRECTORY="/logs" \
    RUSTFS_USERNAME="rustfs" \
    RUSTFS_GROUPNAME="rustfs" \
-    RUSTFS_UID="1000" \
-    RUSTFS_GID="1000"
+    RUSTFS_UID="10001" \
+    RUSTFS_GID="10001"

EXPOSE 9000
-VOLUME ["/data", "/logs"]
+VOLUME ["/data"]

# Keep root here; entrypoint will drop privileges using chroot --userspec
ENTRYPOINT ["/entrypoint.sh"]
411  Makefile
@@ -2,375 +2,80 @@
# Remote development requires VSCode with Dev Containers, Remote SSH, Remote Explorer
# https://code.visualstudio.com/docs/remote/containers
###########

+.PHONY: SHELL
+
+# Makefile global config
+# Use config.mak to override any of the following variables.
+# Do not make changes here.
+
+.DEFAULT_GOAL := help
+.EXPORT_ALL_VARIABLES:
+.ONESHELL:
+.SILENT:
+
+NUM_CORES := $(shell nproc 2>/dev/null || sysctl -n hw.ncpu)
+
+MAKEFLAGS += -j$(NUM_CORES) -l$(NUM_CORES)
+MAKEFLAGS += --silent
+
+SHELL := $(shell which bash)
+.SHELLFLAGS = -eu -o pipefail -c
+
DOCKER_CLI ?= docker
IMAGE_NAME ?= rustfs:v1.0.0
CONTAINER_NAME ?= rustfs-dev
# Docker build configurations
DOCKERFILE_PRODUCTION = Dockerfile
DOCKERFILE_SOURCE = Dockerfile.source

-# Code quality and formatting targets
-.PHONY: fmt
-fmt:
-	@echo "🔧 Formatting code..."
-	cargo fmt --all
-
-.PHONY: fmt-check
-fmt-check:
-	@echo "📝 Checking code formatting..."
-	cargo fmt --all --check
-
-.PHONY: clippy
-clippy:
-	@echo "🔍 Running clippy checks..."
-	cargo clippy --fix --allow-dirty
-	cargo clippy --all-targets --all-features -- -D warnings
-
-.PHONY: check
-check:
-	@echo "🔨 Running compilation check..."
-	cargo check --all-targets
-
-.PHONY: test
-test:
-	@echo "🧪 Running tests..."
-	@if command -v cargo-nextest >/dev/null 2>&1; then \
-		cargo nextest run --all --exclude e2e_test; \
-	else \
-		echo "ℹ️ cargo-nextest not found; falling back to 'cargo test'"; \
-		cargo test --workspace --exclude e2e_test -- --nocapture; \
-	fi
-	cargo test --all --doc
-
-.PHONY: pre-commit
-pre-commit: fmt clippy check test
-	@echo "✅ All pre-commit checks passed!"
-
-.PHONY: setup-hooks
-setup-hooks:
-	@echo "🔧 Setting up git hooks..."
-	chmod +x .git/hooks/pre-commit
-	@echo "✅ Git hooks setup complete!"
-
-.PHONY: e2e-server
-e2e-server:
-	sh $(shell pwd)/scripts/run.sh
-
-.PHONY: probe-e2e
-probe-e2e:
-	sh $(shell pwd)/scripts/probe.sh
-
-# Native build using build-rustfs.sh script
-.PHONY: build
-build:
-	@echo "🔨 Building RustFS using build-rustfs.sh script..."
-	./build-rustfs.sh
-
-.PHONY: build-dev
-build-dev:
-	@echo "🔨 Building RustFS in development mode..."
-	./build-rustfs.sh --dev
-
-# Docker-based build (alternative approach)
-# Usage: make BUILD_OS=ubuntu22.04 build-docker
-# Output: target/ubuntu22.04/release/rustfs
-BUILD_OS ?= rockylinux9.3
-.PHONY: build-docker
-build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
-build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
-build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
-build-docker:
-	@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
-	$(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
-	$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)
-
-.PHONY: build-musl
-build-musl:
-	@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
-	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
-	./build-rustfs.sh --platform x86_64-unknown-linux-musl
+# Makefile colors config
+bold := $(shell tput bold)
+normal := $(shell tput sgr0)
+errorTitle := $(shell tput setab 1 && tput bold && echo '\n')
+recommendation := $(shell tput setab 4)
+underline := $(shell tput smul)
+reset := $(shell tput -Txterm sgr0)
+black := $(shell tput setaf 0)
+red := $(shell tput setaf 1)
+green := $(shell tput setaf 2)
+yellow := $(shell tput setaf 3)
+blue := $(shell tput setaf 4)
+magenta := $(shell tput setaf 5)
+cyan := $(shell tput setaf 6)
+white := $(shell tput setaf 7)

-.PHONY: build-gnu
-build-gnu:
-	@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
-	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
-	./build-rustfs.sh --platform x86_64-unknown-linux-gnu
+define HEADER
+How to use me:
+    # To get help for each target
+    ${bold}make help${reset}

-.PHONY: build-musl-arm64
-build-musl-arm64:
-	@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
-	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
-	./build-rustfs.sh --platform aarch64-unknown-linux-musl
+    # To run and execute a target
+    ${bold}make ${cyan}<target>${reset}

-.PHONY: build-gnu-arm64
-build-gnu-arm64:
-	@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
-	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
-	./build-rustfs.sh --platform aarch64-unknown-linux-gnu
+💡 For more help use 'make help', 'make help-build' or 'make help-docker'

-.PHONY: deploy-dev
-deploy-dev: build-musl
-	@echo "🚀 Deploying to dev server: $${IP}"
-	./scripts/dev_deploy.sh $${IP}
+🦀 RustFS Makefile Help:

-# ========================================================================================
-# Docker Multi-Architecture Builds (Primary Methods)
-# ========================================================================================
+📋 Main Command Categories:
+    make help-build     # Show build-related help
+    make help-docker    # Show Docker-related help

-# Production builds using docker-buildx.sh (for CI/CD and production)
-.PHONY: docker-buildx
-docker-buildx:
-	@echo "🏗️ Building multi-architecture production Docker images with buildx..."
-	./docker-buildx.sh
+🔧 Code Quality:
+    make fmt            # Format code
+    make clippy         # Run clippy checks
+    make test           # Run tests
+    make pre-commit     # Run all pre-commit checks

-.PHONY: docker-buildx-push
-docker-buildx-push:
-	@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
-	./docker-buildx.sh --push
-
-.PHONY: docker-buildx-version
-docker-buildx-version:
-	@if [ -z "$(VERSION)" ]; then \
-		echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
-		exit 1; \
-	fi
-	@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
-	./docker-buildx.sh --release $(VERSION)
-
-.PHONY: docker-buildx-push-version
-docker-buildx-push-version:
-	@if [ -z "$(VERSION)" ]; then \
-		echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
-		exit 1; \
-	fi
-	@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
-	./docker-buildx.sh --release $(VERSION) --push
-
-# Development/Source builds using direct buildx commands
-.PHONY: docker-dev
-docker-dev:
-	@echo "🏗️ Building multi-architecture development Docker images with buildx..."
-	@echo "💡 This builds from source code and is intended for local development and testing"
-	@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
-	$(DOCKER_CLI) buildx build \
-		--platform linux/amd64,linux/arm64 \
-		--file $(DOCKERFILE_SOURCE) \
-		--tag rustfs:source-latest \
-		--tag rustfs:dev-latest \
-		.
-
-.PHONY: docker-dev-local
-docker-dev-local:
-	@echo "🏗️ Building single-architecture development Docker image for local use..."
-	@echo "💡 This builds from source code for the current platform and loads locally"
-	$(DOCKER_CLI) buildx build \
-		--file $(DOCKERFILE_SOURCE) \
-		--tag rustfs:source-latest \
-		--tag rustfs:dev-latest \
-		--load \
-		.
-
-.PHONY: docker-dev-push
-docker-dev-push:
-	@if [ -z "$(REGISTRY)" ]; then \
-		echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
-		exit 1; \
-	fi
-	@echo "🚀 Building and pushing multi-architecture development Docker images..."
-	@echo "💡 Pushing to registry: $(REGISTRY)"
-	$(DOCKER_CLI) buildx build \
-		--platform linux/amd64,linux/arm64 \
-		--file $(DOCKERFILE_SOURCE) \
-		--tag $(REGISTRY)/rustfs:source-latest \
-		--tag $(REGISTRY)/rustfs:dev-latest \
-		--push \
-		.
+🚀 Quick Start:
+    make build              # Build RustFS binary
+    make docker-dev-local   # Build development Docker image (local)
+    make dev-env-start      # Start development environment
+
+
+endef
+export HEADER

-# Local production builds using direct buildx (alternative to docker-buildx.sh)
-.PHONY: docker-buildx-production-local
-docker-buildx-production-local:
-	@echo "🏗️ Building single-architecture production Docker image locally..."
-	@echo "💡 Alternative to docker-buildx.sh for local testing"
-	$(DOCKER_CLI) buildx build \
-		--file $(DOCKERFILE_PRODUCTION) \
-		--tag rustfs:production-latest \
-		--tag rustfs:latest \
-		--load \
-		--build-arg RELEASE=latest \
-		.
+-include $(addsuffix /*.mak, $(shell find .config/make -type d))

-# ========================================================================================
-# Single Architecture Docker Builds (Traditional)
-# ========================================================================================
-
-.PHONY: docker-build-production
-docker-build-production:
-	@echo "🏗️ Building single-architecture production Docker image..."
-	@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
-	$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .
-
-.PHONY: docker-build-source
-docker-build-source:
-	@echo "🏗️ Building single-architecture source Docker image..."
-	@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
-	DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
-		--build-arg BUILDKIT_INLINE_CACHE=1 \
-		-f $(DOCKERFILE_SOURCE) -t rustfs:source .
-
-# ========================================================================================
-# Development Environment
-# ========================================================================================
-
-.PHONY: dev-env-start
-dev-env-start:
-	@echo "🚀 Starting development environment..."
-	$(DOCKER_CLI) buildx build \
-		--file $(DOCKERFILE_SOURCE) \
-		--tag rustfs:dev \
-		--load \
-		.
-	$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
-	$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
-	$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
-		-p 9010:9010 -p 9000:9000 \
-		-v $(shell pwd):/workspace \
-		-it rustfs:dev
-
-.PHONY: dev-env-stop
-dev-env-stop:
-	@echo "🛑 Stopping development environment..."
-	$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
-	$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
-
-.PHONY: dev-env-restart
-dev-env-restart: dev-env-stop dev-env-start
-
-# ========================================================================================
-# Build Utilities
-# ========================================================================================
-
-.PHONY: docker-inspect-multiarch
-docker-inspect-multiarch:
-	@if [ -z "$(IMAGE)" ]; then \
-		echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
-		exit 1; \
-	fi
-	@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
-	docker buildx imagetools inspect $(IMAGE)
-
-.PHONY: build-cross-all
-build-cross-all:
-	@echo "🔧 Building all target architectures..."
-	@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
-	@echo "🔨 Generating protobuf code..."
-	cargo run --bin gproto || true
-	@echo "🔨 Building x86_64-unknown-linux-gnu..."
-	./build-rustfs.sh --platform x86_64-unknown-linux-gnu
-	@echo "🔨 Building aarch64-unknown-linux-gnu..."
-	./build-rustfs.sh --platform aarch64-unknown-linux-gnu
-	@echo "🔨 Building x86_64-unknown-linux-musl..."
-	./build-rustfs.sh --platform x86_64-unknown-linux-musl
-	@echo "🔨 Building aarch64-unknown-linux-musl..."
-	./build-rustfs.sh --platform aarch64-unknown-linux-musl
-	@echo "✅ All architectures built successfully!"
-
-# ========================================================================================
-# Help and Documentation
-# ========================================================================================
-
-.PHONY: help-build
-help-build:
-	@echo "🔨 RustFS Build Help:"
-	@echo ""
-	@echo "🚀 Local Build (Recommended):"
-	@echo "  make build                # Build RustFS binary (includes console by default)"
-	@echo "  make build-dev            # Development mode build"
-	@echo "  make build-musl           # Build x86_64 musl version"
-	@echo "  make build-gnu            # Build x86_64 GNU version"
-	@echo "  make build-musl-arm64     # Build aarch64 musl version"
-	@echo "  make build-gnu-arm64      # Build aarch64 GNU version"
-	@echo ""
-	@echo "🐳 Docker Build:"
-	@echo "  make build-docker                        # Build using Docker container"
-	@echo "  make build-docker BUILD_OS=ubuntu22.04   # Specify build system"
-	@echo ""
-	@echo "🏗️ Cross-architecture Build:"
-	@echo "  make build-cross-all      # Build binaries for all architectures"
-	@echo ""
-	@echo "🔧 Direct usage of build-rustfs.sh script:"
-	@echo "  ./build-rustfs.sh --help                              # View script help"
-	@echo "  ./build-rustfs.sh --no-console                        # Build without console resources"
-	@echo "  ./build-rustfs.sh --force-console-update              # Force update console resources"
-	@echo "  ./build-rustfs.sh --dev                               # Development mode build"
-	@echo "  ./build-rustfs.sh --sign                              # Sign binary files"
-	@echo "  ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
-	@echo "  ./build-rustfs.sh --skip-verification                 # Skip binary verification"
-	@echo ""
-	@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"
-
-.PHONY: help-docker
-help-docker:
-	@echo "🐳 Docker Multi-architecture Build Help:"
-	@echo ""
-	@echo "🚀 Production Image Build (Recommended to use docker-buildx.sh):"
-	@echo "  make docker-buildx                              # Build production multi-arch image (no push)"
-	@echo "  make docker-buildx-push                         # Build and push production multi-arch image"
-	@echo "  make docker-buildx-version VERSION=v1.0.0       # Build specific version"
-	@echo "  make docker-buildx-push-version VERSION=v1.0.0  # Build and push specific version"
-	@echo ""
-	@echo "🔧 Development/Source Image Build (Local development testing):"
-	@echo "  make docker-dev                  # Build dev multi-arch image (cannot load locally)"
-	@echo "  make docker-dev-local            # Build dev single-arch image (local load)"
-	@echo "  make docker-dev-push REGISTRY=xxx # Build and push dev image"
-	@echo ""
-	@echo "🏗️ Local Production Image Build (Alternative):"
-	@echo "  make docker-buildx-production-local # Build production single-arch image locally"
-	@echo ""
-	@echo "📦 Single-architecture Build (Traditional way):"
-	@echo "  make docker-build-production     # Build single-arch production image"
-	@echo "  make docker-build-source         # Build single-arch source image"
-	@echo ""
-	@echo "🚀 Development Environment Management:"
-	@echo "  make dev-env-start               # Start development container environment"
-	@echo "  make dev-env-stop                # Stop development container environment"
-	@echo "  make dev-env-restart             # Restart development container environment"
-	@echo ""
-	@echo "🔧 Auxiliary Tools:"
-	@echo "  make build-cross-all             # Build binaries for all architectures"
-	@echo "  make docker-inspect-multiarch IMAGE=xxx # Check image architecture support"
-	@echo ""
-	@echo "📋 Environment Variables:"
-	@echo "  REGISTRY            Image registry address (required for push)"
-	@echo "  DOCKERHUB_USERNAME  Docker Hub username"
-	@echo "  DOCKERHUB_TOKEN     Docker Hub access token"
-	@echo "  GITHUB_TOKEN        GitHub access token"
-	@echo ""
-	@echo "💡 Suggestions:"
-	@echo "  - Production use: Use docker-buildx* commands (based on precompiled binaries)"
-	@echo "  - Local development: Use docker-dev* commands (build from source)"
-	@echo "  - Development environment: Use dev-env-* commands to manage dev containers"
-
-.PHONY: help
-help:
-	@echo "🦀 RustFS Makefile Help:"
-	@echo ""
-	@echo "📋 Main Command Categories:"
-	@echo "  make help-build    # Show build-related help"
-	@echo "  make help-docker   # Show Docker-related help"
-	@echo ""
-	@echo "🔧 Code Quality:"
-	@echo "  make fmt           # Format code"
-	@echo "  make clippy        # Run clippy checks"
-	@echo "  make test          # Run tests"
-	@echo "  make pre-commit    # Run all pre-commit checks"
-	@echo ""
-	@echo "🚀 Quick Start:"
-	@echo "  make build            # Build RustFS binary"
-	@echo "  make docker-dev-local # Build development Docker image (local)"
-	@echo "  make dev-env-start    # Start development environment"
-	@echo ""
-	@echo "💡 For more help use 'make help-build' or 'make help-docker'"
35
README.md
35
README.md
@@ -10,6 +10,11 @@
 <a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
 </p>
+
+<p align="center">
+  <a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14181" alt="rustfs%2Frustfs | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
+</p>
+
 <p align="center">
   <a href="https://docs.rustfs.com/installation/">Getting Started</a>
   · <a href="https://docs.rustfs.com/">Docs</a>
@@ -45,10 +50,10 @@ Unlike other storage systems, RustFS is released under the permissible Apache 2.
 | :--- | :--- | :--- | :--- |
 | **S3 Core Features** | ✅ Available | **Bitrot Protection** | ✅ Available |
 | **Upload / Download** | ✅ Available | **Single Node Mode** | ✅ Available |
-| **Versioning** | ✅ Available | **Bucket Replication** | ⚠️ Partial Support |
+| **Versioning** | ✅ Available | **Bucket Replication** | ✅ Available |
 | **Logging** | ✅ Available | **Lifecycle Management** | 🚧 Under Testing |
 | **Event Notifications** | ✅ Available | **Distributed Mode** | 🚧 Under Testing |
-| **K8s Helm Charts** | ✅ Available | **OPA (Open Policy Agent)** | 🚧 Under Testing |
+| **K8s Helm Charts** | ✅ Available | **RustFS KMS** | 🚧 Under Testing |

@@ -103,7 +108,7 @@ The RustFS container runs as a non-root user `rustfs` (UID `10001`). If you run
 docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest

 # Using specific version
-docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.68
+docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0-alpha.76
 ```

 You can also use Docker Compose. Using the `docker-compose.yml` file in the root directory:
@@ -153,11 +158,28 @@ make help-docker # Show all Docker-related commands

 Follow the instructions in the [Helm Chart README](https://charts.rustfs.com/) to install RustFS on a Kubernetes cluster.

+### 5\. Nix Flake (Option 5)
+
+If you have [Nix with flakes enabled](https://nixos.wiki/wiki/Flakes#Enable_flakes):
+
+```bash
+# Run directly without installing
+nix run github:rustfs/rustfs
+
+# Build the binary
+nix build github:rustfs/rustfs
+./result/bin/rustfs --help
+
+# Or from a local checkout
+nix build
+nix run
+```
+
 -----

 ### Accessing RustFS

-5. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console.
+5. **Access the Console**: Open your web browser and navigate to `http://localhost:9001` to access the RustFS console.
    * Default credentials: `rustfsadmin` / `rustfsadmin`
 6. **Create a Bucket**: Use the console to create a new bucket for your objects.
 7. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs/clients to interact with your RustFS instance.
@@ -198,11 +220,6 @@ RustFS is a community-driven project, and we appreciate all contributions. Check
 <img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
 </a>

-## Github Trending Top
-
-🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending top charts.
-
-<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>

 ## Star History
README_ZH.md (12 changed lines)
@@ -10,6 +10,10 @@
 <a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
 </p>
+
+<p align="center">
+  <a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14181" alt="rustfs%2Frustfs | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
+</p>
 <p align="center">
   <a href="https://docs.rustfs.com/installation/">快速开始</a>
   · <a href="https://docs.rustfs.com/">文档</a>
@@ -17,6 +21,8 @@
 · <a href="https://github.com/rustfs/rustfs/discussions">社区讨论</a>
 </p>
+
+
 <p align="center">
 <a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a> | 简体中文 |
 <a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
@@ -46,7 +52,7 @@ RustFS 是一个基于 Rust 构建的高性能分布式对象存储系统。Rust
 | :--- | :--- | :--- | :--- |
 | **S3 核心功能** | ✅ 可用 | **Bitrot (防数据腐烂)** | ✅ 可用 |
 | **上传 / 下载** | ✅ 可用 | **单机模式** | ✅ 可用 |
-| **版本控制** | ✅ 可用 | **存储桶复制** | ⚠️ 部分可用 |
+| **版本控制** | ✅ 可用 | **存储桶复制** | ✅ 可用 |
 | **日志功能** | ✅ 可用 | **生命周期管理** | 🚧 测试中 |
 | **事件通知** | ✅ 可用 | **分布式模式** | 🚧 测试中 |
 | **K8s Helm Chart** | ✅ 可用 | **OPA (策略引擎)** | 🚧 测试中 |
@@ -200,11 +206,7 @@ RustFS 是一个社区驱动的项目,我们感谢所有的贡献。请查看
 <img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
 </a>

-## Github Trending Top
-
-🚀 RustFS 深受全球开源爱好者和企业用户的喜爱,经常荣登 GitHub Trending 榜单。
-
-<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>

 ## Star 历史
SECURITY.md (36 changed lines)
@@ -1,18 +1,40 @@
 # Security Policy

+## Security Philosophy
+
+At RustFS, we take security seriously. We believe that **transparency leads to better security**. The more open our code is, the more eyes are on it, and the faster we can identify and resolve potential issues.
+
+We highly value the contributions of the security community and welcome anyone to audit our code. Your efforts help us make RustFS safer for everyone.
+
 ## Supported Versions

-Use this section to tell people about which versions of your project are
-currently being supported with security updates.
+To help us focus our security efforts, please refer to the table below to see which versions of RustFS are currently supported with security updates.

 | Version | Supported          |
 | ------- | ------------------ |
-| 1.x.x   | :white_check_mark: |
+| Latest  | :white_check_mark: |
 | < 1.0   | :x:                |

 ## Reporting a Vulnerability

-Use this section to tell people how to report a vulnerability.
+If you discover a security vulnerability in RustFS, we appreciate your help in disclosing it to us responsibly.

-Tell them where to go, how often they can expect to get an update on a
-reported vulnerability, what to expect if the vulnerability is accepted or
-declined, etc.
+**Please do not open a public GitHub issue for security vulnerabilities.** Publicly disclosing a vulnerability can put the entire community at risk before a fix is available.
+
+### How to Report
+
+1. https://github.com/rustfs/rustfs/security/advisories/new
+2. Please email us directly at: **security@rustfs.com**
+
+In your email, please include:
+1. **Description**: A detailed description of the vulnerability.
+2. **Steps to Reproduce**: Steps or a script to reproduce the issue.
+3. **Impact**: The potential impact of the vulnerability.
+
+### Our Response Process
+
+1. **Acknowledgment**: We will acknowledge your email within 48 hours.
+2. **Assessment**: We will investigate the issue and determine its severity.
+3. **Fix & Disclosure**: We will work on a patch. Once the patch is released, we will publicly announce the vulnerability and acknowledge your contribution (unless you prefer to remain anonymous).
+
+Thank you for helping keep RustFS and its users safe!
@@ -36,6 +36,9 @@ clen = "clen"
 datas = "datas"
 bre = "bre"
+abd = "abd"
+mak = "mak"
+# s3-tests original test names (cannot be changed)
 nonexisted = "nonexisted"

 [files]
 extend-exclude = []
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash

 # RustFS Binary Build Script
 # This script compiles RustFS binaries for different platforms and architectures
@@ -348,7 +348,7 @@ impl ErasureSetHealer {
         }

         // save checkpoint periodically
-        if global_obj_idx % 100 == 0 {
+        if global_obj_idx.is_multiple_of(100) {
             checkpoint_manager
                 .update_position(bucket_index, *current_object_index)
                 .await?;
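The hunk above swaps a manual modulo check for `u64::is_multiple_of`. A minimal standalone sketch of the equivalence (assuming a toolchain of roughly Rust 1.87 or newer, where the method is stable):

```rust
fn main() {
    // Same cadence decision as the checkpoint code above.
    for idx in [0u64, 99, 100, 250, 300] {
        assert_eq!(idx % 100 == 0, idx.is_multiple_of(100));
    }
    // Unlike `%`, is_multiple_of cannot panic on a zero divisor:
    assert!(!7u64.is_multiple_of(0));
    assert!(0u64.is_multiple_of(0));
}
```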
@@ -143,16 +143,16 @@ impl PriorityHealQueue {
                 format!("object:{}:{}:{}", bucket, object, version_id.as_deref().unwrap_or(""))
             }
             HealType::Bucket { bucket } => {
-                format!("bucket:{}", bucket)
+                format!("bucket:{bucket}")
             }
             HealType::ErasureSet { set_disk_id, .. } => {
-                format!("erasure_set:{}", set_disk_id)
+                format!("erasure_set:{set_disk_id}")
             }
             HealType::Metadata { bucket, object } => {
-                format!("metadata:{}:{}", bucket, object)
+                format!("metadata:{bucket}:{object}")
             }
             HealType::MRF { meta_path } => {
-                format!("mrf:{}", meta_path)
+                format!("mrf:{meta_path}")
             }
             HealType::ECDecode {
                 bucket,
@@ -173,7 +173,7 @@ impl PriorityHealQueue {

     /// Check if an erasure set heal request for a specific set_disk_id exists
     fn contains_erasure_set(&self, set_disk_id: &str) -> bool {
-        let key = format!("erasure_set:{}", set_disk_id);
+        let key = format!("erasure_set:{set_disk_id}");
         self.dedup_keys.contains(&key)
     }
 }
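These rewrites only switch `format!` to inline captured identifiers; the output is byte-for-byte identical, which is what clippy's `uninlined_format_args` lint nudges toward. A quick sketch demonstrating that:

```rust
fn main() {
    let bucket = "photos";
    let set_disk_id = "pool_0_set_1";
    // Positional argument and inline capture produce the same string.
    assert_eq!(format!("bucket:{}", bucket), format!("bucket:{bucket}"));
    assert_eq!(format!("erasure_set:{}", set_disk_id), format!("erasure_set:{set_disk_id}"));
}
```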
@@ -327,7 +327,7 @@ impl HealManager {

         if queue_len >= queue_capacity {
             return Err(Error::ConfigurationError {
-                message: format!("Heal queue is full ({}/{})", queue_len, queue_capacity),
+                message: format!("Heal queue is full ({queue_len}/{queue_capacity})"),
             });
         }
|
||||
@@ -468,14 +468,17 @@ impl HealManager {
|
||||
let active_heals = self.active_heals.clone();
|
||||
let cancel_token = self.cancel_token.clone();
|
||||
let storage = self.storage.clone();
|
||||
|
||||
info!(
|
||||
"start_auto_disk_scanner: Starting auto disk scanner with interval: {:?}",
|
||||
config.read().await.heal_interval
|
||||
);
|
||||
let mut duration = {
|
||||
let config = config.read().await;
|
||||
config.heal_interval
|
||||
};
|
||||
if duration < Duration::from_secs(1) {
|
||||
duration = Duration::from_secs(1);
|
||||
}
|
||||
info!("start_auto_disk_scanner: Starting auto disk scanner with interval: {:?}", duration);
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut interval = interval(config.read().await.heal_interval);
|
||||
let mut interval = interval(duration);
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
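The clamp above matters because `tokio::time::interval` panics when handed a zero period, so a `heal_interval` misconfigured to zero would crash the spawned scanner task. A dependency-free sketch of the same floor logic:

```rust
use std::time::Duration;

// `tokio::time::interval` panics if its period is zero, so the scanner
// floors the configured heal_interval at one second before spawning.
fn clamped_interval(configured: Duration) -> Duration {
    if configured < Duration::from_secs(1) {
        Duration::from_secs(1)
    } else {
        configured
    }
}

fn main() {
    assert_eq!(clamped_interval(Duration::ZERO), Duration::from_secs(1));
    assert_eq!(clamped_interval(Duration::from_millis(200)), Duration::from_secs(1));
    assert_eq!(clamped_interval(Duration::from_secs(30)), Duration::from_secs(30));
}
```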
@@ -489,12 +492,11 @@ impl HealManager {
                         for (_, disk_opt) in GLOBAL_LOCAL_DISK_MAP.read().await.iter() {
                             if let Some(disk) = disk_opt {
                                 // detect unformatted disk via get_disk_id()
-                                if let Err(err) = disk.get_disk_id().await {
-                                    if err == DiskError::UnformattedDisk {
-                                        endpoints.push(disk.endpoint());
-                                        continue;
-                                    }
-                                }
+                                if let Err(err) = disk.get_disk_id().await
+                                    && err == DiskError::UnformattedDisk {
+                                    endpoints.push(disk.endpoint());
+                                    continue;
+                                }
                             }
                         }
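This hunk is the first of many in this diff that collapse nested `if let` blocks into a single let-chain, a form available on edition 2024 toolchains (let-chains were stabilized around Rust 1.88). A self-contained sketch of the before/after shape:

```rust
// Requires edition 2024, where `if let` chains with `&&` are stable.
fn first_even(values: Option<Vec<i32>>) -> Option<i32> {
    // Before: three nested levels.
    // if let Some(v) = &values {
    //     if let Some(&n) = v.first() {
    //         if n % 2 == 0 { return Some(n); }
    //     }
    // }
    // After: one flat chain.
    if let Some(v) = &values
        && let Some(&n) = v.first()
        && n % 2 == 0
    {
        return Some(n);
    }
    None
}

fn main() {
    assert_eq!(first_even(Some(vec![4, 5])), Some(4));
    assert_eq!(first_even(Some(vec![3])), None);
    assert_eq!(first_even(None), None);
}
```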
@@ -541,10 +541,10 @@ impl ResumeUtils {
         for entry in entries {
             if entry.ends_with(&format!("_{RESUME_STATE_FILE}")) {
                 // Extract task ID from filename: {task_id}_ahm_resume_state.json
-                if let Some(task_id) = entry.strip_suffix(&format!("_{RESUME_STATE_FILE}")) {
-                    if !task_id.is_empty() {
-                        task_ids.push(task_id.to_string());
-                    }
+                if let Some(task_id) = entry.strip_suffix(&format!("_{RESUME_STATE_FILE}"))
+                    && !task_id.is_empty()
+                {
+                    task_ids.push(task_id.to_string());
                 }
             }
         }
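The same chaining applies to the task-ID extraction above. A small sketch of that filename parsing, where `RESUME_STATE_FILE` is a stand-in constant rather than the crate's real value:

```rust
const RESUME_STATE_FILE: &str = "ahm_resume_state.json"; // stand-in value

// Keep only non-empty task IDs from entries shaped {task_id}_{suffix}.
fn extract_task_ids(entries: &[&str]) -> Vec<String> {
    let suffix = format!("_{RESUME_STATE_FILE}");
    entries
        .iter()
        .filter_map(|e| e.strip_suffix(suffix.as_str()))
        .filter(|id| !id.is_empty())
        .map(str::to_string)
        .collect()
}

fn main() {
    let entries = ["task42_ahm_resume_state.json", "_ahm_resume_state.json", "other.json"];
    assert_eq!(extract_task_ids(&entries), vec!["task42".to_string()]);
}
```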
@@ -83,10 +83,10 @@ pub struct CheckpointManager {

 impl CheckpointManager {
     pub fn new(node_id: &str, data_dir: &Path) -> Self {
-        if !data_dir.exists() {
-            if let Err(e) = std::fs::create_dir_all(data_dir) {
-                error!("create data dir failed {:?}: {}", data_dir, e);
-            }
+        if !data_dir.exists()
+            && let Err(e) = std::fs::create_dir_all(data_dir)
+        {
+            error!("create data dir failed {:?}: {}", data_dir, e);
         }

         let checkpoint_file = data_dir.join(format!("scanner_checkpoint_{node_id}.json"));

@@ -29,8 +29,8 @@ use rustfs_ecstore::{
     self as ecstore, StorageAPI,
     bucket::versioning::VersioningApi,
     bucket::versioning_sys::BucketVersioningSys,
-    data_usage::{aggregate_local_snapshots, store_data_usage_in_backend},
-    disk::{Disk, DiskAPI, DiskStore, RUSTFS_META_BUCKET, WalkDirOptions},
+    data_usage::{aggregate_local_snapshots, compute_bucket_usage, store_data_usage_in_backend},
+    disk::{DiskAPI, DiskStore, RUSTFS_META_BUCKET, WalkDirOptions},
     set_disk::SetDisks,
     store_api::ObjectInfo,
 };
@@ -137,6 +137,8 @@ pub struct Scanner {
     data_usage_stats: Arc<Mutex<HashMap<String, DataUsageInfo>>>,
     /// Last data usage statistics collection time
     last_data_usage_collection: Arc<RwLock<Option<SystemTime>>>,
+    /// Backoff timestamp for heavy fallback collection
+    fallback_backoff_until: Arc<RwLock<Option<SystemTime>>>,
     /// Heal manager for auto-heal integration
     heal_manager: Option<Arc<HealManager>>,

@@ -192,6 +194,7 @@ impl Scanner {
             disk_metrics: Arc::new(Mutex::new(HashMap::new())),
             data_usage_stats: Arc::new(Mutex::new(HashMap::new())),
             last_data_usage_collection: Arc::new(RwLock::new(None)),
+            fallback_backoff_until: Arc::new(RwLock::new(None)),
             heal_manager,
             node_scanner,
             stats_aggregator,
@@ -398,10 +401,10 @@ impl Scanner {
         let mut latest_update: Option<SystemTime> = None;

         for snapshot in &outcome.snapshots {
-            if let Some(update) = snapshot.last_update {
-                if latest_update.is_none_or(|current| update > current) {
-                    latest_update = Some(update);
-                }
+            if let Some(update) = snapshot.last_update
+                && latest_update.is_none_or(|current| update > current)
+            {
+                latest_update = Some(update);
             }

             aggregated.objects_total_count = aggregated.objects_total_count.saturating_add(snapshot.objects_total_count);
@@ -473,6 +476,8 @@ impl Scanner {
             size: usage.total_size as i64,
             delete_marker: !usage.has_live_object && usage.delete_markers_count > 0,
             mod_time: usage.last_modified_ns.and_then(Self::ns_to_offset_datetime),
+            // Set is_latest to true for live objects - required for lifecycle expiration evaluation
+            is_latest: usage.has_live_object,
             ..Default::default()
         }
     }
@@ -522,28 +527,20 @@ impl Scanner {
         let (disks, _) = set_disks.get_online_disks_with_healing(false).await;
         if let Some(disk) = disks.first() {
             let bucket_path = disk.path().join(bucket_name);
-            if bucket_path.exists() {
-                if let Ok(entries) = std::fs::read_dir(&bucket_path) {
-                    for entry in entries.flatten() {
-                        if let Ok(file_type) = entry.file_type() {
-                            if file_type.is_dir() {
-                                if let Some(object_name) = entry.file_name().to_str() {
-                                    if !object_name.starts_with('.') {
-                                        debug!("Deep scanning object: {}/{}", bucket_name, object_name);
-                                        if let Err(e) = self.verify_object_integrity(bucket_name, object_name).await {
-                                            warn!(
-                                                "Object integrity verification failed for {}/{}: {}",
-                                                bucket_name, object_name, e
-                                            );
-                                        } else {
-                                            debug!(
-                                                "Object integrity verification passed for {}/{}",
-                                                bucket_name, object_name
-                                            );
-                                        }
-                                    }
-                                }
-                            }
-                        }
+            if bucket_path.exists()
+                && let Ok(entries) = std::fs::read_dir(&bucket_path)
+            {
+                for entry in entries.flatten() {
+                    if let Ok(file_type) = entry.file_type()
+                        && file_type.is_dir()
+                        && let Some(object_name) = entry.file_name().to_str()
+                        && !object_name.starts_with('.')
+                    {
+                        debug!("Deep scanning object: {}/{}", bucket_name, object_name);
+                        if let Err(e) = self.verify_object_integrity(bucket_name, object_name).await {
+                            warn!("Object integrity verification failed for {}/{}: {}", bucket_name, object_name, e);
+                        } else {
+                            debug!("Object integrity verification passed for {}/{}", bucket_name, object_name);
+                        }
                     }
                 }
@@ -854,10 +851,10 @@ impl Scanner {

         // Phase 2: Minimal EC verification for critical objects only
         // Note: The main scanning is now handled by NodeScanner in the background
-        if let Some(ecstore) = rustfs_ecstore::new_object_layer_fn() {
-            if let Err(e) = self.minimal_ec_verification(&ecstore).await {
-                error!("Minimal EC verification failed: {}", e);
-            }
+        if let Some(ecstore) = rustfs_ecstore::new_object_layer_fn()
+            && let Err(e) = self.minimal_ec_verification(&ecstore).await
+        {
+            error!("Minimal EC verification failed: {}", e);
         }

         // Update scan duration
@@ -879,6 +876,7 @@ impl Scanner {
     /// Collect and persist data usage statistics
     async fn collect_and_persist_data_usage(&self) -> Result<()> {
         info!("Starting data usage collection and persistence");
+        let now = SystemTime::now();

         // Get ECStore instance
         let Some(ecstore) = rustfs_ecstore::new_object_layer_fn() else {
@@ -886,6 +884,10 @@ impl Scanner {
             return Ok(());
         };

+        // Helper to avoid hammering the storage layer with repeated realtime scans.
+        let mut use_cached_on_backoff = false;
+        let fallback_backoff_secs = Duration::from_secs(300);
+
         // Run local usage scan and aggregate snapshots; fall back to on-demand build when necessary.
         let mut data_usage = match local_scan::scan_and_persist_local_usage(ecstore.clone()).await {
             Ok(outcome) => {
@@ -907,16 +909,54 @@ impl Scanner {
                         "Failed to aggregate local data usage snapshots, falling back to realtime collection: {}",
                         e
                     );
-                    self.build_data_usage_from_ecstore(&ecstore).await?
+                    match self.maybe_fallback_collection(now, fallback_backoff_secs, &ecstore).await? {
+                        Some(usage) => usage,
+                        None => {
+                            use_cached_on_backoff = true;
+                            DataUsageInfo::default()
+                        }
+                    }
                 }
             }
         }
         Err(e) => {
             warn!("Local usage scan failed (using realtime collection instead): {}", e);
-            self.build_data_usage_from_ecstore(&ecstore).await?
+            match self.maybe_fallback_collection(now, fallback_backoff_secs, &ecstore).await? {
+                Some(usage) => usage,
+                None => {
+                    use_cached_on_backoff = true;
+                    DataUsageInfo::default()
+                }
+            }
         }
     };

+        // If heavy fallback was skipped due to backoff, try to reuse cached stats to avoid empty responses.
+        if use_cached_on_backoff && data_usage.buckets_usage.is_empty() {
+            let cached = {
+                let guard = self.data_usage_stats.lock().await;
+                guard.values().next().cloned()
+            };
+            if let Some(cached_usage) = cached {
+                data_usage = cached_usage;
+            }
+
+            // If there is still no data, try backend before persisting zeros
+            if data_usage.buckets_usage.is_empty()
+                && let Ok(existing) = rustfs_ecstore::data_usage::load_data_usage_from_backend(ecstore.clone()).await
+                && !existing.buckets_usage.is_empty()
+            {
+                info!("Using existing backend data usage during fallback backoff");
+                data_usage = existing;
+            }
+
+            // Avoid overwriting valid backend stats with zeros when fallback is throttled
+            if data_usage.buckets_usage.is_empty() {
+                warn!("Skipping data usage persistence: fallback throttled and no cached/backend data available");
+                return Ok(());
+            }
+        }

         // Make sure bucket counters reflect aggregated content
         data_usage.buckets_count = data_usage.buckets_usage.len() as u64;
         if data_usage.last_update.is_none() {
@@ -959,8 +999,31 @@ impl Scanner {
         Ok(())
     }

+    async fn maybe_fallback_collection(
+        &self,
+        now: SystemTime,
+        backoff: Duration,
+        ecstore: &Arc<rustfs_ecstore::store::ECStore>,
+    ) -> Result<Option<DataUsageInfo>> {
+        let backoff_until = *self.fallback_backoff_until.read().await;
+        let within_backoff = backoff_until.map(|ts| now < ts).unwrap_or(false);
+
+        if within_backoff {
+            warn!(
+                "Skipping heavy data usage fallback within backoff window (until {:?}); using cached stats if available",
+                backoff_until
+            );
+            return Ok(None);
+        }
+
+        let usage = self.build_data_usage_from_ecstore(ecstore).await?;
+        let mut backoff_guard = self.fallback_backoff_until.write().await;
+        *backoff_guard = Some(now + backoff);
+        Ok(Some(usage))
+    }
+
     /// Build data usage statistics directly from ECStore
-    async fn build_data_usage_from_ecstore(&self, ecstore: &Arc<rustfs_ecstore::store::ECStore>) -> Result<DataUsageInfo> {
+    pub async fn build_data_usage_from_ecstore(&self, ecstore: &Arc<rustfs_ecstore::store::ECStore>) -> Result<DataUsageInfo> {
         let mut data_usage = DataUsageInfo::default();

         // Get bucket list
@@ -973,6 +1036,8 @@ impl Scanner {
         data_usage.last_update = Some(SystemTime::now());

         let mut total_objects = 0u64;
+        let mut total_versions = 0u64;
+        let mut total_delete_markers = 0u64;
         let mut total_size = 0u64;

         for bucket_info in buckets {
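The new `maybe_fallback_collection` wraps the expensive realtime scan in a backoff window. A simplified synchronous sketch of that guard (the real code keeps the timestamp behind an async `RwLock`):

```rust
use std::time::{Duration, SystemTime};

struct Backoff {
    until: Option<SystemTime>,
}

impl Backoff {
    // Run `job` only outside the backoff window; arm a new window after each run.
    fn try_run<T>(&mut self, now: SystemTime, window: Duration, job: impl FnOnce() -> T) -> Option<T> {
        if self.until.map(|ts| now < ts).unwrap_or(false) {
            return None; // throttled: caller falls back to cached stats
        }
        let out = job();
        self.until = Some(now + window);
        Some(out)
    }
}

fn main() {
    let mut backoff = Backoff { until: None };
    let now = SystemTime::now();
    assert!(backoff.try_run(now, Duration::from_secs(300), || "scan").is_some());
    // An immediate retry inside the 300s window is skipped.
    assert!(backoff.try_run(now, Duration::from_secs(300), || "scan").is_none());
}
```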
@@ -980,37 +1045,26 @@ impl Scanner {
                 continue; // Skip system buckets
             }

-            // Try to get actual object count for this bucket
-            let (object_count, bucket_size) = match ecstore
-                .clone()
-                .list_objects_v2(
-                    &bucket_info.name,
-                    "",    // prefix
-                    None,  // continuation_token
-                    None,  // delimiter
-                    100,   // max_keys - small limit for performance
-                    false, // fetch_owner
-                    None,  // start_after
-                    false, // incl_deleted
-                )
-                .await
-            {
-                Ok(result) => {
-                    let count = result.objects.len() as u64;
-                    let size = result.objects.iter().map(|obj| obj.size as u64).sum();
-                    (count, size)
-                }
-                Err(_) => (0, 0),
-            };
+            // Use ecstore pagination helper to avoid truncating at 100 objects
+            let (object_count, bucket_size, versions_count, delete_markers) =
+                match compute_bucket_usage(ecstore.clone(), &bucket_info.name).await {
+                    Ok(usage) => (usage.objects_count, usage.size, usage.versions_count, usage.delete_markers_count),
+                    Err(e) => {
+                        warn!("Failed to compute bucket usage for {}: {}", bucket_info.name, e);
+                        (0, 0, 0, 0)
+                    }
+                };

             total_objects += object_count;
+            total_versions += versions_count;
+            total_delete_markers += delete_markers;
             total_size += bucket_size;

             let bucket_usage = rustfs_common::data_usage::BucketUsageInfo {
                 size: bucket_size,
                 objects_count: object_count,
-                versions_count: object_count, // Simplified
-                delete_markers_count: 0,
+                versions_count,
+                delete_markers_count: delete_markers,
                 ..Default::default()
             };

@@ -1020,7 +1074,8 @@ impl Scanner {

         data_usage.objects_total_count = total_objects;
         data_usage.objects_total_size = total_size;
-        data_usage.versions_total_count = total_objects;
+        data_usage.versions_total_count = total_versions;
+        data_usage.delete_markers_total_count = total_delete_markers;
     }
     Err(e) => {
         warn!("Failed to list buckets for data usage collection: {}", e);
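The fix above replaces a single 100-key listing with `compute_bucket_usage`, which pages through the whole bucket, so totals are no longer truncated at the first page. A schematic sketch of why pagination changes the totals; `Page` and `list_page` here are hypothetical stand-ins, not the ecstore API:

```rust
struct Page {
    sizes: Vec<u64>,
    next_token: Option<String>,
}

// Toy lister: returns one page of object sizes plus a continuation token.
fn list_page(objects: &[u64], token: Option<String>, page_size: usize) -> Page {
    let start = token.and_then(|t| t.parse::<usize>().ok()).unwrap_or(0);
    let end = (start + page_size).min(objects.len());
    Page {
        sizes: objects[start..end].to_vec(),
        next_token: (end < objects.len()).then(|| end.to_string()),
    }
}

fn main() {
    let objects = vec![1u64; 1000];
    let (mut count, mut size, mut token) = (0u64, 0u64, None);
    loop {
        let page = list_page(&objects, token, 100);
        count += page.sizes.len() as u64;
        size += page.sizes.iter().sum::<u64>();
        token = page.next_token;
        if token.is_none() {
            break;
        }
    }
    // All 1000 objects are counted, not just the first page of 100.
    assert_eq!(count, 1000);
    assert_eq!(size, 1000);
}
```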
@@ -1657,36 +1712,34 @@ impl Scanner {
         // check disk status, if offline, submit erasure set heal task
         if !metrics.is_online {
             let enable_healing = self.config.read().await.enable_healing;
-            if enable_healing {
-                if let Some(heal_manager) = &self.heal_manager {
+            if enable_healing && let Some(heal_manager) = &self.heal_manager {
                 // Get bucket list for erasure set healing
                 let buckets = match rustfs_ecstore::new_object_layer_fn() {
                     Some(ecstore) => match ecstore.list_bucket(&ecstore::store_api::BucketOptions::default()).await {
                         Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
                         Err(e) => {
                             error!("Failed to get bucket list for disk healing: {}", e);
                             return Err(Error::Storage(e));
                         }
                     },
                     None => {
                         error!("No ECStore available for getting bucket list");
                         return Err(Error::Storage(ecstore::error::StorageError::other("No ECStore available")));
                     }
                 };

                 let set_disk_id = format!("pool_{}_set_{}", disk.endpoint().pool_idx, disk.endpoint().set_idx);
                 let req = HealRequest::new(
                     crate::heal::task::HealType::ErasureSet { buckets, set_disk_id },
                     crate::heal::task::HealOptions::default(),
                     crate::heal::task::HealPriority::High,
                 );
                 match heal_manager.submit_heal_request(req).await {
                     Ok(task_id) => {
                         warn!("disk offline, submit erasure set heal task: {} {}", task_id, disk_path);
                     }
                     Err(e) => {
                         error!("disk offline, submit erasure set heal task failed: {} {}", disk_path, e);
                     }
                 }
             }
@@ -1714,36 +1767,34 @@ impl Scanner {

         // disk access failed, submit erasure set heal task
         let enable_healing = self.config.read().await.enable_healing;
-        if enable_healing {
-            if let Some(heal_manager) = &self.heal_manager {
+        if enable_healing && let Some(heal_manager) = &self.heal_manager {
             // Get bucket list for erasure set healing
             let buckets = match rustfs_ecstore::new_object_layer_fn() {
                 Some(ecstore) => match ecstore.list_bucket(&ecstore::store_api::BucketOptions::default()).await {
                     Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
                     Err(e) => {
                         error!("Failed to get bucket list for disk healing: {}", e);
                         return Err(Error::Storage(e));
                     }
                 },
                 None => {
                     error!("No ECStore available for getting bucket list");
                     return Err(Error::Storage(ecstore::error::StorageError::other("No ECStore available")));
                 }
             };

             let set_disk_id = format!("pool_{}_set_{}", disk.endpoint().pool_idx, disk.endpoint().set_idx);
             let req = HealRequest::new(
                 crate::heal::task::HealType::ErasureSet { buckets, set_disk_id },
                 crate::heal::task::HealOptions::default(),
                 crate::heal::task::HealPriority::Urgent,
             );
             match heal_manager.submit_heal_request(req).await {
                 Ok(task_id) => {
                     warn!("disk access failed, submit erasure set heal task: {} {}", task_id, disk_path);
                 }
                 Err(heal_err) => {
                     error!("disk access failed, submit erasure set heal task failed: {} {}", disk_path, heal_err);
                 }
             }
         }
@@ -1756,11 +1807,11 @@ impl Scanner {
         let mut disk_objects = HashMap::new();
         for volume in volumes {
             // check cancel token
-            if let Some(cancel_token) = get_ahm_services_cancel_token() {
-                if cancel_token.is_cancelled() {
-                    info!("Cancellation requested, stopping disk scan");
-                    break;
-                }
+            if let Some(cancel_token) = get_ahm_services_cancel_token()
+                && cancel_token.is_cancelled()
+            {
+                info!("Cancellation requested, stopping disk scan");
+                break;
             }

             match self.scan_volume(disk, &volume.name).await {
@@ -1891,104 +1942,96 @@ impl Scanner {

                 // object metadata damaged, submit metadata heal task
                 let enable_healing = self.config.read().await.enable_healing;
-                if enable_healing {
-                    if let Some(heal_manager) = &self.heal_manager {
-                        let req = HealRequest::metadata(bucket.to_string(), entry.name.clone());
-                        match heal_manager.submit_heal_request(req).await {
-                            Ok(task_id) => {
-                                warn!(
-                                    "object metadata damaged, submit heal task: {} {} / {}",
-                                    task_id, bucket, entry.name
-                                );
-                            }
-                            Err(e) => {
-                                error!(
-                                    "object metadata damaged, submit heal task failed: {} / {} {}",
-                                    bucket, entry.name, e
-                                );
-                            }
-                        }
-                    }
-                }
+                if enable_healing && let Some(heal_manager) = &self.heal_manager {
+                    let req = HealRequest::metadata(bucket.to_string(), entry.name.clone());
+                    match heal_manager.submit_heal_request(req).await {
+                        Ok(task_id) => {
+                            warn!("object metadata damaged, submit heal task: {} {} / {}", task_id, bucket, entry.name);
+                        }
+                        Err(e) => {
+                            error!("object metadata damaged, submit heal task failed: {} / {} {}", bucket, entry.name, e);
+                        }
+                    }
+                }
             } else {
                 // Apply lifecycle actions
-                if let Some(lifecycle_config) = &lifecycle_config {
-                    if let Disk::Local(_local_disk) = &**disk {
-                        let vcfg = BucketVersioningSys::get(bucket).await.ok();
+                if let Some(lifecycle_config) = &lifecycle_config
+                    && disk.is_local()
+                {
+                    let vcfg = BucketVersioningSys::get(bucket).await.ok();

                     let mut scanner_item = ScannerItem {
                         bucket: bucket.to_string(),
                         object_name: entry.name.clone(),
                         lifecycle: Some(lifecycle_config.clone()),
                         versioning: versioning_config.clone(),
                     };
                     //ScannerItem::new(bucket.to_string(), Some(lifecycle_config.clone()), versioning_config.clone());
                     let fivs = match entry.clone().file_info_versions(&scanner_item.bucket) {
                         Ok(fivs) => fivs,
                         Err(_err) => {
                             stop_fn();
                             return Err(Error::other("skip this file"));
                         }
                     };
                     let mut size_s = SizeSummary::default();
                     let obj_infos = match scanner_item.apply_versions_actions(&fivs.versions).await {
                         Ok(obj_infos) => obj_infos,
                         Err(_err) => {
                             stop_fn();
                             return Err(Error::other("skip this file"));
                         }
                     };

                     let versioned = if let Some(vcfg) = vcfg.as_ref() {
                         vcfg.versioned(&scanner_item.object_name)
                     } else {
                         false
                     };

                     #[allow(unused_assignments)]
                     let mut obj_deleted = false;
                     for info in obj_infos.iter() {
                         let sz: i64;
                         (obj_deleted, sz) = scanner_item.apply_actions(info, &mut size_s).await;

                         if obj_deleted {
                             break;
                         }

                         let actual_sz = match info.get_actual_size() {
                             Ok(size) => size,
                             Err(_) => continue,
                         };

                         if info.delete_marker {
                             size_s.delete_markers += 1;
                         }

                         if info.version_id.is_some() && sz == actual_sz {
                             size_s.versions += 1;
                         }

                         size_s.total_size += sz as usize;

                         if info.delete_marker {
                             continue;
                         }
                     }

                     for free_version in fivs.free_versions.iter() {
                         let _obj_info = rustfs_ecstore::store_api::ObjectInfo::from_file_info(
                             free_version,
                             &scanner_item.bucket,
                             &scanner_item.object_name,
                             versioned,
                         );
                     }

                     // todo: global trace
                     /*if obj_deleted {
                         return Err(Error::other(ERR_IGNORE_FILE_CONTRIB).into());
                     }*/
                 }

                 // Store object metadata for later analysis
@@ -2000,22 +2043,17 @@ impl Scanner {

             // object metadata parse failed, submit metadata heal task
             let enable_healing = self.config.read().await.enable_healing;
-            if enable_healing {
-                if let Some(heal_manager) = &self.heal_manager {
-                    let req = HealRequest::metadata(bucket.to_string(), entry.name.clone());
-                    match heal_manager.submit_heal_request(req).await {
-                        Ok(task_id) => {
-                            warn!(
-                                "object metadata parse failed, submit heal task: {} {} / {}",
-                                task_id, bucket, entry.name
-                            );
-                        }
+            if enable_healing && let Some(heal_manager) = &self.heal_manager {
+                let req = HealRequest::metadata(bucket.to_string(), entry.name.clone());
+                match heal_manager.submit_heal_request(req).await {
+                    Ok(task_id) => {
+                        warn!("object metadata parse failed, submit heal task: {} {} / {}", task_id, bucket, entry.name);
+                    }
                     Err(e) => {
                         error!(
                             "object metadata parse failed, submit heal task failed: {} / {} {}",
                             bucket, entry.name, e
                         );
                     }
                 }
             }
@@ -2126,17 +2164,14 @@ impl Scanner {
         // the delete marker, but we keep it conservative here.
         let mut has_latest_delete_marker = false;
         for &disk_idx in locations {
-            if let Some(bucket_map) = all_disk_objects.get(disk_idx) {
-                if let Some(file_map) = bucket_map.get(bucket) {
-                    if let Some(fm) = file_map.get(object_name) {
-                        if let Some(first_ver) = fm.versions.first() {
-                            if first_ver.header.version_type == VersionType::Delete {
-                                has_latest_delete_marker = true;
-                                break;
-                            }
-                        }
-                    }
-                }
+            if let Some(bucket_map) = all_disk_objects.get(disk_idx)
+                && let Some(file_map) = bucket_map.get(bucket)
+                && let Some(fm) = file_map.get(object_name)
+                && let Some(first_ver) = fm.versions.first()
+                && first_ver.header.version_type == VersionType::Delete
+            {
+                has_latest_delete_marker = true;
+                break;
             }
         }
         if has_latest_delete_marker {
@@ -2184,28 +2219,26 @@ impl Scanner {

             // submit heal task
             let enable_healing = self.config.read().await.enable_healing;
-            if enable_healing {
-                if let Some(heal_manager) = &self.heal_manager {
+            if enable_healing && let Some(heal_manager) = &self.heal_manager {
                 use crate::heal::{HealPriority, HealRequest};
                 let req = HealRequest::new(
                     crate::heal::HealType::Object {
                         bucket: bucket.clone(),
                         object: object_name.clone(),
                         version_id: None,
                     },
                     crate::heal::HealOptions::default(),
                     HealPriority::High,
                 );
                 match heal_manager.submit_heal_request(req).await {
                     Ok(task_id) => {
                         warn!(
                             "object missing, submit heal task: {} {} / {} (missing disks: {:?})",
                             task_id, bucket, object_name, missing_disks
                         );
                     }
                     Err(e) => {
                         error!("object missing, submit heal task failed: {} / {} {}", bucket, object_name, e);
                     }
                 }
             }
@@ -2213,11 +2246,11 @@ impl Scanner {

         // Step 3: Deep scan EC verification
         let config = self.config.read().await;
-        if config.scan_mode == ScanMode::Deep {
-            if let Err(e) = self.verify_object_integrity(bucket, object_name).await {
-                objects_with_ec_issues += 1;
-                warn!("Object integrity verification failed for object {}/{}: {}", bucket, object_name, e);
-            }
+        if config.scan_mode == ScanMode::Deep
+            && let Err(e) = self.verify_object_integrity(bucket, object_name).await
+        {
+            objects_with_ec_issues += 1;
+            warn!("Object integrity verification failed for object {}/{}: {}", bucket, object_name, e);
         }
     }
@@ -2229,10 +2262,10 @@ impl Scanner {

         // Step 4: Collect data usage statistics if enabled
         let config = self.config.read().await;
-        if config.enable_data_usage_stats {
-            if let Err(e) = self.collect_data_usage_statistics(all_disk_objects).await {
-                error!("Failed to collect data usage statistics: {}", e);
-            }
+        if config.enable_data_usage_stats
+            && let Err(e) = self.collect_data_usage_statistics(all_disk_objects).await
+        {
+            error!("Failed to collect data usage statistics: {}", e);
         }
         drop(config);

@@ -2462,11 +2495,11 @@ impl Scanner {
         info!("Starting legacy scan loop for backward compatibility");

         loop {
-            if let Some(token) = get_ahm_services_cancel_token() {
-                if token.is_cancelled() {
-                    info!("Cancellation requested, exiting legacy scan loop");
-                    break;
-                }
+            if let Some(token) = get_ahm_services_cancel_token()
+                && token.is_cancelled()
+            {
+                info!("Cancellation requested, exiting legacy scan loop");
+                break;
             }

             let (enable_data_usage_stats, scan_interval) = {
@@ -2474,10 +2507,8 @@ impl Scanner {
                 (config.enable_data_usage_stats, config.scan_interval)
             };

-            if enable_data_usage_stats {
-                if let Err(e) = self.collect_and_persist_data_usage().await {
-                    warn!("Background data usage collection failed: {}", e);
-                }
+            if enable_data_usage_stats && let Err(e) = self.collect_and_persist_data_usage().await {
+                warn!("Background data usage collection failed: {}", e);
             }

             // Update local stats in aggregator after latest scan
@@ -2554,6 +2585,7 @@ impl Scanner {
             disk_metrics: Arc::clone(&self.disk_metrics),
             data_usage_stats: Arc::clone(&self.data_usage_stats),
             last_data_usage_collection: Arc::clone(&self.last_data_usage_collection),
+            fallback_backoff_until: Arc::clone(&self.fallback_backoff_until),
             heal_manager: self.heal_manager.clone(),
             node_scanner: Arc::clone(&self.node_scanner),
             stats_aggregator: Arc::clone(&self.stats_aggregator),
@@ -2591,10 +2623,10 @@ mod tests {
         // create temp dir as 4 disks
         let test_base_dir = test_dir.unwrap_or("/tmp/rustfs_ahm_test");
         let temp_dir = std::path::PathBuf::from(test_base_dir);
-        if temp_dir.exists() {
-            if let Err(e) = fs::remove_dir_all(&temp_dir) {
-                panic!("Failed to remove test directory: {e}");
-            }
+        if temp_dir.exists()
+            && let Err(e) = fs::remove_dir_all(&temp_dir)
+        {
+            panic!("Failed to remove test directory: {e}");
         }
         if let Err(e) = fs::create_dir_all(&temp_dir) {
             panic!("Failed to create test directory: {e}");
@@ -84,6 +84,9 @@ pub async fn scan_and_persist_local_usage(store: Arc<ECStore>) -> Result<LocalSc
         guard.clone()
     };

+    // Use the first local online disk in the set to avoid missing stats when disk 0 is down
+    let mut picked = false;
+
     for (disk_index, disk_opt) in disks.into_iter().enumerate() {
         let Some(disk) = disk_opt else {
             continue;
@@ -93,11 +96,17 @@ pub async fn scan_and_persist_local_usage(store: Arc<ECStore>) -> Result<LocalSc
             continue;
         }

-        // Count objects once by scanning only disk index zero from each set.
-        if disk_index != 0 {
+        if picked {
             continue;
         }

+        // Skip offline disks; keep looking for an online candidate
+        if !disk.is_online().await {
+            continue;
+        }
+
+        picked = true;
+
         let disk_id = match disk.get_disk_id().await.map_err(Error::from)? {
             Some(id) => id.to_string(),
             None => {
@@ -296,10 +305,10 @@ fn compute_object_usage(bucket: &str, object: &str, file_meta: &FileMeta) -> Res
             has_live_object = true;
             versions_count = versions_count.saturating_add(1);

-            if latest_file_info.is_none() {
-                if let Ok(info) = file_meta.into_fileinfo(bucket, object, "", false, false) {
-                    latest_file_info = Some(info);
-                }
+            if latest_file_info.is_none()
+                && let Ok(info) = file_meta.into_fileinfo(bucket, object, "", false, false, false)
+            {
+                latest_file_info = Some(info);
             }
         }
     }
@@ -112,10 +112,10 @@ impl LocalStatsManager {
     /// create new local stats manager
     pub fn new(node_id: &str, data_dir: &Path) -> Self {
         // ensure data directory exists
-        if !data_dir.exists() {
-            if let Err(e) = std::fs::create_dir_all(data_dir) {
-                error!("create stats data directory failed {:?}: {}", data_dir, e);
-            }
+        if !data_dir.exists()
+            && let Err(e) = std::fs::create_dir_all(data_dir)
+        {
+            error!("create stats data directory failed {:?}: {}", data_dir, e);
         }

         let stats_file = data_dir.join(format!("scanner_stats_{node_id}.json"));

@@ -436,10 +436,10 @@ impl NodeScanner {
     /// create a new node scanner
     pub fn new(node_id: String, config: NodeScannerConfig) -> Self {
         // Ensure data directory exists
-        if !config.data_dir.exists() {
-            if let Err(e) = std::fs::create_dir_all(&config.data_dir) {
-                error!("create data directory failed {:?}: {}", config.data_dir, e);
-            }
+        if !config.data_dir.exists()
+            && let Err(e) = std::fs::create_dir_all(&config.data_dir)
+        {
+            error!("create data directory failed {:?}: {}", config.data_dir, e);
         }

         let stats_manager = Arc::new(LocalStatsManager::new(&node_id, &config.data_dir));

@@ -327,16 +327,16 @@ impl DecentralizedStatsAggregator {
         );

         // Check cache validity if timestamp is not initial value (UNIX_EPOCH)
-        if cache_timestamp != SystemTime::UNIX_EPOCH {
-            if let Ok(elapsed) = now.duration_since(cache_timestamp) {
-                if elapsed < cache_ttl {
-                    if let Some(cached) = self.cached_stats.read().await.as_ref() {
-                        debug!("Returning cached aggregated stats, remaining TTL: {:?}", cache_ttl - elapsed);
-                        return Ok(cached.clone());
-                    }
-                } else {
-                    debug!("Cache expired: elapsed={:?} >= ttl={:?}", elapsed, cache_ttl);
+        if cache_timestamp != SystemTime::UNIX_EPOCH
+            && let Ok(elapsed) = now.duration_since(cache_timestamp)
+        {
+            if elapsed < cache_ttl {
+                if let Some(cached) = self.cached_stats.read().await.as_ref() {
+                    debug!("Returning cached aggregated stats, remaining TTL: {:?}", cache_ttl - elapsed);
+                    return Ok(cached.clone());
+                }
+            } else {
+                debug!("Cache expired: elapsed={:?} >= ttl={:?}", elapsed, cache_ttl);
             }
         }

@@ -347,7 +347,8 @@ impl DecentralizedStatsAggregator {

         // update cache
         *self.cached_stats.write().await = Some(aggregated.clone());
-        *self.cache_timestamp.write().await = aggregation_timestamp;
+        // Use the time when aggregation completes as cache timestamp to avoid premature expiry during long runs
+        *self.cache_timestamp.write().await = SystemTime::now();

         Ok(aggregated)
     }
@@ -359,7 +360,8 @@ impl DecentralizedStatsAggregator {

         // update cache
         *self.cached_stats.write().await = Some(aggregated.clone());
-        *self.cache_timestamp.write().await = now;
+        // Cache timestamp should reflect completion time rather than aggregation start
+        *self.cache_timestamp.write().await = SystemTime::now();

         Ok(aggregated)
     }
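Both hunks stamp the cache with the completion time instead of the aggregation start time, so a long-running aggregation no longer eats into the TTL before anyone can read the result. A reduced sketch of the pattern:

```rust
use std::time::{Duration, SystemTime};

struct Cache<T> {
    value: Option<T>,
    stamped_at: SystemTime,
    ttl: Duration,
}

impl<T: Clone> Cache<T> {
    fn get(&self, now: SystemTime) -> Option<T> {
        // Treat clock skew (stamp in the future) as fresh rather than erroring.
        let fresh = now.duration_since(self.stamped_at).map(|e| e < self.ttl).unwrap_or(true);
        if fresh { self.value.clone() } else { None }
    }

    fn put(&mut self, value: T) {
        self.value = Some(value);
        // Stamp at completion, not at the moment aggregation started.
        self.stamped_at = SystemTime::now();
    }
}

fn main() {
    let mut cache = Cache { value: None, stamped_at: SystemTime::UNIX_EPOCH, ttl: Duration::from_secs(60) };
    cache.put(42u32);
    assert_eq!(cache.get(SystemTime::now()), Some(42));
}
```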
crates/ahm/tests/data_usage_fallback_test.rs (new file, 112 lines)

@@ -0,0 +1,112 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![cfg(test)]

use rustfs_ahm::scanner::data_scanner::Scanner;
use rustfs_common::data_usage::DataUsageInfo;
use rustfs_ecstore::GLOBAL_Endpoints;
use rustfs_ecstore::bucket::metadata_sys::{BucketMetadataSys, GLOBAL_BucketMetadataSys};
use rustfs_ecstore::endpoints::EndpointServerPools;
use rustfs_ecstore::store::ECStore;
use rustfs_ecstore::store_api::{ObjectIO, PutObjReader, StorageAPI};
use std::sync::{Arc, Once};
use tempfile::TempDir;
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use tracing::Level;

/// Build a minimal single-node ECStore over a temp directory and populate objects.
async fn create_store_with_objects(count: usize) -> (TempDir, std::sync::Arc<ECStore>) {
    let temp_dir = TempDir::new().expect("temp dir");
    let root = temp_dir.path().to_string_lossy().to_string();

    // Create endpoints from the temp dir
    let (endpoint_pools, _setup) = EndpointServerPools::from_volumes("127.0.0.1:0", vec![root])
        .await
        .expect("endpoint pools");

    // Seed globals required by metadata sys if not already set
    if GLOBAL_Endpoints.get().is_none() {
        let _ = GLOBAL_Endpoints.set(endpoint_pools.clone());
    }

    let store = ECStore::new("127.0.0.1:0".parse().unwrap(), endpoint_pools, CancellationToken::new())
        .await
        .expect("create store");

    if rustfs_ecstore::global::new_object_layer_fn().is_none() {
        rustfs_ecstore::global::set_object_layer(store.clone()).await;
    }

    // Initialize metadata system before bucket operations
    if GLOBAL_BucketMetadataSys.get().is_none() {
        let mut sys = BucketMetadataSys::new(store.clone());
        sys.init(Vec::new()).await;
        let _ = GLOBAL_BucketMetadataSys.set(Arc::new(RwLock::new(sys)));
    }

    store
        .make_bucket("fallback-bucket", &rustfs_ecstore::store_api::MakeBucketOptions::default())
        .await
        .expect("make bucket");

    for i in 0..count {
        let key = format!("obj-{i:04}");
        let data = format!("payload-{i}");
        let mut reader = PutObjReader::from_vec(data.into_bytes());
        store
            .put_object("fallback-bucket", &key, &mut reader, &rustfs_ecstore::store_api::ObjectOptions::default())
            .await
            .expect("put object");
    }

    (temp_dir, store)
}

static INIT: Once = Once::new();

fn init_tracing(filter_level: Level) {
    INIT.call_once(|| {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
            .with_max_level(filter_level)
            .with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339())
            .with_thread_names(true)
            .try_init();
    });
}

#[tokio::test]
async fn fallback_builds_full_counts_over_100_objects() {
    init_tracing(Level::ERROR);
    let (_tmp, store) = create_store_with_objects(1000).await;
    let scanner = Scanner::new(None, None);

    // Directly call the fallback builder to ensure pagination works.
    let usage: DataUsageInfo = scanner.build_data_usage_from_ecstore(&store).await.expect("fallback usage");

    let bucket = usage.buckets_usage.get("fallback-bucket").expect("bucket usage present");

    assert!(
        usage.objects_total_count >= 1000,
        "total objects should be >=1000, got {}",
        usage.objects_total_count
    );
    assert!(
        bucket.objects_count >= 1000,
        "bucket objects should be >=1000, got {}",
        bucket.objects_count
    );
}
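Assuming the crate keeps the workspace's `rustfs-ahm` package name, this integration test can presumably be run on its own with `cargo test -p rustfs-ahm --test data_usage_fallback_test`.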
@@ -38,9 +38,13 @@ use walkdir::WalkDir;
 static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage>)> = OnceLock::new();
 static INIT: Once = Once::new();

-fn init_tracing() {
+pub fn init_tracing() {
     INIT.call_once(|| {
-        let _ = tracing_subscriber::fmt::try_init();
+        let _ = tracing_subscriber::fmt()
+            .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
+            .with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339())
+            .with_thread_names(true)
+            .try_init();
     });
 }

@@ -356,7 +360,7 @@ mod serial_tests {

         // Create heal manager with faster interval
         let cfg = HealConfig {
-            heal_interval: Duration::from_secs(2),
+            heal_interval: Duration::from_secs(1),
             ..Default::default()
         };
         let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
@@ -421,86 +421,86 @@ mod serial_tests {
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(lmdb_env) = GLOBAL_LMDB_ENV.get() {
|
||||
if let Some(lmdb) = GLOBAL_LMDB_DB.get() {
|
||||
let mut wtxn = lmdb_env.write_txn().unwrap();
|
||||
if let Some(lmdb_env) = GLOBAL_LMDB_ENV.get()
|
||||
&& let Some(lmdb) = GLOBAL_LMDB_DB.get()
|
||||
{
|
||||
let mut wtxn = lmdb_env.write_txn().unwrap();
|
||||
|
||||
/*if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
|
||||
if let Ok(object_info) = ecstore
|
||||
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
|
||||
.await
|
||||
{
|
||||
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
|
||||
&lc_config,
|
||||
None,
|
||||
None,
|
||||
&object_info,
|
||||
)
|
||||
.await;
|
||||
|
||||
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
|
||||
ecstore.clone(),
|
||||
&object_info,
|
||||
&event,
|
||||
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
|
||||
)
|
||||
.await;
|
||||
|
||||
expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
|
||||
}
|
||||
}*/
|
||||
|
||||
for record in records {
    if !record.usage.has_live_object {
        continue;
    }

    let object_info = convert_record_to_object_info(record);
    println!("object_info2: {object_info:?}");
    let mod_time = object_info.mod_time.unwrap_or(OffsetDateTime::now_utc());
    let expiry_time = rustfs_ecstore::bucket::lifecycle::lifecycle::expected_expiry_time(mod_time, 1);

    let version_id = if let Some(version_id) = object_info.version_id {
        version_id.to_string()
    } else {
        "zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz".to_string()
    };

    lmdb.put(
        &mut wtxn,
        &expiry_time.unix_timestamp(),
        &LifecycleContent {
            ver_no: 0,
            ver_id: version_id,
            mod_time,
            type_: LifecycleType::TransitionNoncurrent,
            object_name: object_info.name,
        },

/*if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
    if let Ok(object_info) = ecstore
        .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
        .await
    {
        let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
            &lc_config,
            None,
            None,
            &object_info,
        )
        .await;

        rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
            ecstore.clone(),
            &object_info,
            &event,
            &rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
        )
        .await;

        expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
    }
}*/

for record in records {
    if !record.usage.has_live_object {
        continue;
    }

    wtxn.commit().unwrap();
    let object_info = convert_record_to_object_info(record);
    println!("object_info2: {object_info:?}");
    let mod_time = object_info.mod_time.unwrap_or(OffsetDateTime::now_utc());
    let expiry_time = rustfs_ecstore::bucket::lifecycle::lifecycle::expected_expiry_time(mod_time, 1);

    let mut wtxn = lmdb_env.write_txn().unwrap();
    let iter = lmdb.iter_mut(&mut wtxn).unwrap();
    //let _ = unsafe { iter.del_current().unwrap() };
    for row in iter {
        if let Ok(ref elm) = row {
            let LifecycleContent {
                ver_no,
                ver_id,
                mod_time,
                type_,
                object_name,
            } = &elm.1;
            println!("cache row:{ver_no} {ver_id} {mod_time} {type_:?} {object_name}");
        }
        println!("row:{row:?}");
    }
    //drop(iter);
    wtxn.commit().unwrap();
    let version_id = if let Some(version_id) = object_info.version_id {
        version_id.to_string()
    } else {
        "zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz".to_string()
    };

    lmdb.put(
        &mut wtxn,
        &expiry_time.unix_timestamp(),
        &LifecycleContent {
            ver_no: 0,
            ver_id: version_id,
            mod_time,
            type_: LifecycleType::TransitionNoncurrent,
            object_name: object_info.name,
        },
    )
    .unwrap();
}

wtxn.commit().unwrap();

let mut wtxn = lmdb_env.write_txn().unwrap();
let iter = lmdb.iter_mut(&mut wtxn).unwrap();
//let _ = unsafe { iter.del_current().unwrap() };
for row in iter {
    if let Ok(ref elm) = row {
        let LifecycleContent {
            ver_no,
            ver_id,
            mod_time,
            type_,
            object_name,
        } = &elm.1;
        println!("cache row:{ver_no} {ver_id} {mod_time} {type_:?} {object_name}");
    }
    println!("row:{row:?}");
}
//drop(iter);
wtxn.commit().unwrap();
}

println!("Lifecycle cache test completed");
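The test drives one idea end to end: each lifecycle record is keyed by its expiry unix timestamp, so a scanner can later drain everything that is already due. A self-contained model of that index using a std BTreeMap — the Entry type and timestamps below are stand-ins, and none of the LMDB/LifecycleContent specifics from the diff are reproduced:

use std::collections::BTreeMap;
use std::time::{SystemTime, UNIX_EPOCH};

// Stand-in for the record stored per expiry entry.
#[derive(Debug, Clone)]
struct Entry {
    object_name: String,
    version_id: String,
}

fn main() {
    // Keyed by unix expiry timestamp, mirroring `expiry_time.unix_timestamp()` above.
    // A production index must also tolerate duplicate timestamps, which a plain map cannot.
    let mut index: BTreeMap<i64, Entry> = BTreeMap::new();
    index.insert(100, Entry { object_name: "a.txt".into(), version_id: "v1".into() });
    index.insert(200, Entry { object_name: "b.txt".into(), version_id: "v2".into() });

    let now = 150; // pretend current unix time; real code would read the clock
    let _ = SystemTime::now().duration_since(UNIX_EPOCH);

    // Drain every entry whose expiry key is <= now; BTreeMap keeps keys sorted.
    let due: Vec<i64> = index.range(..=now).map(|(k, _)| *k).collect();
    for k in due {
        let e = index.remove(&k).unwrap();
        println!("expire {} ({}) at {}", e.object_name, e.version_id, k);
    }
}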
@@ -415,29 +415,28 @@ mod serial_tests {
         .await;
     println!("Pending expiry tasks: {pending}");
 
-    if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
-        if let Ok(object_info) = ecstore
-            .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
-            .await
-        {
-            let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
-                &lc_config,
-                None,
-                None,
-                &object_info,
-            )
-            .await;
-
-            rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
-                ecstore.clone(),
-                &object_info,
-                &event,
-                &rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
-            )
-            .await;
-
-            expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
-        }
-    }
+    if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await
+        && let Ok(object_info) = ecstore
+            .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
+            .await
+    {
+        let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
+            &lc_config,
+            None,
+            None,
+            &object_info,
+        )
+        .await;
+
+        rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
+            ecstore.clone(),
+            &object_info,
+            &event,
+            &rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
+        )
+        .await;
+
+        expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
+    }
 
     if !expired {
@@ -550,32 +549,31 @@ mod serial_tests {
         .await;
     println!("Pending expiry tasks: {pending}");
 
-    if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
-        if let Ok(obj_info) = ecstore
-            .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
-            .await
-        {
-            let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
-                &lc_config, None, None, &obj_info,
-            )
-            .await;
-
-            rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
-                ecstore.clone(),
-                &obj_info,
-                &event,
-                &rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
-            )
-            .await;
-
-            deleted = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
-
-            if !deleted {
-                println!(
-                    "Object info: name={}, size={}, mod_time={:?}",
-                    obj_info.name, obj_info.size, obj_info.mod_time
-                );
-            }
-        }
-    }
+    if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await
+        && let Ok(obj_info) = ecstore
+            .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
+            .await
+    {
+        let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
+            &lc_config, None, None, &obj_info,
+        )
+        .await;
+
+        rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
+            ecstore.clone(),
+            &obj_info,
+            &event,
+            &rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
+        )
+        .await;
+
+        deleted = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
+
+        if !deleted {
+            println!(
+                "Object info: name={}, size={}, mod_time={:?}",
+                obj_info.name, obj_info.size, obj_info.mod_time
+            );
+        }
+    }
 }
 
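Both hunks apply the same mechanical refactor: two nested if let blocks collapse into a single let-chain, removing one level of nesting while keeping both fallible bindings in scope. A minimal before/after with stand-in functions, which compiles on a recent toolchain (let-chains are part of the Rust 2024 edition):

fn lookup_config() -> Result<u32, ()> { Ok(1) }
fn lookup_object() -> Result<&'static str, ()> { Ok("obj") }

fn main() {
    // Before: nested if-lets, one indentation level per fallible lookup.
    if let Ok(cfg) = lookup_config() {
        if let Ok(obj) = lookup_object() {
            println!("nested: {cfg} {obj}");
        }
    }

    // After: a let-chain keeps both bindings in one condition and one scope.
    if let Ok(cfg) = lookup_config()
        && let Ok(obj) = lookup_object()
    {
        println!("chained: {cfg} {obj}");
    }
}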
@@ -8,7 +8,7 @@
 
 <p align="center">
   <a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
-  <a href="https://docs.rustfs.com/en/">📖 Documentation</a>
+  <a href="https://docs.rustfs.com/">📖 Documentation</a>
   · <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
   · <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
 </p>
 
@@ -29,6 +29,7 @@ categories = ["web-programming", "development-tools", "asynchronous", "api-bindings"]
 rustfs-targets = { workspace = true }
 rustfs-config = { workspace = true, features = ["audit", "constants"] }
+rustfs-ecstore = { workspace = true }
 async-trait = { workspace = true }
 chrono = { workspace = true }
 const-str = { workspace = true }
 futures = { workspace = true }
 
crates/audit/src/factory.rs (new file, 224 lines)
@@ -0,0 +1,224 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::AuditEntry;
use async_trait::async_trait;
use hashbrown::HashSet;
use rumqttc::QoS;
use rustfs_config::audit::{AUDIT_MQTT_KEYS, AUDIT_WEBHOOK_KEYS, ENV_AUDIT_MQTT_KEYS, ENV_AUDIT_WEBHOOK_KEYS};
use rustfs_config::{
    AUDIT_DEFAULT_DIR, DEFAULT_LIMIT, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR,
    MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN, WEBHOOK_CLIENT_CERT,
    WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_LIMIT,
};
use rustfs_ecstore::config::KVS;
use rustfs_targets::{
    Target,
    error::TargetError,
    target::{mqtt::MQTTArgs, webhook::WebhookArgs},
};
use std::time::Duration;
use tracing::{debug, warn};
use url::Url;

/// Trait for creating targets from configuration
#[async_trait]
pub trait TargetFactory: Send + Sync {
    /// Creates a target from configuration
    async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError>;

    /// Validates target configuration
    fn validate_config(&self, id: &str, config: &KVS) -> Result<(), TargetError>;

    /// Returns a set of valid configuration field names for this target type.
    /// This is used to filter environment variables.
    fn get_valid_fields(&self) -> HashSet<String>;

    /// Returns a set of valid configuration env field names for this target type.
    /// This is used to filter environment variables.
    fn get_valid_env_fields(&self) -> HashSet<String>;
}

/// Factory for creating Webhook targets
pub struct WebhookTargetFactory;

#[async_trait]
impl TargetFactory for WebhookTargetFactory {
    async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
        // All config values are now read directly from the merged `config` KVS.
        let endpoint = config
            .lookup(WEBHOOK_ENDPOINT)
            .ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
        let parsed_endpoint = endpoint.trim();
        let endpoint_url = Url::parse(parsed_endpoint)
            .map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{parsed_endpoint}')")))?;

        let args = WebhookArgs {
            enable: true, // If we are here, it's already enabled.
            endpoint: endpoint_url,
            auth_token: config.lookup(WEBHOOK_AUTH_TOKEN).unwrap_or_default(),
            queue_dir: config.lookup(WEBHOOK_QUEUE_DIR).unwrap_or(AUDIT_DEFAULT_DIR.to_string()),
            queue_limit: config
                .lookup(WEBHOOK_QUEUE_LIMIT)
                .and_then(|v| v.parse::<u64>().ok())
                .unwrap_or(DEFAULT_LIMIT),
            client_cert: config.lookup(WEBHOOK_CLIENT_CERT).unwrap_or_default(),
            client_key: config.lookup(WEBHOOK_CLIENT_KEY).unwrap_or_default(),
            target_type: rustfs_targets::target::TargetType::AuditLog,
        };

        let target = rustfs_targets::target::webhook::WebhookTarget::new(id, args)?;
        Ok(Box::new(target))
    }

    fn validate_config(&self, _id: &str, config: &KVS) -> Result<(), TargetError> {
        // Validation also uses the merged `config` KVS directly.
        let endpoint = config
            .lookup(WEBHOOK_ENDPOINT)
            .ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
        debug!("endpoint: {}", endpoint);
        let parsed_endpoint = endpoint.trim();
        Url::parse(parsed_endpoint)
            .map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{parsed_endpoint}')")))?;

        let client_cert = config.lookup(WEBHOOK_CLIENT_CERT).unwrap_or_default();
        let client_key = config.lookup(WEBHOOK_CLIENT_KEY).unwrap_or_default();

        if client_cert.is_empty() != client_key.is_empty() {
            return Err(TargetError::Configuration(
                "Both client_cert and client_key must be specified together".to_string(),
            ));
        }

        let queue_dir = config.lookup(WEBHOOK_QUEUE_DIR).unwrap_or(AUDIT_DEFAULT_DIR.to_string());
        if !queue_dir.is_empty() && !std::path::Path::new(&queue_dir).is_absolute() {
            return Err(TargetError::Configuration("Webhook queue directory must be an absolute path".to_string()));
        }

        Ok(())
    }

    fn get_valid_fields(&self) -> HashSet<String> {
        AUDIT_WEBHOOK_KEYS.iter().map(|s| s.to_string()).collect()
    }

    fn get_valid_env_fields(&self) -> HashSet<String> {
        ENV_AUDIT_WEBHOOK_KEYS.iter().map(|s| s.to_string()).collect()
    }
}

/// Factory for creating MQTT targets
pub struct MQTTTargetFactory;

#[async_trait]
impl TargetFactory for MQTTTargetFactory {
    async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
        let broker = config
            .lookup(MQTT_BROKER)
            .ok_or_else(|| TargetError::Configuration("Missing MQTT broker".to_string()))?;
        let broker_url = Url::parse(&broker)
            .map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {e} (value: '{broker}')")))?;

        let topic = config
            .lookup(MQTT_TOPIC)
            .ok_or_else(|| TargetError::Configuration("Missing MQTT topic".to_string()))?;

        let args = MQTTArgs {
            enable: true, // Assumed enabled.
            broker: broker_url,
            topic,
            qos: config
                .lookup(MQTT_QOS)
                .and_then(|v| v.parse::<u8>().ok())
                .map(|q| match q {
                    0 => QoS::AtMostOnce,
                    1 => QoS::AtLeastOnce,
                    2 => QoS::ExactlyOnce,
                    _ => QoS::AtLeastOnce,
                })
                .unwrap_or(QoS::AtLeastOnce),
            username: config.lookup(MQTT_USERNAME).unwrap_or_default(),
            password: config.lookup(MQTT_PASSWORD).unwrap_or_default(),
            max_reconnect_interval: config
                .lookup(MQTT_RECONNECT_INTERVAL)
                .and_then(|v| v.parse::<u64>().ok())
                .map(Duration::from_secs)
                .unwrap_or_else(|| Duration::from_secs(5)),
            keep_alive: config
                .lookup(MQTT_KEEP_ALIVE_INTERVAL)
                .and_then(|v| v.parse::<u64>().ok())
                .map(Duration::from_secs)
                .unwrap_or_else(|| Duration::from_secs(30)),
            queue_dir: config.lookup(MQTT_QUEUE_DIR).unwrap_or(AUDIT_DEFAULT_DIR.to_string()),
            queue_limit: config
                .lookup(MQTT_QUEUE_LIMIT)
                .and_then(|v| v.parse::<u64>().ok())
                .unwrap_or(DEFAULT_LIMIT),
            target_type: rustfs_targets::target::TargetType::AuditLog,
        };

        let target = rustfs_targets::target::mqtt::MQTTTarget::new(id, args)?;
        Ok(Box::new(target))
    }

    fn validate_config(&self, _id: &str, config: &KVS) -> Result<(), TargetError> {
        let broker = config
            .lookup(MQTT_BROKER)
            .ok_or_else(|| TargetError::Configuration("Missing MQTT broker".to_string()))?;
        let url = Url::parse(&broker)
            .map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {e} (value: '{broker}')")))?;

        match url.scheme() {
            "tcp" | "ssl" | "ws" | "wss" | "mqtt" | "mqtts" => {}
            _ => {
                return Err(TargetError::Configuration("Unsupported broker URL scheme".to_string()));
            }
        }

        if config.lookup(MQTT_TOPIC).is_none() {
            return Err(TargetError::Configuration("Missing MQTT topic".to_string()));
        }

        if let Some(qos_str) = config.lookup(MQTT_QOS) {
            let qos = qos_str
                .parse::<u8>()
                .map_err(|_| TargetError::Configuration("Invalid QoS value".to_string()))?;
            if qos > 2 {
                return Err(TargetError::Configuration("QoS must be 0, 1, or 2".to_string()));
            }
        }

        let queue_dir = config.lookup(MQTT_QUEUE_DIR).unwrap_or_default();
        if !queue_dir.is_empty() {
            if !std::path::Path::new(&queue_dir).is_absolute() {
                return Err(TargetError::Configuration("MQTT queue directory must be an absolute path".to_string()));
            }
            if let Some(qos_str) = config.lookup(MQTT_QOS)
                && qos_str == "0"
            {
                warn!("Using queue_dir with QoS 0 may result in event loss");
            }
        }

        Ok(())
    }

    fn get_valid_fields(&self) -> HashSet<String> {
        AUDIT_MQTT_KEYS.iter().map(|s| s.to_string()).collect()
    }

    fn get_valid_env_fields(&self) -> HashSet<String> {
        ENV_AUDIT_MQTT_KEYS.iter().map(|s| s.to_string()).collect()
    }
}
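factory.rs is a classic object-safe factory setup: each backend implements TargetFactory, validation runs before construction, and a registry keyed by type name does the dispatch. A self-contained sketch of the same shape — the Target, KVS, and factory names here are simplified stand-ins, not the rustfs types:

use std::collections::HashMap;

// Stand-ins for the crate's Target / KVS / TargetError types.
type KVS = HashMap<String, String>;

trait Target {
    fn send(&self, line: &str);
}

trait TargetFactory {
    fn create(&self, id: &str, config: &KVS) -> Result<Box<dyn Target>, String>;
}

struct ConsoleTarget(String);
impl Target for ConsoleTarget {
    fn send(&self, line: &str) {
        println!("[{}] {line}", self.0);
    }
}

struct ConsoleFactory;
impl TargetFactory for ConsoleFactory {
    fn create(&self, id: &str, config: &KVS) -> Result<Box<dyn Target>, String> {
        // Validate first, then construct — the same order the audit factories use.
        let prefix = config.get("prefix").ok_or("missing prefix")?;
        Ok(Box::new(ConsoleTarget(format!("{prefix}:{id}"))))
    }
}

fn main() {
    let mut factories: HashMap<&str, Box<dyn TargetFactory>> = HashMap::new();
    factories.insert("console", Box::new(ConsoleFactory));

    let mut cfg = KVS::new();
    cfg.insert("prefix".into(), "audit".into());

    let target = factories["console"].create("primary", &cfg).unwrap();
    target.send("hello");
}

Keeping the factories behind a trait object means new backends can be registered without touching the creation loop — exactly what the registry rewrite below relies on.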
@@ -20,6 +20,7 @@
 
 pub mod entity;
 pub mod error;
+pub mod factory;
 pub mod global;
 pub mod observability;
 pub mod registry;
 
@@ -12,29 +12,27 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use crate::{AuditEntry, AuditError, AuditResult};
-use futures::{StreamExt, stream::FuturesUnordered};
+use crate::{
+    AuditEntry, AuditError, AuditResult,
+    factory::{MQTTTargetFactory, TargetFactory, WebhookTargetFactory},
+};
+use futures::StreamExt;
+use futures::stream::FuturesUnordered;
 use hashbrown::{HashMap, HashSet};
-use rustfs_config::{
-    DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR,
-    MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN, WEBHOOK_BATCH_SIZE,
-    WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_HTTP_TIMEOUT, WEBHOOK_MAX_RETRY, WEBHOOK_QUEUE_DIR,
-    WEBHOOK_QUEUE_LIMIT, WEBHOOK_RETRY_INTERVAL, audit::AUDIT_ROUTE_PREFIX,
-};
+use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, EnableState, audit::AUDIT_ROUTE_PREFIX};
 use rustfs_ecstore::config::{Config, KVS};
-use rustfs_targets::{
-    Target, TargetError,
-    target::{ChannelTargetType, TargetType, mqtt::MQTTArgs, webhook::WebhookArgs},
-};
+use rustfs_targets::arn::TargetID;
+use rustfs_targets::{Target, TargetError, target::ChannelTargetType};
+use std::str::FromStr;
 use std::sync::Arc;
-use std::time::Duration;
 use tracing::{debug, error, info, warn};
-use url::Url;
 
 /// Registry for managing audit targets
 pub struct AuditRegistry {
     /// Storage for created targets
     targets: HashMap<String, Box<dyn Target<AuditEntry> + Send + Sync>>,
+    /// Factories for creating targets
+    factories: HashMap<String, Box<dyn TargetFactory>>,
 }
 
 impl Default for AuditRegistry {
@@ -46,162 +44,206 @@ impl Default for AuditRegistry {
 impl AuditRegistry {
     /// Creates a new AuditRegistry
     pub fn new() -> Self {
-        Self { targets: HashMap::new() }
+        let mut registry = AuditRegistry {
+            factories: HashMap::new(),
+            targets: HashMap::new(),
+        };
+
+        // Register built-in factories
+        registry.register(ChannelTargetType::Webhook.as_str(), Box::new(WebhookTargetFactory));
+        registry.register(ChannelTargetType::Mqtt.as_str(), Box::new(MQTTTargetFactory));
+
+        registry
     }
 
-    /// Creates all audit targets from system configuration and environment variables.
+    /// Registers a new factory for a target type
+    ///
+    /// # Arguments
+    /// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
+    /// * `factory` - The factory instance to create targets of this type.
+    pub fn register(&mut self, target_type: &str, factory: Box<dyn TargetFactory>) {
+        self.factories.insert(target_type.to_string(), factory);
+    }
+
+    /// Creates a target of the specified type with the given ID and configuration
+    ///
+    /// # Arguments
+    /// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
+    /// * `id` - The identifier for the target instance.
+    /// * `config` - The configuration key-value store for the target.
+    ///
+    /// # Returns
+    /// * `Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError>` - The created target or an error.
+    pub async fn create_target(
+        &self,
+        target_type: &str,
+        id: String,
+        config: &KVS,
+    ) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
+        let factory = self
+            .factories
+            .get(target_type)
+            .ok_or_else(|| TargetError::Configuration(format!("Unknown target type: {target_type}")))?;
+
+        // Validate configuration before creating target
+        factory.validate_config(&id, config)?;
+
+        // Create target
+        factory.create_target(id, config).await
+    }
 
-    /// Creates all targets from a configuration
+    /// Create all notification targets from system configuration and environment variables.
     /// This method processes the creation of each target concurrently as follows:
-    /// 1. Iterate through supported target types (webhook, mqtt).
-    /// 2. For each type, resolve its configuration from file and environment variables.
+    /// 1. Iterate through all registered target types (e.g. webhook, mqtt).
+    /// 2. For each type, resolve its configuration from the configuration file and environment variables.
     /// 3. Identify all target instance IDs that need to be created.
-    /// 4. Merge configurations with precedence: ENV > file instance > file default.
-    /// 5. Create async tasks for enabled instances.
-    /// 6. Execute tasks concurrently and collect successful targets.
-    /// 7. Persist successful configurations back to system storage.
-    pub async fn create_targets_from_config(
-        &mut self,
+    /// 4. Combine the default, file, and environment variable configuration for each instance.
+    /// 5. If the instance is enabled, create an asynchronous task to instantiate it.
+    /// 6. Execute all creation tasks concurrently and collect the results.
+    pub async fn create_audit_targets_from_config(
+        &self,
         config: &Config,
     ) -> AuditResult<Vec<Box<dyn Target<AuditEntry> + Send + Sync>>> {
         // Collect only environment variables with the relevant prefix to reduce memory usage
         let all_env: Vec<(String, String)> = std::env::vars().filter(|(key, _)| key.starts_with(ENV_PREFIX)).collect();
 
         // A collection of asynchronous tasks for concurrently executing target creation
         let mut tasks = FuturesUnordered::new();
-        // let final_config = config.clone();
 
+        // let final_config = config.clone(); // Clone a configuration for aggregating the final result
         // Record the defaults for each segment so that the segment can eventually be rebuilt
         let mut section_defaults: HashMap<String, KVS> = HashMap::new();
 
-        // Supported target types for audit
-        let target_types = vec![ChannelTargetType::Webhook.as_str(), ChannelTargetType::Mqtt.as_str()];
-
-        // 1. Traverse all target types and process them
-        for target_type in target_types {
-            let span = tracing::Span::current();
-            span.record("target_type", target_type);
-            info!(target_type = %target_type, "Starting audit target type processing");
+        // 1. Traverse all registered factories and process them by target type
+        for (target_type, factory) in &self.factories {
+            tracing::Span::current().record("target_type", target_type.as_str());
+            info!("Starting to process target type...");
 
             // 2. Prepare the configuration source
             // 2.1. Get the configuration segment in the file, e.g. 'audit_webhook'
             let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase();
             let file_configs = config.0.get(&section_name).cloned().unwrap_or_default();
             // 2.2. Get the default configuration for that type
             let default_cfg = file_configs.get(DEFAULT_DELIMITER).cloned().unwrap_or_default();
-            debug!(?default_cfg, "Retrieved default configuration");
+            debug!(?default_cfg, "Got the default configuration");
 
             // Save defaults for eventual write back
             section_defaults.insert(section_name.clone(), default_cfg.clone());
 
-            // Get valid fields for the target type
-            let valid_fields = match target_type {
-                "webhook" => get_webhook_valid_fields(),
-                "mqtt" => get_mqtt_valid_fields(),
-                _ => {
-                    warn!(target_type = %target_type, "Unknown target type, skipping");
-                    continue;
-                }
-            };
-            debug!(?valid_fields, "Retrieved valid configuration fields");
+            // *** Optimization point 1: get all legitimate fields of the current target type ***
+            let valid_fields = factory.get_valid_fields();
+            debug!(?valid_fields, "Got the legitimate configuration fields");
 
-            // 3. Resolve instance IDs and configuration overrides from environment variables
             let mut instance_ids_from_env = HashSet::new();
-            let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new();
-
-            for (env_key, env_value) in &all_env {
-                let audit_prefix = format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}").to_uppercase();
-                if !env_key.starts_with(&audit_prefix) {
-                    continue;
-                }
-
-                let suffix = &env_key[audit_prefix.len()..];
-                if suffix.is_empty() {
-                    continue;
-                }
-
-                // Parse field and instance from suffix (FIELD_INSTANCE or FIELD)
-                let (field_name, instance_id) = if let Some(last_underscore) = suffix.rfind('_') {
-                    let potential_field = &suffix[1..last_underscore]; // Skip leading _
-                    let potential_instance = &suffix[last_underscore + 1..];
-
-                    // Check if the part before the last underscore is a valid field
-                    if valid_fields.contains(&potential_field.to_lowercase()) {
-                        (potential_field.to_lowercase(), potential_instance.to_lowercase())
-                    } else {
-                        // Treat the entire suffix as field name with default instance
-                        (suffix[1..].to_lowercase(), DEFAULT_DELIMITER.to_string())
-                    }
-                } else {
-                    // No underscore, treat as field with default instance
-                    (suffix[1..].to_lowercase(), DEFAULT_DELIMITER.to_string())
-                };
-
-                if valid_fields.contains(&field_name) {
-                    if instance_id != DEFAULT_DELIMITER {
-                        instance_ids_from_env.insert(instance_id.clone());
-                    }
-                    env_overrides
-                        .entry(instance_id)
-                        .or_default()
-                        .insert(field_name, env_value.clone());
-                } else {
-                    debug!(
-                        env_key = %env_key,
-                        field_name = %field_name,
-                        "Ignoring environment variable field not found in valid fields for target type {}",
-                        target_type
-                    );
-                }
-            }
-            debug!(?env_overrides, "Completed environment variable analysis");
+            // 3.1. Instance discovery: based on the '..._ENABLE_INSTANCEID' format
+            let enable_prefix =
+                format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}{ENABLE_KEY}{DEFAULT_DELIMITER}")
+                    .to_uppercase();
+            for (key, value) in &all_env {
+                if EnableState::from_str(value).ok().map(|s| s.is_enabled()).unwrap_or(false)
+                    && let Some(id) = key.strip_prefix(&enable_prefix)
+                    && !id.is_empty()
+                {
+                    instance_ids_from_env.insert(id.to_lowercase());
+                }
+            }
 
+            // 3.2. Parse all relevant environment variable configurations
+            // 3.2.1. Build an environment variable prefix such as 'RUSTFS_AUDIT_WEBHOOK_'
+            let env_prefix = format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}").to_uppercase();
+            // 3.2.2. 'env_overrides' stores configurations parsed from environment variables in the format: {instance id -> {field -> value}}
+            let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new();
+            for (key, value) in &all_env {
+                if let Some(rest) = key.strip_prefix(&env_prefix) {
+                    // Use rsplitn to split from the right side to properly extract the INSTANCE_ID at the end
+                    // Format: <FIELD_NAME>_<INSTANCE_ID> or <FIELD_NAME>
+                    let mut parts = rest.rsplitn(2, DEFAULT_DELIMITER);
+
+                    // The first part from the right is INSTANCE_ID
+                    let instance_id_part = parts.next().unwrap_or(DEFAULT_DELIMITER);
+                    // The remaining part is FIELD_NAME
+                    let field_name_part = parts.next();
+
+                    let (field_name, instance_id) = match field_name_part {
+                        // Case 1: The format is <FIELD_NAME>_<INSTANCE_ID>
+                        // e.g., rest = "ENDPOINT_PRIMARY" -> field_name="ENDPOINT", instance_id="PRIMARY"
+                        Some(field) => (field.to_lowercase(), instance_id_part.to_lowercase()),
+                        // Case 2: The format is <FIELD_NAME> (without INSTANCE_ID)
+                        // e.g., rest = "ENABLE" -> field_name="ENABLE", instance_id = DEFAULT_DELIMITER (universal configuration)
+                        None => (instance_id_part.to_lowercase(), DEFAULT_DELIMITER.to_string()),
+                    };
+
+                    // *** Optimization point 2: verify whether the parsed field_name is legal ***
+                    if !field_name.is_empty() && valid_fields.contains(&field_name) {
+                        debug!(
+                            instance_id = %if instance_id.is_empty() { DEFAULT_DELIMITER } else { &instance_id },
+                            %field_name,
+                            %value,
+                            "Parsed environment variable"
+                        );
+                        env_overrides
+                            .entry(instance_id)
+                            .or_default()
+                            .insert(field_name, value.clone());
+                    } else {
+                        // Ignore illegal field names
+                        warn!(
+                            field_name = %field_name,
+                            "Ignoring environment variable field; not found in the list of valid fields for target type {}",
+                            target_type
+                        );
+                    }
+                }
+            }
+            debug!(?env_overrides, "Completed environment variable analysis");
 
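The rsplitn parsing above is the subtle part: field names may themselves contain the `_` delimiter (AUTH_TOKEN), so the split must come from the right and fall back when the left part is not a known field. A standalone model of that rule — simplified relative to the registry code, with hypothetical field names:

// Splits "ENDPOINT_PRIMARY" into ("endpoint", "primary") and "ENABLE" into ("enable", "_"),
// mirroring the <FIELD_NAME>[_<INSTANCE_ID>] rule described above ("_" is the default instance).
fn split_field_instance(rest: &str, valid_fields: &[&str]) -> (String, String) {
    let mut parts = rest.rsplitn(2, '_');
    let instance_part = parts.next().unwrap_or("_");
    match parts.next() {
        Some(field) if valid_fields.contains(&field.to_lowercase().as_str()) => {
            (field.to_lowercase(), instance_part.to_lowercase())
        }
        // No underscore, or the left part is not a known field: treat the whole
        // suffix as the field name and assign it to the default instance "_".
        _ => (rest.to_lowercase(), "_".to_string()),
    }
}

fn main() {
    let valid = ["endpoint", "enable", "auth_token"];
    assert_eq!(split_field_instance("ENDPOINT_PRIMARY", &valid), ("endpoint".into(), "primary".into()));
    assert_eq!(split_field_instance("ENABLE", &valid), ("enable".into(), "_".into()));
    // "AUTH_TOKEN" contains the delimiter itself, so splitting from the right matters:
    assert_eq!(split_field_instance("AUTH_TOKEN", &valid), ("auth_token".into(), "_".into()));
    println!("ok");
}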
             // 4. Determine all instance IDs that need to be processed
             let mut all_instance_ids: HashSet<String> =
                 file_configs.keys().filter(|k| *k != DEFAULT_DELIMITER).cloned().collect();
             all_instance_ids.extend(instance_ids_from_env);
-            debug!(?all_instance_ids, "Determined all instance IDs");
+            debug!(?all_instance_ids, "Determined all instance IDs");
 
             // 5. Merge configurations and create tasks for each instance
             for id in all_instance_ids {
-                // 5.1. Merge configuration, priority: Environment variables > File instance > File default
+                // 5.1. Merge configuration, priority: environment variables > file instance configuration > file default configuration
                 let mut merged_config = default_cfg.clone();
 
-                // Apply file instance configuration if available
+                // Apply instance-specific configuration from the file
                 if let Some(file_instance_cfg) = file_configs.get(&id) {
                     merged_config.extend(file_instance_cfg.clone());
                 }
 
-                // Apply environment variable overrides
+                // Apply instance-specific environment variable configuration
                 if let Some(env_instance_cfg) = env_overrides.get(&id) {
                     // Convert HashMap<String, String> to KVS
                     let mut kvs_from_env = KVS::new();
                     for (k, v) in env_instance_cfg {
                         kvs_from_env.insert(k.clone(), v.clone());
                     }
                     merged_config.extend(kvs_from_env);
                 }
                 debug!(instance_id = %id, ?merged_config, "Completed configuration merge");
 
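The three-layer precedence — environment variables over the file's per-instance section over file defaults — falls out of extending the same map three times in that order, since later extends overwrite earlier keys. A minimal model with plain HashMaps:

use std::collections::HashMap;

fn main() {
    // Stand-in for KVS: later extends win, giving ENV > file instance > file default.
    let mut merged: HashMap<&str, &str> = HashMap::from([("endpoint", "http://default"), ("enable", "off")]);
    let file_instance = HashMap::from([("enable", "on")]);
    let env_overrides = HashMap::from([("endpoint", "http://from-env")]);

    merged.extend(file_instance);
    merged.extend(env_overrides);

    assert_eq!(merged["endpoint"], "http://from-env"); // env wins over the default
    assert_eq!(merged["enable"], "on");                // file instance wins over the default
    println!("{merged:?}");
}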
                 // 5.2. Check if the instance is enabled
                 let enabled = merged_config
                     .lookup(ENABLE_KEY)
-                    .map(|v| parse_enable_value(&v))
+                    .map(|v| {
+                        EnableState::from_str(v.as_str())
+                            .ok()
+                            .map(|s| s.is_enabled())
+                            .unwrap_or(false)
+                    })
                     .unwrap_or(false);
 
                 if enabled {
-                    info!(instance_id = %id, "Creating audit target");
-
-                    // Create task for concurrent execution
-                    let target_type_clone = target_type.to_string();
-                    let id_clone = id.clone();
-                    let merged_config_arc = Arc::new(merged_config.clone());
-                    let task = tokio::spawn(async move {
-                        let result = create_audit_target(&target_type_clone, &id_clone, &merged_config_arc).await;
-                        (target_type_clone, id_clone, result, merged_config_arc)
+                    info!(instance_id = %id, "Target is enabled, ready to create a task");
+                    // 5.3. Create asynchronous tasks for enabled instances
+                    let target_type_clone = target_type.clone();
+                    let tid = id.clone();
+                    let merged_config_arc = Arc::new(merged_config);
+                    tasks.push(async move {
+                        let result = factory.create_target(tid.clone(), &merged_config_arc).await;
+                        (target_type_clone, tid, result, Arc::clone(&merged_config_arc))
                     });
-
-                    tasks.push(task);
-
-                    // Update final config with successful instance
-                    // final_config.0.entry(section_name.clone()).or_default().insert(id, merged_config);
                 } else {
-                    info!(instance_id = %id, "Skipping disabled audit target, will be removed from final configuration");
+                    info!(instance_id = %id, "Skipping the disabled target; it will be removed from the final configuration");
                     // Remove disabled target from final configuration
                     // final_config.0.entry(section_name.clone()).or_default().remove(&id);
                 }
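Pushing plain async blocks into a FuturesUnordered — instead of tokio::spawn handles — is what lets the new code borrow `factory` inside each task and drop the JoinError layer from the result type. A runnable miniature of the push/collect loop (assumes the tokio and futures crates; the tuple payload is a stand-in):

use futures::stream::{FuturesUnordered, StreamExt};

#[tokio::main]
async fn main() {
    let mut tasks = FuturesUnordered::new();
    for id in ["primary", "backup"] {
        tasks.push(async move {
            // Stand-in for factory.create_target(id, &config).await
            (id, Result::<&str, &str>::Ok("target"))
        });
    }

    // Results arrive in completion order, not submission order.
    while let Some((id, result)) = tasks.next().await {
        match result {
            Ok(t) => println!("{id}: created {t}"),
            Err(e) => eprintln!("{id}: failed: {e}"),
        }
    }
}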
@@ -211,30 +253,28 @@ impl AuditRegistry {
         // 6. Concurrently execute all creation tasks and collect results
         let mut successful_targets = Vec::new();
         let mut successful_configs = Vec::new();
-        while let Some(task_result) = tasks.next().await {
-            match task_result {
-                Ok((target_type, id, result, kvs_arc)) => match result {
-                    Ok(target) => {
-                        info!(target_type = %target_type, instance_id = %id, "Created audit target successfully");
-                        successful_targets.push(target);
-                        successful_configs.push((target_type, id, kvs_arc));
-                    }
-                    Err(e) => {
-                        error!(target_type = %target_type, instance_id = %id, error = %e, "Failed to create audit target");
-                    }
-                },
-                Err(e) => {
-                    error!(error = %e, "Task execution failed");
-                }
-            }
-        }
+        while let Some((target_type, id, result, final_config)) = tasks.next().await {
+            match result {
+                Ok(target) => {
+                    info!(target_type = %target_type, instance_id = %id, "Created target successfully");
+                    successful_targets.push(target);
+                    successful_configs.push((target_type, id, final_config));
+                }
+                Err(e) => {
+                    error!(target_type = %target_type, instance_id = %id, error = %e, "Failed to create target");
+                }
+            }
+        }
 
-        // Rebuild the sections from "default items + successful instances" and overwrite on write-back, so that deleted/disabled instances are not "resurrected"
+        // 7. Aggregate the new configuration and write it back to the system configuration
         if !successful_configs.is_empty() || !section_defaults.is_empty() {
-            info!("Preparing to rebuild and save target configurations to the system configuration...");
+            info!(
+                "Preparing to update {} successfully created target configurations in the system configuration...",
+                successful_configs.len()
+            );
 
             // Aggregate successful instances into segments
             let mut successes_by_section: HashMap<String, HashMap<String, KVS>> = HashMap::new();
 
             for (target_type, id, kvs) in successful_configs {
                 let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase();
                 successes_by_section
@@ -244,76 +284,99 @@ impl AuditRegistry {
             }
 
             let mut new_config = config.clone();
 
             // Collection of segments that need to be processed: collect all segments with default items or successful instances
             let mut sections: HashSet<String> = HashSet::new();
             sections.extend(section_defaults.keys().cloned());
             sections.extend(successes_by_section.keys().cloned());
 
-            for section_name in sections {
+            for section in sections {
                 let mut section_map: std::collections::HashMap<String, KVS> = std::collections::HashMap::new();
 
-                // The default entry (if present) is written back to `_`
-                if let Some(default_cfg) = section_defaults.get(&section_name) {
-                    if !default_cfg.is_empty() {
-                        section_map.insert(DEFAULT_DELIMITER.to_string(), default_cfg.clone());
-                    }
+                // Add the default item
+                if let Some(default_kvs) = section_defaults.get(&section)
+                    && !default_kvs.is_empty()
+                {
+                    section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone());
                 }
 
-                // Successful instance write back
-                if let Some(instances) = successes_by_section.get(&section_name) {
+                // Add successful instance items
+                if let Some(instances) = successes_by_section.get(&section) {
                     for (id, kvs) in instances {
                         section_map.insert(id.clone(), kvs.clone());
                     }
                 }
 
-                // Empty segments are removed and non-empty segments are replaced as a whole.
+                // Empty sections are removed and non-empty sections are replaced entirely.
                 if section_map.is_empty() {
-                    new_config.0.remove(&section_name);
+                    new_config.0.remove(&section);
                 } else {
-                    new_config.0.insert(section_name, section_map);
+                    new_config.0.insert(section, section_map);
                 }
             }
 
             // 7. Save the new configuration to the system
-            let Some(store) = rustfs_ecstore::new_object_layer_fn() else {
+            let Some(store) = rustfs_ecstore::global::new_object_layer_fn() else {
                 return Err(AuditError::StorageNotAvailable(
                     "Failed to save target configuration: server storage not initialized".to_string(),
                 ));
             };
 
             match rustfs_ecstore::config::com::save_server_config(store, &new_config).await {
-                Ok(_) => info!("New audit configuration saved to system successfully"),
+                Ok(_) => {
+                    info!("The new configuration was saved to the system successfully.")
+                }
                 Err(e) => {
-                    error!(error = %e, "Failed to save new audit configuration");
+                    error!("Failed to save the new configuration: {}", e);
                     return Err(AuditError::SaveConfig(Box::new(e)));
                 }
             }
         }
 
         info!(count = successful_targets.len(), "All target processing completed");
         Ok(successful_targets)
     }
 
     /// Adds a target to the registry
     ///
     /// # Arguments
     /// * `id` - The identifier for the target.
     /// * `target` - The target instance to be added.
     pub fn add_target(&mut self, id: String, target: Box<dyn Target<AuditEntry> + Send + Sync>) {
         self.targets.insert(id, target);
     }
 
     /// Removes a target from the registry
     ///
     /// # Arguments
     /// * `id` - The identifier for the target to be removed.
     ///
     /// # Returns
     /// * `Option<Box<dyn Target<AuditEntry> + Send + Sync>>` - The removed target if it existed.
     pub fn remove_target(&mut self, id: &str) -> Option<Box<dyn Target<AuditEntry> + Send + Sync>> {
         self.targets.remove(id)
     }
 
     /// Gets a target from the registry
     ///
     /// # Arguments
     /// * `id` - The identifier for the target to be retrieved.
     ///
     /// # Returns
     /// * `Option<&(dyn Target<AuditEntry> + Send + Sync)>` - The target if it exists.
     pub fn get_target(&self, id: &str) -> Option<&(dyn Target<AuditEntry> + Send + Sync)> {
         self.targets.get(id).map(|t| t.as_ref())
     }
 
     /// Lists all target IDs
     ///
     /// # Returns
     /// * `Vec<String>` - A vector of all target IDs in the registry.
     pub fn list_targets(&self) -> Vec<String> {
         self.targets.keys().cloned().collect()
     }
 
     /// Closes all targets and clears the registry
     ///
     /// # Returns
     /// * `AuditResult<()>` - Result indicating success or failure.
     pub async fn close_all(&mut self) -> AuditResult<()> {
         let mut errors = Vec::new();
 
@@ -330,153 +393,80 @@ impl AuditRegistry {
 
         Ok(())
     }
-}
-
-/// Creates an audit target based on type and configuration
-async fn create_audit_target(
-    target_type: &str,
-    id: &str,
-    config: &KVS,
-) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
-    match target_type {
-        val if val == ChannelTargetType::Webhook.as_str() => {
-            let args = parse_webhook_args(id, config)?;
-            let target = rustfs_targets::target::webhook::WebhookTarget::new(id.to_string(), args)?;
-            Ok(Box::new(target))
-        }
-        val if val == ChannelTargetType::Mqtt.as_str() => {
-            let args = parse_mqtt_args(id, config)?;
-            let target = rustfs_targets::target::mqtt::MQTTTarget::new(id.to_string(), args)?;
-            Ok(Box::new(target))
-        }
-        _ => Err(TargetError::Configuration(format!("Unknown target type: {target_type}"))),
-    }
-}
-
-/// Gets valid field names for webhook configuration
-fn get_webhook_valid_fields() -> HashSet<String> {
-    vec![
-        ENABLE_KEY.to_string(),
-        WEBHOOK_ENDPOINT.to_string(),
-        WEBHOOK_AUTH_TOKEN.to_string(),
-        WEBHOOK_CLIENT_CERT.to_string(),
-        WEBHOOK_CLIENT_KEY.to_string(),
-        WEBHOOK_BATCH_SIZE.to_string(),
-        WEBHOOK_QUEUE_LIMIT.to_string(),
-        WEBHOOK_QUEUE_DIR.to_string(),
-        WEBHOOK_MAX_RETRY.to_string(),
-        WEBHOOK_RETRY_INTERVAL.to_string(),
-        WEBHOOK_HTTP_TIMEOUT.to_string(),
-    ]
-    .into_iter()
-    .collect()
-}
-
-/// Gets valid field names for MQTT configuration
-fn get_mqtt_valid_fields() -> HashSet<String> {
-    vec![
-        ENABLE_KEY.to_string(),
-        MQTT_BROKER.to_string(),
-        MQTT_TOPIC.to_string(),
-        MQTT_USERNAME.to_string(),
-        MQTT_PASSWORD.to_string(),
-        MQTT_QOS.to_string(),
-        MQTT_KEEP_ALIVE_INTERVAL.to_string(),
-        MQTT_RECONNECT_INTERVAL.to_string(),
-        MQTT_QUEUE_DIR.to_string(),
-        MQTT_QUEUE_LIMIT.to_string(),
-    ]
-    .into_iter()
-    .collect()
-}
-
-/// Parses webhook arguments from KVS configuration
-fn parse_webhook_args(_id: &str, config: &KVS) -> Result<WebhookArgs, TargetError> {
-    let endpoint = config
-        .lookup(WEBHOOK_ENDPOINT)
-        .filter(|s| !s.is_empty())
-        .ok_or_else(|| TargetError::Configuration("webhook endpoint is required".to_string()))?;
-
-    let endpoint_url =
-        Url::parse(&endpoint).map_err(|e| TargetError::Configuration(format!("invalid webhook endpoint URL: {e}")))?;
-
-    let args = WebhookArgs {
-        enable: true, // Already validated as enabled
-        endpoint: endpoint_url,
-        auth_token: config.lookup(WEBHOOK_AUTH_TOKEN).unwrap_or_default(),
-        queue_dir: config.lookup(WEBHOOK_QUEUE_DIR).unwrap_or_default(),
-        queue_limit: config
-            .lookup(WEBHOOK_QUEUE_LIMIT)
-            .and_then(|s| s.parse().ok())
-            .unwrap_or(100000),
-        client_cert: config.lookup(WEBHOOK_CLIENT_CERT).unwrap_or_default(),
-        client_key: config.lookup(WEBHOOK_CLIENT_KEY).unwrap_or_default(),
-        target_type: TargetType::AuditLog,
-    };
-
-    args.validate()?;
-    Ok(args)
-}
-
-/// Parses MQTT arguments from KVS configuration
-fn parse_mqtt_args(_id: &str, config: &KVS) -> Result<MQTTArgs, TargetError> {
-    let broker = config
-        .lookup(MQTT_BROKER)
-        .filter(|s| !s.is_empty())
-        .ok_or_else(|| TargetError::Configuration("MQTT broker is required".to_string()))?;
-
-    let broker_url = Url::parse(&broker).map_err(|e| TargetError::Configuration(format!("invalid MQTT broker URL: {e}")))?;
-
-    let topic = config
-        .lookup(MQTT_TOPIC)
-        .filter(|s| !s.is_empty())
-        .ok_or_else(|| TargetError::Configuration("MQTT topic is required".to_string()))?;
-
-    let qos = config
-        .lookup(MQTT_QOS)
-        .and_then(|s| s.parse::<u8>().ok())
-        .and_then(|q| match q {
-            0 => Some(rumqttc::QoS::AtMostOnce),
-            1 => Some(rumqttc::QoS::AtLeastOnce),
-            2 => Some(rumqttc::QoS::ExactlyOnce),
-            _ => None,
-        })
-        .unwrap_or(rumqttc::QoS::AtLeastOnce);
-
-    let args = MQTTArgs {
-        enable: true, // Already validated as enabled
-        broker: broker_url,
-        topic,
-        qos,
-        username: config.lookup(MQTT_USERNAME).unwrap_or_default(),
-        password: config.lookup(MQTT_PASSWORD).unwrap_or_default(),
-        max_reconnect_interval: parse_duration(&config.lookup(MQTT_RECONNECT_INTERVAL).unwrap_or_else(|| "5s".to_string()))
-            .unwrap_or(Duration::from_secs(5)),
-        keep_alive: parse_duration(&config.lookup(MQTT_KEEP_ALIVE_INTERVAL).unwrap_or_else(|| "60s".to_string()))
-            .unwrap_or(Duration::from_secs(60)),
-        queue_dir: config.lookup(MQTT_QUEUE_DIR).unwrap_or_default(),
-        queue_limit: config.lookup(MQTT_QUEUE_LIMIT).and_then(|s| s.parse().ok()).unwrap_or(100000),
-        target_type: TargetType::AuditLog,
-    };
-
-    args.validate()?;
-    Ok(args)
-}
-
-/// Parses enable value from string
-fn parse_enable_value(value: &str) -> bool {
-    matches!(value.to_lowercase().as_str(), "1" | "on" | "true" | "yes")
-}
-
-/// Parses duration from string (e.g., "3s", "5m")
-fn parse_duration(s: &str) -> Option<Duration> {
-    if let Some(stripped) = s.strip_suffix('s') {
-        stripped.parse::<u64>().ok().map(Duration::from_secs)
-    } else if let Some(stripped) = s.strip_suffix('m') {
-        stripped.parse::<u64>().ok().map(|m| Duration::from_secs(m * 60))
-    } else if let Some(stripped) = s.strip_suffix("ms") {
-        stripped.parse::<u64>().ok().map(Duration::from_millis)
-    } else {
-        s.parse::<u64>().ok().map(Duration::from_secs)
-    }
-}
+    /// Creates a unique key for a target based on its type and ID
+    ///
+    /// # Arguments
+    /// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
+    /// * `target_id` - The identifier for the target instance.
+    ///
+    /// # Returns
+    /// * `String` - The unique key for the target.
+    pub fn create_key(&self, target_type: &str, target_id: &str) -> String {
+        let key = TargetID::new(target_id.to_string(), target_type.to_string());
+        info!(target_type = %target_type, "Create key for {}", key);
+        key.to_string()
+    }
+
+    /// Enables a target (placeholder, assumes target exists)
+    ///
+    /// # Arguments
+    /// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
+    /// * `target_id` - The identifier for the target instance.
+    ///
+    /// # Returns
+    /// * `AuditResult<()>` - Result indicating success or failure.
+    pub fn enable_target(&self, target_type: &str, target_id: &str) -> AuditResult<()> {
+        let key = self.create_key(target_type, target_id);
+        if self.get_target(&key).is_some() {
+            info!("Target {}-{} enabled", target_type, target_id);
+            Ok(())
+        } else {
+            Err(AuditError::Configuration(
+                format!("Target not found: {}-{}", target_type, target_id),
+                None,
+            ))
+        }
+    }
+
+    /// Disables a target (placeholder, assumes target exists)
+    ///
+    /// # Arguments
+    /// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
+    /// * `target_id` - The identifier for the target instance.
+    ///
+    /// # Returns
+    /// * `AuditResult<()>` - Result indicating success or failure.
+    pub fn disable_target(&self, target_type: &str, target_id: &str) -> AuditResult<()> {
+        let key = self.create_key(target_type, target_id);
+        if self.get_target(&key).is_some() {
+            info!("Target {}-{} disabled", target_type, target_id);
+            Ok(())
+        } else {
+            Err(AuditError::Configuration(
+                format!("Target not found: {}-{}", target_type, target_id),
+                None,
+            ))
+        }
+    }
+
+    /// Upserts a target into the registry
+    ///
+    /// # Arguments
+    /// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
+    /// * `target_id` - The identifier for the target instance.
+    /// * `target` - The target instance to be upserted.
+    ///
+    /// # Returns
+    /// * `AuditResult<()>` - Result indicating success or failure.
+    pub fn upsert_target(
+        &mut self,
+        target_type: &str,
+        target_id: &str,
+        target: Box<dyn Target<AuditEntry> + Send + Sync>,
+    ) -> AuditResult<()> {
+        let key = self.create_key(target_type, target_id);
+        self.targets.insert(key, target);
+        Ok(())
+    }
+}
 
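One detail worth noting about the removed parse_duration helper: it tested the 's' suffix before "ms", and since every "ms" string also ends in 's', a value like "100ms" stripped to "100m", failed to parse as an integer, and returned None — the "ms" arm was unreachable. Centralizing interval parsing in the factories sidesteps the trap. A small demonstration of the branch-order bug (stand-in function, not the removed code verbatim):

fn parse_duration_secs_first(s: &str) -> Option<u64> {
    // Mirrors the removed helper's branch order: 's' is tested before "ms".
    if let Some(n) = s.strip_suffix('s') {
        n.parse::<u64>().ok() // "100ms" reaches here as "100m", which fails to parse
    } else {
        None
    }
}

fn main() {
    assert_eq!(parse_duration_secs_first("5s"), Some(5));
    assert_eq!(parse_duration_secs_first("100ms"), None); // the "ms" arm could never fire
    println!("ok");
}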
@@ -58,6 +58,12 @@ impl AuditSystem {
     }
 
     /// Starts the audit system with the given configuration
+    ///
+    /// # Arguments
+    /// * `config` - The configuration to use for starting the audit system
+    ///
+    /// # Returns
+    /// * `AuditResult<()>` - Result indicating success or failure
     pub async fn start(&self, config: Config) -> AuditResult<()> {
         let state = self.state.write().await;
 
@@ -87,7 +93,7 @@ impl AuditSystem {
 
         // Create targets from configuration
         let mut registry = self.registry.lock().await;
-        match registry.create_targets_from_config(&config).await {
+        match registry.create_audit_targets_from_config(&config).await {
             Ok(targets) => {
                 if targets.is_empty() {
                     info!("No enabled audit targets found, keeping audit system stopped");
@@ -143,6 +149,9 @@ impl AuditSystem {
     }
 
     /// Pauses the audit system
+    ///
+    /// # Returns
+    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn pause(&self) -> AuditResult<()> {
         let mut state = self.state.write().await;
 
@@ -161,6 +170,9 @@ impl AuditSystem {
     }
 
     /// Resumes the audit system
+    ///
+    /// # Returns
+    /// * `AuditResult<()>` - Result indicating success or failure
     pub async fn resume(&self) -> AuditResult<()> {
         let mut state = self.state.write().await;
 
@@ -179,6 +191,9 @@ impl AuditSystem {
     }
 
     /// Stops the audit system and closes all targets
+    ///
+    /// # Returns
+    /// * `AuditResult<()>` - Result indicating success or failure
     pub async fn close(&self) -> AuditResult<()> {
         let mut state = self.state.write().await;
 
@@ -223,11 +238,20 @@ impl AuditSystem {
     }
 
     /// Checks if the audit system is running
+    ///
+    /// # Returns
+    /// * `bool` - True if running, false otherwise
     pub async fn is_running(&self) -> bool {
         matches!(*self.state.read().await, AuditSystemState::Running)
     }
 
     /// Dispatches an audit log entry to all active targets
+    ///
+    /// # Arguments
+    /// * `entry` - The audit log entry to dispatch
+    ///
+    /// # Returns
+    /// * `AuditResult<()>` - Result indicating success or failure
     pub async fn dispatch(&self, entry: Arc<AuditEntry>) -> AuditResult<()> {
         let start_time = std::time::Instant::now();
 
@@ -250,9 +274,9 @@ impl AuditSystem {
         drop(state);
 
         let registry = self.registry.lock().await;
-        let target_ids = registry.list_targets();
+        let target_keys = registry.list_targets();
 
-        if target_ids.is_empty() {
+        if target_keys.is_empty() {
             warn!("No audit targets configured for dispatch");
             return Ok(());
         }
@@ -260,22 +284,22 @@ impl AuditSystem {
         // Dispatch to all targets concurrently
         let mut tasks = Vec::new();
 
-        for target_id in target_ids {
-            if let Some(target) = registry.get_target(&target_id) {
+        for target_key in target_keys {
+            if let Some(target) = registry.get_target(&target_key) {
                 let entry_clone = Arc::clone(&entry);
-                let target_id_clone = target_id.clone();
+                let target_key_clone = target_key.clone();
 
                 // Create EntityTarget for the audit log entry
                 let entity_target = EntityTarget {
                     object_name: entry.api.name.clone().unwrap_or_default(),
                     bucket_name: entry.api.bucket.clone().unwrap_or_default(),
-                    event_name: rustfs_targets::EventName::ObjectCreatedPut, // Default, should be derived from entry
+                    event_name: entry.event,
                     data: (*entry_clone).clone(),
                 };
 
                 let task = async move {
                     let result = target.save(Arc::new(entity_target)).await;
-                    (target_id_clone, result)
+                    (target_key_clone, result)
                 };
 
                 tasks.push(task);
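Dispatch collects one future per target and awaits the whole set, so a single failing target cannot short-circuit delivery to the others. A runnable miniature of that fan-out using futures::future::join_all — the save function and target names are stand-ins, and the sketch assumes the tokio and futures crates:

use futures::future::join_all;

async fn save(target: &str, entry: &str) -> Result<(), String> {
    if target == "bad" { Err(format!("{target} rejected {entry}")) } else { Ok(()) }
}

#[tokio::main]
async fn main() {
    let targets = ["webhook:primary", "mqtt:backup", "bad"];
    let entry = "PutObject my-bucket/key";

    // One future per target, then await them all; failures are collected, not short-circuited.
    let results = join_all(targets.iter().map(|t| async move { (*t, save(t, entry).await) })).await;

    let (ok, errs): (Vec<_>, Vec<_>) = results.into_iter().partition(|(_, r)| r.is_ok());
    println!("{} delivered, {} failed", ok.len(), errs.len());
    for (t, r) in errs {
        eprintln!("{t}: {}", r.unwrap_err());
    }
}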
@@ -288,14 +312,14 @@ impl AuditSystem {
         let mut errors = Vec::new();
         let mut success_count = 0;
 
-        for (target_id, result) in results {
+        for (target_key, result) in results {
             match result {
                 Ok(_) => {
                     success_count += 1;
                     observability::record_target_success();
                 }
                 Err(e) => {
-                    error!(target_id = %target_id, error = %e, "Failed to dispatch audit log to target");
+                    error!(target_id = %target_key, error = %e, "Failed to dispatch audit log to target");
                     errors.push(e);
                     observability::record_target_failure();
                 }
@@ -319,6 +343,13 @@ impl AuditSystem {
         Ok(())
     }
 
+    /// Dispatches a batch of audit log entries to all active targets
+    ///
+    /// # Arguments
+    /// * `entries` - A vector of audit log entries to dispatch
+    ///
+    /// # Returns
+    /// * `AuditResult<()>` - Result indicating success or failure
     pub async fn dispatch_batch(&self, entries: Vec<Arc<AuditEntry>>) -> AuditResult<()> {
         let start_time = std::time::Instant::now();
 
@@ -329,18 +360,18 @@ impl AuditSystem {
         drop(state);
 
         let registry = self.registry.lock().await;
-        let target_ids = registry.list_targets();
+        let target_keys = registry.list_targets();
 
-        if target_ids.is_empty() {
+        if target_keys.is_empty() {
             warn!("No audit targets configured for batch dispatch");
             return Ok(());
         }
 
         let mut tasks = Vec::new();
-        for target_id in target_ids {
-            if let Some(target) = registry.get_target(&target_id) {
+        for target_key in target_keys {
+            if let Some(target) = registry.get_target(&target_key) {
                 let entries_clone: Vec<_> = entries.iter().map(Arc::clone).collect();
-                let target_id_clone = target_id.clone();
+                let target_key_clone = target_key.clone();
 
                 let task = async move {
                     let mut success_count = 0;
@@ -349,7 +380,7 @@ impl AuditSystem {
                     let entity_target = EntityTarget {
                         object_name: entry.api.name.clone().unwrap_or_default(),
                         bucket_name: entry.api.bucket.clone().unwrap_or_default(),
-                        event_name: rustfs_targets::EventName::ObjectCreatedPut,
+                        event_name: entry.event,
                         data: (*entry).clone(),
                     };
                     match target.save(Arc::new(entity_target)).await {
@@ -357,7 +388,7 @@ impl AuditSystem {
                         Err(e) => errors.push(e),
                     }
                 }
-                (target_id_clone, success_count, errors)
+                (target_key_clone, success_count, errors)
             };
             tasks.push(task);
         }
@@ -386,7 +417,14 @@ impl AuditSystem {
         Ok(())
     }
 
-    // New: Audit flow background tasks, based on send_from_store, including retries and exponential backoffs
+    /// Starts the audit stream processing for a target with batching and retry logic
+    ///
+    /// # Arguments
+    /// * `store` - The store from which to read audit entries
+    /// * `target` - The target to which audit entries will be sent
+    ///
+    /// This function spawns a background task that continuously reads audit entries from the provided store
+    /// and attempts to send them to the specified target. It implements retry logic with exponential backoff.
     fn start_audit_stream_with_batching(
         &self,
         store: Box<dyn Store<EntityTarget<AuditEntry>, Error = StoreError, Key = Key> + Send>,
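The doc comment promises retry with exponential backoff; the usual tokio shape is a loop that doubles a capped delay between attempts. A sketch under those assumptions — send_batch is a stand-in, not the crate's API:

use std::time::Duration;

async fn send_batch(attempt: u32) -> Result<(), String> {
    // Stand-in for pushing a batch of audit entries to a target.
    if attempt < 3 { Err("target unavailable".into()) } else { Ok(()) }
}

#[tokio::main]
async fn main() {
    let mut delay = Duration::from_millis(100);
    let max_delay = Duration::from_secs(5);

    for attempt in 0.. {
        match send_batch(attempt).await {
            Ok(()) => {
                println!("batch delivered after {attempt} retries");
                break;
            }
            Err(e) => {
                eprintln!("attempt {attempt} failed: {e}; retrying in {delay:?}");
                tokio::time::sleep(delay).await;
                // Double the delay up to a cap — the exponential backoff.
                delay = (delay * 2).min(max_delay);
            }
        }
    }
}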
@@ -462,6 +500,12 @@ impl AuditSystem {
|
||||
}
|
||||
|
||||
/// Enables a specific target
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `target_id` - The ID of the target to enable, TargetID to string
|
||||
///
|
||||
/// # Returns
|
||||
/// * `AuditResult<()>` - Result indicating success or failure
|
||||
pub async fn enable_target(&self, target_id: &str) -> AuditResult<()> {
|
||||
// This would require storing enabled/disabled state per target
|
||||
// For now, just check if target exists
|
||||
@@ -475,6 +519,12 @@ impl AuditSystem {
|
||||
}
|
||||
|
||||
/// Disables a specific target
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `target_id` - The ID of the target to disable, TargetID to string
|
||||
///
|
||||
/// # Returns
|
||||
/// * `AuditResult<()>` - Result indicating success or failure
|
||||
pub async fn disable_target(&self, target_id: &str) -> AuditResult<()> {
|
||||
// This would require storing enabled/disabled state per target
|
||||
// For now, just check if target exists
|
||||
@@ -488,6 +538,12 @@ impl AuditSystem {
|
||||
}
|
||||
|
||||
/// Removes a target from the system
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `target_id` - The ID of the target to remove, TargetID to string
|
||||
///
|
||||
/// # Returns
|
||||
/// * `AuditResult<()>` - Result indicating success or failure
|
||||
pub async fn remove_target(&self, target_id: &str) -> AuditResult<()> {
|
||||
let mut registry = self.registry.lock().await;
|
||||
if let Some(target) = registry.remove_target(target_id) {
|
||||
@@ -502,6 +558,13 @@ impl AuditSystem {
|
||||
}
|
||||
|
||||
/// Updates or inserts a target
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `target_id` - The ID of the target to upsert, TargetID to string
|
||||
/// * `target` - The target instance to insert or update
|
||||
///
|
||||
/// # Returns
|
||||
/// * `AuditResult<()>` - Result indicating success or failure
|
||||
pub async fn upsert_target(&self, target_id: String, target: Box<dyn Target<AuditEntry> + Send + Sync>) -> AuditResult<()> {
|
||||
let mut registry = self.registry.lock().await;
|
||||
|
||||
@@ -511,10 +574,10 @@ impl AuditSystem {
|
||||
}
|
||||
|
||||
// Remove existing target if present
|
||||
if let Some(old_target) = registry.remove_target(&target_id) {
|
||||
if let Err(e) = old_target.close().await {
|
||||
error!(target_id = %target_id, error = %e, "Failed to close old target during upsert");
|
||||
}
|
||||
if let Some(old_target) = registry.remove_target(&target_id)
|
||||
&& let Err(e) = old_target.close().await
|
||||
{
|
||||
error!(target_id = %target_id, error = %e, "Failed to close old target during upsert");
|
||||
}
|
||||
|
||||
registry.add_target(target_id.clone(), target);
|
||||
@@ -523,18 +586,33 @@ impl AuditSystem {
|
||||
}
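
The upsert hunk above is the first of several in this changeset that collapse nested `if let` blocks into a let-chain (`if let ... && let ...`), a feature stabilized in the Rust 2024 edition. A minimal before/after sketch of the pattern in isolation:

```rust
fn first_char_if_long(s: Option<&str>) -> Option<char> {
    // Before: nested conditionals, each adding an indentation level.
    if let Some(text) = s {
        if text.len() > 3 {
            if let Some(c) = text.chars().next() {
                return Some(c);
            }
        }
    }
    None
}

fn first_char_if_long_chained(s: Option<&str>) -> Option<char> {
    // After: one flat condition; requires edition = "2024".
    if let Some(text) = s
        && text.len() > 3
        && let Some(c) = text.chars().next()
    {
        return Some(c);
    }
    None
}
```

Both functions are equivalent; the chained form removes two indentation levels without changing control flow, which is why the refactor recurs throughout the diffs below.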

    /// Lists all targets
    ///
    /// # Returns
    /// * `Vec<String>` - List of target IDs
    pub async fn list_targets(&self) -> Vec<String> {
        let registry = self.registry.lock().await;
        registry.list_targets()
    }

    /// Gets information about a specific target
    ///
    /// # Arguments
    /// * `target_id` - The ID of the target to retrieve (a TargetID rendered as a string)
    ///
    /// # Returns
    /// * `Option<String>` - Target ID if found
    pub async fn get_target(&self, target_id: &str) -> Option<String> {
        let registry = self.registry.lock().await;
        registry.get_target(target_id).map(|target| target.id().to_string())
    }

    /// Reloads configuration and updates targets
    ///
    /// # Arguments
    /// * `new_config` - The new configuration to load
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn reload_config(&self, new_config: Config) -> AuditResult<()> {
        info!("Reloading audit system configuration");

@@ -554,7 +632,7 @@ impl AuditSystem {
        }

        // Create new targets from the updated configuration
-        match registry.create_targets_from_config(&new_config).await {
+        match registry.create_audit_targets_from_config(&new_config).await {
            Ok(targets) => {
                info!(target_count = targets.len(), "Reloaded audit targets successfully");

@@ -594,16 +672,22 @@ impl AuditSystem {
    }

    /// Gets current audit system metrics
    ///
    /// # Returns
    /// * `AuditMetricsReport` - Current metrics report
    pub async fn get_metrics(&self) -> observability::AuditMetricsReport {
        observability::get_metrics_report().await
    }

    /// Validates system performance against requirements
    ///
    /// # Returns
    /// * `PerformanceValidation` - Performance validation results
    pub async fn validate_performance(&self) -> observability::PerformanceValidation {
        observability::validate_performance().await
    }

-    /// Resets all metrics
+    /// Resets all metrics to initial state
    pub async fn reset_metrics(&self) {
        observability::reset_metrics().await;
    }
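
Taken together, `get_metrics` and `validate_performance` make a periodic reporter straightforward to build. A hedged sketch, assuming a shared `Arc<AuditSystem>` handle (the constructor is not shown in this diff, and the report types' fields are left opaque):

```rust
use std::{sync::Arc, time::Duration};

// `audit` is assumed to be an Arc<AuditSystem> obtained elsewhere in the server.
async fn report_loop(audit: Arc<AuditSystem>) {
    let mut ticker = tokio::time::interval(Duration::from_secs(60));
    loop {
        ticker.tick().await;
        let report = audit.get_metrics().await;
        let perf = audit.validate_performance().await;
        // The exact report fields live in the observability module;
        // Debug formatting keeps this sketch agnostic of them.
        tracing::info!(?report, ?perf, "periodic audit metrics");
    }
}
```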

@@ -43,11 +43,11 @@ async fn test_config_parsing_webhook() {
    audit_webhook_section.insert("_".to_string(), default_kvs);
    config.0.insert("audit_webhook".to_string(), audit_webhook_section);

-    let mut registry = AuditRegistry::new();
+    let registry = AuditRegistry::new();

    // This should not fail even if server storage is not initialized,
    // as it's an integration test
-    let result = registry.create_targets_from_config(&config).await;
+    let result = registry.create_audit_targets_from_config(&config).await;

    // We expect this to fail due to server storage not being initialized,
    // but the parsing should work correctly

@@ -44,7 +44,7 @@ async fn test_audit_system_startup_performance() {
#[tokio::test]
async fn test_concurrent_target_creation() {
    // Test that multiple targets can be created concurrently
-    let mut registry = AuditRegistry::new();
+    let registry = AuditRegistry::new();

    // Create config with multiple webhook instances
    let mut config = rustfs_ecstore::config::Config(std::collections::HashMap::new());

@@ -63,7 +63,7 @@ async fn test_concurrent_target_creation() {
    let start = Instant::now();

    // This will fail due to server storage not being initialized, but we can measure timing
-    let result = registry.create_targets_from_config(&config).await;
+    let result = registry.create_audit_targets_from_config(&config).await;
    let elapsed = start.elapsed();

    println!("Concurrent target creation took: {elapsed:?}");

@@ -135,7 +135,7 @@ async fn test_global_audit_functions() {

#[tokio::test]
async fn test_config_parsing_with_multiple_instances() {
-    let mut registry = AuditRegistry::new();
+    let registry = AuditRegistry::new();

    // Create config with multiple webhook instances
    let mut config = Config(HashMap::new());

@@ -164,7 +164,7 @@ async fn test_config_parsing_with_multiple_instances() {
    config.0.insert("audit_webhook".to_string(), webhook_section);

    // Try to create targets from config
-    let result = registry.create_targets_from_config(&config).await;
+    let result = registry.create_audit_targets_from_config(&config).await;

    // Should fail due to server storage not initialized, but parsing should work
    match result {

@@ -39,3 +39,4 @@ path-clean = { workspace = true }
rmp-serde = { workspace = true }
async-trait = { workspace = true }
+s3s = { workspace = true }
tracing = { workspace = true }

@@ -8,7 +8,7 @@
<p align="center">
  <a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
-  <a href="https://docs.rustfs.com/en/">📖 Documentation</a>
+  <a href="https://docs.rustfs.com/">📖 Documentation</a>
  · <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
  · <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

@@ -605,13 +605,12 @@ impl DataUsageCache {

    pub fn search_parent(&self, hash: &DataUsageHash) -> Option<DataUsageHash> {
        let want = hash.key();
-        if let Some(last_index) = want.rfind('/') {
-            if let Some(v) = self.find(&want[0..last_index]) {
-                if v.children.contains(&want) {
-                    let found = hash_path(&want[0..last_index]);
-                    return Some(found);
-                }
-            }
+        if let Some(last_index) = want.rfind('/')
+            && let Some(v) = self.find(&want[0..last_index])
+            && v.children.contains(&want)
+        {
+            let found = hash_path(&want[0..last_index]);
+            return Some(found);
        }

        for (k, v) in self.cache.iter() {

@@ -1150,10 +1149,10 @@ impl DataUsageInfo {
        self.buckets_count = self.buckets_usage.len() as u64;

        // Update last update time
-        if let Some(other_update) = other.last_update {
-            if self.last_update.is_none() || other_update > self.last_update.unwrap() {
-                self.last_update = Some(other_update);
-            }
+        if let Some(other_update) = other.last_update
+            && (self.last_update.is_none() || other_update > self.last_update.unwrap())
+        {
+            self.last_update = Some(other_update);
        }
    }
}

@@ -14,17 +14,111 @@

#![allow(non_upper_case_globals)] // FIXME

use chrono::{DateTime, Utc};
use std::collections::HashMap;
use std::sync::LazyLock;
use tokio::sync::RwLock;
use tonic::transport::Channel;

-pub static GLOBAL_Local_Node_Name: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
-pub static GLOBAL_Rustfs_Host: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
-pub static GLOBAL_Rustfs_Port: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
-pub static GLOBAL_Rustfs_Addr: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
-pub static GLOBAL_Conn_Map: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
+pub static GLOBAL_LOCAL_NODE_NAME: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
+pub static GLOBAL_RUSTFS_HOST: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
+pub static GLOBAL_RUSTFS_PORT: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
+pub static GLOBAL_RUSTFS_ADDR: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
+pub static GLOBAL_CONN_MAP: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
+pub static GLOBAL_ROOT_CERT: LazyLock<RwLock<Option<Vec<u8>>>> = LazyLock::new(|| RwLock::new(None));
+pub static GLOBAL_MTLS_IDENTITY: LazyLock<RwLock<Option<MtlsIdentityPem>>> = LazyLock::new(|| RwLock::new(None));
+/// Global initialization time of the RustFS node.
+pub static GLOBAL_INIT_TIME: LazyLock<RwLock<Option<DateTime<Utc>>>> = LazyLock::new(|| RwLock::new(None));

-pub async fn set_global_addr(addr: &str) {
-    *GLOBAL_Rustfs_Addr.write().await = addr.to_string();
+/// Set the global local node name.
+///
+/// # Arguments
+/// * `name` - A string slice representing the local node name.
+pub async fn set_global_local_node_name(name: &str) {
+    *GLOBAL_LOCAL_NODE_NAME.write().await = name.to_string();
}

/// Set the global RustFS initialization time to the current UTC time.
pub async fn set_global_init_time_now() {
    let now = Utc::now();
    *GLOBAL_INIT_TIME.write().await = Some(now);
}

/// Get the global RustFS initialization time.
///
/// # Returns
/// * `Option<DateTime<Utc>>` - The initialization time if set.
pub async fn get_global_init_time() -> Option<DateTime<Utc>> {
    *GLOBAL_INIT_TIME.read().await
}

/// Set the global RustFS address used for gRPC connections.
///
/// # Arguments
/// * `addr` - A string slice representing the RustFS address (e.g., "https://node1:9000").
pub async fn set_global_addr(addr: &str) {
    *GLOBAL_RUSTFS_ADDR.write().await = addr.to_string();
}

/// Set the global root CA certificate for outbound gRPC clients.
/// This certificate is used to validate server TLS certificates.
/// When set to None, clients use the system default root CAs.
///
/// # Arguments
/// * `cert` - A vector of bytes representing the PEM-encoded root CA certificate.
pub async fn set_global_root_cert(cert: Vec<u8>) {
    *GLOBAL_ROOT_CERT.write().await = Some(cert);
}

/// Set the global mTLS identity (cert+key PEM) for outbound gRPC clients.
/// When set, clients will present this identity to servers requesting/requiring mTLS.
/// When None, clients proceed with standard server-authenticated TLS.
///
/// # Arguments
/// * `identity` - An optional MtlsIdentityPem struct containing the cert and key PEM.
pub async fn set_global_mtls_identity(identity: Option<MtlsIdentityPem>) {
    *GLOBAL_MTLS_IDENTITY.write().await = identity;
}

/// Evict a stale/dead connection from the global connection cache.
/// This is critical for cluster recovery when a node dies unexpectedly (e.g., power-off).
/// By removing the cached connection, subsequent requests will establish a fresh connection.
///
/// # Arguments
/// * `addr` - The address of the connection to evict.
pub async fn evict_connection(addr: &str) {
    let removed = GLOBAL_CONN_MAP.write().await.remove(addr);
    if removed.is_some() {
        tracing::warn!("Evicted stale connection from cache: {}", addr);
    }
}

/// Check if a connection exists in the cache for the given address.
///
/// # Arguments
/// * `addr` - The address to check.
///
/// # Returns
/// * `bool` - True if a cached connection exists, false otherwise.
pub async fn has_cached_connection(addr: &str) -> bool {
    GLOBAL_CONN_MAP.read().await.contains_key(addr)
}

/// Clear all cached connections. Useful for full cluster reset/recovery.
pub async fn clear_all_connections() {
    let mut map = GLOBAL_CONN_MAP.write().await;
    let count = map.len();
    map.clear();
    if count > 0 {
        tracing::warn!("Cleared {} cached connections from global map", count);
    }
}
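
The eviction helper is meant to be called when an RPC over a cached channel fails, so that the retry dials fresh. A minimal sketch of that call pattern; `dial_and_cache` and `do_rpc` are hypothetical placeholders standing in for the real connection and request code:

```rust
async fn dial_and_cache(_addr: &str) -> Result<(), String> {
    // Hypothetical: open a tonic Channel and insert it into GLOBAL_CONN_MAP.
    Ok(())
}

async fn do_rpc(_addr: &str) -> Result<(), String> {
    // Hypothetical: issue a request over the cached channel.
    Ok(())
}

/// Evict-on-error usage of the cache helpers above.
async fn call_with_recovery(addr: &str) -> Result<(), String> {
    if !has_cached_connection(addr).await {
        dial_and_cache(addr).await?;
    }
    if let Err(first) = do_rpc(addr).await {
        // Drop the (possibly dead) channel so the retry establishes a fresh connection.
        tracing::warn!("rpc failed ({first}), evicting {addr} and retrying");
        evict_connection(addr).await;
        dial_and_cache(addr).await?;
        return do_rpc(addr).await;
    }
    Ok(())
}
```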

/// Optional client identity (cert+key PEM) for outbound mTLS.
///
/// When present, gRPC clients will present this identity to servers requesting/requiring mTLS.
/// When absent, clients proceed with standard server-authenticated TLS.
#[derive(Clone, Debug)]
pub struct MtlsIdentityPem {
    pub cert_pem: Vec<u8>,
    pub key_pem: Vec<u8>,
}
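
A sketch of wiring the identity at startup, reading PEM files from illustrative paths (in RustFS the paths would come from the env-var constants shown further below in `crates/config`):

```rust
use std::path::Path;

/// Load a client identity from PEM files and install it globally.
/// The paths are illustrative; real ones come from configuration.
async fn install_mtls_identity(cert_path: &Path, key_path: &Path) -> std::io::Result<()> {
    let cert_pem = tokio::fs::read(cert_path).await?;
    let key_pem = tokio::fs::read(key_path).await?;
    set_global_mtls_identity(Some(MtlsIdentityPem { cert_pem, key_pem })).await;
    Ok(())
}
```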

@@ -125,7 +125,7 @@ impl<'de> Deserialize<'de> for HealScanMode {
            0 => Ok(HealScanMode::Unknown),
            1 => Ok(HealScanMode::Normal),
            2 => Ok(HealScanMode::Deep),
-            _ => Err(E::custom(format!("invalid HealScanMode value: {}", value))),
+            _ => Err(E::custom(format!("invalid HealScanMode value: {value}"))),
        }
    }

@@ -134,7 +134,7 @@ impl<'de> Deserialize<'de> for HealScanMode {
        E: serde::de::Error,
    {
        if value > u8::MAX as u64 {
-            return Err(E::custom(format!("HealScanMode value too large: {}", value)));
+            return Err(E::custom(format!("HealScanMode value too large: {value}")));
        }
        self.visit_u8(value as u8)
    }

@@ -144,7 +144,7 @@ impl<'de> Deserialize<'de> for HealScanMode {
        E: serde::de::Error,
    {
        if value < 0 || value > u8::MAX as i64 {
-            return Err(E::custom(format!("invalid HealScanMode value: {}", value)));
+            return Err(E::custom(format!("invalid HealScanMode value: {value}")));
        }
        self.visit_u8(value as u8)
    }

@@ -162,7 +162,7 @@ impl<'de> Deserialize<'de> for HealScanMode {
            "Unknown" | "unknown" => Ok(HealScanMode::Unknown),
            "Normal" | "normal" => Ok(HealScanMode::Normal),
            "Deep" | "deep" => Ok(HealScanMode::Deep),
-            _ => Err(E::custom(format!("invalid HealScanMode string: {}", value))),
+            _ => Err(E::custom(format!("invalid HealScanMode string: {value}"))),
        }
    }
}
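
All four hunks make the same mechanical change: positional `format!("... {}", value)` becomes the captured-identifier form `format!("... {value}")`, available since Rust 1.58. The two forms produce identical strings:

```rust
fn demo(value: u8) {
    let a = format!("invalid HealScanMode value: {}", value); // positional argument
    let b = format!("invalid HealScanMode value: {value}");   // captured identifier (Rust 1.58+)
    assert_eq!(a, b);
}
```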

@@ -403,10 +403,10 @@ fn lc_get_prefix(rule: &LifecycleRule) -> String {
    } else if let Some(filter) = &rule.filter {
        if let Some(p) = &filter.prefix {
            return p.to_string();
-        } else if let Some(and) = &filter.and {
-            if let Some(p) = &and.prefix {
-                return p.to_string();
-            }
+        } else if let Some(and) = &filter.and
+            && let Some(p) = &and.prefix
+        {
+            return p.to_string();
        }
    }

@@ -475,21 +475,19 @@ pub fn rep_has_active_rules(config: &ReplicationConfiguration, prefix: &str, rec
        {
            continue;
        }
-        if !prefix.is_empty() {
-            if let Some(filter) = &rule.filter {
-                if let Some(r_prefix) = &filter.prefix {
-                    if !r_prefix.is_empty() {
-                        // incoming prefix must be in rule prefix
-                        if !recursive && !prefix.starts_with(r_prefix) {
-                            continue;
-                        }
-                        // If recursive, we can skip this rule if it doesn't match the tested prefix or level below prefix
-                        // does not match
-                        if recursive && !r_prefix.starts_with(prefix) && !prefix.starts_with(r_prefix) {
-                            continue;
-                        }
-                    }
-                }
+        if !prefix.is_empty()
+            && let Some(filter) = &rule.filter
+            && let Some(r_prefix) = &filter.prefix
+            && !r_prefix.is_empty()
+        {
+            // incoming prefix must be in rule prefix
+            if !recursive && !prefix.starts_with(r_prefix) {
+                continue;
+            }
+            // If recursive, we can skip this rule if it doesn't match the tested prefix or level below prefix
+            // does not match
+            if recursive && !r_prefix.starts_with(prefix) && !prefix.starts_with(r_prefix) {
+                continue;
+            }
        }
        return true;

@@ -19,6 +19,10 @@ pub mod globals;
pub mod heal_channel;
pub mod last_minute;
pub mod metrics;
+mod readiness;

pub use globals::*;
+pub use readiness::{GlobalReadiness, SystemStage};

// 44 is the byte value of ','
pub static DEFAULT_DELIMITER: u8 = 44;

@@ -18,6 +18,7 @@ use rustfs_madmin::metrics::ScannerMetrics as M_ScannerMetrics;
use std::{
    collections::HashMap,
    fmt::Display,
+    future::Future,
    pin::Pin,
    sync::{
        Arc, OnceLock,

@@ -115,7 +116,7 @@ pub enum Metric {

impl Metric {
    /// Convert to string representation for metrics
-    pub fn as_str(self) -> &'static str {
+    pub fn as_str(&self) -> &'static str {
        match self {
            Self::ReadMetadata => "read_metadata",
            Self::CheckMissing => "check_missing",

@@ -460,27 +461,32 @@ impl Metrics {
            metrics.current_started = cycle.started;
        }

+        // Replace the default start time with the global init time if it's still the placeholder
+        if let Some(init_time) = crate::get_global_init_time().await {
+            metrics.current_started = init_time;
+        }

        metrics.collected_at = Utc::now();
        metrics.active_paths = self.get_current_paths().await;

        // Lifetime operations
        for i in 0..Metric::Last as usize {
            let count = self.operations[i].load(Ordering::Relaxed);
-            if count > 0 {
-                if let Some(metric) = Metric::from_index(i) {
-                    metrics.life_time_ops.insert(metric.as_str().to_string(), count);
-                }
+            if count > 0
+                && let Some(metric) = Metric::from_index(i)
+            {
+                metrics.life_time_ops.insert(metric.as_str().to_string(), count);
            }
        }

        // Last minute statistics for realtime metrics
        for i in 0..Metric::LastRealtime as usize {
            let last_min = self.latency[i].total().await;
-            if last_min.n > 0 {
-                if let Some(_metric) = Metric::from_index(i) {
-                    // Convert to madmin TimedAction format if needed
-                    // This would require implementing the conversion
-                }
+            if last_min.n > 0
+                && let Some(_metric) = Metric::from_index(i)
+            {
+                // Convert to madmin TimedAction format if needed
+                // This would require implementing the conversion
            }
        }

@@ -489,8 +495,8 @@ impl Metrics {
}

// Type aliases for compatibility with existing code
-pub type UpdateCurrentPathFn = Arc<dyn Fn(&str) -> Pin<Box<dyn std::future::Future<Output = ()> + Send>> + Send + Sync>;
-pub type CloseDiskFn = Arc<dyn Fn() -> Pin<Box<dyn std::future::Future<Output = ()> + Send>> + Send + Sync>;
+pub type UpdateCurrentPathFn = Arc<dyn Fn(&str) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync>;
+pub type CloseDiskFn = Arc<dyn Fn() -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync>;

/// Create a current path updater for tracking scan progress
pub fn current_path_updater(disk: &str, initial: &str) -> (UpdateCurrentPathFn, CloseDiskFn) {

@@ -506,7 +512,7 @@ pub fn current_path_updater(disk: &str, initial: &str) -> (UpdateCurrentPathFn,

    let update_fn = {
        let tracker = Arc::clone(&tracker);
-        Arc::new(move |path: &str| -> Pin<Box<dyn std::future::Future<Output = ()> + Send>> {
+        Arc::new(move |path: &str| -> Pin<Box<dyn Future<Output = ()> + Send>> {
            let tracker = Arc::clone(&tracker);
            let path = path.to_string();
            Box::pin(async move {

@@ -517,7 +523,7 @@ pub fn current_path_updater(disk: &str, initial: &str) -> (UpdateCurrentPathFn,

    let done_fn = {
        let disk_name = disk_name.clone();
-        Arc::new(move || -> Pin<Box<dyn std::future::Future<Output = ()> + Send>> {
+        Arc::new(move || -> Pin<Box<dyn Future<Output = ()> + Send>> {
            let disk_name = disk_name.clone();
            Box::pin(async move {
                global_metrics().current_paths.write().await.remove(&disk_name);

136  crates/common/src/readiness.rs  Normal file
@@ -0,0 +1,136 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::atomic::{AtomicU8, Ordering};

/// Represents the various stages of system startup
#[repr(u8)]
pub enum SystemStage {
    Booting = 0,
    StorageReady = 1, // Disks online, quorum met
    IamReady = 2,     // Users and policies loaded into cache
    FullReady = 3,    // System ready to serve all traffic
}

/// Global readiness tracker for the service.
/// This struct uses atomic operations to track the readiness status of various components
/// of the service in a thread-safe manner.
pub struct GlobalReadiness {
    status: AtomicU8,
}

impl Default for GlobalReadiness {
    fn default() -> Self {
        Self::new()
    }
}

impl GlobalReadiness {
    /// Create a new GlobalReadiness instance with the initial status set to Booting.
    /// # Returns
    /// A new instance of GlobalReadiness
    pub fn new() -> Self {
        Self {
            status: AtomicU8::new(SystemStage::Booting as u8),
        }
    }

    /// Advance the system to a new stage
    ///
    /// # Arguments
    /// * `step` - The SystemStage step to mark as reached
    pub fn mark_stage(&self, step: SystemStage) {
        self.status.fetch_max(step as u8, Ordering::SeqCst);
    }

    /// Check if the service is fully ready
    /// # Returns
    /// `true` if the service is fully ready, `false` otherwise
    pub fn is_ready(&self) -> bool {
        self.status.load(Ordering::SeqCst) == SystemStage::FullReady as u8
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use std::thread;

    #[test]
    fn test_initial_state() {
        let readiness = GlobalReadiness::new();
        assert!(!readiness.is_ready());
        assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::Booting as u8);
    }

    #[test]
    fn test_mark_stage_progression() {
        let readiness = GlobalReadiness::new();
        readiness.mark_stage(SystemStage::StorageReady);
        assert!(!readiness.is_ready());
        assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::StorageReady as u8);

        readiness.mark_stage(SystemStage::IamReady);
        assert!(!readiness.is_ready());
        assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::IamReady as u8);

        readiness.mark_stage(SystemStage::FullReady);
        assert!(readiness.is_ready());
    }

    #[test]
    fn test_no_regression() {
        let readiness = GlobalReadiness::new();
        readiness.mark_stage(SystemStage::FullReady);
        readiness.mark_stage(SystemStage::IamReady); // Should not regress
        assert!(readiness.is_ready());
    }

    #[test]
    fn test_concurrent_marking() {
        let readiness = Arc::new(GlobalReadiness::new());
        let mut handles = vec![];

        for _ in 0..10 {
            let r = Arc::clone(&readiness);
            handles.push(thread::spawn(move || {
                r.mark_stage(SystemStage::StorageReady);
                r.mark_stage(SystemStage::IamReady);
                r.mark_stage(SystemStage::FullReady);
            }));
        }

        for h in handles {
            h.join().unwrap();
        }

        assert!(readiness.is_ready());
    }

    #[test]
    fn test_is_ready_only_at_full_ready() {
        let readiness = GlobalReadiness::new();
        assert!(!readiness.is_ready());

        readiness.mark_stage(SystemStage::StorageReady);
        assert!(!readiness.is_ready());

        readiness.mark_stage(SystemStage::IamReady);
        assert!(!readiness.is_ready());

        readiness.mark_stage(SystemStage::FullReady);
        assert!(readiness.is_ready());
    }
}
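
Because `mark_stage` uses `fetch_max`, marking is monotonic and idempotent: concurrent or out-of-order marks can never regress the stage, which is exactly what the tests above exercise. A sketch of how a startup sequence might drive the tracker; the commented-out boot steps are hypothetical stand-ins for RustFS's real initialization:

```rust
use std::sync::Arc;

async fn boot() -> Arc<GlobalReadiness> {
    let readiness = Arc::new(GlobalReadiness::new());

    // Hypothetical boot step: bring disks online and establish quorum.
    // bring_disks_online().await;
    readiness.mark_stage(SystemStage::StorageReady);

    // Hypothetical boot step: load users and policies into the IAM cache.
    // load_iam_cache().await;
    readiness.mark_stage(SystemStage::IamReady);

    readiness.mark_stage(SystemStage::FullReady);

    // A readiness probe handler can now simply consult is_ready().
    assert!(readiness.is_ready());
    readiness
}
```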

@@ -8,7 +8,7 @@
<p align="center">
  <a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
-  <a href="https://docs.rustfs.com/en/">📖 Documentation</a>
+  <a href="https://docs.rustfs.com/">📖 Documentation</a>
  · <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
  · <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

@@ -29,7 +29,7 @@ pub const AUDIT_PREFIX: &str = "audit";
pub const AUDIT_ROUTE_PREFIX: &str = const_str::concat!(AUDIT_PREFIX, DEFAULT_DELIMITER);

pub const AUDIT_WEBHOOK_SUB_SYS: &str = "audit_webhook";
-pub const AUDIT_MQTT_SUB_SYS: &str = "mqtt_webhook";
+pub const AUDIT_MQTT_SUB_SYS: &str = "audit_mqtt";

pub const AUDIT_STORE_EXTENSION: &str = ".audit";
#[allow(dead_code)]

@@ -49,21 +49,6 @@ pub const SERVICE_VERSION: &str = "1.0.0";
/// Default value: production
pub const ENVIRONMENT: &str = "production";

-/// Default Access Key
-/// Default value: rustfsadmin
-/// Environment variable: RUSTFS_ACCESS_KEY
-/// Command line argument: --access-key
-/// Example: RUSTFS_ACCESS_KEY=rustfsadmin
-/// Example: --access-key rustfsadmin
-pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
-/// Default Secret Key
-/// Default value: rustfsadmin
-/// Environment variable: RUSTFS_SECRET_KEY
-/// Command line argument: --secret-key
-/// Example: RUSTFS_SECRET_KEY=rustfsadmin
-/// Example: --secret-key rustfsadmin
-pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";

/// Default console enable
/// This is the default value for the console server.
/// It is used to enable or disable the console server.

@@ -89,6 +74,30 @@ pub const RUSTFS_TLS_KEY: &str = "rustfs_key.pem";
/// This is the default cert for TLS.
pub const RUSTFS_TLS_CERT: &str = "rustfs_cert.pem";

+/// Default public certificate filename for rustfs
+/// It is used to store the public certificate of the application.
+/// Default value: public.crt
+pub const RUSTFS_PUBLIC_CERT: &str = "public.crt";
+
+/// Default CA certificate filename for rustfs
+/// It is used to store the CA certificate of the application.
+/// Default value: ca.crt
+pub const RUSTFS_CA_CERT: &str = "ca.crt";
+
+/// Default HTTP prefix for rustfs
+/// It is used to identify HTTP URLs.
+/// Default value: http://
+pub const RUSTFS_HTTP_PREFIX: &str = "http://";
+
+/// Default HTTPS prefix for rustfs
+/// It is used to identify HTTPS URLs.
+/// Default value: https://
+pub const RUSTFS_HTTPS_PREFIX: &str = "https://";

/// Default port for rustfs
/// This is used to bind the server to a specific port.

@@ -201,20 +210,6 @@ mod tests {
    );
}

-#[test]
-fn test_security_constants() {
-    // Test security related constants
-    assert_eq!(DEFAULT_ACCESS_KEY, "rustfsadmin");
-    assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
-
-    assert_eq!(DEFAULT_SECRET_KEY, "rustfsadmin");
-    assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");
-
-    // In production, access key and secret key should be different.
-    // These are default values, so being the same is acceptable, but should be warned about in documentation.
-    println!("Warning: Default access key and secret key are the same. Change them in production!");
-}

#[test]
fn test_file_path_constants() {
    assert_eq!(RUSTFS_TLS_KEY, "rustfs_key.pem");

@@ -276,8 +271,6 @@ mod tests {
    DEFAULT_LOG_LEVEL,
    SERVICE_VERSION,
    ENVIRONMENT,
-    DEFAULT_ACCESS_KEY,
-    DEFAULT_SECRET_KEY,
    RUSTFS_TLS_KEY,
    RUSTFS_TLS_CERT,
    DEFAULT_ADDRESS,

@@ -307,29 +300,6 @@ mod tests {
    assert_ne!(DEFAULT_CONSOLE_PORT, 0, "Console port should not be zero");
}

-#[test]
-fn test_security_best_practices() {
-    // Test security best practices
-
-    // These are default values and should be changed in production environments.
-    println!("Security Warning: Default credentials detected!");
-    println!("Access Key: {DEFAULT_ACCESS_KEY}");
-    println!("Secret Key: {DEFAULT_SECRET_KEY}");
-    println!("These should be changed in production environments!");
-
-    // Verify that key lengths meet minimum security requirements
-    assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
-    assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");
-
-    // Check if default credentials contain common insecure patterns
-    let _insecure_patterns = ["admin", "password", "123456", "default"];
-    let _access_key_lower = DEFAULT_ACCESS_KEY.to_lowercase();
-    let _secret_key_lower = DEFAULT_SECRET_KEY.to_lowercase();
-
-    // Note: more security-check logic can be added here,
-    // for example checking whether keys contain insecure patterns.
-}

#[test]
fn test_configuration_consistency() {
    // Test configuration consistency

56  crates/config/src/constants/body_limits.rs  Normal file
@@ -0,0 +1,56 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Request body size limits for admin API endpoints
//!
//! These limits prevent DoS attacks through unbounded memory allocation
//! while allowing legitimate use cases.

/// Maximum size for standard admin API request bodies (1 MB)
/// Used for: user creation/update, policies, tier config, KMS config, events, groups, service accounts
/// Rationale: Admin API payloads are typically JSON/XML configs under 100KB.
/// The AWS IAM policy limit is 6KB-10KB, so 1MB provides generous headroom.
pub const MAX_ADMIN_REQUEST_BODY_SIZE: usize = 1024 * 1024; // 1 MB

/// Maximum size for IAM import/export operations (10 MB)
/// Used for: IAM entity imports/exports containing multiple users, policies, groups
/// Rationale: ZIP archives with hundreds of IAM entities; 10MB allows ~10,000 small configs.
pub const MAX_IAM_IMPORT_SIZE: usize = 10 * 1024 * 1024; // 10 MB

/// Maximum size for bucket metadata import operations (100 MB)
/// Used for: bucket metadata imports containing configurations for many buckets
/// Rationale: Large deployments may have thousands of buckets with various configs;
/// 100MB allows importing metadata for ~10,000 buckets with reasonable configs.
pub const MAX_BUCKET_METADATA_IMPORT_SIZE: usize = 100 * 1024 * 1024; // 100 MB

/// Maximum size for healing operation requests (1 MB)
/// Used for: healing parameters and configuration
/// Rationale: Healing requests contain bucket/object paths and options and should be small.
pub const MAX_HEAL_REQUEST_SIZE: usize = 1024 * 1024; // 1 MB

/// Maximum size for S3 client response bodies (10 MB)
/// Used for: reading responses from remote S3-compatible services (ACL, attributes, lists)
///
/// Rationale: Responses from external S3-compatible services should be bounded;
/// larger responses indicate misconfiguration or a potential attack.
/// - ACL XML responses: typically < 10KB
/// - Object attributes: typically < 100KB
/// - List responses: typically < 1MB (1000 objects with metadata)
/// - Location/error responses: typically < 10KB
///
/// 10MB provides generous headroom for legitimate responses while preventing
/// memory exhaustion from malicious or misconfigured remote services.
pub const MAX_S3_CLIENT_RESPONSE_SIZE: usize = 10 * 1024 * 1024; // 10 MB
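
A minimal sketch of enforcing one of these limits while draining an untrusted stream, using only `std::io` (the real enforcement sits in the HTTP layer, which is not part of this diff):

```rust
use std::io::{Error, ErrorKind, Read, Result};

/// Read at most `limit` bytes; reject bodies that exceed it instead of buffering them.
fn read_bounded<R: Read>(reader: R, limit: usize) -> Result<Vec<u8>> {
    let mut buf = Vec::new();
    // take(limit + 1): if we end up with limit + 1 bytes, the body was too large.
    reader.take(limit as u64 + 1).read_to_end(&mut buf)?;
    if buf.len() > limit {
        return Err(Error::new(ErrorKind::InvalidData, "request body exceeds size limit"));
    }
    Ok(buf)
}

// Usage: read_bounded(body_reader, MAX_ADMIN_REQUEST_BODY_SIZE)
```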

61  crates/config/src/constants/compress.rs  Normal file
@@ -0,0 +1,61 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! HTTP Response Compression Configuration
//!
//! This module provides configuration options for HTTP response compression.
//! By default, compression is disabled (aligned with MinIO behavior).
//! When enabled via `RUSTFS_COMPRESS_ENABLE=on`, compression can be configured
//! to apply only to specific file extensions, MIME types, and minimum file sizes.

/// Environment variable to enable/disable HTTP response compression
/// Default: off (disabled)
/// Values: on, off, true, false, yes, no, 1, 0
/// Example: RUSTFS_COMPRESS_ENABLE=on
pub const ENV_COMPRESS_ENABLE: &str = "RUSTFS_COMPRESS_ENABLE";

/// Default compression enable state
/// Aligned with MinIO behavior - compression is disabled by default
pub const DEFAULT_COMPRESS_ENABLE: bool = false;

/// Environment variable for file extensions that should be compressed
/// Comma-separated list of file extensions (with or without leading dot)
/// Default: "" (empty, meaning use MIME type matching only)
/// Example: RUSTFS_COMPRESS_EXTENSIONS=.txt,.log,.csv,.json,.xml,.html,.css,.js
pub const ENV_COMPRESS_EXTENSIONS: &str = "RUSTFS_COMPRESS_EXTENSIONS";

/// Default file extensions for compression
/// Empty by default - relies on MIME type matching
pub const DEFAULT_COMPRESS_EXTENSIONS: &str = "";

/// Environment variable for MIME types that should be compressed
/// Comma-separated list of MIME types, supports wildcard (*) for subtypes
/// Default: "text/*,application/json,application/xml,application/javascript"
/// Example: RUSTFS_COMPRESS_MIME_TYPES=text/*,application/json,application/xml
pub const ENV_COMPRESS_MIME_TYPES: &str = "RUSTFS_COMPRESS_MIME_TYPES";

/// Default MIME types for compression
/// Includes common text-based content types that benefit from compression
pub const DEFAULT_COMPRESS_MIME_TYPES: &str = "text/*,application/json,application/xml,application/javascript";

/// Environment variable for minimum file size to apply compression
/// Files smaller than this size will not be compressed
/// Default: 1000 (bytes)
/// Example: RUSTFS_COMPRESS_MIN_SIZE=1000
pub const ENV_COMPRESS_MIN_SIZE: &str = "RUSTFS_COMPRESS_MIN_SIZE";

/// Default minimum file size for compression (in bytes)
/// Files smaller than 1000 bytes typically don't benefit from compression,
/// and the compression overhead may outweigh the benefits
pub const DEFAULT_COMPRESS_MIN_SIZE: u64 = 1000;
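
A hedged sketch of how these constants might be folded into a runtime view; the `CompressConfig` struct here is illustrative, not the crate's actual type:

```rust
use std::env;

/// Illustrative runtime view of the compression settings above.
struct CompressConfig {
    enabled: bool,
    extensions: Vec<String>,
    mime_types: Vec<String>,
    min_size: u64,
}

fn load_compress_config() -> CompressConfig {
    // Accept the documented truthy spellings: on, true, yes, 1.
    let truthy = |v: &str| matches!(v.to_ascii_lowercase().as_str(), "on" | "true" | "yes" | "1");
    let split_list = |v: String| -> Vec<String> {
        v.split(',').filter(|s| !s.is_empty()).map(str::to_string).collect()
    };
    CompressConfig {
        enabled: env::var(ENV_COMPRESS_ENABLE)
            .map(|v| truthy(&v))
            .unwrap_or(DEFAULT_COMPRESS_ENABLE),
        extensions: split_list(
            env::var(ENV_COMPRESS_EXTENSIONS).unwrap_or_else(|_| DEFAULT_COMPRESS_EXTENSIONS.to_string()),
        ),
        mime_types: split_list(
            env::var(ENV_COMPRESS_MIME_TYPES).unwrap_or_else(|_| DEFAULT_COMPRESS_MIME_TYPES.to_string()),
        ),
        min_size: env::var(ENV_COMPRESS_MIN_SIZE)
            .ok()
            .and_then(|v| v.parse().ok())
            .unwrap_or(DEFAULT_COMPRESS_MIN_SIZE),
    }
}
```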

@@ -16,7 +16,8 @@ pub const DEFAULT_DELIMITER: &str = "_";
pub const ENV_PREFIX: &str = "RUSTFS_";
pub const ENV_WORD_DELIMITER: &str = "_";

-pub const DEFAULT_DIR: &str = "/opt/rustfs/events"; // Default directory for event store
+pub const EVENT_DEFAULT_DIR: &str = "/opt/rustfs/events"; // Default directory for event store
+pub const AUDIT_DEFAULT_DIR: &str = "/opt/rustfs/audit"; // Default directory for audit store
pub const DEFAULT_LIMIT: u64 = 100000; // Default store limit

/// Standard config keys and values.

@@ -13,11 +13,14 @@
// limitations under the License.

pub(crate) mod app;
+pub(crate) mod body_limits;
+pub(crate) mod compress;
pub(crate) mod console;
pub(crate) mod env;
pub(crate) mod heal;
pub(crate) mod object;
pub(crate) mod profiler;
+pub(crate) mod protocols;
pub(crate) mod runtime;
pub(crate) mod targets;
pub(crate) mod tls;

40  crates/config/src/constants/protocols.rs  Normal file
@@ -0,0 +1,40 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Protocol server configuration constants

/// Default FTPS server bind address
pub const DEFAULT_FTPS_ADDRESS: &str = "0.0.0.0:8021";

/// Default SFTP server bind address
pub const DEFAULT_SFTP_ADDRESS: &str = "0.0.0.0:8022";

/// Default FTPS passive ports range (optional)
pub const DEFAULT_FTPS_PASSIVE_PORTS: Option<&str> = None;

/// Default FTPS external IP (auto-detected)
pub const DEFAULT_FTPS_EXTERNAL_IP: Option<&str> = None;

/// Environment variable names
pub const ENV_FTPS_ENABLE: &str = "RUSTFS_FTPS_ENABLE";
pub const ENV_FTPS_ADDRESS: &str = "RUSTFS_FTPS_ADDRESS";
pub const ENV_FTPS_CERTS_FILE: &str = "RUSTFS_FTPS_CERTS_FILE";
pub const ENV_FTPS_KEY_FILE: &str = "RUSTFS_FTPS_KEY_FILE";
pub const ENV_FTPS_PASSIVE_PORTS: &str = "RUSTFS_FTPS_PASSIVE_PORTS";
pub const ENV_FTPS_EXTERNAL_IP: &str = "RUSTFS_FTPS_EXTERNAL_IP";

pub const ENV_SFTP_ENABLE: &str = "RUSTFS_SFTP_ENABLE";
pub const ENV_SFTP_ADDRESS: &str = "RUSTFS_SFTP_ADDRESS";
pub const ENV_SFTP_HOST_KEY: &str = "RUSTFS_SFTP_HOST_KEY";
pub const ENV_SFTP_AUTHORIZED_KEYS: &str = "RUSTFS_SFTP_AUTHORIZED_KEYS";

@@ -39,3 +39,10 @@ pub const DEFAULT_MAX_IO_EVENTS_PER_TICK: usize = 1024;
/// Event polling default (Tokio default 61)
pub const DEFAULT_EVENT_INTERVAL: u32 = 61;
pub const DEFAULT_RNG_SEED: Option<u64> = None; // None means random

+/// Threshold for small-object seek support, in bytes.
+///
+/// When an object is smaller than this size, rustfs will provide seek support.
+///
+/// Default: 10 MB.
+pub const DEFAULT_OBJECT_SEEK_SUPPORT_THRESHOLD: usize = 10 * 1024 * 1024;

@@ -12,4 +12,75 @@
// See the License for the specific language governing permissions and
// limitations under the License.

/// TLS-related environment variable names and default values.
/// Environment variable to enable TLS key logging
/// When set to "1", RustFS will log TLS keys to the specified file for debugging purposes.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_TLS_KEYLOG=1
pub const ENV_TLS_KEYLOG: &str = "RUSTFS_TLS_KEYLOG";

/// Default value for TLS key logging
/// By default, RustFS does not log TLS keys.
/// To change this behavior, set the environment variable RUSTFS_TLS_KEYLOG=1
pub const DEFAULT_TLS_KEYLOG: bool = false;

/// Environment variable to trust system CA certificates
/// When set to "1", RustFS will trust system CA certificates in addition to any
/// custom CA certificates provided in the configuration.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_TRUST_SYSTEM_CA=1
pub const ENV_TRUST_SYSTEM_CA: &str = "RUSTFS_TRUST_SYSTEM_CA";

/// Default value for trusting system CA certificates
/// By default, RustFS does not trust system CA certificates.
/// To change this behavior, set the environment variable RUSTFS_TRUST_SYSTEM_CA=1
pub const DEFAULT_TRUST_SYSTEM_CA: bool = false;

/// Environment variable to trust leaf certificates as CA
/// When set to "1", RustFS will treat leaf certificates as CA certificates for trust validation.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_TRUST_LEAF_CERT_AS_CA=1
pub const ENV_TRUST_LEAF_CERT_AS_CA: &str = "RUSTFS_TRUST_LEAF_CERT_AS_CA";

/// Default value for trusting leaf certificates as CA
/// By default, RustFS does not trust leaf certificates as CA.
/// To change this behavior, set the environment variable RUSTFS_TRUST_LEAF_CERT_AS_CA=1
pub const DEFAULT_TRUST_LEAF_CERT_AS_CA: bool = false;

/// Default filename for client CA certificate
/// client_ca.crt (CA bundle for verifying client certificates in server mTLS)
pub const RUSTFS_CLIENT_CA_CERT_FILENAME: &str = "client_ca.crt";

/// Environment variable for client certificate file path
/// RUSTFS_MTLS_CLIENT_CERT
/// Specifies the file path to the client certificate used for mTLS authentication.
/// If not set, RustFS will look for the default filename "client_cert.pem" in the current directory.
/// To set, use the environment variable RUSTFS_MTLS_CLIENT_CERT=/path/to/client_cert.pem
pub const ENV_MTLS_CLIENT_CERT: &str = "RUSTFS_MTLS_CLIENT_CERT";

/// Default filename for client certificate
/// client_cert.pem
pub const RUSTFS_CLIENT_CERT_FILENAME: &str = "client_cert.pem";

/// Environment variable for client private key file path
/// RUSTFS_MTLS_CLIENT_KEY
/// Specifies the file path to the client private key used for mTLS authentication.
/// If not set, RustFS will look for the default filename "client_key.pem" in the current directory.
/// To set, use the environment variable RUSTFS_MTLS_CLIENT_KEY=/path/to/client_key.pem
pub const ENV_MTLS_CLIENT_KEY: &str = "RUSTFS_MTLS_CLIENT_KEY";

/// Default filename for client private key
/// client_key.pem
pub const RUSTFS_CLIENT_KEY_FILENAME: &str = "client_key.pem";

/// RUSTFS_SERVER_MTLS_ENABLE
/// Environment variable to enable server mTLS
/// When set to "1", the RustFS server will require client certificates for authentication.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_SERVER_MTLS_ENABLE=1
pub const ENV_SERVER_MTLS_ENABLE: &str = "RUSTFS_SERVER_MTLS_ENABLE";

/// Default value for enabling server mTLS
/// By default, RustFS server mTLS is disabled.
/// To change this behavior, set the environment variable RUSTFS_SERVER_MTLS_ENABLE=1
pub const DEFAULT_SERVER_MTLS_ENABLE: bool = false;
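
A sketch of resolving the client certificate and key paths from these variables, falling back to the default filenames under a given directory; the `certs_dir` argument is illustrative:

```rust
use std::env;
use std::path::{Path, PathBuf};

/// Resolve the client cert/key paths for outbound mTLS.
/// Environment variables win; otherwise fall back to the default filenames under `certs_dir`.
fn resolve_mtls_paths(certs_dir: &Path) -> (PathBuf, PathBuf) {
    let cert = env::var(ENV_MTLS_CLIENT_CERT)
        .map(PathBuf::from)
        .unwrap_or_else(|_| certs_dir.join(RUSTFS_CLIENT_CERT_FILENAME));
    let key = env::var(ENV_MTLS_CLIENT_KEY)
        .map(PathBuf::from)
        .unwrap_or_else(|_| certs_dir.join(RUSTFS_CLIENT_KEY_FILENAME));
    (cert, key)
}
```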

@@ -17,6 +17,10 @@ pub mod constants;
#[cfg(feature = "constants")]
pub use constants::app::*;
+#[cfg(feature = "constants")]
+pub use constants::body_limits::*;
+#[cfg(feature = "constants")]
+pub use constants::compress::*;
#[cfg(feature = "constants")]
pub use constants::console::*;
#[cfg(feature = "constants")]
pub use constants::env::*;

@@ -27,6 +31,8 @@ pub use constants::object::*;
#[cfg(feature = "constants")]
pub use constants::profiler::*;
+#[cfg(feature = "constants")]
+pub use constants::protocols::*;
#[cfg(feature = "constants")]
pub use constants::runtime::*;
#[cfg(feature = "constants")]
pub use constants::targets::*;

@@ -24,13 +24,45 @@ pub use webhook::*;

use crate::DEFAULT_DELIMITER;

// --- Configuration Constants ---
/// Default target identifier for notifications.
/// Used by the notification system when no specific target is provided;
/// represents the default target stream or endpoint in that case.
pub const DEFAULT_TARGET: &str = "1";

/// Notification prefix for routing and identification.
/// This prefix is used when constructing notification-related routes and identifiers within the system.
pub const NOTIFY_PREFIX: &str = "notify";

/// Notification route prefix: the notification prefix followed by the default delimiter.
/// Used by the notification system for defining notification-related routes.
/// Example: "notify_"
pub const NOTIFY_ROUTE_PREFIX: &str = const_str::concat!(NOTIFY_PREFIX, DEFAULT_DELIMITER);

/// Name of the environment variable that configures target stream concurrency.
/// Controls how many target streams are processed in parallel by the notification system.
/// Defaults to [`DEFAULT_NOTIFY_TARGET_STREAM_CONCURRENCY`] if not set.
/// Example: `RUSTFS_NOTIFY_TARGET_STREAM_CONCURRENCY=20`.
pub const ENV_NOTIFY_TARGET_STREAM_CONCURRENCY: &str = "RUSTFS_NOTIFY_TARGET_STREAM_CONCURRENCY";

/// Default concurrency for target stream processing in the notification system.
/// This value is used if the environment variable `RUSTFS_NOTIFY_TARGET_STREAM_CONCURRENCY` is not set.
/// It defines how many target streams can be processed in parallel by the notification system at any given time.
/// Adjust this value based on your system's capabilities and expected load.
pub const DEFAULT_NOTIFY_TARGET_STREAM_CONCURRENCY: usize = 20;

/// Name of the environment variable that configures send concurrency.
/// Controls how many send operations are processed in parallel by the notification system.
/// Defaults to [`DEFAULT_NOTIFY_SEND_CONCURRENCY`] if not set.
/// Example: `RUSTFS_NOTIFY_SEND_CONCURRENCY=64`.
pub const ENV_NOTIFY_SEND_CONCURRENCY: &str = "RUSTFS_NOTIFY_SEND_CONCURRENCY";

/// Default concurrency for send operations in the notification system.
/// This value is used if the environment variable `RUSTFS_NOTIFY_SEND_CONCURRENCY` is not set.
/// It defines how many send operations can be processed in parallel by the notification system at any given time.
/// Adjust this value based on your system's capabilities and expected load.
pub const DEFAULT_NOTIFY_SEND_CONCURRENCY: usize = 64;

#[allow(dead_code)]
pub const NOTIFY_SUB_SYSTEMS: &[&str] = &[NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS];
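
A small sketch of reading one of these knobs with its documented fallback; the same shape applies to the send-concurrency variable:

```rust
use std::env;

/// Read RUSTFS_NOTIFY_TARGET_STREAM_CONCURRENCY, falling back to the default
/// when the variable is unset or fails to parse.
fn target_stream_concurrency() -> usize {
    env::var(ENV_NOTIFY_TARGET_STREAM_CONCURRENCY)
        .ok()
        .and_then(|v| v.parse::<usize>().ok())
        .filter(|&n| n > 0) // guard against a zero value disabling processing entirely
        .unwrap_or(DEFAULT_NOTIFY_TARGET_STREAM_CONCURRENCY)
}
```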
|
||||
|
||||
|
||||
@@ -15,5 +15,5 @@
|
||||
pub const DEFAULT_EXT: &str = ".unknown"; // Default file extension
|
||||
pub const COMPRESS_EXT: &str = ".snappy"; // Extension for compressed files
|
||||
|
||||
/// STORE_EXTENSION - file extension of an event file in store
|
||||
pub const STORE_EXTENSION: &str = ".event";
|
||||
/// NOTIFY_STORE_EXTENSION - file extension of an event file in store
|
||||
pub const NOTIFY_STORE_EXTENSION: &str = ".event";
|
||||
|
||||
21
crates/credentials/Cargo.toml
Normal file
21
crates/credentials/Cargo.toml
Normal file
@@ -0,0 +1,21 @@
|
||||
[package]
|
||||
name = "rustfs-credentials"
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
version.workspace = true
|
||||
homepage.workspace = true
|
||||
description = "Credentials management utilities for RustFS, enabling secure handling of authentication and authorization data."
|
||||
keywords = ["rustfs", "Minio", "credentials", "authentication", "authorization"]
|
||||
categories = ["web-programming", "development-tools", "data-structures", "security"]
|
||||
|
||||
[dependencies]
|
||||
base64-simd = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json.workspace = true
|
||||
time = { workspace = true, features = ["serde-human-readable"] }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
44
crates/credentials/README.md
Normal file
44
crates/credentials/README.md
Normal file
@@ -0,0 +1,44 @@
|
||||
[](https://rustfs.com)
|
||||
|
||||
# RustFS Credentials - Credential Management Module
|
||||
|
||||
<p align="center">
|
||||
<strong>A module for managing credentials within the RustFS distributed object storage system.</strong>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
|
||||
<a href="https://docs.rustfs.com/">📖 Documentation</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
This module provides a secure and efficient way to handle various types of credentials,
|
||||
such as API keys, access tokens, and cryptographic keys, required for interacting with
|
||||
the RustFS ecosystem and external services.
|
||||
|
||||
## 📖 Overview
|
||||
|
||||
**RustFS Credentials** is a module dedicated to managing credentials for the [RustFS](https://rustfs.com) distributed
|
||||
object storage system. For the complete RustFS experience,
|
||||
please visit the [main RustFS repository](https://github.com/rustfs/rustfs)
|
||||
|
||||
## ✨ Features
|
||||
|
||||
- Secure storage and retrieval of credentials
|
||||
- Support for multiple credential types (API keys, tokens, etc.)
|
||||
- Encryption of sensitive credential data
|
||||
- Integration with external secret management systems
|
||||
- Easy-to-use API for credential management
|
||||
- Credential rotation and expiration handling
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
For comprehensive documentation, examples, and usage guides, please visit the
|
||||
main [RustFS repository](https://github.com/rustfs/rustfs).
|
||||
|
||||
## 📄 License
|
||||
|
||||
This project is licensed under the Apache License 2.0 - see the [LICENSE](../../LICENSE) file for details.
|
||||
crates/credentials/src/constants.rs (new file, 94 lines)
@@ -0,0 +1,94 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// Default Access Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_ACCESS_KEY
/// Command line argument: --access-key
/// Example: RUSTFS_ACCESS_KEY=rustfsadmin
/// Example: --access-key rustfsadmin
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
/// Default Secret Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_SECRET_KEY
/// Command line argument: --secret-key
/// Example: RUSTFS_SECRET_KEY=rustfsadmin
/// Example: --secret-key rustfsadmin
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";

/// Environment variable for the RPC authentication token
/// Used to set the authentication token for RPC communication
/// Example: RUSTFS_RPC_SECRET=your_token_here
/// Default value: no default value; the RUSTFS_SECRET_KEY value is recommended.
pub const ENV_RPC_SECRET: &str = "RUSTFS_RPC_SECRET";

/// IAM Policy Types
/// Used to differentiate between embedded and inherited policies
/// Example: "embedded-policy" or "inherited-policy"
/// Default value: "embedded-policy"
pub const EMBEDDED_POLICY_TYPE: &str = "embedded-policy";

/// IAM Policy Types
/// Used to differentiate between embedded and inherited policies
/// Example: "embedded-policy" or "inherited-policy"
/// Default value: "inherited-policy"
pub const INHERITED_POLICY_TYPE: &str = "inherited-policy";

/// IAM Policy Claim Name for Service Account
/// Used to identify the service account policy claim in JWT tokens
/// Example: "sa-policy"
/// Default value: "sa-policy"
pub const IAM_POLICY_CLAIM_NAME_SA: &str = "sa-policy";

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_security_constants() {
        // Test security-related constants
        assert_eq!(DEFAULT_ACCESS_KEY, "rustfsadmin");
        assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");

        assert_eq!(DEFAULT_SECRET_KEY, "rustfsadmin");
        assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");

        // In a production environment, the access key and secret key should be different.
        // These are default values, so being the same is acceptable, but documentation should warn about it.
        println!("Warning: Default access key and secret key are the same. Change them in production!");
    }

    #[test]
    fn test_security_best_practices() {
        // Test security best practices

        // These are default values and should be changed in production environments
        println!("Security Warning: Default credentials detected!");
        println!("Access Key: {DEFAULT_ACCESS_KEY}");
        println!("Secret Key: {DEFAULT_SECRET_KEY}");
        println!("These should be changed in production environments!");

        // Verify that key lengths meet minimum security requirements
        assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
        assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");

        // Check if default credentials contain common insecure patterns
        let _insecure_patterns = ["admin", "password", "123456", "default"];
        let _access_key_lower = DEFAULT_ACCESS_KEY.to_lowercase();
        let _secret_key_lower = DEFAULT_SECRET_KEY.to_lowercase();

        // Note: more security-check logic can be added here,
        // for example, checking whether the keys contain insecure patterns.
    }
}
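The defaults above are meant to be overridden at deploy time. A minimal sketch of the documented precedence (environment variable over built-in default); the `resolve_access_key` helper is hypothetical and not part of the crate:

```rust
use std::env;

use rustfs_credentials::DEFAULT_ACCESS_KEY;

// Hypothetical helper: prefer the RUSTFS_ACCESS_KEY environment variable,
// falling back to the built-in default documented above.
fn resolve_access_key() -> String {
    env::var("RUSTFS_ACCESS_KEY").unwrap_or_else(|_| DEFAULT_ACCESS_KEY.to_string())
}

fn main() {
    println!("effective access key: {}", resolve_access_key());
}
```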
crates/credentials/src/credentials.rs (new file, 386 lines)
@@ -0,0 +1,386 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{DEFAULT_SECRET_KEY, ENV_RPC_SECRET, IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
use rand::{Rng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::env;
use std::io::Error;
use std::sync::OnceLock;
use time::OffsetDateTime;

/// Global active credentials
static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();

/// Global RPC authentication token
pub static GLOBAL_RUSTFS_RPC_SECRET: OnceLock<String> = OnceLock::new();

/// Initialize the global action credentials
///
/// # Arguments
/// * `ak` - Optional access key
/// * `sk` - Optional secret key
///
/// # Returns
/// * `Result<(), Box<Credentials>>` - Ok if successful, Err with existing credentials if already initialized
///
/// # Panics
/// This function panics if automatic credential generation fails when `ak` or `sk`
/// are `None`, for example if the random number generator fails while calling
/// `gen_access_key` or `gen_secret_key`.
pub fn init_global_action_credentials(ak: Option<String>, sk: Option<String>) -> Result<(), Box<Credentials>> {
    let ak = ak.unwrap_or_else(|| gen_access_key(20).expect("Failed to generate access key"));
    let sk = sk.unwrap_or_else(|| gen_secret_key(32).expect("Failed to generate secret key"));

    let cred = Credentials {
        access_key: ak,
        secret_key: sk,
        ..Default::default()
    };

    GLOBAL_ACTIVE_CRED.set(cred).map_err(|e| {
        Box::new(Credentials {
            access_key: e.access_key.clone(),
            ..Default::default()
        })
    })
}

/// Get the global action credentials
pub fn get_global_action_cred() -> Option<Credentials> {
    GLOBAL_ACTIVE_CRED.get().cloned()
}

/// Get the global secret key
///
/// # Returns
/// * `Option<String>` - The global secret key, if set
pub fn get_global_secret_key_opt() -> Option<String> {
    GLOBAL_ACTIVE_CRED.get().map(|cred| cred.secret_key.clone())
}

/// Get the global secret key
///
/// # Returns
/// * `String` - The global secret key, or an empty string if not set
pub fn get_global_secret_key() -> String {
    GLOBAL_ACTIVE_CRED
        .get()
        .map(|cred| cred.secret_key.clone())
        .unwrap_or_default()
}

/// Get the global access key
///
/// # Returns
/// * `Option<String>` - The global access key, if set
pub fn get_global_access_key_opt() -> Option<String> {
    GLOBAL_ACTIVE_CRED.get().map(|cred| cred.access_key.clone())
}

/// Get the global access key
///
/// # Returns
/// * `String` - The global access key, or an empty string if not set
pub fn get_global_access_key() -> String {
    GLOBAL_ACTIVE_CRED
        .get()
        .map(|cred| cred.access_key.clone())
        .unwrap_or_default()
}

/// Generates a random access key of the specified length.
///
/// # Arguments
/// * `length` - The length of the access key to generate
///
/// # Returns
/// * `Result<String>` - A result containing the generated access key or an error if the length is too short
///
/// # Errors
/// This function will return an error if the specified length is less than 3.
///
/// # Examples
/// ```no_run
/// use rustfs_credentials::gen_access_key;
///
/// let access_key = gen_access_key(16).unwrap();
/// println!("Generated access key: {}", access_key);
/// ```
pub fn gen_access_key(length: usize) -> std::io::Result<String> {
    const ALPHA_NUMERIC_TABLE: [char; 36] = [
        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
    ];

    if length < 3 {
        return Err(Error::other("access key length is too short"));
    }

    let mut result = String::with_capacity(length);
    let mut rng = rand::rng();

    for _ in 0..length {
        result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]);
    }

    Ok(result)
}

/// Generates a random secret key of the specified length.
///
/// # Arguments
/// * `length` - The length of the secret key to generate
///
/// # Returns
/// * `Result<String>` - A result containing the generated secret key or an error if the length is too short
///
/// # Errors
/// This function will return an error if the specified length is less than 8.
///
/// # Examples
/// ```no_run
/// use rustfs_credentials::gen_secret_key;
///
/// let secret_key = gen_secret_key(32).unwrap();
/// println!("Generated secret key: {}", secret_key);
/// ```
pub fn gen_secret_key(length: usize) -> std::io::Result<String> {
    use base64_simd::URL_SAFE_NO_PAD;

    if length < 8 {
        return Err(Error::other("secret key length is too short"));
    }
    let mut rng = rand::rng();

    let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)];
    rng.fill_bytes(&mut key);

    let encoded = URL_SAFE_NO_PAD.encode_to_string(&key);
    let key_str = encoded.replace("/", "+");

    Ok(key_str)
}

/// Get the RPC authentication token from the environment variable
///
/// # Returns
/// * `String` - The RPC authentication token
pub fn get_rpc_token() -> String {
    GLOBAL_RUSTFS_RPC_SECRET
        .get_or_init(|| {
            env::var(ENV_RPC_SECRET)
                .unwrap_or_else(|_| get_global_secret_key_opt().unwrap_or_else(|| DEFAULT_SECRET_KEY.to_string()))
        })
        .clone()
}

/// Credentials structure
///
/// Fields:
/// - access_key: Access key string
/// - secret_key: Secret key string
/// - session_token: Session token string
/// - expiration: Optional expiration time as OffsetDateTime
/// - status: Status string (e.g., "active", "off")
/// - parent_user: Parent user string
/// - groups: Optional list of groups
/// - claims: Optional map of claims
/// - name: Optional name string
/// - description: Optional description string
#[derive(Serialize, Deserialize, Clone, Default, Debug)]
pub struct Credentials {
    pub access_key: String,
    pub secret_key: String,
    pub session_token: String,
    pub expiration: Option<OffsetDateTime>,
    pub status: String,
    pub parent_user: String,
    pub groups: Option<Vec<String>>,
    pub claims: Option<HashMap<String, Value>>,
    pub name: Option<String>,
    pub description: Option<String>,
}

impl Credentials {
    pub fn is_expired(&self) -> bool {
        if self.expiration.is_none() {
            return false;
        }

        self.expiration
            .as_ref()
            .map(|e| OffsetDateTime::now_utc() > *e)
            .unwrap_or(false)
    }

    pub fn is_temp(&self) -> bool {
        !self.session_token.is_empty() && !self.is_expired()
    }

    pub fn is_service_account(&self) -> bool {
        self.claims
            .as_ref()
            .map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|_| !self.parent_user.is_empty()))
            .unwrap_or_default()
    }

    pub fn is_implied_policy(&self) -> bool {
        if self.is_service_account() {
            return self
                .claims
                .as_ref()
                .map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|v| v == INHERITED_POLICY_TYPE))
                .unwrap_or_default();
        }

        false
    }

    pub fn is_valid(&self) -> bool {
        if self.status == "off" {
            return false;
        }

        self.access_key.len() >= 3 && self.secret_key.len() >= 8 && !self.is_expired()
    }

    pub fn is_owner(&self) -> bool {
        false
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
    use time::Duration;

    #[test]
    fn test_credentials_is_expired() {
        let mut cred = Credentials::default();
        assert!(!cred.is_expired());

        cred.expiration = Some(OffsetDateTime::now_utc() + Duration::hours(1));
        assert!(!cred.is_expired());

        cred.expiration = Some(OffsetDateTime::now_utc() - Duration::hours(1));
        assert!(cred.is_expired());
    }

    #[test]
    fn test_credentials_is_temp() {
        let mut cred = Credentials::default();
        assert!(!cred.is_temp());

        cred.session_token = "token".to_string();
        assert!(cred.is_temp());

        cred.expiration = Some(OffsetDateTime::now_utc() - Duration::hours(1));
        assert!(!cred.is_temp());
    }

    #[test]
    fn test_credentials_is_service_account() {
        let mut cred = Credentials::default();
        assert!(!cred.is_service_account());

        let mut claims = HashMap::new();
        claims.insert(IAM_POLICY_CLAIM_NAME_SA.to_string(), Value::String("policy".to_string()));
        cred.claims = Some(claims);
        cred.parent_user = "parent".to_string();
        assert!(cred.is_service_account());
    }

    #[test]
    fn test_credentials_is_implied_policy() {
        let mut cred = Credentials::default();
        assert!(!cred.is_implied_policy());

        let mut claims = HashMap::new();
        claims.insert(IAM_POLICY_CLAIM_NAME_SA.to_string(), Value::String(INHERITED_POLICY_TYPE.to_string()));
        cred.claims = Some(claims);
        cred.parent_user = "parent".to_string();
        assert!(cred.is_implied_policy());
    }

    #[test]
    fn test_credentials_is_valid() {
        let mut cred = Credentials::default();
        assert!(!cred.is_valid());

        cred.access_key = "abc".to_string();
        cred.secret_key = "12345678".to_string();
        assert!(cred.is_valid());

        cred.status = "off".to_string();
        assert!(!cred.is_valid());
    }

    #[test]
    fn test_credentials_is_owner() {
        let cred = Credentials::default();
        assert!(!cred.is_owner());
    }

    #[test]
    fn test_global_credentials_flow() {
        // Since OnceLock can only be set once, we group all tests of the global state together.
        // If it has already been initialized (possibly by other tests), we verify the results directly.
        if get_global_action_cred().is_none() {
            // Verify that the initial state is empty
            assert!(get_global_access_key_opt().is_none());
            assert_eq!(get_global_access_key(), "");
            assert!(get_global_secret_key_opt().is_none());
            assert_eq!(get_global_secret_key(), "");

            // Initialize
            let test_ak = "test_access_key".to_string();
            let test_sk = "test_secret_key_123456".to_string();
            init_global_action_credentials(Some(test_ak.clone()), Some(test_sk.clone())).ok();
        }

        // Verify the state after initialization
        let cred = get_global_action_cred().expect("Global credentials should be set");
        assert!(!cred.access_key.is_empty());
        assert!(!cred.secret_key.is_empty());

        assert!(get_global_access_key_opt().is_some());
        assert!(!get_global_access_key().is_empty());
        assert!(get_global_secret_key_opt().is_some());
        assert!(!get_global_secret_key().is_empty());
    }

    #[test]
    fn test_init_global_credentials_auto_gen() {
        // If the global state hasn't been initialized yet, test the automatic generation logic
        if get_global_action_cred().is_none() {
            init_global_action_credentials(None, None).ok();
            let ak = get_global_access_key();
            let sk = get_global_secret_key();
            assert_eq!(ak.len(), 20);
            assert_eq!(sk.len(), 32);
        }
    }
}
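The token resolution in `get_rpc_token` above follows a fixed fallback chain; a minimal sketch of calling it (behavior as defined in this file):

```rust
use rustfs_credentials::get_rpc_token;

fn main() {
    // Resolution order (cached in a OnceLock after the first call):
    // 1. the RUSTFS_RPC_SECRET environment variable
    // 2. the globally initialized secret key, if set
    // 3. DEFAULT_SECRET_KEY
    let token = get_rpc_token();
    println!("rpc token resolved ({} bytes)", token.len());
}
```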
crates/credentials/src/lib.rs (new file, 19 lines)
@@ -0,0 +1,19 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod constants;
mod credentials;

pub use constants::*;
pub use credentials::*;
@@ -30,7 +30,7 @@ workspace = true
 
 [dependencies]
 aes-gcm = { workspace = true, optional = true }
-argon2 = { workspace = true, features = ["std"], optional = true }
+argon2 = { workspace = true, optional = true }
 cfg-if = { workspace = true }
 chacha20poly1305 = { workspace = true, optional = true }
 jsonwebtoken = { workspace = true }
@@ -8,7 +8,7 @@
 
 <p align="center">
   <a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
-  <a href="https://docs.rustfs.com/en/">📖 Documentation</a>
+  <a href="https://docs.rustfs.com/">📖 Documentation</a>
   · <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
   · <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
 </p>
@@ -25,6 +25,7 @@ workspace = true
 
 [dependencies]
 rustfs-ecstore.workspace = true
+rustfs-common.workspace = true
 flatbuffers.workspace = true
 futures.workspace = true
 rustfs-lock.workspace = true
@@ -49,4 +50,9 @@ uuid = { workspace = true }
 base64 = { workspace = true }
 rand = { workspace = true }
 chrono = { workspace = true }
-md5 = { workspace = true }
+md5 = { workspace = true }
+suppaftp.workspace = true
+rcgen.workspace = true
+anyhow.workspace = true
+rustls.workspace = true
+rustls-pemfile.workspace = true
@@ -34,8 +34,8 @@ use tracing::{error, info, warn};
 use uuid::Uuid;
 
 // Common constants for all E2E tests
-pub const DEFAULT_ACCESS_KEY: &str = "minioadmin";
-pub const DEFAULT_SECRET_KEY: &str = "minioadmin";
+pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
+pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";
 pub const TEST_BUCKET: &str = "e2e-test-bucket";
 pub fn workspace_root() -> PathBuf {
     let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));

@@ -165,7 +165,7 @@ impl RustFSTestEnvironment {
     }
 
     /// Find an available port for the test
-    async fn find_available_port() -> Result<u16, Box<dyn std::error::Error + Send + Sync>> {
+    pub async fn find_available_port() -> Result<u16, Box<dyn std::error::Error + Send + Sync>> {
         use std::net::TcpListener;
         let listener = TcpListener::bind("127.0.0.1:0")?;
         let port = listener.local_addr()?.port();

@@ -178,11 +178,11 @@ impl RustFSTestEnvironment {
         info!("Cleaning up any existing RustFS processes");
         let output = Command::new("pkill").args(["-f", "rustfs"]).output();
 
-        if let Ok(output) = output {
-            if output.status.success() {
-                info!("Killed existing RustFS processes");
-                sleep(Duration::from_millis(1000)).await;
-            }
+        if let Ok(output) = output
+            && output.status.success()
+        {
+            info!("Killed existing RustFS processes");
+            sleep(Duration::from_millis(1000)).await;
         }
         Ok(())
     }

@@ -327,7 +327,8 @@ pub async fn execute_awscurl(
 
     if !output.status.success() {
         let stderr = String::from_utf8_lossy(&output.stderr);
-        return Err(format!("awscurl failed: {stderr}").into());
+        let stdout = String::from_utf8_lossy(&output.stdout);
+        return Err(format!("awscurl failed: stderr='{stderr}', stdout='{stdout}'").into());
     }
 
     let response = String::from_utf8_lossy(&output.stdout).to_string();

@@ -352,3 +353,13 @@ pub async fn awscurl_get(
 ) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
     execute_awscurl(url, "GET", None, access_key, secret_key).await
 }
+
+/// Helper function for PUT requests
+pub async fn awscurl_put(
+    url: &str,
+    body: &str,
+    access_key: &str,
+    secret_key: &str,
+) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
+    execute_awscurl(url, "PUT", Some(body), access_key, secret_key).await
+}
crates/e2e_test/src/content_encoding_test.rs (new file, 85 lines)
@@ -0,0 +1,85 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! End-to-end test for Content-Encoding header handling
//!
//! Tests that the Content-Encoding header is correctly stored during PUT
//! and returned in GET/HEAD responses. This is important for clients that
//! upload pre-compressed content and rely on the header for decompression.

#[cfg(test)]
mod tests {
    use crate::common::{RustFSTestEnvironment, init_logging};
    use aws_sdk_s3::primitives::ByteStream;
    use serial_test::serial;
    use tracing::info;

    /// Verify the Content-Encoding header roundtrips through PUT, GET, and HEAD operations
    #[tokio::test]
    #[serial]
    async fn test_content_encoding_roundtrip() {
        init_logging();
        info!("Starting Content-Encoding roundtrip test");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = env.create_s3_client();
        let bucket = "content-encoding-test";
        let key = "logs/app.log.zst";
        let content = b"2024-01-15 10:23:45 INFO Application started\n2024-01-15 10:23:46 DEBUG Loading config\n";

        client
            .create_bucket()
            .bucket(bucket)
            .send()
            .await
            .expect("Failed to create bucket");

        info!("Uploading object with Content-Encoding: zstd");
        client
            .put_object()
            .bucket(bucket)
            .key(key)
            .content_type("text/plain")
            .content_encoding("zstd")
            .body(ByteStream::from_static(content))
            .send()
            .await
            .expect("PUT failed");

        info!("Verifying GET response includes Content-Encoding");
        let get_resp = client.get_object().bucket(bucket).key(key).send().await.expect("GET failed");

        assert_eq!(get_resp.content_encoding(), Some("zstd"), "GET should return Content-Encoding: zstd");
        assert_eq!(get_resp.content_type(), Some("text/plain"), "GET should return correct Content-Type");

        let body = get_resp.body.collect().await.unwrap().into_bytes();
        assert_eq!(body.as_ref(), content, "Body content mismatch");

        info!("Verifying HEAD response includes Content-Encoding");
        let head_resp = client
            .head_object()
            .bucket(bucket)
            .key(key)
            .send()
            .await
            .expect("HEAD failed");

        assert_eq!(head_resp.content_encoding(), Some("zstd"), "HEAD should return Content-Encoding: zstd");
        assert_eq!(head_resp.content_type(), Some("text/plain"), "HEAD should return correct Content-Type");

        env.stop_server();
    }
}
crates/e2e_test/src/data_usage_test.rs (new file, 73 lines)
@@ -0,0 +1,73 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use aws_sdk_s3::primitives::ByteStream;
use rustfs_common::data_usage::DataUsageInfo;
use serial_test::serial;

use crate::common::{RustFSTestEnvironment, TEST_BUCKET, awscurl_get, init_logging};

/// Regression test for data usage accuracy (issue #1012).
/// Launches rustfs, writes 1000 objects, then asserts the admin data usage API reports the full count.
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server and requires awscurl; enable when running full E2E"]
async fn data_usage_reports_all_objects() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    init_logging();

    let mut env = RustFSTestEnvironment::new().await?;
    env.start_rustfs_server(vec![]).await?;

    let client = env.create_s3_client();

    // Create bucket and upload objects
    client.create_bucket().bucket(TEST_BUCKET).send().await?;

    for i in 0..1000 {
        let key = format!("obj-{i:04}");
        client
            .put_object()
            .bucket(TEST_BUCKET)
            .key(key)
            .body(ByteStream::from_static(b"hello-world"))
            .send()
            .await?;
    }

    // Query the admin data usage API
    let url = format!("{}/rustfs/admin/v3/datausageinfo", env.url);
    let resp = awscurl_get(&url, &env.access_key, &env.secret_key).await?;
    let usage: DataUsageInfo = serde_json::from_str(&resp)?;

    // Assert that the total object count and per-bucket count are not truncated
    let bucket_usage = usage
        .buckets_usage
        .get(TEST_BUCKET)
        .cloned()
        .expect("bucket usage should exist");

    assert!(
        usage.objects_total_count >= 1000,
        "total object count should be at least 1000, got {}",
        usage.objects_total_count
    );
    assert!(
        bucket_usage.objects_count >= 1000,
        "bucket object count should be at least 1000, got {}",
        bucket_usage.objects_count
    );

    env.stop_server();
    Ok(())
}
@@ -406,11 +406,11 @@ impl VaultTestEnvironment {
         let port_check = TcpStream::connect(VAULT_ADDRESS).await.is_ok();
         if port_check {
             // Additional check by making a health request
-            if let Ok(response) = reqwest::get(&format!("{VAULT_URL}/v1/sys/health")).await {
-                if response.status().is_success() {
-                    info!("Vault server is ready after {} seconds", i);
-                    return Ok(());
-                }
+            if let Ok(response) = reqwest::get(&format!("{VAULT_URL}/v1/sys/health")).await
+                && response.status().is_success()
+            {
+                info!("Vault server is ready after {} seconds", i);
+                return Ok(());
             }
         }
 
@@ -18,6 +18,28 @@ mod reliant;
 #[cfg(test)]
 pub mod common;
 
+#[cfg(test)]
+mod version_id_regression_test;
+
+// Data usage regression tests
+#[cfg(test)]
+mod data_usage_test;
+
+// KMS-specific test modules
+#[cfg(test)]
+mod kms;
+
+// Special characters in path test modules
+#[cfg(test)]
+mod special_chars_test;
+
+// Content-Encoding header preservation test
+#[cfg(test)]
+mod content_encoding_test;
+
+// Policy variables tests
+#[cfg(test)]
+mod policy;
+
 #[cfg(test)]
 mod protocols;
 
crates/e2e_test/src/policy/README.md (new file, 39 lines)
@@ -0,0 +1,39 @@
# RustFS Policy Variables Tests

This directory contains comprehensive end-to-end tests for AWS IAM policy variables in RustFS.

## Test Overview

The tests cover the following AWS policy variable scenarios (a sample policy follows the list):

1. **Single-value variables** - Basic variable resolution like `${aws:username}`
2. **Multi-value variables** - Variables that can have multiple values
3. **Variable concatenation** - Combining variables with static text like `prefix-${aws:username}-suffix`
4. **Nested variables** - Complex nested variable patterns like `${${aws:username}-test}`
5. **Deny scenarios** - Testing deny policies with variables
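For orientation, here is a condensed sketch of the kind of policy document these tests install via the admin API. The actual tests build theirs inline with `serde_json::json!`; this standalone helper is illustrative only:

```rust
// Condensed sketch (mirroring the test code) of a per-user policy.
// `${aws:username}` resolves per request to the calling user's name,
// so each user is confined to buckets bearing their own prefix.
fn sample_policy() -> serde_json::Value {
    serde_json::json!({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": ["s3:CreateBucket", "s3:ListBucket"],
                "Resource": ["arn:aws:s3:::${aws:username}-*"]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:PutObject", "s3:GetObject"],
                "Resource": ["arn:aws:s3:::${aws:username}-*/*"]
            }
        ]
    })
}
```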
## Prerequisites

- RustFS server binary
- `awscurl` utility for admin API calls
- AWS SDK for Rust (included in the project)

## Running Tests

### Run All Policy Tests Using Unified Test Runner

```bash
# Run all policy tests with comprehensive reporting
# Note: Requires a RustFS server running on localhost:9000
cargo test -p e2e_test policy::test_runner::test_policy_full_suite -- --nocapture --ignored --test-threads=1

# Run only critical policy tests
cargo test -p e2e_test policy::test_runner::test_policy_critical_suite -- --nocapture --ignored --test-threads=1
```

### Run All Policy Tests

```bash
# From the project root directory
cargo test -p e2e_test policy:: -- --nocapture --ignored --test-threads=1
```
crates/e2e_test/src/policy/mod.rs (new file, 22 lines)
@@ -0,0 +1,22 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Policy-specific tests for RustFS
//!
//! This module provides comprehensive tests for AWS IAM policy variables,
//! including single-value, multi-value, and nested variable scenarios.

mod policy_variables_test;
mod test_env;
mod test_runner;
crates/e2e_test/src/policy/policy_variables_test.rs (new file, 798 lines)
@@ -0,0 +1,798 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Tests for AWS IAM policy variables with single-value, multi-value, and nested scenarios

use crate::common::{awscurl_put, init_logging};
use crate::policy::test_env::PolicyTestEnvironment;
use aws_sdk_s3::primitives::ByteStream;
use serial_test::serial;
use tracing::info;

/// Helper function to create a regular user with the given credentials
async fn create_user(
    env: &PolicyTestEnvironment,
    username: &str,
    password: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let create_user_body = serde_json::json!({
        "secretKey": password,
        "status": "enabled"
    })
    .to_string();

    let create_user_url = format!("{}/rustfs/admin/v3/add-user?accessKey={}", env.url, username);
    awscurl_put(&create_user_url, &create_user_body, &env.access_key, &env.secret_key).await?;
    Ok(())
}

/// Helper function to create an STS user with the given credentials
async fn create_sts_user(
    env: &PolicyTestEnvironment,
    username: &str,
    password: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // For STS, we create a regular user first, then use it to assume roles
    create_user(env, username, password).await?;
    Ok(())
}

/// Helper function to create and attach a policy
async fn create_and_attach_policy(
    env: &PolicyTestEnvironment,
    policy_name: &str,
    username: &str,
    policy_document: serde_json::Value,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let policy_string = policy_document.to_string();

    // Create policy
    let add_policy_url = format!("{}/rustfs/admin/v3/add-canned-policy?name={}", env.url, policy_name);
    awscurl_put(&add_policy_url, &policy_string, &env.access_key, &env.secret_key).await?;

    // Attach policy to user
    let attach_policy_url = format!(
        "{}/rustfs/admin/v3/set-user-or-group-policy?policyName={}&userOrGroup={}&isGroup=false",
        env.url, policy_name, username
    );
    awscurl_put(&attach_policy_url, "", &env.access_key, &env.secret_key).await?;
    Ok(())
}

/// Helper function to clean up test resources
async fn cleanup_user_and_policy(env: &PolicyTestEnvironment, username: &str, policy_name: &str) {
    // Create an admin client for cleanup
    let admin_client = env.create_s3_client(&env.access_key, &env.secret_key);

    // Delete buckets that might have been created by this user
    let bucket_patterns = [
        format!("{username}-test-bucket"),
        format!("{username}-bucket1"),
        format!("{username}-bucket2"),
        format!("{username}-bucket3"),
        format!("prefix-{username}-suffix"),
        format!("{username}-test"),
        format!("{username}-sts-bucket"),
        format!("{username}-service-bucket"),
        "private-test-bucket".to_string(), // For the deny test
    ];

    // Try to delete objects and buckets
    for bucket_name in &bucket_patterns {
        let _ = admin_client
            .delete_object()
            .bucket(bucket_name)
            .key("test-object.txt")
            .send()
            .await;
        let _ = admin_client
            .delete_object()
            .bucket(bucket_name)
            .key("test-sts-object.txt")
            .send()
            .await;
        let _ = admin_client
            .delete_object()
            .bucket(bucket_name)
            .key("test-service-object.txt")
            .send()
            .await;
        let _ = admin_client.delete_bucket().bucket(bucket_name).send().await;
    }

    // Remove user
    let remove_user_url = format!("{}/rustfs/admin/v3/remove-user?accessKey={}", env.url, username);
    let _ = awscurl_put(&remove_user_url, "", &env.access_key, &env.secret_key).await;

    // Remove policy
    let remove_policy_url = format!("{}/rustfs/admin/v3/remove-canned-policy?name={}", env.url, policy_name);
    let _ = awscurl_put(&remove_policy_url, "", &env.access_key, &env.secret_key).await;
}

/// Test AWS policy variables with single-value scenarios
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_single_value() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    test_aws_policy_variables_single_value_impl().await
}

/// Implementation function for the single-value policy variables test
pub async fn test_aws_policy_variables_single_value_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    init_logging();
    info!("Starting AWS policy variables single-value test");

    let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;

    test_aws_policy_variables_single_value_impl_with_env(&env).await
}

/// Implementation function for the single-value policy variables test with a shared environment
pub async fn test_aws_policy_variables_single_value_impl_with_env(
    env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Create test user
    let test_user = "testuser1";
    let test_password = "testpassword123";
    let policy_name = "test-single-value-policy";

    // Create cleanup function
    let cleanup = || async {
        cleanup_user_and_policy(env, test_user, policy_name).await;
    };

    let create_user_body = serde_json::json!({
        "secretKey": test_password,
        "status": "enabled"
    })
    .to_string();

    let create_user_url = format!("{}/rustfs/admin/v3/add-user?accessKey={}", env.url, test_user);
    awscurl_put(&create_user_url, &create_user_body, &env.access_key, &env.secret_key).await?;

    // Create policy with single-value AWS variables
    let policy_document = serde_json::json!({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": ["s3:ListAllMyBuckets"],
                "Resource": ["arn:aws:s3:::*"]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:CreateBucket"],
                "Resource": [format!("arn:aws:s3:::{}-*", "${aws:username}")]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:ListBucket"],
                "Resource": [format!("arn:aws:s3:::{}-*", "${aws:username}")]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:PutObject", "s3:GetObject"],
                "Resource": [format!("arn:aws:s3:::{}-*/*", "${aws:username}")]
            }
        ]
    })
    .to_string();

    let add_policy_url = format!("{}/rustfs/admin/v3/add-canned-policy?name={}", env.url, policy_name);
    awscurl_put(&add_policy_url, &policy_document, &env.access_key, &env.secret_key).await?;

    // Attach policy to user
    let attach_policy_url = format!(
        "{}/rustfs/admin/v3/set-user-or-group-policy?policyName={}&userOrGroup={}&isGroup=false",
        env.url, policy_name, test_user
    );
    awscurl_put(&attach_policy_url, "", &env.access_key, &env.secret_key).await?;

    // Create an S3 client for the test user
    let test_client = env.create_s3_client(test_user, test_password);

    tokio::time::sleep(std::time::Duration::from_millis(500)).await;

    // Test 1: User should be able to list buckets (allowed by policy)
    info!("Test 1: User listing buckets");
    let list_result = test_client.list_buckets().send().await;
    if let Err(e) = list_result {
        cleanup().await;
        return Err(format!("User should be able to list buckets: {e}").into());
    }

    // Test 2: User should be able to create a bucket matching the username pattern
    info!("Test 2: User creating bucket matching pattern");
    let bucket_name = format!("{test_user}-test-bucket");
    let create_result = test_client.create_bucket().bucket(&bucket_name).send().await;
    if let Err(e) = create_result {
        cleanup().await;
        return Err(format!("User should be able to create bucket matching username pattern: {e}").into());
    }

    // Test 3: User should be able to list objects in their own bucket
    info!("Test 3: User listing objects in their bucket");
    let list_objects_result = test_client.list_objects_v2().bucket(&bucket_name).send().await;
    if let Err(e) = list_objects_result {
        cleanup().await;
        return Err(format!("User should be able to list objects in their own bucket: {e}").into());
    }

    // Test 4: User should be able to put an object into their own bucket
    info!("Test 4: User putting object in their bucket");
    let put_result = test_client
        .put_object()
        .bucket(&bucket_name)
        .key("test-object.txt")
        .body(ByteStream::from_static(b"Hello, Policy Variables!"))
        .send()
        .await;
    if let Err(e) = put_result {
        cleanup().await;
        return Err(format!("User should be able to put object in their own bucket: {e}").into());
    }

    // Test 5: User should be able to get an object from their own bucket
    info!("Test 5: User getting object from their bucket");
    let get_result = test_client
        .get_object()
        .bucket(&bucket_name)
        .key("test-object.txt")
        .send()
        .await;
    if let Err(e) = get_result {
        cleanup().await;
        return Err(format!("User should be able to get object from their own bucket: {e}").into());
    }

    // Test 6: User should NOT be able to create a bucket NOT matching the username pattern
    info!("Test 6: User attempting to create bucket NOT matching pattern");
    let other_bucket_name = "other-user-bucket";
    let create_other_result = test_client.create_bucket().bucket(other_bucket_name).send().await;
    if create_other_result.is_ok() {
        cleanup().await;
        return Err("User should NOT be able to create bucket NOT matching username pattern".into());
    }

    // Cleanup
    info!("Cleaning up test resources");
    cleanup().await;

    info!("AWS policy variables single-value test completed successfully");
    Ok(())
}
/// Test AWS policy variables with multi-value scenarios
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_multi_value() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    test_aws_policy_variables_multi_value_impl().await
}

/// Implementation function for the multi-value policy variables test
pub async fn test_aws_policy_variables_multi_value_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    init_logging();
    info!("Starting AWS policy variables multi-value test");

    let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;

    test_aws_policy_variables_multi_value_impl_with_env(&env).await
}

/// Implementation function for the multi-value policy variables test with a shared environment
pub async fn test_aws_policy_variables_multi_value_impl_with_env(
    env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Create test user
    let test_user = "testuser2";
    let test_password = "testpassword123";
    let policy_name = "test-multi-value-policy";

    // Create cleanup function
    let cleanup = || async {
        cleanup_user_and_policy(env, test_user, policy_name).await;
    };

    // Create user
    create_user(env, test_user, test_password).await?;

    // Create policy with multi-value AWS variables
    let policy_document = serde_json::json!({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": ["s3:ListAllMyBuckets"],
                "Resource": ["arn:aws:s3:::*"]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:CreateBucket"],
                "Resource": [
                    format!("arn:aws:s3:::{}-bucket1", "${aws:username}"),
                    format!("arn:aws:s3:::{}-bucket2", "${aws:username}"),
                    format!("arn:aws:s3:::{}-bucket3", "${aws:username}")
                ]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:ListBucket"],
                "Resource": [
                    format!("arn:aws:s3:::{}-bucket1", "${aws:username}"),
                    format!("arn:aws:s3:::{}-bucket2", "${aws:username}"),
                    format!("arn:aws:s3:::{}-bucket3", "${aws:username}")
                ]
            }
        ]
    });

    create_and_attach_policy(env, policy_name, test_user, policy_document).await?;

    // Create an S3 client for the test user
    let test_client = env.create_s3_client(test_user, test_password);

    // Test 1: User should be able to create buckets matching any of the multi-value patterns
    info!("Test 1: User creating first bucket matching multi-value pattern");
    let bucket1_name = format!("{test_user}-bucket1");
    let create_result1 = test_client.create_bucket().bucket(&bucket1_name).send().await;
    if let Err(e) = create_result1 {
        cleanup().await;
        return Err(format!("User should be able to create first bucket matching multi-value pattern: {e}").into());
    }

    info!("Test 2: User creating second bucket matching multi-value pattern");
    let bucket2_name = format!("{test_user}-bucket2");
    let create_result2 = test_client.create_bucket().bucket(&bucket2_name).send().await;
    if let Err(e) = create_result2 {
        cleanup().await;
        return Err(format!("User should be able to create second bucket matching multi-value pattern: {e}").into());
    }

    info!("Test 3: User creating third bucket matching multi-value pattern");
    let bucket3_name = format!("{test_user}-bucket3");
    let create_result3 = test_client.create_bucket().bucket(&bucket3_name).send().await;
    if let Err(e) = create_result3 {
        cleanup().await;
        return Err(format!("User should be able to create third bucket matching multi-value pattern: {e}").into());
    }

    // Test 4: User should NOT be able to create a bucket NOT matching any multi-value pattern
    info!("Test 4: User attempting to create bucket NOT matching any pattern");
    let other_bucket_name = format!("{test_user}-other-bucket");
    let create_other_result = test_client.create_bucket().bucket(&other_bucket_name).send().await;
    if create_other_result.is_ok() {
        cleanup().await;
        return Err("User should NOT be able to create bucket NOT matching any multi-value pattern".into());
    }

    // Test 5: User should be able to list objects in their allowed buckets
    info!("Test 5: User listing objects in allowed buckets");
    let list_objects_result1 = test_client.list_objects_v2().bucket(&bucket1_name).send().await;
    if let Err(e) = list_objects_result1 {
        cleanup().await;
        return Err(format!("User should be able to list objects in first allowed bucket: {e}").into());
    }

    let list_objects_result2 = test_client.list_objects_v2().bucket(&bucket2_name).send().await;
    if let Err(e) = list_objects_result2 {
        cleanup().await;
        return Err(format!("User should be able to list objects in second allowed bucket: {e}").into());
    }

    // Cleanup
    info!("Cleaning up test resources");
    cleanup().await;

    info!("AWS policy variables multi-value test completed successfully");
    Ok(())
}
/// Test AWS policy variables with variable concatenation
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
#[serial]
|
||||
#[ignore = "Starts a rustfs server; enable when running full E2E"]
|
||||
pub async fn test_aws_policy_variables_concatenation() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
test_aws_policy_variables_concatenation_impl().await
|
||||
}
|
||||
|
||||
/// Implementation function for concatenation policy variables test
|
||||
pub async fn test_aws_policy_variables_concatenation_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("Starting AWS policy variables concatenation test");
|
||||
|
||||
let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;
|
||||
|
||||
test_aws_policy_variables_concatenation_impl_with_env(&env).await
|
||||
}
|
||||
|
||||
/// Implementation function for concatenation policy variables test with shared environment
|
||||
pub async fn test_aws_policy_variables_concatenation_impl_with_env(
|
||||
env: &PolicyTestEnvironment,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
// Create test user
|
||||
let test_user = "testuser3";
|
||||
let test_password = "testpassword123";
|
||||
let policy_name = "test-concatenation-policy";
|
||||
|
||||
// Create cleanup function
|
||||
let cleanup = || async {
|
||||
cleanup_user_and_policy(env, test_user, policy_name).await;
|
||||
};
|
||||
|
||||
// Create user
|
||||
create_user(env, test_user, test_password).await?;
|
||||
|
||||
// Create policy with variable concatenation
|
||||
let policy_document = serde_json::json!({
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["s3:ListAllMyBuckets"],
|
||||
"Resource": ["arn:aws:s3:::*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["s3:CreateBucket"],
|
||||
"Resource": [format!("arn:aws:s3:::prefix-{}-suffix", "${aws:username}")]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["s3:ListBucket"],
|
||||
"Resource": [format!("arn:aws:s3:::prefix-{}-suffix", "${aws:username}")]
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
create_and_attach_policy(env, policy_name, test_user, policy_document).await?;
|
||||
|
||||
// Create S3 client for test user
|
||||
let test_client = env.create_s3_client(test_user, test_password);
|
||||
|
||||
// Add a small delay to allow policy to propagate
|
||||
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
|
||||
|
||||
// Test: User should be able to create bucket matching concatenated pattern
|
||||
info!("Test: User creating bucket matching concatenated pattern");
|
||||
let bucket_name = format!("prefix-{test_user}-suffix");
|
||||
let create_result = test_client.create_bucket().bucket(&bucket_name).send().await;
|
||||
if let Err(e) = create_result {
|
||||
cleanup().await;
|
||||
return Err(format!("User should be able to create bucket matching concatenated pattern: {e}").into());
|
||||
}
|
||||
|
||||
// Test: User should be able to list objects in the concatenated pattern bucket
|
||||
info!("Test: User listing objects in concatenated pattern bucket");
|
||||
let list_objects_result = test_client.list_objects_v2().bucket(&bucket_name).send().await;
|
||||
if let Err(e) = list_objects_result {
|
||||
cleanup().await;
|
||||
return Err(format!("User should be able to list objects in concatenated pattern bucket: {e}").into());
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
info!("Cleaning up test resources");
|
||||
cleanup().await;
|
||||
|
||||
info!("AWS policy variables concatenation test completed successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Test AWS policy variables with nested scenarios
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
#[serial]
|
||||
#[ignore = "Starts a rustfs server; enable when running full E2E"]
|
||||
pub async fn test_aws_policy_variables_nested() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
test_aws_policy_variables_nested_impl().await
|
||||
}
|
||||
|
||||
/// Implementation function for nested policy variables test
|
||||
pub async fn test_aws_policy_variables_nested_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("Starting AWS policy variables nested test");
|
||||
|
||||
let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;
|
||||
|
||||
test_aws_policy_variables_nested_impl_with_env(&env).await
|
||||
}
|
||||
|
||||
/// Test AWS policy variables with STS temporary credentials
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
#[serial]
|
||||
#[ignore = "Starts a rustfs server; enable when running full E2E"]
|
||||
pub async fn test_aws_policy_variables_sts() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
test_aws_policy_variables_sts_impl().await
|
||||
}
|
||||
|
||||
/// Implementation function for STS policy variables test
|
||||
pub async fn test_aws_policy_variables_sts_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("Starting AWS policy variables STS test");
|
||||
|
||||
let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;
|
||||
|
||||
test_aws_policy_variables_sts_impl_with_env(&env).await
|
||||
}
|
||||
|
||||
/// Implementation function for nested policy variables test with shared environment
|
||||
pub async fn test_aws_policy_variables_nested_impl_with_env(
|
||||
env: &PolicyTestEnvironment,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
// Create test user
|
||||
let test_user = "testuser4";
|
||||
let test_password = "testpassword123";
|
||||
let policy_name = "test-nested-policy";
|
||||
|
||||
// Create cleanup function
|
||||
let cleanup = || async {
|
||||
cleanup_user_and_policy(env, test_user, policy_name).await;
|
||||
};
|
||||
|
||||
// Create user
|
||||
create_user(env, test_user, test_password).await?;
|
||||
|
||||
// Create policy with nested variables - this tests complex variable resolution
|
||||
let policy_document = serde_json::json!({
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["s3:ListAllMyBuckets"],
|
||||
"Resource": ["arn:aws:s3:::*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["s3:CreateBucket"],
|
||||
"Resource": ["arn:aws:s3:::${${aws:username}-test}"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["s3:ListBucket"],
|
||||
"Resource": ["arn:aws:s3:::${${aws:username}-test}"]
|
||||
}
|
||||
]
|
||||
});
|
||||
|
||||
create_and_attach_policy(env, policy_name, test_user, policy_document).await?;
|
||||
|
||||
// Create S3 client for test user
|
||||
let test_client = env.create_s3_client(test_user, test_password);
|
||||
|
||||
// Add a small delay to allow policy to propagate
|
||||
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
|
||||
|
||||
// Test nested variable resolution
|
||||
info!("Test: Nested variable resolution");
|
||||
|
||||
// Create bucket with expected resolved name
|
||||
let expected_bucket = format!("{test_user}-test");
|
||||
|
||||
// Attempt to create bucket with resolved name
|
||||
let create_result = test_client.create_bucket().bucket(&expected_bucket).send().await;
|
||||
|
||||
// Verify bucket creation succeeds (nested variable resolved correctly)
|
||||
if let Err(e) = create_result {
|
||||
cleanup().await;
|
||||
return Err(format!("User should be able to create bucket with nested variable: {e}").into());
|
||||
}
|
||||
|
||||
// Verify bucket creation fails with unresolved variable
|
||||
let unresolved_bucket = format!("${{}}-test {test_user}");
|
||||
let create_unresolved = test_client.create_bucket().bucket(&unresolved_bucket).send().await;
|
||||
|
||||
if create_unresolved.is_ok() {
|
||||
cleanup().await;
|
||||
return Err("User should NOT be able to create bucket with unresolved variable".into());
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
info!("Cleaning up test resources");
|
||||
cleanup().await;
|
||||
|
||||
info!("AWS policy variables nested test completed successfully");
|
||||
Ok(())
|
||||
}

/// Implementation function for STS policy variables test with shared environment
pub async fn test_aws_policy_variables_sts_impl_with_env(
    env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Create test user for STS
    let test_user = "testuser-sts";
    let test_password = "testpassword123";
    let policy_name = "test-sts-policy";

    // Create cleanup function
    let cleanup = || async {
        cleanup_user_and_policy(env, test_user, policy_name).await;
    };

    // Create STS user
    create_sts_user(env, test_user, test_password).await?;

    // Create policy with STS-compatible variables
    let policy_document = serde_json::json!({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": ["s3:ListAllMyBuckets"],
                "Resource": ["arn:aws:s3:::*"]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:CreateBucket"],
                "Resource": [format!("arn:aws:s3:::{}-sts-bucket", "${aws:username}")]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:ListBucket", "s3:PutObject", "s3:GetObject"],
                "Resource": [format!("arn:aws:s3:::{}-sts-bucket/*", "${aws:username}")]
            }
        ]
    });
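    // NOTE: format! only splices the literal text "${aws:username}" into the
    // ARN here; nothing is expanded on the Rust side. The stored policy keeps
    // the variable, which the server resolves at request-evaluation time.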

    create_and_attach_policy(env, policy_name, test_user, policy_document).await?;

    // Create S3 client for test user
    let test_client = env.create_s3_client(test_user, test_password);

    // Add a small delay to allow policy to propagate
    tokio::time::sleep(std::time::Duration::from_millis(500)).await;

    // Test: User should be able to create bucket matching STS pattern
    info!("Test: User creating bucket matching STS pattern");
    let bucket_name = format!("{test_user}-sts-bucket");
    let create_result = test_client.create_bucket().bucket(&bucket_name).send().await;
    if let Err(e) = create_result {
        cleanup().await;
        return Err(format!("User should be able to create STS bucket: {e}").into());
    }

    // Test: User should be able to put object in STS bucket
    info!("Test: User putting object in STS bucket");
    let put_result = test_client
        .put_object()
        .bucket(&bucket_name)
        .key("test-sts-object.txt")
        .body(ByteStream::from_static(b"STS Test Object"))
        .send()
        .await;
    if let Err(e) = put_result {
        cleanup().await;
        return Err(format!("User should be able to put object in STS bucket: {e}").into());
    }

    // Test: User should be able to get object from STS bucket
    info!("Test: User getting object from STS bucket");
    let get_result = test_client
        .get_object()
        .bucket(&bucket_name)
        .key("test-sts-object.txt")
        .send()
        .await;
    if let Err(e) = get_result {
        cleanup().await;
        return Err(format!("User should be able to get object from STS bucket: {e}").into());
    }

    // Test: User should be able to list objects in STS bucket
    info!("Test: User listing objects in STS bucket");
    let list_result = test_client.list_objects_v2().bucket(&bucket_name).send().await;
    if let Err(e) = list_result {
        cleanup().await;
        return Err(format!("User should be able to list objects in STS bucket: {e}").into());
    }

    // Cleanup
    info!("Cleaning up test resources");
    cleanup().await;

    info!("AWS policy variables STS test completed successfully");
    Ok(())
}

/// Test AWS policy variables with deny scenarios
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_deny() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    test_aws_policy_variables_deny_impl().await
}

/// Implementation function for deny policy variables test
pub async fn test_aws_policy_variables_deny_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    init_logging();
    info!("Starting AWS policy variables deny test");

    let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;

    test_aws_policy_variables_deny_impl_with_env(&env).await
}

/// Implementation function for deny policy variables test with shared environment
pub async fn test_aws_policy_variables_deny_impl_with_env(
    env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Create test user
    let test_user = "testuser5";
    let test_password = "testpassword123";
    let policy_name = "test-deny-policy";

    // Create cleanup function
    let cleanup = || async {
        cleanup_user_and_policy(env, test_user, policy_name).await;
    };

    // Create user
    create_user(env, test_user, test_password).await?;

    // Create policy with both allow and deny statements
    let policy_document = serde_json::json!({
        "Version": "2012-10-17",
        "Statement": [
            // Allow general access
            {
                "Effect": "Allow",
                "Action": ["s3:ListAllMyBuckets"],
                "Resource": ["arn:aws:s3:::*"]
            },
            // Allow creating buckets matching username pattern
            {
                "Effect": "Allow",
                "Action": ["s3:CreateBucket"],
                "Resource": [format!("arn:aws:s3:::{}-*", "${aws:username}")]
            },
            // Deny creating buckets with "private" in the name
            {
                "Effect": "Deny",
                "Action": ["s3:CreateBucket"],
                "Resource": ["arn:aws:s3:::*private*"]
            }
        ]
    });
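    // NOTE: as with AWS IAM policy evaluation, an explicit Deny is expected to
    // override any matching Allow; Test 2 below depends on that ordering.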

    create_and_attach_policy(env, policy_name, test_user, policy_document).await?;

    // Create S3 client for test user
    let test_client = env.create_s3_client(test_user, test_password);

    // Add a small delay to allow policy to propagate
    tokio::time::sleep(std::time::Duration::from_millis(500)).await;

    // Test 1: User should be able to create bucket matching username pattern
    info!("Test 1: User creating bucket matching username pattern");
    let bucket_name = format!("{test_user}-test-bucket");
    let create_result = test_client.create_bucket().bucket(&bucket_name).send().await;
    if let Err(e) = create_result {
        cleanup().await;
        return Err(format!("User should be able to create bucket matching username pattern: {e}").into());
    }

    // Test 2: User should NOT be able to create bucket with "private" in the name (deny rule).
    // The name also matches the Allow pattern "{username}-*", so only the
    // explicit Deny on "*private*" can block this request.
    info!("Test 2: User attempting to create bucket with 'private' in name (should be denied)");
    let private_bucket_name = format!("{test_user}-private-bucket");
    let create_private_result = test_client.create_bucket().bucket(&private_bucket_name).send().await;
    if create_private_result.is_ok() {
        cleanup().await;
        return Err("User should NOT be able to create bucket with 'private' in name due to deny rule".into());
    }

    // Cleanup
    info!("Cleaning up test resources");
    cleanup().await;

    info!("AWS policy variables deny test completed successfully");
    Ok(())
}
100
crates/e2e_test/src/policy/test_env.rs
Normal file
@@ -0,0 +1,100 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Custom test environment for policy variables tests
//!
//! This module provides a custom test environment that doesn't automatically
//! stop servers when destroyed, addressing the server stopping issue.

use aws_sdk_s3::Client;
use aws_sdk_s3::config::{Config, Credentials, Region};
use std::net::TcpStream;
use std::time::Duration;
use tokio::time::sleep;
use tracing::{info, warn};

// Default credentials
const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
const DEFAULT_SECRET_KEY: &str = "rustfsadmin";

/// Custom test environment that doesn't automatically stop servers
pub struct PolicyTestEnvironment {
    pub temp_dir: String,
    pub address: String,
    pub url: String,
    pub access_key: String,
    pub secret_key: String,
}

impl PolicyTestEnvironment {
    /// Create a new test environment with specific address
    /// This environment won't stop any server when dropped
    pub async fn with_address(address: &str) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
        let temp_dir = format!("/tmp/rustfs_policy_test_{}", uuid::Uuid::new_v4());
        tokio::fs::create_dir_all(&temp_dir).await?;

        let url = format!("http://{address}");

        Ok(Self {
            temp_dir,
            address: address.to_string(),
            url,
            access_key: DEFAULT_ACCESS_KEY.to_string(),
            secret_key: DEFAULT_SECRET_KEY.to_string(),
        })
    }

    /// Create an AWS S3 client configured for this RustFS instance
    pub fn create_s3_client(&self, access_key: &str, secret_key: &str) -> Client {
        let credentials = Credentials::new(access_key, secret_key, None, None, "policy-test");
        let config = Config::builder()
            .credentials_provider(credentials)
            .region(Region::new("us-east-1"))
            .endpoint_url(&self.url)
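            // Path-style addressing (http://host/bucket) instead of
            // virtual-hosted-style; custom S3 endpoints generally require this.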
            .force_path_style(true)
            .behavior_version_latest()
            .build();
        Client::from_conf(config)
    }

    /// Wait for RustFS server to be ready by checking TCP connectivity
    pub async fn wait_for_server_ready(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        info!("Waiting for RustFS server to be ready on {}", self.address);

        for i in 0..30 {
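            // NOTE: this is the blocking std::net::TcpStream::connect; it is
            // acceptable in a test helper, though it briefly blocks the async
            // executor thread on each failed attempt.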
            if TcpStream::connect(&self.address).is_ok() {
                info!("✅ RustFS server is ready after {} attempts", i + 1);
                return Ok(());
            }

            if i == 29 {
                return Err("RustFS server failed to become ready within 30 seconds".into());
            }

            sleep(Duration::from_secs(1)).await;
        }

        Ok(())
    }
}
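
// Example usage (a sketch; assumes a RustFS server is already listening on the
// given address, since this environment never starts or stops one):
//
//     let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;
//     env.wait_for_server_ready().await?;
//     let client = env.create_s3_client("rustfsadmin", "rustfsadmin");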

// Implement Drop trait that doesn't stop servers
impl Drop for PolicyTestEnvironment {
    fn drop(&mut self) {
        // Clean up temp directory only, don't stop any server
        if let Err(e) = std::fs::remove_dir_all(&self.temp_dir) {
            warn!("Failed to clean up temp directory {}: {}", self.temp_dir, e);
        }
    }
}
Some files were not shown because too many files have changed in this diff