Compare commits


84 Commits

Author SHA1 Message Date
yxrxy
3c14947878 fix(iam): preserve decrypt-failed credentials instead of deleting them (#1312)
Signed-off-by: loverustfs <github@rustfs.com>
Co-authored-by: loverustfs <github@rustfs.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-30 22:41:10 +08:00
houseme
2924b4e463 Restore globals and add unified TLS/mTLS loading from RUSTFS_TLS_PATH (#1309)
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
2025-12-30 21:55:43 +08:00
loverustfs
b4ba62fa33 fix: correctly handle aws:SourceIp in policy evaluation (#1301) (#1306)
Signed-off-by: loverustfs <github@rustfs.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-12-30 16:54:48 +08:00
loverustfs
a5b3522880 Add trendshift 2025-12-30 13:03:15 +08:00
安正超
056a0ee62b feat: add local s3-tests script with configurable options and improvements (#1300) 2025-12-29 23:48:32 +08:00
Juri Malinovski
4603ece708 helm: add enableServiceLinks, poddisruptionbudget (#1293)
Signed-off-by: Juri Malinovski <juri.malinovski@coolbet.com>
2025-12-29 09:31:18 +08:00
houseme
eb33e82b56 fix: Prevent panic in GetMetrics gRPC handler on invalid input (#1291)
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
2025-12-29 03:10:23 +08:00
Ali Mehraji
c7e2b4d8e7 Modular Makefile (#1288)
Signed-off-by: Ali Mehraji <a.mehraji75@gmail.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-28 21:57:44 +08:00
LeonWang0735
71c59d1187 fix:ListObjects and ListObjectV2 correctly handles unordered and delimiter (#1285) 2025-12-28 16:18:42 +08:00
loverustfs
e3a0a07495 fix: ensure version_id is returned in S3 response headers (#1272)
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: 安正超 <anzhengchao@gmail.com>
2025-12-28 09:41:32 +08:00
0xdx2
136db7e0c9 feat: add function to extract user-defined metadata keys and integrat… (#1281)
Signed-off-by: 0xdx2 <xuedamon2@gmail.com>
Signed-off-by: houseme <housemecn@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-27 22:18:16 +08:00
Juri Malinovski
2e3c5f695a helm: update default Chart.yaml, appVersion version bump, add appVersion as a default image tag (#1247)
Co-authored-by: majinghe <42570491+majinghe@users.noreply.github.com>
2025-12-27 20:50:22 +08:00
bbb4aaa
fe9609fd17 fix:affinity.podAntiAffinity.enabled value not taking effect (#1280)
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-27 20:46:25 +08:00
bbb4aaa
f2d79b485e fix: prevent PV/PVC deletion during rustfs uninstallation (#1279) 2025-12-27 20:45:43 +08:00
Copilot
3d6681c9e5 chore: remove e2e-mint workflow (#1274)
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>
2025-12-26 21:55:04 +08:00
lgpseu
07a26fadad opt: store IoLoadMetrics records with circular vector (#1265)
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-26 12:59:40 +08:00
majinghe
a083fca17a delete -R parameter in init container step (#1264) 2025-12-25 18:14:50 +08:00
houseme
89c3ae77a4 feat: Add TONIC_PREFIX prefix matching in ReadinessGateService (#1261) 2025-12-25 14:28:07 +08:00
houseme
82a6e78845 Inject GlobalReadiness into HTTP server pipeline and gate traffic until FullReady (#1255) 2025-12-25 00:19:03 +08:00
houseme
7e75c9b1f5 remove unlinked file (#1258) 2025-12-24 23:37:43 +08:00
weisd
8bdff3fbcb fix: Add retry mechanism for GLOBAL_CONFIG_SYS initialization (#1252) 2025-12-24 16:38:28 +08:00
Andrea Manzi
65d32e693f add ca-certificates in mcp-server Dockerfile (#1248)
Signed-off-by: Andrea Manzi <andrea.manzi@gmail.com>
2025-12-24 08:36:14 +08:00
Michele Zanotti
1ff28b3157 helm: expose init container parameters as helm values (#1232)
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-23 21:31:28 +08:00
Juri Malinovski
2186f46ea3 helm: fix service/containers ports, fix podAntiAffinity (#1230)
Co-authored-by: majinghe <42570491+majinghe@users.noreply.github.com>
2025-12-23 20:36:33 +08:00
唐小鸭
add6453aea feat: add seek support for small objects in rustfs (#1231)
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-23 20:27:34 +08:00
yxrxy
4418c882ad Revert "fix(iam): store previous credentials in .rustfs.sys bucket to… (#1238)
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-23 19:37:39 +08:00
Muhammed Hussain Karimi
00c607b5ce 🧑‍💻 Fix nix develop problem with Git-Based dependecies on nix develop shell (#1243)
Signed-off-by: Muhammed Hussain Karimi <info@karimi.dev>
2025-12-23 19:26:50 +08:00
majinghe
79585f98e0 delete userless helm chart file (#1245) 2025-12-23 19:15:29 +08:00
majinghe
2a3517f1d5 Custom annotation (#1242) 2025-12-23 17:31:01 +08:00
tryao
3942e07487 console port is 9001 (#1235)
Signed-off-by: tryao <yaotairan@gmail.com>
2025-12-23 13:36:38 +08:00
houseme
04811c0006 update s3s version (#1237) 2025-12-23 13:09:57 +08:00
Ali Mehraji
73c15d6be1 Add: rust installation in Makefile (#1188)
Signed-off-by: Ali Mehraji <a.mehraji75@gmail.com>
Signed-off-by: houseme <housemecn@gmail.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-12-23 08:51:04 +08:00
loverustfs
af5c0b13ef fix: HeadObject returns 404 for deleted objects with versioning enabled (#1229)
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-22 20:43:00 +08:00
Juri Malinovski
f17990f746 helm: allow to define additional config variables (#1220)
Signed-off-by: Juri Malinovski <juri.malinovski@coolbet.com>
2025-12-22 20:25:23 +08:00
weisd
80cfb4feab Add Disk Timeout and Health Check Functionality (#1196)
Signed-off-by: weisd <im@weisd.in>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-22 17:15:19 +08:00
houseme
08f1a31f3f Fix notification event stream cleanup, add bounded send concurrency, and reduce overhead (#1224) 2025-12-22 00:57:05 +08:00
loverustfs
1c51e204ab ci: reduce cargo build jobs to 2 for standard-2 runner 2025-12-21 23:54:40 +08:00
loverustfs
958f054123 ci: update all workflows to use ubicloud-standard-2 runner 2025-12-21 23:43:12 +08:00
0xdx2
3e2252e4bb fix(config):Update argument parsing for volumes and server_domains to support del… (#1209)
Signed-off-by: houseme <housemecn@gmail.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-12-21 17:54:23 +08:00
loverustfs
f3a1431fa5 fix: resolve TLS handshake failure in inter-node communication (#1201) (#1222)
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-21 16:11:55 +08:00
yxrxy
3bd96bcf10 fix: resolve event target deletion issue (#1219) 2025-12-21 12:43:48 +08:00
majinghe
20ea591049 add custom nodeport support (#1217) 2025-12-20 22:02:21 +08:00
GatewayJ
cc31e88c91 fix: expiration time (#1215) 2025-12-20 20:25:52 +08:00
yxrxy
b5535083de fix(iam): store previous credentials in .rustfs.sys bucket to preserv… (#1213) 2025-12-20 19:15:49 +08:00
loverustfs
1e35edf079 chore(ci): restore workflows before 8e0aeb4 (#1212) 2025-12-20 07:50:49 +08:00
Copilot
8dd3e8b534 fix: decode form-urlencoded object names in webhook/mqtt Key field (#1210)
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-20 01:31:09 +08:00
loverustfs
8e0aeb4fdc Optimize ci ubicloud (#1208) 2025-12-19 23:22:45 +08:00
majinghe
abe8a50b5a add cert manager and ingress annotations support (#1206) 2025-12-19 21:50:23 +08:00
loverustfs
61f4d307b5 Modify latest version tips to console 2025-12-19 14:57:19 +08:00
loverustfs
3eafeb0ff0 Modify to accelerate 2025-12-19 13:01:17 +08:00
houseme
4abfc9f554 Fix/fix event 1216 (#1191)
Signed-off-by: loverustfs <hello@rustfs.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-19 12:07:07 +08:00
唐小鸭
1057953052 fix: Remove the compression check that has already been handled by tower-http::CompressionLayer. (#1190)
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-19 10:15:52 +08:00
loverustfs
889c67f359 Modify to ubicloud 2025-12-19 09:42:21 +08:00
loverustfs
1d111464f9 Return to GitHub hosting
Signed-off-by: loverustfs <hello@rustfs.com>
2025-12-19 09:15:26 +08:00
loverustfs
a0b2f5a232 self-host
Signed-off-by: loverustfs <hello@rustfs.com>
2025-12-18 22:23:25 +08:00
Muhammed Hussain Karimi
46557cddd1 🧑‍💻 Improve shebang compatibility (#1180)
Signed-off-by: Muhammed Hussain Karimi <info@karimi.dev>
2025-12-18 20:13:24 +08:00
安正超
443947e1ac fix: improve S3 API compatibility for ListObjects operations (#1173)
Signed-off-by: 安正超 <anzhengchao@gmail.com>
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-12-17 21:50:03 +08:00
yxrxy
8821fcc1e7 feat: Replace LRU cache with Moka async cache in policy variables (#1166)
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-17 00:19:31 +08:00
houseme
17828ec2a8 Dependabot/cargo/s3s df2434d 1216 (#1170)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-16 21:21:43 +08:00
mythrnr
94d5b1c1e4 fix: format of bucket event notifications (#1138) 2025-12-16 20:44:57 +08:00
GatewayJ
0bca1fbd56 fix: the method for correcting judgment headers (#1159)
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-16 19:30:50 +08:00
唐小鸭
52c2d15a4b feat: Implement whitelist-based HTTP response compression configuration (#1136)
Signed-off-by: 唐小鸭 <tangtang1251@qq.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <hello@rustfs.com>
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-12-16 15:05:40 +08:00
yxrxy
352035a06f feat: Implement AWS policy variables support (#1131)
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-16 13:32:01 +08:00
yihong
fe4fabb195 fix: other two memory leak in the code base (#1160)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-16 11:45:45 +08:00
GatewayJ
07c5e7997a list object version Interface returns storage_class (#1133)
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-16 07:09:05 +08:00
yihong
0007b541cd feat: add pre-commit file (#1155)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-12-15 22:23:43 +08:00
dependabot[bot]
0f2e4d124c build(deps): bump the dependencies group with 3 updates (#1148)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-15 20:39:04 +08:00
Christian Simon
2e4ce6921b helm: Mount /tmp as emptyDir (#1105)
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-15 16:59:28 +08:00
Juri Malinovski
7178a94792 helm: refactor helm chart (#1122)
Signed-off-by: Juri Malinovski <juri.malinovski@coolbet.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-15 13:05:43 +08:00
sunfkny
e8fe9731fd Fix memory leak in Cache update method (#1143) 2025-12-15 10:04:14 +08:00
Jörg Thalheim
3ba415740e Add docs for using Nix flake (#1103)
Co-authored-by: loverustfs <hello@rustfs.com>
Co-authored-by: 0xdx2 <xuedamon2@gmail.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-14 09:44:13 +08:00
Lazar
aeccd14d99 Replace placeholder content in SECURITY.md (#1140)
Signed-off-by: Lazar <66002359+WauHundeland@users.noreply.github.com>
2025-12-14 09:31:27 +08:00
Jörg Thalheim
89a155a35d flake: add Nix flake for reproducible builds (#1096)
Co-authored-by: loverustfs <hello@rustfs.com>
Co-authored-by: 0xdx2 <xuedamon2@gmail.com>
2025-12-13 23:54:54 +08:00
yihong
67095c05f9 fix: update tool chain make everything happy (#1134)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-12-13 20:32:42 +08:00
czaloumis
1229fddb5d render imagePullSecrets in Deployment/StatefulSet (#1130)
Signed-off-by: czaloumis <80974398+czaloumis@users.noreply.github.com>
2025-12-13 11:23:35 +08:00
majinghe
08be8f5472 add image pull secret support (#1127)
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-12 20:25:25 +08:00
Sebastian Wolf
0bf25fdefa feat: Be able to set region from Helm chart (#1119)
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-12 12:30:35 +08:00
houseme
9e2fa148ee Fix type errors in ecfs.rs and apply clippy fixes for Rust 1.92.0 (#1121) 2025-12-12 00:49:21 +08:00
安正超
cb3e496b17 Feat/e2e s3tests (#1120)
Signed-off-by: 安正超 <anzhengchao@gmail.com>
2025-12-11 22:32:07 +08:00
YGoetschel
997f54e700 Fix Docker-based Development Workflow (#1031)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-11 19:48:14 +08:00
houseme
1a4e95e940 chore: remove unused dependencies to optimize build (#1117) 2025-12-11 18:13:26 +08:00
Christian Simon
a3006ab407 helm: Use service.type from Values (#1106)
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-11 17:32:15 +08:00
houseme
e197486c8c upgrade action checkout version from v5 to v6 (#1067)
Co-authored-by: 0xdx2 <xuedamon2@gmail.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-11 15:39:20 +08:00
dependabot[bot]
0da943a6a4 build(deps): bump s3s from 0.12.0-rc.4 to 0.12.0-rc.5 in the s3s group (#1046)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: loverustfs <hello@rustfs.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
2025-12-11 15:20:36 +08:00
269 changed files with 15340 additions and 5902 deletions

View File

@@ -0,0 +1,64 @@
## —— Development/Source builds using direct buildx commands ---------------------------------------
.PHONY: docker-dev
docker-dev: ## Build dev multi-arch image (cannot load locally)
@echo "🏗️ Building multi-architecture development Docker images with buildx..."
@echo "💡 This builds from source code and is intended for local development and testing"
@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
$(DOCKER_CLI) buildx build \
--platform linux/amd64,linux/arm64 \
--file $(DOCKERFILE_SOURCE) \
--tag rustfs:source-latest \
--tag rustfs:dev-latest \
.
.PHONY: docker-dev-local
docker-dev-local: ## Build dev single-arch image (local load)
@echo "🏗️ Building single-architecture development Docker image for local use..."
@echo "💡 This builds from source code for the current platform and loads locally"
$(DOCKER_CLI) buildx build \
--file $(DOCKERFILE_SOURCE) \
--tag rustfs:source-latest \
--tag rustfs:dev-latest \
--load \
.
.PHONY: docker-dev-push
docker-dev-push: ## Build and push multi-arch development image # e.g (make docker-dev-push REGISTRY=xxx)
@if [ -z "$(REGISTRY)" ]; then \
echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
exit 1; \
fi
@echo "🚀 Building and pushing multi-architecture development Docker images..."
@echo "💡 Pushing to registry: $(REGISTRY)"
$(DOCKER_CLI) buildx build \
--platform linux/amd64,linux/arm64 \
--file $(DOCKERFILE_SOURCE) \
--tag $(REGISTRY)/rustfs:source-latest \
--tag $(REGISTRY)/rustfs:dev-latest \
--push \
.
.PHONY: dev-env-start
dev-env-start: ## Start development container environment
@echo "🚀 Starting development environment..."
$(DOCKER_CLI) buildx build \
--file $(DOCKERFILE_SOURCE) \
--tag rustfs:dev \
--load \
.
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
-p 9010:9010 -p 9000:9000 \
-v $(shell pwd):/workspace \
-it rustfs:dev
.PHONY: dev-env-stop
dev-env-stop: ## Stop development container environment
@echo "🛑 Stopping development environment..."
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
.PHONY: dev-env-restart
dev-env-restart: dev-env-stop dev-env-start ## Restart development container environment

View File

@@ -0,0 +1,41 @@
## —— Production builds using docker buildx (for CI/CD and production) -----------------------------
.PHONY: docker-buildx
docker-buildx: ## Build production multi-arch image (no push)
@echo "🏗️ Building multi-architecture production Docker images with buildx..."
./docker-buildx.sh
.PHONY: docker-buildx-push
docker-buildx-push: ## Build and push production multi-arch image
@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
./docker-buildx.sh --push
.PHONY: docker-buildx-version
docker-buildx-version: ## Build and version production multi-arch image # e.g (make docker-buildx-version VERSION=v1.0.0)
@if [ -z "$(VERSION)" ]; then \
echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
exit 1; \
fi
@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
./docker-buildx.sh --release $(VERSION)
.PHONY: docker-buildx-push-version
docker-buildx-push-version: ## Build and version and push production multi-arch image # e.g (make docker-buildx-push-version VERSION=v1.0.0)
@if [ -z "$(VERSION)" ]; then \
echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
exit 1; \
fi
@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
./docker-buildx.sh --release $(VERSION) --push
.PHONY: docker-buildx-production-local
docker-buildx-production-local: ## Build production single-arch image locally
@echo "🏗️ Building single-architecture production Docker image locally..."
@echo "💡 Alternative to docker-buildx.sh for local testing"
$(DOCKER_CLI) buildx build \
--file $(DOCKERFILE_PRODUCTION) \
--tag rustfs:production-latest \
--tag rustfs:latest \
--load \
--build-arg RELEASE=latest \
.

View File

@@ -0,0 +1,16 @@
## —— Single Architecture Docker Builds (Traditional) ----------------------------------------------
.PHONY: docker-build-production
docker-build-production: ## Build single-arch production image
@echo "🏗️ Building single-architecture production Docker image..."
@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .
.PHONY: docker-build-source
docker-build-source: ## Build single-arch source image
@echo "🏗️ Building single-architecture source Docker image..."
@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
--build-arg BUILDKIT_INLINE_CACHE=1 \
-f $(DOCKERFILE_SOURCE) -t rustfs:source .

View File

@@ -0,0 +1,22 @@
## —— Docker-based build (alternative approach) ----------------------------------------------------
# Usage: make BUILD_OS=ubuntu22.04 build-docker
# Output: target/ubuntu22.04/release/rustfs
.PHONY: build-docker
build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
build-docker: ## Build using Docker container # e.g (make build-docker BUILD_OS=ubuntu22.04)
@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
$(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)
.PHONY: docker-inspect-multiarch
docker-inspect-multiarch: ## Check image architecture support
@if [ -z "$(IMAGE)" ]; then \
echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
exit 1; \
fi
@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
docker buildx imagetools inspect $(IMAGE)

.config/make/build.mak (new file, +55 lines)

@@ -0,0 +1,55 @@
## —— Local Native Build using build-rustfs.sh script (Recommended) --------------------------------
.PHONY: build
build: ## Build RustFS binary (includes console by default)
@echo "🔨 Building RustFS using build-rustfs.sh script..."
./build-rustfs.sh
.PHONY: build-dev
build-dev: ## Build RustFS in Development mode
@echo "🔨 Building RustFS in development mode..."
./build-rustfs.sh --dev
.PHONY: build-musl
build-musl: ## Build x86_64 musl version
@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform x86_64-unknown-linux-musl
.PHONY: build-gnu
build-gnu: ## Build x86_64 GNU version
@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
.PHONY: build-musl-arm64
build-musl-arm64: ## Build aarch64 musl version
@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform aarch64-unknown-linux-musl
.PHONY: build-gnu-arm64
build-gnu-arm64: ## Build aarch64 GNU version
@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
.PHONY: build-cross-all
build-cross-all: core-deps ## Build binaries for all architectures
@echo "🔧 Building all target architectures..."
@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
@echo "🔨 Generating protobuf code..."
cargo run --bin gproto || true
@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
./build-rustfs.sh --platform x86_64-unknown-linux-musl
@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
./build-rustfs.sh --platform aarch64-unknown-linux-musl
@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
./build-rustfs.sh --platform aarch64-unknown-linux-gnu

.config/make/check.mak (new file, +24 lines)

@@ -0,0 +1,24 @@
## —— Check and Inform Dependencies ----------------------------------------------------------------
# Fatal check
# Checks all required dependencies and exits with error if not found
# (e.g., cargo, rustfmt)
check-%:
@command -v $* >/dev/null 2>&1 || { \
echo >&2 "❌ '$*' is not installed."; \
exit 1; \
}
# Warning-only check
# Checks for optional dependencies and issues a warning if not found
# (e.g., cargo-nextest for enhanced testing)
warn-%:
@command -v $* >/dev/null 2>&1 || { \
echo >&2 "⚠️ '$*' is not installed."; \
}
# For checking dependencies use check-<dep-name> or warn-<dep-name>
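# e.g. `make check-docker` exits with an error if docker is missing, while
# `make warn-shellcheck` only prints a warning (docker and shellcheck are example
# names here; the % stem matches any command on PATH)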
.PHONY: core-deps fmt-deps test-deps
core-deps: check-cargo ## Check core dependencies
fmt-deps: check-rustfmt ## Check lint and formatting dependencies
test-deps: warn-cargo-nextest ## Check tests dependencies

.config/make/deploy.mak (new file, +6 lines)

@@ -0,0 +1,6 @@
## —— Deploy using dev_deploy.sh script ------------------------------------------------------------
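# Usage: IP=<dev-server-address> make deploy-dev
# deploy-dev builds the musl binary first, then passes the target address to
# scripts/dev_deploy.sh via the IP environment variable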
.PHONY: deploy-dev
deploy-dev: build-musl ## Deploy to dev server
@echo "🚀 Deploying to dev server: $${IP}"
./scripts/dev_deploy.sh $${IP}

.config/make/help.mak (new file, +38 lines)

@@ -0,0 +1,38 @@
## —— Help, Help Build and Help Docker -------------------------------------------------------------
.PHONY: help
help: ## Shows This Help Menu
echo -e "$$HEADER"
grep -E '(^[a-zA-Z0-9_-]+:.*?## .*$$)|(^## )' $(MAKEFILE_LIST) | sed 's/^[^:]*://g' | awk 'BEGIN {FS = ":.*?## | #"} ; {printf "${cyan}%-30s${reset} ${white}%s${reset} ${green}%s${reset}\n", $$1, $$2, $$3}' | sed -e 's/\[36m##/\n[32m##/'
.PHONY: help-build
help-build: ## Shows RustFS build help
@echo ""
@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"
@echo ""
@echo "🔧 Direct usage of build-rustfs.sh script:"
@echo ""
@echo " ./build-rustfs.sh --help # View script help"
@echo " ./build-rustfs.sh --no-console # Build without console resources"
@echo " ./build-rustfs.sh --force-console-update # Force update console resources"
@echo " ./build-rustfs.sh --dev # Development mode build"
@echo " ./build-rustfs.sh --sign # Sign binary files"
@echo " ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
@echo " ./build-rustfs.sh --skip-verification # Skip binary verification"
@echo ""
.PHONY: help-docker
help-docker: ## Shows docker environment and suggestion help
@echo ""
@echo "📋 Environment Variables:"
@echo " REGISTRY Image registry address (required for push)"
@echo " DOCKERHUB_USERNAME Docker Hub username"
@echo " DOCKERHUB_TOKEN Docker Hub access token"
@echo " GITHUB_TOKEN GitHub access token"
@echo ""
@echo "💡 Suggestions:"
@echo " Production use: Use docker-buildx* commands (based on precompiled binaries)"
@echo " Local development: Use docker-dev* commands (build from source)"
@echo " Development environment: Use dev-env-* commands to manage dev containers"
@echo ""

.config/make/lint-fmt.mak (new file, +22 lines)

@@ -0,0 +1,22 @@
## —— Code quality and Formatting ------------------------------------------------------------------
.PHONY: fmt
fmt: core-deps fmt-deps ## Format code
@echo "🔧 Formatting code..."
cargo fmt --all
.PHONY: fmt-check
fmt-check: core-deps fmt-deps ## Check code formatting
@echo "📝 Checking code formatting..."
cargo fmt --all --check
.PHONY: clippy-check
clippy-check: core-deps ## Run clippy checks
@echo "🔍 Running clippy checks..."
cargo clippy --fix --allow-dirty
cargo clippy --all-targets --all-features -- -D warnings
.PHONY: compilation-check
compilation-check: core-deps ## Run compilation check
@echo "🔨 Running compilation check..."
cargo check --all-targets

View File

@@ -0,0 +1,11 @@
## —— Pre Commit Checks ----------------------------------------------------------------------------
.PHONY: setup-hooks
setup-hooks: ## Set up git hooks
@echo "🔧 Setting up git hooks..."
chmod +x .git/hooks/pre-commit
@echo "✅ Git hooks setup complete!"
.PHONY: pre-commit
pre-commit: fmt clippy-check compilation-check test ## Run pre-commit checks
@echo "✅ All pre-commit checks passed!"

.config/make/tests.mak (new file, +20 lines)

@@ -0,0 +1,20 @@
## —— Tests and e2e test ---------------------------------------------------------------------------
.PHONY: test
test: core-deps test-deps ## Run all tests
@echo "🧪 Running tests..."
@if command -v cargo-nextest >/dev/null 2>&1; then \
cargo nextest run --all --exclude e2e_test; \
else \
echo " cargo-nextest not found; falling back to 'cargo test'"; \
cargo test --workspace --exclude e2e_test -- --nocapture; \
fi
cargo test --all --doc
.PHONY: e2e-server
e2e-server: ## Run e2e-server tests
sh $(shell pwd)/scripts/run.sh
.PHONY: probe-e2e
probe-e2e: ## Probe e2e tests
sh $(shell pwd)/scripts/probe.sh

.envrc (new file, +1 line)

@@ -0,0 +1 @@
use flake

.github/s3tests/README.md (vendored, new file, +103 lines)

@@ -0,0 +1,103 @@
# S3 Compatibility Tests Configuration
This directory contains the configuration for running [Ceph S3 compatibility tests](https://github.com/ceph/s3-tests) against RustFS.
## Configuration File
The `s3tests.conf` file is based on the official `s3tests.conf.SAMPLE` from the ceph/s3-tests repository. It uses environment variable substitution via `envsubst` to configure the endpoint and credentials.
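For example, a single-node configuration can be generated like this (a sketch; the values mirror the workflow environment, so adjust the host and credentials for your own setup):
```bash
export S3_HOST=rustfs-single            # use `lb` for the multi-node setup
export S3_ACCESS_KEY=rustfsadmin S3_SECRET_KEY=rustfsadmin
export S3_ALT_ACCESS_KEY=rustfsalt S3_ALT_SECRET_KEY=rustfsalt
envsubst < .github/s3tests/s3tests.conf > /tmp/s3tests.conf
```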
### Key Configuration Points
- **Host**: Set via `${S3_HOST}` environment variable (e.g., `rustfs-single` for single-node, `lb` for multi-node)
- **Port**: 9000 (standard RustFS port)
- **Credentials**: Uses `${S3_ACCESS_KEY}` and `${S3_SECRET_KEY}` from workflow environment
- **TLS**: Disabled (`is_secure = False`)
## Test Execution Strategy
### Network Connectivity Fix
Tests run inside a Docker container on the `rustfs-net` network, which allows them to resolve and connect to the RustFS container hostnames. This fixes the "Temporary failure in name resolution" error that occurred when tests ran on the GitHub runner host.
### Performance Optimizations
1. **Parallel Execution**: Uses `pytest-xdist` with `-n 4` to run tests in parallel across 4 workers
2. **Load Distribution**: Uses `--dist=loadgroup` to distribute test groups across workers
3. **Fail-Fast**: Uses `--maxfail=50` to stop after 50 failures, saving time on catastrophic failures
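Taken together, these options amount to a pytest invocation roughly like the sketch below (the config path is illustrative; CI passes equivalent flags through `tox`):
```bash
# 4 parallel workers, grouped distribution, stop after 50 failures
S3TEST_CONF=/etc/s3tests.conf pytest -v -n 4 --dist=loadgroup --maxfail=50 \
  s3tests/functional/test_s3.py
```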
### Feature Filtering
Tests are filtered using pytest markers (`-m`) to skip features not yet supported by RustFS:
- `lifecycle` - Bucket lifecycle policies
- `versioning` - Object versioning
- `s3website` - Static website hosting
- `bucket_logging` - Bucket logging
- `encryption` / `sse_s3` - Server-side encryption
- `cloud_transition` / `cloud_restore` - Cloud storage transitions
- `lifecycle_expiration` / `lifecycle_transition` - Lifecycle operations
This filtering:
1. Reduces test execution time significantly (from 1+ hour to ~10-15 minutes)
2. Focuses on features RustFS currently supports
3. Avoids hundreds of expected failures
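For reference, the markers above combine into a single `-m` expression along these lines (the workflow's default expression is a shorter subset of this list):
```bash
pytest -m "not lifecycle and not versioning and not s3website and not bucket_logging \
  and not encryption and not sse_s3 and not cloud_transition and not cloud_restore \
  and not lifecycle_expiration and not lifecycle_transition" \
  s3tests/functional/test_s3.py
```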
## Running Tests Locally
### Single-Node Test
```bash
# Set credentials
export S3_ACCESS_KEY=rustfsadmin
export S3_SECRET_KEY=rustfsadmin
# Start RustFS container
docker run -d --name rustfs-single \
--network rustfs-net \
-e RUSTFS_ADDRESS=0.0.0.0:9000 \
-e RUSTFS_ACCESS_KEY=$S3_ACCESS_KEY \
-e RUSTFS_SECRET_KEY=$S3_SECRET_KEY \
-e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
rustfs-ci
# Generate config
export S3_HOST=rustfs-single
envsubst < .github/s3tests/s3tests.conf > /tmp/s3tests.conf
# Run tests
docker run --rm \
--network rustfs-net \
-v /tmp/s3tests.conf:/etc/s3tests.conf:ro \
python:3.12-slim \
bash -c '
apt-get update -qq && apt-get install -y -qq git
git clone --depth 1 https://github.com/ceph/s3-tests.git /s3-tests
cd /s3-tests
pip install -q -r requirements.txt pytest-xdist
S3TEST_CONF=/etc/s3tests.conf pytest -v -n 4 \
s3tests/functional/test_s3.py \
-m "not lifecycle and not versioning and not s3website and not bucket_logging and not encryption and not sse_s3"
'
```
## Test Results Interpretation
- **PASSED**: Test succeeded, feature works correctly
- **FAILED**: Test failed, indicates a potential bug or incompatibility
- **ERROR**: Test setup failed (e.g., network issues, missing dependencies)
- **SKIPPED**: Test skipped due to marker filtering
## Adding New Feature Support
When adding support for a new S3 feature to RustFS:
1. Remove the corresponding marker from the filter in `.github/workflows/e2e-s3tests.yml`
2. Run the tests to verify compatibility
3. Fix any failing tests
4. Update this README to reflect the newly supported feature
## References
- [Ceph S3 Tests Repository](https://github.com/ceph/s3-tests)
- [S3 API Compatibility](https://docs.aws.amazon.com/AmazonS3/latest/API/)
- [pytest-xdist Documentation](https://pytest-xdist.readthedocs.io/)

.github/s3tests/s3tests.conf (vendored, new file, +185 lines)

@@ -0,0 +1,185 @@
# RustFS s3-tests configuration
# Based on: https://github.com/ceph/s3-tests/blob/master/s3tests.conf.SAMPLE
#
# Usage:
# Single-node: S3_HOST=rustfs-single envsubst < s3tests.conf > /tmp/s3tests.conf
# Multi-node: S3_HOST=lb envsubst < s3tests.conf > /tmp/s3tests.conf
[DEFAULT]
## this section is just used for host, port and bucket_prefix
# host set for RustFS - will be substituted via envsubst
host = ${S3_HOST}
# port for RustFS
port = 9000
## say "False" to disable TLS
is_secure = False
## say "False" to disable SSL Verify
ssl_verify = False
[fixtures]
## all the buckets created will start with this prefix;
## {random} will be filled with random characters to pad
## the prefix to 30 characters long, and avoid collisions
bucket prefix = rustfs-{random}-
# all the iam account resources (users, roles, etc) created
# will start with this name prefix
iam name prefix = s3-tests-
# all the iam account resources (users, roles, etc) created
# will start with this path prefix
iam path prefix = /s3-tests/
[s3 main]
# main display_name
display_name = RustFS Tester
# main user_id
user_id = rustfsadmin
# main email
email = tester@rustfs.local
# zonegroup api_name for bucket location
api_name = default
## main AWS access key
access_key = ${S3_ACCESS_KEY}
## main AWS secret key
secret_key = ${S3_SECRET_KEY}
## replace with key id obtained when secret is created, or delete if KMS not tested
#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef
## Storage classes
#storage_classes = "LUKEWARM, FROZEN"
## Lifecycle debug interval (default: 10)
#lc_debug_interval = 20
## Restore debug interval (default: 100)
#rgw_restore_debug_interval = 60
#rgw_restore_processor_period = 60
[s3 alt]
# alt display_name
display_name = RustFS Alt Tester
## alt email
email = alt@rustfs.local
# alt user_id
user_id = rustfsalt
# alt AWS access key (must be different from s3 main for many tests)
access_key = ${S3_ALT_ACCESS_KEY}
# alt AWS secret key
secret_key = ${S3_ALT_SECRET_KEY}
#[s3 cloud]
## to run the testcases with "cloud_transition" for transition
## and "cloud_restore" for restore attribute.
## Note: the waiting time may have to tweaked depending on
## the I/O latency to the cloud endpoint.
## host set for cloud endpoint
# host = localhost
## port set for cloud endpoint
# port = 8001
## say "False" to disable TLS
# is_secure = False
## cloud endpoint credentials
# access_key = 0555b35654ad1656d804
# secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
## storage class configured as cloud tier on local rgw server
# cloud_storage_class = CLOUDTIER
## Below are optional -
## Above configured cloud storage class config options
# retain_head_object = false
# allow_read_through = false # change it to enable read_through
# read_through_restore_days = 2
# target_storage_class = Target_SC
# target_path = cloud-bucket
## another regular storage class to test multiple transition rules,
# storage_class = S1
[s3 tenant]
# tenant display_name
display_name = RustFS Tenant Tester
# tenant user_id
user_id = rustfstenant
# tenant AWS access key
access_key = ${S3_ACCESS_KEY}
# tenant AWS secret key
secret_key = ${S3_SECRET_KEY}
# tenant email
email = tenant@rustfs.local
# tenant name
tenant = testx
#following section needs to be added for all sts-tests
[iam]
#used for iam operations in sts-tests
#email
email = s3@rustfs.local
#user_id
user_id = rustfsiam
#access_key
access_key = ${S3_ACCESS_KEY}
#secret_key
secret_key = ${S3_SECRET_KEY}
#display_name
display_name = RustFS IAM User
# iam account root user for iam_account tests
[iam root]
access_key = ${S3_ACCESS_KEY}
secret_key = ${S3_SECRET_KEY}
user_id = RGW11111111111111111
email = account1@rustfs.local
# iam account root user in a different account than [iam root]
[iam alt root]
access_key = ${S3_ACCESS_KEY}
secret_key = ${S3_SECRET_KEY}
user_id = RGW22222222222222222
email = account2@rustfs.local
#following section needs to be added when you want to run Assume Role With Webidentity test
[webidentity]
#used for assume role with web identity test in sts-tests
#all parameters will be obtained from ceph/qa/tasks/keycloak.py
#token=<access_token>
#aud=<obtained after introspecting token>
#sub=<obtained after introspecting token>
#azp=<obtained after introspecting token>
#user_token=<access token for a user, with attribute Department=[Engineering, Marketing>]
#thumbprint=<obtained from x509 certificate>
#KC_REALM=<name of the realm>

View File

@@ -40,11 +40,11 @@ env:
jobs:
security-audit:
name: Security Audit
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
timeout-minutes: 15
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Install cargo-audit
uses: taiki-e/install-action@v2
@@ -65,14 +65,14 @@ jobs:
dependency-review:
name: Dependency Review
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
if: github.event_name == 'pull_request'
permissions:
contents: read
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Dependency Review
uses: actions/dependency-review-action@v4

View File

@@ -83,7 +83,7 @@ jobs:
# Build strategy check - determine build type based on trigger
build-check:
name: Build Strategy Check
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
outputs:
should_build: ${{ steps.check.outputs.should_build }}
build_type: ${{ steps.check.outputs.build_type }}
@@ -92,7 +92,7 @@ jobs:
is_prerelease: ${{ steps.check.outputs.is_prerelease }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0
@@ -167,19 +167,19 @@ jobs:
matrix:
include:
# Linux builds
- os: ubuntu-latest
- os: ubicloud-standard-2
target: x86_64-unknown-linux-musl
cross: false
platform: linux
- os: ubuntu-latest
- os: ubicloud-standard-2
target: aarch64-unknown-linux-musl
cross: true
platform: linux
- os: ubuntu-latest
- os: ubicloud-standard-2
target: x86_64-unknown-linux-gnu
cross: false
platform: linux
- os: ubuntu-latest
- os: ubicloud-standard-2
target: aarch64-unknown-linux-gnu
cross: true
platform: linux
@@ -203,7 +203,7 @@ jobs:
# platform: windows
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0
@@ -454,7 +454,7 @@ jobs:
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
OSS_REGION: cn-beijing
OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
OSS_ENDPOINT: https://oss-accelerate.aliyuncs.com
shell: bash
run: |
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
@@ -532,7 +532,7 @@ jobs:
name: Build Summary
needs: [ build-check, build-rustfs ]
if: always() && needs.build-check.outputs.should_build == 'true'
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
steps:
- name: Build completion summary
shell: bash
@@ -584,7 +584,7 @@ jobs:
name: Create GitHub Release
needs: [ build-check, build-rustfs ]
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
permissions:
contents: write
outputs:
@@ -592,7 +592,7 @@ jobs:
release_url: ${{ steps.create.outputs.release_url }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0
@@ -670,13 +670,13 @@ jobs:
name: Upload Release Assets
needs: [ build-check, build-rustfs, create-release ]
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
permissions:
contents: write
actions: read
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Download all build artifacts
uses: actions/download-artifact@v5
@@ -751,7 +751,7 @@ jobs:
name: Update Latest Version
needs: [ build-check, upload-release-assets ]
if: startsWith(github.ref, 'refs/tags/')
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
steps:
- name: Update latest.json
env:
@@ -801,12 +801,12 @@ jobs:
name: Publish Release
needs: [ build-check, create-release, upload-release-assets ]
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Update release notes and publish
env:

View File

@@ -4,7 +4,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -62,17 +62,23 @@ on:
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
CARGO_TERM_COLOR: always
RUST_BACKTRACE: 1
CARGO_BUILD_JOBS: 2
jobs:
skip-check:
name: Skip Duplicate Actions
permissions:
actions: write
contents: read
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
@@ -83,15 +89,13 @@ jobs:
concurrent_skipping: "same_content_newer"
cancel_others: true
paths_ignore: '["*.md", "docs/**", "deploy/**"]'
# Never skip release events and tag pushes
do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'
typos:
name: Typos
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@stable
- name: Typos check with custom config file
uses: crate-ci/typos@master
@@ -100,13 +104,11 @@ jobs:
name: Test and Lint
needs: skip-check
if: needs.skip-check.outputs.should_skip != 'true'
runs-on: ubuntu-latest
runs-on: ubicloud-standard-4
timeout-minutes: 60
steps:
- name: Delete huge unnecessary tools folder
run: rm -rf /opt/hostedtoolcache
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Setup Rust environment
uses: ./.github/actions/setup
@@ -116,6 +118,9 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
cache-save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-nextest
uses: taiki-e/install-action@nextest
- name: Run tests
run: |
cargo nextest run --all --exclude e2e_test
@@ -131,11 +136,16 @@ jobs:
name: End-to-End Tests
needs: skip-check
if: needs.skip-check.outputs.should_skip != 'true'
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
timeout-minutes: 30
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Clean up previous test run
run: |
rm -rf /tmp/rustfs
rm -f /tmp/rustfs.log
- name: Setup Rust environment
uses: ./.github/actions/setup
@@ -155,7 +165,8 @@ jobs:
- name: Build debug binary
run: |
touch rustfs/build.rs
cargo build -p rustfs --bins
# Limit concurrency to prevent OOM
cargo build -p rustfs --bins --jobs 2
- name: Run end-to-end tests
run: |

View File

@@ -72,7 +72,7 @@ jobs:
# Check if we should build Docker images
build-check:
name: Docker Build Check
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
outputs:
should_build: ${{ steps.check.outputs.should_build }}
should_push: ${{ steps.check.outputs.should_push }}
@@ -83,7 +83,7 @@ jobs:
create_latest: ${{ steps.check.outputs.create_latest }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0
# For workflow_run events, checkout the specific commit that triggered the workflow
@@ -162,11 +162,11 @@ jobs:
if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
build_type="prerelease"
is_prerelease=true
# TODO: 临时修改 - 当前允许 alpha 版本也创建 latest 标签
# 等版本稳定后,需要移除下面这行,恢复原有逻辑(只有稳定版本才创建 latest
# TODO: Temporary change - currently allows alpha versions to also create latest tags
# After the version is stable, you need to remove the following line and restore the original logic (latest is created only for stable versions)
if [[ "$version" == *"alpha"* ]]; then
create_latest=true
echo "🧪 Building Docker image for prerelease: $version (临时允许创建 latest 标签)"
echo "🧪 Building Docker image for prerelease: $version (temporarily allowing creation of latest tag)"
else
echo "🧪 Building Docker image for prerelease: $version"
fi
@@ -215,11 +215,11 @@ jobs:
v*alpha*|v*beta*|v*rc*|*alpha*|*beta*|*rc*)
build_type="prerelease"
is_prerelease=true
# TODO: 临时修改 - 当前允许 alpha 版本也创建 latest 标签
# 等版本稳定后,需要移除下面的 if 块,恢复原有逻辑
# TODO: Temporary change - currently allows alpha versions to also create latest tags
# After the version is stable, you need to remove the if block below and restore the original logic.
if [[ "$input_version" == *"alpha"* ]]; then
create_latest=true
echo "🧪 Building with prerelease version: $input_version (临时允许创建 latest 标签)"
echo "🧪 Building with prerelease version: $input_version (temporarily allowing creation of latest tag)"
else
echo "🧪 Building with prerelease version: $input_version"
fi
@@ -264,11 +264,11 @@ jobs:
name: Build Docker Images
needs: build-check
if: needs.build-check.outputs.should_build == 'true'
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Login to Docker Hub
uses: docker/login-action@v3
@@ -330,9 +330,9 @@ jobs:
# Add channel tags for prereleases and latest for stable
if [[ "$CREATE_LATEST" == "true" ]]; then
# TODO: 临时修改 - 当前 alpha 版本也会创建 latest 标签
# 等版本稳定后,这里的逻辑保持不变,但上游的 CREATE_LATEST 设置需要恢复
# Stable release (以及临时的 alpha 版本)
# TODO: Temporary change - the current alpha version will also create the latest tag
# After the version is stabilized, the logic here remains unchanged, but the upstream CREATE_LATEST setting needs to be restored.
# Stable release (and temporary alpha versions)
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest"
elif [[ "$BUILD_TYPE" == "prerelease" ]]; then
# Prerelease channel tags (alpha, beta, rc)
@@ -404,7 +404,7 @@ jobs:
name: Docker Build Summary
needs: [ build-check, build-docker ]
if: always() && needs.build-check.outputs.should_build == 'true'
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
steps:
- name: Docker build completion summary
run: |
@@ -429,10 +429,10 @@ jobs:
"prerelease")
echo "🧪 Prerelease Docker image has been built with ${VERSION} tags"
echo "⚠️ This is a prerelease image - use with caution"
# TODO: 临时修改 - alpha 版本当前会创建 latest 标签
# 等版本稳定后,需要恢复下面的提示信息
# TODO: Temporary change - alpha versions currently create the latest tag
# After the version is stable, you need to restore the following prompt information
if [[ "$VERSION" == *"alpha"* ]] && [[ "$CREATE_LATEST" == "true" ]]; then
echo "🏷️ Latest tag has been created for alpha version (临时措施)"
echo "🏷️ Latest tag has been created for alpha version (temporary measures)"
else
echo "🚫 Latest tag NOT created for prerelease"
fi

.github/workflows/e2e-s3tests.yml (vendored, new file, +422 lines)

@@ -0,0 +1,422 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: e2e-s3tests
on:
workflow_dispatch:
inputs:
test-mode:
description: "Test mode to run"
required: true
type: choice
default: "single"
options:
- single
- multi
xdist:
description: "Enable pytest-xdist (parallel). '0' to disable."
required: false
default: "0"
maxfail:
description: "Stop after N failures (debug friendly)"
required: false
default: "1"
markexpr:
description: "pytest -m expression (feature filters)"
required: false
default: "not lifecycle and not versioning and not s3website and not bucket_logging and not encryption"
env:
# main user
S3_ACCESS_KEY: rustfsadmin
S3_SECRET_KEY: rustfsadmin
# alt user (must be different from main for many s3-tests)
S3_ALT_ACCESS_KEY: rustfsalt
S3_ALT_SECRET_KEY: rustfsalt
S3_REGION: us-east-1
RUST_LOG: info
PLATFORM: linux/amd64
defaults:
run:
shell: bash
jobs:
s3tests-single:
if: github.event.inputs.test-mode == 'single'
runs-on: ubicloud-standard-2
timeout-minutes: 120
steps:
- uses: actions/checkout@v6
- name: Enable buildx
uses: docker/setup-buildx-action@v3
- name: Build RustFS image (source, cached)
run: |
DOCKER_BUILDKIT=1 docker buildx build --load \
--platform ${PLATFORM} \
--cache-from type=gha \
--cache-to type=gha,mode=max \
-t rustfs-ci \
-f Dockerfile.source .
- name: Create network
run: docker network inspect rustfs-net >/dev/null 2>&1 || docker network create rustfs-net
- name: Remove existing rustfs-single (if any)
run: docker rm -f rustfs-single >/dev/null 2>&1 || true
- name: Start single RustFS
run: |
docker run -d --name rustfs-single \
--network rustfs-net \
-p 9000:9000 \
-e RUSTFS_ADDRESS=0.0.0.0:9000 \
-e RUSTFS_ACCESS_KEY=$S3_ACCESS_KEY \
-e RUSTFS_SECRET_KEY=$S3_SECRET_KEY \
-e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
-v /tmp/rustfs-single:/data \
rustfs-ci
- name: Wait for RustFS ready
run: |
for i in {1..60}; do
if curl -sf http://127.0.0.1:9000/health >/dev/null 2>&1; then
echo "RustFS is ready"
exit 0
fi
if [ "$(docker inspect -f '{{.State.Running}}' rustfs-single 2>/dev/null)" != "true" ]; then
echo "RustFS container not running" >&2
docker logs rustfs-single || true
exit 1
fi
sleep 2
done
echo "Health check timed out" >&2
docker logs rustfs-single || true
exit 1
- name: Generate s3tests config
run: |
export S3_HOST=127.0.0.1
envsubst < .github/s3tests/s3tests.conf > s3tests.conf
- name: Provision s3-tests alt user (required by suite)
run: |
python3 -m pip install --user --upgrade pip awscurl
export PATH="$HOME/.local/bin:$PATH"
# Admin API requires AWS SigV4 signing. awscurl is used by RustFS codebase as well.
awscurl \
--service s3 \
--region "${S3_REGION}" \
--access_key "${S3_ACCESS_KEY}" \
--secret_key "${S3_SECRET_KEY}" \
-X PUT \
-H 'Content-Type: application/json' \
-d '{"secretKey":"'"${S3_ALT_SECRET_KEY}"'","status":"enabled","policy":"readwrite"}' \
"http://127.0.0.1:9000/rustfs/admin/v3/add-user?accessKey=${S3_ALT_ACCESS_KEY}"
# Explicitly attach built-in policy via policy mapping.
# s3-tests relies on alt client being able to ListBuckets during setup cleanup.
awscurl \
--service s3 \
--region "${S3_REGION}" \
--access_key "${S3_ACCESS_KEY}" \
--secret_key "${S3_SECRET_KEY}" \
-X PUT \
"http://127.0.0.1:9000/rustfs/admin/v3/set-user-or-group-policy?policyName=readwrite&userOrGroup=${S3_ALT_ACCESS_KEY}&isGroup=false"
# Sanity check: alt user can list buckets (should not be AccessDenied).
awscurl \
--service s3 \
--region "${S3_REGION}" \
--access_key "${S3_ALT_ACCESS_KEY}" \
--secret_key "${S3_ALT_SECRET_KEY}" \
-X GET \
"http://127.0.0.1:9000/" >/dev/null
- name: Prepare s3-tests
run: |
python3 -m pip install --user --upgrade pip tox
export PATH="$HOME/.local/bin:$PATH"
git clone --depth 1 https://github.com/ceph/s3-tests.git s3-tests
- name: Run ceph s3-tests (debug friendly)
run: |
export PATH="$HOME/.local/bin:$PATH"
mkdir -p artifacts/s3tests-single
cd s3-tests
set -o pipefail
MAXFAIL="${{ github.event.inputs.maxfail }}"
if [ -z "$MAXFAIL" ]; then MAXFAIL="1"; fi
MARKEXPR="${{ github.event.inputs.markexpr }}"
if [ -z "$MARKEXPR" ]; then MARKEXPR="not lifecycle and not versioning and not s3website and not bucket_logging and not encryption"; fi
XDIST="${{ github.event.inputs.xdist }}"
if [ -z "$XDIST" ]; then XDIST="0"; fi
XDIST_ARGS=""
if [ "$XDIST" != "0" ]; then
# Add pytest-xdist to requirements.txt so tox installs it inside
# its virtualenv. Installing outside tox does NOT work.
echo "pytest-xdist" >> requirements.txt
XDIST_ARGS="-n $XDIST --dist=loadgroup"
fi
# Run tests from s3tests/functional (boto2+boto3 combined directory).
S3TEST_CONF=${GITHUB_WORKSPACE}/s3tests.conf \
tox -- \
-vv -ra --showlocals --tb=long \
--maxfail="$MAXFAIL" \
--junitxml=${GITHUB_WORKSPACE}/artifacts/s3tests-single/junit.xml \
$XDIST_ARGS \
s3tests/functional/test_s3.py \
-m "$MARKEXPR" \
2>&1 | tee ${GITHUB_WORKSPACE}/artifacts/s3tests-single/pytest.log
- name: Collect RustFS logs
if: always()
run: |
mkdir -p artifacts/rustfs-single
docker logs rustfs-single > artifacts/rustfs-single/rustfs.log 2>&1 || true
docker inspect rustfs-single > artifacts/rustfs-single/inspect.json || true
- name: Upload artifacts
if: always() && env.ACT != 'true'
uses: actions/upload-artifact@v4
with:
name: s3tests-single
path: artifacts/**
s3tests-multi:
if: github.event_name == 'workflow_dispatch' && github.event.inputs.test-mode == 'multi'
runs-on: ubicloud-standard-2
timeout-minutes: 150
steps:
- uses: actions/checkout@v6
- name: Enable buildx
uses: docker/setup-buildx-action@v3
- name: Build RustFS image (source, cached)
run: |
DOCKER_BUILDKIT=1 docker buildx build --load \
--platform ${PLATFORM} \
--cache-from type=gha \
--cache-to type=gha,mode=max \
-t rustfs-ci \
-f Dockerfile.source .
- name: Prepare cluster compose
run: |
cat > compose.yml <<'EOF'
services:
rustfs1:
image: rustfs-ci
hostname: rustfs1
networks: [rustfs-net]
environment:
RUSTFS_ADDRESS: "0.0.0.0:9000"
RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
volumes:
- rustfs1-data:/data
rustfs2:
image: rustfs-ci
hostname: rustfs2
networks: [rustfs-net]
environment:
RUSTFS_ADDRESS: "0.0.0.0:9000"
RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
volumes:
- rustfs2-data:/data
rustfs3:
image: rustfs-ci
hostname: rustfs3
networks: [rustfs-net]
environment:
RUSTFS_ADDRESS: "0.0.0.0:9000"
RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
volumes:
- rustfs3-data:/data
rustfs4:
image: rustfs-ci
hostname: rustfs4
networks: [rustfs-net]
environment:
RUSTFS_ADDRESS: "0.0.0.0:9000"
RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
volumes:
- rustfs4-data:/data
lb:
image: haproxy:2.9
hostname: lb
networks: [rustfs-net]
ports:
- "9000:9000"
volumes:
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
networks:
rustfs-net:
name: rustfs-net
volumes:
rustfs1-data:
rustfs2-data:
rustfs3-data:
rustfs4-data:
EOF
cat > haproxy.cfg <<'EOF'
defaults
mode http
timeout connect 5s
timeout client 30s
timeout server 30s
frontend fe_s3
bind *:9000
default_backend be_s3
backend be_s3
balance roundrobin
server s1 rustfs1:9000 check
server s2 rustfs2:9000 check
server s3 rustfs3:9000 check
server s4 rustfs4:9000 check
EOF
- name: Launch cluster
run: docker compose -f compose.yml up -d
- name: Wait for LB ready
run: |
for i in {1..90}; do
if curl -sf http://127.0.0.1:9000/health >/dev/null 2>&1; then
echo "Load balancer is ready"
exit 0
fi
sleep 2
done
echo "LB or backend not ready" >&2
docker compose -f compose.yml logs --tail=200 || true
exit 1
- name: Generate s3tests config
run: |
export S3_HOST=127.0.0.1
envsubst < .github/s3tests/s3tests.conf > s3tests.conf
- name: Provision s3-tests alt user (required by suite)
run: |
python3 -m pip install --user --upgrade pip awscurl
export PATH="$HOME/.local/bin:$PATH"
awscurl \
--service s3 \
--region "${S3_REGION}" \
--access_key "${S3_ACCESS_KEY}" \
--secret_key "${S3_SECRET_KEY}" \
-X PUT \
-H 'Content-Type: application/json' \
-d '{"secretKey":"'"${S3_ALT_SECRET_KEY}"'","status":"enabled","policy":"readwrite"}' \
"http://127.0.0.1:9000/rustfs/admin/v3/add-user?accessKey=${S3_ALT_ACCESS_KEY}"
awscurl \
--service s3 \
--region "${S3_REGION}" \
--access_key "${S3_ACCESS_KEY}" \
--secret_key "${S3_SECRET_KEY}" \
-X PUT \
"http://127.0.0.1:9000/rustfs/admin/v3/set-user-or-group-policy?policyName=readwrite&userOrGroup=${S3_ALT_ACCESS_KEY}&isGroup=false"
awscurl \
--service s3 \
--region "${S3_REGION}" \
--access_key "${S3_ALT_ACCESS_KEY}" \
--secret_key "${S3_ALT_SECRET_KEY}" \
-X GET \
"http://127.0.0.1:9000/" >/dev/null
- name: Prepare s3-tests
run: |
python3 -m pip install --user --upgrade pip tox
export PATH="$HOME/.local/bin:$PATH"
git clone --depth 1 https://github.com/ceph/s3-tests.git s3-tests
- name: Run ceph s3-tests (multi, debug friendly)
run: |
export PATH="$HOME/.local/bin:$PATH"
mkdir -p artifacts/s3tests-multi
cd s3-tests
set -o pipefail
MAXFAIL="${{ github.event.inputs.maxfail }}"
if [ -z "$MAXFAIL" ]; then MAXFAIL="1"; fi
MARKEXPR="${{ github.event.inputs.markexpr }}"
if [ -z "$MARKEXPR" ]; then MARKEXPR="not lifecycle and not versioning and not s3website and not bucket_logging and not encryption"; fi
XDIST="${{ github.event.inputs.xdist }}"
if [ -z "$XDIST" ]; then XDIST="0"; fi
XDIST_ARGS=""
if [ "$XDIST" != "0" ]; then
# Add pytest-xdist to requirements.txt so tox installs it inside
# its virtualenv. Installing outside tox does NOT work.
echo "pytest-xdist" >> requirements.txt
XDIST_ARGS="-n $XDIST --dist=loadgroup"
fi
# Run tests from s3tests/functional (boto2+boto3 combined directory).
S3TEST_CONF=${GITHUB_WORKSPACE}/s3tests.conf \
tox -- \
-vv -ra --showlocals --tb=long \
--maxfail="$MAXFAIL" \
--junitxml=${GITHUB_WORKSPACE}/artifacts/s3tests-multi/junit.xml \
$XDIST_ARGS \
s3tests/functional/test_s3.py \
-m "$MARKEXPR" \
2>&1 | tee ${GITHUB_WORKSPACE}/artifacts/s3tests-multi/pytest.log
- name: Collect logs
if: always()
run: |
mkdir -p artifacts/cluster
docker compose -f compose.yml logs --no-color > artifacts/cluster/cluster.log 2>&1 || true
- name: Upload artifacts
if: always() && env.ACT != 'true'
uses: actions/upload-artifact@v4
with:
name: s3tests-multi
path: artifacts/**
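For local debugging outside CI, the steps above can be condensed into a shell sketch. This assumes Docker, `curl`, `envsubst`, `git` and `tox` are available, that `compose.yml` and `haproxy.cfg` were generated as shown above, and that the job's `S3_*` variables are exported in the shell; the alt-user provisioning step is omitted here but is still needed before the full suite passes.

```bash
# Minimal local reproduction of the multi-node job above (a sketch, not the CI itself).
docker compose -f compose.yml up -d

# Wait until HAProxy serves /health from a healthy backend.
until curl -sf http://127.0.0.1:9000/health >/dev/null; do sleep 2; done

# Render the suite config and run a filtered pass against the load balancer.
export S3_HOST=127.0.0.1
envsubst < .github/s3tests/s3tests.conf > s3tests.conf
git clone --depth 1 https://github.com/ceph/s3-tests.git s3-tests
cd s3-tests
S3TEST_CONF="$(pwd)/../s3tests.conf" tox -- \
  -m "not lifecycle and not versioning and not s3website and not bucket_logging and not encryption" \
  s3tests/functional/test_s3.py
```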

View File

@@ -1,9 +1,23 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Publish helm chart to artifacthub
on:
workflow_run:
workflows: ["Build and Release"]
types: [completed]
workflows: [ "Build and Release" ]
types: [ completed ]
permissions:
contents: read
@@ -13,7 +27,7 @@ env:
jobs:
build-helm-package:
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
# Only run on successful builds triggered by tag pushes (version format: x.y.z or x.y.z-suffix)
if: |
github.event.workflow_run.conclusion == 'success' &&
@@ -22,9 +36,9 @@ jobs:
steps:
- name: Checkout helm chart repo
uses: actions/checkout@v2
uses: actions/checkout@v6
- name: Replace chart appversion
- name: Replace chart app version
run: |
set -e
set -x
@@ -40,7 +54,7 @@ jobs:
cp helm/README.md helm/rustfs/
package_version=$(echo $new_version | awk -F '-' '{print $2}' | awk -F '.' '{print $NF}')
helm package ./helm/rustfs --destination helm/rustfs/ --version "0.0.$package_version"
- name: Upload helm package as artifact
uses: actions/upload-artifact@v4
with:
@@ -49,25 +63,25 @@ jobs:
retention-days: 1
publish-helm-package:
runs-on: ubuntu-latest
needs: [build-helm-package]
runs-on: ubicloud-standard-2
needs: [ build-helm-package ]
steps:
- name: Checkout helm package repo
uses: actions/checkout@v2
uses: actions/checkout@v6
with:
repository: rustfs/helm
repository: rustfs/helm
token: ${{ secrets.RUSTFS_HELM_PACKAGE }}
- name: Download helm package
uses: actions/download-artifact@v4
with:
name: helm-package
path: ./
- name: Set up helm
uses: azure/setup-helm@v4.3.0
- name: Generate index
run: helm repo index . --url https://charts.rustfs.com

View File

@@ -25,7 +25,7 @@ permissions:
jobs:
build:
runs-on: ubuntu-latest
runs-on: ubicloud-standard-4
steps:
- uses: usthe/issues-translate-action@v2.7
with:

View File

@@ -40,11 +40,11 @@ env:
jobs:
performance-profile:
name: Performance Profiling
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
timeout-minutes: 30
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Setup Rust environment
uses: ./.github/actions/setup
@@ -115,11 +115,11 @@ jobs:
benchmark:
name: Benchmark Tests
runs-on: ubuntu-latest
runs-on: ubicloud-standard-2
timeout-minutes: 45
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Setup Rust environment
uses: ./.github/actions/setup

12
.gitignore vendored
View File

@@ -2,6 +2,7 @@
.DS_Store
.idea
.vscode
.direnv/
/test
/logs
/data
@@ -23,4 +24,13 @@ profile.json
*.go
*.pb
*.svg
deploy/logs/*.log.*
deploy/logs/*.log.*
artifacts/
# s3-tests local artifacts (root directory only)
/s3-tests/
/s3-tests-local/
/s3tests.conf
/s3tests.conf.*
*.events
*.audit
*.snappy

32
.pre-commit-config.yaml Normal file
View File

@@ -0,0 +1,32 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: local
hooks:
- id: cargo-fmt
name: cargo fmt
entry: cargo fmt --all --check
language: system
types: [rust]
pass_filenames: false
- id: cargo-clippy
name: cargo clippy
entry: cargo clippy --all-targets --all-features -- -D warnings
language: system
types: [rust]
pass_filenames: false
- id: cargo-check
name: cargo check
entry: cargo check --all-targets
language: system
types: [rust]
pass_filenames: false
- id: cargo-test
name: cargo test
entry: bash -c 'cargo test --workspace --exclude e2e_test && cargo test --all --doc'
language: system
types: [rust]
pass_filenames: false
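Because every hook above is a `repo: local` hook with `language: system`, pre-commit only orchestrates the repository's own cargo toolchain. A small setup sketch, assuming the `pre-commit` CLI is installed from PyPI (any install method works):

```bash
# One-time setup: install the runner and register it under .git/hooks.
python3 -m pip install --user pre-commit
pre-commit install

# Run all four hooks against the entire tree, e.g. before pushing.
pre-commit run --all-files
```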

37
.vscode/launch.json vendored
View File

@@ -1,9 +1,31 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"type": "lldb",
"request": "launch",
"name": "Debug(only) executable 'rustfs'",
"env": {
"RUST_LOG": "rustfs=info,ecstore=info,s3s=info,iam=info",
"RUSTFS_SKIP_BACKGROUND_TASK": "on"
//"RUSTFS_OBS_LOG_DIRECTORY": "./deploy/logs",
// "RUSTFS_POLICY_PLUGIN_URL":"http://localhost:8181/v1/data/rustfs/authz/allow",
// "RUSTFS_POLICY_PLUGIN_AUTH_TOKEN":"your-opa-token"
},
"program": "${workspaceFolder}/target/debug/rustfs",
"args": [
"--access-key",
"rustfsadmin",
"--secret-key",
"rustfsadmin",
"--address",
"0.0.0.0:9010",
"--server-domains",
"127.0.0.1:9010",
"./target/volume/test{1...4}"
],
"cwd": "${workspaceFolder}"
},
{
"type": "lldb",
"request": "launch",
@@ -67,12 +89,8 @@
"test",
"--no-run",
"--lib",
"--package=ecstore"
],
"filter": {
"name": "ecstore",
"kind": "lib"
}
"--package=rustfs-ecstore"
]
},
"args": [],
"cwd": "${workspaceFolder}"
@@ -95,6 +113,7 @@
// "RUSTFS_OBS_TRACE_ENDPOINT": "http://127.0.0.1:4318/v1/traces", // jeager otlp http endpoint
// "RUSTFS_OBS_METRIC_ENDPOINT": "http://127.0.0.1:4318/v1/metrics", // default otlp http endpoint
// "RUSTFS_OBS_LOG_ENDPOINT": "http://127.0.0.1:4318/v1/logs", // default otlp http endpoint
// "RUSTFS_COMPRESS_ENABLE": "true",
"RUSTFS_CONSOLE_ADDRESS": "127.0.0.1:9001",
"RUSTFS_OBS_LOG_DIRECTORY": "./target/logs",
},

View File

@@ -2,6 +2,7 @@
## Communication Rules
- Respond to the user in Chinese; use English in all other contexts.
- Code and documentation must be written in English only. Chinese text is allowed solely as test data/fixtures when a case explicitly requires Chinese-language content for validation.
## Project Structure & Module Organization
The workspace root hosts shared dependencies in `Cargo.toml`. The service binary lives under `rustfs/src/main.rs`, while reusable crates sit in `crates/` (`crypto`, `iam`, `kms`, and `e2e_test`). Local fixtures for standalone flows reside in `test_standalone/`, deployment manifests are under `deploy/`, Docker assets sit at the root, and automation lives in `scripts/`. Skim each crate's README or module docs before contributing changes.
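A quick way to orient in a fresh checkout, using only the paths mentioned above (the `jq` filter is just one convenient option, not a project requirement):

```bash
ls crates/                            # reusable crates: crypto, iam, kms, e2e_test, ...
ls deploy/ scripts/ test_standalone/  # manifests, automation, standalone fixtures
cargo metadata --no-deps --format-version 1 | jq -r '.packages[].name'   # workspace members
```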

View File

@@ -2,6 +2,8 @@
## 📋 Code Quality Requirements
For instructions on setting up and running the local development environment, please see [Development Guide](docs/DEVELOPMENT.md).
### 🔧 Code Formatting Rules
**MANDATORY**: All code must be properly formatted before committing. This project enforces strict formatting standards to maintain code consistency and readability.
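In practice the rule maps to the same commands the new `.pre-commit-config.yaml` hooks and `make pre-commit` run; a quick manual pass looks like:

```bash
cargo fmt --all                                                        # format
cargo clippy --all-targets --all-features -- -D warnings               # lint, warnings are errors
cargo check --all-targets                                              # compile check
cargo test --workspace --exclude e2e_test && cargo test --all --doc    # tests + doctests
```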

733
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -19,6 +19,7 @@ members = [
"crates/audit", # Audit target management system with multi-target fan-out
"crates/common", # Shared utilities and data structures
"crates/config", # Configuration management
"crates/credentials", # Credential management system
"crates/crypto", # Cryptography and security features
"crates/ecstore", # Erasure coding storage implementation
"crates/e2e_test", # End-to-end test suite
@@ -71,6 +72,7 @@ rustfs-audit = { path = "crates/audit", version = "0.0.5" }
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
rustfs-common = { path = "crates/common", version = "0.0.5" }
rustfs-config = { path = "./crates/config", version = "0.0.5" }
rustfs-credentials = { path = "crates/credentials", version = "0.0.5" }
rustfs-crypto = { path = "crates/crypto", version = "0.0.5" }
rustfs-ecstore = { path = "crates/ecstore", version = "0.0.5" }
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
@@ -97,18 +99,19 @@ async-channel = "2.5.0"
async-compression = { version = "0.4.19" }
async-recursion = "1.1.1"
async-trait = "0.1.89"
axum = "0.8.7"
axum-extra = "0.12.2"
axum = "0.8.8"
axum-server = { version = "0.8.0", features = ["tls-rustls-no-provider"], default-features = false }
futures = "0.3.31"
futures-core = "0.3.31"
futures-util = "0.3.31"
pollster = "0.4.0"
hyper = { version = "1.8.1", features = ["http2", "http1", "server"] }
hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "ring", "webpki-roots"] }
hyper-util = { version = "0.1.19", features = ["tokio", "server-auto", "server-graceful"] }
http = "1.4.0"
http-body = "1.0.1"
reqwest = { version = "0.12.25", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
http-body-util = "0.1.3"
reqwest = { version = "0.12.28", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
socket2 = "0.6.1"
tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
@@ -125,31 +128,31 @@ tower-http = { version = "0.6.8", features = ["cors"] }
bytes = { version = "1.11.0", features = ["serde"] }
bytesize = "2.3.1"
byteorder = "1.5.0"
flatbuffers = "25.9.23"
flatbuffers = "25.12.19"
form_urlencoded = "1.2.2"
prost = "0.14.1"
quick-xml = "0.38.4"
rmcp = { version = "0.10.0" }
rmp = { version = "0.8.14" }
rmp-serde = { version = "1.3.0" }
rmcp = { version = "0.12.0" }
rmp = { version = "0.8.15" }
rmp-serde = { version = "1.3.1" }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["raw_value"] }
serde_json = { version = "1.0.148", features = ["raw_value"] }
serde_urlencoded = "0.7.1"
schemars = "1.1.0"
schemars = "1.2.0"
# Cryptography and Security
aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
argon2 = { version = "0.6.0-rc.3", features = ["std"] }
argon2 = { version = "0.6.0-rc.5" }
blake3 = { version = "1.8.2", features = ["rayon", "mmap"] }
chacha20poly1305 = { version = "0.11.0-rc.2" }
crc-fast = "1.6.0"
hmac = { version = "0.13.0-rc.3" }
jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] }
pbkdf2 = "0.13.0-rc.3"
pbkdf2 = "0.13.0-rc.5"
rsa = { version = "0.10.0-rc.10" }
rustls = { version = "0.23.35", features = ["ring", "logging", "std", "tls12"], default-features = false }
rustls-pemfile = "2.2.0"
rustls-pki-types = "1.13.1"
rustls-pki-types = "1.13.2"
sha1 = "0.11.0-rc.3"
sha2 = "0.11.0-rc.3"
subtle = "2.6"
@@ -162,20 +165,20 @@ time = { version = "0.3.44", features = ["std", "parsing", "formatting", "macros
# Utilities and Tools
anyhow = "1.0.100"
arc-swap = "1.7.1"
arc-swap = "1.8.0"
astral-tokio-tar = "0.5.6"
atoi = "2.0.0"
atomic_enum = "0.3.0"
aws-config = { version = "1.8.11" }
aws-credential-types = { version = "1.2.10" }
aws-sdk-s3 = { version = "1.116.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
aws-smithy-types = { version = "1.3.4" }
aws-config = { version = "1.8.12" }
aws-credential-types = { version = "1.2.11" }
aws-sdk-s3 = { version = "1.119.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
aws-smithy-types = { version = "1.3.5" }
base64 = "0.22.1"
base64-simd = "0.8.0"
brotli = "8.0.2"
cfg-if = "1.0.4"
clap = { version = "4.5.53", features = ["derive", "env"] }
const-str = { version = "0.7.0", features = ["std", "proc"] }
const-str = { version = "0.7.1", features = ["std", "proc"] }
convert_case = "0.10.0"
criterion = { version = "0.8", features = ["html_reports"] }
crossbeam-queue = "0.3.12"
@@ -186,8 +189,8 @@ faster-hex = "0.10.0"
flate2 = "1.1.5"
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv", "json"] }
glob = "0.3.3"
google-cloud-storage = "1.4.0"
google-cloud-auth = "1.2.0"
google-cloud-storage = "1.5.0"
google-cloud-auth = "1.3.0"
hashbrown = { version = "0.16.1", features = ["serde", "rayon"] }
heed = { version = "0.22.0" }
hex-simd = "0.8.0"
@@ -196,13 +199,13 @@ ipnetwork = { version = "0.21.1", features = ["serde"] }
lazy_static = "1.5.0"
libc = "0.2.178"
libsystemd = "0.7.2"
local-ip-address = "0.6.6"
local-ip-address = "0.6.8"
lz4 = "1.28.1"
matchit = "0.9.0"
matchit = "0.9.1"
md-5 = "0.11.0-rc.3"
md5 = "0.8.0"
mime_guess = "2.0.5"
moka = { version = "0.12.11", features = ["future"] }
moka = { version = "0.12.12", features = ["future"] }
netif = "0.1.6"
nix = { version = "0.30.1", features = ["fs"] }
nu-ansi-term = "0.50.3"
@@ -221,9 +224,9 @@ regex = { version = "1.12.2" }
rumqttc = { version = "0.25.1" }
rust-embed = { version = "8.9.0" }
rustc-hash = { version = "2.1.1" }
s3s = { version = "0.12.0-rc.4", features = ["minio"] }
s3s = { version = "0.13.0-alpha", features = ["minio"], git = "https://github.com/s3s-project/s3s.git", branch = "main" }
serial_test = "3.2.0"
shadow-rs = { version = "1.4.0", default-features = false }
shadow-rs = { version = "1.5.0", default-features = false }
siphasher = "1.0.1"
smallvec = { version = "1.15.1", features = ["serde"] }
smartstring = "1.0.1"
@@ -234,10 +237,10 @@ strum = { version = "0.27.2", features = ["derive"] }
sysctl = "0.7.1"
sysinfo = "0.37.2"
temp-env = "0.3.6"
tempfile = "3.23.0"
tempfile = "3.24.0"
test-case = "3.3.1"
thiserror = "2.0.17"
tracing = { version = "0.1.43" }
tracing = { version = "0.1.44" }
tracing-appender = "0.2.4"
tracing-error = "0.2.1"
tracing-opentelemetry = "0.32.0"
@@ -251,7 +254,7 @@ walkdir = "2.5.0"
wildmatch = { version = "2.6.1", features = ["serde"] }
winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
zip = "6.0.0"
zip = "7.0.0"
zstd = "0.13.3"
# Observability and Metrics
@@ -274,10 +277,8 @@ jemalloc_pprof = { version = "0.8.1", features = ["symbolize", "flamegraph"] }
# Used to generate CPU performance analysis data and flame diagrams
pprof = { version = "0.15.0", features = ["flamegraph", "protobuf-codec"] }
[workspace.metadata.cargo-shear]
ignored = ["rustfs", "rustfs-mcp", "tokio-test"]
ignored = ["rustfs", "rustfs-mcp"]
[profile.release]
opt-level = 3

View File

@@ -39,7 +39,9 @@ RUN set -eux; \
libssl-dev \
lld \
protobuf-compiler \
flatbuffers-compiler; \
flatbuffers-compiler \
gcc-aarch64-linux-gnu \
gcc-x86-64-linux-gnu; \
rm -rf /var/lib/apt/lists/*
# Optional: cross toolchain for aarch64 (only when targeting linux/arm64)
@@ -51,18 +53,18 @@ RUN set -eux; \
rm -rf /var/lib/apt/lists/*; \
fi
# Add Rust targets based on TARGETPLATFORM
# Add Rust targets for both arches (to support cross-builds on multi-arch runners)
RUN set -eux; \
case "${TARGETPLATFORM:-linux/amd64}" in \
linux/amd64) rustup target add x86_64-unknown-linux-gnu ;; \
linux/arm64) rustup target add aarch64-unknown-linux-gnu ;; \
*) echo "Unsupported TARGETPLATFORM=${TARGETPLATFORM}" >&2; exit 1 ;; \
esac
rustup target add x86_64-unknown-linux-gnu aarch64-unknown-linux-gnu; \
rustup component add rust-std-x86_64-unknown-linux-gnu rust-std-aarch64-unknown-linux-gnu
# Cross-compilation environment (used only when targeting aarch64)
ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc
ENV CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=x86_64-linux-gnu-gcc
ENV CC_x86_64_unknown_linux_gnu=x86_64-linux-gnu-gcc
ENV CXX_x86_64_unknown_linux_gnu=x86_64-linux-gnu-g++
WORKDIR /usr/src/rustfs
@@ -72,7 +74,6 @@ COPY Cargo.toml Cargo.lock ./
# 2) workspace member manifests (adjust if workspace layout changes)
COPY rustfs/Cargo.toml rustfs/Cargo.toml
COPY crates/*/Cargo.toml crates/
COPY cli/rustfs-gui/Cargo.toml cli/rustfs-gui/Cargo.toml
# Pre-fetch dependencies for better caching
RUN --mount=type=cache,target=/usr/local/cargo/registry \
@@ -117,6 +118,49 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
;; \
esac
# -----------------------------
# Development stage (keeps toolchain)
# -----------------------------
FROM builder AS dev
ARG BUILD_DATE
ARG VCS_REF
LABEL name="RustFS (dev-source)" \
maintainer="RustFS Team" \
build-date="${BUILD_DATE}" \
vcs-ref="${VCS_REF}" \
description="RustFS - local development with Rust toolchain."
# Install runtime dependencies that might be missing in partial builder
# (builder already has build-essential, lld, etc.)
WORKDIR /app
ENV CARGO_INCREMENTAL=1
# Ensure we have the same default env vars available
ENV RUSTFS_ADDRESS=":9000" \
RUSTFS_ACCESS_KEY="rustfsadmin" \
RUSTFS_SECRET_KEY="rustfsadmin" \
RUSTFS_CONSOLE_ENABLE="true" \
RUSTFS_VOLUMES="/data" \
RUST_LOG="warn" \
RUSTFS_OBS_LOG_DIRECTORY="/logs" \
RUSTFS_USERNAME="rustfs" \
RUSTFS_GROUPNAME="rustfs" \
RUSTFS_UID="10001" \
RUSTFS_GID="10001"
# Note: We don't COPY source here because we expect it to be mounted at /app
# We rely on cargo run to build and run
EXPOSE 9000 9001
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
CMD ["cargo", "run", "--bin", "rustfs", "--"]
# -----------------------------
# Runtime stage (Ubuntu minimal)
# -----------------------------
@@ -143,8 +187,8 @@ RUN set -eux; \
# Create a conventional runtime user/group (final switch happens in entrypoint via chroot --userspec)
RUN set -eux; \
groupadd -g 1000 rustfs; \
useradd -u 1000 -g rustfs -M -s /usr/sbin/nologin rustfs
groupadd -g 10001 rustfs; \
useradd -u 10001 -g rustfs -M -s /usr/sbin/nologin rustfs
WORKDIR /app
@@ -168,8 +212,8 @@ ENV RUSTFS_ADDRESS=":9000" \
RUST_LOG="warn" \
RUSTFS_USERNAME="rustfs" \
RUSTFS_GROUPNAME="rustfs" \
RUSTFS_UID="1000" \
RUSTFS_GID="1000"
RUSTFS_UID="10001" \
RUSTFS_GID="10001"
EXPOSE 9000
VOLUME ["/data"]

411
Makefile
View File

@@ -2,375 +2,80 @@
# Remote development requires VSCode with Dev Containers, Remote SSH, Remote Explorer
# https://code.visualstudio.com/docs/remote/containers
###########
.PHONY: SHELL
# Makefile global config
# Use config.mak to override any of the following variables.
# Do not make changes here.
.DEFAULT_GOAL := help
.EXPORT_ALL_VARIABLES:
.ONESHELL:
.SILENT:
NUM_CORES := $(shell nproc 2>/dev/null || sysctl -n hw.ncpu)
MAKEFLAGS += -j$(NUM_CORES) -l$(NUM_CORES)
MAKEFLAGS += --silent
SHELL:= /bin/bash
.SHELLFLAGS = -eu -o pipefail -c
DOCKER_CLI ?= docker
IMAGE_NAME ?= rustfs:v1.0.0
CONTAINER_NAME ?= rustfs-dev
# Docker build configurations
DOCKERFILE_PRODUCTION = Dockerfile
DOCKERFILE_SOURCE = Dockerfile.source
# Code quality and formatting targets
.PHONY: fmt
fmt:
@echo "🔧 Formatting code..."
cargo fmt --all
.PHONY: fmt-check
fmt-check:
@echo "📝 Checking code formatting..."
cargo fmt --all --check
.PHONY: clippy
clippy:
@echo "🔍 Running clippy checks..."
cargo clippy --fix --allow-dirty
cargo clippy --all-targets --all-features -- -D warnings
.PHONY: check
check:
@echo "🔨 Running compilation check..."
cargo check --all-targets
.PHONY: test
test:
@echo "🧪 Running tests..."
@if command -v cargo-nextest >/dev/null 2>&1; then \
cargo nextest run --all --exclude e2e_test; \
else \
echo " cargo-nextest not found; falling back to 'cargo test'"; \
cargo test --workspace --exclude e2e_test -- --nocapture; \
fi
cargo test --all --doc
.PHONY: pre-commit
pre-commit: fmt clippy check test
@echo "✅ All pre-commit checks passed!"
.PHONY: setup-hooks
setup-hooks:
@echo "🔧 Setting up git hooks..."
chmod +x .git/hooks/pre-commit
@echo "✅ Git hooks setup complete!"
.PHONY: e2e-server
e2e-server:
sh $(shell pwd)/scripts/run.sh
.PHONY: probe-e2e
probe-e2e:
sh $(shell pwd)/scripts/probe.sh
# Native build using build-rustfs.sh script
.PHONY: build
build:
@echo "🔨 Building RustFS using build-rustfs.sh script..."
./build-rustfs.sh
.PHONY: build-dev
build-dev:
@echo "🔨 Building RustFS in development mode..."
./build-rustfs.sh --dev
# Docker-based build (alternative approach)
# Usage: make BUILD_OS=ubuntu22.04 build-docker
# Output: target/ubuntu22.04/release/rustfs
BUILD_OS ?= rockylinux9.3
.PHONY: build-docker
build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
build-docker:
@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
$(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)
.PHONY: build-musl
build-musl:
@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform x86_64-unknown-linux-musl
# Makefile colors config
bold := $(shell tput bold)
normal := $(shell tput sgr0)
errorTitle := $(shell tput setab 1 && tput bold && echo '\n')
recommendation := $(shell tput setab 4)
underline := $(shell tput smul)
reset := $(shell tput -Txterm sgr0)
black := $(shell tput setaf 0)
red := $(shell tput setaf 1)
green := $(shell tput setaf 2)
yellow := $(shell tput setaf 3)
blue := $(shell tput setaf 4)
magenta := $(shell tput setaf 5)
cyan := $(shell tput setaf 6)
white := $(shell tput setaf 7)
.PHONY: build-gnu
build-gnu:
@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
define HEADER
How to use me:
# To get help for each target
${bold}make help${reset}
.PHONY: build-musl-arm64
build-musl-arm64:
@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform aarch64-unknown-linux-musl
# To run and execute a target
${bold}make ${cyan}<target>${reset}
.PHONY: build-gnu-arm64
build-gnu-arm64:
@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
💡 For more help use 'make help', 'make help-build' or 'make help-docker'
.PHONY: deploy-dev
deploy-dev: build-musl
@echo "🚀 Deploying to dev server: $${IP}"
./scripts/dev_deploy.sh $${IP}
🦀 RustFS Makefile Help:
# ========================================================================================
# Docker Multi-Architecture Builds (Primary Methods)
# ========================================================================================
📋 Main Command Categories:
make help-build # Show build-related help
make help-docker # Show Docker-related help
# Production builds using docker-buildx.sh (for CI/CD and production)
.PHONY: docker-buildx
docker-buildx:
@echo "🏗️ Building multi-architecture production Docker images with buildx..."
./docker-buildx.sh
🔧 Code Quality:
make fmt # Format code
make clippy # Run clippy checks
make test # Run tests
make pre-commit # Run all pre-commit checks
.PHONY: docker-buildx-push
docker-buildx-push:
@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
./docker-buildx.sh --push
.PHONY: docker-buildx-version
docker-buildx-version:
@if [ -z "$(VERSION)" ]; then \
echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
exit 1; \
fi
@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
./docker-buildx.sh --release $(VERSION)
.PHONY: docker-buildx-push-version
docker-buildx-push-version:
@if [ -z "$(VERSION)" ]; then \
echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
exit 1; \
fi
@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
./docker-buildx.sh --release $(VERSION) --push
# Development/Source builds using direct buildx commands
.PHONY: docker-dev
docker-dev:
@echo "🏗️ Building multi-architecture development Docker images with buildx..."
@echo "💡 This builds from source code and is intended for local development and testing"
@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
$(DOCKER_CLI) buildx build \
--platform linux/amd64,linux/arm64 \
--file $(DOCKERFILE_SOURCE) \
--tag rustfs:source-latest \
--tag rustfs:dev-latest \
.
.PHONY: docker-dev-local
docker-dev-local:
@echo "🏗️ Building single-architecture development Docker image for local use..."
@echo "💡 This builds from source code for the current platform and loads locally"
$(DOCKER_CLI) buildx build \
--file $(DOCKERFILE_SOURCE) \
--tag rustfs:source-latest \
--tag rustfs:dev-latest \
--load \
.
.PHONY: docker-dev-push
docker-dev-push:
@if [ -z "$(REGISTRY)" ]; then \
echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
exit 1; \
fi
@echo "🚀 Building and pushing multi-architecture development Docker images..."
@echo "💡 Pushing to registry: $(REGISTRY)"
$(DOCKER_CLI) buildx build \
--platform linux/amd64,linux/arm64 \
--file $(DOCKERFILE_SOURCE) \
--tag $(REGISTRY)/rustfs:source-latest \
--tag $(REGISTRY)/rustfs:dev-latest \
--push \
.
🚀 Quick Start:
make build # Build RustFS binary
make docker-dev-local # Build development Docker image (local)
make dev-env-start # Start development environment
endef
export HEADER
# Local production builds using direct buildx (alternative to docker-buildx.sh)
.PHONY: docker-buildx-production-local
docker-buildx-production-local:
@echo "🏗️ Building single-architecture production Docker image locally..."
@echo "💡 Alternative to docker-buildx.sh for local testing"
$(DOCKER_CLI) buildx build \
--file $(DOCKERFILE_PRODUCTION) \
--tag rustfs:production-latest \
--tag rustfs:latest \
--load \
--build-arg RELEASE=latest \
.
-include $(addsuffix /*.mak, $(shell find .config/make -type d))
# ========================================================================================
# Single Architecture Docker Builds (Traditional)
# ========================================================================================
.PHONY: docker-build-production
docker-build-production:
@echo "🏗️ Building single-architecture production Docker image..."
@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .
.PHONY: docker-build-source
docker-build-source:
@echo "🏗️ Building single-architecture source Docker image..."
@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
--build-arg BUILDKIT_INLINE_CACHE=1 \
-f $(DOCKERFILE_SOURCE) -t rustfs:source .
# ========================================================================================
# Development Environment
# ========================================================================================
.PHONY: dev-env-start
dev-env-start:
@echo "🚀 Starting development environment..."
$(DOCKER_CLI) buildx build \
--file $(DOCKERFILE_SOURCE) \
--tag rustfs:dev \
--load \
.
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
-p 9010:9010 -p 9000:9000 \
-v $(shell pwd):/workspace \
-it rustfs:dev
.PHONY: dev-env-stop
dev-env-stop:
@echo "🛑 Stopping development environment..."
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
.PHONY: dev-env-restart
dev-env-restart: dev-env-stop dev-env-start
# ========================================================================================
# Build Utilities
# ========================================================================================
.PHONY: docker-inspect-multiarch
docker-inspect-multiarch:
@if [ -z "$(IMAGE)" ]; then \
echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
exit 1; \
fi
@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
docker buildx imagetools inspect $(IMAGE)
.PHONY: build-cross-all
build-cross-all:
@echo "🔧 Building all target architectures..."
@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
@echo "🔨 Generating protobuf code..."
cargo run --bin gproto || true
@echo "🔨 Building x86_64-unknown-linux-gnu..."
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
@echo "🔨 Building aarch64-unknown-linux-gnu..."
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
@echo "🔨 Building x86_64-unknown-linux-musl..."
./build-rustfs.sh --platform x86_64-unknown-linux-musl
@echo "🔨 Building aarch64-unknown-linux-musl..."
./build-rustfs.sh --platform aarch64-unknown-linux-musl
@echo "✅ All architectures built successfully!"
# ========================================================================================
# Help and Documentation
# ========================================================================================
.PHONY: help-build
help-build:
@echo "🔨 RustFS Build Help:"
@echo ""
@echo "🚀 Local Build (Recommended):"
@echo " make build # Build RustFS binary (includes console by default)"
@echo " make build-dev # Development mode build"
@echo " make build-musl # Build x86_64 musl version"
@echo " make build-gnu # Build x86_64 GNU version"
@echo " make build-musl-arm64 # Build aarch64 musl version"
@echo " make build-gnu-arm64 # Build aarch64 GNU version"
@echo ""
@echo "🐳 Docker Build:"
@echo " make build-docker # Build using Docker container"
@echo " make build-docker BUILD_OS=ubuntu22.04 # Specify build system"
@echo ""
@echo "🏗️ Cross-architecture Build:"
@echo " make build-cross-all # Build binaries for all architectures"
@echo ""
@echo "🔧 Direct usage of build-rustfs.sh script:"
@echo " ./build-rustfs.sh --help # View script help"
@echo " ./build-rustfs.sh --no-console # Build without console resources"
@echo " ./build-rustfs.sh --force-console-update # Force update console resources"
@echo " ./build-rustfs.sh --dev # Development mode build"
@echo " ./build-rustfs.sh --sign # Sign binary files"
@echo " ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
@echo " ./build-rustfs.sh --skip-verification # Skip binary verification"
@echo ""
@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"
.PHONY: help-docker
help-docker:
@echo "🐳 Docker Multi-architecture Build Help:"
@echo ""
@echo "🚀 Production Image Build (Recommended to use docker-buildx.sh):"
@echo " make docker-buildx # Build production multi-arch image (no push)"
@echo " make docker-buildx-push # Build and push production multi-arch image"
@echo " make docker-buildx-version VERSION=v1.0.0 # Build specific version"
@echo " make docker-buildx-push-version VERSION=v1.0.0 # Build and push specific version"
@echo ""
@echo "🔧 Development/Source Image Build (Local development testing):"
@echo " make docker-dev # Build dev multi-arch image (cannot load locally)"
@echo " make docker-dev-local # Build dev single-arch image (local load)"
@echo " make docker-dev-push REGISTRY=xxx # Build and push dev image"
@echo ""
@echo "🏗️ Local Production Image Build (Alternative):"
@echo " make docker-buildx-production-local # Build production single-arch image locally"
@echo ""
@echo "📦 Single-architecture Build (Traditional way):"
@echo " make docker-build-production # Build single-arch production image"
@echo " make docker-build-source # Build single-arch source image"
@echo ""
@echo "🚀 Development Environment Management:"
@echo " make dev-env-start # Start development container environment"
@echo " make dev-env-stop # Stop development container environment"
@echo " make dev-env-restart # Restart development container environment"
@echo ""
@echo "🔧 Auxiliary Tools:"
@echo " make build-cross-all # Build binaries for all architectures"
@echo " make docker-inspect-multiarch IMAGE=xxx # Check image architecture support"
@echo ""
@echo "📋 Environment Variables:"
@echo " REGISTRY Image registry address (required for push)"
@echo " DOCKERHUB_USERNAME Docker Hub username"
@echo " DOCKERHUB_TOKEN Docker Hub access token"
@echo " GITHUB_TOKEN GitHub access token"
@echo ""
@echo "💡 Suggestions:"
@echo " - Production use: Use docker-buildx* commands (based on precompiled binaries)"
@echo " - Local development: Use docker-dev* commands (build from source)"
@echo " - Development environment: Use dev-env-* commands to manage dev containers"
.PHONY: help
help:
@echo "🦀 RustFS Makefile Help:"
@echo ""
@echo "📋 Main Command Categories:"
@echo " make help-build # Show build-related help"
@echo " make help-docker # Show Docker-related help"
@echo ""
@echo "🔧 Code Quality:"
@echo " make fmt # Format code"
@echo " make clippy # Run clippy checks"
@echo " make test # Run tests"
@echo " make pre-commit # Run all pre-commit checks"
@echo ""
@echo "🚀 Quick Start:"
@echo " make build # Build RustFS binary"
@echo " make docker-dev-local # Build development Docker image (local)"
@echo " make dev-env-start # Start development environment"
@echo ""
@echo "💡 For more help use 'make help-build' or 'make help-docker'"

View File

@@ -10,6 +10,11 @@
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="FeaturedHelloGitHub" /></a>
</p>
<p align="center">
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14181" alt="rustfs%2Frustfs | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</p>
<p align="center">
<a href="https://docs.rustfs.com/installation/">Getting Started</a>
· <a href="https://docs.rustfs.com/">Docs</a>
@@ -45,10 +50,10 @@ Unlike other storage systems, RustFS is released under the permissible Apache 2.
| :--- | :--- | :--- | :--- |
| **S3 Core Features** | ✅ Available | **Bitrot Protection** | ✅ Available |
| **Upload / Download** | ✅ Available | **Single Node Mode** | ✅ Available |
| **Versioning** | ✅ Available | **Bucket Replication** | ⚠️ Partial Support |
| **Versioning** | ✅ Available | **Bucket Replication** | ✅ Available |
| **Logging** | ✅ Available | **Lifecycle Management** | 🚧 Under Testing |
| **Event Notifications** | ✅ Available | **Distributed Mode** | 🚧 Under Testing |
| **K8s Helm Charts** | ✅ Available | **OPA (Open Policy Agent)** | 🚧 Under Testing |
| **K8s Helm Charts** | ✅ Available | **RustFS KMS** | 🚧 Under Testing |
@@ -103,7 +108,7 @@ The RustFS container runs as a non-root user `rustfs` (UID `10001`). If you run
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest
# Using specific version
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.68
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0-alpha.76
```
You can also use Docker Compose. Using the `docker-compose.yml` file in the root directory:
@@ -153,11 +158,28 @@ make help-docker # Show all Docker-related commands
Follow the instructions in the [Helm Chart README](https://charts.rustfs.com/) to install RustFS on a Kubernetes cluster.
### 5\. Nix Flake (Option 5)
If you have [Nix with flakes enabled](https://nixos.wiki/wiki/Flakes#Enable_flakes):
```bash
# Run directly without installing
nix run github:rustfs/rustfs
# Build the binary
nix build github:rustfs/rustfs
./result/bin/rustfs --help
# Or from a local checkout
nix build
nix run
```
-----
### Accessing RustFS
5. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console.
5. **Access the Console**: Open your web browser and navigate to `http://localhost:9001` to access the RustFS console.
* Default credentials: `rustfsadmin` / `rustfsadmin`
6. **Create a Bucket**: Use the console to create a new bucket for your objects.
7. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs/clients to interact with your RustFS instance.
@@ -198,11 +220,6 @@ RustFS is a community-driven project, and we appreciate all contributions. Check
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
</a>
## Github Trending Top
🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending top charts.
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
## Star History

View File

@@ -10,6 +10,10 @@
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="FeaturedHelloGitHub" /></a>
</p>
<p align="center">
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14181" alt="rustfs%2Frustfs | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</p>
<p align="center">
<a href="https://docs.rustfs.com/installation/">快速开始</a>
· <a href="https://docs.rustfs.com/">文档</a>
@@ -17,6 +21,8 @@
· <a href="https://github.com/rustfs/rustfs/discussions">社区讨论</a>
</p>
<p align="center">
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a> | 简体中文 |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
@@ -46,7 +52,7 @@ RustFS 是一个基于 Rust 构建的高性能分布式对象存储系统。Rust
| :--- | :--- | :--- | :--- |
| **S3 核心功能** | ✅ 可用 | **Bitrot (防数据腐烂)** | ✅ 可用 |
| **上传 / 下载** | ✅ 可用 | **单机模式** | ✅ 可用 |
| **版本控制** | ✅ 可用 | **存储桶复制** | ⚠️ 部分可用 |
| **版本控制** | ✅ 可用 | **存储桶复制** | ✅ 可用 |
| **日志功能** | ✅ 可用 | **生命周期管理** | 🚧 测试中 |
| **事件通知** | ✅ 可用 | **分布式模式** | 🚧 测试中 |
| **K8s Helm Chart** | ✅ 可用 | **OPA (策略引擎)** | 🚧 测试中 |
@@ -200,11 +206,7 @@ RustFS 是一个社区驱动的项目,我们感谢所有的贡献。请查看
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
</a>
## Github Trending Top
🚀 RustFS 深受全球开源爱好者和企业用户的喜爱,经常荣登 GitHub Trending 榜单。
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
## Star 历史

View File

@@ -2,8 +2,7 @@
## Supported Versions
Use this section to tell people about which versions of your project are
currently being supported with security updates.
Security updates are provided for the latest released version of this project.
| Version | Supported |
| ------- | ------------------ |
@@ -11,8 +10,10 @@ currently being supported with security updates.
## Reporting a Vulnerability
Use this section to tell people how to report a vulnerability.
Please report security vulnerabilities **privately** via GitHub Security Advisories:
Tell them where to go, how often they can expect to get an update on a
reported vulnerability, what to expect if the vulnerability is accepted or
declined, etc.
https://github.com/rustfs/rustfs/security/advisories/new
Do **not** open a public issue for security-sensitive bugs.
You can expect an initial response within a reasonable timeframe. Further updates will be provided as the report is triaged.

View File

@@ -36,6 +36,7 @@ clen = "clen"
datas = "datas"
bre = "bre"
abd = "abd"
mak = "mak"
[files]
extend-exclude = []

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# RustFS Binary Build Script
# This script compiles RustFS binaries for different platforms and architectures

View File

@@ -468,14 +468,17 @@ impl HealManager {
let active_heals = self.active_heals.clone();
let cancel_token = self.cancel_token.clone();
let storage = self.storage.clone();
info!(
"start_auto_disk_scanner: Starting auto disk scanner with interval: {:?}",
config.read().await.heal_interval
);
let mut duration = {
let config = config.read().await;
config.heal_interval
};
if duration < Duration::from_secs(1) {
duration = Duration::from_secs(1);
}
info!("start_auto_disk_scanner: Starting auto disk scanner with interval: {:?}", duration);
tokio::spawn(async move {
let mut interval = interval(config.read().await.heal_interval);
let mut interval = interval(duration);
loop {
tokio::select! {

View File

@@ -30,7 +30,7 @@ use rustfs_ecstore::{
bucket::versioning::VersioningApi,
bucket::versioning_sys::BucketVersioningSys,
data_usage::{aggregate_local_snapshots, compute_bucket_usage, store_data_usage_in_backend},
disk::{Disk, DiskAPI, DiskStore, RUSTFS_META_BUCKET, WalkDirOptions},
disk::{DiskAPI, DiskStore, RUSTFS_META_BUCKET, WalkDirOptions},
set_disk::SetDisks,
store_api::ObjectInfo,
};
@@ -1977,7 +1977,7 @@ impl Scanner {
} else {
// Apply lifecycle actions
if let Some(lifecycle_config) = &lifecycle_config {
if let Disk::Local(_local_disk) = &**disk {
if disk.is_local() {
let vcfg = BucketVersioningSys::get(bucket).await.ok();
let mut scanner_item = ScannerItem {

View File

@@ -21,10 +21,11 @@ use rustfs_ecstore::bucket::metadata_sys::{BucketMetadataSys, GLOBAL_BucketMetad
use rustfs_ecstore::endpoints::EndpointServerPools;
use rustfs_ecstore::store::ECStore;
use rustfs_ecstore::store_api::{ObjectIO, PutObjReader, StorageAPI};
use std::sync::Arc;
use std::sync::{Arc, Once};
use tempfile::TempDir;
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use tracing::Level;
/// Build a minimal single-node ECStore over a temp directory and populate objects.
async fn create_store_with_objects(count: usize) -> (TempDir, std::sync::Arc<ECStore>) {
@@ -74,8 +75,22 @@ async fn create_store_with_objects(count: usize) -> (TempDir, std::sync::Arc<ECS
(temp_dir, store)
}
static INIT: Once = Once::new();
fn init_tracing(filter_level: Level) {
INIT.call_once(|| {
let _ = tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.with_max_level(filter_level)
.with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339())
.with_thread_names(true)
.try_init();
});
}
#[tokio::test]
async fn fallback_builds_full_counts_over_100_objects() {
init_tracing(Level::ERROR);
let (_tmp, store) = create_store_with_objects(1000).await;
let scanner = Scanner::new(None, None);

View File

@@ -38,9 +38,13 @@ use walkdir::WalkDir;
static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage>)> = OnceLock::new();
static INIT: Once = Once::new();
fn init_tracing() {
pub fn init_tracing() {
INIT.call_once(|| {
let _ = tracing_subscriber::fmt::try_init();
let _ = tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339())
.with_thread_names(true)
.try_init();
});
}
@@ -356,7 +360,7 @@ mod serial_tests {
// Create heal manager with faster interval
let cfg = HealConfig {
heal_interval: Duration::from_secs(2),
heal_interval: Duration::from_secs(1),
..Default::default()
};
let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));

View File

@@ -29,6 +29,7 @@ categories = ["web-programming", "development-tools", "asynchronous", "api-bindi
rustfs-targets = { workspace = true }
rustfs-config = { workspace = true, features = ["audit", "constants"] }
rustfs-ecstore = { workspace = true }
async-trait = { workspace = true }
chrono = { workspace = true }
const-str = { workspace = true }
futures = { workspace = true }

224
crates/audit/src/factory.rs Normal file
View File

@@ -0,0 +1,224 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::AuditEntry;
use async_trait::async_trait;
use hashbrown::HashSet;
use rumqttc::QoS;
use rustfs_config::audit::{AUDIT_MQTT_KEYS, AUDIT_WEBHOOK_KEYS, ENV_AUDIT_MQTT_KEYS, ENV_AUDIT_WEBHOOK_KEYS};
use rustfs_config::{
AUDIT_DEFAULT_DIR, DEFAULT_LIMIT, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR,
MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN, WEBHOOK_CLIENT_CERT,
WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_LIMIT,
};
use rustfs_ecstore::config::KVS;
use rustfs_targets::{
Target,
error::TargetError,
target::{mqtt::MQTTArgs, webhook::WebhookArgs},
};
use std::time::Duration;
use tracing::{debug, warn};
use url::Url;
/// Trait for creating targets from configuration
#[async_trait]
pub trait TargetFactory: Send + Sync {
/// Creates a target from configuration
async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError>;
/// Validates target configuration
fn validate_config(&self, id: &str, config: &KVS) -> Result<(), TargetError>;
/// Returns a set of valid configuration field names for this target type.
/// This is used to filter environment variables.
fn get_valid_fields(&self) -> HashSet<String>;
/// Returns a set of valid configuration env field names for this target type.
/// This is used to filter environment variables.
fn get_valid_env_fields(&self) -> HashSet<String>;
}
/// Factory for creating Webhook targets
pub struct WebhookTargetFactory;
#[async_trait]
impl TargetFactory for WebhookTargetFactory {
async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
// All config values are now read directly from the merged `config` KVS.
let endpoint = config
.lookup(WEBHOOK_ENDPOINT)
.ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
let parsed_endpoint = endpoint.trim();
let endpoint_url = Url::parse(parsed_endpoint)
.map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{parsed_endpoint}')")))?;
let args = WebhookArgs {
enable: true, // If we are here, it's already enabled.
endpoint: endpoint_url,
auth_token: config.lookup(WEBHOOK_AUTH_TOKEN).unwrap_or_default(),
queue_dir: config.lookup(WEBHOOK_QUEUE_DIR).unwrap_or(AUDIT_DEFAULT_DIR.to_string()),
queue_limit: config
.lookup(WEBHOOK_QUEUE_LIMIT)
.and_then(|v| v.parse::<u64>().ok())
.unwrap_or(DEFAULT_LIMIT),
client_cert: config.lookup(WEBHOOK_CLIENT_CERT).unwrap_or_default(),
client_key: config.lookup(WEBHOOK_CLIENT_KEY).unwrap_or_default(),
target_type: rustfs_targets::target::TargetType::AuditLog,
};
let target = rustfs_targets::target::webhook::WebhookTarget::new(id, args)?;
Ok(Box::new(target))
}
fn validate_config(&self, _id: &str, config: &KVS) -> Result<(), TargetError> {
// Validation also uses the merged `config` KVS directly.
let endpoint = config
.lookup(WEBHOOK_ENDPOINT)
.ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
debug!("endpoint: {}", endpoint);
let parsed_endpoint = endpoint.trim();
Url::parse(parsed_endpoint)
.map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{parsed_endpoint}')")))?;
let client_cert = config.lookup(WEBHOOK_CLIENT_CERT).unwrap_or_default();
let client_key = config.lookup(WEBHOOK_CLIENT_KEY).unwrap_or_default();
if client_cert.is_empty() != client_key.is_empty() {
return Err(TargetError::Configuration(
"Both client_cert and client_key must be specified together".to_string(),
));
}
let queue_dir = config.lookup(WEBHOOK_QUEUE_DIR).unwrap_or(AUDIT_DEFAULT_DIR.to_string());
if !queue_dir.is_empty() && !std::path::Path::new(&queue_dir).is_absolute() {
return Err(TargetError::Configuration("Webhook queue directory must be an absolute path".to_string()));
}
Ok(())
}
fn get_valid_fields(&self) -> HashSet<String> {
AUDIT_WEBHOOK_KEYS.iter().map(|s| s.to_string()).collect()
}
fn get_valid_env_fields(&self) -> HashSet<String> {
ENV_AUDIT_WEBHOOK_KEYS.iter().map(|s| s.to_string()).collect()
}
}
/// Factory for creating MQTT targets
pub struct MQTTTargetFactory;
#[async_trait]
impl TargetFactory for MQTTTargetFactory {
async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
let broker = config
.lookup(MQTT_BROKER)
.ok_or_else(|| TargetError::Configuration("Missing MQTT broker".to_string()))?;
let broker_url = Url::parse(&broker)
.map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {e} (value: '{broker}')")))?;
let topic = config
.lookup(MQTT_TOPIC)
.ok_or_else(|| TargetError::Configuration("Missing MQTT topic".to_string()))?;
let args = MQTTArgs {
enable: true, // Assumed enabled.
broker: broker_url,
topic,
qos: config
.lookup(MQTT_QOS)
.and_then(|v| v.parse::<u8>().ok())
.map(|q| match q {
0 => QoS::AtMostOnce,
1 => QoS::AtLeastOnce,
2 => QoS::ExactlyOnce,
_ => QoS::AtLeastOnce,
})
.unwrap_or(QoS::AtLeastOnce),
username: config.lookup(MQTT_USERNAME).unwrap_or_default(),
password: config.lookup(MQTT_PASSWORD).unwrap_or_default(),
max_reconnect_interval: config
.lookup(MQTT_RECONNECT_INTERVAL)
.and_then(|v| v.parse::<u64>().ok())
.map(Duration::from_secs)
.unwrap_or_else(|| Duration::from_secs(5)),
keep_alive: config
.lookup(MQTT_KEEP_ALIVE_INTERVAL)
.and_then(|v| v.parse::<u64>().ok())
.map(Duration::from_secs)
.unwrap_or_else(|| Duration::from_secs(30)),
queue_dir: config.lookup(MQTT_QUEUE_DIR).unwrap_or(AUDIT_DEFAULT_DIR.to_string()),
queue_limit: config
.lookup(MQTT_QUEUE_LIMIT)
.and_then(|v| v.parse::<u64>().ok())
.unwrap_or(DEFAULT_LIMIT),
target_type: rustfs_targets::target::TargetType::AuditLog,
};
let target = rustfs_targets::target::mqtt::MQTTTarget::new(id, args)?;
Ok(Box::new(target))
}
fn validate_config(&self, _id: &str, config: &KVS) -> Result<(), TargetError> {
let broker = config
.lookup(MQTT_BROKER)
.ok_or_else(|| TargetError::Configuration("Missing MQTT broker".to_string()))?;
let url = Url::parse(&broker)
.map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {e} (value: '{broker}')")))?;
match url.scheme() {
"tcp" | "ssl" | "ws" | "wss" | "mqtt" | "mqtts" => {}
_ => {
return Err(TargetError::Configuration("Unsupported broker URL scheme".to_string()));
}
}
if config.lookup(MQTT_TOPIC).is_none() {
return Err(TargetError::Configuration("Missing MQTT topic".to_string()));
}
if let Some(qos_str) = config.lookup(MQTT_QOS) {
let qos = qos_str
.parse::<u8>()
.map_err(|_| TargetError::Configuration("Invalid QoS value".to_string()))?;
if qos > 2 {
return Err(TargetError::Configuration("QoS must be 0, 1, or 2".to_string()));
}
}
let queue_dir = config.lookup(MQTT_QUEUE_DIR).unwrap_or_default();
if !queue_dir.is_empty() {
if !std::path::Path::new(&queue_dir).is_absolute() {
return Err(TargetError::Configuration("MQTT queue directory must be an absolute path".to_string()));
}
if let Some(qos_str) = config.lookup(MQTT_QOS) {
if qos_str == "0" {
warn!("Using queue_dir with QoS 0 may result in event loss");
}
}
}
Ok(())
}
fn get_valid_fields(&self) -> HashSet<String> {
AUDIT_MQTT_KEYS.iter().map(|s| s.to_string()).collect()
}
fn get_valid_env_fields(&self) -> HashSet<String> {
ENV_AUDIT_MQTT_KEYS.iter().map(|s| s.to_string()).collect()
}
}

View File

@@ -20,6 +20,7 @@
pub mod entity;
pub mod error;
pub mod factory;
pub mod global;
pub mod observability;
pub mod registry;

View File

@@ -12,29 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{AuditEntry, AuditError, AuditResult};
use futures::{StreamExt, stream::FuturesUnordered};
use crate::{
AuditEntry, AuditError, AuditResult,
factory::{MQTTTargetFactory, TargetFactory, WebhookTargetFactory},
};
use futures::StreamExt;
use futures::stream::FuturesUnordered;
use hashbrown::{HashMap, HashSet};
use rustfs_config::{
DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR,
MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN, WEBHOOK_BATCH_SIZE,
WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_HTTP_TIMEOUT, WEBHOOK_MAX_RETRY, WEBHOOK_QUEUE_DIR,
WEBHOOK_QUEUE_LIMIT, WEBHOOK_RETRY_INTERVAL, audit::AUDIT_ROUTE_PREFIX,
};
use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, EnableState, audit::AUDIT_ROUTE_PREFIX};
use rustfs_ecstore::config::{Config, KVS};
use rustfs_targets::{
Target, TargetError,
target::{ChannelTargetType, TargetType, mqtt::MQTTArgs, webhook::WebhookArgs},
};
use rustfs_targets::{Target, TargetError, target::ChannelTargetType};
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use tracing::{debug, error, info, warn};
use url::Url;
/// Registry for managing audit targets
pub struct AuditRegistry {
/// Storage for created targets
targets: HashMap<String, Box<dyn Target<AuditEntry> + Send + Sync>>,
/// Factories for creating targets
factories: HashMap<String, Box<dyn TargetFactory>>,
}
impl Default for AuditRegistry {
@@ -46,162 +43,207 @@ impl Default for AuditRegistry {
impl AuditRegistry {
/// Creates a new AuditRegistry
pub fn new() -> Self {
Self { targets: HashMap::new() }
let mut registry = AuditRegistry {
factories: HashMap::new(),
targets: HashMap::new(),
};
// Register built-in factories
registry.register(ChannelTargetType::Webhook.as_str(), Box::new(WebhookTargetFactory));
registry.register(ChannelTargetType::Mqtt.as_str(), Box::new(MQTTTargetFactory));
registry
}
/// Creates all audit targets from system configuration and environment variables.
/// Registers a new factory for a target type
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `factory` - The factory instance to create targets of this type.
pub fn register(&mut self, target_type: &str, factory: Box<dyn TargetFactory>) {
self.factories.insert(target_type.to_string(), factory);
}
/// Creates a target of the specified type with the given ID and configuration
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `id` - The identifier for the target instance.
/// * `config` - The configuration key-value store for the target.
///
/// # Returns
/// * `Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError>` - The created target or an error.
pub async fn create_target(
&self,
target_type: &str,
id: String,
config: &KVS,
) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
let factory = self
.factories
.get(target_type)
.ok_or_else(|| TargetError::Configuration(format!("Unknown target type: {target_type}")))?;
// Validate configuration before creating target
factory.validate_config(&id, config)?;
// Create target
factory.create_target(id, config).await
}
/// Creates all targets from a configuration
/// Create all notification targets from system configuration and environment variables.
/// This method processes the creation of each target concurrently as follows:
/// 1. Iterate through supported target types (webhook, mqtt).
/// 2. For each type, resolve its configuration from file and environment variables.
/// 1. Iterate through all registered target types (e.g. webhooks, mqtt).
/// 2. For each type, resolve its configuration in the configuration file and environment variables.
/// 3. Identify all target instance IDs that need to be created.
/// 4. Merge configurations with precedence: ENV > file instance > file default.
/// 5. Create async tasks for enabled instances.
/// 6. Execute tasks concurrently and collect successful targets.
/// 7. Persist successful configurations back to system storage.
pub async fn create_targets_from_config(
&mut self,
/// 4. Combine the default configuration, file configuration, and environment variable configuration for each instance.
/// 5. If the instance is enabled, create an asynchronous task for it to instantiate.
/// 6. Concurrency executes all creation tasks and collects results.
pub async fn create_audit_targets_from_config(
&self,
config: &Config,
) -> AuditResult<Vec<Box<dyn Target<AuditEntry> + Send + Sync>>> {
// Collect only environment variables with the relevant prefix to reduce memory usage
let all_env: Vec<(String, String)> = std::env::vars().filter(|(key, _)| key.starts_with(ENV_PREFIX)).collect();
// A collection of asynchronous tasks for concurrently executing target creation
let mut tasks = FuturesUnordered::new();
// let final_config = config.clone();
// let final_config = config.clone(); // Clone a configuration for aggregating the final result
// Record the defaults for each segment so that the segment can eventually be rebuilt
let mut section_defaults: HashMap<String, KVS> = HashMap::new();
// Supported target types for audit
let target_types = vec![ChannelTargetType::Webhook.as_str(), ChannelTargetType::Mqtt.as_str()];
// 1. Traverse all target types and process them
for target_type in target_types {
let span = tracing::Span::current();
span.record("target_type", target_type);
info!(target_type = %target_type, "Starting audit target type processing");
// 1. Traverse all registered factories and process them by target type
for (target_type, factory) in &self.factories {
tracing::Span::current().record("target_type", target_type.as_str());
info!("Start working on target types...");
// 2. Prepare the configuration source
// 2.1. Get the configuration segment in the file, e.g. 'audit_webhook'
let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase();
let file_configs = config.0.get(&section_name).cloned().unwrap_or_default();
// 2.2. Get the default configuration for that type
let default_cfg = file_configs.get(DEFAULT_DELIMITER).cloned().unwrap_or_default();
debug!(?default_cfg, "Retrieved default configuration");
debug!(?default_cfg, "Resolved default configuration");
// Save defaults for eventual write back
section_defaults.insert(section_name.clone(), default_cfg.clone());
// Get valid fields for the target type
let valid_fields = match target_type {
"webhook" => get_webhook_valid_fields(),
"mqtt" => get_mqtt_valid_fields(),
_ => {
warn!(target_type = %target_type, "Unknown target type, skipping");
continue;
}
};
debug!(?valid_fields, "Retrieved valid configuration fields");
// *** Optimization 1: fetch all valid fields for the current target type ***
let valid_fields = factory.get_valid_fields();
debug!(?valid_fields, "Resolved valid configuration fields");
// 3. Resolve instance IDs and configuration overrides from environment variables
let mut instance_ids_from_env = HashSet::new();
let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new();
for (env_key, env_value) in &all_env {
let audit_prefix = format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}").to_uppercase();
if !env_key.starts_with(&audit_prefix) {
continue;
}
let suffix = &env_key[audit_prefix.len()..];
if suffix.is_empty() {
continue;
}
// Parse field and instance from suffix (FIELD_INSTANCE or FIELD)
let (field_name, instance_id) = if let Some(last_underscore) = suffix.rfind('_') {
let potential_field = &suffix[1..last_underscore]; // Skip leading _
let potential_instance = &suffix[last_underscore + 1..];
// Check if the part before the last underscore is a valid field
if valid_fields.contains(&potential_field.to_lowercase()) {
(potential_field.to_lowercase(), potential_instance.to_lowercase())
} else {
// Treat the entire suffix as field name with default instance
(suffix[1..].to_lowercase(), DEFAULT_DELIMITER.to_string())
// 3.1. Instance discovery, based on the '..._ENABLE_<INSTANCE_ID>' format
let enable_prefix =
format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}{ENABLE_KEY}{DEFAULT_DELIMITER}")
.to_uppercase();
for (key, value) in &all_env {
if EnableState::from_str(value).ok().map(|s| s.is_enabled()).unwrap_or(false) {
if let Some(id) = key.strip_prefix(&enable_prefix) {
if !id.is_empty() {
instance_ids_from_env.insert(id.to_lowercase());
}
}
} else {
// No underscore, treat as field with default instance
(suffix[1..].to_lowercase(), DEFAULT_DELIMITER.to_string())
};
if valid_fields.contains(&field_name) {
if instance_id != DEFAULT_DELIMITER {
instance_ids_from_env.insert(instance_id.clone());
}
env_overrides
.entry(instance_id)
.or_default()
.insert(field_name, env_value.clone());
} else {
debug!(
env_key = %env_key,
field_name = %field_name,
"Ignoring environment variable field not found in valid fields for target type {}",
target_type
);
}
}
debug!(?env_overrides, "Completed environment variable analysis");
// 3.2. Parse all relevant environment variable configurations
// 3.2.1. Build environment variable prefixes such as 'RUSTFS_AUDIT_WEBHOOK_'
let env_prefix = format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}").to_uppercase();
// 3.2.2. 'env_overrides' is used to store configurations parsed from environment variables in the format: {instance id -> {field -> value}}
let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new();
for (key, value) in &all_env {
if let Some(rest) = key.strip_prefix(&env_prefix) {
// Use rsplitn to split from the right side to properly extract the INSTANCE_ID at the end
// Format: <FIELD_NAME>_<INSTANCE_ID> or <FIELD_NAME>
let mut parts = rest.rsplitn(2, DEFAULT_DELIMITER);
// The first part from the right is INSTANCE_ID
let instance_id_part = parts.next().unwrap_or(DEFAULT_DELIMITER);
// The remaining part is FIELD_NAME
let field_name_part = parts.next();
let (field_name, instance_id) = match field_name_part {
// Case 1: The format is <FIELD_NAME>_<INSTANCE_ID>
// e.g., rest = "ENDPOINT_PRIMARY" -> field_name="ENDPOINT", instance_id="PRIMARY"
Some(field) => (field.to_lowercase(), instance_id_part.to_lowercase()),
// Case 2: The format is <FIELD_NAME> (without INSTANCE_ID)
// e.g., rest = "ENABLE" -> field_name="ENABLE", instance_id="" (Universal configuration `_ DEFAULT_DELIMITER`)
None => (instance_id_part.to_lowercase(), DEFAULT_DELIMITER.to_string()),
};
// *** Optimization 2: verify that the parsed field_name is valid ***
if !field_name.is_empty() && valid_fields.contains(&field_name) {
debug!(
instance_id = %if instance_id.is_empty() { DEFAULT_DELIMITER } else { &instance_id },
%field_name,
%value,
"Parsing to environment variables"
);
env_overrides
.entry(instance_id)
.or_default()
.insert(field_name, value.clone());
} else {
// Ignore unknown field names
warn!(
field_name = %field_name,
"Ignoring environment variable field not found in the valid field list for target type {}",
target_type
);
}
}
}
debug!(?env_overrides, "Completed environment variable analysis");
// 4. Determine all instance IDs that need to be processed
let mut all_instance_ids: HashSet<String> =
file_configs.keys().filter(|k| *k != DEFAULT_DELIMITER).cloned().collect();
all_instance_ids.extend(instance_ids_from_env);
debug!(?all_instance_ids, "Determined all instance IDs");
debug!(?all_instance_ids, "Resolved all instance IDs");
// 5. Merge configurations and create tasks for each instance
for id in all_instance_ids {
// 5.1. Merge configuration, priority: Environment variables > File instance > File default
// 5.1. Merge configuration, priority: Environment variables > File instance configuration > File default configuration
let mut merged_config = default_cfg.clone();
// Apply file instance configuration if available
// Apply the instance-specific configuration from the file, if present
if let Some(file_instance_cfg) = file_configs.get(&id) {
merged_config.extend(file_instance_cfg.clone());
}
// Apply environment variable overrides
// Apply instance-specific environment variable overrides
if let Some(env_instance_cfg) = env_overrides.get(&id) {
// Convert HashMap<String, String> to KVS
let mut kvs_from_env = KVS::new();
for (k, v) in env_instance_cfg {
kvs_from_env.insert(k.clone(), v.clone());
}
merged_config.extend(kvs_from_env);
}
debug!(instance_id = %id, ?merged_config, "Completed configuration merge");
debug!(instance_id = %id, ?merged_config, "Finished merging configuration");
// 5.2. Check if the instance is enabled
let enabled = merged_config
.lookup(ENABLE_KEY)
.map(|v| parse_enable_value(&v))
.map(|v| {
EnableState::from_str(v.as_str())
.ok()
.map(|s| s.is_enabled())
.unwrap_or(false)
})
.unwrap_or(false);
if enabled {
info!(instance_id = %id, "Creating audit target");
// Create task for concurrent execution
let target_type_clone = target_type.to_string();
let id_clone = id.clone();
let merged_config_arc = Arc::new(merged_config.clone());
let task = tokio::spawn(async move {
let result = create_audit_target(&target_type_clone, &id_clone, &merged_config_arc).await;
(target_type_clone, id_clone, result, merged_config_arc)
info!(instance_id = %id, "Target is enabled, scheduling creation task");
// 5.3. Create asynchronous tasks for enabled instances
let target_type_clone = target_type.clone();
let tid = id.clone();
let merged_config_arc = Arc::new(merged_config);
tasks.push(async move {
let result = factory.create_target(tid.clone(), &merged_config_arc).await;
(target_type_clone, tid, result, Arc::clone(&merged_config_arc))
});
tasks.push(task);
// Update final config with successful instance
// final_config.0.entry(section_name.clone()).or_default().insert(id, merged_config);
} else {
info!(instance_id = %id, "Skipping disabled audit target, will be removed from final configuration");
info!(instance_id = %id, "Skipping disabled target; it will be removed from the final configuration");
// Remove disabled target from final configuration
// final_config.0.entry(section_name.clone()).or_default().remove(&id);
}
@@ -211,30 +253,28 @@ impl AuditRegistry {
// 6. Concurrently execute all creation tasks and collect results
let mut successful_targets = Vec::new();
let mut successful_configs = Vec::new();
while let Some(task_result) = tasks.next().await {
match task_result {
Ok((target_type, id, result, kvs_arc)) => match result {
Ok(target) => {
info!(target_type = %target_type, instance_id = %id, "Created audit target successfully");
successful_targets.push(target);
successful_configs.push((target_type, id, kvs_arc));
}
Err(e) => {
error!(target_type = %target_type, instance_id = %id, error = %e, "Failed to create audit target");
}
},
while let Some((target_type, id, result, final_config)) = tasks.next().await {
match result {
Ok(target) => {
info!(target_type = %target_type, instance_id = %id, "Created target successfully");
successful_targets.push(target);
successful_configs.push((target_type, id, final_config));
}
Err(e) => {
error!(error = %e, "Task execution failed");
error!(target_type = %target_type, instance_id = %id, error = %e, "Failed to create target");
}
}
}
// Rebuild each section from its default entry plus the successfully created instances, then overwrite it on write-back so that deleted/disabled instances cannot be "resurrected"
// 7. Aggregate new configuration and write back to system configuration
if !successful_configs.is_empty() || !section_defaults.is_empty() {
info!("Prepare to rebuild and save target configurations to the system configuration...");
info!(
"Persisting {} successfully created target configurations to the system configuration...",
successful_configs.len()
);
// Aggregate successful instances into segments
let mut successes_by_section: HashMap<String, HashMap<String, KVS>> = HashMap::new();
for (target_type, id, kvs) in successful_configs {
let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase();
successes_by_section
@@ -244,76 +284,99 @@ impl AuditRegistry {
}
let mut new_config = config.clone();
// Collection of segments that need to be processed: Collect all segments where default items exist or where successful instances exist
let mut sections: HashSet<String> = HashSet::new();
sections.extend(section_defaults.keys().cloned());
sections.extend(successes_by_section.keys().cloned());
for section_name in sections {
for section in sections {
let mut section_map: std::collections::HashMap<String, KVS> = std::collections::HashMap::new();
// The default entry (if present) is written back to `_`
if let Some(default_cfg) = section_defaults.get(&section_name) {
if !default_cfg.is_empty() {
section_map.insert(DEFAULT_DELIMITER.to_string(), default_cfg.clone());
// Add default item
if let Some(default_kvs) = section_defaults.get(&section) {
if !default_kvs.is_empty() {
section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone());
}
}
// Successful instance write back
if let Some(instances) = successes_by_section.get(&section_name) {
// Add the successfully created instance entries
if let Some(instances) = successes_by_section.get(&section) {
for (id, kvs) in instances {
section_map.insert(id.clone(), kvs.clone());
}
}
// Empty segments are removed and non-empty segments are replaced as a whole.
// Empty sections are removed; non-empty sections are replaced as a whole.
if section_map.is_empty() {
new_config.0.remove(&section_name);
new_config.0.remove(&section);
} else {
new_config.0.insert(section_name, section_map);
new_config.0.insert(section, section_map);
}
}
// 7. Save the new configuration to the system
let Some(store) = rustfs_ecstore::new_object_layer_fn() else {
let Some(store) = rustfs_ecstore::global::new_object_layer_fn() else {
return Err(AuditError::StorageNotAvailable(
"Failed to save target configuration: server storage not initialized".to_string(),
));
};
match rustfs_ecstore::config::com::save_server_config(store, &new_config).await {
Ok(_) => info!("New audit configuration saved to system successfully"),
Ok(_) => {
info!("The new configuration was saved to the system successfully.")
}
Err(e) => {
error!(error = %e, "Failed to save new audit configuration");
error!("Failed to save the new configuration: {}", e);
return Err(AuditError::SaveConfig(Box::new(e)));
}
}
}
info!(count = successful_targets.len(), "All target processing completed");
Ok(successful_targets)
}
/// Adds a target to the registry
///
/// # Arguments
/// * `id` - The identifier for the target.
/// * `target` - The target instance to be added.
pub fn add_target(&mut self, id: String, target: Box<dyn Target<AuditEntry> + Send + Sync>) {
self.targets.insert(id, target);
}
/// Removes a target from the registry
///
/// # Arguments
/// * `id` - The identifier for the target to be removed.
///
/// # Returns
/// * `Option<Box<dyn Target<AuditEntry> + Send + Sync>>` - The removed target if it existed.
pub fn remove_target(&mut self, id: &str) -> Option<Box<dyn Target<AuditEntry> + Send + Sync>> {
self.targets.remove(id)
}
/// Gets a target from the registry
///
/// # Arguments
/// * `id` - The identifier for the target to be retrieved.
///
/// # Returns
/// * `Option<&(dyn Target<AuditEntry> + Send + Sync)>` - The target if it exists.
pub fn get_target(&self, id: &str) -> Option<&(dyn Target<AuditEntry> + Send + Sync)> {
self.targets.get(id).map(|t| t.as_ref())
}
/// Lists all target IDs
///
/// # Returns
/// * `Vec<String>` - A vector of all target IDs in the registry.
pub fn list_targets(&self) -> Vec<String> {
self.targets.keys().cloned().collect()
}
/// Closes all targets and clears the registry
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure.
pub async fn close_all(&mut self) -> AuditResult<()> {
let mut errors = Vec::new();
@@ -331,152 +394,3 @@ impl AuditRegistry {
Ok(())
}
}
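// A minimal usage sketch of the registry API above; the instance id "primary"
// and the contents of `cfg` are illustrative, not taken from this change.
async fn registry_usage(mut registry: AuditRegistry, cfg: &KVS) -> Result<(), TargetError> {
    // Create a webhook target through the registered factory, then track it.
    let target = registry
        .create_target(ChannelTargetType::Webhook.as_str(), "primary".to_string(), cfg)
        .await?;
    registry.add_target("primary".to_string(), target);
    assert!(registry.get_target("primary").is_some());
    Ok(())
}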
/// Creates an audit target based on type and configuration
async fn create_audit_target(
target_type: &str,
id: &str,
config: &KVS,
) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
match target_type {
val if val == ChannelTargetType::Webhook.as_str() => {
let args = parse_webhook_args(id, config)?;
let target = rustfs_targets::target::webhook::WebhookTarget::new(id.to_string(), args)?;
Ok(Box::new(target))
}
val if val == ChannelTargetType::Mqtt.as_str() => {
let args = parse_mqtt_args(id, config)?;
let target = rustfs_targets::target::mqtt::MQTTTarget::new(id.to_string(), args)?;
Ok(Box::new(target))
}
_ => Err(TargetError::Configuration(format!("Unknown target type: {target_type}"))),
}
}
/// Gets valid field names for webhook configuration
fn get_webhook_valid_fields() -> HashSet<String> {
vec![
ENABLE_KEY.to_string(),
WEBHOOK_ENDPOINT.to_string(),
WEBHOOK_AUTH_TOKEN.to_string(),
WEBHOOK_CLIENT_CERT.to_string(),
WEBHOOK_CLIENT_KEY.to_string(),
WEBHOOK_BATCH_SIZE.to_string(),
WEBHOOK_QUEUE_LIMIT.to_string(),
WEBHOOK_QUEUE_DIR.to_string(),
WEBHOOK_MAX_RETRY.to_string(),
WEBHOOK_RETRY_INTERVAL.to_string(),
WEBHOOK_HTTP_TIMEOUT.to_string(),
]
.into_iter()
.collect()
}
/// Gets valid field names for MQTT configuration
fn get_mqtt_valid_fields() -> HashSet<String> {
vec![
ENABLE_KEY.to_string(),
MQTT_BROKER.to_string(),
MQTT_TOPIC.to_string(),
MQTT_USERNAME.to_string(),
MQTT_PASSWORD.to_string(),
MQTT_QOS.to_string(),
MQTT_KEEP_ALIVE_INTERVAL.to_string(),
MQTT_RECONNECT_INTERVAL.to_string(),
MQTT_QUEUE_DIR.to_string(),
MQTT_QUEUE_LIMIT.to_string(),
]
.into_iter()
.collect()
}
/// Parses webhook arguments from KVS configuration
fn parse_webhook_args(_id: &str, config: &KVS) -> Result<WebhookArgs, TargetError> {
let endpoint = config
.lookup(WEBHOOK_ENDPOINT)
.filter(|s| !s.is_empty())
.ok_or_else(|| TargetError::Configuration("webhook endpoint is required".to_string()))?;
let endpoint_url =
Url::parse(&endpoint).map_err(|e| TargetError::Configuration(format!("invalid webhook endpoint URL: {e}")))?;
let args = WebhookArgs {
enable: true, // Already validated as enabled
endpoint: endpoint_url,
auth_token: config.lookup(WEBHOOK_AUTH_TOKEN).unwrap_or_default(),
queue_dir: config.lookup(WEBHOOK_QUEUE_DIR).unwrap_or_default(),
queue_limit: config
.lookup(WEBHOOK_QUEUE_LIMIT)
.and_then(|s| s.parse().ok())
.unwrap_or(100000),
client_cert: config.lookup(WEBHOOK_CLIENT_CERT).unwrap_or_default(),
client_key: config.lookup(WEBHOOK_CLIENT_KEY).unwrap_or_default(),
target_type: TargetType::AuditLog,
};
args.validate()?;
Ok(args)
}
/// Parses MQTT arguments from KVS configuration
fn parse_mqtt_args(_id: &str, config: &KVS) -> Result<MQTTArgs, TargetError> {
let broker = config
.lookup(MQTT_BROKER)
.filter(|s| !s.is_empty())
.ok_or_else(|| TargetError::Configuration("MQTT broker is required".to_string()))?;
let broker_url = Url::parse(&broker).map_err(|e| TargetError::Configuration(format!("invalid MQTT broker URL: {e}")))?;
let topic = config
.lookup(MQTT_TOPIC)
.filter(|s| !s.is_empty())
.ok_or_else(|| TargetError::Configuration("MQTT topic is required".to_string()))?;
let qos = config
.lookup(MQTT_QOS)
.and_then(|s| s.parse::<u8>().ok())
.and_then(|q| match q {
0 => Some(rumqttc::QoS::AtMostOnce),
1 => Some(rumqttc::QoS::AtLeastOnce),
2 => Some(rumqttc::QoS::ExactlyOnce),
_ => None,
})
.unwrap_or(rumqttc::QoS::AtLeastOnce);
let args = MQTTArgs {
enable: true, // Already validated as enabled
broker: broker_url,
topic,
qos,
username: config.lookup(MQTT_USERNAME).unwrap_or_default(),
password: config.lookup(MQTT_PASSWORD).unwrap_or_default(),
max_reconnect_interval: parse_duration(&config.lookup(MQTT_RECONNECT_INTERVAL).unwrap_or_else(|| "5s".to_string()))
.unwrap_or(Duration::from_secs(5)),
keep_alive: parse_duration(&config.lookup(MQTT_KEEP_ALIVE_INTERVAL).unwrap_or_else(|| "60s".to_string()))
.unwrap_or(Duration::from_secs(60)),
queue_dir: config.lookup(MQTT_QUEUE_DIR).unwrap_or_default(),
queue_limit: config.lookup(MQTT_QUEUE_LIMIT).and_then(|s| s.parse().ok()).unwrap_or(100000),
target_type: TargetType::AuditLog,
};
args.validate()?;
Ok(args)
}
/// Parses enable value from string
fn parse_enable_value(value: &str) -> bool {
matches!(value.to_lowercase().as_str(), "1" | "on" | "true" | "yes")
}
/// Parses duration from string (e.g., "3s", "5m")
fn parse_duration(s: &str) -> Option<Duration> {
if let Some(stripped) = s.strip_suffix('s') {
stripped.parse::<u64>().ok().map(Duration::from_secs)
} else if let Some(stripped) = s.strip_suffix('m') {
stripped.parse::<u64>().ok().map(|m| Duration::from_secs(m * 60))
} else if let Some(stripped) = s.strip_suffix("ms") {
stripped.parse::<u64>().ok().map(Duration::from_millis)
} else {
s.parse::<u64>().ok().map(Duration::from_secs)
}
}
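// A minimal, self-contained sketch of the FIELD/INSTANCE_ID split performed in
// step 3.2 of create_audit_targets_from_config above; the helper name is
// illustrative and the delimiter is assumed to be '_'.
fn split_env_suffix(rest: &str) -> (String, String) {
    // Split once from the right: <FIELD_NAME>_<INSTANCE_ID> or just <FIELD_NAME>.
    let mut parts = rest.rsplitn(2, '_');
    let last = parts.next().unwrap_or("_").to_string();
    match parts.next() {
        // "ENDPOINT_PRIMARY" -> field "endpoint", instance "primary"
        Some(field) => (field.to_lowercase(), last.to_lowercase()),
        // "ENABLE" -> field "enable", default instance "_"
        None => (last.to_lowercase(), "_".to_string()),
    }
}

fn main() {
    assert_eq!(split_env_suffix("ENDPOINT_PRIMARY"), ("endpoint".to_string(), "primary".to_string()));
    assert_eq!(split_env_suffix("ENABLE"), ("enable".to_string(), "_".to_string()));
}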

View File

@@ -58,6 +58,12 @@ impl AuditSystem {
}
/// Starts the audit system with the given configuration
///
/// # Arguments
/// * `config` - The configuration to use for starting the audit system
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn start(&self, config: Config) -> AuditResult<()> {
let state = self.state.write().await;
@@ -87,7 +93,7 @@ impl AuditSystem {
// Create targets from configuration
let mut registry = self.registry.lock().await;
match registry.create_targets_from_config(&config).await {
match registry.create_audit_targets_from_config(&config).await {
Ok(targets) => {
if targets.is_empty() {
info!("No enabled audit targets found, keeping audit system stopped");
@@ -143,6 +149,9 @@ impl AuditSystem {
}
/// Pauses the audit system
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn pause(&self) -> AuditResult<()> {
let mut state = self.state.write().await;
@@ -161,6 +170,9 @@ impl AuditSystem {
}
/// Resumes the audit system
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn resume(&self) -> AuditResult<()> {
let mut state = self.state.write().await;
@@ -179,6 +191,9 @@ impl AuditSystem {
}
/// Stops the audit system and closes all targets
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn close(&self) -> AuditResult<()> {
let mut state = self.state.write().await;
@@ -223,11 +238,20 @@ impl AuditSystem {
}
/// Checks if the audit system is running
///
/// # Returns
/// * `bool` - True if running, false otherwise
pub async fn is_running(&self) -> bool {
matches!(*self.state.read().await, AuditSystemState::Running)
}
/// Dispatches an audit log entry to all active targets
///
/// # Arguments
/// * `entry` - The audit log entry to dispatch
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn dispatch(&self, entry: Arc<AuditEntry>) -> AuditResult<()> {
let start_time = std::time::Instant::now();
@@ -319,6 +343,13 @@ impl AuditSystem {
Ok(())
}
/// Dispatches a batch of audit log entries to all active targets
///
/// # Arguments
/// * `entries` - A vector of audit log entries to dispatch
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn dispatch_batch(&self, entries: Vec<Arc<AuditEntry>>) -> AuditResult<()> {
let start_time = std::time::Instant::now();
@@ -386,7 +417,13 @@ impl AuditSystem {
Ok(())
}
// New: Audit flow background tasks, based on send_from_store, including retries and exponential backoffs
/// Starts the audit stream processing for a target with batching and retry logic
/// # Arguments
/// * `store` - The store from which to read audit entries
/// * `target` - The target to which audit entries will be sent
///
/// This function spawns a background task that continuously reads audit entries from the provided store
/// and attempts to send them to the specified target. It implements retry logic with exponential backoff
fn start_audit_stream_with_batching(
&self,
store: Box<dyn Store<EntityTarget<AuditEntry>, Error = StoreError, Key = Key> + Send>,
@@ -462,6 +499,12 @@ impl AuditSystem {
}
/// Enables a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to enable
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn enable_target(&self, target_id: &str) -> AuditResult<()> {
// This would require storing enabled/disabled state per target
// For now, just check if target exists
@@ -475,6 +518,12 @@ impl AuditSystem {
}
/// Disables a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to disable
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn disable_target(&self, target_id: &str) -> AuditResult<()> {
// This would require storing enabled/disabled state per target
// For now, just check if target exists
@@ -488,6 +537,12 @@ impl AuditSystem {
}
/// Removes a target from the system
///
/// # Arguments
/// * `target_id` - The ID of the target to remove
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn remove_target(&self, target_id: &str) -> AuditResult<()> {
let mut registry = self.registry.lock().await;
if let Some(target) = registry.remove_target(target_id) {
@@ -502,6 +557,13 @@ impl AuditSystem {
}
/// Updates or inserts a target
///
/// # Arguments
/// * `target_id` - The ID of the target to upsert
/// * `target` - The target instance to insert or update
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn upsert_target(&self, target_id: String, target: Box<dyn Target<AuditEntry> + Send + Sync>) -> AuditResult<()> {
let mut registry = self.registry.lock().await;
@@ -523,18 +585,33 @@ impl AuditSystem {
}
/// Lists all targets
///
/// # Returns
/// * `Vec<String>` - List of target IDs
pub async fn list_targets(&self) -> Vec<String> {
let registry = self.registry.lock().await;
registry.list_targets()
}
/// Gets information about a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to retrieve
///
/// # Returns
/// * `Option<String>` - Target ID if found
pub async fn get_target(&self, target_id: &str) -> Option<String> {
let registry = self.registry.lock().await;
registry.get_target(target_id).map(|target| target.id().to_string())
}
/// Reloads configuration and updates targets
///
/// # Arguments
/// * `new_config` - The new configuration to load
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
pub async fn reload_config(&self, new_config: Config) -> AuditResult<()> {
info!("Reloading audit system configuration");
@@ -554,7 +631,7 @@ impl AuditSystem {
}
// Create new targets from updated configuration
match registry.create_targets_from_config(&new_config).await {
match registry.create_audit_targets_from_config(&new_config).await {
Ok(targets) => {
info!(target_count = targets.len(), "Reloaded audit targets successfully");
@@ -594,16 +671,22 @@ impl AuditSystem {
}
/// Gets current audit system metrics
///
/// # Returns
/// * `AuditMetricsReport` - Current metrics report
pub async fn get_metrics(&self) -> observability::AuditMetricsReport {
observability::get_metrics_report().await
}
/// Validates system performance against requirements
///
/// # Returns
/// * `PerformanceValidation` - Performance validation results
pub async fn validate_performance(&self) -> observability::PerformanceValidation {
observability::validate_performance().await
}
/// Resets all metrics
/// Resets all metrics to initial state
pub async fn reset_metrics(&self) {
observability::reset_metrics().await;
}
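// A minimal lifecycle sketch for the methods documented above, assuming an
// already-constructed AuditSystem, Config, and AuditEntry; construction and
// error handling beyond `?` are not shown in this file.
async fn audit_lifecycle(system: &AuditSystem, config: Config, entry: Arc<AuditEntry>) -> AuditResult<()> {
    system.start(config).await?; // builds targets from config + env and starts streaming
    if system.is_running().await {
        system.dispatch(entry).await?; // fan out a single entry to all active targets
    }
    system.close().await // stop the system and close all targets
}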

View File

@@ -43,11 +43,11 @@ async fn test_config_parsing_webhook() {
audit_webhook_section.insert("_".to_string(), default_kvs);
config.0.insert("audit_webhook".to_string(), audit_webhook_section);
let mut registry = AuditRegistry::new();
let registry = AuditRegistry::new();
// This should not fail even if server storage is not initialized
// as it's an integration test
let result = registry.create_targets_from_config(&config).await;
let result = registry.create_audit_targets_from_config(&config).await;
// We expect this to fail due to server storage not being initialized
// but the parsing should work correctly

View File

@@ -44,7 +44,7 @@ async fn test_audit_system_startup_performance() {
#[tokio::test]
async fn test_concurrent_target_creation() {
// Test that multiple targets can be created concurrently
let mut registry = AuditRegistry::new();
let registry = AuditRegistry::new();
// Create config with multiple webhook instances
let mut config = rustfs_ecstore::config::Config(std::collections::HashMap::new());
@@ -63,7 +63,7 @@ async fn test_concurrent_target_creation() {
let start = Instant::now();
// This will fail due to server storage not being initialized, but we can measure timing
let result = registry.create_targets_from_config(&config).await;
let result = registry.create_audit_targets_from_config(&config).await;
let elapsed = start.elapsed();
println!("Concurrent target creation took: {elapsed:?}");

View File

@@ -135,7 +135,7 @@ async fn test_global_audit_functions() {
#[tokio::test]
async fn test_config_parsing_with_multiple_instances() {
let mut registry = AuditRegistry::new();
let registry = AuditRegistry::new();
// Create config with multiple webhook instances
let mut config = Config(HashMap::new());
@@ -164,7 +164,7 @@ async fn test_config_parsing_with_multiple_instances() {
config.0.insert("audit_webhook".to_string(), webhook_section);
// Try to create targets from config
let result = registry.create_targets_from_config(&config).await;
let result = registry.create_audit_targets_from_config(&config).await;
// Should fail due to server storage not initialized, but parsing should work
match result {

View File

@@ -39,4 +39,4 @@ path-clean = { workspace = true }
rmp-serde = { workspace = true }
async-trait = { workspace = true }
s3s = { workspace = true }
tracing = { workspace = true }
tracing = { workspace = true }

View File

@@ -19,37 +19,81 @@ use std::sync::LazyLock;
use tokio::sync::RwLock;
use tonic::transport::Channel;
pub static GLOBAL_Local_Node_Name: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_Rustfs_Host: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_Rustfs_Port: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
pub static GLOBAL_Rustfs_Addr: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_Conn_Map: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
pub static GLOBAL_LOCAL_NODE_NAME: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_RUSTFS_HOST: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_RUSTFS_PORT: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
pub static GLOBAL_RUSTFS_ADDR: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_CONN_MAP: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
pub static GLOBAL_ROOT_CERT: LazyLock<RwLock<Option<Vec<u8>>>> = LazyLock::new(|| RwLock::new(None));
pub static GLOBAL_MTLS_IDENTITY: LazyLock<RwLock<Option<MtlsIdentityPem>>> = LazyLock::new(|| RwLock::new(None));
/// Set the global RustFS address used for gRPC connections.
///
/// # Arguments
/// * `addr` - A string slice representing the RustFS address (e.g., "https://node1:9000").
pub async fn set_global_addr(addr: &str) {
*GLOBAL_Rustfs_Addr.write().await = addr.to_string();
*GLOBAL_RUSTFS_ADDR.write().await = addr.to_string();
}
/// Set the global root CA certificate for outbound gRPC clients.
/// This certificate is used to validate server TLS certificates.
/// When set to None, clients use the system default root CAs.
///
/// # Arguments
/// * `cert` - A vector of bytes representing the PEM-encoded root CA certificate.
pub async fn set_global_root_cert(cert: Vec<u8>) {
*GLOBAL_ROOT_CERT.write().await = Some(cert);
}
/// Set the global mTLS identity (cert+key PEM) for outbound gRPC clients.
/// When set, clients will present this identity to servers requesting/requiring mTLS.
/// When None, clients proceed with standard server-authenticated TLS.
///
/// # Arguments
/// * `identity` - An optional MtlsIdentityPem struct containing the cert and key PEM.
pub async fn set_global_mtls_identity(identity: Option<MtlsIdentityPem>) {
*GLOBAL_MTLS_IDENTITY.write().await = identity;
}
/// Evict a stale/dead connection from the global connection cache.
/// This is critical for cluster recovery when a node dies unexpectedly (e.g., power-off).
/// By removing the cached connection, subsequent requests will establish a fresh connection.
///
/// # Arguments
/// * `addr` - The address of the connection to evict.
pub async fn evict_connection(addr: &str) {
let removed = GLOBAL_Conn_Map.write().await.remove(addr);
let removed = GLOBAL_CONN_MAP.write().await.remove(addr);
if removed.is_some() {
tracing::warn!("Evicted stale connection from cache: {}", addr);
}
}
/// Check if a connection exists in the cache for the given address.
///
/// # Arguments
/// * `addr` - The address to check.
///
/// # Returns
/// * `bool` - True if a cached connection exists, false otherwise.
pub async fn has_cached_connection(addr: &str) -> bool {
GLOBAL_Conn_Map.read().await.contains_key(addr)
GLOBAL_CONN_MAP.read().await.contains_key(addr)
}
/// Clear all cached connections. Useful for full cluster reset/recovery.
pub async fn clear_all_connections() {
let mut map = GLOBAL_Conn_Map.write().await;
let mut map = GLOBAL_CONN_MAP.write().await;
let count = map.len();
map.clear();
if count > 0 {
tracing::warn!("Cleared {} cached connections from global map", count);
}
}
/// Optional client identity (cert+key PEM) for outbound mTLS.
///
/// When present, gRPC clients will present this identity to servers requesting/requiring mTLS.
/// When absent, clients proceed with standard server-authenticated TLS.
#[derive(Clone, Debug)]
pub struct MtlsIdentityPem {
pub cert_pem: Vec<u8>,
pub key_pem: Vec<u8>,
}
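// A minimal recovery sketch using the helpers above: when a peer is detected as
// unreachable, drop its cached channel so the next request re-dials. The caller
// and the failure-detection logic are assumptions.
async fn recover_peer(addr: &str) {
    if has_cached_connection(addr).await {
        evict_connection(addr).await;
    }
    // A fresh Channel will be established lazily on the next gRPC call to `addr`.
}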

View File

@@ -19,6 +19,10 @@ pub mod globals;
pub mod heal_channel;
pub mod last_minute;
pub mod metrics;
mod readiness;
pub use globals::*;
pub use readiness::{GlobalReadiness, SystemStage};
// b',' (ASCII 44)
pub static DEFAULT_DELIMITER: u8 = 44;

View File

@@ -0,0 +1,136 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::atomic::{AtomicU8, Ordering};
/// Represents the various stages of system startup
#[repr(u8)]
pub enum SystemStage {
Booting = 0,
StorageReady = 1, // Disks online, Quorum met
IamReady = 2, // Users and Policies loaded into cache
FullReady = 3, // System ready to serve all traffic
}
/// Global readiness tracker for the service
/// This struct uses atomic operations to track the readiness status of various components
/// of the service in a thread-safe manner.
pub struct GlobalReadiness {
status: AtomicU8,
}
impl Default for GlobalReadiness {
fn default() -> Self {
Self::new()
}
}
impl GlobalReadiness {
/// Create a new GlobalReadiness instance with initial status as Starting
/// # Returns
/// A new instance of GlobalReadiness
pub fn new() -> Self {
Self {
status: AtomicU8::new(SystemStage::Booting as u8),
}
}
/// Update the system to a new stage
///
/// # Arguments
/// * `step` - The SystemStage step to mark as ready
pub fn mark_stage(&self, step: SystemStage) {
self.status.fetch_max(step as u8, Ordering::SeqCst);
}
/// Check if the service is fully ready
/// # Returns
/// `true` if the service is fully ready, `false` otherwise
pub fn is_ready(&self) -> bool {
self.status.load(Ordering::SeqCst) == SystemStage::FullReady as u8
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::Arc;
use std::thread;
#[test]
fn test_initial_state() {
let readiness = GlobalReadiness::new();
assert!(!readiness.is_ready());
assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::Booting as u8);
}
#[test]
fn test_mark_stage_progression() {
let readiness = GlobalReadiness::new();
readiness.mark_stage(SystemStage::StorageReady);
assert!(!readiness.is_ready());
assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::StorageReady as u8);
readiness.mark_stage(SystemStage::IamReady);
assert!(!readiness.is_ready());
assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::IamReady as u8);
readiness.mark_stage(SystemStage::FullReady);
assert!(readiness.is_ready());
}
#[test]
fn test_no_regression() {
let readiness = GlobalReadiness::new();
readiness.mark_stage(SystemStage::FullReady);
readiness.mark_stage(SystemStage::IamReady); // Should not regress
assert!(readiness.is_ready());
}
#[test]
fn test_concurrent_marking() {
let readiness = Arc::new(GlobalReadiness::new());
let mut handles = vec![];
for _ in 0..10 {
let r = Arc::clone(&readiness);
handles.push(thread::spawn(move || {
r.mark_stage(SystemStage::StorageReady);
r.mark_stage(SystemStage::IamReady);
r.mark_stage(SystemStage::FullReady);
}));
}
for h in handles {
h.join().unwrap();
}
assert!(readiness.is_ready());
}
#[test]
fn test_is_ready_only_at_full_ready() {
let readiness = GlobalReadiness::new();
assert!(!readiness.is_ready());
readiness.mark_stage(SystemStage::StorageReady);
assert!(!readiness.is_ready());
readiness.mark_stage(SystemStage::IamReady);
assert!(!readiness.is_ready());
readiness.mark_stage(SystemStage::FullReady);
assert!(readiness.is_ready());
}
}
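// A minimal gating sketch using the types above; the probe function and status
// codes are illustrative, not the actual HTTP readiness wiring.
fn readiness_probe(readiness: &GlobalReadiness) -> u16 {
    // Serve traffic only once the system has reached FullReady.
    if readiness.is_ready() { 200 } else { 503 }
}

fn mark_startup_complete(readiness: &GlobalReadiness) {
    // Stages only move forward, thanks to fetch_max in mark_stage.
    readiness.mark_stage(SystemStage::StorageReady);
    readiness.mark_stage(SystemStage::IamReady);
    readiness.mark_stage(SystemStage::FullReady);
}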

View File

@@ -29,7 +29,7 @@ pub const AUDIT_PREFIX: &str = "audit";
pub const AUDIT_ROUTE_PREFIX: &str = const_str::concat!(AUDIT_PREFIX, DEFAULT_DELIMITER);
pub const AUDIT_WEBHOOK_SUB_SYS: &str = "audit_webhook";
pub const AUDIT_MQTT_SUB_SYS: &str = "mqtt_webhook";
pub const AUDIT_MQTT_SUB_SYS: &str = "audit_mqtt";
pub const AUDIT_STORE_EXTENSION: &str = ".audit";
#[allow(dead_code)]

View File

@@ -49,21 +49,6 @@ pub const SERVICE_VERSION: &str = "1.0.0";
/// Default value: production
pub const ENVIRONMENT: &str = "production";
/// Default Access Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_ACCESS_KEY
/// Command line argument: --access-key
/// Example: RUSTFS_ACCESS_KEY=rustfsadmin
/// Example: --access-key rustfsadmin
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
/// Default Secret Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_SECRET_KEY
/// Command line argument: --secret-key
/// Example: RUSTFS_SECRET_KEY=rustfsadmin
/// Example: --secret-key rustfsadmin
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";
/// Default console enable
/// This is the default value for the console server.
/// It is used to enable or disable the console server.
@@ -89,6 +74,30 @@ pub const RUSTFS_TLS_KEY: &str = "rustfs_key.pem";
/// This is the default cert for TLS.
pub const RUSTFS_TLS_CERT: &str = "rustfs_cert.pem";
/// Default public certificate filename for rustfs
/// This is the default public certificate filename for rustfs.
/// It is used to store the public certificate of the application.
/// Default value: public.crt
pub const RUSTFS_PUBLIC_CERT: &str = "public.crt";
/// Default CA certificate filename for rustfs
/// This is the default CA certificate filename for rustfs.
/// It is used to store the CA certificate of the application.
/// Default value: ca.crt
pub const RUSTFS_CA_CERT: &str = "ca.crt";
/// Default HTTP prefix for rustfs
/// This is the default HTTP prefix for rustfs.
/// It is used to identify HTTP URLs.
/// Default value: http://
pub const RUSTFS_HTTP_PREFIX: &str = "http://";
/// Default HTTPS prefix for rustfs
/// This is the default HTTPS prefix for rustfs.
/// It is used to identify HTTPS URLs.
/// Default value: https://
pub const RUSTFS_HTTPS_PREFIX: &str = "https://";
/// Default port for rustfs
/// This is the default port for rustfs.
/// This is used to bind the server to a specific port.
@@ -161,6 +170,12 @@ pub const KI_B: usize = 1024;
/// Default value: 1048576
pub const MI_B: usize = 1024 * 1024;
/// Environment variable for gRPC authentication token
/// Used to set the authentication token for gRPC communication
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
/// Default value: none; using the RUSTFS_SECRET_KEY value is recommended.
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";
#[cfg(test)]
mod tests {
use super::*;
@@ -201,20 +216,6 @@ mod tests {
);
}
#[test]
fn test_security_constants() {
// Test security related constants
assert_eq!(DEFAULT_ACCESS_KEY, "rustfsadmin");
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
assert_eq!(DEFAULT_SECRET_KEY, "rustfsadmin");
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");
// In production environment, access key and secret key should be different
// These are default values, so being the same is acceptable, but should be warned in documentation
println!("Warning: Default access key and secret key are the same. Change them in production!");
}
#[test]
fn test_file_path_constants() {
assert_eq!(RUSTFS_TLS_KEY, "rustfs_key.pem");
@@ -276,8 +277,6 @@ mod tests {
DEFAULT_LOG_LEVEL,
SERVICE_VERSION,
ENVIRONMENT,
DEFAULT_ACCESS_KEY,
DEFAULT_SECRET_KEY,
RUSTFS_TLS_KEY,
RUSTFS_TLS_CERT,
DEFAULT_ADDRESS,
@@ -307,29 +306,6 @@ mod tests {
assert_ne!(DEFAULT_CONSOLE_PORT, 0, "Console port should not be zero");
}
#[test]
fn test_security_best_practices() {
// Test security best practices
// These are default values, should be changed in production environments
println!("Security Warning: Default credentials detected!");
println!("Access Key: {DEFAULT_ACCESS_KEY}");
println!("Secret Key: {DEFAULT_SECRET_KEY}");
println!("These should be changed in production environments!");
// Verify that key lengths meet minimum security requirements
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");
// Check if default credentials contain common insecure patterns
let _insecure_patterns = ["admin", "password", "123456", "default"];
let _access_key_lower = DEFAULT_ACCESS_KEY.to_lowercase();
let _secret_key_lower = DEFAULT_SECRET_KEY.to_lowercase();
// Note: More security check logic can be added here
// For example, check if keys contain insecure patterns
}
#[test]
fn test_configuration_consistency() {
// Test configuration consistency

View File

@@ -0,0 +1,56 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Request body size limits for admin API endpoints
//!
//! These limits prevent DoS attacks through unbounded memory allocation
//! while allowing legitimate use cases.
/// Maximum size for standard admin API request bodies (1 MB)
/// Used for: user creation/update, policies, tier config, KMS config, events, groups, service accounts
/// Rationale: Admin API payloads are typically JSON/XML configs under 100KB.
/// AWS IAM policy limit is 6KB-10KB. 1MB provides generous headroom.
pub const MAX_ADMIN_REQUEST_BODY_SIZE: usize = 1024 * 1024; // 1 MB
/// Maximum size for IAM import/export operations (10 MB)
/// Used for: IAM entity imports/exports containing multiple users, policies, groups
/// Rationale: ZIP archives with hundreds of IAM entities. 10MB allows ~10,000 small configs.
pub const MAX_IAM_IMPORT_SIZE: usize = 10 * 1024 * 1024; // 10 MB
/// Maximum size for bucket metadata import operations (100 MB)
/// Used for: Bucket metadata import containing configurations for many buckets
/// Rationale: Large deployments may have thousands of buckets with various configs.
/// 100MB allows importing metadata for ~10,000 buckets with reasonable configs.
pub const MAX_BUCKET_METADATA_IMPORT_SIZE: usize = 100 * 1024 * 1024; // 100 MB
/// Maximum size for healing operation requests (1 MB)
/// Used for: Healing parameters and configuration
/// Rationale: Healing requests contain bucket/object paths and options. Should be small.
pub const MAX_HEAL_REQUEST_SIZE: usize = 1024 * 1024; // 1 MB
/// Maximum size for S3 client response bodies (10 MB)
/// Used for: Reading responses from remote S3-compatible services (ACL, attributes, lists)
/// Rationale: Responses from external S3-compatible services should be bounded;
/// responses larger than 10MB indicate misconfiguration or a potential attack.
/// - ACL XML responses: typically < 10KB
/// - Object attributes: typically < 100KB
/// - List responses: typically < 1MB (1000 objects with metadata)
/// - Location/error responses: typically < 10KB
///
/// 10MB provides generous headroom for legitimate responses while preventing
/// memory exhaustion from malicious or misconfigured remote services.
pub const MAX_S3_CLIENT_RESPONSE_SIZE: usize = 10 * 1024 * 1024; // 10 MB
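// A minimal sketch of how one of the limits above could be enforced before
// buffering a request body; the helper is illustrative, not an actual admin handler.
fn check_body_size(declared_len: usize, limit: usize) -> Result<(), String> {
    if declared_len > limit {
        return Err(format!("request body of {declared_len} bytes exceeds limit of {limit} bytes"));
    }
    Ok(())
}

fn main() {
    assert!(check_body_size(64 * 1024, MAX_ADMIN_REQUEST_BODY_SIZE).is_ok());
    assert!(check_body_size(2 * 1024 * 1024, MAX_ADMIN_REQUEST_BODY_SIZE).is_err());
}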

View File

@@ -0,0 +1,61 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HTTP Response Compression Configuration
//!
//! This module provides configuration options for HTTP response compression.
//! By default, compression is disabled (aligned with MinIO behavior).
//! When enabled via `RUSTFS_COMPRESS_ENABLE=on`, compression can be configured
//! to apply only to specific file extensions, MIME types, and minimum file sizes.
/// Environment variable to enable/disable HTTP response compression
/// Default: off (disabled)
/// Values: on, off, true, false, yes, no, 1, 0
/// Example: RUSTFS_COMPRESS_ENABLE=on
pub const ENV_COMPRESS_ENABLE: &str = "RUSTFS_COMPRESS_ENABLE";
/// Default compression enable state
/// Aligned with MinIO behavior - compression is disabled by default
pub const DEFAULT_COMPRESS_ENABLE: bool = false;
/// Environment variable for file extensions that should be compressed
/// Comma-separated list of file extensions (with or without leading dot)
/// Default: "" (empty, meaning use MIME type matching only)
/// Example: RUSTFS_COMPRESS_EXTENSIONS=.txt,.log,.csv,.json,.xml,.html,.css,.js
pub const ENV_COMPRESS_EXTENSIONS: &str = "RUSTFS_COMPRESS_EXTENSIONS";
/// Default file extensions for compression
/// Empty by default - relies on MIME type matching
pub const DEFAULT_COMPRESS_EXTENSIONS: &str = "";
/// Environment variable for MIME types that should be compressed
/// Comma-separated list of MIME types, supports wildcard (*) for subtypes
/// Default: "text/*,application/json,application/xml,application/javascript"
/// Example: RUSTFS_COMPRESS_MIME_TYPES=text/*,application/json,application/xml
pub const ENV_COMPRESS_MIME_TYPES: &str = "RUSTFS_COMPRESS_MIME_TYPES";
/// Default MIME types for compression
/// Includes common text-based content types that benefit from compression
pub const DEFAULT_COMPRESS_MIME_TYPES: &str = "text/*,application/json,application/xml,application/javascript";
/// Environment variable for minimum file size to apply compression
/// Files smaller than this size will not be compressed
/// Default: 1000 (bytes)
/// Example: RUSTFS_COMPRESS_MIN_SIZE=1000
pub const ENV_COMPRESS_MIN_SIZE: &str = "RUSTFS_COMPRESS_MIN_SIZE";
/// Default minimum file size for compression (in bytes)
/// Files smaller than 1000 bytes typically don't benefit from compression
/// and the compression overhead may outweigh the benefits
pub const DEFAULT_COMPRESS_MIN_SIZE: u64 = 1000;
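// A minimal sketch of resolving the compression settings above from the
// environment; the CompressConfig struct and parsing rules here are assumptions,
// not the actual RustFS configuration loader.
struct CompressConfig {
    enable: bool,
    extensions: Vec<String>,
    mime_types: Vec<String>,
    min_size: u64,
}

fn load_compress_config() -> CompressConfig {
    let enable = std::env::var(ENV_COMPRESS_ENABLE)
        .map(|v| matches!(v.to_lowercase().as_str(), "on" | "true" | "yes" | "1"))
        .unwrap_or(DEFAULT_COMPRESS_ENABLE);
    let extensions: Vec<String> = std::env::var(ENV_COMPRESS_EXTENSIONS)
        .unwrap_or_else(|_| DEFAULT_COMPRESS_EXTENSIONS.to_string())
        .split(',')
        .filter(|s| !s.trim().is_empty())
        .map(|s| s.trim().to_string())
        .collect();
    let mime_types: Vec<String> = std::env::var(ENV_COMPRESS_MIME_TYPES)
        .unwrap_or_else(|_| DEFAULT_COMPRESS_MIME_TYPES.to_string())
        .split(',')
        .filter(|s| !s.trim().is_empty())
        .map(|s| s.trim().to_string())
        .collect();
    let min_size = std::env::var(ENV_COMPRESS_MIN_SIZE)
        .ok()
        .and_then(|v| v.parse().ok())
        .unwrap_or(DEFAULT_COMPRESS_MIN_SIZE);
    CompressConfig { enable, extensions, mime_types, min_size }
}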

View File

@@ -16,7 +16,8 @@ pub const DEFAULT_DELIMITER: &str = "_";
pub const ENV_PREFIX: &str = "RUSTFS_";
pub const ENV_WORD_DELIMITER: &str = "_";
pub const DEFAULT_DIR: &str = "/opt/rustfs/events"; // Default directory for event store
pub const EVENT_DEFAULT_DIR: &str = "/opt/rustfs/events"; // Default directory for event store
pub const AUDIT_DEFAULT_DIR: &str = "/opt/rustfs/audit"; // Default directory for audit store
pub const DEFAULT_LIMIT: u64 = 100000; // Default store limit
/// Standard config keys and values.

View File

@@ -13,6 +13,8 @@
// limitations under the License.
pub(crate) mod app;
pub(crate) mod body_limits;
pub(crate) mod compress;
pub(crate) mod console;
pub(crate) mod env;
pub(crate) mod heal;

View File

@@ -39,3 +39,10 @@ pub const DEFAULT_MAX_IO_EVENTS_PER_TICK: usize = 1024;
/// Event polling default (Tokio default 61)
pub const DEFAULT_EVENT_INTERVAL: u32 = 61;
pub const DEFAULT_RNG_SEED: Option<u64> = None; // None means random
/// Threshold for small object seek support in megabytes.
///
/// When an object is smaller than this size, rustfs will provide seek support.
///
/// Default is set to 10MB.
pub const DEFAULT_OBJECT_SEEK_SUPPORT_THRESHOLD: usize = 10 * 1024 * 1024;
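// A minimal sketch of the size check implied by the threshold above; the helper
// name is illustrative.
fn supports_seek(object_size: usize) -> bool {
    object_size < DEFAULT_OBJECT_SEEK_SUPPORT_THRESHOLD
}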

View File

@@ -12,4 +12,75 @@
// See the License for the specific language governing permissions and
// limitations under the License.
/// TLS related environment variable names and default values
/// Environment variable to enable TLS key logging
/// When set to "1", RustFS will log TLS keys to the specified file for debugging purposes.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_TLS_KEYLOG=1
pub const ENV_TLS_KEYLOG: &str = "RUSTFS_TLS_KEYLOG";
/// Default value for TLS key logging
/// By default, RustFS does not log TLS keys.
/// To change this behavior, set the environment variable RUSTFS_TLS_KEYLOG=1
pub const DEFAULT_TLS_KEYLOG: bool = false;
/// Environment variable to trust system CA certificates
/// When set to "1", RustFS will trust system CA certificates in addition to any
/// custom CA certificates provided in the configuration.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_TRUST_SYSTEM_CA=1
pub const ENV_TRUST_SYSTEM_CA: &str = "RUSTFS_TRUST_SYSTEM_CA";
/// Default value for trusting system CA certificates
/// By default, RustFS does not trust system CA certificates.
/// To change this behavior, set the environment variable RUSTFS_TRUST_SYSTEM_CA=1
pub const DEFAULT_TRUST_SYSTEM_CA: bool = false;
/// Environment variable to trust leaf certificates as CA
/// When set to "1", RustFS will treat leaf certificates as CA certificates for trust validation.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_TRUST_LEAF_CERT_AS_CA=1
pub const ENV_TRUST_LEAF_CERT_AS_CA: &str = "RUSTFS_TRUST_LEAF_CERT_AS_CA";
/// Default value for trusting leaf certificates as CA
/// By default, RustFS does not trust leaf certificates as CA.
/// To change this behavior, set the environment variable RUSTFS_TRUST_LEAF_CERT_AS_CA=1
pub const DEFAULT_TRUST_LEAF_CERT_AS_CA: bool = false;
/// Default filename for client CA certificate
/// client_ca.crt (CA bundle for verifying client certificates in server mTLS)
pub const RUSTFS_CLIENT_CA_CERT_FILENAME: &str = "client_ca.crt";
/// Environment variable for client certificate file path
/// RUSTFS_MTLS_CLIENT_CERT
/// Specifies the file path to the client certificate used for mTLS authentication.
/// If not set, RustFS will look for the default filename "client_cert.pem" in the current directory.
/// To set, use the environment variable RUSTFS_MTLS_CLIENT_CERT=/path/to/client_cert.pem
pub const ENV_MTLS_CLIENT_CERT: &str = "RUSTFS_MTLS_CLIENT_CERT";
/// Default filename for client certificate
/// client_cert.pem
pub const RUSTFS_CLIENT_CERT_FILENAME: &str = "client_cert.pem";
/// Environment variable for client private key file path
/// RUSTFS_MTLS_CLIENT_KEY
/// Specifies the file path to the client private key used for mTLS authentication.
/// If not set, RustFS will look for the default filename "client_key.pem" in the current directory.
/// To set, use the environment variable RUSTFS_MTLS_CLIENT_KEY=/path/to/client_key.pem
pub const ENV_MTLS_CLIENT_KEY: &str = "RUSTFS_MTLS_CLIENT_KEY";
/// Default filename for client private key
/// client_key.pem
pub const RUSTFS_CLIENT_KEY_FILENAME: &str = "client_key.pem";
/// RUSTFS_SERVER_MTLS_ENABLE
/// Environment variable to enable server mTLS
/// When set to "1", RustFS server will require client certificates for authentication.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_SERVER_MTLS_ENABLE=1
pub const ENV_SERVER_MTLS_ENABLE: &str = "RUSTFS_SERVER_MTLS_ENABLE";
/// Default value for enabling server mTLS
/// By default, RustFS server mTLS is disabled.
/// To change this behavior, set the environment variable RUSTFS_SERVER_MTLS_ENABLE=1
pub const DEFAULT_SERVER_MTLS_ENABLE: bool = false;
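// A minimal sketch of resolving the mTLS client identity paths from the
// variables above, falling back to the default filenames; how the resulting
// paths are rooted (e.g. relative to RUSTFS_TLS_PATH) is an assumption not
// covered here.
fn client_identity_paths() -> (String, String) {
    let cert = std::env::var(ENV_MTLS_CLIENT_CERT)
        .unwrap_or_else(|_| RUSTFS_CLIENT_CERT_FILENAME.to_string());
    let key = std::env::var(ENV_MTLS_CLIENT_KEY)
        .unwrap_or_else(|_| RUSTFS_CLIENT_KEY_FILENAME.to_string());
    (cert, key)
}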

View File

@@ -17,6 +17,10 @@ pub mod constants;
#[cfg(feature = "constants")]
pub use constants::app::*;
#[cfg(feature = "constants")]
pub use constants::body_limits::*;
#[cfg(feature = "constants")]
pub use constants::compress::*;
#[cfg(feature = "constants")]
pub use constants::console::*;
#[cfg(feature = "constants")]
pub use constants::env::*;

View File

@@ -24,13 +24,45 @@ pub use webhook::*;
use crate::DEFAULT_DELIMITER;
// --- Configuration Constants ---
/// Default target identifier for notifications,
/// Used in notification system when no specific target is provided,
/// Represents the default target stream or endpoint for notifications when no specific target is provided.
pub const DEFAULT_TARGET: &str = "1";
/// Notification prefix for routing and identification,
/// Used in notification system,
/// This prefix is utilized in constructing routes and identifiers related to notifications within the system.
pub const NOTIFY_PREFIX: &str = "notify";
/// Notification route prefix combining the notification prefix and default delimiter
/// Combines the notification prefix with the default delimiter
/// Used in notification system for defining routes related to notifications.
/// Example: "notify:/"
pub const NOTIFY_ROUTE_PREFIX: &str = const_str::concat!(NOTIFY_PREFIX, DEFAULT_DELIMITER);
/// Name of the environment variable that configures target stream concurrency.
/// Controls how many target streams are processed in parallel by the notification system.
/// Defaults to [`DEFAULT_NOTIFY_TARGET_STREAM_CONCURRENCY`] if not set.
/// Example: `RUSTFS_NOTIFY_TARGET_STREAM_CONCURRENCY=20`.
pub const ENV_NOTIFY_TARGET_STREAM_CONCURRENCY: &str = "RUSTFS_NOTIFY_TARGET_STREAM_CONCURRENCY";
/// Default concurrency for target stream processing in the notification system
/// This value is used if the environment variable `RUSTFS_NOTIFY_TARGET_STREAM_CONCURRENCY` is not set.
/// It defines how many target streams can be processed in parallel by the notification system at any given time.
/// Adjust this value based on your system's capabilities and expected load.
pub const DEFAULT_NOTIFY_TARGET_STREAM_CONCURRENCY: usize = 20;
/// Name of the environment variable that configures send concurrency.
/// Controls how many send operations are processed in parallel by the notification system.
/// Defaults to [`DEFAULT_NOTIFY_SEND_CONCURRENCY`] if not set.
/// Example: `RUSTFS_NOTIFY_SEND_CONCURRENCY=64`.
pub const ENV_NOTIFY_SEND_CONCURRENCY: &str = "RUSTFS_NOTIFY_SEND_CONCURRENCY";
/// Default concurrency for send operations in the notification system
/// This value is used if the environment variable `RUSTFS_NOTIFY_SEND_CONCURRENCY` is not set.
/// It defines how many send operations can be processed in parallel by the notification system at any given time.
/// Adjust this value based on your system's capabilities and expected load.
pub const DEFAULT_NOTIFY_SEND_CONCURRENCY: usize = 64;
#[allow(dead_code)]
pub const NOTIFY_SUB_SYSTEMS: &[&str] = &[NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS];
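// A minimal sketch of reading the concurrency knobs above with their defaults;
// the helper name is illustrative.
fn notify_concurrency() -> (usize, usize) {
    let stream = std::env::var(ENV_NOTIFY_TARGET_STREAM_CONCURRENCY)
        .ok()
        .and_then(|v| v.parse().ok())
        .unwrap_or(DEFAULT_NOTIFY_TARGET_STREAM_CONCURRENCY);
    let send = std::env::var(ENV_NOTIFY_SEND_CONCURRENCY)
        .ok()
        .and_then(|v| v.parse().ok())
        .unwrap_or(DEFAULT_NOTIFY_SEND_CONCURRENCY);
    (stream, send)
}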

View File

@@ -15,5 +15,5 @@
pub const DEFAULT_EXT: &str = ".unknown"; // Default file extension
pub const COMPRESS_EXT: &str = ".snappy"; // Extension for compressed files
/// STORE_EXTENSION - file extension of an event file in store
pub const STORE_EXTENSION: &str = ".event";
/// NOTIFY_STORE_EXTENSION - file extension of an event file in store
pub const NOTIFY_STORE_EXTENSION: &str = ".event";

View File

@@ -0,0 +1,21 @@
[package]
name = "rustfs-credentials"
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Credentials management utilities for RustFS, enabling secure handling of authentication and authorization data."
keywords = ["rustfs", "Minio", "credentials", "authentication", "authorization"]
categories = ["web-programming", "development-tools", "data-structures", "security"]
[dependencies]
base64-simd = { workspace = true }
rand = { workspace = true }
serde = { workspace = true }
serde_json.workspace = true
time = { workspace = true, features = ["serde-human-readable"] }
[lints]
workspace = true

View File

@@ -0,0 +1,44 @@
[![RustFS](https://rustfs.com/images/rustfs-github.png)](https://rustfs.com)
# RustFS Credentials - Credential Management Module
<p align="center">
<strong>A module for managing credentials within the RustFS distributed object storage system.</strong>
</p>
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>
---
This module provides a secure and efficient way to handle various types of credentials,
such as API keys, access tokens, and cryptographic keys, required for interacting with
the RustFS ecosystem and external services.
## 📖 Overview
**RustFS Credentials** is a module dedicated to managing credentials for the [RustFS](https://rustfs.com) distributed
object storage system. For the complete RustFS experience,
please visit the [main RustFS repository](https://github.com/rustfs/rustfs).
## ✨ Features
- Secure storage and retrieval of credentials
- Support for multiple credential types (API keys, tokens, etc.)
- Encryption of sensitive credential data
- Integration with external secret management systems
- Easy-to-use API for credential management
- Credential rotation and expiration handling
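## 🚀 Quick Start
A minimal usage sketch of the crate's public API (key lengths, field values, and the `"active"` status string are illustrative):
```rust
use rustfs_credentials::{gen_access_key, gen_secret_key, Credentials};

fn main() -> std::io::Result<()> {
    // Generate a random access/secret key pair.
    let access_key = gen_access_key(20)?;
    let secret_key = gen_secret_key(32)?;

    // Build a credentials record; unspecified fields use their defaults.
    let cred = Credentials {
        access_key,
        secret_key,
        status: "active".to_string(),
        ..Default::default()
    };
    assert!(cred.is_valid());
    Ok(())
}
```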
## 📚 Documentation
For comprehensive documentation, examples, and usage guides, please visit the
main [RustFS repository](https://github.com/rustfs/rustfs).
## 📄 License
This project is licensed under the Apache License 2.0 - see the [LICENSE](../../LICENSE) file for details.

View File

@@ -0,0 +1,94 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Default Access Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_ACCESS_KEY
/// Command line argument: --access-key
/// Example: RUSTFS_ACCESS_KEY=rustfsadmin
/// Example: --access-key rustfsadmin
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
/// Default Secret Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_SECRET_KEY
/// Command line argument: --secret-key
/// Example: RUSTFS_SECRET_KEY=rustfsadmin
/// Example: --secret-key rustfsadmin
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";
/// Environment variable for gRPC authentication token
/// Used to set the authentication token for gRPC communication
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
/// Default value: none; falling back to the RUSTFS_SECRET_KEY value is recommended.
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";
/// IAM policy type marking an embedded (explicitly attached) policy.
/// Used to differentiate between embedded and inherited policies.
pub const EMBEDDED_POLICY_TYPE: &str = "embedded-policy";
/// IAM policy type marking a policy inherited from the parent user.
/// Used to differentiate between embedded and inherited policies.
pub const INHERITED_POLICY_TYPE: &str = "inherited-policy";
/// IAM Policy Claim Name for Service Account
/// Used to identify the service account policy claim in JWT tokens
/// Example: "sa-policy"
/// Default value: "sa-policy"
pub const IAM_POLICY_CLAIM_NAME_SA: &str = "sa-policy";
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_security_constants() {
// Test security related constants
assert_eq!(DEFAULT_ACCESS_KEY, "rustfsadmin");
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
assert_eq!(DEFAULT_SECRET_KEY, "rustfsadmin");
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");
// In production environment, access key and secret key should be different
// These are default values, so being the same is acceptable, but should be warned in documentation
println!("Warning: Default access key and secret key are the same. Change them in production!");
}
#[test]
fn test_security_best_practices() {
// Test security best practices
// These are default values, should be changed in production environments
println!("Security Warning: Default credentials detected!");
println!("Access Key: {DEFAULT_ACCESS_KEY}");
println!("Secret Key: {DEFAULT_SECRET_KEY}");
println!("These should be changed in production environments!");
// Verify that key lengths meet minimum security requirements
assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");
// Check if default credentials contain common insecure patterns
let _insecure_patterns = ["admin", "password", "123456", "default"];
let _access_key_lower = DEFAULT_ACCESS_KEY.to_lowercase();
let _secret_key_lower = DEFAULT_SECRET_KEY.to_lowercase();
// Note: More security check logic can be added here
// For example, check if keys contain insecure patterns
}
}

View File

@@ -0,0 +1,386 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{DEFAULT_SECRET_KEY, ENV_GRPC_AUTH_TOKEN, IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
use rand::{Rng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::env;
use std::io::Error;
use std::sync::OnceLock;
use time::OffsetDateTime;
/// Global active credentials
static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();
/// Global gRPC authentication token
static GLOBAL_GRPC_AUTH_TOKEN: OnceLock<String> = OnceLock::new();
/// Initialize the global action credentials
///
/// # Arguments
/// * `ak` - Optional access key
/// * `sk` - Optional secret key
///
/// # Returns
/// * `Result<(), Box<Credentials>>` - Ok if successful; Err carrying the rejected credentials' access key if the globals were already initialized
///
/// # Panics
/// This function panics if automatic credential generation fails when `ak` or `sk`
/// are `None`, for example if the random number generator fails while calling
/// `gen_access_key` or `gen_secret_key`.
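///
/// # Examples
/// A minimal sketch (illustrative credential values):
/// ```no_run
/// use rustfs_credentials::{get_global_action_cred, init_global_action_credentials};
///
/// // Ignore the error if another component already initialized the globals.
/// let _ = init_global_action_credentials(Some("AKIAEXAMPLE".into()), Some("secretkey12345678".into()));
/// assert!(get_global_action_cred().is_some());
/// ```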
pub fn init_global_action_credentials(ak: Option<String>, sk: Option<String>) -> Result<(), Box<Credentials>> {
let ak = ak.unwrap_or_else(|| gen_access_key(20).expect("Failed to generate access key"));
let sk = sk.unwrap_or_else(|| gen_secret_key(32).expect("Failed to generate secret key"));
let cred = Credentials {
access_key: ak,
secret_key: sk,
..Default::default()
};
GLOBAL_ACTIVE_CRED.set(cred).map_err(|e| {
Box::new(Credentials {
access_key: e.access_key.clone(),
..Default::default()
})
})
}
/// Get the global action credentials
pub fn get_global_action_cred() -> Option<Credentials> {
GLOBAL_ACTIVE_CRED.get().cloned()
}
/// Get the global secret key
///
/// # Returns
/// * `Option<String>` - The global secret key, if set
///
pub fn get_global_secret_key_opt() -> Option<String> {
GLOBAL_ACTIVE_CRED.get().map(|cred| cred.secret_key.clone())
}
/// Get the global secret key
///
/// # Returns
/// * `String` - The global secret key, or empty string if not set
///
pub fn get_global_secret_key() -> String {
GLOBAL_ACTIVE_CRED
.get()
.map(|cred| cred.secret_key.clone())
.unwrap_or_default()
}
/// Get the global access key
///
/// # Returns
/// * `Option<String>` - The global access key, if set
///
pub fn get_global_access_key_opt() -> Option<String> {
GLOBAL_ACTIVE_CRED.get().map(|cred| cred.access_key.clone())
}
/// Get the global access key
///
/// # Returns
/// * `String` - The global access key, or empty string if not set
///
pub fn get_global_access_key() -> String {
GLOBAL_ACTIVE_CRED
.get()
.map(|cred| cred.access_key.clone())
.unwrap_or_default()
}
/// Generates a random access key of the specified length.
///
/// # Arguments
/// * `length` - The length of the access key to generate
///
/// # Returns
/// * `Result<String>` - A result containing the generated access key or an error if the length is too short
///
/// # Errors
/// This function will return an error if the specified length is less than 3.
///
/// # Examples
/// ```no_run
/// use rustfs_credentials::gen_access_key;
///
/// let access_key = gen_access_key(16).unwrap();
/// println!("Generated access key: {}", access_key);
/// ```
///
pub fn gen_access_key(length: usize) -> std::io::Result<String> {
const ALPHA_NUMERIC_TABLE: [char; 36] = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
];
if length < 3 {
return Err(Error::other("access key length is too short"));
}
let mut result = String::with_capacity(length);
let mut rng = rand::rng();
for _ in 0..length {
result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]);
}
Ok(result)
}
/// Generates a random secret key of the specified length.
///
/// # Arguments
/// * `length` - The length of the secret key to generate
///
/// # Returns
/// * `Result<String>` - A result containing the generated secret key or an error if the length is too short
///
/// # Errors
/// This function will return an error if the specified length is less than 8.
///
/// # Examples
/// ```no_run
/// use rustfs_credentials::gen_secret_key;
///
/// let secret_key = gen_secret_key(32).unwrap();
/// println!("Generated secret key: {}", secret_key);
/// ```
///
pub fn gen_secret_key(length: usize) -> std::io::Result<String> {
use base64_simd::URL_SAFE_NO_PAD;
if length < 8 {
return Err(Error::other("secret key length is too short"));
}
let mut rng = rand::rng();
let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)];
rng.fill_bytes(&mut key);
let encoded = URL_SAFE_NO_PAD.encode_to_string(&key);
let key_str = encoded.replace("/", "+");
Ok(key_str)
}
/// Get the gRPC authentication token from environment variable
///
/// # Returns
/// * `String` - The gRPC authentication token
///
pub fn get_grpc_token() -> String {
GLOBAL_GRPC_AUTH_TOKEN
.get_or_init(|| {
env::var(ENV_GRPC_AUTH_TOKEN)
.unwrap_or_else(|_| get_global_secret_key_opt().unwrap_or_else(|| DEFAULT_SECRET_KEY.to_string()))
})
.clone()
}
/// Credentials structure
///
/// Fields:
/// - access_key: Access key string
/// - secret_key: Secret key string
/// - session_token: Session token string
/// - expiration: Optional expiration time as OffsetDateTime
/// - status: Status string (e.g., "active", "off")
/// - parent_user: Parent user string
/// - groups: Optional list of groups
/// - claims: Optional map of claims
/// - name: Optional name string
/// - description: Optional description string
///
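/// # Examples
/// A minimal sketch (illustrative values):
/// ```no_run
/// use rustfs_credentials::Credentials;
/// use time::{Duration, OffsetDateTime};
///
/// let cred = Credentials {
///     access_key: "AKIAEXAMPLE".to_string(),
///     secret_key: "secretkey12345678".to_string(),
///     session_token: "session-token".to_string(),
///     expiration: Some(OffsetDateTime::now_utc() + Duration::hours(1)),
///     ..Default::default()
/// };
/// assert!(cred.is_valid());
/// assert!(cred.is_temp());
/// assert!(!cred.is_expired());
/// ```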
#[derive(Serialize, Deserialize, Clone, Default, Debug)]
pub struct Credentials {
pub access_key: String,
pub secret_key: String,
pub session_token: String,
pub expiration: Option<OffsetDateTime>,
pub status: String,
pub parent_user: String,
pub groups: Option<Vec<String>>,
pub claims: Option<HashMap<String, Value>>,
pub name: Option<String>,
pub description: Option<String>,
}
impl Credentials {
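/// Returns true if the credentials carry an expiration time that is already in the past.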
pub fn is_expired(&self) -> bool {
if self.expiration.is_none() {
return false;
}
self.expiration
.as_ref()
.map(|e| OffsetDateTime::now_utc() > *e)
.unwrap_or(false)
}
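/// Returns true if these are temporary (session-token) credentials that have not yet expired.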
pub fn is_temp(&self) -> bool {
!self.session_token.is_empty() && !self.is_expired()
}
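/// Returns true if the credentials carry a service-account policy claim and have a parent user set.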
pub fn is_service_account(&self) -> bool {
self.claims
.as_ref()
.map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|_| !self.parent_user.is_empty()))
.unwrap_or_default()
}
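/// Returns true if this service account inherits its policy from the parent user,
/// i.e. the service-account policy claim equals [`INHERITED_POLICY_TYPE`].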
pub fn is_implied_policy(&self) -> bool {
if self.is_service_account() {
return self
.claims
.as_ref()
.map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|v| v == INHERITED_POLICY_TYPE))
.unwrap_or_default();
}
false
}
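/// Returns true if the credentials are usable: status is not "off", the access key is
/// at least 3 characters, the secret key is at least 8 characters, and the credentials have not expired.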
pub fn is_valid(&self) -> bool {
if self.status == "off" {
return false;
}
self.access_key.len() >= 3 && self.secret_key.len() >= 8 && !self.is_expired()
}
pub fn is_owner(&self) -> bool {
false
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
use time::Duration;
#[test]
fn test_credentials_is_expired() {
let mut cred = Credentials::default();
assert!(!cred.is_expired());
cred.expiration = Some(OffsetDateTime::now_utc() + Duration::hours(1));
assert!(!cred.is_expired());
cred.expiration = Some(OffsetDateTime::now_utc() - Duration::hours(1));
assert!(cred.is_expired());
}
#[test]
fn test_credentials_is_temp() {
let mut cred = Credentials::default();
assert!(!cred.is_temp());
cred.session_token = "token".to_string();
assert!(cred.is_temp());
cred.expiration = Some(OffsetDateTime::now_utc() - Duration::hours(1));
assert!(!cred.is_temp());
}
#[test]
fn test_credentials_is_service_account() {
let mut cred = Credentials::default();
assert!(!cred.is_service_account());
let mut claims = HashMap::new();
claims.insert(IAM_POLICY_CLAIM_NAME_SA.to_string(), Value::String("policy".to_string()));
cred.claims = Some(claims);
cred.parent_user = "parent".to_string();
assert!(cred.is_service_account());
}
#[test]
fn test_credentials_is_implied_policy() {
let mut cred = Credentials::default();
assert!(!cred.is_implied_policy());
let mut claims = HashMap::new();
claims.insert(IAM_POLICY_CLAIM_NAME_SA.to_string(), Value::String(INHERITED_POLICY_TYPE.to_string()));
cred.claims = Some(claims);
cred.parent_user = "parent".to_string();
assert!(cred.is_implied_policy());
}
#[test]
fn test_credentials_is_valid() {
let mut cred = Credentials::default();
assert!(!cred.is_valid());
cred.access_key = "abc".to_string();
cred.secret_key = "12345678".to_string();
assert!(cred.is_valid());
cred.status = "off".to_string();
assert!(!cred.is_valid());
}
#[test]
fn test_credentials_is_owner() {
let cred = Credentials::default();
assert!(!cred.is_owner());
}
#[test]
fn test_global_credentials_flow() {
// Since OnceLock can only be set once, all tests that touch the global credentials are grouped here.
// If the globals were already initialized (possibly by another test), we only verify the resulting state.
if get_global_action_cred().is_none() {
// Verify that the initial state is empty
assert!(get_global_access_key_opt().is_none());
assert_eq!(get_global_access_key(), "");
assert!(get_global_secret_key_opt().is_none());
assert_eq!(get_global_secret_key(), "");
// Initialize
let test_ak = "test_access_key".to_string();
let test_sk = "test_secret_key_123456".to_string();
init_global_action_credentials(Some(test_ak.clone()), Some(test_sk.clone())).ok();
}
// Verify the state after initialization
let cred = get_global_action_cred().expect("Global credentials should be set");
assert!(!cred.access_key.is_empty());
assert!(!cred.secret_key.is_empty());
assert!(get_global_access_key_opt().is_some());
assert!(!get_global_access_key().is_empty());
assert!(get_global_secret_key_opt().is_some());
assert!(!get_global_secret_key().is_empty());
}
#[test]
fn test_init_global_credentials_auto_gen() {
// If the globals haven't been initialized yet, exercise the automatic credential generation logic
if get_global_action_cred().is_none() {
init_global_action_credentials(None, None).ok();
let ak = get_global_access_key();
let sk = get_global_secret_key();
assert_eq!(ak.len(), 20);
assert_eq!(sk.len(), 32);
}
}
}

View File

@@ -0,0 +1,19 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod constants;
mod credentials;
pub use constants::*;
pub use credentials::*;

View File

@@ -30,7 +30,7 @@ workspace = true
[dependencies]
aes-gcm = { workspace = true, optional = true }
argon2 = { workspace = true, features = ["std"], optional = true }
argon2 = { workspace = true, optional = true }
cfg-if = { workspace = true }
chacha20poly1305 = { workspace = true, optional = true }
jsonwebtoken = { workspace = true }

View File

@@ -327,7 +327,8 @@ pub async fn execute_awscurl(
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(format!("awscurl failed: {stderr}").into());
let stdout = String::from_utf8_lossy(&output.stdout);
return Err(format!("awscurl failed: stderr='{stderr}', stdout='{stdout}'").into());
}
let response = String::from_utf8_lossy(&output.stdout).to_string();
@@ -352,3 +353,13 @@ pub async fn awscurl_get(
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
execute_awscurl(url, "GET", None, access_key, secret_key).await
}
/// Helper function for PUT requests
pub async fn awscurl_put(
url: &str,
body: &str,
access_key: &str,
secret_key: &str,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
execute_awscurl(url, "PUT", Some(body), access_key, secret_key).await
}

View File

@@ -18,6 +18,9 @@ mod reliant;
#[cfg(test)]
pub mod common;
#[cfg(test)]
mod version_id_regression_test;
// Data usage regression tests
#[cfg(test)]
mod data_usage_test;
@@ -33,3 +36,7 @@ mod special_chars_test;
// Content-Encoding header preservation test
#[cfg(test)]
mod content_encoding_test;
// Policy variables tests
#[cfg(test)]
mod policy;

View File

@@ -0,0 +1,39 @@
# RustFS Policy Variables Tests
This directory contains comprehensive end-to-end tests for AWS IAM policy variables in RustFS.
## Test Overview
The tests cover the following AWS policy variable scenarios:
1. **Single-value variables** - Basic variable resolution like `${aws:username}`
2. **Multi-value variables** - Variables that can have multiple values
3. **Variable concatenation** - Combining variables with static text like `prefix-${aws:username}-suffix`
4. **Nested variables** - Complex nested variable patterns like `${${aws:username}-test}`
5. **Deny scenarios** - Testing deny policies with variables
## Prerequisites
- RustFS server binary
- `awscurl` utility for admin API calls
- AWS SDK for Rust (included in the project)
## Running Tests
### Run All Policy Tests Using Unified Test Runner
```bash
# Run all policy tests with comprehensive reporting
# Note: Requires a RustFS server running on localhost:9000
cargo test -p e2e_test policy::test_runner::test_policy_full_suite -- --nocapture --ignored --test-threads=1
# Run only critical policy tests
cargo test -p e2e_test policy::test_runner::test_policy_critical_suite -- --nocapture --ignored --test-threads=1
```
### Run All Policy Tests
```bash
# From the project root directory
cargo test -p e2e_test policy:: -- --nocapture --ignored --test-threads=1
```

View File

@@ -0,0 +1,22 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Policy-specific tests for RustFS
//!
//! This module provides comprehensive tests for AWS IAM policy variables
//! including single-value, multi-value, and nested variable scenarios.
mod policy_variables_test;
mod test_env;
mod test_runner;

View File

@@ -0,0 +1,798 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Tests for AWS IAM policy variables with single-value, multi-value, and nested scenarios
use crate::common::{awscurl_put, init_logging};
use crate::policy::test_env::PolicyTestEnvironment;
use aws_sdk_s3::primitives::ByteStream;
use serial_test::serial;
use tracing::info;
/// Helper function to create a regular user with given credentials
async fn create_user(
env: &PolicyTestEnvironment,
username: &str,
password: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let create_user_body = serde_json::json!({
"secretKey": password,
"status": "enabled"
})
.to_string();
let create_user_url = format!("{}/rustfs/admin/v3/add-user?accessKey={}", env.url, username);
awscurl_put(&create_user_url, &create_user_body, &env.access_key, &env.secret_key).await?;
Ok(())
}
/// Helper function to create an STS user with given credentials
async fn create_sts_user(
env: &PolicyTestEnvironment,
username: &str,
password: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// For STS, we create a regular user first, then use it to assume roles
create_user(env, username, password).await?;
Ok(())
}
/// Helper function to create and attach a policy
async fn create_and_attach_policy(
env: &PolicyTestEnvironment,
policy_name: &str,
username: &str,
policy_document: serde_json::Value,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let policy_string = policy_document.to_string();
// Create policy
let add_policy_url = format!("{}/rustfs/admin/v3/add-canned-policy?name={}", env.url, policy_name);
awscurl_put(&add_policy_url, &policy_string, &env.access_key, &env.secret_key).await?;
// Attach policy to user
let attach_policy_url = format!(
"{}/rustfs/admin/v3/set-user-or-group-policy?policyName={}&userOrGroup={}&isGroup=false",
env.url, policy_name, username
);
awscurl_put(&attach_policy_url, "", &env.access_key, &env.secret_key).await?;
Ok(())
}
/// Helper function to clean up test resources
async fn cleanup_user_and_policy(env: &PolicyTestEnvironment, username: &str, policy_name: &str) {
// Create admin client for cleanup
let admin_client = env.create_s3_client(&env.access_key, &env.secret_key);
// Delete buckets that might have been created by this user
let bucket_patterns = [
format!("{username}-test-bucket"),
format!("{username}-bucket1"),
format!("{username}-bucket2"),
format!("{username}-bucket3"),
format!("prefix-{username}-suffix"),
format!("{username}-test"),
format!("{username}-sts-bucket"),
format!("{username}-service-bucket"),
"private-test-bucket".to_string(), // For deny test
];
// Try to delete objects and buckets
for bucket_name in &bucket_patterns {
let _ = admin_client
.delete_object()
.bucket(bucket_name)
.key("test-object.txt")
.send()
.await;
let _ = admin_client
.delete_object()
.bucket(bucket_name)
.key("test-sts-object.txt")
.send()
.await;
let _ = admin_client
.delete_object()
.bucket(bucket_name)
.key("test-service-object.txt")
.send()
.await;
let _ = admin_client.delete_bucket().bucket(bucket_name).send().await;
}
// Remove user
let remove_user_url = format!("{}/rustfs/admin/v3/remove-user?accessKey={}", env.url, username);
let _ = awscurl_put(&remove_user_url, "", &env.access_key, &env.secret_key).await;
// Remove policy
let remove_policy_url = format!("{}/rustfs/admin/v3/remove-canned-policy?name={}", env.url, policy_name);
let _ = awscurl_put(&remove_policy_url, "", &env.access_key, &env.secret_key).await;
}
/// Test AWS policy variables with single-value scenarios
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_single_value() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
test_aws_policy_variables_single_value_impl().await
}
/// Implementation function for single-value policy variables test
pub async fn test_aws_policy_variables_single_value_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("Starting AWS policy variables single-value test");
let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;
test_aws_policy_variables_single_value_impl_with_env(&env).await
}
/// Implementation function for single-value policy variables test with shared environment
pub async fn test_aws_policy_variables_single_value_impl_with_env(
env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Create test user
let test_user = "testuser1";
let test_password = "testpassword123";
let policy_name = "test-single-value-policy";
// Create cleanup function
let cleanup = || async {
cleanup_user_and_policy(env, test_user, policy_name).await;
};
let create_user_body = serde_json::json!({
"secretKey": test_password,
"status": "enabled"
})
.to_string();
let create_user_url = format!("{}/rustfs/admin/v3/add-user?accessKey={}", env.url, test_user);
awscurl_put(&create_user_url, &create_user_body, &env.access_key, &env.secret_key).await?;
// Create policy with single-value AWS variables
let policy_document = serde_json::json!({
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["s3:ListAllMyBuckets"],
"Resource": ["arn:aws:s3:::*"]
},
{
"Effect": "Allow",
"Action": ["s3:CreateBucket"],
"Resource": [format!("arn:aws:s3:::{}-*", "${aws:username}")]
},
{
"Effect": "Allow",
"Action": ["s3:ListBucket"],
"Resource": [format!("arn:aws:s3:::{}-*", "${aws:username}")]
},
{
"Effect": "Allow",
"Action": ["s3:PutObject", "s3:GetObject"],
"Resource": [format!("arn:aws:s3:::{}-*/*", "${aws:username}")]
}
]
})
.to_string();
let add_policy_url = format!("{}/rustfs/admin/v3/add-canned-policy?name={}", env.url, policy_name);
awscurl_put(&add_policy_url, &policy_document, &env.access_key, &env.secret_key).await?;
// Attach policy to user
let attach_policy_url = format!(
"{}/rustfs/admin/v3/set-user-or-group-policy?policyName={}&userOrGroup={}&isGroup=false",
env.url, policy_name, test_user
);
awscurl_put(&attach_policy_url, "", &env.access_key, &env.secret_key).await?;
// Create S3 client for test user
let test_client = env.create_s3_client(test_user, test_password);
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
// Test 1: User should be able to list buckets (allowed by policy)
info!("Test 1: User listing buckets");
let list_result = test_client.list_buckets().send().await;
if let Err(e) = list_result {
cleanup().await;
return Err(format!("User should be able to list buckets: {e}").into());
}
// Test 2: User should be able to create bucket matching username pattern
info!("Test 2: User creating bucket matching pattern");
let bucket_name = format!("{test_user}-test-bucket");
let create_result = test_client.create_bucket().bucket(&bucket_name).send().await;
if let Err(e) = create_result {
cleanup().await;
return Err(format!("User should be able to create bucket matching username pattern: {e}").into());
}
// Test 3: User should be able to list objects in their own bucket
info!("Test 3: User listing objects in their bucket");
let list_objects_result = test_client.list_objects_v2().bucket(&bucket_name).send().await;
if let Err(e) = list_objects_result {
cleanup().await;
return Err(format!("User should be able to list objects in their own bucket: {e}").into());
}
// Test 4: User should be able to put object in their own bucket
info!("Test 4: User putting object in their bucket");
let put_result = test_client
.put_object()
.bucket(&bucket_name)
.key("test-object.txt")
.body(ByteStream::from_static(b"Hello, Policy Variables!"))
.send()
.await;
if let Err(e) = put_result {
cleanup().await;
return Err(format!("User should be able to put object in their own bucket: {e}").into());
}
// Test 5: User should be able to get object from their own bucket
info!("Test 5: User getting object from their bucket");
let get_result = test_client
.get_object()
.bucket(&bucket_name)
.key("test-object.txt")
.send()
.await;
if let Err(e) = get_result {
cleanup().await;
return Err(format!("User should be able to get object from their own bucket: {e}").into());
}
// Test 6: User should NOT be able to create bucket NOT matching username pattern
info!("Test 6: User attempting to create bucket NOT matching pattern");
let other_bucket_name = "other-user-bucket";
let create_other_result = test_client.create_bucket().bucket(other_bucket_name).send().await;
if create_other_result.is_ok() {
cleanup().await;
return Err("User should NOT be able to create bucket NOT matching username pattern".into());
}
// Cleanup
info!("Cleaning up test resources");
cleanup().await;
info!("AWS policy variables single-value test completed successfully");
Ok(())
}
/// Test AWS policy variables with multi-value scenarios
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_multi_value() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
test_aws_policy_variables_multi_value_impl().await
}
/// Implementation function for multi-value policy variables test
pub async fn test_aws_policy_variables_multi_value_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("Starting AWS policy variables multi-value test");
let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;
test_aws_policy_variables_multi_value_impl_with_env(&env).await
}
/// Implementation function for multi-value policy variables test with shared environment
pub async fn test_aws_policy_variables_multi_value_impl_with_env(
env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Create test user
let test_user = "testuser2";
let test_password = "testpassword123";
let policy_name = "test-multi-value-policy";
// Create cleanup function
let cleanup = || async {
cleanup_user_and_policy(env, test_user, policy_name).await;
};
// Create user
create_user(env, test_user, test_password).await?;
// Create policy with multi-value AWS variables
let policy_document = serde_json::json!({
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["s3:ListAllMyBuckets"],
"Resource": ["arn:aws:s3:::*"]
},
{
"Effect": "Allow",
"Action": ["s3:CreateBucket"],
"Resource": [
format!("arn:aws:s3:::{}-bucket1", "${aws:username}"),
format!("arn:aws:s3:::{}-bucket2", "${aws:username}"),
format!("arn:aws:s3:::{}-bucket3", "${aws:username}")
]
},
{
"Effect": "Allow",
"Action": ["s3:ListBucket"],
"Resource": [
format!("arn:aws:s3:::{}-bucket1", "${aws:username}"),
format!("arn:aws:s3:::{}-bucket2", "${aws:username}"),
format!("arn:aws:s3:::{}-bucket3", "${aws:username}")
]
}
]
});
create_and_attach_policy(env, policy_name, test_user, policy_document).await?;
// Create S3 client for test user
let test_client = env.create_s3_client(test_user, test_password);
// Test 1: User should be able to create buckets matching any of the multi-value patterns
info!("Test 1: User creating first bucket matching multi-value pattern");
let bucket1_name = format!("{test_user}-bucket1");
let create_result1 = test_client.create_bucket().bucket(&bucket1_name).send().await;
if let Err(e) = create_result1 {
cleanup().await;
return Err(format!("User should be able to create first bucket matching multi-value pattern: {e}").into());
}
info!("Test 2: User creating second bucket matching multi-value pattern");
let bucket2_name = format!("{test_user}-bucket2");
let create_result2 = test_client.create_bucket().bucket(&bucket2_name).send().await;
if let Err(e) = create_result2 {
cleanup().await;
return Err(format!("User should be able to create second bucket matching multi-value pattern: {e}").into());
}
info!("Test 3: User creating third bucket matching multi-value pattern");
let bucket3_name = format!("{test_user}-bucket3");
let create_result3 = test_client.create_bucket().bucket(&bucket3_name).send().await;
if let Err(e) = create_result3 {
cleanup().await;
return Err(format!("User should be able to create third bucket matching multi-value pattern: {e}").into());
}
// Test 4: User should NOT be able to create bucket NOT matching any multi-value pattern
info!("Test 4: User attempting to create bucket NOT matching any pattern");
let other_bucket_name = format!("{test_user}-other-bucket");
let create_other_result = test_client.create_bucket().bucket(&other_bucket_name).send().await;
if create_other_result.is_ok() {
cleanup().await;
return Err("User should NOT be able to create bucket NOT matching any multi-value pattern".into());
}
// Test 5: User should be able to list objects in their allowed buckets
info!("Test 5: User listing objects in allowed buckets");
let list_objects_result1 = test_client.list_objects_v2().bucket(&bucket1_name).send().await;
if let Err(e) = list_objects_result1 {
cleanup().await;
return Err(format!("User should be able to list objects in first allowed bucket: {e}").into());
}
let list_objects_result2 = test_client.list_objects_v2().bucket(&bucket2_name).send().await;
if let Err(e) = list_objects_result2 {
cleanup().await;
return Err(format!("User should be able to list objects in second allowed bucket: {e}").into());
}
// Cleanup
info!("Cleaning up test resources");
cleanup().await;
info!("AWS policy variables multi-value test completed successfully");
Ok(())
}
/// Test AWS policy variables with variable concatenation
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_concatenation() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
test_aws_policy_variables_concatenation_impl().await
}
/// Implementation function for concatenation policy variables test
pub async fn test_aws_policy_variables_concatenation_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("Starting AWS policy variables concatenation test");
let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;
test_aws_policy_variables_concatenation_impl_with_env(&env).await
}
/// Implementation function for concatenation policy variables test with shared environment
pub async fn test_aws_policy_variables_concatenation_impl_with_env(
env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Create test user
let test_user = "testuser3";
let test_password = "testpassword123";
let policy_name = "test-concatenation-policy";
// Create cleanup function
let cleanup = || async {
cleanup_user_and_policy(env, test_user, policy_name).await;
};
// Create user
create_user(env, test_user, test_password).await?;
// Create policy with variable concatenation
let policy_document = serde_json::json!({
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["s3:ListAllMyBuckets"],
"Resource": ["arn:aws:s3:::*"]
},
{
"Effect": "Allow",
"Action": ["s3:CreateBucket"],
"Resource": [format!("arn:aws:s3:::prefix-{}-suffix", "${aws:username}")]
},
{
"Effect": "Allow",
"Action": ["s3:ListBucket"],
"Resource": [format!("arn:aws:s3:::prefix-{}-suffix", "${aws:username}")]
}
]
});
create_and_attach_policy(env, policy_name, test_user, policy_document).await?;
// Create S3 client for test user
let test_client = env.create_s3_client(test_user, test_password);
// Add a small delay to allow policy to propagate
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
// Test: User should be able to create bucket matching concatenated pattern
info!("Test: User creating bucket matching concatenated pattern");
let bucket_name = format!("prefix-{test_user}-suffix");
let create_result = test_client.create_bucket().bucket(&bucket_name).send().await;
if let Err(e) = create_result {
cleanup().await;
return Err(format!("User should be able to create bucket matching concatenated pattern: {e}").into());
}
// Test: User should be able to list objects in the concatenated pattern bucket
info!("Test: User listing objects in concatenated pattern bucket");
let list_objects_result = test_client.list_objects_v2().bucket(&bucket_name).send().await;
if let Err(e) = list_objects_result {
cleanup().await;
return Err(format!("User should be able to list objects in concatenated pattern bucket: {e}").into());
}
// Cleanup
info!("Cleaning up test resources");
cleanup().await;
info!("AWS policy variables concatenation test completed successfully");
Ok(())
}
/// Test AWS policy variables with nested scenarios
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_nested() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
test_aws_policy_variables_nested_impl().await
}
/// Implementation function for nested policy variables test
pub async fn test_aws_policy_variables_nested_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("Starting AWS policy variables nested test");
let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;
test_aws_policy_variables_nested_impl_with_env(&env).await
}
/// Test AWS policy variables with STS temporary credentials
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_sts() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
test_aws_policy_variables_sts_impl().await
}
/// Implementation function for STS policy variables test
pub async fn test_aws_policy_variables_sts_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("Starting AWS policy variables STS test");
let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;
test_aws_policy_variables_sts_impl_with_env(&env).await
}
/// Implementation function for nested policy variables test with shared environment
pub async fn test_aws_policy_variables_nested_impl_with_env(
env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Create test user
let test_user = "testuser4";
let test_password = "testpassword123";
let policy_name = "test-nested-policy";
// Create cleanup function
let cleanup = || async {
cleanup_user_and_policy(env, test_user, policy_name).await;
};
// Create user
create_user(env, test_user, test_password).await?;
// Create policy with nested variables - this tests complex variable resolution
let policy_document = serde_json::json!({
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["s3:ListAllMyBuckets"],
"Resource": ["arn:aws:s3:::*"]
},
{
"Effect": "Allow",
"Action": ["s3:CreateBucket"],
"Resource": ["arn:aws:s3:::${${aws:username}-test}"]
},
{
"Effect": "Allow",
"Action": ["s3:ListBucket"],
"Resource": ["arn:aws:s3:::${${aws:username}-test}"]
}
]
});
create_and_attach_policy(env, policy_name, test_user, policy_document).await?;
// Create S3 client for test user
let test_client = env.create_s3_client(test_user, test_password);
// Add a small delay to allow policy to propagate
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
// Test nested variable resolution
info!("Test: Nested variable resolution");
// Create bucket with expected resolved name
let expected_bucket = format!("{test_user}-test");
// Attempt to create bucket with resolved name
let create_result = test_client.create_bucket().bucket(&expected_bucket).send().await;
// Verify bucket creation succeeds (nested variable resolved correctly)
if let Err(e) = create_result {
cleanup().await;
return Err(format!("User should be able to create bucket with nested variable: {e}").into());
}
// Verify bucket creation fails with unresolved variable
let unresolved_bucket = format!("${{}}-test {test_user}");
let create_unresolved = test_client.create_bucket().bucket(&unresolved_bucket).send().await;
if create_unresolved.is_ok() {
cleanup().await;
return Err("User should NOT be able to create bucket with unresolved variable".into());
}
// Cleanup
info!("Cleaning up test resources");
cleanup().await;
info!("AWS policy variables nested test completed successfully");
Ok(())
}
/// Implementation function for STS policy variables test with shared environment
pub async fn test_aws_policy_variables_sts_impl_with_env(
env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Create test user for STS
let test_user = "testuser-sts";
let test_password = "testpassword123";
let policy_name = "test-sts-policy";
// Create cleanup function
let cleanup = || async {
cleanup_user_and_policy(env, test_user, policy_name).await;
};
// Create STS user
create_sts_user(env, test_user, test_password).await?;
// Create policy with STS-compatible variables
let policy_document = serde_json::json!({
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["s3:ListAllMyBuckets"],
"Resource": ["arn:aws:s3:::*"]
},
{
"Effect": "Allow",
"Action": ["s3:CreateBucket"],
"Resource": [format!("arn:aws:s3:::{}-sts-bucket", "${aws:username}")]
},
{
"Effect": "Allow",
"Action": ["s3:ListBucket", "s3:PutObject", "s3:GetObject"],
"Resource": [format!("arn:aws:s3:::{}-sts-bucket/*", "${aws:username}")]
}
]
});
create_and_attach_policy(env, policy_name, test_user, policy_document).await?;
// Create S3 client for test user
let test_client = env.create_s3_client(test_user, test_password);
// Add a small delay to allow policy to propagate
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
// Test: User should be able to create bucket matching STS pattern
info!("Test: User creating bucket matching STS pattern");
let bucket_name = format!("{test_user}-sts-bucket");
let create_result = test_client.create_bucket().bucket(&bucket_name).send().await;
if let Err(e) = create_result {
cleanup().await;
return Err(format!("User should be able to create STS bucket: {e}").into());
}
// Test: User should be able to put object in STS bucket
info!("Test: User putting object in STS bucket");
let put_result = test_client
.put_object()
.bucket(&bucket_name)
.key("test-sts-object.txt")
.body(ByteStream::from_static(b"STS Test Object"))
.send()
.await;
if let Err(e) = put_result {
cleanup().await;
return Err(format!("User should be able to put object in STS bucket: {e}").into());
}
// Test: User should be able to get object from STS bucket
info!("Test: User getting object from STS bucket");
let get_result = test_client
.get_object()
.bucket(&bucket_name)
.key("test-sts-object.txt")
.send()
.await;
if let Err(e) = get_result {
cleanup().await;
return Err(format!("User should be able to get object from STS bucket: {e}").into());
}
// Test: User should be able to list objects in STS bucket
info!("Test: User listing objects in STS bucket");
let list_result = test_client.list_objects_v2().bucket(&bucket_name).send().await;
if let Err(e) = list_result {
cleanup().await;
return Err(format!("User should be able to list objects in STS bucket: {e}").into());
}
// Cleanup
info!("Cleaning up test resources");
cleanup().await;
info!("AWS policy variables STS test completed successfully");
Ok(())
}
/// Test AWS policy variables with deny scenarios
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_deny() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
test_aws_policy_variables_deny_impl().await
}
/// Implementation function for deny policy variables test
pub async fn test_aws_policy_variables_deny_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("Starting AWS policy variables deny test");
let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;
test_aws_policy_variables_deny_impl_with_env(&env).await
}
/// Implementation function for deny policy variables test with shared environment
pub async fn test_aws_policy_variables_deny_impl_with_env(
env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Create test user
let test_user = "testuser5";
let test_password = "testpassword123";
let policy_name = "test-deny-policy";
// Create cleanup function
let cleanup = || async {
cleanup_user_and_policy(env, test_user, policy_name).await;
};
// Create user
create_user(env, test_user, test_password).await?;
// Create policy with both allow and deny statements
let policy_document = serde_json::json!({
"Version": "2012-10-17",
"Statement": [
// Allow general access
{
"Effect": "Allow",
"Action": ["s3:ListAllMyBuckets"],
"Resource": ["arn:aws:s3:::*"]
},
// Allow creating buckets matching username pattern
{
"Effect": "Allow",
"Action": ["s3:CreateBucket"],
"Resource": [format!("arn:aws:s3:::{}-*", "${aws:username}")]
},
// Deny creating buckets with "private" in the name
{
"Effect": "Deny",
"Action": ["s3:CreateBucket"],
"Resource": ["arn:aws:s3:::*private*"]
}
]
});
create_and_attach_policy(env, policy_name, test_user, policy_document).await?;
// Create S3 client for test user
let test_client = env.create_s3_client(test_user, test_password);
// Add a small delay to allow policy to propagate
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
// Test 1: User should be able to create bucket matching username pattern
info!("Test 1: User creating bucket matching username pattern");
let bucket_name = format!("{test_user}-test-bucket");
let create_result = test_client.create_bucket().bucket(&bucket_name).send().await;
if let Err(e) = create_result {
cleanup().await;
return Err(format!("User should be able to create bucket matching username pattern: {e}").into());
}
// Test 2: User should NOT be able to create bucket with "private" in the name (deny rule)
info!("Test 2: User attempting to create bucket with 'private' in name (should be denied)");
let private_bucket_name = "private-test-bucket";
let create_private_result = test_client.create_bucket().bucket(private_bucket_name).send().await;
if create_private_result.is_ok() {
cleanup().await;
return Err("User should NOT be able to create bucket with 'private' in name due to deny rule".into());
}
// Cleanup
info!("Cleaning up test resources");
cleanup().await;
info!("AWS policy variables deny test completed successfully");
Ok(())
}

View File

@@ -0,0 +1,100 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Custom test environment for policy variables tests
//!
//! This module provides a custom test environment that doesn't automatically
//! stop servers when destroyed, so a shared RustFS server can keep running across test cases.
use aws_sdk_s3::Client;
use aws_sdk_s3::config::{Config, Credentials, Region};
use std::net::TcpStream;
use std::time::Duration;
use tokio::time::sleep;
use tracing::{info, warn};
// Default credentials
const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
const DEFAULT_SECRET_KEY: &str = "rustfsadmin";
/// Custom test environment that doesn't automatically stop servers
pub struct PolicyTestEnvironment {
pub temp_dir: String,
pub address: String,
pub url: String,
pub access_key: String,
pub secret_key: String,
}
impl PolicyTestEnvironment {
/// Create a new test environment with specific address
/// This environment won't stop any server when dropped
pub async fn with_address(address: &str) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
let temp_dir = format!("/tmp/rustfs_policy_test_{}", uuid::Uuid::new_v4());
tokio::fs::create_dir_all(&temp_dir).await?;
let url = format!("http://{address}");
Ok(Self {
temp_dir,
address: address.to_string(),
url,
access_key: DEFAULT_ACCESS_KEY.to_string(),
secret_key: DEFAULT_SECRET_KEY.to_string(),
})
}
/// Create an AWS S3 client configured for this RustFS instance
pub fn create_s3_client(&self, access_key: &str, secret_key: &str) -> Client {
let credentials = Credentials::new(access_key, secret_key, None, None, "policy-test");
let config = Config::builder()
.credentials_provider(credentials)
.region(Region::new("us-east-1"))
.endpoint_url(&self.url)
.force_path_style(true)
.behavior_version_latest()
.build();
Client::from_conf(config)
}
/// Wait for RustFS server to be ready by checking TCP connectivity
pub async fn wait_for_server_ready(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
info!("Waiting for RustFS server to be ready on {}", self.address);
for i in 0..30 {
if TcpStream::connect(&self.address).is_ok() {
info!("✅ RustFS server is ready after {} attempts", i + 1);
return Ok(());
}
if i == 29 {
return Err("RustFS server failed to become ready within 30 seconds".into());
}
sleep(Duration::from_secs(1)).await;
}
Ok(())
}
}
// Implement Drop trait that doesn't stop servers
impl Drop for PolicyTestEnvironment {
fn drop(&mut self) {
// Clean up temp directory only, don't stop any server
if let Err(e) = std::fs::remove_dir_all(&self.temp_dir) {
warn!("Failed to clean up temp directory {}: {}", self.temp_dir, e);
}
}
}

View File

@@ -0,0 +1,247 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::common::init_logging;
use crate::policy::test_env::PolicyTestEnvironment;
use serial_test::serial;
use std::time::Instant;
use tokio::time::{Duration, sleep};
use tracing::{error, info};
/// Core test categories
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TestCategory {
SingleValue,
MultiValue,
Concatenation,
Nested,
DenyScenarios,
}
impl TestCategory {}
/// Test case definition
#[derive(Debug, Clone)]
pub struct TestDefinition {
pub name: String,
#[allow(dead_code)]
pub category: TestCategory,
pub is_critical: bool,
}
impl TestDefinition {
pub fn new(name: impl Into<String>, category: TestCategory, is_critical: bool) -> Self {
Self {
name: name.into(),
category,
is_critical,
}
}
}
/// Test result
#[derive(Debug, Clone)]
pub struct TestResult {
pub test_name: String,
pub success: bool,
pub error_message: Option<String>,
}
impl TestResult {
pub fn success(test_name: String) -> Self {
Self {
test_name,
success: true,
error_message: None,
}
}
pub fn failure(test_name: String, error: String) -> Self {
Self {
test_name,
success: false,
error_message: Some(error),
}
}
}
/// Test suite configuration
#[derive(Debug, Clone, Default)]
pub struct TestSuiteConfig {
pub include_critical_only: bool,
}
/// Policy test suite
pub struct PolicyTestSuite {
tests: Vec<TestDefinition>,
config: TestSuiteConfig,
}
impl PolicyTestSuite {
/// Create default test suite
pub fn new() -> Self {
let tests = vec![
TestDefinition::new("test_aws_policy_variables_single_value", TestCategory::SingleValue, true),
TestDefinition::new("test_aws_policy_variables_multi_value", TestCategory::MultiValue, true),
TestDefinition::new("test_aws_policy_variables_concatenation", TestCategory::Concatenation, true),
TestDefinition::new("test_aws_policy_variables_nested", TestCategory::Nested, true),
TestDefinition::new("test_aws_policy_variables_deny", TestCategory::DenyScenarios, true),
TestDefinition::new("test_aws_policy_variables_sts", TestCategory::SingleValue, true),
];
Self {
tests,
config: TestSuiteConfig::default(),
}
}
/// Configure test suite
pub fn with_config(mut self, config: TestSuiteConfig) -> Self {
self.config = config;
self
}
/// Run test suite
pub async fn run_test_suite(&self) -> Vec<TestResult> {
init_logging();
info!("Starting Policy Variables test suite");
let start_time = Instant::now();
let mut results = Vec::new();
// Create test environment
let env = match PolicyTestEnvironment::with_address("127.0.0.1:9000").await {
Ok(env) => env,
Err(e) => {
error!("Failed to create test environment: {}", e);
return vec![TestResult::failure("env_creation".into(), e.to_string())];
}
};
// Wait for server to be ready
if env.wait_for_server_ready().await.is_err() {
error!("Server is not ready");
return vec![TestResult::failure("server_check".into(), "Server not ready".into())];
}
// Filter tests
let tests_to_run: Vec<&TestDefinition> = self
.tests
.iter()
.filter(|test| !self.config.include_critical_only || test.is_critical)
.collect();
info!("Scheduled {} tests", tests_to_run.len());
// Run tests
for (i, test_def) in tests_to_run.iter().enumerate() {
info!("Running test {}/{}: {}", i + 1, tests_to_run.len(), test_def.name);
let test_start = Instant::now();
let result = self.run_single_test(test_def, &env).await;
let test_duration = test_start.elapsed();
match result {
Ok(_) => {
info!("Test passed: {} ({:.2}s)", test_def.name, test_duration.as_secs_f64());
results.push(TestResult::success(test_def.name.clone()));
}
Err(e) => {
error!("Test failed: {} ({:.2}s): {}", test_def.name, test_duration.as_secs_f64(), e);
results.push(TestResult::failure(test_def.name.clone(), e.to_string()));
}
}
// Delay between tests to avoid resource conflicts
if i < tests_to_run.len() - 1 {
sleep(Duration::from_secs(2)).await;
}
}
// Print summary
self.print_summary(&results, start_time.elapsed());
results
}
/// Run a single test
async fn run_single_test(
&self,
test_def: &TestDefinition,
env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match test_def.name.as_str() {
"test_aws_policy_variables_single_value" => {
super::policy_variables_test::test_aws_policy_variables_single_value_impl_with_env(env).await
}
"test_aws_policy_variables_multi_value" => {
super::policy_variables_test::test_aws_policy_variables_multi_value_impl_with_env(env).await
}
"test_aws_policy_variables_concatenation" => {
super::policy_variables_test::test_aws_policy_variables_concatenation_impl_with_env(env).await
}
"test_aws_policy_variables_nested" => {
super::policy_variables_test::test_aws_policy_variables_nested_impl_with_env(env).await
}
"test_aws_policy_variables_deny" => {
super::policy_variables_test::test_aws_policy_variables_deny_impl_with_env(env).await
}
"test_aws_policy_variables_sts" => {
super::policy_variables_test::test_aws_policy_variables_sts_impl_with_env(env).await
}
_ => Err(format!("Test {} not implemented", test_def.name).into()),
}
}
/// Print test summary
fn print_summary(&self, results: &[TestResult], total_duration: Duration) {
info!("=== Test Suite Summary ===");
info!("Total duration: {:.2}s", total_duration.as_secs_f64());
info!("Total tests: {}", results.len());
let passed = results.iter().filter(|r| r.success).count();
let failed = results.len() - passed;
let success_rate = (passed as f64 / results.len() as f64) * 100.0;
info!("Passed: {} | Failed: {}", passed, failed);
info!("Success rate: {:.1}%", success_rate);
if failed > 0 {
error!("Failed tests:");
for result in results.iter().filter(|r| !r.success) {
error!(" - {}: {}", result.test_name, result.error_message.as_ref().unwrap());
}
}
}
}
/// Test suite
#[tokio::test]
#[serial]
#[ignore = "Connects to existing rustfs server"]
async fn test_policy_critical_suite() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let config = TestSuiteConfig {
include_critical_only: true,
};
let suite = PolicyTestSuite::new().with_config(config);
let results = suite.run_test_suite().await;
let failed = results.iter().filter(|r| !r.success).count();
if failed > 0 {
return Err(format!("Critical tests failed: {failed} failures").into());
}
info!("All critical tests passed");
Ok(())
}
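A companion sketch, not part of the diff above and using only the PolicyTestSuite API shown here: the same pattern can drive the full suite instead of the critical-only subset.
#[tokio::test]
#[serial]
#[ignore = "Connects to existing rustfs server"]
async fn test_policy_full_suite() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // include_critical_only = false keeps every TestDefinition in the schedule
    let suite = PolicyTestSuite::new().with_config(TestSuiteConfig {
        include_critical_only: false,
    });
    let results = suite.run_test_suite().await;
    let failed = results.iter().filter(|r| !r.success).count();
    if failed > 0 {
        return Err(format!("Policy tests failed: {failed} failures").into());
    }
    Ok(())
}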

View File

@@ -127,12 +127,12 @@ async fn test_get_deleted_object_returns_nosuchkey() -> Result<(), Box<dyn std::
info!("Service error code: {:?}", s3_err.meta().code());
// The error should be NoSuchKey
assert!(s3_err.is_no_such_key(), "Error should be NoSuchKey, got: {:?}", s3_err);
assert!(s3_err.is_no_such_key(), "Error should be NoSuchKey, got: {s3_err:?}");
info!("✅ Test passed: GetObject on deleted object correctly returns NoSuchKey");
}
other_err => {
panic!("Expected ServiceError with NoSuchKey, but got: {:?}", other_err);
panic!("Expected ServiceError with NoSuchKey, but got: {other_err:?}");
}
}
@@ -182,13 +182,12 @@ async fn test_head_deleted_object_returns_nosuchkey() -> Result<(), Box<dyn std:
let s3_err = service_err.into_err();
assert!(
s3_err.meta().code() == Some("NoSuchKey") || s3_err.meta().code() == Some("NotFound"),
"Error should be NoSuchKey or NotFound, got: {:?}",
s3_err
"Error should be NoSuchKey or NotFound, got: {s3_err:?}"
);
info!("✅ HeadObject correctly returns NoSuchKey/NotFound");
}
other_err => {
panic!("Expected ServiceError but got: {:?}", other_err);
panic!("Expected ServiceError but got: {other_err:?}");
}
}
@@ -220,11 +219,11 @@ async fn test_get_nonexistent_object_returns_nosuchkey() -> Result<(), Box<dyn s
match get_result.unwrap_err() {
SdkError::ServiceError(service_err) => {
let s3_err = service_err.into_err();
assert!(s3_err.is_no_such_key(), "Error should be NoSuchKey, got: {:?}", s3_err);
assert!(s3_err.is_no_such_key(), "Error should be NoSuchKey, got: {s3_err:?}");
info!("✅ GetObject correctly returns NoSuchKey for non-existent object");
}
other_err => {
panic!("Expected ServiceError with NoSuchKey, but got: {:?}", other_err);
panic!("Expected ServiceError with NoSuchKey, but got: {other_err:?}");
}
}
@@ -266,15 +265,15 @@ async fn test_multiple_gets_deleted_object() -> Result<(), Box<dyn std::error::E
info!("Attempt {} to get deleted object", i);
let get_result = client.get_object().bucket(BUCKET).key(key).send().await;
assert!(get_result.is_err(), "Attempt {}: should return error", i);
assert!(get_result.is_err(), "Attempt {i}: should return error");
match get_result.unwrap_err() {
SdkError::ServiceError(service_err) => {
let s3_err = service_err.into_err();
assert!(s3_err.is_no_such_key(), "Attempt {}: Error should be NoSuchKey, got: {:?}", i, s3_err);
assert!(s3_err.is_no_such_key(), "Attempt {i}: Error should be NoSuchKey, got: {s3_err:?}");
}
other_err => {
panic!("Attempt {}: Expected ServiceError but got: {:?}", i, other_err);
panic!("Attempt {i}: Expected ServiceError but got: {other_err:?}");
}
}
}

View File

@@ -0,0 +1,138 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Test for HeadObject on deleted objects with versioning enabled
//!
//! This test reproduces the issue where HeadObject on a deleted object (whose
//! latest version is a delete marker) returns 200 OK instead of 404 NoSuchKey
//! when versioning is enabled.
#![cfg(test)]
use aws_config::meta::region::RegionProviderChain;
use aws_sdk_s3::Client;
use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::error::SdkError;
use aws_sdk_s3::types::{BucketVersioningStatus, VersioningConfiguration};
use bytes::Bytes;
use serial_test::serial;
use std::error::Error;
use tracing::info;
const ENDPOINT: &str = "http://localhost:9000";
const ACCESS_KEY: &str = "rustfsadmin";
const SECRET_KEY: &str = "rustfsadmin";
const BUCKET: &str = "test-head-deleted-versioning-bucket";
async fn create_aws_s3_client() -> Result<Client, Box<dyn Error>> {
let region_provider = RegionProviderChain::default_provider().or_else(Region::new("us-east-1"));
let shared_config = aws_config::defaults(aws_config::BehaviorVersion::latest())
.region(region_provider)
.credentials_provider(Credentials::new(ACCESS_KEY, SECRET_KEY, None, None, "static"))
.endpoint_url(ENDPOINT)
.load()
.await;
let client = Client::from_conf(
aws_sdk_s3::Config::from(&shared_config)
.to_builder()
.force_path_style(true)
.build(),
);
Ok(client)
}
/// Set up the test bucket (creating it if it doesn't exist) and enable versioning
async fn setup_test_bucket(client: &Client) -> Result<(), Box<dyn Error>> {
match client.create_bucket().bucket(BUCKET).send().await {
Ok(_) => {}
Err(SdkError::ServiceError(e)) => {
let e = e.into_err();
let error_code = e.meta().code().unwrap_or("");
if !error_code.eq("BucketAlreadyExists") && !error_code.eq("BucketAlreadyOwnedByYou") {
return Err(e.into());
}
}
Err(e) => {
return Err(e.into());
}
}
// Enable versioning
client
.put_bucket_versioning()
.bucket(BUCKET)
.versioning_configuration(
VersioningConfiguration::builder()
.status(BucketVersioningStatus::Enabled)
.build(),
)
.send()
.await?;
Ok(())
}
/// Test that HeadObject on a deleted object returns NoSuchKey when versioning is enabled
#[tokio::test]
#[serial]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn test_head_deleted_object_versioning_returns_nosuchkey() -> Result<(), Box<dyn std::error::Error>> {
let _ = tracing_subscriber::fmt()
.with_max_level(tracing::Level::INFO)
.with_test_writer()
.try_init();
info!("🧪 Starting test_head_deleted_object_versioning_returns_nosuchkey");
let client = create_aws_s3_client().await?;
setup_test_bucket(&client).await?;
let key = "test-head-deleted-versioning.txt";
let content = b"Test content for HeadObject with versioning";
// Upload and verify
client
.put_object()
.bucket(BUCKET)
.key(key)
.body(Bytes::from_static(content).into())
.send()
.await?;
// Delete the object (creates a delete marker)
client.delete_object().bucket(BUCKET).key(key).send().await?;
// Try to head the deleted object (latest version is delete marker)
let head_result = client.head_object().bucket(BUCKET).key(key).send().await;
assert!(head_result.is_err(), "HeadObject on deleted object should return an error");
match head_result.unwrap_err() {
SdkError::ServiceError(service_err) => {
let s3_err = service_err.into_err();
assert!(
s3_err.meta().code() == Some("NoSuchKey")
|| s3_err.meta().code() == Some("NotFound")
|| s3_err.meta().code() == Some("404"),
"Error should be NoSuchKey or NotFound, got: {s3_err:?}"
);
info!("✅ HeadObject correctly returns NoSuchKey/NotFound");
}
other_err => {
panic!("Expected ServiceError but got: {other_err:?}");
}
}
Ok(())
}
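A natural follow-up, sketched here rather than taken from the commit: the same bucket and helpers can be reused to assert that plain GetObject also fails once the latest version is a delete marker.
#[tokio::test]
#[serial]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn test_get_deleted_object_versioning_sketch() -> Result<(), Box<dyn std::error::Error>> {
    let client = create_aws_s3_client().await?;
    setup_test_bucket(&client).await?;
    let key = "test-get-deleted-versioning.txt";
    client
        .put_object()
        .bucket(BUCKET)
        .key(key)
        .body(Bytes::from_static(b"sketch content").into())
        .send()
        .await?;
    // Deleting without a version id creates a delete marker as the latest version.
    client.delete_object().bucket(BUCKET).key(key).send().await?;
    let get_result = client.get_object().bucket(BUCKET).key(key).send().await;
    assert!(get_result.is_err(), "GetObject on a delete marker should return an error");
    Ok(())
}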

View File

@@ -14,6 +14,7 @@
mod conditional_writes;
mod get_deleted_object_test;
mod head_deleted_object_versioning_test;
mod lifecycle;
mod lock;
mod node_interact_test;

View File

@@ -256,7 +256,7 @@ mod tests {
let output = result.unwrap();
let body_bytes = output.body.collect().await.unwrap().into_bytes();
assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for key '{}'", key);
assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for key '{key}'");
info!("✅ PUT/GET succeeded for key: {}", key);
}
@@ -472,7 +472,7 @@ mod tests {
info!("Testing COPY from '{}' to '{}'", src_key, dest_key);
// COPY object
let copy_source = format!("{}/{}", bucket, src_key);
let copy_source = format!("{bucket}/{src_key}");
let result = client
.copy_object()
.bucket(bucket)
@@ -543,7 +543,7 @@ mod tests {
let output = result.unwrap();
let body_bytes = output.body.collect().await.unwrap().into_bytes();
assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for Unicode key '{}'", key);
assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for Unicode key '{key}'");
info!("✅ PUT/GET succeeded for Unicode key: {}", key);
}
@@ -610,7 +610,7 @@ mod tests {
let output = result.unwrap();
let body_bytes = output.body.collect().await.unwrap().into_bytes();
assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for key '{}'", key);
assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for key '{key}'");
info!("✅ PUT/GET succeeded for key: {}", key);
}
@@ -658,7 +658,7 @@ mod tests {
// Note: The validation happens on the server side, so we expect an error
// For null byte, newline, and carriage return
if key.contains('\0') || key.contains('\n') || key.contains('\r') {
assert!(result.is_err(), "Control character should be rejected for key: {:?}", key);
assert!(result.is_err(), "Control character should be rejected for key: {key:?}");
if let Err(e) = result {
info!("✅ Control character correctly rejected: {:?}", e);
}

View File

@@ -0,0 +1,398 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Regression test for Issue #1066: Veeam VBR - S3 returned empty versionId
//!
//! This test verifies that:
//! 1. PutObject returns version_id when versioning is enabled
//! 2. CopyObject returns version_id when versioning is enabled
//! 3. CompleteMultipartUpload returns version_id when versioning is enabled
//! 4. Basic S3 operations still work correctly (no regression)
//! 5. Operations on non-versioned buckets work as expected
#[cfg(test)]
mod tests {
use crate::common::{RustFSTestEnvironment, init_logging};
use aws_sdk_s3::Client;
use aws_sdk_s3::primitives::ByteStream;
use aws_sdk_s3::types::{BucketVersioningStatus, CompletedMultipartUpload, CompletedPart, VersioningConfiguration};
use serial_test::serial;
use tracing::info;
fn create_s3_client(env: &RustFSTestEnvironment) -> Client {
env.create_s3_client()
}
async fn create_bucket(client: &Client, bucket: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match client.create_bucket().bucket(bucket).send().await {
Ok(_) => {
info!("✅ Bucket {} created successfully", bucket);
Ok(())
}
Err(e) => {
if e.to_string().contains("BucketAlreadyOwnedByYou") || e.to_string().contains("BucketAlreadyExists") {
info!(" Bucket {} already exists", bucket);
Ok(())
} else {
Err(Box::new(e))
}
}
}
}
async fn enable_versioning(client: &Client, bucket: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let versioning_config = VersioningConfiguration::builder()
.status(BucketVersioningStatus::Enabled)
.build();
client
.put_bucket_versioning()
.bucket(bucket)
.versioning_configuration(versioning_config)
.send()
.await?;
info!("✅ Versioning enabled for bucket {}", bucket);
Ok(())
}
/// Test 1: PutObject should return version_id when versioning is enabled
/// This directly addresses the Veeam issue from #1066
#[tokio::test]
#[serial]
async fn test_put_object_returns_version_id_with_versioning() {
init_logging();
info!("🧪 TEST: PutObject returns version_id with versioning enabled");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-put-version-id";
create_bucket(&client, bucket).await.expect("Failed to create bucket");
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
let key = "test-file.txt";
let content = b"Test content for version ID test";
info!("📤 Uploading object with key: {}", key);
let result = client
.put_object()
.bucket(bucket)
.key(key)
.body(ByteStream::from_static(content))
.send()
.await;
assert!(result.is_ok(), "PutObject failed: {:?}", result.err());
let output = result.unwrap();
info!("📥 PutObject response - version_id: {:?}", output.version_id);
assert!(
output.version_id.is_some(),
"❌ FAILED: version_id should be present when versioning is enabled"
);
assert!(
!output.version_id.as_ref().unwrap().is_empty(),
"❌ FAILED: version_id should not be empty"
);
info!("✅ PASSED: PutObject correctly returns version_id");
}
/// Test 2: CopyObject should return version_id when versioning is enabled
#[tokio::test]
#[serial]
async fn test_copy_object_returns_version_id_with_versioning() {
init_logging();
info!("🧪 TEST: CopyObject returns version_id with versioning enabled");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-copy-version-id";
create_bucket(&client, bucket).await.expect("Failed to create bucket");
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
let source_key = "source-file.txt";
let dest_key = "dest-file.txt";
let content = b"Content to copy";
// First, create source object
client
.put_object()
.bucket(bucket)
.key(source_key)
.body(ByteStream::from_static(content))
.send()
.await
.expect("Failed to create source object");
info!("📤 Copying object from {} to {}", source_key, dest_key);
let copy_result = client
.copy_object()
.bucket(bucket)
.key(dest_key)
.copy_source(format!("{}/{}", bucket, source_key))
.send()
.await;
assert!(copy_result.is_ok(), "CopyObject failed: {:?}", copy_result.err());
let output = copy_result.unwrap();
info!("📥 CopyObject response - version_id: {:?}", output.version_id);
assert!(
output.version_id.is_some(),
"❌ FAILED: version_id should be present when versioning is enabled"
);
assert!(
!output.version_id.as_ref().unwrap().is_empty(),
"❌ FAILED: version_id should not be empty"
);
info!("✅ PASSED: CopyObject correctly returns version_id");
}
/// Test 3: CompleteMultipartUpload should return version_id when versioning is enabled
#[tokio::test]
#[serial]
async fn test_multipart_upload_returns_version_id_with_versioning() {
init_logging();
info!("🧪 TEST: CompleteMultipartUpload returns version_id with versioning enabled");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-multipart-version-id";
create_bucket(&client, bucket).await.expect("Failed to create bucket");
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
let key = "multipart-file.txt";
let content = b"Part 1 content for multipart upload test";
info!("📤 Creating multipart upload for key: {}", key);
let create_result = client
.create_multipart_upload()
.bucket(bucket)
.key(key)
.send()
.await
.expect("Failed to create multipart upload");
let upload_id = create_result.upload_id().expect("No upload_id returned");
info!("📤 Uploading part 1");
let upload_part_result = client
.upload_part()
.bucket(bucket)
.key(key)
.upload_id(upload_id)
.part_number(1)
.body(ByteStream::from_static(content))
.send()
.await
.expect("Failed to upload part");
let etag = upload_part_result.e_tag().expect("No etag returned").to_string();
let completed_part = CompletedPart::builder().part_number(1).e_tag(etag).build();
let completed_upload = CompletedMultipartUpload::builder().parts(completed_part).build();
info!("📤 Completing multipart upload");
let complete_result = client
.complete_multipart_upload()
.bucket(bucket)
.key(key)
.upload_id(upload_id)
.multipart_upload(completed_upload)
.send()
.await;
assert!(complete_result.is_ok(), "CompleteMultipartUpload failed: {:?}", complete_result.err());
let output = complete_result.unwrap();
info!("📥 CompleteMultipartUpload response - version_id: {:?}", output.version_id);
assert!(
output.version_id.is_some(),
"❌ FAILED: version_id should be present when versioning is enabled"
);
assert!(
!output.version_id.as_ref().unwrap().is_empty(),
"❌ FAILED: version_id should not be empty"
);
info!("✅ PASSED: CompleteMultipartUpload correctly returns version_id");
}
/// Test 4: PutObject should NOT return version_id when versioning is NOT enabled
/// This ensures we didn't break non-versioned buckets
#[tokio::test]
#[serial]
async fn test_put_object_without_versioning() {
init_logging();
info!("🧪 TEST: PutObject behavior without versioning (no regression)");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-no-versioning";
create_bucket(&client, bucket).await.expect("Failed to create bucket");
// Note: NOT enabling versioning here
let key = "test-file.txt";
let content = b"Test content without versioning";
info!("📤 Uploading object to non-versioned bucket");
let result = client
.put_object()
.bucket(bucket)
.key(key)
.body(ByteStream::from_static(content))
.send()
.await;
assert!(result.is_ok(), "PutObject failed: {:?}", result.err());
let output = result.unwrap();
info!("📥 PutObject response - version_id: {:?}", output.version_id);
// version_id can be None or Some("null") for non-versioned buckets
info!("✅ PASSED: PutObject works correctly without versioning");
}
/// Test 5: Basic S3 operations still work correctly (no regression)
#[tokio::test]
#[serial]
async fn test_basic_s3_operations_no_regression() {
init_logging();
info!("🧪 TEST: Basic S3 operations work correctly (no regression)");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-basic-operations";
create_bucket(&client, bucket).await.expect("Failed to create bucket");
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
let key = "test-basic-file.txt";
let content = b"Basic operations test content";
// Test PUT
info!("📤 Testing PUT operation");
let put_result = client
.put_object()
.bucket(bucket)
.key(key)
.body(ByteStream::from_static(content))
.send()
.await;
assert!(put_result.is_ok(), "PUT operation failed");
let _version_id = put_result.unwrap().version_id;
// Test GET
info!("📥 Testing GET operation");
let get_result = client.get_object().bucket(bucket).key(key).send().await;
assert!(get_result.is_ok(), "GET operation failed");
let body = get_result.unwrap().body.collect().await.unwrap().to_vec();
assert_eq!(body, content, "Content mismatch after GET");
// Test HEAD
info!("📋 Testing HEAD operation");
let head_result = client.head_object().bucket(bucket).key(key).send().await;
assert!(head_result.is_ok(), "HEAD operation failed");
// Test LIST
info!("📝 Testing LIST operation");
let list_result = client.list_objects_v2().bucket(bucket).send().await;
assert!(list_result.is_ok(), "LIST operation failed");
let list_output = list_result.unwrap();
let objects = list_output.contents();
assert!(objects.iter().any(|obj| obj.key() == Some(key)), "Object not found in LIST");
// Test DELETE
info!("🗑️ Testing DELETE operation");
let delete_result = client.delete_object().bucket(bucket).key(key).send().await;
assert!(delete_result.is_ok(), "DELETE operation failed");
// Verify the object is deleted (GET should either error with NoSuchKey or report a delete marker)
let get_after_delete = client.get_object().bucket(bucket).key(key).send().await;
assert!(
get_after_delete.is_err() || get_after_delete.unwrap().delete_marker == Some(true),
"Object should be deleted or have delete marker"
);
info!("✅ PASSED: All basic S3 operations work correctly");
}
/// Test 6: Veeam-specific scenario simulation
/// Simulates the exact workflow that Veeam uses when backing up data
#[tokio::test]
#[serial]
async fn test_veeam_backup_workflow_simulation() {
init_logging();
info!("🧪 TEST: Veeam VBR backup workflow simulation (Issue #1066)");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "veeam-backup-test";
create_bucket(&client, bucket).await.expect("Failed to create bucket");
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
// Veeam typically creates multiple objects in a backup session
let test_paths = vec![
"Veeam/Backup/Clients/test-client-id/test-backup-id/CloudStg/Meta/Blocks/History/CheckpointHistory.dat",
"Veeam/Backup/Clients/test-client-id/test-backup-id/Metadata/Lock/create.checkpoint/declare",
];
for path in test_paths {
info!("📤 Simulating Veeam upload to: {}", path);
let content = format!("Veeam backup data for {}", path);
let put_result = client
.put_object()
.bucket(bucket)
.key(path)
.body(ByteStream::from(content.into_bytes()))
.send()
.await;
assert!(put_result.is_ok(), "Veeam upload failed for path: {}", path);
let output = put_result.unwrap();
info!("📥 Response version_id: {:?}", output.version_id);
assert!(output.version_id.is_some(), "❌ FAILED: Veeam expects version_id for path: {}", path);
assert!(
!output.version_id.as_ref().unwrap().is_empty(),
"❌ FAILED: version_id should not be empty for path: {}",
path
);
info!("✅ Veeam upload successful with version_id for: {}", path);
}
info!("✅ PASSED: Veeam backup workflow simulation completed successfully");
}
}
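One more check this module could grow, shown only as a hedged sketch (the helper name is an assumption, not part of this commit): reading the object back by the version_id that PutObject returned confirms the ID actually round-trips.
/// Sketch: fetch an object by the version_id a previous PutObject returned.
async fn assert_version_id_round_trips(
    client: &aws_sdk_s3::Client,
    bucket: &str,
    key: &str,
    version_id: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let got = client
        .get_object()
        .bucket(bucket)
        .key(key)
        .version_id(version_id)
        .send()
        .await?;
    // The response should echo the exact version that was requested.
    assert_eq!(got.version_id.as_deref(), Some(version_id));
    Ok(())
}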

View File

@@ -34,12 +34,19 @@ workspace = true
default = []
[dependencies]
rustfs-filemeta.workspace = true
rustfs-utils = { workspace = true, features = ["full"] }
rustfs-rio.workspace = true
rustfs-signer.workspace = true
rustfs-checksums.workspace = true
rustfs-config = { workspace = true, features = ["constants", "notify", "audit"] }
rustfs-credentials = { workspace = true }
rustfs-common.workspace = true
rustfs-policy.workspace = true
rustfs-protos.workspace = true
async-trait.workspace = true
bytes.workspace = true
byteorder = { workspace = true }
rustfs-common.workspace = true
rustfs-policy.workspace = true
chrono.workspace = true
glob = { workspace = true }
thiserror.workspace = true
@@ -60,7 +67,6 @@ lazy_static.workspace = true
rustfs-lock.workspace = true
regex = { workspace = true }
path-absolutize = { workspace = true }
rustfs-protos.workspace = true
rmp.workspace = true
rmp-serde.workspace = true
tokio-util = { workspace = true, features = ["io", "compat"] }
@@ -91,11 +97,6 @@ aws-sdk-s3 = { workspace = true }
urlencoding = { workspace = true }
smallvec = { workspace = true }
shadow-rs.workspace = true
rustfs-filemeta.workspace = true
rustfs-utils = { workspace = true, features = ["full"] }
rustfs-rio.workspace = true
rustfs-signer.workspace = true
rustfs-checksums.workspace = true
async-recursion.workspace = true
aws-credential-types = { workspace = true }
aws-smithy-types = { workspace = true }
@@ -108,17 +109,12 @@ google-cloud-auth = { workspace = true }
aws-config = { workspace = true }
faster-hex = { workspace = true }
[target.'cfg(not(windows))'.dependencies]
nix = { workspace = true }
[target.'cfg(windows)'.dependencies]
winapi = { workspace = true }
[dev-dependencies]
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
criterion = { workspace = true, features = ["html_reports"] }
temp-env = { workspace = true }
tracing-subscriber = { workspace = true }
[build-dependencies]
shadow-rs = { workspace = true, features = ["build", "metadata"] }

View File

@@ -1,19 +0,0 @@
# ECStore - Erasure Coding Storage
ECStore provides erasure coding functionality for the RustFS project, using high-performance Reed-Solomon SIMD implementation for optimal performance.
## Features
- **Reed-Solomon Implementation**: High-performance SIMD-optimized erasure coding
- **Cross-Platform Compatibility**: Support for x86_64, aarch64, and other architectures
- **Performance Optimized**: SIMD instructions for maximum throughput
- **Thread Safety**: Safe concurrent access with caching optimizations
- **Scalable**: Excellent performance for high-throughput scenarios
## Documentation
For complete documentation, examples, and usage information, please visit the main [RustFS repository](https://github.com/rustfs/rustfs).
## License
This project is licensed under the Apache License, Version 2.0.

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");

View File

@@ -23,7 +23,7 @@ use crate::{
};
use crate::data_usage::load_data_usage_cache;
use rustfs_common::{globals::GLOBAL_Local_Node_Name, heal_channel::DriveState};
use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, heal_channel::DriveState};
use rustfs_madmin::{
BackendDisks, Disk, ErasureSetInfo, ITEM_INITIALIZING, ITEM_OFFLINE, ITEM_ONLINE, InfoMessage, ServerProperties,
};
@@ -128,7 +128,7 @@ async fn is_server_resolvable(endpoint: &Endpoint) -> Result<()> {
}
pub async fn get_local_server_property() -> ServerProperties {
let addr = GLOBAL_Local_Node_Name.read().await.clone();
let addr = GLOBAL_LOCAL_NODE_NAME.read().await.clone();
let mut pool_numbers = HashSet::new();
let mut network = HashMap::new();

View File

@@ -22,7 +22,7 @@ pub struct PolicySys {}
impl PolicySys {
pub async fn is_allowed(args: &BucketPolicyArgs<'_>) -> bool {
match Self::get(args.bucket).await {
Ok(cfg) => return cfg.is_allowed(args),
Ok(cfg) => return cfg.is_allowed(args).await,
Err(err) => {
if err != StorageError::ConfigNotFound {
info!("config get err {:?}", err);

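Because cfg.is_allowed is now asynchronous, every call site has to await the bucket-policy decision. A minimal hypothetical caller (only PolicySys::is_allowed and BucketPolicyArgs are taken from the code above):
async fn bucket_policy_permits(args: &BucketPolicyArgs<'_>) -> bool {
    // Policy evaluation (e.g. condition checks) may now perform async work,
    // so the decision is awaited rather than read synchronously.
    PolicySys::is_allowed(args).await
}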
View File

@@ -16,7 +16,7 @@ use crate::disk::error::DiskError;
use crate::disk::{self, DiskAPI, DiskStore, WalkDirOptions};
use futures::future::join_all;
use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetacacheReader, is_io_eof};
use std::{future::Future, pin::Pin, sync::Arc};
use std::{future::Future, pin::Pin};
use tokio::spawn;
use tokio_util::sync::CancellationToken;
use tracing::{error, info, warn};
@@ -71,14 +71,14 @@ pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> d
let mut jobs: Vec<tokio::task::JoinHandle<std::result::Result<(), DiskError>>> = Vec::new();
let mut readers = Vec::with_capacity(opts.disks.len());
let fds = Arc::new(opts.fallback_disks.clone());
let fds = opts.fallback_disks.iter().flatten().cloned().collect::<Vec<_>>();
let cancel_rx = CancellationToken::new();
for disk in opts.disks.iter() {
let opdisk = disk.clone();
let opts_clone = opts.clone();
let fds_clone = fds.clone();
let mut fds_clone = fds.clone();
let cancel_rx_clone = cancel_rx.clone();
let (rd, mut wr) = tokio::io::duplex(64);
readers.push(MetacacheReader::new(rd));
@@ -113,21 +113,20 @@ pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> d
}
while need_fallback {
// warn!("list_path_raw: while need_fallback start");
let disk = match fds_clone.iter().find(|d| d.is_some()) {
Some(d) => {
if let Some(disk) = d.clone() {
disk
} else {
warn!("list_path_raw: fallback disk is none");
break;
}
}
None => {
warn!("list_path_raw: fallback disk is none2");
break;
let disk_op = {
if fds_clone.is_empty() {
None
} else {
let disk = fds_clone.remove(0);
if disk.is_online().await { Some(disk.clone()) } else { None }
}
};
let Some(disk) = disk_op else {
warn!("list_path_raw: fallback disk is none");
break;
};
match disk
.as_ref()
.walk_dir(

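Restating the new fallback step in isolation, as a sketch with assumed types (the real logic stays inlined in the loop above): each retry takes the next fallback disk out of the list and only uses it if it reports itself online.
async fn take_next_online_fallback(fds: &mut Vec<DiskStore>) -> Option<DiskStore> {
    if fds.is_empty() {
        return None;
    }
    // Consume the candidate either way; an offline disk simply yields None,
    // which ends the fallback loop, mirroring the behavior above.
    let disk = fds.remove(0);
    if disk.is_online().await { Some(disk) } else { None }
}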
View File

@@ -1,350 +0,0 @@
#![allow(clippy::map_entry)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
#![allow(unused_must_use)]
#![allow(clippy::all)]
use lazy_static::lazy_static;
use rustfs_checksums::ChecksumAlgorithm;
use std::collections::HashMap;
use crate::client::{api_put_object::PutObjectOptions, api_s3_datatypes::ObjectPart};
use crate::{disk::DiskAPI, store_api::GetObjectReader};
use rustfs_utils::crypto::{base64_decode, base64_encode};
use s3s::header::{
X_AMZ_CHECKSUM_ALGORITHM, X_AMZ_CHECKSUM_CRC32, X_AMZ_CHECKSUM_CRC32C, X_AMZ_CHECKSUM_SHA1, X_AMZ_CHECKSUM_SHA256,
};
use enumset::{EnumSet, EnumSetType, enum_set};
#[derive(Debug, EnumSetType, Default)]
#[enumset(repr = "u8")]
pub enum ChecksumMode {
#[default]
ChecksumNone,
ChecksumSHA256,
ChecksumSHA1,
ChecksumCRC32,
ChecksumCRC32C,
ChecksumCRC64NVME,
ChecksumFullObject,
}
lazy_static! {
static ref C_ChecksumMask: EnumSet<ChecksumMode> = {
let mut s = EnumSet::all();
s.remove(ChecksumMode::ChecksumFullObject);
s
};
static ref C_ChecksumFullObjectCRC32: EnumSet<ChecksumMode> =
enum_set!(ChecksumMode::ChecksumCRC32 | ChecksumMode::ChecksumFullObject);
static ref C_ChecksumFullObjectCRC32C: EnumSet<ChecksumMode> =
enum_set!(ChecksumMode::ChecksumCRC32C | ChecksumMode::ChecksumFullObject);
}
const AMZ_CHECKSUM_CRC64NVME: &str = "x-amz-checksum-crc64nvme";
impl ChecksumMode {
//pub const CRC64_NVME_POLYNOMIAL: i64 = 0xad93d23594c93659;
pub fn base(&self) -> ChecksumMode {
let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
match s.as_u8() {
1_u8 => ChecksumMode::ChecksumNone,
2_u8 => ChecksumMode::ChecksumSHA256,
4_u8 => ChecksumMode::ChecksumSHA1,
8_u8 => ChecksumMode::ChecksumCRC32,
16_u8 => ChecksumMode::ChecksumCRC32C,
32_u8 => ChecksumMode::ChecksumCRC64NVME,
_ => panic!("enum err."),
}
}
pub fn is(&self, t: ChecksumMode) -> bool {
*self & t == t
}
pub fn key(&self) -> String {
//match c & checksumMask {
match self {
ChecksumMode::ChecksumCRC32 => {
return X_AMZ_CHECKSUM_CRC32.to_string();
}
ChecksumMode::ChecksumCRC32C => {
return X_AMZ_CHECKSUM_CRC32C.to_string();
}
ChecksumMode::ChecksumSHA1 => {
return X_AMZ_CHECKSUM_SHA1.to_string();
}
ChecksumMode::ChecksumSHA256 => {
return X_AMZ_CHECKSUM_SHA256.to_string();
}
ChecksumMode::ChecksumCRC64NVME => {
return AMZ_CHECKSUM_CRC64NVME.to_string();
}
_ => {
return "".to_string();
}
}
}
pub fn can_composite(&self) -> bool {
let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
match s.as_u8() {
2_u8 => true,
4_u8 => true,
8_u8 => true,
16_u8 => true,
_ => false,
}
}
pub fn can_merge_crc(&self) -> bool {
let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
match s.as_u8() {
8_u8 => true,
16_u8 => true,
32_u8 => true,
_ => false,
}
}
pub fn full_object_requested(&self) -> bool {
let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
match s.as_u8() {
//C_ChecksumFullObjectCRC32 as u8 => true,
//C_ChecksumFullObjectCRC32C as u8 => true,
32_u8 => true,
_ => false,
}
}
pub fn key_capitalized(&self) -> String {
self.key()
}
pub fn raw_byte_len(&self) -> usize {
let u = EnumSet::from(*self).intersection(*C_ChecksumMask).as_u8();
if u == ChecksumMode::ChecksumCRC32 as u8 || u == ChecksumMode::ChecksumCRC32C as u8 {
4
} else if u == ChecksumMode::ChecksumSHA1 as u8 {
use sha1::Digest;
sha1::Sha1::output_size() as usize
} else if u == ChecksumMode::ChecksumSHA256 as u8 {
use sha2::Digest;
sha2::Sha256::output_size() as usize
} else if u == ChecksumMode::ChecksumCRC64NVME as u8 {
8
} else {
0
}
}
pub fn hasher(&self) -> Result<Box<dyn rustfs_checksums::http::HttpChecksum>, std::io::Error> {
match /*C_ChecksumMask & **/self {
ChecksumMode::ChecksumCRC32 => {
return Ok(ChecksumAlgorithm::Crc32.into_impl());
}
ChecksumMode::ChecksumCRC32C => {
return Ok(ChecksumAlgorithm::Crc32c.into_impl());
}
ChecksumMode::ChecksumSHA1 => {
return Ok(ChecksumAlgorithm::Sha1.into_impl());
}
ChecksumMode::ChecksumSHA256 => {
return Ok(ChecksumAlgorithm::Sha256.into_impl());
}
ChecksumMode::ChecksumCRC64NVME => {
return Ok(ChecksumAlgorithm::Crc64Nvme.into_impl());
}
_ => return Err(std::io::Error::other("unsupported checksum type")),
}
}
pub fn is_set(&self) -> bool {
let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
s.len() == 1
}
pub fn set_default(&mut self, t: ChecksumMode) {
if !self.is_set() {
*self = t;
}
}
pub fn encode_to_string(&self, b: &[u8]) -> Result<String, std::io::Error> {
if !self.is_set() {
return Ok("".to_string());
}
let mut h = self.hasher()?;
h.update(b);
let hash = h.finalize();
Ok(base64_encode(hash.as_ref()))
}
pub fn to_string(&self) -> String {
//match c & checksumMask {
match self {
ChecksumMode::ChecksumCRC32 => {
return "CRC32".to_string();
}
ChecksumMode::ChecksumCRC32C => {
return "CRC32C".to_string();
}
ChecksumMode::ChecksumSHA1 => {
return "SHA1".to_string();
}
ChecksumMode::ChecksumSHA256 => {
return "SHA256".to_string();
}
ChecksumMode::ChecksumNone => {
return "".to_string();
}
ChecksumMode::ChecksumCRC64NVME => {
return "CRC64NVME".to_string();
}
_ => {
return "<invalid>".to_string();
}
}
}
// pub fn check_sum_reader(&self, r: GetObjectReader) -> Result<Checksum, std::io::Error> {
// let mut h = self.hasher()?;
// Ok(Checksum::new(self.clone(), h.sum().as_bytes()))
// }
// pub fn check_sum_bytes(&self, b: &[u8]) -> Result<Checksum, std::io::Error> {
// let mut h = self.hasher()?;
// Ok(Checksum::new(self.clone(), h.sum().as_bytes()))
// }
pub fn composite_checksum(&self, p: &mut [ObjectPart]) -> Result<Checksum, std::io::Error> {
if !self.can_composite() {
return Err(std::io::Error::other("cannot do composite checksum"));
}
p.sort_by(|i, j| {
if i.part_num < j.part_num {
std::cmp::Ordering::Less
} else if i.part_num > j.part_num {
std::cmp::Ordering::Greater
} else {
std::cmp::Ordering::Equal
}
});
let c = self.base();
let crc_bytes = Vec::<u8>::with_capacity(p.len() * self.raw_byte_len() as usize);
let mut h = self.hasher()?;
h.update(crc_bytes.as_ref());
let hash = h.finalize();
Ok(Checksum {
checksum_type: self.clone(),
r: hash.as_ref().to_vec(),
computed: false,
})
}
pub fn full_object_checksum(&self, p: &mut [ObjectPart]) -> Result<Checksum, std::io::Error> {
todo!();
}
}
#[derive(Default)]
pub struct Checksum {
checksum_type: ChecksumMode,
r: Vec<u8>,
computed: bool,
}
#[allow(dead_code)]
impl Checksum {
fn new(t: ChecksumMode, b: &[u8]) -> Checksum {
if t.is_set() && b.len() == t.raw_byte_len() {
return Checksum {
checksum_type: t,
r: b.to_vec(),
computed: false,
};
}
Checksum::default()
}
#[allow(dead_code)]
fn new_checksum_string(t: ChecksumMode, s: &str) -> Result<Checksum, std::io::Error> {
let b = match base64_decode(s.as_bytes()) {
Ok(b) => b,
Err(err) => return Err(std::io::Error::other(err.to_string())),
};
if t.is_set() && b.len() == t.raw_byte_len() {
return Ok(Checksum {
checksum_type: t,
r: b,
computed: false,
});
}
Ok(Checksum::default())
}
fn is_set(&self) -> bool {
self.checksum_type.is_set() && self.r.len() == self.checksum_type.raw_byte_len()
}
fn encoded(&self) -> String {
if !self.is_set() {
return "".to_string();
}
base64_encode(&self.r)
}
#[allow(dead_code)]
fn raw(&self) -> Option<Vec<u8>> {
if !self.is_set() {
return None;
}
Some(self.r.clone())
}
}
pub fn add_auto_checksum_headers(opts: &mut PutObjectOptions) {
opts.user_metadata
.insert("X-Amz-Checksum-Algorithm".to_string(), opts.auto_checksum.to_string());
if opts.auto_checksum.full_object_requested() {
opts.user_metadata
.insert("X-Amz-Checksum-Type".to_string(), "FULL_OBJECT".to_string());
}
}
pub fn apply_auto_checksum(opts: &mut PutObjectOptions, all_parts: &mut [ObjectPart]) -> Result<(), std::io::Error> {
if opts.auto_checksum.can_composite() && !opts.auto_checksum.is(ChecksumMode::ChecksumFullObject) {
let crc = opts.auto_checksum.composite_checksum(all_parts)?;
opts.user_metadata = {
let mut hm = HashMap::new();
hm.insert(opts.auto_checksum.key(), crc.encoded());
hm
}
} else if opts.auto_checksum.can_merge_crc() {
let crc = opts.auto_checksum.full_object_checksum(all_parts)?;
opts.user_metadata = {
let mut hm = HashMap::new();
hm.insert(opts.auto_checksum.key_capitalized(), crc.encoded());
hm.insert("X-Amz-Checksum-Type".to_string(), "FULL_OBJECT".to_string());
hm
}
}
Ok(())
}

View File

@@ -1,270 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// use crate::error::StdError;
// use bytes::Bytes;
// use futures::pin_mut;
// use futures::stream::{Stream, StreamExt};
// use std::future::Future;
// use std::pin::Pin;
// use std::task::{Context, Poll};
// use transform_stream::AsyncTryStream;
// pub type SyncBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + Sync + 'a>>;
// pub struct ChunkedStream<'a> {
// /// inner
// inner: AsyncTryStream<Bytes, StdError, SyncBoxFuture<'a, Result<(), StdError>>>,
// remaining_length: usize,
// }
// impl<'a> ChunkedStream<'a> {
// pub fn new<S>(body: S, content_length: usize, chunk_size: usize, need_padding: bool) -> Self
// where
// S: Stream<Item = Result<Bytes, StdError>> + Send + Sync + 'a,
// {
// let inner = AsyncTryStream::<_, _, SyncBoxFuture<'a, Result<(), StdError>>>::new(|mut y| {
// #[allow(clippy::shadow_same)] // necessary for `pin_mut!`
// Box::pin(async move {
// pin_mut!(body);
// // Data left over from the previous call
// let mut prev_bytes = Bytes::new();
// let mut read_size = 0;
// loop {
// let data: Vec<Bytes> = {
// // Read a fixed-size chunk
// match Self::read_data(body.as_mut(), prev_bytes, chunk_size).await {
// None => break,
// Some(Err(e)) => return Err(e),
// Some(Ok((data, remaining_bytes))) => {
// // debug!(
// // "content_length:{},read_size:{}, read_data data:{}, remaining_bytes: {} ",
// // content_length,
// // read_size,
// // data.len(),
// // remaining_bytes.len()
// // );
// prev_bytes = remaining_bytes;
// data
// }
// }
// };
// for bytes in data {
// read_size += bytes.len();
// // debug!("read_size {}, content_length {}", read_size, content_length,);
// y.yield_ok(bytes).await;
// }
// if read_size + prev_bytes.len() >= content_length {
// // debug!(
// // "Finished reading: read_size:{} + prev_bytes.len({}) == content_length {}",
// // read_size,
// // prev_bytes.len(),
// // content_length,
// // );
// // Pad with zeros?
// if !need_padding {
// y.yield_ok(prev_bytes).await;
// break;
// }
// let mut bytes = vec![0u8; chunk_size];
// let (left, _) = bytes.split_at_mut(prev_bytes.len());
// left.copy_from_slice(&prev_bytes);
// y.yield_ok(Bytes::from(bytes)).await;
// break;
// }
// }
// // debug!("chunked stream exit");
// Ok(())
// })
// });
// Self {
// inner,
// remaining_length: content_length,
// }
// }
// /// read data and return remaining bytes
// async fn read_data<S>(
// mut body: Pin<&mut S>,
// prev_bytes: Bytes,
// data_size: usize,
// ) -> Option<Result<(Vec<Bytes>, Bytes), StdError>>
// where
// S: Stream<Item = Result<Bytes, StdError>> + Send,
// {
// let mut bytes_buffer = Vec::new();
// // Run only once
// let mut push_data_bytes = |mut bytes: Bytes| {
// // debug!("read from body {} split per {}, prev_bytes: {}", bytes.len(), data_size, prev_bytes.len());
// if bytes.is_empty() {
// return None;
// }
// if data_size == 0 {
// return Some(bytes);
// }
// // Merge with the previous data
// if !prev_bytes.is_empty() {
// let need_size = data_size.wrapping_sub(prev_bytes.len());
// // debug!(
// // "Previous leftover {}, take {} now, total: {}",
// // prev_bytes.len(),
// // need_size,
// // prev_bytes.len() + need_size
// // );
// if bytes.len() >= need_size {
// let data = bytes.split_to(need_size);
// let mut combined = Vec::new();
// combined.extend_from_slice(&prev_bytes);
// combined.extend_from_slice(&data);
// // debug!(
// // "Fetched more bytes than needed: {}, merged result {}, remaining bytes {}",
// // need_size,
// // combined.len(),
// // bytes.len(),
// // );
// bytes_buffer.push(Bytes::from(combined));
// } else {
// let mut combined = Vec::new();
// combined.extend_from_slice(&prev_bytes);
// combined.extend_from_slice(&bytes);
// // debug!(
// // "Fetched fewer bytes than needed: {}, merged result {}, remaining bytes {}, return immediately",
// // need_size,
// // combined.len(),
// // bytes.len(),
// // );
// return Some(Bytes::from(combined));
// }
// }
// // If the fetched data exceeds the chunk, slice the required size
// if data_size <= bytes.len() {
// let n = bytes.len() / data_size;
// for _ in 0..n {
// let data = bytes.split_to(data_size);
// // println!("bytes_buffer.push: {}, remaining: {}", data.len(), bytes.len());
// bytes_buffer.push(data);
// }
// Some(bytes)
// } else {
// // Insufficient data
// Some(bytes)
// }
// };
// // Remaining data
// let remaining_bytes = 'outer: {
// // // Exit if the previous data was sufficient
// // if let Some(remaining_bytes) = push_data_bytes(prev_bytes) {
// // println!("Consuming leftovers");
// // break 'outer remaining_bytes;
// // }
// loop {
// match body.next().await? {
// Err(e) => return Some(Err(e)),
// Ok(bytes) => {
// if let Some(remaining_bytes) = push_data_bytes(bytes) {
// break 'outer remaining_bytes;
// }
// }
// }
// }
// };
// Some(Ok((bytes_buffer, remaining_bytes)))
// }
// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, StdError>>> {
// let ans = Pin::new(&mut self.inner).poll_next(cx);
// if let Poll::Ready(Some(Ok(ref bytes))) = ans {
// self.remaining_length = self.remaining_length.saturating_sub(bytes.len());
// }
// ans
// }
// // pub fn exact_remaining_length(&self) -> usize {
// // self.remaining_length
// // }
// }
// impl Stream for ChunkedStream<'_> {
// type Item = Result<Bytes, StdError>;
// fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
// self.poll(cx)
// }
// fn size_hint(&self) -> (usize, Option<usize>) {
// (0, None)
// }
// }
// #[cfg(test)]
// mod test {
// use super::*;
// #[tokio::test]
// async fn test_chunked_stream() {
// let chunk_size = 4;
// let data1 = vec![1u8; 7777]; // 65536
// let data2 = vec![1u8; 7777]; // 65536
// let content_length = data1.len() + data2.len();
// let chunk1 = Bytes::from(data1);
// let chunk2 = Bytes::from(data2);
// let chunk_results: Vec<Result<Bytes, _>> = vec![Ok(chunk1), Ok(chunk2)];
// let stream = futures::stream::iter(chunk_results);
// let mut chunked_stream = ChunkedStream::new(stream, content_length, chunk_size, true);
// loop {
// let ans1 = chunked_stream.next().await;
// if ans1.is_none() {
// break;
// }
// let bytes = ans1.unwrap().unwrap();
// assert!(bytes.len() == chunk_size)
// }
// // assert_eq!(ans1.unwrap(), chunk1_data.as_slice());
// }
// }

View File

@@ -18,19 +18,17 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]
use crate::client::{
api_error_response::http_resp_to_error_response,
api_get_options::GetObjectOptions,
transition_api::{ObjectInfo, ReaderImpl, RequestMetadata, TransitionClient},
};
use bytes::Bytes;
use http::{HeaderMap, HeaderValue};
use rustfs_config::MAX_S3_CLIENT_RESPONSE_SIZE;
use rustfs_utils::EMPTY_STRING_SHA256_HASH;
use s3s::dto::Owner;
use std::collections::HashMap;
use std::io::Cursor;
use tokio::io::BufReader;
use crate::client::{
api_error_response::{err_invalid_argument, http_resp_to_error_response},
api_get_options::GetObjectOptions,
transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info},
};
use rustfs_utils::EMPTY_STRING_SHA256_HASH;
#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)]
pub struct Grantee {
@@ -90,7 +88,12 @@ impl TransitionClient {
return Err(std::io::Error::other(http_resp_to_error_response(&resp, b, bucket_name, object_name)));
}
let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
let b = resp
.body_mut()
.store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
.await
.unwrap()
.to_vec();
let mut res = match quick_xml::de::from_str::<AccessControlPolicy>(&String::from_utf8(b).unwrap()) {
Ok(result) => result,
Err(err) => {

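The same substitution of store_all_unlimited with store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE) repeats in the files below. A hedged sketch of the shared pattern (the concrete response and body types used in transition_api are assumptions):
async fn read_bounded_response_body(resp: &mut http::Response<s3s::Body>) -> Vec<u8> {
    // Cap how many bytes of a remote-tier response are buffered in memory,
    // instead of reading the body without any limit.
    resp.body_mut()
        .store_all_limited(rustfs_config::MAX_S3_CLIENT_RESPONSE_SIZE)
        .await
        .unwrap()
        .to_vec()
}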
View File

@@ -21,24 +21,17 @@
use bytes::Bytes;
use http::{HeaderMap, HeaderValue};
use std::collections::HashMap;
use std::io::Cursor;
use time::OffsetDateTime;
use tokio::io::BufReader;
use crate::client::constants::{GET_OBJECT_ATTRIBUTES_MAX_PARTS, GET_OBJECT_ATTRIBUTES_TAGS, ISO8601_DATEFORMAT};
use rustfs_utils::EMPTY_STRING_SHA256_HASH;
use s3s::header::{
X_AMZ_DELETE_MARKER, X_AMZ_MAX_PARTS, X_AMZ_METADATA_DIRECTIVE, X_AMZ_OBJECT_ATTRIBUTES, X_AMZ_PART_NUMBER_MARKER,
X_AMZ_REQUEST_CHARGED, X_AMZ_RESTORE, X_AMZ_VERSION_ID,
};
use s3s::{Body, dto::Owner};
use crate::client::{
api_error_response::err_invalid_argument,
api_get_object_acl::AccessControlPolicy,
api_get_options::GetObjectOptions,
transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info},
transition_api::{ReaderImpl, RequestMetadata, TransitionClient},
};
use rustfs_config::MAX_S3_CLIENT_RESPONSE_SIZE;
use rustfs_utils::EMPTY_STRING_SHA256_HASH;
use s3s::Body;
use s3s::header::{X_AMZ_MAX_PARTS, X_AMZ_OBJECT_ATTRIBUTES, X_AMZ_PART_NUMBER_MARKER, X_AMZ_VERSION_ID};
pub struct ObjectAttributesOptions {
pub max_parts: i64,
@@ -143,7 +136,12 @@ impl ObjectAttributes {
self.last_modified = mod_time;
self.version_id = h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap().to_string();
let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
let b = resp
.body_mut()
.store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
.await
.unwrap()
.to_vec();
let mut response = match quick_xml::de::from_str::<ObjectAttributesResponse>(&String::from_utf8(b).unwrap()) {
Ok(result) => result,
Err(err) => {
@@ -224,7 +222,12 @@ impl TransitionClient {
}
if resp.status() != http::StatusCode::OK {
let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
let b = resp
.body_mut()
.store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
.await
.unwrap()
.to_vec();
let err_body = String::from_utf8(b).unwrap();
let mut er = match quick_xml::de::from_str::<AccessControlPolicy>(&err_body) {
Ok(result) => result,

View File

@@ -18,10 +18,6 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]
use bytes::Bytes;
use http::{HeaderMap, StatusCode};
use std::collections::HashMap;
use crate::client::{
api_error_response::http_resp_to_error_response,
api_s3_datatypes::{
@@ -31,7 +27,11 @@ use crate::client::{
transition_api::{ReaderImpl, RequestMetadata, TransitionClient},
};
use crate::store_api::BucketInfo;
use bytes::Bytes;
use http::{HeaderMap, StatusCode};
use rustfs_config::MAX_S3_CLIENT_RESPONSE_SIZE;
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;
use std::collections::HashMap;
impl TransitionClient {
pub fn list_buckets(&self) -> Result<Vec<BucketInfo>, std::io::Error> {
@@ -102,7 +102,12 @@ impl TransitionClient {
}
//let mut list_bucket_result = ListBucketV2Result::default();
let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
let b = resp
.body_mut()
.store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
.await
.unwrap()
.to_vec();
let mut list_bucket_result = match quick_xml::de::from_str::<ListBucketV2Result>(&String::from_utf8(b).unwrap()) {
Ok(result) => result,
Err(err) => {

View File

@@ -18,23 +18,19 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]
use http::Request;
use hyper::StatusCode;
use hyper::body::Incoming;
use std::{collections::HashMap, sync::Arc};
use tracing::warn;
use tracing::{debug, error, info};
use super::constants::UNSIGNED_PAYLOAD;
use super::credentials::SignatureType;
use crate::client::{
api_error_response::{http_resp_to_error_response, to_error_response},
api_error_response::http_resp_to_error_response,
transition_api::{CreateBucketConfiguration, LocationConstraint, TransitionClient},
};
use http::Request;
use hyper::StatusCode;
use rustfs_config::MAX_S3_CLIENT_RESPONSE_SIZE;
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;
use s3s::Body;
use s3s::S3ErrorCode;
use super::constants::UNSIGNED_PAYLOAD;
use super::credentials::SignatureType;
use std::collections::HashMap;
#[derive(Debug, Clone)]
pub struct BucketLocationCache {
@@ -212,7 +208,12 @@ async fn process_bucket_location_response(
}
//}
let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
let b = resp
.body_mut()
.store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
.await
.unwrap()
.to_vec();
let mut location = "".to_string();
if tier_type == "huaweicloud" {
let d = quick_xml::de::from_str::<CreateBucketConfiguration>(&String::from_utf8(b).unwrap()).unwrap();

View File

@@ -1,59 +0,0 @@
#![allow(clippy::map_entry)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{collections::HashMap, sync::Arc};
use crate::{
disk::{
error::{is_unformatted_disk, DiskError},
format::{DistributionAlgoVersion, FormatV3},
new_disk, DiskAPI, DiskInfo, DiskOption, DiskStore,
},
store_api::{
BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec,
ListMultipartsInfo, ListObjectVersionsInfo, ListObjectsV2Info, MakeBucketOptions, MultipartInfo, MultipartUploadResult,
ObjectIO, ObjectInfo, ObjectOptions, ObjectToDelete, PartInfo, PutObjReader, StorageAPI,
},
credentials::{Credentials, SignatureType,},
api_put_object_multipart::UploadPartParams,
};
use http::HeaderMap;
use tokio_util::sync::CancellationToken;
use tracing::warn;
use tracing::{error, info};
use url::Url;
struct HookReader {
source: GetObjectReader,
hook: GetObjectReader,
}
impl HookReader {
pub fn new(source: GetObjectReader, hook: GetObjectReader) -> HookReader {
HookReader {
source,
hook,
}
}
fn seek(&self, offset: i64, whence: i64) -> Result<i64> {
todo!();
}
fn read(&self, b: &[u8]) -> Result<i64> {
todo!();
}
}

View File

@@ -18,6 +18,20 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]
use crate::client::bucket_cache::BucketLocationCache;
use crate::client::{
api_error_response::{err_invalid_argument, http_resp_to_error_response, to_error_response},
api_get_options::GetObjectOptions,
api_put_object::PutObjectOptions,
api_put_object_multipart::UploadPartParams,
api_s3_datatypes::{
CompleteMultipartUpload, CompletePart, ListBucketResult, ListBucketV2Result, ListMultipartUploadsResult,
ListObjectPartsResult, ObjectPart,
},
constants::{UNSIGNED_PAYLOAD, UNSIGNED_PAYLOAD_TRAILER},
credentials::{CredContext, Credentials, SignatureType, Static},
};
use crate::{client::checksum::ChecksumMode, store_api::GetObjectReader};
use bytes::Bytes;
use futures::{Future, StreamExt};
use http::{HeaderMap, HeaderName};
@@ -30,7 +44,18 @@ use hyper_util::{client::legacy::Client, client::legacy::connect::HttpConnector,
use md5::Digest;
use md5::Md5;
use rand::Rng;
use rustfs_config::MAX_S3_CLIENT_RESPONSE_SIZE;
use rustfs_rio::HashReader;
use rustfs_utils::HashAlgorithm;
use rustfs_utils::{
net::get_endpoint_url,
retry::{
DEFAULT_RETRY_CAP, DEFAULT_RETRY_UNIT, MAX_JITTER, MAX_RETRY, RetryTimer, is_http_status_retryable, is_s3code_retryable,
},
};
use s3s::S3ErrorCode;
use s3s::dto::ReplicationStatus;
use s3s::{Body, dto::Owner};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use std::io::Cursor;
@@ -48,31 +73,6 @@ use tracing::{debug, error, warn};
use url::{Url, form_urlencoded};
use uuid::Uuid;
use crate::client::bucket_cache::BucketLocationCache;
use crate::client::{
api_error_response::{err_invalid_argument, http_resp_to_error_response, to_error_response},
api_get_options::GetObjectOptions,
api_put_object::PutObjectOptions,
api_put_object_multipart::UploadPartParams,
api_s3_datatypes::{
CompleteMultipartUpload, CompletePart, ListBucketResult, ListBucketV2Result, ListMultipartUploadsResult,
ListObjectPartsResult, ObjectPart,
},
constants::{UNSIGNED_PAYLOAD, UNSIGNED_PAYLOAD_TRAILER},
credentials::{CredContext, Credentials, SignatureType, Static},
};
use crate::{client::checksum::ChecksumMode, store_api::GetObjectReader};
use rustfs_rio::HashReader;
use rustfs_utils::{
net::get_endpoint_url,
retry::{
DEFAULT_RETRY_CAP, DEFAULT_RETRY_UNIT, MAX_JITTER, MAX_RETRY, RetryTimer, is_http_status_retryable, is_s3code_retryable,
},
};
use s3s::S3ErrorCode;
use s3s::dto::ReplicationStatus;
use s3s::{Body, dto::Owner};
const C_USER_AGENT: &str = "RustFS (linux; x86)";
const SUCCESS_STATUS: [StatusCode; 3] = [StatusCode::OK, StatusCode::NO_CONTENT, StatusCode::PARTIAL_CONTENT];
@@ -132,6 +132,25 @@ pub enum BucketLookupType {
BucketLookupPath,
}
fn load_root_store_from_tls_path() -> Option<rustls::RootCertStore> {
// Load the root certificate bundle from the path specified by the
// RUSTFS_TLS_PATH environment variable.
let tp = std::env::var("RUSTFS_TLS_PATH").ok()?;
let ca = std::path::Path::new(&tp).join(rustfs_config::RUSTFS_CA_CERT);
if !ca.exists() {
return None;
}
let der_list = rustfs_utils::load_cert_bundle_der_bytes(ca.to_str().unwrap_or_default()).ok()?;
let mut store = rustls::RootCertStore::empty();
for der in der_list {
if let Err(e) = store.add(der.into()) {
warn!("Warning: failed to add certificate from '{}' to root store: {e}", ca.display());
}
}
Some(store)
}
impl TransitionClient {
pub async fn new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
let clnt = Self::private_new(endpoint, opts, tier_type).await?;
@@ -142,18 +161,22 @@ impl TransitionClient {
async fn private_new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
let endpoint_url = get_endpoint_url(endpoint, opts.secure)?;
//#[cfg(feature = "ring")]
let _ = rustls::crypto::ring::default_provider().install_default();
//#[cfg(feature = "aws-lc-rs")]
// let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
let scheme = endpoint_url.scheme();
let client;
let tls = rustls::ClientConfig::builder().with_native_roots()?.with_no_client_auth();
let tls = if let Some(store) = load_root_store_from_tls_path() {
rustls::ClientConfig::builder()
.with_root_certificates(store)
.with_no_client_auth()
} else {
rustls::ClientConfig::builder().with_native_roots()?.with_no_client_auth()
};
let https = hyper_rustls::HttpsConnectorBuilder::new()
.with_tls_config(tls)
.https_or_http()
.enable_http1()
.enable_http2()
.build();
client = Client::builder(TokioExecutor::new()).build(https);
@@ -291,7 +314,12 @@ impl TransitionClient {
//if self.is_trace_enabled && !(self.trace_errors_only && resp.status() == StatusCode::OK) {
if resp.status() != StatusCode::OK {
//self.dump_http(&cloned_req, &resp)?;
let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
let b = resp
.body_mut()
.store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
.await
.unwrap()
.to_vec();
warn!("err_body: {}", String::from_utf8(b).unwrap());
}
@@ -334,7 +362,12 @@ impl TransitionClient {
}
}
let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
let b = resp
.body_mut()
.store_all_limited(MAX_S3_CLIENT_RESPONSE_SIZE)
.await
.unwrap()
.to_vec();
let mut err_response = http_resp_to_error_response(&resp, b.clone(), &metadata.bucket_name, &metadata.object_name);
err_response.message = format!("remote tier error: {}", err_response.message);

View File

@@ -14,7 +14,7 @@
use crate::config::{KV, KVS};
use rustfs_config::{
COMMENT_KEY, DEFAULT_DIR, DEFAULT_LIMIT, ENABLE_KEY, EnableState, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD,
COMMENT_KEY, DEFAULT_LIMIT, ENABLE_KEY, EVENT_DEFAULT_DIR, EnableState, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD,
MQTT_QOS, MQTT_QUEUE_DIR, MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN,
WEBHOOK_BATCH_SIZE, WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_HTTP_TIMEOUT, WEBHOOK_MAX_RETRY,
WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_LIMIT, WEBHOOK_RETRY_INTERVAL,
@@ -63,7 +63,7 @@ pub static DEFAULT_AUDIT_WEBHOOK_KVS: LazyLock<KVS> = LazyLock::new(|| {
},
KV {
key: WEBHOOK_QUEUE_DIR.to_owned(),
value: DEFAULT_DIR.to_owned(),
value: EVENT_DEFAULT_DIR.to_owned(),
hidden_if_empty: false,
},
KV {
@@ -131,7 +131,7 @@ pub static DEFAULT_AUDIT_MQTT_KVS: LazyLock<KVS> = LazyLock::new(|| {
},
KV {
key: MQTT_QUEUE_DIR.to_owned(),
value: DEFAULT_DIR.to_owned(),
value: EVENT_DEFAULT_DIR.to_owned(),
hidden_if_empty: false,
},
KV {

View File

@@ -14,7 +14,7 @@
use crate::config::{KV, KVS};
use rustfs_config::{
COMMENT_KEY, DEFAULT_DIR, DEFAULT_LIMIT, ENABLE_KEY, EnableState, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD,
COMMENT_KEY, DEFAULT_LIMIT, ENABLE_KEY, EVENT_DEFAULT_DIR, EnableState, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD,
MQTT_QOS, MQTT_QUEUE_DIR, MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN,
WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_LIMIT,
};
@@ -47,7 +47,7 @@ pub static DEFAULT_NOTIFY_WEBHOOK_KVS: LazyLock<KVS> = LazyLock::new(|| {
},
KV {
key: WEBHOOK_QUEUE_DIR.to_owned(),
value: DEFAULT_DIR.to_owned(),
value: EVENT_DEFAULT_DIR.to_owned(),
hidden_if_empty: false,
},
KV {
@@ -114,7 +114,7 @@ pub static DEFAULT_NOTIFY_MQTT_KVS: LazyLock<KVS> = LazyLock::new(|| {
},
KV {
key: MQTT_QUEUE_DIR.to_owned(),
value: DEFAULT_DIR.to_owned(),
value: EVENT_DEFAULT_DIR.to_owned(),
hidden_if_empty: false,
},
KV {

View File

@@ -0,0 +1,770 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::disk::{
CheckPartsResp, DeleteOptions, DiskAPI, DiskError, DiskInfo, DiskInfoOptions, DiskLocation, Endpoint, Error,
FileInfoVersions, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, Result, UpdateMetadataOpts, VolumeInfo,
WalkDirOptions, local::LocalDisk,
};
use bytes::Bytes;
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
use rustfs_utils::string::parse_bool_with_default;
use std::{
path::PathBuf,
sync::{
Arc,
atomic::{AtomicI64, AtomicU32, Ordering},
},
time::Duration,
};
use tokio::{sync::RwLock, time};
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};
use uuid::Uuid;
/// Disk health status constants
const DISK_HEALTH_OK: u32 = 0;
const DISK_HEALTH_FAULTY: u32 = 1;
pub const ENV_RUSTFS_DRIVE_ACTIVE_MONITORING: &str = "RUSTFS_DRIVE_ACTIVE_MONITORING";
pub const ENV_RUSTFS_DRIVE_MAX_TIMEOUT_DURATION: &str = "RUSTFS_DRIVE_MAX_TIMEOUT_DURATION";
pub const CHECK_EVERY: Duration = Duration::from_secs(15);
pub const SKIP_IF_SUCCESS_BEFORE: Duration = Duration::from_secs(5);
pub const CHECK_TIMEOUT_DURATION: Duration = Duration::from_secs(5);
lazy_static::lazy_static! {
static ref TEST_OBJ: String = format!("health-check-{}", Uuid::new_v4());
static ref TEST_DATA: Bytes = Bytes::from(vec![42u8; 2048]);
static ref TEST_BUCKET: String = ".rustfs.sys/tmp".to_string();
}
pub fn get_max_timeout_duration() -> Duration {
std::env::var(ENV_RUSTFS_DRIVE_MAX_TIMEOUT_DURATION)
.map(|v| Duration::from_secs(v.parse::<u64>().unwrap_or(30)))
.unwrap_or(Duration::from_secs(30))
}
/// DiskHealthTracker tracks the health status of a disk.
/// Similar to Go's diskHealthTracker.
#[derive(Debug)]
pub struct DiskHealthTracker {
/// Atomic timestamp of last successful operation
pub last_success: AtomicI64,
/// Atomic timestamp of last operation start
pub last_started: AtomicI64,
/// Atomic disk status (OK or Faulty)
pub status: AtomicU32,
/// Atomic number of waiting operations
pub waiting: AtomicU32,
}
impl DiskHealthTracker {
/// Create a new disk health tracker
pub fn new() -> Self {
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
Self {
last_success: AtomicI64::new(now),
last_started: AtomicI64::new(now),
status: AtomicU32::new(DISK_HEALTH_OK),
waiting: AtomicU32::new(0),
}
}
/// Log a successful operation
pub fn log_success(&self) {
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
self.last_success.store(now, Ordering::Relaxed);
}
/// Check if disk is faulty
pub fn is_faulty(&self) -> bool {
self.status.load(Ordering::Relaxed) == DISK_HEALTH_FAULTY
}
/// Set disk as faulty
pub fn set_faulty(&self) {
self.status.store(DISK_HEALTH_FAULTY, Ordering::Relaxed);
}
/// Set disk as OK
pub fn set_ok(&self) {
self.status.store(DISK_HEALTH_OK, Ordering::Relaxed);
}
pub fn swap_ok_to_faulty(&self) -> bool {
self.status
.compare_exchange(DISK_HEALTH_OK, DISK_HEALTH_FAULTY, Ordering::Relaxed, Ordering::Relaxed)
.is_ok()
}
/// Increment waiting operations counter
pub fn increment_waiting(&self) {
self.waiting.fetch_add(1, Ordering::Relaxed);
}
/// Decrement waiting operations counter
pub fn decrement_waiting(&self) {
self.waiting.fetch_sub(1, Ordering::Relaxed);
}
/// Get waiting operations count
pub fn waiting_count(&self) -> u32 {
self.waiting.load(Ordering::Relaxed)
}
/// Get last success timestamp
pub fn last_success(&self) -> i64 {
self.last_success.load(Ordering::Relaxed)
}
}
impl Default for DiskHealthTracker {
fn default() -> Self {
Self::new()
}
}
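One property worth calling out: swap_ok_to_faulty uses compare_exchange, so only the first failed health check wins the OK-to-FAULTY transition and spawns the recovery monitor used later in this file; subsequent failures are no-ops until set_ok re-arms it. A small hedged illustration (test-style, not part of the commit):
// Hedged illustration of the single-shot OK -> FAULTY transition.
fn demo_single_shot_transition() {
    let tracker = DiskHealthTracker::new();
    assert!(tracker.swap_ok_to_faulty());  // first failure flips the status
    assert!(!tracker.swap_ok_to_faulty()); // later failures see FAULTY and back off
    tracker.set_ok();                      // recovery re-arms the transition
    assert!(tracker.swap_ok_to_faulty());
}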
/// Health check context key for tracking disk operations
#[derive(Debug, Clone)]
struct HealthDiskCtxKey;
#[derive(Debug)]
struct HealthDiskCtxValue {
last_success: Arc<AtomicI64>,
}
impl HealthDiskCtxValue {
fn log_success(&self) {
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
self.last_success.store(now, Ordering::Relaxed);
}
}
/// LocalDiskWrapper wraps a DiskStore with health tracking capabilities.
/// This is similar to Go's xlStorageDiskIDCheck.
#[derive(Debug, Clone)]
pub struct LocalDiskWrapper {
/// The underlying disk store
disk: Arc<LocalDisk>,
/// Health tracker
health: Arc<DiskHealthTracker>,
/// Whether health checking is enabled
health_check: bool,
/// Cancellation token for monitoring tasks
cancel_token: CancellationToken,
/// Disk ID for stale checking
disk_id: Arc<RwLock<Option<Uuid>>>,
}
impl LocalDiskWrapper {
/// Create a new LocalDiskWrapper
pub fn new(disk: Arc<LocalDisk>, health_check: bool) -> Self {
// Check the RUSTFS_DRIVE_ACTIVE_MONITORING override; it defaults to true when unset.
// Monitoring is enabled only when both the `health_check` parameter and the env var are true.
let env_health_check = std::env::var(ENV_RUSTFS_DRIVE_ACTIVE_MONITORING)
.map(|v| parse_bool_with_default(&v, true))
.unwrap_or(true);
let ret = Self {
disk,
health: Arc::new(DiskHealthTracker::new()),
health_check: health_check && env_health_check,
cancel_token: CancellationToken::new(),
disk_id: Arc::new(RwLock::new(None)),
};
ret.start_monitoring();
ret
}
pub fn get_disk(&self) -> Arc<LocalDisk> {
self.disk.clone()
}
/// Start the disk monitoring if health_check is enabled
pub fn start_monitoring(&self) {
if self.health_check {
let health = Arc::clone(&self.health);
let cancel_token = self.cancel_token.clone();
let disk = Arc::clone(&self.disk);
tokio::spawn(async move {
Self::monitor_disk_writable(disk, health, cancel_token).await;
});
}
}
/// Stop the disk monitoring
pub async fn stop_monitoring(&self) {
self.cancel_token.cancel();
}
/// Monitor disk writability periodically
async fn monitor_disk_writable(disk: Arc<LocalDisk>, health: Arc<DiskHealthTracker>, cancel_token: CancellationToken) {
// TODO: config interval
let mut interval = time::interval(CHECK_EVERY);
loop {
tokio::select! {
_ = cancel_token.cancelled() => {
return;
}
_ = interval.tick() => {
if cancel_token.is_cancelled() {
return;
}
if health.status.load(Ordering::Relaxed) != DISK_HEALTH_OK {
continue;
}
let last_success_nanos = health.last_success.load(Ordering::Relaxed);
let elapsed = Duration::from_nanos(
(std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64 - last_success_nanos) as u64
);
if elapsed < SKIP_IF_SUCCESS_BEFORE {
continue;
}
tokio::time::sleep(Duration::from_secs(1)).await;
debug!("health check: performing health check");
if Self::perform_health_check(disk.clone(), &TEST_BUCKET, &TEST_OBJ, &TEST_DATA, true, CHECK_TIMEOUT_DURATION).await.is_err() && health.swap_ok_to_faulty() {
// Health check failed, disk is considered faulty
health.increment_waiting(); // Hold a waiting slot while the disk is offline; monitor_disk_status releases it on recovery
let health_clone = Arc::clone(&health);
let disk_clone = disk.clone();
let cancel_clone = cancel_token.clone();
tokio::spawn(async move {
Self::monitor_disk_status(disk_clone, health_clone, cancel_clone).await;
});
}
}
}
}
}
/// Perform a health check by writing and reading a test file
async fn perform_health_check(
disk: Arc<LocalDisk>,
test_bucket: &str,
test_filename: &str,
test_data: &Bytes,
check_faulty_only: bool,
timeout_duration: Duration,
) -> Result<()> {
// Perform health check with timeout
let health_check_result = tokio::time::timeout(timeout_duration, async {
// Try to write test data
disk.write_all(test_bucket, test_filename, test_data.clone()).await?;
// Try to read back the data
let read_data = disk.read_all(test_bucket, test_filename).await?;
// Verify data integrity
if read_data.len() != test_data.len() {
warn!(
"health check: test file data length mismatch: expected {} bytes, got {}",
test_data.len(),
read_data.len()
);
if check_faulty_only {
return Ok(());
}
return Err(DiskError::FaultyDisk);
}
// Clean up
disk.delete(
test_bucket,
test_filename,
DeleteOptions {
recursive: false,
immediate: false,
undo_write: false,
old_data_dir: None,
},
)
.await?;
Ok(())
})
.await;
match health_check_result {
Ok(result) => match result {
Ok(()) => Ok(()),
Err(e) => {
debug!("health check: failed: {:?}", e);
if e == DiskError::FaultyDisk {
return Err(e);
}
if check_faulty_only { Ok(()) } else { Err(e) }
}
},
Err(_) => {
// Timeout occurred
warn!("health check: timeout after {:?}", timeout_duration);
Err(DiskError::FaultyDisk)
}
}
}
/// Monitor disk status and try to bring it back online
async fn monitor_disk_status(disk: Arc<LocalDisk>, health: Arc<DiskHealthTracker>, cancel_token: CancellationToken) {
const CHECK_EVERY: Duration = Duration::from_secs(5);
let mut interval = time::interval(CHECK_EVERY);
loop {
tokio::select! {
_ = cancel_token.cancelled() => {
return;
}
_ = interval.tick() => {
if cancel_token.is_cancelled() {
return;
}
match Self::perform_health_check(disk.clone(), &TEST_BUCKET, &TEST_OBJ, &TEST_DATA, false, CHECK_TIMEOUT_DURATION).await {
Ok(_) => {
info!("Disk {} is back online", disk.to_string());
health.set_ok();
health.decrement_waiting();
return;
}
Err(e) => {
warn!("Disk {} still faulty: {:?}", disk.to_string(), e);
}
}
}
}
}
}
async fn check_id(&self, want_id: Option<Uuid>) -> Result<()> {
if want_id.is_none() {
return Ok(());
}
let stored_disk_id = self.disk.get_disk_id().await?;
if stored_disk_id != want_id {
return Err(Error::other(format!("Disk ID mismatch: wanted {:?}, got {:?}", want_id, stored_disk_id)));
}
Ok(())
}
/// Check if disk ID is stale
async fn check_disk_stale(&self) -> Result<()> {
let Some(current_disk_id) = *self.disk_id.read().await else {
return Ok(());
};
let stored_disk_id = match self.disk.get_disk_id().await? {
Some(id) => id,
None => return Ok(()), // Empty disk ID is allowed during initialization
};
if current_disk_id != stored_disk_id {
return Err(DiskError::DiskNotFound);
}
Ok(())
}
/// Set the disk ID
pub async fn set_disk_id_internal(&self, id: Option<Uuid>) -> Result<()> {
let mut disk_id = self.disk_id.write().await;
*disk_id = id;
Ok(())
}
/// Get the current disk ID
pub async fn get_current_disk_id(&self) -> Option<Uuid> {
*self.disk_id.read().await
}
/// Track disk health for an operation.
/// This method should wrap disk operations to ensure health checking.
pub async fn track_disk_health<T, F, Fut>(&self, operation: F, timeout_duration: Duration) -> Result<T>
where
F: FnOnce() -> Fut,
Fut: std::future::Future<Output = Result<T>>,
{
// Check if disk is faulty
if self.health.is_faulty() {
warn!("disk {} health is faulty, returning error", self.to_string());
return Err(DiskError::FaultyDisk);
}
// Check if disk is stale
self.check_disk_stale().await?;
// Record operation start
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
self.health.last_started.store(now, Ordering::Relaxed);
self.health.increment_waiting();
if timeout_duration == Duration::ZERO {
let result = operation().await;
self.health.decrement_waiting();
if result.is_ok() {
self.health.log_success();
}
return result;
}
// Execute the operation with timeout
let result = tokio::time::timeout(timeout_duration, operation()).await;
match result {
Ok(operation_result) => {
// Log success and decrement waiting counter
if operation_result.is_ok() {
self.health.log_success();
}
self.health.decrement_waiting();
operation_result
}
Err(_) => {
// Timeout occurred: release the waiting slot and surface a timeout error
self.health.decrement_waiting();
warn!("disk operation timeout after {:?}", timeout_duration);
Err(DiskError::other(format!("disk operation timeout after {:?}", timeout_duration)))
}
}
}
}
#[async_trait::async_trait]
impl DiskAPI for LocalDiskWrapper {
fn to_string(&self) -> String {
self.disk.to_string()
}
async fn is_online(&self) -> bool {
let Ok(Some(disk_id)) = self.disk.get_disk_id().await else {
return false;
};
let Some(current_disk_id) = *self.disk_id.read().await else {
return false;
};
current_disk_id == disk_id
}
fn is_local(&self) -> bool {
self.disk.is_local()
}
fn host_name(&self) -> String {
self.disk.host_name()
}
fn endpoint(&self) -> Endpoint {
self.disk.endpoint()
}
async fn close(&self) -> Result<()> {
self.stop_monitoring().await;
self.disk.close().await
}
async fn get_disk_id(&self) -> Result<Option<Uuid>> {
self.disk.get_disk_id().await
}
async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()> {
self.set_disk_id_internal(id).await
}
fn path(&self) -> PathBuf {
self.disk.path()
}
fn get_disk_location(&self) -> DiskLocation {
self.disk.get_disk_location()
}
async fn disk_info(&self, opts: &DiskInfoOptions) -> Result<DiskInfo> {
if opts.noop && opts.metrics {
let mut info = DiskInfo::default();
// Add health metrics
info.metrics.total_waiting = self.health.waiting_count();
if self.health.is_faulty() {
return Err(DiskError::FaultyDisk);
}
return Ok(info);
}
if self.health.is_faulty() {
return Err(DiskError::FaultyDisk);
}
let result = self.disk.disk_info(opts).await?;
if let Some(current_disk_id) = *self.disk_id.read().await
&& Some(current_disk_id) != result.id
{
return Err(DiskError::DiskNotFound);
};
Ok(result)
}
async fn make_volume(&self, volume: &str) -> Result<()> {
self.track_disk_health(|| async { self.disk.make_volume(volume).await }, get_max_timeout_duration())
.await
}
async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()> {
self.track_disk_health(|| async { self.disk.make_volumes(volumes).await }, get_max_timeout_duration())
.await
}
async fn list_volumes(&self) -> Result<Vec<VolumeInfo>> {
self.track_disk_health(|| async { self.disk.list_volumes().await }, Duration::ZERO)
.await
}
async fn stat_volume(&self, volume: &str) -> Result<VolumeInfo> {
self.track_disk_health(|| async { self.disk.stat_volume(volume).await }, get_max_timeout_duration())
.await
}
async fn delete_volume(&self, volume: &str) -> Result<()> {
self.track_disk_health(|| async { self.disk.delete_volume(volume).await }, Duration::ZERO)
.await
}
async fn walk_dir<W: tokio::io::AsyncWrite + Unpin + Send>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()> {
self.track_disk_health(|| async { self.disk.walk_dir(opts, wr).await }, Duration::ZERO)
.await
}
async fn delete_version(
&self,
volume: &str,
path: &str,
fi: FileInfo,
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()> {
self.track_disk_health(
|| async { self.disk.delete_version(volume, path, fi, force_del_marker, opts).await },
get_max_timeout_duration(),
)
.await
}
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, opts: DeleteOptions) -> Vec<Option<Error>> {
// Check if disk is faulty before proceeding
if self.health.is_faulty() {
return vec![Some(DiskError::FaultyDisk); versions.len()];
}
// Check if disk is stale
if let Err(e) = self.check_disk_stale().await {
return vec![Some(e); versions.len()];
}
// Record operation start
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
self.health.last_started.store(now, Ordering::Relaxed);
self.health.increment_waiting();
// Execute the operation
let result = self.disk.delete_versions(volume, versions, opts).await;
self.health.decrement_waiting();
let has_err = result.iter().any(|e| e.is_some());
if !has_err {
// Log success only when every delete succeeded
self.health.log_success();
}
result
}
async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
self.track_disk_health(|| async { self.disk.delete_paths(volume, paths).await }, get_max_timeout_duration())
.await
}
async fn write_metadata(&self, org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()> {
self.track_disk_health(
|| async { self.disk.write_metadata(org_volume, volume, path, fi).await },
get_max_timeout_duration(),
)
.await
}
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()> {
self.track_disk_health(
|| async { self.disk.update_metadata(volume, path, fi, opts).await },
get_max_timeout_duration(),
)
.await
}
async fn read_version(
&self,
org_volume: &str,
volume: &str,
path: &str,
version_id: &str,
opts: &ReadOptions,
) -> Result<FileInfo> {
self.track_disk_health(
|| async { self.disk.read_version(org_volume, volume, path, version_id, opts).await },
get_max_timeout_duration(),
)
.await
}
async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result<RawFileInfo> {
self.track_disk_health(|| async { self.disk.read_xl(volume, path, read_data).await }, get_max_timeout_duration())
.await
}
async fn rename_data(
&self,
src_volume: &str,
src_path: &str,
fi: FileInfo,
dst_volume: &str,
dst_path: &str,
) -> Result<RenameDataResp> {
self.track_disk_health(
|| async { self.disk.rename_data(src_volume, src_path, fi, dst_volume, dst_path).await },
get_max_timeout_duration(),
)
.await
}
async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result<Vec<String>> {
self.track_disk_health(
|| async { self.disk.list_dir(origvolume, volume, dir_path, count).await },
get_max_timeout_duration(),
)
.await
}
async fn read_file(&self, volume: &str, path: &str) -> Result<crate::disk::FileReader> {
self.track_disk_health(|| async { self.disk.read_file(volume, path).await }, get_max_timeout_duration())
.await
}
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<crate::disk::FileReader> {
self.track_disk_health(
|| async { self.disk.read_file_stream(volume, path, offset, length).await },
get_max_timeout_duration(),
)
.await
}
async fn append_file(&self, volume: &str, path: &str) -> Result<crate::disk::FileWriter> {
self.track_disk_health(|| async { self.disk.append_file(volume, path).await }, Duration::ZERO)
.await
}
async fn create_file(&self, origvolume: &str, volume: &str, path: &str, file_size: i64) -> Result<crate::disk::FileWriter> {
self.track_disk_health(
|| async { self.disk.create_file(origvolume, volume, path, file_size).await },
Duration::ZERO,
)
.await
}
async fn rename_file(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str) -> Result<()> {
self.track_disk_health(
|| async { self.disk.rename_file(src_volume, src_path, dst_volume, dst_path).await },
get_max_timeout_duration(),
)
.await
}
async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Bytes) -> Result<()> {
self.track_disk_health(
|| async { self.disk.rename_part(src_volume, src_path, dst_volume, dst_path, meta).await },
get_max_timeout_duration(),
)
.await
}
async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()> {
self.track_disk_health(|| async { self.disk.delete(volume, path, opt).await }, get_max_timeout_duration())
.await
}
async fn verify_file(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
self.track_disk_health(|| async { self.disk.verify_file(volume, path, fi).await }, Duration::ZERO)
.await
}
async fn check_parts(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
self.track_disk_health(|| async { self.disk.check_parts(volume, path, fi).await }, Duration::ZERO)
.await
}
async fn read_parts(&self, bucket: &str, paths: &[String]) -> Result<Vec<ObjectPartInfo>> {
self.track_disk_health(|| async { self.disk.read_parts(bucket, paths).await }, Duration::ZERO)
.await
}
async fn read_multiple(&self, req: ReadMultipleReq) -> Result<Vec<ReadMultipleResp>> {
self.track_disk_health(|| async { self.disk.read_multiple(req).await }, Duration::ZERO)
.await
}
async fn write_all(&self, volume: &str, path: &str, data: Bytes) -> Result<()> {
self.track_disk_health(|| async { self.disk.write_all(volume, path, data).await }, get_max_timeout_duration())
.await
}
async fn read_all(&self, volume: &str, path: &str) -> Result<Bytes> {
self.track_disk_health(|| async { self.disk.read_all(volume, path).await }, get_max_timeout_duration())
.await
}
}
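Taken as a whole, LocalDiskWrapper is a drop-in DiskAPI decorator: operations fail fast on a faulty or stale disk and otherwise run under get_max_timeout_duration() (or with no timeout at all for the streaming calls). A hedged usage sketch, assuming an Arc<LocalDisk> already constructed elsewhere in the crate:
// Hedged usage sketch: wrap an existing LocalDisk so every call goes through the
// health tracker defined above. How the LocalDisk itself is constructed is not
// part of this diff.
async fn probe_with_health_tracking(disk: std::sync::Arc<LocalDisk>) -> Result<()> {
    // Passing `true` still honours RUSTFS_DRIVE_ACTIVE_MONITORING: background
    // monitoring starts only if both the parameter and the env var allow it.
    let wrapped = LocalDiskWrapper::new(disk, true);
    // A faulty or stale disk returns DiskError::FaultyDisk / DiskError::DiskNotFound
    // here instead of blocking on the underlying I/O.
    wrapped
        .write_all(".rustfs.sys/tmp", "health-probe", Bytes::from_static(b"ping"))
        .await
}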

View File

@@ -16,7 +16,6 @@
use std::hash::{Hash, Hasher};
use std::io::{self};
use std::path::PathBuf;
use tracing::error;
pub type Error = DiskError;
pub type Result<T> = core::result::Result<T, Error>;

View File

@@ -69,7 +69,7 @@ use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};
use uuid::Uuid;
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct FormatInfo {
pub id: Option<Uuid>,
pub data: Bytes,
@@ -77,16 +77,6 @@ pub struct FormatInfo {
pub last_check: Option<OffsetDateTime>,
}
impl FormatInfo {
pub fn last_check_valid(&self) -> bool {
let now = OffsetDateTime::now_utc();
self.file_info.is_some()
&& self.id.is_some()
&& self.last_check.is_some()
&& (now.unix_timestamp() - self.last_check.unwrap().unix_timestamp() <= 1)
}
}
/// A helper enum to handle internal buffer types for writing data.
pub enum InternalBuf<'a> {
Ref(&'a [u8]),
@@ -185,7 +175,7 @@ impl LocalDisk {
};
let root_clone = root.clone();
let update_fn: UpdateFn<DiskInfo> = Box::new(move || {
let disk_id = id.map_or("".to_string(), |id| id.to_string());
let disk_id = id;
let root = root_clone.clone();
Box::pin(async move {
match get_disk_info(root.clone()).await {
@@ -200,7 +190,7 @@ impl LocalDisk {
minor: info.minor,
fs_type: info.fstype,
root_disk: root,
id: disk_id.to_string(),
id: disk_id,
..Default::default()
};
// if root {
@@ -1295,7 +1285,7 @@ impl DiskAPI for LocalDisk {
}
#[tracing::instrument(skip(self))]
async fn is_online(&self) -> bool {
self.check_format_json().await.is_ok()
true
}
#[tracing::instrument(skip(self))]
@@ -1342,24 +1332,40 @@ impl DiskAPI for LocalDisk {
#[tracing::instrument(level = "debug", skip(self))]
async fn get_disk_id(&self) -> Result<Option<Uuid>> {
let mut format_info = self.format_info.write().await;
let format_info = {
let format_info = self.format_info.read().await;
format_info.clone()
};
let id = format_info.id;
if format_info.last_check_valid() {
return Ok(id);
// if format_info.last_check_valid() {
// return Ok(id);
// }
if format_info.file_info.is_some() && id.is_some() {
// check last check time
if let Some(last_check) = format_info.last_check {
if last_check.unix_timestamp() + 1 < OffsetDateTime::now_utc().unix_timestamp() {
return Ok(id);
}
}
}
let file_meta = self.check_format_json().await?;
if let Some(file_info) = &format_info.file_info {
if super::fs::same_file(&file_meta, file_info) {
let mut format_info = self.format_info.write().await;
format_info.last_check = Some(OffsetDateTime::now_utc());
drop(format_info);
return Ok(id);
}
}
debug!("get_disk_id: read format.json");
let b = fs::read(&self.format_path).await.map_err(to_unformatted_disk_error)?;
let fm = FormatV3::try_from(b.as_slice()).map_err(|e| {
@@ -1375,20 +1381,19 @@ impl DiskAPI for LocalDisk {
return Err(DiskError::InconsistentDisk);
}
let mut format_info = self.format_info.write().await;
format_info.id = Some(disk_id);
format_info.file_info = Some(file_meta);
format_info.data = b.into();
format_info.last_check = Some(OffsetDateTime::now_utc());
drop(format_info);
Ok(Some(disk_id))
}
#[tracing::instrument(skip(self))]
async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()> {
async fn set_disk_id(&self, _id: Option<Uuid>) -> Result<()> {
// No setup is required locally
// TODO: add check_id_store
let mut format_info = self.format_info.write().await;
format_info.id = id;
Ok(())
}
@@ -2438,6 +2443,10 @@ impl DiskAPI for LocalDisk {
info.endpoint = self.endpoint.to_string();
info.scanning = self.scanning.load(Ordering::SeqCst) == 1;
if info.id.is_none() {
info.id = self.get_disk_id().await.unwrap_or(None);
}
Ok(info)
}
}
@@ -2705,39 +2714,6 @@ mod test {
}
}
#[tokio::test]
async fn test_format_info_last_check_valid() {
let now = OffsetDateTime::now_utc();
// Valid format info
let valid_format_info = FormatInfo {
id: Some(Uuid::new_v4()),
data: vec![1, 2, 3].into(),
file_info: Some(fs::metadata("../../../..").await.unwrap()),
last_check: Some(now),
};
assert!(valid_format_info.last_check_valid());
// Invalid format info (missing id)
let invalid_format_info = FormatInfo {
id: None,
data: vec![1, 2, 3].into(),
file_info: Some(fs::metadata("../../../..").await.unwrap()),
last_check: Some(now),
};
assert!(!invalid_format_info.last_check_valid());
// Invalid format info (old timestamp)
let old_time = OffsetDateTime::now_utc() - time::Duration::seconds(10);
let old_format_info = FormatInfo {
id: Some(Uuid::new_v4()),
data: vec![1, 2, 3].into(),
file_info: Some(fs::metadata("../../../..").await.unwrap()),
last_check: Some(old_time),
};
assert!(!old_format_info.last_check_valid());
}
#[tokio::test]
async fn test_read_file_exists() {
let test_file = "./test_read_exists.txt";

Some files were not shown because too many files have changed in this diff