Feat/e2e s3tests (#1120)

Signed-off-by: 安正超 <anzhengchao@gmail.com>
Author: 安正超
Date: 2025-12-11 22:32:07 +08:00
Committed by: GitHub
Parent: 997f54e700
Commit: cb3e496b17
10 changed files with 762 additions and 10 deletions

.github/s3tests/s3tests.conf (new file, vendored, 185 lines)

@@ -0,0 +1,185 @@
# RustFS s3-tests configuration
# Based on: https://github.com/ceph/s3-tests/blob/master/s3tests.conf.SAMPLE
#
# Usage:
# Single-node: S3_HOST=rustfs-single envsubst < s3tests.conf > /tmp/s3tests.conf
# Multi-node: S3_HOST=lb envsubst < s3tests.conf > /tmp/s3tests.conf
[DEFAULT]
## this section is just used for host, port and bucket_prefix
# host set for RustFS - will be substituted via envsubst
host = ${S3_HOST}
# port for RustFS
port = 9000
## say "False" to disable TLS
is_secure = False
## say "False" to disable SSL Verify
ssl_verify = False
[fixtures]
## all the buckets created will start with this prefix;
## {random} will be filled with random characters to pad
## the prefix to 30 characters and avoid collisions
bucket prefix = rustfs-{random}-
# all the iam account resources (users, roles, etc) created
# will start with this name prefix
iam name prefix = s3-tests-
# all the iam account resources (users, roles, etc) created
# will start with this path prefix
iam path prefix = /s3-tests/
[s3 main]
# main display_name
display_name = RustFS Tester
# main user_id
user_id = rustfsadmin
# main email
email = tester@rustfs.local
# zonegroup api_name for bucket location
api_name = default
## main AWS access key
access_key = ${S3_ACCESS_KEY}
## main AWS secret key
secret_key = ${S3_SECRET_KEY}
## replace with key id obtained when secret is created, or delete if KMS not tested
#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef
## Storage classes
#storage_classes = "LUKEWARM, FROZEN"
## Lifecycle debug interval (default: 10)
#lc_debug_interval = 20
## Restore debug interval (default: 100)
#rgw_restore_debug_interval = 60
#rgw_restore_processor_period = 60
[s3 alt]
# alt display_name
display_name = RustFS Alt Tester
## alt email
email = alt@rustfs.local
# alt user_id
user_id = rustfsalt
# alt AWS access key - same credentials for RustFS single-user mode
access_key = ${S3_ACCESS_KEY}
# alt AWS secret key
secret_key = ${S3_SECRET_KEY}
#[s3 cloud]
## to run the test cases with "cloud_transition" for the transition
## attribute and "cloud_restore" for the restore attribute.
## Note: the waiting time may have to be tweaked depending on
## the I/O latency to the cloud endpoint.
## host set for cloud endpoint
# host = localhost
## port set for cloud endpoint
# port = 8001
## say "False" to disable TLS
# is_secure = False
## cloud endpoint credentials
# access_key = 0555b35654ad1656d804
# secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
## storage class configured as cloud tier on local rgw server
# cloud_storage_class = CLOUDTIER
## The options below are optional; they configure the cloud
## storage class defined above.
# retain_head_object = false
# allow_read_through = false # change it to enable read_through
# read_through_restore_days = 2
# target_storage_class = Target_SC
# target_path = cloud-bucket
## another regular storage class to test multiple transition rules,
# storage_class = S1
[s3 tenant]
# tenant display_name
display_name = RustFS Tenant Tester
# tenant user_id
user_id = rustfstenant
# tenant AWS access key
access_key = ${S3_ACCESS_KEY}
# tenant AWS secret key
secret_key = ${S3_SECRET_KEY}
# tenant email
email = tenant@rustfs.local
# tenant name
tenant = testx
# The following section needs to be added for all sts-tests
[iam]
#used for iam operations in sts-tests
#email
email = s3@rustfs.local
#user_id
user_id = rustfsiam
#access_key
access_key = ${S3_ACCESS_KEY}
#secret_key
secret_key = ${S3_SECRET_KEY}
#display_name
display_name = RustFS IAM User
# iam account root user for iam_account tests
[iam root]
access_key = ${S3_ACCESS_KEY}
secret_key = ${S3_SECRET_KEY}
user_id = RGW11111111111111111
email = account1@rustfs.local
# iam account root user in a different account than [iam root]
[iam alt root]
access_key = ${S3_ACCESS_KEY}
secret_key = ${S3_SECRET_KEY}
user_id = RGW22222222222222222
email = account2@rustfs.local
# The following section needs to be added to run the Assume Role With Web Identity tests
[webidentity]
#used for assume role with web identity test in sts-tests
#all parameters will be obtained from ceph/qa/tasks/keycloak.py
#token=<access_token>
#aud=<obtained after introspecting token>
#sub=<obtained after introspecting token>
#azp=<obtained after introspecting token>
#user_token=<access token for a user, with attribute Department=[Engineering, Marketing]>
#thumbprint=<obtained from x509 certificate>
#KC_REALM=<name of the realm>
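
For local debugging, the same file can be generated and exercised outside CI. A minimal sketch, assuming envsubst is installed and a RustFS instance is already listening on localhost:9000 (it mirrors what the e2e-s3tests workflow further below does):

export S3_HOST=localhost
export S3_ACCESS_KEY=rustfsadmin
export S3_SECRET_KEY=rustfsadmin
envsubst < .github/s3tests/s3tests.conf > /tmp/s3tests.conf
git clone --depth 1 https://github.com/ceph/s3-tests.git
cd s3-tests
S3TEST_CONF=/tmp/s3tests.conf tox -- \
  s3tests_boto3/functional/test_s3.py \
  -k 'not lifecycle and not versioning and not website and not logging and not encryption'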

.github/workflows/e2e-mint.yml (new file, vendored, 246 lines)

@@ -0,0 +1,246 @@
name: e2e-mint
on:
  push:
    branches: [main]
    paths:
      - ".github/workflows/e2e-mint.yml"
      - "Dockerfile.source"
      - "rustfs/**"
      - "crates/**"
  workflow_dispatch:
    inputs:
      run-multi:
        description: "Run multi-node Mint as well"
        required: false
        default: "false"
env:
  ACCESS_KEY: rustfsadmin
  SECRET_KEY: rustfsadmin
  RUST_LOG: info
  PLATFORM: linux/amd64
jobs:
  mint-single:
    runs-on: ubuntu-latest
    timeout-minutes: 40
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Enable buildx
        uses: docker/setup-buildx-action@v3
      - name: Build RustFS image (source)
        run: |
          DOCKER_BUILDKIT=1 docker buildx build --load \
            --platform ${PLATFORM} \
            -t rustfs-ci \
            -f Dockerfile.source .
      - name: Create network
        run: |
          docker network inspect rustfs-net >/dev/null 2>&1 || docker network create rustfs-net
      - name: Remove existing rustfs-single (if any)
        run: docker rm -f rustfs-single >/dev/null 2>&1 || true
      - name: Start single RustFS
        run: |
          docker run -d --name rustfs-single \
            --network rustfs-net \
            -e RUSTFS_ADDRESS=0.0.0.0:9000 \
            -e RUSTFS_ACCESS_KEY=$ACCESS_KEY \
            -e RUSTFS_SECRET_KEY=$SECRET_KEY \
            -e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
            -v /tmp/rustfs-single:/data \
            rustfs-ci
      - name: Wait for RustFS ready
        run: |
          for i in {1..30}; do
            if docker exec rustfs-single curl -sf http://localhost:9000/health >/dev/null; then
              exit 0
            fi
            sleep 2
          done
          echo "RustFS did not become ready" >&2
          docker logs rustfs-single || true
          exit 1
      - name: Run Mint (single, S3-only)
        run: |
          mkdir -p artifacts/mint-single
          docker run --rm --network rustfs-net \
            --platform ${PLATFORM} \
            -e SERVER_ENDPOINT=rustfs-single:9000 \
            -e ACCESS_KEY=$ACCESS_KEY \
            -e SECRET_KEY=$SECRET_KEY \
            -e ENABLE_HTTPS=0 \
            -e SERVER_REGION=us-east-1 \
            -e RUN_ON_FAIL=1 \
            -e MINT_MODE=core \
            -v ${GITHUB_WORKSPACE}/artifacts/mint-single:/mint/log \
            --entrypoint /mint/mint.sh \
            minio/mint:edge \
            awscli aws-sdk-go aws-sdk-java-v2 aws-sdk-php aws-sdk-ruby s3cmd s3select
      - name: Collect RustFS logs
        run: |
          mkdir -p artifacts/rustfs-single
          docker logs rustfs-single > artifacts/rustfs-single/rustfs.log || true
      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: mint-single
          path: artifacts/**
  mint-multi:
    if: github.event_name == 'workflow_dispatch' && github.event.inputs.run-multi == 'true'
    needs: mint-single
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Enable buildx
        uses: docker/setup-buildx-action@v3
      - name: Build RustFS image (source)
        run: |
          DOCKER_BUILDKIT=1 docker buildx build --load \
            --platform ${PLATFORM} \
            -t rustfs-ci \
            -f Dockerfile.source .
      - name: Prepare cluster compose
        run: |
          cat > compose.yml <<'EOF'
          version: '3.8'
          services:
            rustfs1:
              image: rustfs-ci
              hostname: rustfs1
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs1-data:/data
            rustfs2:
              image: rustfs-ci
              hostname: rustfs2
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs2-data:/data
            rustfs3:
              image: rustfs-ci
              hostname: rustfs3
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs3-data:/data
            rustfs4:
              image: rustfs-ci
              hostname: rustfs4
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs4-data:/data
            lb:
              image: haproxy:2.9
              hostname: lb
              networks: [rustfs-net]
              ports:
                - "9000:9000"
              volumes:
                - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
          networks:
            rustfs-net:
              name: rustfs-net
          volumes:
            rustfs1-data:
            rustfs2-data:
            rustfs3-data:
            rustfs4-data:
          EOF
          cat > haproxy.cfg <<'EOF'
          defaults
              mode http
              timeout connect 5s
              timeout client 30s
              timeout server 30s
          frontend fe_s3
              bind *:9000
              default_backend be_s3
          backend be_s3
              balance roundrobin
              server s1 rustfs1:9000 check
              server s2 rustfs2:9000 check
              server s3 rustfs3:9000 check
              server s4 rustfs4:9000 check
          EOF
      - name: Launch cluster
        run: docker compose -f compose.yml up -d
      - name: Wait for LB ready
        run: |
          for i in {1..60}; do
            if docker run --rm --network rustfs-net curlimages/curl -sf http://lb:9000/health >/dev/null; then
              exit 0
            fi
            sleep 2
          done
          echo "LB or backend not ready" >&2
          docker compose -f compose.yml logs --tail=200 || true
          exit 1
      - name: Run Mint (multi, S3-only)
        run: |
          mkdir -p artifacts/mint-multi
          docker run --rm --network rustfs-net \
            --platform ${PLATFORM} \
            -e SERVER_ENDPOINT=lb:9000 \
            -e ACCESS_KEY=$ACCESS_KEY \
            -e SECRET_KEY=$SECRET_KEY \
            -e ENABLE_HTTPS=0 \
            -e SERVER_REGION=us-east-1 \
            -e RUN_ON_FAIL=1 \
            -e MINT_MODE=core \
            -v ${GITHUB_WORKSPACE}/artifacts/mint-multi:/mint/log \
            --entrypoint /mint/mint.sh \
            minio/mint:edge \
            awscli aws-sdk-go aws-sdk-java-v2 aws-sdk-php aws-sdk-ruby s3cmd s3select
      - name: Collect logs
        run: |
          mkdir -p artifacts/cluster
          docker compose -f compose.yml logs --no-color > artifacts/cluster/cluster.log || true
      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: mint-multi
          path: artifacts/**
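
Both Mint jobs can be reproduced locally with plain Docker. A rough sketch, assuming the rustfs-ci image has already been built from Dockerfile.source (container and network names follow the workflow above):

docker network create rustfs-net 2>/dev/null || true
docker run -d --name rustfs-single --network rustfs-net \
  -e RUSTFS_ADDRESS=0.0.0.0:9000 \
  -e RUSTFS_ACCESS_KEY=rustfsadmin \
  -e RUSTFS_SECRET_KEY=rustfsadmin \
  -e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
  rustfs-ci
docker run --rm --network rustfs-net \
  -e SERVER_ENDPOINT=rustfs-single:9000 \
  -e ACCESS_KEY=rustfsadmin -e SECRET_KEY=rustfsadmin \
  -e ENABLE_HTTPS=0 -e MINT_MODE=core \
  --entrypoint /mint/mint.sh \
  minio/mint:edge awscli s3cmd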

.github/workflows/e2e-s3tests.yml (new file, vendored, 296 lines)

@@ -0,0 +1,296 @@
name: e2e-s3tests
on:
  push:
    branches: [main]
    paths:
      - ".github/workflows/e2e-s3tests.yml"
      - ".github/s3tests/**"
      - "Dockerfile.source"
      - "entrypoint.sh"
      - "rustfs/**"
      - "crates/**"
  workflow_dispatch:
    inputs:
      run-multi:
        description: "Run multi-node s3-tests as well"
        required: false
        default: "false"
env:
  S3_ACCESS_KEY: rustfsadmin
  S3_SECRET_KEY: rustfsadmin
  RUST_LOG: info
  PLATFORM: linux/amd64
defaults:
  run:
    shell: bash
jobs:
  s3tests-single:
    runs-on: ubuntu-latest
    timeout-minutes: 45
    steps:
      - uses: actions/checkout@v4
      - name: Enable buildx
        uses: docker/setup-buildx-action@v3
      - name: Build RustFS image (source)
        run: |
          DOCKER_BUILDKIT=1 docker buildx build --load \
            --platform ${PLATFORM} \
            -t rustfs-ci \
            -f Dockerfile.source .
      - name: Create network
        run: docker network inspect rustfs-net >/dev/null 2>&1 || docker network create rustfs-net
      - name: Remove existing rustfs-single (if any)
        run: docker rm -f rustfs-single >/dev/null 2>&1 || true
      - name: Start single RustFS
        run: |
          docker run -d --name rustfs-single \
            --network rustfs-net \
            -e RUSTFS_ADDRESS=0.0.0.0:9000 \
            -e RUSTFS_ACCESS_KEY=$S3_ACCESS_KEY \
            -e RUSTFS_SECRET_KEY=$S3_SECRET_KEY \
            -e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
            -v /tmp/rustfs-single:/data \
            rustfs-ci
      - name: Wait for RustFS ready
        run: |
          for i in {1..30}; do
            if docker run --rm --network rustfs-net curlimages/curl:latest \
                -sf http://rustfs-single:9000/health >/dev/null 2>&1; then
              echo "RustFS is ready"
              exit 0
            fi
            if [ "$(docker inspect -f '{{.State.Running}}' rustfs-single 2>/dev/null)" != "true" ]; then
              echo "RustFS container not running" >&2
              docker logs rustfs-single || true
              exit 1
            fi
            sleep 2
          done
          echo "Health check failed; container is running, proceeding with caution" >&2
          docker logs rustfs-single || true
      - name: Prepare s3-tests
        run: |
          python3 -m pip install --user --upgrade pip tox
          export PATH="$HOME/.local/bin:$PATH"
          git clone --depth 1 https://github.com/ceph/s3-tests.git s3-tests
      - name: Generate s3tests config
        run: |
          export S3_HOST=rustfs-single
          envsubst < .github/s3tests/s3tests.conf > s3tests.conf
          echo "Generated s3tests.conf:"
          cat s3tests.conf
      - name: Run ceph s3-tests (S3-compatible subset)
        run: |
          export PATH="$HOME/.local/bin:$PATH"
          mkdir -p artifacts/s3tests-single
          cd s3-tests
          # Check available test directories
          echo "Available test directories:"
          ls -la s3tests*/functional/ 2>/dev/null || echo "No s3tests directories found"
          # Use s3tests_boto3 if available, fall back to s3tests
          if [ -f "s3tests_boto3/functional/test_s3.py" ]; then
            TEST_FILE="s3tests_boto3/functional/test_s3.py"
          else
            TEST_FILE="s3tests/functional/test_s3.py"
          fi
          echo "Using test file: $TEST_FILE"
          S3TEST_CONF=${GITHUB_WORKSPACE}/s3tests.conf \
            tox -- \
              -v \
              --tb=short \
              --junitxml=${GITHUB_WORKSPACE}/artifacts/s3tests-single/junit.xml \
              "$TEST_FILE" \
              -k 'not lifecycle and not versioning and not website and not logging and not encryption'
      - name: Collect RustFS logs
        if: always()
        run: |
          mkdir -p artifacts/rustfs-single
          docker logs rustfs-single > artifacts/rustfs-single/rustfs.log 2>&1 || true
      - name: Upload artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: s3tests-single
          path: artifacts/**
  s3tests-multi:
    if: github.event_name == 'workflow_dispatch' && github.event.inputs.run-multi == 'true'
    needs: s3tests-single
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v4
      - name: Enable buildx
        uses: docker/setup-buildx-action@v3
      - name: Build RustFS image (source)
        run: |
          DOCKER_BUILDKIT=1 docker buildx build --load \
            --platform ${PLATFORM} \
            -t rustfs-ci \
            -f Dockerfile.source .
      - name: Prepare cluster compose
        run: |
          cat > compose.yml <<'EOF'
          services:
            rustfs1:
              image: rustfs-ci
              hostname: rustfs1
              networks: [rustfs-net]
              environment:
                RUSTFS_ADDRESS: "0.0.0.0:9000"
                RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
                RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
                RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
              volumes:
                - rustfs1-data:/data
            rustfs2:
              image: rustfs-ci
              hostname: rustfs2
              networks: [rustfs-net]
              environment:
                RUSTFS_ADDRESS: "0.0.0.0:9000"
                RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
                RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
                RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
              volumes:
                - rustfs2-data:/data
            rustfs3:
              image: rustfs-ci
              hostname: rustfs3
              networks: [rustfs-net]
              environment:
                RUSTFS_ADDRESS: "0.0.0.0:9000"
                RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
                RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
                RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
              volumes:
                - rustfs3-data:/data
            rustfs4:
              image: rustfs-ci
              hostname: rustfs4
              networks: [rustfs-net]
              environment:
                RUSTFS_ADDRESS: "0.0.0.0:9000"
                RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
                RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
                RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
              volumes:
                - rustfs4-data:/data
            lb:
              image: haproxy:2.9
              hostname: lb
              networks: [rustfs-net]
              ports:
                - "9000:9000"
              volumes:
                - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
          networks:
            rustfs-net:
              name: rustfs-net
          volumes:
            rustfs1-data:
            rustfs2-data:
            rustfs3-data:
            rustfs4-data:
          EOF
          cat > haproxy.cfg <<'EOF'
          defaults
              mode http
              timeout connect 5s
              timeout client 30s
              timeout server 30s
          frontend fe_s3
              bind *:9000
              default_backend be_s3
          backend be_s3
              balance roundrobin
              server s1 rustfs1:9000 check
              server s2 rustfs2:9000 check
              server s3 rustfs3:9000 check
              server s4 rustfs4:9000 check
          EOF
      - name: Launch cluster
        run: docker compose -f compose.yml up -d
      - name: Wait for LB ready
        run: |
          for i in {1..60}; do
            if docker run --rm --network rustfs-net curlimages/curl \
                -sf http://lb:9000/health >/dev/null 2>&1; then
              echo "Load balancer is ready"
              exit 0
            fi
            sleep 2
          done
          echo "LB or backend not ready" >&2
          docker compose -f compose.yml logs --tail=200 || true
          exit 1
      - name: Generate s3tests config
        run: |
          export S3_HOST=lb
          envsubst < .github/s3tests/s3tests.conf > s3tests.conf
          echo "Generated s3tests.conf:"
          cat s3tests.conf
      - name: Run ceph s3-tests (multi, S3-compatible subset)
        run: |
          mkdir -p artifacts/s3tests-multi
          docker run --rm --network rustfs-net \
            --platform ${PLATFORM} \
            -e S3TEST_CONF=/tmp/s3tests.conf \
            -v ${GITHUB_WORKSPACE}/s3tests.conf:/tmp/s3tests.conf:ro \
            -v ${GITHUB_WORKSPACE}/artifacts/s3tests-multi:/mnt/logs \
            quay.io/ceph/s3-tests:latest \
            bash -c '
              if [ -f "s3tests_boto3/functional/test_s3.py" ]; then
                TEST_FILE="s3tests_boto3/functional/test_s3.py"
              else
                TEST_FILE="s3tests/functional/test_s3.py"
              fi
              echo "Using test file: $TEST_FILE"
              pytest -v --tb=short \
                --junitxml=/mnt/logs/junit.xml \
                "$TEST_FILE" \
                -k "not lifecycle and not versioning and not website and not logging and not encryption"
            '
      - name: Collect logs
        if: always()
        run: |
          mkdir -p artifacts/cluster
          docker compose -f compose.yml logs --no-color > artifacts/cluster/cluster.log 2>&1 || true
      - name: Upload artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: s3tests-multi
          path: artifacts/**

.gitignore (vendored, 8 lines changed)

@@ -23,4 +23,10 @@ profile.json
*.go
*.pb
*.svg
deploy/logs/*.log.*
deploy/logs/*.log.*
# s3-tests local artifacts (root directory only)
/s3-tests/
/s3-tests-local/
/s3tests.conf
/s3tests.conf.*


@@ -2,6 +2,7 @@
## Communication Rules
- Respond to the user in Chinese; use English in all other contexts.
- Code and documentation must be written in English only. Chinese text is allowed solely as test data/fixtures when a case explicitly requires Chinese-language content for validation.
## Project Structure & Module Organization
The workspace root hosts shared dependencies in `Cargo.toml`. The service binary lives under `rustfs/src/main.rs`, while reusable crates sit in `crates/` (`crypto`, `iam`, `kms`, and `e2e_test`). Local fixtures for standalone flows reside in `test_standalone/`, deployment manifests are under `deploy/`, Docker assets sit at the root, and automation lives in `scripts/`. Skim each crate's README or module docs before contributing changes.


@@ -39,7 +39,9 @@ RUN set -eux; \
    libssl-dev \
    lld \
    protobuf-compiler \
    flatbuffers-compiler; \
    flatbuffers-compiler \
    gcc-aarch64-linux-gnu \
    gcc-x86-64-linux-gnu; \
    rm -rf /var/lib/apt/lists/*

# Optional: cross toolchain for aarch64 (only when targeting linux/arm64)
@@ -51,18 +53,18 @@ RUN set -eux; \
    rm -rf /var/lib/apt/lists/*; \
    fi

# Add Rust targets based on TARGETPLATFORM
# Add Rust targets for both arches (to support cross-builds on multi-arch runners)
RUN set -eux; \
    case "${TARGETPLATFORM:-linux/amd64}" in \
        linux/amd64) rustup target add x86_64-unknown-linux-gnu ;; \
        linux/arm64) rustup target add aarch64-unknown-linux-gnu ;; \
        *) echo "Unsupported TARGETPLATFORM=${TARGETPLATFORM}" >&2; exit 1 ;; \
    esac
    rustup target add x86_64-unknown-linux-gnu aarch64-unknown-linux-gnu; \
    rustup component add rust-std-x86_64-unknown-linux-gnu rust-std-aarch64-unknown-linux-gnu

# Cross-compilation environment (used only when targeting aarch64)
ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc
ENV CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=x86_64-linux-gnu-gcc
ENV CC_x86_64_unknown_linux_gnu=x86_64-linux-gnu-gcc
ENV CXX_x86_64_unknown_linux_gnu=x86_64-linux-gnu-g++

WORKDIR /usr/src/rustfs
@@ -73,7 +75,6 @@ COPY Cargo.toml Cargo.lock ./
COPY rustfs/Cargo.toml rustfs/Cargo.toml
COPY crates/*/Cargo.toml crates/
# Pre-fetch dependencies for better caching
RUN --mount=type=cache,target=/usr/local/cargo/registry \
    --mount=type=cache,target=/usr/local/cargo/git \
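
With both GCC cross toolchains and both Rust std targets installed unconditionally, one builder stage can serve either platform. An illustrative pair of invocations (the tags are hypothetical; the TARGETPLATFORM values are the ones the Dockerfile already handles):

docker buildx build --platform linux/amd64 -t rustfs-ci:amd64 -f Dockerfile.source .
docker buildx build --platform linux/arm64 -t rustfs-ci:arm64 -f Dockerfile.source .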


@@ -13,6 +13,8 @@ elif [ "${1#-}" != "$1" ]; then
elif [ "$1" = "rustfs" ]; then
shift
set -- /usr/bin/rustfs "$@"
elif [ "$1" = "/usr/bin/rustfs" ]; then
: # already normalized
elif [ "$1" = "cargo" ]; then
: # Pass through cargo command as-is
else
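
After this normalization the following invocations all reach the same binary (a sketch; the flag shown is a placeholder, not a documented rustfs option):

docker run rustfs-ci rustfs             # shorthand, rewritten to /usr/bin/rustfs
docker run rustfs-ci /usr/bin/rustfs    # absolute path, passed through unchanged
docker run rustfs-ci --some-flag        # leading dash, handled by the "${1#-}" branch above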


@@ -1650,13 +1650,18 @@ pub fn get_concurrency_manager() -> &'static ConcurrencyManager {
&CONCURRENCY_MANAGER
}
/// Testing helper to reset the global request counter.
pub(crate) fn reset_active_get_requests() {
ACTIVE_GET_REQUESTS.store(0, Ordering::Relaxed);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_concurrent_request_tracking() {
// Ensure we start from a clean state
reset_active_get_requests();
assert_eq!(GetObjectGuard::concurrent_requests(), 0);
let _guard1 = GetObjectGuard::new();


@@ -532,6 +532,8 @@ mod tests {
    /// Test advanced buffer sizing with file patterns
    #[tokio::test]
    async fn test_advanced_buffer_sizing() {
        crate::storage::concurrency::reset_active_get_requests();

        let base_buffer = 256 * KI_B; // 256KB base

        // Test small file optimization


@@ -2454,6 +2454,13 @@ impl S3 for FS {
            .map(|v| SSECustomerAlgorithm::from(v.clone()));
        let sse_customer_key_md5 = metadata_map.get("x-amz-server-side-encryption-customer-key-md5").cloned();
        let ssekms_key_id = metadata_map.get("x-amz-server-side-encryption-aws-kms-key-id").cloned();

        // Prefer explicit storage_class from object info; fall back to persisted metadata header.
        let storage_class = info
            .storage_class
            .clone()
            .or_else(|| metadata_map.get("x-amz-storage-class").cloned())
            .filter(|s| !s.is_empty())
            .map(ObjectStorageClass::from);

        let mut checksum_crc32 = None;
        let mut checksum_crc32c = None;
@@ -2507,6 +2514,7 @@ impl S3 for FS {
            checksum_sha256,
            checksum_crc64nvme,
            checksum_type,
            storage_class,
            // metadata: object_metadata,
            ..Default::default()
        };
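
One way to exercise the fallback end to end (a sketch; the endpoint, bucket, and key are illustrative, and the CLI credentials must match the server's):

aws --endpoint-url http://localhost:9000 s3api put-object \
  --bucket test-bucket --key obj --storage-class REDUCED_REDUNDANCY --body ./file
aws --endpoint-url http://localhost:9000 s3api head-object \
  --bucket test-bucket --key obj
# The response should report StorageClass from the object info, or, failing
# that, from the persisted x-amz-storage-class metadata header.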