mirror of
https://github.com/rustfs/rustfs.git
synced 2026-01-17 01:30:33 +00:00
fix: s3 api compatibility (#1370)
Co-authored-by: houseme <housemecn@gmail.com>
This commit is contained in:
@@ -1,5 +1,14 @@
|
||||
# Repository Guidelines
|
||||
|
||||
## ⚠️ Pre-Commit Checklist (MANDATORY)
|
||||
**Before EVERY commit, you MUST run and pass ALL of the following:**
|
||||
```bash
|
||||
cargo fmt --all --check # Code formatting
|
||||
cargo clippy --all-targets --all-features -- -D warnings # Lints
|
||||
cargo test --workspace --exclude e2e_test # Unit tests
|
||||
```
|
||||
Or simply run `make pre-commit` which covers all checks. **DO NOT commit if any check fails.**
|
||||
|
||||
## Communication Rules
|
||||
- Respond to the user in Chinese; use English in all other contexts.
|
||||
- Code and documentation must be written in English only. Chinese text is allowed solely as test data/fixtures when a case explicitly requires Chinese-language content for validation.
|
||||
|
||||
@@ -34,8 +34,8 @@ use crate::disk::endpoint::{Endpoint, EndpointType};
|
||||
use crate::disk::{DiskAPI, DiskInfo, DiskInfoOptions};
|
||||
use crate::error::{Error, Result};
|
||||
use crate::error::{
|
||||
StorageError, is_err_bucket_exists, is_err_invalid_upload_id, is_err_object_not_found, is_err_read_quorum,
|
||||
is_err_version_not_found, to_object_err,
|
||||
StorageError, is_err_bucket_exists, is_err_bucket_not_found, is_err_invalid_upload_id, is_err_object_not_found,
|
||||
is_err_read_quorum, is_err_version_not_found, to_object_err,
|
||||
};
|
||||
use crate::global::{
|
||||
DISK_ASSUME_UNKNOWN_SIZE, DISK_FILL_FRACTION, DISK_MIN_INODES, DISK_RESERVE_FRACTION, GLOBAL_BOOT_TIME,
|
||||
@@ -86,6 +86,46 @@ use tokio_util::sync::CancellationToken;
|
||||
use tracing::{debug, error, info, instrument, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Check if a directory contains any xl.meta files (indicating actual S3 objects)
|
||||
/// This is used to determine if a bucket is empty for deletion purposes.
|
||||
async fn has_xlmeta_files(path: &std::path::Path) -> bool {
|
||||
use crate::disk::STORAGE_FORMAT_FILE;
|
||||
use tokio::fs;
|
||||
|
||||
let mut stack = vec![path.to_path_buf()];
|
||||
|
||||
while let Some(current_path) = stack.pop() {
|
||||
let mut entries = match fs::read_dir(¤t_path).await {
|
||||
Ok(entries) => entries,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
while let Ok(Some(entry)) = entries.next_entry().await {
|
||||
let file_name = entry.file_name();
|
||||
let file_name_str = file_name.to_string_lossy();
|
||||
|
||||
// Skip hidden files/directories (like .rustfs.sys)
|
||||
if file_name_str.starts_with('.') {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if this is an xl.meta file
|
||||
if file_name_str == STORAGE_FORMAT_FILE {
|
||||
return true;
|
||||
}
|
||||
|
||||
// If it's a directory, add to stack for further exploration
|
||||
if let Ok(file_type) = entry.file_type().await
|
||||
&& file_type.is_dir()
|
||||
{
|
||||
stack.push(entry.path());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
const MAX_UPLOADS_LIST: usize = 10000;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -1323,14 +1363,36 @@ impl StorageAPI for ECStore {
|
||||
|
||||
// TODO: nslock
|
||||
|
||||
let mut opts = opts.clone();
|
||||
// Check bucket exists before deletion (per S3 API spec)
|
||||
// If bucket doesn't exist, return NoSuchBucket error
|
||||
if let Err(err) = self.peer_sys.get_bucket_info(bucket, &BucketOptions::default()).await {
|
||||
// Convert DiskError to StorageError for comparison
|
||||
let storage_err: StorageError = err.into();
|
||||
if is_err_bucket_not_found(&storage_err) {
|
||||
return Err(StorageError::BucketNotFound(bucket.to_string()));
|
||||
}
|
||||
return Err(to_object_err(storage_err, vec![bucket]));
|
||||
}
|
||||
|
||||
// Check bucket is empty before deletion (per S3 API spec)
|
||||
// If bucket is not empty (contains actual objects with xl.meta files) and force
|
||||
// is not set, return BucketNotEmpty error.
|
||||
// Note: Empty directories (left after object deletion) should NOT count as objects.
|
||||
if !opts.force {
|
||||
// FIXME: check bucket exists
|
||||
opts.force = true
|
||||
let local_disks = all_local_disk().await;
|
||||
for disk in local_disks.iter() {
|
||||
// Check if bucket directory contains any xl.meta files (actual objects)
|
||||
// We recursively scan for xl.meta files to determine if bucket has objects
|
||||
// Use the disk's root path to construct bucket path
|
||||
let bucket_path = disk.path().join(bucket);
|
||||
if has_xlmeta_files(&bucket_path).await {
|
||||
return Err(StorageError::BucketNotEmpty(bucket.to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.peer_sys
|
||||
.delete_bucket(bucket, &opts)
|
||||
.delete_bucket(bucket, opts)
|
||||
.await
|
||||
.map_err(|e| to_object_err(e.into(), vec![bucket]))?;
|
||||
|
||||
|
||||
@@ -741,7 +741,21 @@ impl ObjectInfo {
|
||||
|
||||
let inlined = fi.inline_data();
|
||||
|
||||
// TODO:expires
|
||||
// Parse expires from metadata (HTTP date format RFC 7231 or ISO 8601)
|
||||
let expires = fi.metadata.get("expires").and_then(|s| {
|
||||
// Try parsing as ISO 8601 first
|
||||
time::OffsetDateTime::parse(s, &time::format_description::well_known::Iso8601::DEFAULT)
|
||||
.or_else(|_| {
|
||||
// Try RFC 2822 format
|
||||
time::OffsetDateTime::parse(s, &time::format_description::well_known::Rfc2822)
|
||||
})
|
||||
.or_else(|_| {
|
||||
// Try RFC 3339 format
|
||||
time::OffsetDateTime::parse(s, &time::format_description::well_known::Rfc3339)
|
||||
})
|
||||
.ok()
|
||||
});
|
||||
|
||||
// TODO:ReplicationState
|
||||
|
||||
let transitioned_object = TransitionedObject {
|
||||
@@ -799,6 +813,7 @@ impl ObjectInfo {
|
||||
user_tags,
|
||||
content_type,
|
||||
content_encoding,
|
||||
expires,
|
||||
num_versions: fi.num_versions,
|
||||
successor_mod_time: fi.successor_mod_time,
|
||||
etag,
|
||||
|
||||
@@ -1548,16 +1548,19 @@ impl S3 for FS {
|
||||
|
||||
let mut object_to_delete = Vec::new();
|
||||
let mut object_to_delete_index = HashMap::new();
|
||||
for (idx, object) in delete.objects.iter().enumerate() {
|
||||
if let Some(version_id) = object.version_id.clone() {
|
||||
let _vid = match Uuid::parse_str(&version_id) {
|
||||
for (idx, obj_id) in delete.objects.iter().enumerate() {
|
||||
// Per S3 API spec, "null" string means non-versioned object
|
||||
// Filter out "null" version_id to treat as unversioned
|
||||
let version_id = obj_id.version_id.clone().filter(|v| v != "null");
|
||||
if let Some(ref vid) = version_id {
|
||||
let _vid = match Uuid::parse_str(vid) {
|
||||
Ok(v) => v,
|
||||
Err(err) => {
|
||||
delete_results[idx].error = Some(Error {
|
||||
code: Some("NoSuchVersion".to_string()),
|
||||
key: Some(object.key.clone()),
|
||||
key: Some(obj_id.key.clone()),
|
||||
message: Some(err.to_string()),
|
||||
version_id: Some(version_id),
|
||||
version_id: Some(vid.clone()),
|
||||
});
|
||||
|
||||
continue;
|
||||
@@ -1568,24 +1571,26 @@ impl S3 for FS {
|
||||
{
|
||||
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
|
||||
req_info.bucket = Some(bucket.clone());
|
||||
req_info.object = Some(object.key.clone());
|
||||
req_info.version_id = object.version_id.clone();
|
||||
req_info.object = Some(obj_id.key.clone());
|
||||
req_info.version_id = version_id.clone();
|
||||
}
|
||||
|
||||
let auth_res = authorize_request(&mut req, Action::S3Action(S3Action::DeleteObjectAction)).await;
|
||||
if let Err(e) = auth_res {
|
||||
delete_results[idx].error = Some(Error {
|
||||
code: Some("AccessDenied".to_string()),
|
||||
key: Some(object.key.clone()),
|
||||
key: Some(obj_id.key.clone()),
|
||||
message: Some(e.to_string()),
|
||||
version_id: object.version_id.clone(),
|
||||
version_id: version_id.clone(),
|
||||
});
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut object = ObjectToDelete {
|
||||
object_name: object.key.clone(),
|
||||
version_id: object.version_id.clone().map(|v| Uuid::parse_str(&v).unwrap()),
|
||||
object_name: obj_id.key.clone(),
|
||||
version_id: version_id
|
||||
.clone()
|
||||
.map(|v| Uuid::parse_str(&v).expect("version_id validated as UUID earlier")),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
@@ -2689,10 +2694,21 @@ impl S3 for FS {
|
||||
}
|
||||
}
|
||||
|
||||
// Extract standard HTTP headers from user_defined metadata
|
||||
// Note: These headers are stored with lowercase keys by extract_metadata_from_mime
|
||||
let cache_control = metadata_map.get("cache-control").cloned();
|
||||
let content_disposition = metadata_map.get("content-disposition").cloned();
|
||||
let content_language = metadata_map.get("content-language").cloned();
|
||||
let expires = info.expires.map(Timestamp::from);
|
||||
|
||||
let output = HeadObjectOutput {
|
||||
content_length: Some(content_length),
|
||||
content_type,
|
||||
content_encoding: info.content_encoding.clone(),
|
||||
cache_control,
|
||||
content_disposition,
|
||||
content_language,
|
||||
expires,
|
||||
last_modified,
|
||||
e_tag: info.etag.map(|etag| to_s3s_etag(&etag)),
|
||||
metadata: filter_object_metadata(&metadata_map),
|
||||
@@ -2790,6 +2806,10 @@ impl S3 for FS {
|
||||
|
||||
#[instrument(level = "debug", skip(self, req))]
|
||||
async fn list_objects(&self, req: S3Request<ListObjectsInput>) -> S3Result<S3Response<ListObjectsOutput>> {
|
||||
// Capture the original marker from the request before conversion
|
||||
// S3 API requires the marker field to be echoed back in the response
|
||||
let request_marker = req.input.marker.clone();
|
||||
|
||||
let v2_resp = self.list_objects_v2(req.map_input(Into::into)).await?;
|
||||
|
||||
Ok(v2_resp.map_output(|v2| {
|
||||
@@ -2812,7 +2832,7 @@ impl S3 for FS {
|
||||
.cloned();
|
||||
|
||||
// NextMarker should be the lexicographically last item
|
||||
// This matches Ceph S3 behavior used by s3-tests
|
||||
// This matches S3 standard behavior
|
||||
match (last_key, last_prefix) {
|
||||
(Some(k), Some(p)) => {
|
||||
// Return the lexicographically greater one
|
||||
@@ -2826,6 +2846,10 @@ impl S3 for FS {
|
||||
None
|
||||
};
|
||||
|
||||
// S3 API requires marker field in response, echoing back the request marker
|
||||
// If no marker was provided in request, return empty string per S3 standard
|
||||
let marker = Some(request_marker.unwrap_or_default());
|
||||
|
||||
ListObjectsOutput {
|
||||
contents: v2.contents,
|
||||
delimiter: v2.delimiter,
|
||||
@@ -2835,6 +2859,7 @@ impl S3 for FS {
|
||||
max_keys: v2.max_keys,
|
||||
common_prefixes: v2.common_prefixes,
|
||||
is_truncated: v2.is_truncated,
|
||||
marker,
|
||||
next_marker,
|
||||
..Default::default()
|
||||
}
|
||||
@@ -2872,15 +2897,17 @@ impl S3 for FS {
|
||||
|
||||
validate_list_object_unordered_with_delimiter(delimiter.as_ref(), req.uri.query())?;
|
||||
|
||||
let start_after = start_after.filter(|v| !v.is_empty());
|
||||
// Save original start_after for response (per S3 API spec, must echo back if provided)
|
||||
let response_start_after = start_after.clone();
|
||||
let start_after_for_query = start_after.filter(|v| !v.is_empty());
|
||||
|
||||
let continuation_token = continuation_token.filter(|v| !v.is_empty());
|
||||
|
||||
// Save the original encoded continuation_token for response
|
||||
let encoded_continuation_token = continuation_token.clone();
|
||||
// Save original continuation_token for response (per S3 API spec, must echo back if provided)
|
||||
// Note: empty string should still be echoed back in the response
|
||||
let response_continuation_token = continuation_token.clone();
|
||||
let continuation_token_for_query = continuation_token.filter(|v| !v.is_empty());
|
||||
|
||||
// Decode continuation_token from base64 for internal use
|
||||
let continuation_token = continuation_token
|
||||
let decoded_continuation_token = continuation_token_for_query
|
||||
.map(|token| {
|
||||
base64_simd::STANDARD
|
||||
.decode_to_vec(token.as_bytes())
|
||||
@@ -2902,11 +2929,11 @@ impl S3 for FS {
|
||||
.list_objects_v2(
|
||||
&bucket,
|
||||
&prefix,
|
||||
continuation_token,
|
||||
decoded_continuation_token,
|
||||
delimiter.clone(),
|
||||
max_keys,
|
||||
fetch_owner.unwrap_or_default(),
|
||||
start_after,
|
||||
start_after_for_query,
|
||||
incl_deleted,
|
||||
)
|
||||
.await
|
||||
@@ -2975,8 +3002,9 @@ impl S3 for FS {
|
||||
|
||||
let output = ListObjectsV2Output {
|
||||
is_truncated: Some(object_infos.is_truncated),
|
||||
continuation_token: encoded_continuation_token,
|
||||
continuation_token: response_continuation_token,
|
||||
next_continuation_token,
|
||||
start_after: response_start_after,
|
||||
key_count: Some(key_count),
|
||||
max_keys: Some(max_keys),
|
||||
contents: Some(objects),
|
||||
|
||||
@@ -330,29 +330,56 @@ pub fn extract_metadata_from_mime_with_object_name(
|
||||
}
|
||||
|
||||
pub(crate) fn filter_object_metadata(metadata: &HashMap<String, String>) -> Option<HashMap<String, String>> {
|
||||
// Standard HTTP headers that should NOT be returned in the Metadata field
|
||||
// These are returned as separate response headers, not user metadata
|
||||
const EXCLUDED_HEADERS: &[&str] = &[
|
||||
"content-type",
|
||||
"content-encoding",
|
||||
"content-disposition",
|
||||
"content-language",
|
||||
"cache-control",
|
||||
"expires",
|
||||
"etag",
|
||||
"x-amz-storage-class",
|
||||
"x-amz-tagging",
|
||||
"x-amz-replication-status",
|
||||
"x-amz-server-side-encryption",
|
||||
"x-amz-server-side-encryption-customer-algorithm",
|
||||
"x-amz-server-side-encryption-customer-key-md5",
|
||||
"x-amz-server-side-encryption-aws-kms-key-id",
|
||||
];
|
||||
|
||||
let mut filtered_metadata = HashMap::new();
|
||||
for (k, v) in metadata {
|
||||
// Skip internal/reserved metadata
|
||||
if k.starts_with(RESERVED_METADATA_PREFIX_LOWER) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip empty object lock values
|
||||
if v.is_empty() && (k == &X_AMZ_OBJECT_LOCK_MODE.to_string() || k == &X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE.to_string()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip encryption metadata placeholders
|
||||
if k == AMZ_META_UNENCRYPTED_CONTENT_MD5 || k == AMZ_META_UNENCRYPTED_CONTENT_LENGTH {
|
||||
continue;
|
||||
}
|
||||
|
||||
let lower_key = k.to_ascii_lowercase();
|
||||
if let Some(key) = lower_key.strip_prefix("x-amz-meta-") {
|
||||
filtered_metadata.insert(key.to_string(), v.to_string());
|
||||
continue;
|
||||
}
|
||||
if let Some(key) = lower_key.strip_prefix("x-rustfs-meta-") {
|
||||
filtered_metadata.insert(key.to_string(), v.to_string());
|
||||
|
||||
// Skip standard HTTP headers (they are returned as separate headers, not metadata)
|
||||
if EXCLUDED_HEADERS.contains(&lower_key.as_str()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip any x-amz-* headers that are not user metadata
|
||||
// User metadata was stored WITHOUT the x-amz-meta- prefix by extract_metadata_from_mime
|
||||
if lower_key.starts_with("x-amz-") {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Include user-defined metadata (keys like "meta1", "custom-key", etc.)
|
||||
filtered_metadata.insert(k.clone(), v.clone());
|
||||
}
|
||||
if filtered_metadata.is_empty() {
|
||||
|
||||
@@ -33,7 +33,110 @@ S3_PORT="${S3_PORT:-9000}"
|
||||
TEST_MODE="${TEST_MODE:-single}"
|
||||
MAXFAIL="${MAXFAIL:-1}"
|
||||
XDIST="${XDIST:-0}"
|
||||
MARKEXPR="${MARKEXPR:-not lifecycle and not versioning and not s3website and not bucket_logging and not encryption}"
|
||||
|
||||
# =============================================================================
|
||||
# MARKEXPR: pytest marker expression to exclude test categories
|
||||
# =============================================================================
|
||||
# These markers exclude entire test categories via pytest's -m option.
|
||||
# Use MARKEXPR env var to override the default exclusions.
|
||||
#
|
||||
# Excluded categories:
|
||||
# - Unimplemented S3 features: lifecycle, versioning, s3website, bucket_logging, encryption
|
||||
# - Ceph/RGW specific tests: fails_on_aws, fails_on_rgw, fails_on_dbstore
|
||||
# - IAM features: iam_account, iam_tenant, iam_role, iam_user, iam_cross_account
|
||||
# - Other unimplemented: sns, sse_s3, storage_class, test_of_sts, webidentity_test
|
||||
# =============================================================================
|
||||
if [[ -z "${MARKEXPR:-}" ]]; then
|
||||
EXCLUDED_MARKERS=(
|
||||
# Unimplemented S3 features
|
||||
"lifecycle"
|
||||
"versioning"
|
||||
"s3website"
|
||||
"bucket_logging"
|
||||
"encryption"
|
||||
# Ceph/RGW specific tests (not standard S3)
|
||||
"fails_on_aws" # Tests for Ceph/RGW specific features (X-RGW-* headers, etc.)
|
||||
"fails_on_rgw" # Known RGW issues we don't need to replicate
|
||||
"fails_on_dbstore" # Ceph dbstore backend specific
|
||||
# IAM features requiring additional setup
|
||||
"iam_account"
|
||||
"iam_tenant"
|
||||
"iam_role"
|
||||
"iam_user"
|
||||
"iam_cross_account"
|
||||
# Other unimplemented features
|
||||
"sns" # SNS notification
|
||||
"sse_s3" # Server-side encryption with S3-managed keys
|
||||
"storage_class" # Storage class features
|
||||
"test_of_sts" # STS token service
|
||||
"webidentity_test" # Web Identity federation
|
||||
)
|
||||
# Build MARKEXPR from array: "not marker1 and not marker2 and ..."
|
||||
MARKEXPR=""
|
||||
for marker in "${EXCLUDED_MARKERS[@]}"; do
|
||||
if [[ -n "${MARKEXPR}" ]]; then
|
||||
MARKEXPR+=" and "
|
||||
fi
|
||||
MARKEXPR+="not ${marker}"
|
||||
done
|
||||
fi
|
||||
|
||||
# =============================================================================
|
||||
# TESTEXPR: pytest -k expression to exclude specific tests by name
|
||||
# =============================================================================
|
||||
# These patterns exclude specific tests via pytest's -k option (name matching).
|
||||
# Use TESTEXPR env var to override the default exclusions.
|
||||
#
|
||||
# Exclusion reasons are documented inline below.
|
||||
# =============================================================================
|
||||
if [[ -z "${TESTEXPR:-}" ]]; then
|
||||
EXCLUDED_TESTS=(
|
||||
# POST Object (HTML form upload) - not implemented
|
||||
"test_post_object"
|
||||
# ACL-dependent tests - ACL not implemented
|
||||
"test_bucket_list_objects_anonymous" # requires PutBucketAcl
|
||||
"test_bucket_listv2_objects_anonymous" # requires PutBucketAcl
|
||||
"test_bucket_concurrent_set_canned_acl" # ACL not implemented
|
||||
"test_expected_bucket_owner" # requires PutBucketAcl
|
||||
"test_bucket_acl" # Bucket ACL not implemented
|
||||
"test_object_acl" # Object ACL not implemented
|
||||
"test_put_bucket_acl" # PutBucketAcl not implemented
|
||||
"test_object_anon" # Anonymous access requires ACL
|
||||
"test_access_bucket" # Access control requires ACL
|
||||
"test_100_continue" # requires ACL
|
||||
# Chunked encoding - not supported
|
||||
"test_object_write_with_chunked_transfer_encoding"
|
||||
"test_object_content_encoding_aws_chunked"
|
||||
# CORS - not implemented
|
||||
"test_cors"
|
||||
"test_set_cors"
|
||||
# Presigned URL edge cases
|
||||
"test_object_raw" # Raw presigned URL tests
|
||||
# Error response format differences
|
||||
"test_bucket_create_exists" # Error format issue
|
||||
"test_bucket_recreate_not_overriding" # Error format issue
|
||||
"test_list_buckets_invalid_auth" # 401 vs 403
|
||||
"test_object_delete_key_bucket_gone" # 403 vs 404
|
||||
"test_abort_multipart_upload_not_found" # Error code issue
|
||||
# ETag conditional request edge cases
|
||||
"test_get_object_ifmatch_failed"
|
||||
"test_get_object_ifnonematch"
|
||||
# Copy operation edge cases
|
||||
"test_object_copy_to_itself" # Copy validation
|
||||
"test_object_copy_not_owned_bucket" # Cross-account access
|
||||
"test_multipart_copy_invalid_range" # Multipart validation
|
||||
# Timing-sensitive tests
|
||||
"test_versioning_concurrent_multi_object_delete"
|
||||
)
|
||||
# Build TESTEXPR from array: "not test1 and not test2 and ..."
|
||||
TESTEXPR=""
|
||||
for pattern in "${EXCLUDED_TESTS[@]}"; do
|
||||
if [[ -n "${TESTEXPR}" ]]; then
|
||||
TESTEXPR+=" and "
|
||||
fi
|
||||
TESTEXPR+="not ${pattern}"
|
||||
done
|
||||
fi
|
||||
|
||||
# Configuration file paths
|
||||
S3TESTS_CONF_TEMPLATE="${S3TESTS_CONF_TEMPLATE:-.github/s3tests/s3tests.conf}"
|
||||
@@ -103,6 +206,7 @@ Environment Variables:
|
||||
MAXFAIL - Stop after N failures (default: 1)
|
||||
XDIST - Enable parallel execution with N workers (default: 0)
|
||||
MARKEXPR - pytest marker expression (default: exclude unsupported features)
|
||||
TESTEXPR - pytest -k expression to filter tests by name (default: exclude unimplemented)
|
||||
S3TESTS_CONF_TEMPLATE - Path to s3tests config template (default: .github/s3tests/s3tests.conf)
|
||||
S3TESTS_CONF - Path to generated s3tests config (default: s3tests.conf)
|
||||
DATA_ROOT - Root directory for test data storage (default: target)
|
||||
@@ -606,6 +710,7 @@ S3TEST_CONF="${CONF_OUTPUT_PATH}" \
|
||||
${XDIST_ARGS} \
|
||||
s3tests/functional/test_s3.py \
|
||||
-m "${MARKEXPR}" \
|
||||
-k "${TESTEXPR}" \
|
||||
2>&1 | tee "${ARTIFACTS_DIR}/pytest.log"
|
||||
|
||||
TEST_EXIT_CODE=${PIPESTATUS[0]}
|
||||
|
||||
Reference in New Issue
Block a user