Mirror of https://github.com/rustfs/rustfs.git — synced 2026-01-17 01:30:33 +00:00

Merge branch 'main' of github.com:rustfs/rustfs into fix/axum-trusted-proxies

Cargo.lock (generated): 32 lines changed
@@ -3434,18 +3434,6 @@ dependencies = [
  "cfg-if",
 ]
 
-[[package]]
-name = "enum-as-inner"
-version = "0.6.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc"
-dependencies = [
- "heck",
- "proc-macro2",
- "quote",
- "syn 2.0.113",
-]
-
 [[package]]
 name = "enum_dispatch"
 version = "0.3.13"

@@ -7782,6 +7770,7 @@ dependencies = [
 "hyper",
 "hyper-util",
 "jemalloc_pprof",
 "libc",
 "libsystemd",
 "libunftp",
 "matchit 0.9.1",

@@ -7831,7 +7820,6 @@ dependencies = [
 "socket2",
 "ssh-key",
 "subtle",
-"sysctl",
 "sysinfo",
 "thiserror 2.0.17",
 "tikv-jemalloc-ctl",

@@ -8146,11 +8134,9 @@ name = "rustfs-lock"
 version = "0.0.5"
 dependencies = [
 "async-trait",
 "bytes",
 "crossbeam-queue",
 "futures",
 "parking_lot",
 "rustfs-protos",
 "serde",
 "serde_json",
 "smallvec",

@@ -8159,7 +8145,6 @@ dependencies = [
 "tokio",
 "tonic",
 "tracing",
 "url",
 "uuid",
 ]
 
@@ -8285,7 +8270,6 @@ dependencies = [
 "flatbuffers",
 "prost 0.14.1",
 "rustfs-common",
 "rustfs-credentials",
 "tonic",
 "tonic-prost",
 "tonic-prost-build",

@@ -9721,20 +9705,6 @@ dependencies = [
 "syn 2.0.113",
 ]
 
-[[package]]
-name = "sysctl"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cca424247104946a59dacd27eaad296223b7feec3d168a6dd04585183091eb0b"
-dependencies = [
- "bitflags 2.10.0",
- "byteorder",
- "enum-as-inner",
- "libc",
- "thiserror 2.0.17",
- "walkdir",
-]
-
 [[package]]
 name = "sysinfo"
 version = "0.37.2"
@@ -237,7 +237,6 @@ snafu = "0.8.9"
|
||||
snap = "1.1.1"
|
||||
starshard = { version = "0.6.0", features = ["rayon", "async", "serde"] }
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
sysctl = "0.7.1"
|
||||
sysinfo = "0.37.2"
|
||||
temp-env = "0.3.6"
|
||||
tempfile = "3.24.0"
|
||||
|
||||
@@ -37,6 +37,8 @@ datas = "datas"
|
||||
bre = "bre"
|
||||
abd = "abd"
|
||||
mak = "mak"
|
||||
# s3-tests original test names (cannot be changed)
|
||||
nonexisted = "nonexisted"
|
||||
|
||||
[files]
|
||||
extend-exclude = []
|
||||
extend-exclude = []
|
||||
|
||||
@@ -306,7 +306,7 @@ fn compute_object_usage(bucket: &str, object: &str, file_meta: &FileMeta) -> Res
         versions_count = versions_count.saturating_add(1);
 
         if latest_file_info.is_none()
-            && let Ok(info) = file_meta.into_fileinfo(bucket, object, "", false, false)
+            && let Ok(info) = file_meta.into_fileinfo(bucket, object, "", false, false, false)
         {
             latest_file_info = Some(info);
         }
@@ -170,12 +170,6 @@ pub const KI_B: usize = 1024;
 /// Default value: 1048576
 pub const MI_B: usize = 1024 * 1024;
 
-/// Environment variable for gRPC authentication token
-/// Used to set the authentication token for gRPC communication
-/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
-/// Default value: No default value. RUSTFS_SECRET_KEY value is recommended.
-pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";
-
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -27,11 +27,11 @@ pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
 /// Example: --secret-key rustfsadmin
 pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";
 
-/// Environment variable for gRPC authentication token
-/// Used to set the authentication token for gRPC communication
-/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
+/// Environment variable for RPC authentication token
+/// Used to set the authentication token for RPC communication
+/// Example: RUSTFS_RPC_SECRET=your_token_here
 /// Default value: No default value. RUSTFS_SECRET_KEY value is recommended.
-pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";
+pub const ENV_RPC_SECRET: &str = "RUSTFS_RPC_SECRET";
 
 /// IAM Policy Types
 /// Used to differentiate between embedded and inherited policies
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use crate::{DEFAULT_SECRET_KEY, ENV_GRPC_AUTH_TOKEN, IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
+use crate::{DEFAULT_SECRET_KEY, ENV_RPC_SECRET, IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
 use rand::{Rng, RngCore};
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
@@ -25,8 +25,8 @@ use time::OffsetDateTime;
 /// Global active credentials
 static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();
 
-/// Global gRPC authentication token
-static GLOBAL_GRPC_AUTH_TOKEN: OnceLock<String> = OnceLock::new();
+/// Global RPC authentication token
+pub static GLOBAL_RUSTFS_RPC_SECRET: OnceLock<String> = OnceLock::new();
 
 /// Initialize the global action credentials
 ///
@@ -181,15 +181,15 @@ pub fn gen_secret_key(length: usize) -> std::io::Result<String> {
     Ok(key_str)
 }
 
-/// Get the gRPC authentication token from environment variable
+/// Get the RPC authentication token from environment variable
 ///
 /// # Returns
-/// * `String` - The gRPC authentication token
+/// * `String` - The RPC authentication token
 ///
-pub fn get_grpc_token() -> String {
-    GLOBAL_GRPC_AUTH_TOKEN
+pub fn get_rpc_token() -> String {
+    GLOBAL_RUSTFS_RPC_SECRET
         .get_or_init(|| {
-            env::var(ENV_GRPC_AUTH_TOKEN)
+            env::var(ENV_RPC_SECRET)
                 .unwrap_or_else(|_| get_global_secret_key_opt().unwrap_or_else(|| DEFAULT_SECRET_KEY.to_string()))
         })
         .clone()
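Note: `get_rpc_token` now caches its answer in a process-wide `OnceLock`, falling back from the `RUSTFS_RPC_SECRET` environment variable to the configured secret key and finally to `DEFAULT_SECRET_KEY`. A standalone sketch of that first-resolution-wins behaviour (hypothetical names; the real code reads the environment via `env::var`):

```rust
use std::sync::OnceLock;

static RPC_SECRET: OnceLock<String> = OnceLock::new();

// Assumption: mirrors get_rpc_token()'s fallback chain, with the inputs
// passed in explicitly instead of read from the process environment.
fn resolve_rpc_secret(env_value: Option<&str>, secret_key: Option<&str>) -> String {
    RPC_SECRET
        .get_or_init(|| {
            env_value
                .map(str::to_string)
                .unwrap_or_else(|| secret_key.unwrap_or("rustfsadmin").to_string())
        })
        .clone()
}

fn main() {
    // The first resolution wins; later calls return the cached value.
    assert_eq!(resolve_rpc_secret(None, Some("s3cret")), "s3cret");
    assert_eq!(resolve_rpc_secret(Some("ignored"), None), "s3cret");
}
```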
@@ -14,7 +14,7 @@
 use crate::data_usage::{DATA_USAGE_CACHE_NAME, DATA_USAGE_ROOT, load_data_usage_from_backend};
 use crate::error::{Error, Result};
-use crate::rpc::node_service_time_out_client_no_auth;
+use crate::rpc::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
 use crate::{
     disk::endpoint::Endpoint,
     global::{GLOBAL_BOOT_TIME, GLOBAL_Endpoints},
@@ -101,9 +101,9 @@ async fn is_server_resolvable(endpoint: &Endpoint) -> Result<()> {
     let decoded_payload = flatbuffers::root::<PingBody>(finished_data);
     assert!(decoded_payload.is_ok());
 
-    let mut client = node_service_time_out_client_no_auth(&addr)
+    let mut client = node_service_time_out_client(&addr, TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
         .await
-        .map_err(|err| Error::other(err.to_string()))?;
+        .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
 
     let request = Request::new(PingRequest {
         version: 1,
@@ -95,22 +95,22 @@ impl DiskHealthTracker {
     /// Check if disk is faulty
     pub fn is_faulty(&self) -> bool {
-        self.status.load(Ordering::Relaxed) == DISK_HEALTH_FAULTY
+        self.status.load(Ordering::Acquire) == DISK_HEALTH_FAULTY
     }
 
     /// Set disk as faulty
     pub fn set_faulty(&self) {
-        self.status.store(DISK_HEALTH_FAULTY, Ordering::Relaxed);
+        self.status.store(DISK_HEALTH_FAULTY, Ordering::Release);
     }
 
     /// Set disk as OK
     pub fn set_ok(&self) {
-        self.status.store(DISK_HEALTH_OK, Ordering::Relaxed);
+        self.status.store(DISK_HEALTH_OK, Ordering::Release);
     }
 
     pub fn swap_ok_to_faulty(&self) -> bool {
         self.status
-            .compare_exchange(DISK_HEALTH_OK, DISK_HEALTH_FAULTY, Ordering::Relaxed, Ordering::Relaxed)
+            .compare_exchange(DISK_HEALTH_OK, DISK_HEALTH_FAULTY, Ordering::AcqRel, Ordering::Relaxed)
             .is_ok()
     }

@@ -131,7 +131,7 @@ impl DiskHealthTracker {
     /// Get last success timestamp
     pub fn last_success(&self) -> i64 {
-        self.last_success.load(Ordering::Relaxed)
+        self.last_success.load(Ordering::Acquire)
     }
 }
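Note: swapping `Ordering::Relaxed` for `Acquire`/`Release` turns the health flag into a synchronization point: writes made before `set_faulty` become visible to any thread that later observes the flag through `is_faulty`. A self-contained sketch of the pattern (the constants mirror those in the diff; their values here are assumptions):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

const DISK_HEALTH_OK: u32 = 0; // assumed values
const DISK_HEALTH_FAULTY: u32 = 1;

struct Health {
    status: AtomicU32,
}

impl Health {
    fn set_faulty(&self) {
        // Release: everything written before this store is visible to
        // any thread whose Acquire load sees DISK_HEALTH_FAULTY.
        self.status.store(DISK_HEALTH_FAULTY, Ordering::Release);
    }

    fn is_faulty(&self) -> bool {
        self.status.load(Ordering::Acquire) == DISK_HEALTH_FAULTY
    }

    fn swap_ok_to_faulty(&self) -> bool {
        // AcqRel on success: the read-modify-write both observes prior
        // writes and publishes its own.
        self.status
            .compare_exchange(DISK_HEALTH_OK, DISK_HEALTH_FAULTY, Ordering::AcqRel, Ordering::Relaxed)
            .is_ok()
    }
}

fn main() {
    let h = Health { status: AtomicU32::new(DISK_HEALTH_OK) };
    assert!(h.swap_ok_to_faulty());
    assert!(h.is_faulty());
}
```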
@@ -384,7 +384,7 @@ impl LocalDiskWrapper {
     let stored_disk_id = self.disk.get_disk_id().await?;
 
     if stored_disk_id != want_id {
-        return Err(Error::other(format!("Disk ID mismatch wanted {:?}, got {:?}", want_id, stored_disk_id)));
+        return Err(Error::other(format!("Disk ID mismatch wanted {want_id:?}, got {stored_disk_id:?}")));
     }
 
     Ok(())

@@ -468,7 +468,7 @@ impl LocalDiskWrapper {
             // Timeout occurred, mark disk as potentially faulty and decrement waiting counter
             self.health.decrement_waiting();
             warn!("disk operation timeout after {:?}", timeout_duration);
-            Err(DiskError::other(format!("disk operation timeout after {:?}", timeout_duration)))
+            Err(DiskError::other(format!("disk operation timeout after {timeout_duration:?}")))
         }
     }
 }
@@ -21,6 +21,7 @@ use super::{
 };
 use super::{endpoint::Endpoint, error::DiskError, format::FormatV3};
 
+use crate::config::storageclass::DEFAULT_INLINE_BLOCK;
 use crate::data_usage::local_snapshot::ensure_data_usage_layout;
 use crate::disk::error::FileAccessDeniedWithContext;
 use crate::disk::error_conv::{to_access_error, to_file_error, to_unformatted_disk_error, to_volume_error};

@@ -489,9 +490,17 @@ impl LocalDisk {
     let file_dir = self.get_bucket_path(volume)?;
     let (data, _) = self.read_raw(volume, file_dir, file_path, opts.read_data).await?;
 
-    get_file_info(&data, volume, path, version_id, FileInfoOpts { data: opts.read_data })
-        .await
-        .map_err(|_e| DiskError::Unexpected)
+    get_file_info(
+        &data,
+        volume,
+        path,
+        version_id,
+        FileInfoOpts {
+            data: opts.read_data,
+            include_free_versions: false,
+        },
+    )
+    .map_err(|_e| DiskError::Unexpected)
 }
 
 // Batch metadata reading for multiple objects
@@ -2240,20 +2249,93 @@ impl DiskAPI for LocalDisk {
 #[tracing::instrument(level = "debug", skip(self))]
 async fn read_version(
     &self,
-    _org_volume: &str,
+    org_volume: &str,
     volume: &str,
     path: &str,
     version_id: &str,
     opts: &ReadOptions,
 ) -> Result<FileInfo> {
+    if !org_volume.is_empty() {
+        let org_volume_path = self.get_bucket_path(org_volume)?;
+        if !skip_access_checks(org_volume) {
+            access(&org_volume_path)
+                .await
+                .map_err(|e| to_access_error(e, DiskError::VolumeAccessDenied))?;
+        }
+    }
+
     let file_path = self.get_object_path(volume, path)?;
-    let file_dir = self.get_bucket_path(volume)?;
+    let volume_dir = self.get_bucket_path(volume)?;
 
     check_path_length(file_path.to_string_lossy().as_ref())?;
 
     let read_data = opts.read_data;
 
-    let (data, _) = self.read_raw(volume, file_dir, file_path, read_data).await?;
+    let (data, _) = self
+        .read_raw(volume, volume_dir.clone(), file_path, read_data)
+        .await
+        .map_err(|e| {
+            if e == DiskError::FileNotFound && !version_id.is_empty() {
+                DiskError::FileVersionNotFound
+            } else {
+                e
+            }
+        })?;
 
-    let fi = get_file_info(&data, volume, path, version_id, FileInfoOpts { data: read_data }).await?;
+    let mut fi = get_file_info(
+        &data,
+        volume,
+        path,
+        version_id,
+        FileInfoOpts {
+            data: read_data,
+            include_free_versions: opts.incl_free_versions,
+        },
+    )?;
 
     if opts.read_data {
         if fi.data.as_ref().is_some_and(|d| !d.is_empty()) || fi.size == 0 {
             if fi.inline_data() {
                 return Ok(fi);
             }
 
             if fi.size == 0 || fi.version_id.is_none_or(|v| v.is_nil()) {
                 fi.set_inline_data();
                 return Ok(fi);
             };
             if let Some(part) = fi.parts.first() {
                 let part_path = format!("part.{}", part.number);
                 let part_path = path_join_buf(&[
                     path,
                     fi.data_dir.map_or("".to_string(), |dir| dir.to_string()).as_str(),
                     part_path.as_str(),
                 ]);
                 let part_path = self.get_object_path(volume, part_path.as_str())?;
                 if lstat(&part_path).await.is_err() {
                     fi.set_inline_data();
                     return Ok(fi);
                 }
             }
 
             fi.data = None;
         }
 
         let inline = fi.transition_status.is_empty() && fi.data_dir.is_some() && fi.parts.len() == 1;
         if inline && fi.shard_file_size(fi.parts[0].actual_size) < DEFAULT_INLINE_BLOCK as i64 {
             let part_path = path_join_buf(&[
                 path,
                 fi.data_dir.map_or("".to_string(), |dir| dir.to_string()).as_str(),
                 format!("part.{}", fi.parts[0].number).as_str(),
             ]);
             let part_path = self.get_object_path(volume, part_path.as_str())?;
 
             let data = self.read_all_data(volume, volume_dir, part_path.clone()).await.map_err(|e| {
                 warn!("read_version read_all_data {:?} failed: {e}", part_path);
                 e
             })?;
             fi.data = Some(Bytes::from(data));
         }
     }
 
     Ok(fi)
 }
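Note: the inline-read path added above hinges on one predicate: a single-part, non-transitioned object whose shard size is under the inline block gets its bytes loaded straight into `fi.data` instead of being served from a separate part file. A standalone sketch of that decision (the 128 KiB value is an assumption; the real constant is `config::storageclass::DEFAULT_INLINE_BLOCK`):

```rust
// Assumed threshold; the actual value lives in config::storageclass.
const DEFAULT_INLINE_BLOCK: usize = 128 * 1024;

// Mirrors: fi.transition_status.is_empty() && fi.data_dir.is_some()
//          && fi.parts.len() == 1 && shard_file_size < DEFAULT_INLINE_BLOCK
fn should_inline(transition_status_empty: bool, has_data_dir: bool, part_count: usize, shard_size: i64) -> bool {
    transition_status_empty && has_data_dir && part_count == 1 && shard_size < DEFAULT_INLINE_BLOCK as i64
}

fn main() {
    assert!(should_inline(true, true, 1, 4096));
    assert!(!should_inline(true, true, 2, 4096)); // multipart objects are never inlined here
    assert!(!should_inline(false, true, 1, 4096)); // transitioned objects are excluded
}
```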
@@ -15,11 +15,8 @@
 use base64::Engine as _;
 use base64::engine::general_purpose;
 use hmac::{Hmac, KeyInit, Mac};
-use http::HeaderMap;
-use http::HeaderValue;
-use http::Method;
-use http::Uri;
-use rustfs_credentials::get_global_action_cred;
+use http::{HeaderMap, HeaderValue, Method, Uri};
+use rustfs_credentials::{DEFAULT_SECRET_KEY, ENV_RPC_SECRET, get_global_secret_key_opt};
 use sha2::Sha256;
 use time::OffsetDateTime;
 use tracing::error;
@@ -33,12 +30,16 @@ pub const TONIC_RPC_PREFIX: &str = "/node_service.NodeService";
 
 /// Get the shared secret for HMAC signing
 fn get_shared_secret() -> String {
-    if let Some(cred) = get_global_action_cred() {
-        cred.secret_key
-    } else {
-        // Fallback to environment variable if global credentials are not available
-        std::env::var("RUSTFS_RPC_SECRET").unwrap_or_else(|_| "rustfs-default-secret".to_string())
-    }
+    rustfs_credentials::GLOBAL_RUSTFS_RPC_SECRET
+        .get_or_init(|| {
+            rustfs_utils::get_env_str(
+                ENV_RPC_SECRET,
+                get_global_secret_key_opt()
+                    .unwrap_or_else(|| DEFAULT_SECRET_KEY.to_string())
+                    .as_str(),
+            )
+        })
+        .clone()
 }
 
 /// Generate HMAC-SHA256 signature for the given data
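Note: only the key source moved here; the signing itself is unchanged. For reference, a minimal HMAC-SHA256 + base64 round using the same crates this file imports (illustrative only; the real request canonicalization in rustfs is not shown):

```rust
use base64::Engine as _;
use base64::engine::general_purpose;
use hmac::{Hmac, Mac};
use sha2::Sha256;

// Sign arbitrary bytes with the shared secret and base64-encode the tag.
fn sign(secret: &str, data: &[u8]) -> String {
    let mut mac = Hmac::<Sha256>::new_from_slice(secret.as_bytes()).expect("HMAC accepts keys of any length");
    mac.update(data);
    general_purpose::STANDARD.encode(mac.finalize().into_bytes())
}

fn main() {
    // Hypothetical payload; the actual signed string format is defined elsewhere.
    println!("{}", sign("rustfsadmin", b"GET /node_service.NodeService/Ping"));
}
```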
@@ -18,9 +18,7 @@ use crate::disk::error::{Error, Result};
 use crate::disk::error_reduce::{BUCKET_OP_IGNORED_ERRS, is_all_buckets_not_found, reduce_write_quorum_errs};
 use crate::disk::{DiskAPI, DiskStore, disk_store::get_max_timeout_duration};
 use crate::global::GLOBAL_LOCAL_DISK_MAP;
-use crate::rpc::client::{
-    TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client, node_service_time_out_client_no_auth,
-};
+use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
 use crate::store::all_local_disk;
 use crate::store_utils::is_reserved_or_invalid_bucket;
 use crate::{
@@ -675,7 +673,7 @@ impl RemotePeerS3Client {
 async fn perform_connectivity_check(addr: &str) -> Result<()> {
     use tokio::time::timeout;
 
-    let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {}", e)))?;
+    let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {e}")))?;
 
     let Some(host) = url.host_str() else {
         return Err(Error::other("No host in URL".to_string()));

@@ -686,7 +684,7 @@ impl RemotePeerS3Client {
     // Try to establish TCP connection
     match timeout(CHECK_TIMEOUT_DURATION, TcpStream::connect((host, port))).await {
         Ok(Ok(_)) => Ok(()),
-        _ => Err(Error::other(format!("Cannot connect to {}:{}", host, port))),
+        _ => Err(Error::other(format!("Cannot connect to {host}:{port}"))),
     }
 }

@@ -725,7 +723,7 @@ impl RemotePeerS3Client {
             // Timeout occurred, mark peer as potentially faulty
             self.health.decrement_waiting();
             warn!("Remote peer operation timeout after {:?}", timeout_duration);
-            Err(Error::other(format!("Remote peer operation timeout after {:?}", timeout_duration)))
+            Err(Error::other(format!("Remote peer operation timeout after {timeout_duration:?}")))
         }
     }
 }
@@ -823,9 +821,7 @@ impl PeerS3Client for RemotePeerS3Client {
     self.execute_with_timeout(
         || async {
             let options = serde_json::to_string(opts)?;
-            let mut client = node_service_time_out_client_no_auth(&self.addr)
-                .await
-                .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
+            let mut client = self.get_client().await?;
             let request = Request::new(GetBucketInfoRequest {
                 bucket: bucket.to_string(),
                 options,
@@ -206,7 +206,7 @@ impl RemoteDisk {
     /// Perform basic connectivity check for remote disk
     async fn perform_connectivity_check(addr: &str) -> Result<()> {
-        let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {}", e)))?;
+        let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {e}")))?;
 
         let Some(host) = url.host_str() else {
             return Err(Error::other("No host in URL".to_string()));

@@ -220,7 +220,7 @@ impl RemoteDisk {
             drop(stream);
             Ok(())
         }
-        _ => Err(Error::other(format!("Cannot connect to {}:{}", host, port))),
+        _ => Err(Error::other(format!("Cannot connect to {host}:{port}"))),
     }
 }

@@ -260,7 +260,7 @@ impl RemoteDisk {
             // Timeout occurred, mark disk as potentially faulty
             self.health.decrement_waiting();
             warn!("Remote disk operation timeout after {:?}", timeout_duration);
-            Err(Error::other(format!("Remote disk operation timeout after {:?}", timeout_duration)))
+            Err(Error::other(format!("Remote disk operation timeout after {timeout_duration:?}")))
         }
     }
 }
@@ -1485,20 +1485,8 @@ impl SetDisks {
             let object = object.clone();
             let version_id = version_id.clone();
             tokio::spawn(async move {
-                if let Some(disk) = disk
-                    && disk.is_online().await
-                {
-                    if version_id.is_empty() {
-                        match disk.read_xl(&bucket, &object, read_data).await {
-                            Ok(info) => {
-                                let fi = file_info_from_raw(info, &bucket, &object, read_data).await?;
-                                Ok(fi)
-                            }
-                            Err(err) => Err(err),
-                        }
-                    } else {
-                        disk.read_version(&org_bucket, &bucket, &object, &version_id, &opts).await
-                    }
+                if let Some(disk) = disk {
+                    disk.read_version(&org_bucket, &bucket, &object, &version_id, &opts).await
                 } else {
                     Err(DiskError::DiskNotFound)
                 }
@@ -1626,7 +1614,7 @@ impl SetDisks {
     bucket: &str,
     object: &str,
     read_data: bool,
-    _incl_free_vers: bool,
+    incl_free_vers: bool,
 ) -> (Vec<FileInfo>, Vec<Option<DiskError>>) {
     let mut metadata_array = vec![None; fileinfos.len()];
     let mut meta_file_infos = vec![FileInfo::default(); fileinfos.len()];

@@ -1676,7 +1664,7 @@ impl SetDisks {
         ..Default::default()
     };
 
-    let finfo = match meta.into_fileinfo(bucket, object, "", true, true) {
+    let finfo = match meta.into_fileinfo(bucket, object, "", true, incl_free_vers, true) {
         Ok(res) => res,
         Err(err) => {
             for item in errs.iter_mut() {

@@ -1703,7 +1691,7 @@ impl SetDisks {
     for (idx, meta_op) in metadata_array.iter().enumerate() {
         if let Some(meta) = meta_op {
-            match meta.into_fileinfo(bucket, object, vid.to_string().as_str(), read_data, true) {
+            match meta.into_fileinfo(bucket, object, vid.to_string().as_str(), read_data, incl_free_vers, true) {
                 Ok(res) => meta_file_infos[idx] = res,
                 Err(err) => errs[idx] = Some(err.into()),
             }
@@ -28,14 +28,15 @@ use http::{HeaderMap, HeaderValue};
 use rustfs_common::heal_channel::HealOpts;
 use rustfs_filemeta::{
     FileInfo, MetaCacheEntriesSorted, ObjectPartInfo, REPLICATION_RESET, REPLICATION_STATUS, ReplicateDecision, ReplicationState,
-    ReplicationStatusType, VersionPurgeStatusType, replication_statuses_map, version_purge_statuses_map,
+    ReplicationStatusType, RestoreStatusOps as _, VersionPurgeStatusType, parse_restore_obj_status, replication_statuses_map,
+    version_purge_statuses_map,
 };
 use rustfs_madmin::heal_commands::HealResultItem;
 use rustfs_rio::Checksum;
 use rustfs_rio::{DecompressReader, HashReader, LimitReader, WarpReader};
 use rustfs_utils::CompressionAlgorithm;
-use rustfs_utils::http::AMZ_STORAGE_CLASS;
 use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX_LOWER};
+use rustfs_utils::http::{AMZ_BUCKET_REPLICATION_STATUS, AMZ_RESTORE, AMZ_STORAGE_CLASS};
 use rustfs_utils::path::decode_dir_object;
 use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
@@ -756,7 +757,24 @@ impl ObjectInfo {
             .ok()
         });
 
-        // TODO:ReplicationState
+        let replication_status_internal = fi
+            .replication_state_internal
+            .as_ref()
+            .and_then(|v| v.replication_status_internal.clone());
+        let version_purge_status_internal = fi
+            .replication_state_internal
+            .as_ref()
+            .and_then(|v| v.version_purge_status_internal.clone());
+
+        let mut replication_status = fi.replication_status();
+        if replication_status.is_empty()
+            && let Some(status) = fi.metadata.get(AMZ_BUCKET_REPLICATION_STATUS).cloned()
+            && status == ReplicationStatusType::Replica.as_str()
+        {
+            replication_status = ReplicationStatusType::Replica;
+        }
+
+        let version_purge_status = fi.version_purge_status();
 
         let transitioned_object = TransitionedObject {
             name: fi.transitioned_objname.clone(),
@@ -777,10 +795,24 @@ impl ObjectInfo {
         };
 
         // Extract storage class from metadata, default to STANDARD if not found
-        let storage_class = metadata
-            .get(AMZ_STORAGE_CLASS)
-            .cloned()
-            .or_else(|| Some(storageclass::STANDARD.to_string()));
+        let storage_class = if !fi.transition_tier.is_empty() {
+            Some(fi.transition_tier.clone())
+        } else {
+            fi.metadata
+                .get(AMZ_STORAGE_CLASS)
+                .cloned()
+                .or_else(|| Some(storageclass::STANDARD.to_string()))
+        };
+
+        let mut restore_ongoing = false;
+        let mut restore_expires = None;
+        if let Some(restore_status) = fi.metadata.get(AMZ_RESTORE).cloned() {
+            //
+            if let Ok(restore_status) = parse_restore_obj_status(&restore_status) {
+                restore_ongoing = restore_status.on_going();
+                restore_expires = restore_status.expiry();
+            }
+        }
 
         // Convert parts from rustfs_filemeta::ObjectPartInfo to store_api::ObjectPartInfo
         let parts = fi
@@ -798,6 +830,8 @@ impl ObjectInfo {
     })
     .collect();
 
+    // TODO: part checksums
+
     ObjectInfo {
         bucket: bucket.to_string(),
         name,

@@ -822,6 +856,12 @@ impl ObjectInfo {
         transitioned_object,
         checksum: fi.checksum.clone(),
         storage_class,
+        restore_ongoing,
+        restore_expires,
+        replication_status_internal,
+        replication_status,
+        version_purge_status_internal,
+        version_purge_status,
         ..Default::default()
     }
 }
@@ -505,6 +505,10 @@ impl FileInfo {
             ReplicationStatusType::Empty
         }
     }
+
+    pub fn shard_file_size(&self, total_length: i64) -> i64 {
+        self.erasure.shard_file_size(total_length)
+    }
 }
 
 #[derive(Debug, Default, Clone, Serialize, Deserialize)]

@@ -590,7 +594,7 @@ impl RestoreStatusOps for RestoreStatus {
     }
 }
 
-fn parse_restore_obj_status(restore_hdr: &str) -> Result<RestoreStatus> {
+pub fn parse_restore_obj_status(restore_hdr: &str) -> Result<RestoreStatus> {
     let tokens: Vec<&str> = restore_hdr.splitn(2, ",").collect();
     let progress_tokens: Vec<&str> = tokens[0].splitn(2, "=").collect();
     if progress_tokens.len() != 2 {
@@ -14,7 +14,8 @@
 use crate::{
     ErasureAlgo, ErasureInfo, Error, FileInfo, FileInfoVersions, InlineData, ObjectPartInfo, RawFileInfo, ReplicationState,
-    ReplicationStatusType, Result, VersionPurgeStatusType, replication_statuses_map, version_purge_statuses_map,
+    ReplicationStatusType, Result, TIER_FV_ID, TIER_FV_MARKER, VersionPurgeStatusType, replication_statuses_map,
+    version_purge_statuses_map,
 };
 use byteorder::ByteOrder;
 use bytes::Bytes;

@@ -909,6 +910,7 @@ impl FileMeta {
         path: &str,
         version_id: &str,
         read_data: bool,
+        include_free_versions: bool,
         all_parts: bool,
     ) -> Result<FileInfo> {
         let vid = {

@@ -921,11 +923,35 @@
         let mut is_latest = true;
         let mut succ_mod_time = None;
+        let mut non_free_versions = self.versions.len();
+
+        let mut found = false;
+        let mut found_free_version = None;
+        let mut found_fi = None;
 
         for ver in self.versions.iter() {
             let header = &ver.header;
 
-            // TODO: freeVersion
+            if header.free_version() {
+                non_free_versions -= 1;
+                if include_free_versions && found_free_version.is_none() {
+                    let mut found_free_fi = FileMetaVersion::default();
+                    if found_free_fi.unmarshal_msg(&ver.meta).is_ok() && found_free_fi.version_type != VersionType::Invalid {
+                        let mut free_fi = found_free_fi.into_fileinfo(volume, path, all_parts);
+                        free_fi.is_latest = true;
+                        found_free_version = Some(free_fi);
+                    }
+                }
+
+                if header.version_id != Some(vid) {
+                    continue;
+                }
+            }
+
+            if found {
+                continue;
+            }
 
             if !version_id.is_empty() && header.version_id != Some(vid) {
                 is_latest = false;

@@ -933,6 +959,8 @@
                 continue;
             }
 
+            found = true;
+
             let mut fi = ver.into_fileinfo(volume, path, all_parts)?;
             fi.is_latest = is_latest;

@@ -947,7 +975,25 @@
                     .map(bytes::Bytes::from);
             }
 
-            fi.num_versions = self.versions.len();
+            found_fi = Some(fi);
+        }
+
+        if !found {
+            if version_id.is_empty() {
+                if include_free_versions
+                    && non_free_versions == 0
+                    && let Some(free_version) = found_free_version
+                {
+                    return Ok(free_version);
+                }
+                return Err(Error::FileNotFound);
+            } else {
+                return Err(Error::FileVersionNotFound);
+            }
+        }
+
+        if let Some(mut fi) = found_fi {
+            fi.num_versions = non_free_versions;
+
+            return Ok(fi);
+        }

@@ -1767,14 +1813,27 @@ impl MetaObject {
             metadata.insert(k.to_owned(), v.to_owned());
         }
 
+        let tier_fvidkey = format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_ID}").to_lowercase();
+        let tier_fvmarker_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_MARKER}").to_lowercase();
+
        for (k, v) in &self.meta_sys {
-            if k == AMZ_STORAGE_CLASS && v == b"STANDARD" {
+            let lower_k = k.to_lowercase();
+
+            if lower_k == tier_fvidkey || lower_k == tier_fvmarker_key {
+                continue;
+            }
+
+            if lower_k == VERSION_PURGE_STATUS_KEY.to_lowercase() {
+                continue;
+            }
+
+            if lower_k == AMZ_STORAGE_CLASS.to_lowercase() && v == b"STANDARD" {
                 continue;
             }
 
             if k.starts_with(RESERVED_METADATA_PREFIX)
                 || k.starts_with(RESERVED_METADATA_PREFIX_LOWER)
-                || k == VERSION_PURGE_STATUS_KEY
+                || lower_k == VERSION_PURGE_STATUS_KEY.to_lowercase()
             {
                 metadata.insert(k.to_owned(), String::from_utf8(v.to_owned()).unwrap_or_default());
             }
@@ -2511,15 +2570,31 @@ pub fn merge_file_meta_versions(
     merged
 }
 
-pub async fn file_info_from_raw(ri: RawFileInfo, bucket: &str, object: &str, read_data: bool) -> Result<FileInfo> {
-    get_file_info(&ri.buf, bucket, object, "", FileInfoOpts { data: read_data }).await
+pub fn file_info_from_raw(
+    ri: RawFileInfo,
+    bucket: &str,
+    object: &str,
+    read_data: bool,
+    include_free_versions: bool,
+) -> Result<FileInfo> {
+    get_file_info(
+        &ri.buf,
+        bucket,
+        object,
+        "",
+        FileInfoOpts {
+            data: read_data,
+            include_free_versions,
+        },
+    )
 }
 
 pub struct FileInfoOpts {
     pub data: bool,
+    pub include_free_versions: bool,
 }
 
-pub async fn get_file_info(buf: &[u8], volume: &str, path: &str, version_id: &str, opts: FileInfoOpts) -> Result<FileInfo> {
+pub fn get_file_info(buf: &[u8], volume: &str, path: &str, version_id: &str, opts: FileInfoOpts) -> Result<FileInfo> {
     let vid = {
         if version_id.is_empty() {
             None

@@ -2541,7 +2616,7 @@ pub async fn get_file_info(buf: &[u8], volume: &str, path: &str, version_id: &st
         });
     }
 
-    let fi = meta.into_fileinfo(volume, path, version_id, opts.data, true)?;
+    let fi = meta.into_fileinfo(volume, path, version_id, opts.data, opts.include_free_versions, true)?;
     Ok(fi)
 }
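Note: since `get_file_info` and `file_info_from_raw` are no longer `async`, every caller loses an `.await`. A stub-level sketch of the new call shape (placeholder types, not the real rustfs_filemeta definitions):

```rust
// Placeholder types; the real ones live in rustfs_filemeta.
pub struct FileInfoOpts {
    pub data: bool,
    pub include_free_versions: bool,
}
pub struct FileInfo;
type Result<T> = std::result::Result<T, String>;

// Now a plain function: callers invoke it without `.await`.
pub fn get_file_info(_buf: &[u8], _volume: &str, _path: &str, _version_id: &str, _opts: FileInfoOpts) -> Result<FileInfo> {
    Ok(FileInfo)
}

fn main() -> Result<()> {
    let _fi = get_file_info(
        b"...",
        "bucket",
        "object",
        "",
        FileInfoOpts {
            data: false,
            include_free_versions: false,
        },
    )?; // previously: get_file_info(...).await?
    Ok(())
}
```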
@@ -12,7 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use crate::{Error, FileInfo, FileInfoVersions, FileMeta, FileMetaShallowVersion, Result, VersionType, merge_file_meta_versions};
+use crate::{
+    Error, FileInfo, FileInfoOpts, FileInfoVersions, FileMeta, FileMetaShallowVersion, Result, VersionType, get_file_info,
+    merge_file_meta_versions,
+};
 use rmp::Marker;
 use serde::{Deserialize, Serialize};
 use std::cmp::Ordering;

@@ -141,8 +144,7 @@ impl MetaCacheEntry {
             });
         }
 
-        if self.cached.is_some() {
-            let fm = self.cached.as_ref().unwrap();
+        if let Some(fm) = &self.cached {
             if fm.versions.is_empty() {
                 return Ok(FileInfo {
                     volume: bucket.to_owned(),

@@ -154,14 +156,20 @@ impl MetaCacheEntry {
             });
         }
 
-            let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false)?;
+            let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false, true)?;
             return Ok(fi);
         }
 
-        let mut fm = FileMeta::new();
-        fm.unmarshal_msg(&self.metadata)?;
-        let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false)?;
-        Ok(fi)
+        get_file_info(
+            &self.metadata,
+            bucket,
+            self.name.as_str(),
+            "",
+            FileInfoOpts {
+                data: false,
+                include_free_versions: false,
+            },
+        )
     }
 
     pub fn file_info_versions(&self, bucket: &str) -> Result<FileInfoVersions> {
@@ -30,15 +30,12 @@ workspace = true
 
 [dependencies]
 async-trait.workspace = true
 bytes.workspace = true
 futures.workspace = true
 rustfs-protos.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 tokio.workspace = true
 tonic.workspace = true
 tracing.workspace = true
 url.workspace = true
 uuid.workspace = true
 thiserror.workspace = true
 parking_lot.workspace = true
@@ -1,403 +0,0 @@
-// Copyright 2024 RustFS Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use async_trait::async_trait;
-use rustfs_protos::{
-    node_service_time_out_client, node_service_time_out_client_no_auth,
-    proto_gen::node_service::{GenerallyLockRequest, PingRequest},
-};
-use std::collections::HashMap;
-use std::sync::Arc;
-use tokio::sync::RwLock;
-use tonic::Request;
-use tracing::info;
-
-use crate::{
-    error::{LockError, Result},
-    types::{LockId, LockInfo, LockRequest, LockResponse, LockStats},
-};
-
-use super::LockClient;
-
-/// Remote lock client implementation
-#[derive(Debug)]
-pub struct RemoteClient {
-    addr: String,
-    // Track active locks with their original owner information
-    active_locks: Arc<RwLock<HashMap<LockId, String>>>, // lock_id -> owner
-}
-
-impl Clone for RemoteClient {
-    fn clone(&self) -> Self {
-        Self {
-            addr: self.addr.clone(),
-            active_locks: self.active_locks.clone(),
-        }
-    }
-}
-
-impl RemoteClient {
-    pub fn new(endpoint: String) -> Self {
-        Self {
-            addr: endpoint,
-            active_locks: Arc::new(RwLock::new(HashMap::new())),
-        }
-    }
-
-    pub fn from_url(url: url::Url) -> Self {
-        Self {
-            addr: url.to_string(),
-            active_locks: Arc::new(RwLock::new(HashMap::new())),
-        }
-    }
-
-    /// Create a minimal LockRequest for unlock operations
-    fn create_unlock_request(&self, lock_id: &LockId, owner: &str) -> LockRequest {
-        LockRequest {
-            lock_id: lock_id.clone(),
-            resource: lock_id.resource.clone(),
-            lock_type: crate::types::LockType::Exclusive, // Type doesn't matter for unlock
-            owner: owner.to_string(),
-            acquire_timeout: std::time::Duration::from_secs(30),
-            ttl: std::time::Duration::from_secs(300),
-            metadata: crate::types::LockMetadata::default(),
-            priority: crate::types::LockPriority::Normal,
-            deadlock_detection: false,
-        }
-    }
-}
-
-#[async_trait]
-impl LockClient for RemoteClient {
-    async fn acquire_exclusive(&self, request: &LockRequest) -> Result<LockResponse> {
-        info!("remote acquire_exclusive for {}", request.resource);
-        let mut client = node_service_time_out_client_no_auth(&self.addr)
-            .await
-            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
-        let req = Request::new(GenerallyLockRequest {
-            args: serde_json::to_string(&request)
-                .map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,
-        });
-        let resp = client
-            .lock(req)
-            .await
-            .map_err(|e| LockError::internal(e.to_string()))?
-            .into_inner();
-
-        // Check for explicit error first
-        if let Some(error_info) = resp.error_info {
-            return Err(LockError::internal(error_info));
-        }
-
-        // Check if the lock acquisition was successful
-        if resp.success {
-            // Save the lock information for later release
-            let mut locks = self.active_locks.write().await;
-            locks.insert(request.lock_id.clone(), request.owner.clone());
-
-            Ok(LockResponse::success(
-                LockInfo {
-                    id: request.lock_id.clone(),
-                    resource: request.resource.clone(),
-                    lock_type: request.lock_type,
-                    status: crate::types::LockStatus::Acquired,
-                    owner: request.owner.clone(),
-                    acquired_at: std::time::SystemTime::now(),
-                    expires_at: std::time::SystemTime::now() + request.ttl,
-                    last_refreshed: std::time::SystemTime::now(),
-                    metadata: request.metadata.clone(),
-                    priority: request.priority,
-                    wait_start_time: None,
-                },
-                std::time::Duration::ZERO,
-            ))
-        } else {
-            // Lock acquisition failed
-            Ok(LockResponse::failure(
-                "Lock acquisition failed on remote server".to_string(),
-                std::time::Duration::ZERO,
-            ))
-        }
-    }
-
-    async fn acquire_shared(&self, request: &LockRequest) -> Result<LockResponse> {
-        info!("remote acquire_shared for {}", request.resource);
-        let mut client = node_service_time_out_client_no_auth(&self.addr)
-            .await
-            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
-        let req = Request::new(GenerallyLockRequest {
-            args: serde_json::to_string(&request)
-                .map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,
-        });
-        let resp = client
-            .r_lock(req)
-            .await
-            .map_err(|e| LockError::internal(e.to_string()))?
-            .into_inner();
-
-        // Check for explicit error first
-        if let Some(error_info) = resp.error_info {
-            return Err(LockError::internal(error_info));
-        }
-
-        // Check if the lock acquisition was successful
-        if resp.success {
-            // Save the lock information for later release
-            let mut locks = self.active_locks.write().await;
-            locks.insert(request.lock_id.clone(), request.owner.clone());
-
-            Ok(LockResponse::success(
-                LockInfo {
-                    id: request.lock_id.clone(),
-                    resource: request.resource.clone(),
-                    lock_type: request.lock_type,
-                    status: crate::types::LockStatus::Acquired,
-                    owner: request.owner.clone(),
-                    acquired_at: std::time::SystemTime::now(),
-                    expires_at: std::time::SystemTime::now() + request.ttl,
-                    last_refreshed: std::time::SystemTime::now(),
-                    metadata: request.metadata.clone(),
-                    priority: request.priority,
-                    wait_start_time: None,
-                },
-                std::time::Duration::ZERO,
-            ))
-        } else {
-            // Lock acquisition failed
-            Ok(LockResponse::failure(
-                "Shared lock acquisition failed on remote server".to_string(),
-                std::time::Duration::ZERO,
-            ))
-        }
-    }
-
-    async fn release(&self, lock_id: &LockId) -> Result<bool> {
-        info!("remote release for {}", lock_id);
-
-        // Get the original owner for this lock
-        let owner = {
-            let locks = self.active_locks.read().await;
-            locks.get(lock_id).cloned().unwrap_or_else(|| "remote".to_string())
-        };
-
-        let unlock_request = self.create_unlock_request(lock_id, &owner);
-
-        let request_string = serde_json::to_string(&unlock_request)
-            .map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?;
-        let mut client = node_service_time_out_client_no_auth(&self.addr)
-            .await
-            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
-
-        // Try UnLock first (for exclusive locks)
-        let req = Request::new(GenerallyLockRequest {
-            args: request_string.clone(),
-        });
-        let resp = client.un_lock(req).await;
-
-        let success = if resp.is_err() {
-            // If that fails, try RUnLock (for shared locks)
-            let req = Request::new(GenerallyLockRequest { args: request_string });
-            let resp = client
-                .r_un_lock(req)
-                .await
-                .map_err(|e| LockError::internal(e.to_string()))?
-                .into_inner();
-            if let Some(error_info) = resp.error_info {
-                return Err(LockError::internal(error_info));
-            }
-            resp.success
-        } else {
-            let resp = resp.map_err(|e| LockError::internal(e.to_string()))?.into_inner();
-
-            if let Some(error_info) = resp.error_info {
-                return Err(LockError::internal(error_info));
-            }
-            resp.success
-        };
-
-        // Remove the lock from our tracking if successful
-        if success {
-            let mut locks = self.active_locks.write().await;
-            locks.remove(lock_id);
-        }
-
-        Ok(success)
-    }
-
-    async fn refresh(&self, lock_id: &LockId) -> Result<bool> {
-        info!("remote refresh for {}", lock_id);
-        let refresh_request = self.create_unlock_request(lock_id, "remote");
-        let mut client = node_service_time_out_client_no_auth(&self.addr)
-            .await
-            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
-        let req = Request::new(GenerallyLockRequest {
-            args: serde_json::to_string(&refresh_request)
-                .map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,
-        });
-        let resp = client
-            .refresh(req)
-            .await
-            .map_err(|e| LockError::internal(e.to_string()))?
-            .into_inner();
-        if let Some(error_info) = resp.error_info {
-            return Err(LockError::internal(error_info));
-        }
-        Ok(resp.success)
-    }
-
-    async fn force_release(&self, lock_id: &LockId) -> Result<bool> {
-        info!("remote force_release for {}", lock_id);
-        let force_request = self.create_unlock_request(lock_id, "remote");
-        let mut client = node_service_time_out_client_no_auth(&self.addr)
-            .await
-            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
-        let req = Request::new(GenerallyLockRequest {
-            args: serde_json::to_string(&force_request)
-                .map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,
-        });
-        let resp = client
-            .force_un_lock(req)
-            .await
-            .map_err(|e| LockError::internal(e.to_string()))?
-            .into_inner();
-        if let Some(error_info) = resp.error_info {
-            return Err(LockError::internal(error_info));
-        }
-        Ok(resp.success)
-    }
-
-    async fn check_status(&self, lock_id: &LockId) -> Result<Option<LockInfo>> {
-        info!("remote check_status for {}", lock_id);
-
-        // Since there's no direct status query in the gRPC service,
-        // we attempt a non-blocking lock acquisition to check if the resource is available
-        let status_request = self.create_unlock_request(lock_id, "remote");
-        let mut client = node_service_time_out_client_no_auth(&self.addr)
-            .await
-            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
-
-        // Try to acquire a very short-lived lock to test availability
-        let req = Request::new(GenerallyLockRequest {
-            args: serde_json::to_string(&status_request)
-                .map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,
-        });
-
-        // Try exclusive lock first with very short timeout
-        let resp = client.lock(req).await;
-
-        match resp {
-            Ok(response) => {
-                let resp = response.into_inner();
-                if resp.success {
-                    // If we successfully acquired the lock, the resource was free
-                    // Immediately release it
-                    let release_req = Request::new(GenerallyLockRequest {
-                        args: serde_json::to_string(&status_request)
-                            .map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,
-                    });
-                    let _ = client.un_lock(release_req).await; // Best effort release
-
-                    // Return None since no one was holding the lock
-                    Ok(None)
-                } else {
-                    // Lock acquisition failed, meaning someone is holding it
-                    // We can't determine the exact details remotely, so return a generic status
-                    Ok(Some(LockInfo {
-                        id: lock_id.clone(),
-                        resource: lock_id.as_str().to_string(),
-                        lock_type: crate::types::LockType::Exclusive, // We can't know the exact type
-                        status: crate::types::LockStatus::Acquired,
-                        owner: "unknown".to_string(), // Remote client can't determine owner
-                        acquired_at: std::time::SystemTime::now(),
-                        expires_at: std::time::SystemTime::now() + std::time::Duration::from_secs(3600),
-                        last_refreshed: std::time::SystemTime::now(),
-                        metadata: crate::types::LockMetadata::default(),
-                        priority: crate::types::LockPriority::Normal,
-                        wait_start_time: None,
-                    }))
-                }
-            }
-            Err(_) => {
-                // Communication error or lock is held
-                Ok(Some(LockInfo {
-                    id: lock_id.clone(),
-                    resource: lock_id.as_str().to_string(),
-                    lock_type: crate::types::LockType::Exclusive,
-                    status: crate::types::LockStatus::Acquired,
-                    owner: "unknown".to_string(),
-                    acquired_at: std::time::SystemTime::now(),
-                    expires_at: std::time::SystemTime::now() + std::time::Duration::from_secs(3600),
-                    last_refreshed: std::time::SystemTime::now(),
-                    metadata: crate::types::LockMetadata::default(),
-                    priority: crate::types::LockPriority::Normal,
-                    wait_start_time: None,
-                }))
-            }
-        }
-    }
-
-    async fn get_stats(&self) -> Result<LockStats> {
-        info!("remote get_stats from {}", self.addr);
-
-        // Since there's no direct statistics endpoint in the gRPC service,
-        // we return basic stats indicating this is a remote client
-        let stats = LockStats {
-            last_updated: std::time::SystemTime::now(),
-            ..Default::default()
-        };
-
-        // We could potentially enhance this by:
-        // 1. Keeping local counters of operations performed
-        // 2. Adding a stats gRPC method to the service
-        // 3. Querying server health endpoints
-
-        // For now, return minimal stats indicating remote connectivity
-        Ok(stats)
-    }
-
-    async fn close(&self) -> Result<()> {
-        Ok(())
-    }
-
-    async fn is_online(&self) -> bool {
-        // Use Ping interface to test if remote service is online
-        let mut client = match self.get_client().await {
-            Ok(client) => client,
-            Err(_) => {
-                info!("remote client {} connection failed", self.addr);
-                return false;
-            }
-        };
-
-        let ping_req = Request::new(PingRequest {
-            version: 1,
-            body: bytes::Bytes::new(),
-        });
-
-        match client.ping(ping_req).await {
-            Ok(_) => {
-                info!("remote client {} is online", self.addr);
-                true
-            }
-            Err(_) => {
-                info!("remote client {} ping failed", self.addr);
-                false
-            }
-        }
-    }
-
-    async fn is_local(&self) -> bool {
-        false
-    }
-}
@@ -22,10 +22,42 @@ use strum::{EnumString, IntoStaticStr};
 
 use super::{Error as IamError, Validator, utils::wildcard};
 
-#[derive(Serialize, Clone, Default, Debug)]
+/// A set of policy actions that serializes as a single string when containing one item,
+/// or as an array when containing multiple items (matching AWS S3 API format).
+#[derive(Clone, Default, Debug)]
 pub struct ActionSet(pub HashSet<Action>);
 
+impl Serialize for ActionSet {
+    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        use serde::ser::SerializeSeq;
+
+        if self.0.len() == 1 {
+            // Serialize single action as string (not array)
+            if let Some(action) = self.0.iter().next() {
+                let action_str: &str = action.into();
+                return serializer.serialize_str(action_str);
+            }
+        }
+
+        // Serialize multiple actions as array
+        let mut seq = serializer.serialize_seq(Some(self.0.len()))?;
+        for action in &self.0 {
+            let action_str: &str = action.into();
+            seq.serialize_element(action_str)?;
+        }
+        seq.end()
+    }
+}
+
 impl ActionSet {
+    /// Returns true if the action set is empty.
+    pub fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+
     pub fn is_match(&self, action: &Action) -> bool {
         for act in self.0.iter() {
             if act.is_match(action) {

@@ -150,6 +182,10 @@ impl Action {
 impl TryFrom<&str> for Action {
     type Error = Error;
     fn try_from(value: &str) -> std::result::Result<Self, Self::Error> {
+        // Support wildcard "*" which matches all S3 actions (AWS S3 standard)
+        if value == "*" {
+            return Ok(Self::S3Action(S3Action::AllActions));
+        }
         if value.starts_with(Self::S3_PREFIX) {
             Ok(Self::S3Action(
                 S3Action::try_from(value).map_err(|_| IamError::InvalidAction(value.into()))?,

@@ -559,3 +595,53 @@ pub enum KmsAction {
     #[strum(serialize = "kms:*")]
     AllActions,
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
     use std::collections::HashSet;
 
     #[test]
     fn test_action_wildcard_parsing() {
         // Test that "*" parses to S3Action::AllActions
         let action = Action::try_from("*").expect("Should parse wildcard");
         assert!(matches!(action, Action::S3Action(S3Action::AllActions)));
     }
 
     #[test]
     fn test_actionset_serialize_single_element() {
         // Single element should serialize as string
         let mut set = HashSet::new();
         set.insert(Action::S3Action(S3Action::GetObjectAction));
         let actionset = ActionSet(set);
 
         let json = serde_json::to_string(&actionset).expect("Should serialize");
         assert_eq!(json, "\"s3:GetObject\"");
     }
 
     #[test]
     fn test_actionset_serialize_multiple_elements() {
         // Multiple elements should serialize as array
         let mut set = HashSet::new();
         set.insert(Action::S3Action(S3Action::GetObjectAction));
         set.insert(Action::S3Action(S3Action::PutObjectAction));
         let actionset = ActionSet(set);
 
         let json = serde_json::to_string(&actionset).expect("Should serialize");
         let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
         assert!(parsed.is_array());
         let arr = parsed.as_array().expect("Should be array");
         assert_eq!(arr.len(), 2);
     }
 
     #[test]
     fn test_actionset_wildcard_serialization() {
         // Wildcard action should serialize correctly
         let mut set = HashSet::new();
         set.insert(Action::try_from("*").expect("Should parse wildcard"));
         let actionset = ActionSet(set);
 
         let json = serde_json::to_string(&actionset).expect("Should serialize");
         assert_eq!(json, "\"s3:*\"");
     }
 }
@@ -21,6 +21,13 @@ use super::Validator;
 #[derive(Serialize, Deserialize, Clone, Default, Debug)]
 pub struct ID(pub String);
 
+impl ID {
+    /// Returns true if the ID is empty.
+    pub fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+}
+
 impl Validator for ID {
     type Error = Error;
     /// if id is a valid utf string, then it is valid.
@@ -63,16 +63,23 @@ pub struct Policy {
 impl Policy {
     pub async fn is_allowed(&self, args: &Args<'_>) -> bool {
         // First, check all Deny statements - if any Deny matches, deny the request
         for statement in self.statements.iter().filter(|s| matches!(s.effect, Effect::Deny)) {
             if !statement.is_allowed(args).await {
                 return false;
             }
         }
 
-        if args.deny_only || args.is_owner {
+        // Owner has all permissions
+        if args.is_owner {
             return true;
         }
 
+        if args.deny_only {
+            return false;
+        }
+
         // Check Allow statements
         for statement in self.statements.iter().filter(|s| matches!(s.effect, Effect::Allow)) {
             if statement.is_allowed(args).await {
                 return true;
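Note: the rewritten evaluation order is the security-relevant part of this hunk: explicit Deny always wins, the owner bypasses everything else, and `deny_only` now short-circuits to `false` instead of granting access. A minimal sketch of that decision ladder (hypothetical free function, not the real async method):

```rust
// 1. any matching Deny        -> false
// 2. owner                    -> true
// 3. deny_only                -> false (Allow statements are never consulted)
// 4. otherwise                -> true iff some Allow statement matches
fn is_allowed(denied: bool, is_owner: bool, deny_only: bool, allow_matches: bool) -> bool {
    if denied {
        return false;
    }
    if is_owner {
        return true;
    }
    if deny_only {
        return false;
    }
    allow_matches
}

fn main() {
    assert!(!is_allowed(false, false, true, true)); // deny_only ignores matching Allow
    assert!(is_allowed(false, true, true, false)); // owner still wins
    assert!(!is_allowed(true, true, false, true)); // Deny beats everything
}
```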
@@ -177,9 +184,11 @@ pub struct BucketPolicyArgs<'a> {
     pub object: &'a str,
 }
 
+/// Bucket Policy with AWS S3-compatible JSON serialization.
+/// Empty optional fields are omitted from output to match AWS format.
 #[derive(Serialize, Deserialize, Clone, Default, Debug)]
 pub struct BucketPolicy {
-    #[serde(default, rename = "ID")]
+    #[serde(default, rename = "ID", skip_serializing_if = "ID::is_empty")]
     pub id: ID,
     #[serde(rename = "Version")]
     pub version: String,
@@ -592,6 +601,102 @@ mod test {
         Ok(())
     }
 
     #[tokio::test]
     async fn test_deny_only_security_fix() -> Result<()> {
         let data = r#"
         {
             "Version": "2012-10-17",
             "Statement": [
                 {
                     "Effect": "Allow",
                     "Action": ["s3:GetObject"],
                     "Resource": ["arn:aws:s3:::bucket1/*"]
                 }
             ]
         }
         "#;
 
         let policy = Policy::parse_config(data.as_bytes())?;
         let conditions = HashMap::new();
         let claims = HashMap::new();
 
         // Test with deny_only=true but no matching Allow statement
         let args_deny_only = Args {
             account: "testuser",
             groups: &None,
             action: Action::S3Action(crate::policy::action::S3Action::PutObjectAction),
             bucket: "bucket2",
             conditions: &conditions,
             is_owner: false,
             object: "test.txt",
             claims: &claims,
             deny_only: true, // Should NOT automatically allow
         };
 
         // Should return false because deny_only=true, regardless of whether there's a matching Allow statement
         assert!(
             !policy.is_allowed(&args_deny_only).await,
             "deny_only should return false when deny_only=true, regardless of Allow statements"
         );
 
         // Test with deny_only=true and matching Allow statement
         let args_deny_only_allowed = Args {
             account: "testuser",
             groups: &None,
             action: Action::S3Action(crate::policy::action::S3Action::GetObjectAction),
             bucket: "bucket1",
             conditions: &conditions,
             is_owner: false,
             object: "test.txt",
             claims: &claims,
             deny_only: true,
         };
 
         // Should return false because deny_only=true prevents checking Allow statements (unless is_owner=true)
         assert!(
             !policy.is_allowed(&args_deny_only_allowed).await,
             "deny_only should return false even with matching Allow statement"
         );
 
         // Test with deny_only=false (normal case)
         let args_normal = Args {
             account: "testuser",
             groups: &None,
             action: Action::S3Action(crate::policy::action::S3Action::GetObjectAction),
             bucket: "bucket1",
             conditions: &conditions,
             is_owner: false,
             object: "test.txt",
             claims: &claims,
             deny_only: false,
         };
 
         // Should return true because there's an Allow statement
         assert!(
             policy.is_allowed(&args_normal).await,
             "normal policy evaluation should allow with matching Allow statement"
         );
 
         let args_owner_deny_only = Args {
             account: "testuser",
             groups: &None,
             action: Action::S3Action(crate::policy::action::S3Action::PutObjectAction),
             bucket: "bucket2",
             conditions: &conditions,
             is_owner: true, // Owner has all permissions
             object: "test.txt",
             claims: &claims,
             deny_only: true, // Even with deny_only=true, owner should be allowed
         };
 
         assert!(
             policy.is_allowed(&args_owner_deny_only).await,
             "owner should retain all permissions even when deny_only=true"
         );
 
         Ok(())
     }
 
     #[tokio::test]
     async fn test_aws_username_policy_variable() -> Result<()> {
         let data = r#"
@@ -950,4 +1055,106 @@ mod test {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bucket_policy_serialize_omits_empty_fields() {
|
||||
use crate::policy::action::{Action, ActionSet, S3Action};
|
||||
use crate::policy::resource::{Resource, ResourceSet};
|
||||
use crate::policy::{Effect, Functions, Principal};
|
||||
|
||||
// Create a BucketPolicy with empty optional fields
|
||||
// Use JSON deserialization to create Principal (since aws field is private)
|
||||
let principal: Principal = serde_json::from_str(r#"{"AWS": "*"}"#).expect("Should parse principal");
|
||||
|
||||
let mut policy = BucketPolicy {
|
||||
id: ID::default(), // Empty ID
|
||||
version: "2012-10-17".to_string(),
|
||||
statements: vec![BPStatement {
|
||||
sid: ID::default(), // Empty Sid
|
||||
effect: Effect::Allow,
|
||||
principal,
|
||||
actions: ActionSet::default(),
|
||||
not_actions: ActionSet::default(), // Empty NotAction
|
||||
resources: ResourceSet::default(),
|
||||
not_resources: ResourceSet::default(), // Empty NotResource
|
||||
conditions: Functions::default(), // Empty Condition
|
||||
}],
|
||||
};
|
||||
|
||||
// Set actions and resources (required fields)
|
||||
policy.statements[0]
|
||||
.actions
|
||||
.0
|
||||
.insert(Action::S3Action(S3Action::ListBucketAction));
|
||||
policy.statements[0]
|
||||
.resources
|
||||
.0
|
||||
.insert(Resource::try_from("arn:aws:s3:::test/*").unwrap());
|
||||
|
||||
let json = serde_json::to_string(&policy).expect("Should serialize");
|
||||
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
|
||||
|
||||
// Verify empty fields are omitted
|
||||
assert!(!parsed.as_object().unwrap().contains_key("ID"), "Empty ID should be omitted");
|
||||
|
||||
let statement = &parsed["Statement"][0];
|
||||
assert!(!statement.as_object().unwrap().contains_key("Sid"), "Empty Sid should be omitted");
|
||||
assert!(
|
||||
!statement.as_object().unwrap().contains_key("NotAction"),
|
||||
"Empty NotAction should be omitted"
|
||||
);
|
||||
assert!(
|
||||
!statement.as_object().unwrap().contains_key("NotResource"),
|
||||
"Empty NotResource should be omitted"
|
||||
);
|
||||
assert!(
|
||||
!statement.as_object().unwrap().contains_key("Condition"),
|
||||
"Empty Condition should be omitted"
|
||||
);
|
||||
|
||||
// Verify required fields are present
|
||||
assert_eq!(parsed["Version"], "2012-10-17");
|
||||
assert_eq!(statement["Effect"], "Allow");
|
||||
assert_eq!(statement["Principal"]["AWS"], "*");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bucket_policy_serialize_single_action_as_string() {
|
||||
use crate::policy::action::{Action, ActionSet, S3Action};
|
||||
use crate::policy::resource::{Resource, ResourceSet};
|
||||
use crate::policy::{Effect, Principal};
|
||||
|
||||
// Use JSON deserialization to create Principal (since aws field is private)
|
||||
let principal: Principal = serde_json::from_str(r#"{"AWS": "*"}"#).expect("Should parse principal");
|
||||
|
||||
let mut policy = BucketPolicy {
|
||||
version: "2012-10-17".to_string(),
|
||||
statements: vec![BPStatement {
|
||||
effect: Effect::Allow,
|
||||
principal,
|
||||
actions: ActionSet::default(),
|
||||
resources: ResourceSet::default(),
|
||||
..Default::default()
|
||||
}],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Single action
|
||||
policy.statements[0]
|
||||
.actions
|
||||
.0
|
||||
.insert(Action::S3Action(S3Action::ListBucketAction));
|
||||
policy.statements[0]
|
||||
.resources
|
||||
.0
|
||||
.insert(Resource::try_from("arn:aws:s3:::test/*").unwrap());
|
||||
|
||||
let json = serde_json::to_string(&policy).expect("Should serialize");
|
||||
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
|
||||
let action = &parsed["Statement"][0]["Action"];
|
||||
|
||||
// Single action should be serialized as string
|
||||
assert!(action.is_string(), "Single action should serialize as string");
|
||||
assert_eq!(action.as_str().unwrap(), "s3:ListBucket");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,13 +17,35 @@ use crate::error::Error;
use serde::Serialize;
use std::collections::HashSet;

#[derive(Debug, Clone, Serialize, Default, PartialEq, Eq)]
#[serde(rename_all = "PascalCase", default)]
/// Principal that serializes AWS field as single string when containing only "*",
/// or as an array otherwise (matching AWS S3 API format).
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct Principal {
    #[serde(rename = "AWS")]
    aws: HashSet<String>,
}

impl Serialize for Principal {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        use serde::ser::SerializeMap;

        let mut map = serializer.serialize_map(Some(1))?;

        // If single element, serialize as string; otherwise as array
        if self.aws.len() == 1 {
            if let Some(val) = self.aws.iter().next() {
                map.serialize_entry("AWS", val)?;
            }
        } else {
            map.serialize_entry("AWS", &self.aws)?;
        }

        map.end()
    }
}

#[derive(serde::Deserialize)]
#[serde(untagged)]
enum PrincipalFormat {
@@ -118,4 +140,30 @@ mod test {
        };
        assert!(result);
    }

    #[test]
    fn test_principal_serialize_single_element() {
        // Single element should serialize as string (AWS format)
        let principal = Principal {
            aws: HashSet::from(["*".to_string()]),
        };

        let json = serde_json::to_string(&principal).expect("Should serialize");
        assert_eq!(json, r#"{"AWS":"*"}"#);
    }

    #[test]
    fn test_principal_serialize_multiple_elements() {
        // Multiple elements should serialize as array
        let principal = Principal {
            aws: HashSet::from(["*".to_string(), "arn:aws:iam::123456789012:root".to_string()]),
        };

        let json = serde_json::to_string(&principal).expect("Should serialize");
        let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
        let aws_value = parsed.get("AWS").expect("Should have AWS field");
        assert!(aws_value.is_array());
        let arr = aws_value.as_array().expect("Should be array");
        assert_eq!(arr.len(), 2);
    }
}

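One subtlety worth noting about the multi-element case: because `aws` is a `HashSet<String>`, the order of entries in the serialized array is unspecified, which is why the test above checks the array length rather than comparing against an exact JSON string.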
@@ -35,6 +35,11 @@ use super::{
pub struct ResourceSet(pub HashSet<Resource>);

impl ResourceSet {
    /// Returns true if the resource set is empty.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    pub async fn is_match(&self, resource: &str, conditions: &HashMap<String, Vec<String>>) -> bool {
        self.is_match_with_resolver(resource, conditions, None).await
    }

@@ -179,10 +179,12 @@ impl PartialEq for Statement {
    }
}

/// Bucket Policy Statement with AWS S3-compatible JSON serialization.
/// Empty optional fields are omitted from output to match AWS format.
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
#[serde(rename_all = "PascalCase", default)]
pub struct BPStatement {
    #[serde(rename = "Sid", default)]
    #[serde(rename = "Sid", default, skip_serializing_if = "ID::is_empty")]
    pub sid: ID,
    #[serde(rename = "Effect")]
    pub effect: Effect,
@@ -190,13 +192,13 @@ pub struct BPStatement {
    pub principal: Principal,
    #[serde(rename = "Action")]
    pub actions: ActionSet,
    #[serde(rename = "NotAction", default)]
    #[serde(rename = "NotAction", default, skip_serializing_if = "ActionSet::is_empty")]
    pub not_actions: ActionSet,
    #[serde(rename = "Resource", default)]
    pub resources: ResourceSet,
    #[serde(rename = "NotResource", default)]
    #[serde(rename = "NotResource", default, skip_serializing_if = "ResourceSet::is_empty")]
    pub not_resources: ResourceSet,
    #[serde(rename = "Condition", default)]
    #[serde(rename = "Condition", default, skip_serializing_if = "Functions::is_empty")]
    pub conditions: Functions,
}

@@ -34,10 +34,9 @@ path = "src/main.rs"

[dependencies]
rustfs-common.workspace = true
rustfs-credentials = { workspace = true }
flatbuffers = { workspace = true }
prost = { workspace = true }
tonic = { workspace = true, features = ["transport"] }
tonic-prost = { workspace = true }
tonic-prost-build = { workspace = true }
tracing = { workspace = true }

@@ -51,6 +51,7 @@ pub const AMZ_TAG_COUNT: &str = "x-amz-tagging-count";
pub const AMZ_TAG_DIRECTIVE: &str = "X-Amz-Tagging-Directive";

// S3 transition restore
pub const AMZ_RESTORE: &str = "x-amz-restore";
pub const AMZ_RESTORE_EXPIRY_DAYS: &str = "X-Amz-Restore-Expiry-Days";
pub const AMZ_RESTORE_REQUEST_DATE: &str = "X-Amz-Restore-Request-Date";

@@ -15,6 +15,7 @@
use rustfs_config::VERSION;
use std::env;
use std::fmt;
#[cfg(not(any(target_os = "openbsd", target_os = "freebsd")))]
use sysinfo::System;

/// Business Type Enumeration

@@ -126,6 +126,7 @@ url = { workspace = true }
urlencoding = { workspace = true }
uuid = { workspace = true }
zip = { workspace = true }
libc = { workspace = true }

# Observability and Metrics
metrics = { workspace = true }
@@ -136,9 +137,6 @@ russh = { workspace = true }
russh-sftp = { workspace = true }
ssh-key = { workspace = true }

[target.'cfg(any(target_os = "macos", target_os = "freebsd", target_os = "netbsd", target_os = "openbsd"))'.dependencies]
sysctl = { workspace = true }

[target.'cfg(target_os = "linux")'.dependencies]
libsystemd.workspace = true

@@ -112,8 +112,6 @@ impl Operation for AddServiceAccount {
            return Err(s3_error!(InvalidRequest, "iam not init"));
        };

        let deny_only = constant_time_eq(&cred.access_key, &target_user) || constant_time_eq(&cred.parent_user, &target_user);

        if !iam_store
            .is_allowed(&Args {
                account: &cred.access_key,
@@ -130,7 +128,7 @@ impl Operation for AddServiceAccount {
                is_owner: owner,
                object: "",
                claims: cred.claims.as_ref().unwrap_or(&HashMap::new()),
                deny_only,
                deny_only: false, // Always require explicit Allow permission
            })
            .await
        {

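The removed `constant_time_eq` shortcut previously relaxed the check to deny-only mode whenever the caller targeted their own account (or their parent user). Passing a hard-coded `false` means every caller, including self-service requests, now needs an explicit Allow for the action.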
@@ -118,12 +118,14 @@ impl Operation for AddUser {
            return Err(s3_error!(InvalidArgument, "access key is not utf8"));
        }

        let deny_only = ak == cred.access_key;
        // Security fix: Always require explicit Allow permission for CreateUser
        // Do not use deny_only to bypass permission checks, even when creating for self
        // This ensures consistent security semantics and prevents privilege escalation
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            deny_only,
            false, // Always require explicit Allow permission
            vec![Action::AdminAction(AdminAction::CreateUserAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
@@ -375,12 +377,14 @@ impl Operation for GetUserInfo {
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;

        let deny_only = ak == cred.access_key;
        // Security fix: Always require explicit Allow permission for GetUser
        // Users should have explicit GetUser permission to view account information
        // This ensures consistent security semantics across all admin operations
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            deny_only,
            false, // Always require explicit Allow permission
            vec![Action::AdminAction(AdminAction::GetUserAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
@@ -651,7 +655,7 @@ impl Operation for ImportIam {
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::ExportIAMAction)],
            vec![Action::AdminAction(AdminAction::ImportIAMAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;

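The ImportIam hunk looks like a straightforward correction: the handler previously validated the caller against the export permission (`ExportIAMAction`) instead of the import permission for the operation it actually performs.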
@@ -336,7 +336,7 @@ pub async fn init_ftp_system(
    let ftps_address_str = rustfs_utils::get_env_str(rustfs_config::ENV_FTPS_ADDRESS, rustfs_config::DEFAULT_FTPS_ADDRESS);
    let addr: SocketAddr = ftps_address_str
        .parse()
        .map_err(|e| format!("Invalid FTPS address '{}': {}", ftps_address_str, e))?;
        .map_err(|e| format!("Invalid FTPS address '{ftps_address_str}': {e}"))?;

    // Get FTPS configuration from environment variables
    let cert_file = rustfs_utils::get_env_opt_str(rustfs_config::ENV_FTPS_CERTS_FILE);
@@ -402,7 +402,7 @@ pub async fn init_sftp_system(
    let sftp_address_str = rustfs_utils::get_env_str(rustfs_config::ENV_SFTP_ADDRESS, rustfs_config::DEFAULT_SFTP_ADDRESS);
    let addr: SocketAddr = sftp_address_str
        .parse()
        .map_err(|e| format!("Invalid SFTP address '{}': {}", sftp_address_str, e))?;
        .map_err(|e| format!("Invalid SFTP address '{sftp_address_str}': {e}"))?;

    // Get SFTP configuration from environment variables
    let host_key = rustfs_utils::get_env_opt_str(rustfs_config::ENV_SFTP_HOST_KEY);

@@ -26,7 +26,7 @@ pub enum RustFSError {
impl std::fmt::Display for RustFSError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            RustFSError::Cert(msg) => write!(f, "Certificate error: {}", msg),
            RustFSError::Cert(msg) => write!(f, "Certificate error: {msg}"),
        }
    }
}
@@ -78,7 +78,7 @@ fn parse_pem_private_key(pem: &[u8]) -> Result<PrivateKeyDer<'static>, RustFSErr
async fn read_file(path: &PathBuf, desc: &str) -> Result<Vec<u8>, RustFSError> {
    tokio::fs::read(path)
        .await
        .map_err(|e| RustFSError::Cert(format!("read {} {:?}: {e}", desc, path)))
        .map_err(|e| RustFSError::Cert(format!("read {desc} {path:?}: {e}")))
}

/// Initialize TLS material for both server and outbound client connections.

@@ -30,7 +30,10 @@ use hyper_util::{
};
use metrics::{counter, histogram};
use rustfs_common::GlobalReadiness;
#[cfg(not(target_os = "openbsd"))]
use rustfs_config::{MI_B, RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
#[cfg(target_os = "openbsd")]
use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
use rustfs_ecstore::rpc::{TONIC_RPC_PREFIX, verify_rpc_signature};
use rustfs_protos::proto_gen::node_service::node_service_server::NodeServiceServer;
use rustfs_trusted_proxies::{ClientInfo, TrustedProxiesLayer};
@@ -376,12 +379,20 @@ pub async fn start_http_server(

            // Enable TCP Keepalive to detect dead clients (e.g. power loss)
            // Idle: 10s, Interval: 5s, Retries: 3
            let ka = TcpKeepalive::new()
                .with_time(Duration::from_secs(10))
                .with_interval(Duration::from_secs(5));
            let ka = {
                #[cfg(not(target_os = "openbsd"))]
                let ka = TcpKeepalive::new()
                    .with_time(Duration::from_secs(10))
                    .with_interval(Duration::from_secs(5))
                    .with_retries(3);

                #[cfg(not(any(target_os = "openbsd", target_os = "netbsd")))]
                let ka = ka.with_retries(3);
                // On OpenBSD socket2 only supports configuring the initial
                // TCP keepalive timeout; intervals and retries cannot be set.
                #[cfg(target_os = "openbsd")]
                let ka = TcpKeepalive::new().with_time(Duration::from_secs(10));

                ka
            };

            if let Err(err) = socket_ref.set_tcp_keepalive(&ka) {
                warn!(?err, "Failed to set TCP_KEEPALIVE");
@@ -390,9 +401,11 @@ pub async fn start_http_server(
            if let Err(err) = socket_ref.set_tcp_nodelay(true) {
                warn!(?err, "Failed to set TCP_NODELAY");
            }
            #[cfg(not(any(target_os = "openbsd")))]
            if let Err(err) = socket_ref.set_recv_buffer_size(4 * MI_B) {
                warn!(?err, "Failed to set set_recv_buffer_size");
            }
            #[cfg(not(any(target_os = "openbsd")))]
            if let Err(err) = socket_ref.set_send_buffer_size(4 * MI_B) {
                warn!(?err, "Failed to set set_send_buffer_size");
            }
@@ -742,38 +755,56 @@ fn check_auth(req: Request<()>) -> std::result::Result<Request<()>, Status> {
/// It tries to read the system's maximum connection queue length (`somaxconn`).
/// If reading fails, it falls back to a default value (e.g., 1024).
/// This makes the backlog size adaptive to the system configuration.
#[cfg(target_os = "linux")]
fn get_listen_backlog() -> i32 {
    const DEFAULT_BACKLOG: i32 = 1024;

    #[cfg(target_os = "linux")]
    {
        // For Linux, read from /proc/sys/net/core/somaxconn
        match std::fs::read_to_string("/proc/sys/net/core/somaxconn") {
            Ok(s) => s.trim().parse().unwrap_or(DEFAULT_BACKLOG),
            Err(_) => DEFAULT_BACKLOG,
        }
    }
    #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "netbsd", target_os = "openbsd"))]
    {
        // For macOS and BSD variants, use sysctl
        use sysctl::Sysctl;
        match sysctl::Ctl::new("kern.ipc.somaxconn") {
            Ok(ctl) => match ctl.value() {
                Ok(sysctl::CtlValue::Int(val)) => val,
                _ => DEFAULT_BACKLOG,
            },
            Err(_) => DEFAULT_BACKLOG,
        }
    }
    #[cfg(not(any(
        target_os = "linux",
        target_os = "macos",
        target_os = "freebsd",
        target_os = "netbsd",
        target_os = "openbsd"
    )))]
    {
        // Fallback for Windows and other operating systems
        DEFAULT_BACKLOG
    // For Linux, read from /proc/sys/net/core/somaxconn
    match std::fs::read_to_string("/proc/sys/net/core/somaxconn") {
        Ok(s) => s.trim().parse().unwrap_or(DEFAULT_BACKLOG),
        Err(_) => DEFAULT_BACKLOG,
    }
}

// For macOS and BSD variants use the syscall way of getting the connection queue length.
#[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "netbsd", target_os = "openbsd"))]
#[allow(unsafe_code)]
fn get_listen_backlog() -> i32 {
    const DEFAULT_BACKLOG: i32 = 1024;

    #[cfg(target_os = "openbsd")]
    let mut name = [libc::CTL_KERN, libc::KERN_SOMAXCONN];
    #[cfg(any(target_os = "netbsd", target_os = "macos", target_os = "freebsd"))]
    let mut name = [libc::CTL_KERN, libc::KERN_IPC, libc::KIPC_SOMAXCONN];
    let mut buf = [0; 1];
    let mut buf_len = std::mem::size_of_val(&buf);

    if unsafe {
        libc::sysctl(
            name.as_mut_ptr(),
            name.len() as u32,
            buf.as_mut_ptr() as *mut libc::c_void,
            &mut buf_len,
            std::ptr::null_mut(),
            0,
        )
    } != 0
    {
        return DEFAULT_BACKLOG;
    }

    buf[0]
}

// Fallback for Windows and other operating systems
#[cfg(not(any(
    target_os = "linux",
    target_os = "macos",
    target_os = "freebsd",
    target_os = "netbsd",
    target_os = "openbsd"
)))]
fn get_listen_backlog() -> i32 {
    const DEFAULT_BACKLOG: i32 = 1024;
    DEFAULT_BACKLOG
}

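For context, a hypothetical call-site sketch showing how the adaptive backlog would be applied when binding a listener. This assumes the `socket2` crate (which the server already uses for its keepalive options above); `bind_with_adaptive_backlog` is an illustrative name, not a function from the diff:

use std::net::SocketAddr;
use socket2::{Domain, Socket, Type};

// Hedged sketch: get_listen_backlog() is the per-OS helper defined above.
fn bind_with_adaptive_backlog(addr: SocketAddr) -> std::io::Result<Socket> {
    let socket = Socket::new(Domain::for_address(addr), Type::STREAM, None)?;
    socket.bind(&addr.into())?;
    // Resolves somaxconn per-OS, falling back to 1024 on failure.
    socket.listen(get_listen_backlog())?;
    Ok(socket)
}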
@@ -1784,7 +1784,7 @@ impl Node for NodeService {
                return Ok(Response::new(GetMetricsResponse {
                    success: false,
                    realtime_metrics: Bytes::new(),
                    error_info: Some(format!("Invalid metric_type: {}", err)),
                    error_info: Some(format!("Invalid metric_type: {err}")),
                }));
            }
        };
@@ -1798,7 +1798,7 @@ impl Node for NodeService {
                return Ok(Response::new(GetMetricsResponse {
                    success: false,
                    realtime_metrics: Bytes::new(),
                    error_info: Some(format!("Invalid opts: {}", err)),
                    error_info: Some(format!("Invalid opts: {err}")),
                }));
            }
        };

139
scripts/s3-tests/implemented_tests.txt
Normal file
@@ -0,0 +1,139 @@
# Implemented S3 feature tests
# ============================
#
# These tests SHOULD PASS on RustFS for standard S3 API compatibility.
# Run these tests to verify RustFS S3 compatibility.
#
# Covered operations:
# - Bucket: Create, Delete, List, Head, GetLocation
# - Object: Put, Get, Delete, Copy, Head
# - ListObjects/ListObjectsV2: prefix, delimiter, marker, maxkeys
# - Multipart Upload: Create, Upload, Complete, Abort, List
# - Tagging: Bucket and Object tags
# - Bucket Policy: Put, Get, Delete
# - Public Access Block: Put, Get, Delete
# - Presigned URLs: GET and PUT operations
# - Range requests: Partial object retrieval
# - Metadata: User-defined metadata
# - Conditional GET: If-Match, If-None-Match, If-Modified-Since
#
# Total: 118 tests

test_basic_key_count
test_bucket_create_naming_bad_short_one
test_bucket_create_naming_bad_short_two
test_bucket_create_naming_bad_starts_nonalpha
test_bucket_create_naming_dns_dash_at_end
test_bucket_create_naming_dns_dash_dot
test_bucket_create_naming_dns_dot_dash
test_bucket_create_naming_dns_dot_dot
test_bucket_create_naming_dns_underscore
test_bucket_create_naming_good_contains_hyphen
test_bucket_create_naming_good_contains_period
test_bucket_create_naming_good_long_60
test_bucket_create_naming_good_long_61
test_bucket_create_naming_good_long_62
test_bucket_create_naming_good_long_63
test_bucket_create_naming_good_starts_alpha
test_bucket_create_naming_good_starts_digit
test_bucket_delete_nonempty
test_bucket_delete_notexist
test_bucket_head
test_bucket_head_notexist
test_bucket_list_distinct
test_bucket_list_empty
test_bucket_list_long_name
test_bucket_list_marker_after_list
test_bucket_list_marker_empty
test_bucket_list_marker_none
test_bucket_list_marker_not_in_list
test_bucket_list_marker_unreadable
test_bucket_list_maxkeys_invalid
test_bucket_list_maxkeys_none
test_bucket_list_maxkeys_zero
test_bucket_list_prefix_alt
test_bucket_list_prefix_basic
test_bucket_list_prefix_delimiter_alt
test_bucket_list_prefix_delimiter_basic
test_bucket_list_prefix_delimiter_delimiter_not_exist
test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist
test_bucket_list_prefix_delimiter_prefix_not_exist
test_bucket_list_prefix_empty
test_bucket_list_prefix_none
test_bucket_list_prefix_not_exist
test_bucket_list_prefix_unreadable
test_bucket_list_special_prefix
test_bucket_list_delimiter_alt
test_bucket_list_delimiter_dot
test_bucket_list_delimiter_empty
test_bucket_list_delimiter_none
test_bucket_list_delimiter_not_exist
test_bucket_list_delimiter_percentage
test_bucket_list_delimiter_prefix_ends_with_delimiter
test_bucket_list_delimiter_unreadable
test_bucket_list_delimiter_whitespace
test_bucket_listv2_continuationtoken
test_bucket_listv2_continuationtoken_empty
test_bucket_listv2_fetchowner_defaultempty
test_bucket_listv2_fetchowner_empty
test_bucket_listv2_fetchowner_notempty
test_bucket_listv2_maxkeys_none
test_bucket_listv2_maxkeys_zero
test_bucket_listv2_prefix_alt
test_bucket_listv2_prefix_basic
test_bucket_listv2_prefix_delimiter_alt
test_bucket_listv2_prefix_delimiter_basic
test_bucket_listv2_prefix_delimiter_delimiter_not_exist
test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist
test_bucket_listv2_prefix_delimiter_prefix_not_exist
test_bucket_listv2_prefix_empty
test_bucket_listv2_prefix_none
test_bucket_listv2_prefix_not_exist
test_bucket_listv2_prefix_unreadable
test_bucket_listv2_startafter_after_list
test_bucket_listv2_startafter_not_in_list
test_bucket_listv2_startafter_unreadable
test_bucket_notexist
test_buckets_create_then_list
test_buckets_list_ctime
test_bucketv2_notexist
test_bucketv2_policy_another_bucket
test_get_bucket_policy_status
test_get_nonpublicpolicy_principal_bucket_policy_status
test_get_object_ifmatch_good
test_get_object_ifmodifiedsince_good
test_get_object_ifunmodifiedsince_failed
test_list_buckets_bad_auth
test_multi_object_delete
test_multi_object_delete_key_limit
test_multi_objectv2_delete
test_multi_objectv2_delete_key_limit
test_multipart_copy_without_range
test_multipart_upload_empty
test_multipart_upload_incorrect_etag
test_multipart_upload_missing_part
test_multipart_upload_multiple_sizes
test_multipart_upload_on_a_bucket_with_policy
test_multipart_upload_overwrite_existing_object
test_multipart_upload_size_too_small
test_object_copy_bucket_not_found
test_object_copy_key_not_found
test_object_copy_not_owned_object_bucket
test_object_head_zero_bytes
test_object_metadata_replaced_on_put
test_object_put_authenticated
test_object_read_not_exist
test_object_set_get_metadata_none_to_empty
test_object_set_get_metadata_none_to_good
test_object_set_get_metadata_overwrite_to_empty
test_object_write_cache_control
test_object_write_check_etag
test_object_write_expires
test_object_write_file
test_object_write_read_update_read_delete
test_object_write_to_nonexist_bucket
test_put_max_kvsize_tags
test_ranged_request_empty_object
test_ranged_request_invalid_range
test_set_multipart_tagging
test_upload_part_copy_percent_encoded_key
505
scripts/s3-tests/non_standard_tests.txt
Normal file
@@ -0,0 +1,505 @@
# Non-standard S3 tests (Ceph/RGW/MinIO specific)
# ================================================
#
# These tests use vendor-specific extensions not part of AWS S3 API.
# They are PERMANENTLY EXCLUDED from RustFS compatibility testing.
#
# Exclusion reasons:
# - fails_on_aws marker: Ceph-specific features
# - X-RGW-* headers: Ceph proprietary headers
# - allowUnordered: Ceph-specific query parameter
# - ACL tests: RustFS uses IAM policy-based access control
# - CORS tests: Not implemented
# - POST Object: HTML form upload not implemented
# - Error format differences: Minor response format variations
#
# Total: non-standard tests listed below

test_100_continue
test_100_continue_error_retry
test_abort_multipart_upload_not_found
test_access_bucket_private_object_private
test_access_bucket_private_object_publicread
test_access_bucket_private_object_publicreadwrite
test_access_bucket_private_objectv2_private
test_access_bucket_private_objectv2_publicread
test_access_bucket_private_objectv2_publicreadwrite
test_access_bucket_publicread_object_private
test_access_bucket_publicread_object_publicread
test_access_bucket_publicread_object_publicreadwrite
test_access_bucket_publicreadwrite_object_private
test_access_bucket_publicreadwrite_object_publicread
test_access_bucket_publicreadwrite_object_publicreadwrite
test_account_usage
test_atomic_conditional_write_1mb
test_atomic_dual_conditional_write_1mb
test_atomic_write_bucket_gone
test_block_public_restrict_public_buckets
test_bucket_acl_canned
test_bucket_acl_canned_authenticatedread
test_bucket_acl_canned_during_create
test_bucket_acl_canned_private_to_private
test_bucket_acl_canned_publicreadwrite
test_bucket_acl_default
test_bucket_acl_grant_email
test_bucket_acl_grant_email_not_exist
test_bucket_acl_grant_nonexist_user
test_bucket_acl_grant_userid_fullcontrol
test_bucket_acl_grant_userid_read
test_bucket_acl_grant_userid_readacp
test_bucket_acl_grant_userid_write
test_bucket_acl_grant_userid_writeacp
test_bucket_acl_revoke_all
test_bucket_concurrent_set_canned_acl
test_bucket_create_exists
test_bucket_create_exists_nonowner
test_bucket_create_naming_bad_ip
test_bucket_create_naming_dns_long
test_bucket_create_special_key_names
test_bucket_get_location
test_bucket_head_extended
test_bucket_header_acl_grants
test_bucket_list_delimiter_not_skip_special
test_bucket_list_delimiter_prefix
test_bucket_list_delimiter_prefix_underscore
test_bucket_list_many
test_bucket_list_maxkeys_one
test_bucket_list_objects_anonymous
test_bucket_list_objects_anonymous_fail
test_bucket_list_return_data
test_bucket_list_return_data_versioning
test_bucket_list_unordered
test_bucket_listv2_both_continuationtoken_startafter
test_bucket_listv2_delimiter_prefix
test_bucket_listv2_delimiter_prefix_underscore
test_bucket_listv2_many
test_bucket_listv2_maxkeys_one
test_bucket_listv2_objects_anonymous
test_bucket_listv2_objects_anonymous_fail
test_bucket_listv2_unordered
test_bucket_logging_bucket_acl_required
test_bucket_logging_bucket_auth_type
test_bucket_logging_cleanup_bucket_concurrent_deletion_j
test_bucket_logging_cleanup_bucket_concurrent_deletion_j_single
test_bucket_logging_cleanup_bucket_concurrent_deletion_s
test_bucket_logging_cleanup_bucket_concurrent_deletion_s_single
test_bucket_logging_cleanup_bucket_deletion_j
test_bucket_logging_cleanup_bucket_deletion_j_single
test_bucket_logging_cleanup_bucket_deletion_s
test_bucket_logging_cleanup_bucket_deletion_s_single
test_bucket_logging_cleanup_concurrent_disabling_j
test_bucket_logging_cleanup_concurrent_disabling_j_single
test_bucket_logging_cleanup_concurrent_disabling_s
test_bucket_logging_cleanup_concurrent_disabling_s_single
test_bucket_logging_cleanup_concurrent_updating_j
test_bucket_logging_cleanup_concurrent_updating_j_single
test_bucket_logging_cleanup_concurrent_updating_s
test_bucket_logging_cleanup_concurrent_updating_s_single
test_bucket_logging_cleanup_disabling_j
test_bucket_logging_cleanup_disabling_j_single
test_bucket_logging_cleanup_disabling_s
test_bucket_logging_cleanup_disabling_s_single
test_bucket_logging_cleanup_updating_j
test_bucket_logging_cleanup_updating_j_single
test_bucket_logging_cleanup_updating_s
test_bucket_logging_cleanup_updating_s_single
test_bucket_logging_concurrent_flush_j
test_bucket_logging_concurrent_flush_j_single
test_bucket_logging_concurrent_flush_s
test_bucket_logging_concurrent_flush_s_single
test_bucket_logging_conf_concurrent_updating_pfx_j
test_bucket_logging_conf_concurrent_updating_pfx_s
test_bucket_logging_conf_concurrent_updating_roll_j
test_bucket_logging_conf_concurrent_updating_roll_s
test_bucket_logging_conf_updating_pfx_j
test_bucket_logging_conf_updating_pfx_s
test_bucket_logging_conf_updating_roll_j
test_bucket_logging_conf_updating_roll_s
test_bucket_logging_copy_objects
test_bucket_logging_copy_objects_bucket
test_bucket_logging_copy_objects_bucket_versioned
test_bucket_logging_copy_objects_versioned
test_bucket_logging_delete_objects
test_bucket_logging_delete_objects_versioned
test_bucket_logging_event_type_j
test_bucket_logging_event_type_s
test_bucket_logging_flush_empty
test_bucket_logging_flush_j
test_bucket_logging_flush_j_single
test_bucket_logging_flush_s
test_bucket_logging_flush_s_single
test_bucket_logging_get_objects
test_bucket_logging_get_objects_versioned
test_bucket_logging_head_objects
test_bucket_logging_head_objects_versioned
test_bucket_logging_key_filter_j
test_bucket_logging_key_filter_s
test_bucket_logging_mpu_copy
test_bucket_logging_mpu_copy_versioned
test_bucket_logging_mpu_j
test_bucket_logging_mpu_s
test_bucket_logging_mpu_versioned_j
test_bucket_logging_mpu_versioned_s
test_bucket_logging_mtime
test_bucket_logging_multi_delete
test_bucket_logging_multi_delete_versioned
test_bucket_logging_multiple_prefixes
test_bucket_logging_notupdating_j
test_bucket_logging_notupdating_j_single
test_bucket_logging_notupdating_s
test_bucket_logging_notupdating_s_single
test_bucket_logging_object_acl_required
test_bucket_logging_object_meta
test_bucket_logging_part_cleanup_concurrent_deletion_j
test_bucket_logging_part_cleanup_concurrent_deletion_s
test_bucket_logging_part_cleanup_concurrent_disabling_j
test_bucket_logging_part_cleanup_concurrent_disabling_s
test_bucket_logging_part_cleanup_concurrent_updating_j
test_bucket_logging_part_cleanup_concurrent_updating_s
test_bucket_logging_part_cleanup_deletion_j
test_bucket_logging_part_cleanup_deletion_s
test_bucket_logging_part_cleanup_disabling_j
test_bucket_logging_part_cleanup_disabling_s
test_bucket_logging_part_cleanup_updating_j
test_bucket_logging_part_cleanup_updating_s
test_bucket_logging_partitioned_key
test_bucket_logging_permission_change_j
test_bucket_logging_permission_change_s
test_bucket_logging_put_and_flush
test_bucket_logging_put_concurrency
test_bucket_logging_put_objects
test_bucket_logging_put_objects_versioned
test_bucket_logging_roll_time
test_bucket_logging_simple_key
test_bucket_logging_single_prefix
test_bucket_logging_target_cleanup_j
test_bucket_logging_target_cleanup_j_single
test_bucket_logging_target_cleanup_s
test_bucket_logging_target_cleanup_s_single
test_bucket_policy_get_obj_acl_existing_tag
test_bucket_policy_get_obj_existing_tag
test_bucket_policy_get_obj_tagging_existing_tag
test_bucket_policy_put_obj_copy_source
test_bucket_policy_put_obj_copy_source_meta
test_bucket_policy_put_obj_kms_noenc
test_bucket_policy_put_obj_request_obj_tag
test_bucket_policy_put_obj_s3_incorrect_algo_sse_s3
test_bucket_policy_put_obj_s3_noenc
test_bucket_policy_put_obj_tagging_existing_tag
test_bucket_policy_set_condition_operator_end_with_IfExists
test_bucket_policy_upload_part_copy
test_bucket_recreate_new_acl
test_bucket_recreate_not_overriding
test_bucket_recreate_overwrite_acl
test_copy_object_ifmatch_failed
test_copy_object_ifmatch_good
test_copy_object_ifnonematch_failed
test_copy_object_ifnonematch_good
test_cors_header_option
test_cors_origin_response
test_cors_origin_wildcard
test_cors_presigned_get_object
test_cors_presigned_get_object_tenant
test_cors_presigned_get_object_tenant_v2
test_cors_presigned_get_object_v2
test_cors_presigned_put_object
test_cors_presigned_put_object_tenant
test_cors_presigned_put_object_tenant_v2
test_cors_presigned_put_object_tenant_with_acl
test_cors_presigned_put_object_v2
test_cors_presigned_put_object_with_acl
test_create_bucket_bucket_owner_enforced
test_create_bucket_bucket_owner_preferred
test_create_bucket_object_writer
test_delete_marker_expiration
test_delete_marker_nonversioned
test_delete_marker_suspended
test_delete_marker_versioned
test_delete_object_current_if_match
test_delete_object_current_if_match_last_modified_time
test_delete_object_current_if_match_size
test_delete_object_if_match
test_delete_object_if_match_last_modified_time
test_delete_object_if_match_size
test_delete_object_version_if_match
test_delete_object_version_if_match_last_modified_time
test_delete_object_version_if_match_size
test_delete_objects_current_if_match
test_delete_objects_current_if_match_last_modified_time
test_delete_objects_current_if_match_size
test_delete_objects_if_match
test_delete_objects_if_match_last_modified_time
test_delete_objects_if_match_size
test_delete_objects_version_if_match
test_delete_objects_version_if_match_last_modified_time
test_delete_objects_version_if_match_size
test_delete_tags_obj_public
test_encrypted_transfer_13b
test_encrypted_transfer_1MB
test_encrypted_transfer_1b
test_encrypted_transfer_1kb
test_encryption_sse_c_deny_algo_with_bucket_policy
test_encryption_sse_c_enforced_with_bucket_policy
test_encryption_sse_c_multipart_invalid_chunks_1
test_encryption_sse_c_multipart_invalid_chunks_2
test_encryption_sse_c_multipart_upload
test_encryption_sse_c_post_object_authenticated_request
test_encryption_sse_c_unaligned_multipart_upload
test_expected_bucket_owner
test_get_multipart_checksum_object_attributes
test_get_multipart_object_attributes
test_get_obj_tagging
test_get_object_attributes
test_get_object_ifmatch_failed
test_get_object_ifmodifiedsince_failed
test_get_object_ifnonematch_failed
test_get_object_ifnonematch_good
test_get_object_ifunmodifiedsince_good
test_get_paginated_multipart_object_attributes
test_get_single_multipart_object_attributes
test_get_sse_c_encrypted_object_attributes
test_get_tags_acl_public
test_head_bucket_usage
test_lifecycle_cloud_multiple_transition
test_lifecycle_cloud_transition
test_lifecycle_cloud_transition_large_obj
test_lifecycle_deletemarker_expiration
test_lifecycle_deletemarker_expiration_with_days_tag
test_lifecycle_expiration
test_lifecycle_expiration_date
test_lifecycle_expiration_header_and_tags_head
test_lifecycle_expiration_header_head
test_lifecycle_expiration_header_tags_head
test_lifecycle_expiration_newer_noncurrent
test_lifecycle_expiration_noncur_tags1
test_lifecycle_expiration_size_gt
test_lifecycle_expiration_size_lt
test_lifecycle_expiration_tags1
test_lifecycle_expiration_tags2
test_lifecycle_expiration_versioned_tags2
test_lifecycle_expiration_versioning_enabled
test_lifecycle_multipart_expiration
test_lifecycle_noncur_cloud_transition
test_lifecycle_noncur_expiration
test_lifecycle_noncur_transition
test_lifecycle_transition
test_lifecycle_transition_single_rule_multi_trans
test_lifecyclev2_expiration
test_list_buckets_anonymous
test_list_buckets_invalid_auth
test_list_buckets_paginated
test_list_multipart_upload
test_list_multipart_upload_owner
test_multipart_checksum_sha256
test_multipart_copy_improper_range
test_multipart_copy_invalid_range
test_multipart_copy_multiple_sizes
test_multipart_copy_small
test_multipart_copy_special_names
test_multipart_copy_versioned
test_multipart_get_part
test_multipart_put_current_object_if_match
test_multipart_put_current_object_if_none_match
test_multipart_put_object_if_match
test_multipart_single_get_part
test_multipart_sse_c_get_part
test_multipart_upload
test_multipart_upload_contents
test_multipart_upload_resend_part
test_multipart_upload_small
test_multipart_use_cksum_helper_crc32
test_multipart_use_cksum_helper_crc32c
test_multipart_use_cksum_helper_crc64nvme
test_multipart_use_cksum_helper_sha1
test_multipart_use_cksum_helper_sha256
test_non_multipart_get_part
test_non_multipart_sse_c_get_part
test_object_acl
test_object_acl_canned
test_object_acl_canned_authenticatedread
test_object_acl_canned_bucketownerfullcontrol
test_object_acl_canned_bucketownerread
test_object_acl_canned_during_create
test_object_acl_canned_publicreadwrite
test_object_acl_default
test_object_acl_full_control_verify_attributes
test_object_acl_full_control_verify_owner
test_object_acl_read
test_object_acl_readacp
test_object_acl_write
test_object_acl_writeacp
test_object_anon_put
test_object_anon_put_write_access
test_object_content_encoding_aws_chunked
test_object_copy_16m
test_object_copy_canned_acl
test_object_copy_diff_bucket
test_object_copy_not_owned_bucket
test_object_copy_replacing_metadata
test_object_copy_retaining_metadata
test_object_copy_same_bucket
test_object_copy_to_itself
test_object_copy_to_itself_with_metadata
test_object_copy_verify_contenttype
test_object_copy_versioned_bucket
test_object_copy_versioned_url_encoding
test_object_copy_versioning_multipart_upload
test_object_copy_zero_size
test_object_delete_key_bucket_gone
test_object_header_acl_grants
test_object_lock_changing_mode_from_compliance
test_object_lock_changing_mode_from_governance_with_bypass
test_object_lock_changing_mode_from_governance_without_bypass
test_object_lock_delete_multipart_object_with_legal_hold_on
test_object_lock_delete_multipart_object_with_retention
test_object_lock_delete_object_with_legal_hold_off
test_object_lock_delete_object_with_legal_hold_on
test_object_lock_delete_object_with_retention
test_object_lock_delete_object_with_retention_and_marker
test_object_lock_get_legal_hold
test_object_lock_get_obj_lock
test_object_lock_get_obj_metadata
test_object_lock_get_obj_retention
test_object_lock_get_obj_retention_iso8601
test_object_lock_multi_delete_object_with_retention
test_object_lock_put_legal_hold
test_object_lock_put_legal_hold_invalid_status
test_object_lock_put_obj_lock
test_object_lock_put_obj_lock_invalid_days
test_object_lock_put_obj_lock_invalid_mode
test_object_lock_put_obj_lock_invalid_status
test_object_lock_put_obj_lock_invalid_years
test_object_lock_put_obj_lock_with_days_and_years
test_object_lock_put_obj_retention
test_object_lock_put_obj_retention_increase_period
test_object_lock_put_obj_retention_invalid_mode
test_object_lock_put_obj_retention_override_default_retention
test_object_lock_put_obj_retention_shorten_period
test_object_lock_put_obj_retention_shorten_period_bypass
test_object_lock_put_obj_retention_versionid
test_object_lock_suspend_versioning
test_object_lock_uploading_obj
test_object_raw_authenticated
test_object_raw_authenticated_bucket_acl
test_object_raw_authenticated_bucket_gone
test_object_raw_authenticated_object_acl
test_object_raw_authenticated_object_gone
test_object_raw_get
test_object_raw_get_bucket_acl
test_object_raw_get_bucket_gone
test_object_raw_get_object_acl
test_object_raw_get_object_gone
test_object_raw_get_x_amz_expires_not_expired
test_object_raw_get_x_amz_expires_not_expired_tenant
test_object_raw_get_x_amz_expires_out_max_range
test_object_raw_get_x_amz_expires_out_positive_range
test_object_raw_get_x_amz_expires_out_range_zero
test_object_raw_put_authenticated_expired
test_object_raw_response_headers
test_object_read_unreadable
test_object_requestid_matches_header_on_error
test_object_set_get_unicode_metadata
test_object_write_with_chunked_transfer_encoding
test_post_object_anonymous_request
test_post_object_authenticated_no_content_type
test_post_object_authenticated_request
test_post_object_authenticated_request_bad_access_key
test_post_object_case_insensitive_condition_fields
test_post_object_condition_is_case_sensitive
test_post_object_empty_conditions
test_post_object_escaped_field_values
test_post_object_expired_policy
test_post_object_expires_is_case_sensitive
test_post_object_ignored_header
test_post_object_invalid_access_key
test_post_object_invalid_content_length_argument
test_post_object_invalid_date_format
test_post_object_invalid_request_field_value
test_post_object_invalid_signature
test_post_object_missing_conditions_list
test_post_object_missing_content_length_argument
test_post_object_missing_expires_condition
test_post_object_missing_policy_condition
test_post_object_missing_signature
test_post_object_no_key_specified
test_post_object_request_missing_policy_specified_field
test_post_object_set_invalid_success_code
test_post_object_set_key_from_filename
test_post_object_set_success_code
test_post_object_success_redirect_action
test_post_object_tags_anonymous_request
test_post_object_tags_authenticated_request
test_post_object_upload_larger_than_chunk
test_post_object_upload_size_below_minimum
test_post_object_upload_size_limit_exceeded
test_post_object_upload_size_rgw_chunk_size_bug
test_post_object_user_specified_header
test_post_object_wrong_bucket
test_put_bucket_acl_grant_group_read
test_put_bucket_logging_account_j
test_put_bucket_logging_account_s
test_put_bucket_logging_extensions
test_put_bucket_logging_policy_wildcard_objects
test_put_bucket_logging_tenant_j
test_put_bucket_logging_tenant_s
test_put_bucket_ownership_bucket_owner_enforced
test_put_bucket_ownership_bucket_owner_preferred
test_put_bucket_ownership_object_writer
test_put_current_object_if_match
test_put_current_object_if_none_match
test_put_delete_tags
test_put_max_tags
test_put_modify_tags
test_put_obj_with_tags
test_put_object_current_if_match
test_put_object_if_match
test_put_object_ifmatch_failed
test_put_object_ifmatch_good
test_put_object_ifmatch_nonexisted_failed
test_put_object_ifmatch_overwrite_existed_good
test_put_object_ifnonmatch_failed
test_put_object_ifnonmatch_good
test_put_object_ifnonmatch_nonexisted_good
test_put_object_ifnonmatch_overwrite_existed_failed
test_put_tags_acl_public
test_ranged_big_request_response_code
test_ranged_request_response_code
test_ranged_request_return_trailing_bytes_response_code
test_ranged_request_skip_leading_bytes_response_code
test_read_through
test_restore_noncur_obj
test_restore_object_permanent
test_restore_object_temporary
test_set_cors
test_sse_kms_default_post_object_authenticated_request
test_sse_kms_default_upload_1b
test_sse_kms_default_upload_1kb
test_sse_kms_default_upload_1mb
test_sse_kms_default_upload_8mb
test_sse_kms_method_head
test_sse_kms_multipart_invalid_chunks_1
test_sse_kms_multipart_invalid_chunks_2
test_sse_kms_multipart_upload
test_sse_kms_post_object_authenticated_request
test_sse_kms_present
test_sse_kms_transfer_13b
test_sse_kms_transfer_1MB
test_sse_kms_transfer_1b
test_sse_kms_transfer_1kb
test_sse_s3_default_method_head
test_sse_s3_default_multipart_upload
test_sse_s3_default_post_object_authenticated_request
test_sse_s3_default_upload_1b
test_sse_s3_default_upload_1kb
test_sse_s3_default_upload_1mb
test_sse_s3_default_upload_8mb
test_sse_s3_encrypted_upload_1b
test_sse_s3_encrypted_upload_1kb
test_sse_s3_encrypted_upload_1mb
test_sse_s3_encrypted_upload_8mb
test_versioned_object_acl_no_version_specified
test_versioning_copy_obj_version
test_versioning_multi_object_delete_with_marker_create
test_versioning_obj_create_overwrite_multipart
test_versioning_obj_suspended_copy
test_versioning_stack_delete_merkers
@@ -34,132 +34,9 @@ TEST_MODE="${TEST_MODE:-single}"
|
||||
MAXFAIL="${MAXFAIL:-1}"
|
||||
XDIST="${XDIST:-0}"
|
||||
|
||||
# =============================================================================
|
||||
# MARKEXPR: pytest marker expression to exclude test categories
|
||||
# =============================================================================
|
||||
# These markers exclude entire test categories via pytest's -m option.
|
||||
# Use MARKEXPR env var to override the default exclusions.
|
||||
#
|
||||
# Excluded categories:
|
||||
# - Unimplemented S3 features: lifecycle, versioning, s3website, bucket_logging, encryption
|
||||
# - Ceph/RGW specific tests: fails_on_aws, fails_on_rgw, fails_on_dbstore
|
||||
# - IAM features: iam_account, iam_tenant, iam_role, iam_user, iam_cross_account
|
||||
# - Other unimplemented: sns, sse_s3, storage_class, test_of_sts, webidentity_test
|
||||
# =============================================================================
|
||||
if [[ -z "${MARKEXPR:-}" ]]; then
|
||||
EXCLUDED_MARKERS=(
|
||||
# Unimplemented S3 features
|
||||
"lifecycle"
|
||||
"versioning"
|
||||
"s3website"
|
||||
"bucket_logging"
|
||||
"encryption"
|
||||
# Ceph/RGW specific tests (not standard S3)
|
||||
"fails_on_aws" # Tests for Ceph/RGW specific features (X-RGW-* headers, etc.)
|
||||
"fails_on_rgw" # Known RGW issues we don't need to replicate
|
||||
"fails_on_dbstore" # Ceph dbstore backend specific
|
||||
# IAM features requiring additional setup
|
||||
"iam_account"
|
||||
"iam_tenant"
|
||||
"iam_role"
|
||||
"iam_user"
|
||||
"iam_cross_account"
|
||||
# Other unimplemented features
|
||||
"sns" # SNS notification
|
||||
"sse_s3" # Server-side encryption with S3-managed keys
|
||||
"storage_class" # Storage class features
|
||||
"test_of_sts" # STS token service
|
||||
"webidentity_test" # Web Identity federation
|
||||
)
|
||||
# Build MARKEXPR from array: "not marker1 and not marker2 and ..."
|
||||
MARKEXPR=""
|
||||
for marker in "${EXCLUDED_MARKERS[@]}"; do
|
||||
if [[ -n "${MARKEXPR}" ]]; then
|
||||
MARKEXPR+=" and "
|
||||
fi
|
||||
MARKEXPR+="not ${marker}"
|
||||
done
|
||||
fi
|
||||
|
||||
# =============================================================================
|
||||
# TESTEXPR: pytest -k expression to exclude specific tests by name
|
||||
# =============================================================================
|
||||
# These patterns exclude specific tests via pytest's -k option (name matching).
|
||||
# Use TESTEXPR env var to override the default exclusions.
|
||||
#
|
||||
# Exclusion reasons are documented inline below.
|
||||
# =============================================================================
|
||||
if [[ -z "${TESTEXPR:-}" ]]; then
|
||||
EXCLUDED_TESTS=(
|
||||
# POST Object (HTML form upload) - not implemented
|
||||
"test_post_object"
|
||||
# ACL-dependent tests - ACL not implemented
|
||||
"test_bucket_list_objects_anonymous" # requires PutBucketAcl
|
||||
"test_bucket_listv2_objects_anonymous" # requires PutBucketAcl
|
||||
"test_bucket_concurrent_set_canned_acl" # ACL not implemented
|
||||
"test_expected_bucket_owner" # requires PutBucketAcl
|
||||
"test_bucket_acl" # Bucket ACL not implemented
|
||||
"test_object_acl" # Object ACL not implemented
|
||||
"test_put_bucket_acl" # PutBucketAcl not implemented
|
||||
"test_object_anon" # Anonymous access requires ACL
|
||||
"test_access_bucket" # Access control requires ACL
|
||||
"test_100_continue" # requires ACL
|
||||
# Chunked encoding - not supported
|
||||
"test_object_write_with_chunked_transfer_encoding"
|
||||
"test_object_content_encoding_aws_chunked"
|
||||
# CORS - not implemented
|
||||
"test_cors"
|
||||
"test_set_cors"
|
||||
# Presigned URL edge cases
|
||||
"test_object_raw" # Raw presigned URL tests
|
||||
# Error response format differences
|
||||
"test_bucket_create_exists" # Error format issue
|
||||
"test_bucket_recreate_not_overriding" # Error format issue
|
||||
"test_list_buckets_invalid_auth" # 401 vs 403
|
||||
"test_object_delete_key_bucket_gone" # 403 vs 404
|
||||
"test_abort_multipart_upload_not_found" # Error code issue
|
||||
# ETag conditional request edge cases
|
||||
"test_get_object_ifmatch_failed"
|
||||
"test_get_object_ifnonematch"
|
||||
# Copy operation edge cases
|
||||
"test_object_copy_to_itself" # Copy validation
|
||||
"test_object_copy_not_owned_bucket" # Cross-account access
|
||||
"test_multipart_copy_invalid_range" # Multipart validation
|
||||
# Timing-sensitive tests
|
||||
"test_versioning_concurrent_multi_object_delete"
|
||||
)
|
||||
# Build TESTEXPR from array: "not test1 and not test2 and ..."
|
||||
TESTEXPR=""
|
||||
for pattern in "${EXCLUDED_TESTS[@]}"; do
|
||||
if [[ -n "${TESTEXPR}" ]]; then
|
||||
TESTEXPR+=" and "
|
||||
fi
|
||||
TESTEXPR+="not ${pattern}"
|
||||
done
|
||||
fi

# Configuration file paths
S3TESTS_CONF_TEMPLATE="${S3TESTS_CONF_TEMPLATE:-.github/s3tests/s3tests.conf}"
S3TESTS_CONF="${S3TESTS_CONF:-s3tests.conf}"

# Service deployment mode: "build", "binary", "docker", or "existing"
# - "build": Compile with cargo build --release and run (default)
# - "binary": Use pre-compiled binary (RUSTFS_BINARY path or default)
# - "docker": Build Docker image and run in container
# - "existing": Use already running service (skip start, use S3_HOST and S3_PORT)
DEPLOY_MODE="${DEPLOY_MODE:-build}"
RUSTFS_BINARY="${RUSTFS_BINARY:-}"
NO_CACHE="${NO_CACHE:-false}"

# Directories
# Directories (define early for use in test list loading)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
ARTIFACTS_DIR="${PROJECT_ROOT}/artifacts/s3tests-${TEST_MODE}"
CONTAINER_NAME="rustfs-${TEST_MODE}"
NETWORK_NAME="rustfs-net"
DATA_ROOT="${DATA_ROOT:-target}"
DATA_DIR="${PROJECT_ROOT}/${DATA_ROOT}/test-data/${CONTAINER_NAME}"
RUSTFS_PID=""

# Colors for output
RED='\033[0;31m'
@@ -180,6 +57,137 @@ log_error() {
    echo -e "${RED}[ERROR]${NC} $*"
}

# =============================================================================
# Test Classification Files
# =============================================================================
# Tests are classified into three categories stored in text files:
# - non_standard_tests.txt: Ceph/RGW specific tests (permanently excluded)
# - unimplemented_tests.txt: Standard S3 features not yet implemented
# - implemented_tests.txt: Tests that should pass on RustFS
#
# By default, only tests listed in implemented_tests.txt are run.
# Use TESTEXPR env var to override and run custom test selection.
# =============================================================================

# Test list files location
TEST_LISTS_DIR="${SCRIPT_DIR}"
IMPLEMENTED_TESTS_FILE="${TEST_LISTS_DIR}/implemented_tests.txt"
NON_STANDARD_TESTS_FILE="${TEST_LISTS_DIR}/non_standard_tests.txt"
UNIMPLEMENTED_TESTS_FILE="${TEST_LISTS_DIR}/unimplemented_tests.txt"

# =============================================================================
# build_testexpr_from_file: Read test names from file and build pytest -k expr
# =============================================================================
# Reads test names from a file (one per line, ignoring comments and empty lines)
# and builds a pytest -k expression to include only those tests.
# =============================================================================
build_testexpr_from_file() {
    local file="$1"
    local expr=""

    if [[ ! -f "${file}" ]]; then
        log_error "Test list file not found: ${file}"
        return 1
    fi

    while IFS= read -r line || [[ -n "$line" ]]; do
        # Skip empty lines and comments
        [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && continue
        # Trim whitespace
        line=$(echo "$line" | xargs)
        [[ -z "$line" ]] && continue

        if [[ -n "${expr}" ]]; then
            expr+=" or "
        fi
        expr+="${line}"
    done < "${file}"

    echo "${expr}"
}
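# Usage sketch (hypothetical file contents; the real call appears further
# below): given a list file containing the two lines
#   test_bucket_create
#   test_object_write
# the function prints "test_bucket_create or test_object_write", ready to be
# passed to pytest via -k:
#   TESTEXPR=$(build_testexpr_from_file "${IMPLEMENTED_TESTS_FILE}")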

# =============================================================================
# MARKEXPR: pytest marker expression (safety net for marker-based filtering)
# =============================================================================
# Even though we use file-based test selection, we keep marker exclusions
# as a safety net to ensure no non-standard tests slip through.
# =============================================================================
if [[ -z "${MARKEXPR:-}" ]]; then
    # Minimal marker exclusions as safety net (file-based filtering is primary)
    MARKEXPR="not fails_on_aws and not fails_on_rgw and not fails_on_dbstore"
fi
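# Hedged sketch of how the two expressions combine downstream (the actual
# pytest invocation lives later in this script and may differ in detail):
#   pytest -m "${MARKEXPR}" -k "${TESTEXPR}" --maxfail="${MAXFAIL:-1}"
# Markers gate whole test categories; the -k expression gates individual names.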

# =============================================================================
# TESTEXPR: pytest -k expression to select specific tests
# =============================================================================
# By default, builds an inclusion expression from implemented_tests.txt.
# Use TESTEXPR env var to override with custom selection.
#
# The file-based approach provides:
# 1. Clear visibility of which tests are run
# 2. Easy maintenance - edit txt files to add/remove tests
# 3. Separation of concerns - test classification vs test execution
# =============================================================================
if [[ -z "${TESTEXPR:-}" ]]; then
    if [[ -f "${IMPLEMENTED_TESTS_FILE}" ]]; then
        log_info "Loading test list from: ${IMPLEMENTED_TESTS_FILE}"
        TESTEXPR=$(build_testexpr_from_file "${IMPLEMENTED_TESTS_FILE}")
        if [[ -z "${TESTEXPR}" ]]; then
            log_error "No tests found in ${IMPLEMENTED_TESTS_FILE}"
            exit 1
        fi
        # Count tests for logging
        TEST_COUNT=$(grep -v '^#' "${IMPLEMENTED_TESTS_FILE}" | grep -v '^[[:space:]]*$' | wc -l | xargs)
        log_info "Loaded ${TEST_COUNT} tests from implemented_tests.txt"
    else
        log_warn "Test list file not found: ${IMPLEMENTED_TESTS_FILE}"
        log_warn "Falling back to exclusion-based filtering"
        # Fallback to exclusion-based filtering if file doesn't exist
        EXCLUDED_TESTS=(
            "test_post_object"
            "test_bucket_list_objects_anonymous"
            "test_bucket_listv2_objects_anonymous"
            "test_bucket_concurrent_set_canned_acl"
            "test_bucket_acl"
            "test_object_acl"
            "test_access_bucket"
            "test_100_continue"
            "test_cors"
            "test_object_raw"
            "test_versioning"
            "test_versioned"
        )
        TESTEXPR=""
        for pattern in "${EXCLUDED_TESTS[@]}"; do
            if [[ -n "${TESTEXPR}" ]]; then
                TESTEXPR+=" and "
            fi
            TESTEXPR+="not ${pattern}"
        done
    fi
fi
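# Example override (hypothetical invocation; substitute the actual script
# path): bypass the file-based selection and run a single test family:
#   TESTEXPR="test_multipart_upload" DEPLOY_MODE=existing ./scripts/s3-tests/<this-script>.sh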

# Configuration file paths
S3TESTS_CONF_TEMPLATE="${S3TESTS_CONF_TEMPLATE:-.github/s3tests/s3tests.conf}"
S3TESTS_CONF="${S3TESTS_CONF:-s3tests.conf}"

# Service deployment mode: "build", "binary", "docker", or "existing"
# - "build": Compile with cargo build --release and run (default)
# - "binary": Use pre-compiled binary (RUSTFS_BINARY path or default)
# - "docker": Build Docker image and run in container
# - "existing": Use already running service (skip start, use S3_HOST and S3_PORT)
DEPLOY_MODE="${DEPLOY_MODE:-build}"
RUSTFS_BINARY="${RUSTFS_BINARY:-}"
NO_CACHE="${NO_CACHE:-false}"

# Additional directories (SCRIPT_DIR and PROJECT_ROOT defined earlier)
ARTIFACTS_DIR="${PROJECT_ROOT}/artifacts/s3tests-${TEST_MODE}"
CONTAINER_NAME="rustfs-${TEST_MODE}"
NETWORK_NAME="rustfs-net"
DATA_ROOT="${DATA_ROOT:-target}"
DATA_DIR="${PROJECT_ROOT}/${DATA_ROOT}/test-data/${CONTAINER_NAME}"
RUSTFS_PID=""

show_usage() {
    cat << EOF
Usage: $0 [OPTIONS]
@@ -205,15 +213,22 @@ Environment Variables:
  S3_ALT_SECRET_KEY - Alt user secret key (default: rustfsalt)
  MAXFAIL - Stop after N failures (default: 1)
  XDIST - Enable parallel execution with N workers (default: 0)
  MARKEXPR - pytest marker expression (default: exclude unsupported features)
  TESTEXPR - pytest -k expression to filter tests by name (default: exclude unimplemented)
  MARKEXPR - pytest marker expression (default: safety net exclusions)
  TESTEXPR - pytest -k expression (default: from implemented_tests.txt)
  S3TESTS_CONF_TEMPLATE - Path to s3tests config template (default: .github/s3tests/s3tests.conf)
  S3TESTS_CONF - Path to generated s3tests config (default: s3tests.conf)
  DATA_ROOT - Root directory for test data storage (default: target)
              Final path: ${DATA_ROOT}/test-data/${CONTAINER_NAME}
              Final path: \${DATA_ROOT}/test-data/\${CONTAINER_NAME}

Test Classification Files (in scripts/s3-tests/):
  implemented_tests.txt - Tests that should pass (run by default)
  unimplemented_tests.txt - Standard S3 features not yet implemented
  non_standard_tests.txt - Ceph/RGW specific tests (permanently excluded)

Notes:
  - In build mode, if the binary exists and was compiled less than 5 minutes ago,
  - Tests are loaded from implemented_tests.txt by default
  - Set TESTEXPR to override with custom test selection
  - In build mode, if the binary exists and was compiled less than 30 minutes ago,
    compilation will be skipped unless --no-cache is specified.

Examples:

183
scripts/s3-tests/unimplemented_tests.txt
Normal file
@@ -0,0 +1,183 @@
# Unimplemented S3 feature tests
# ==============================
#
# These tests cover STANDARD S3 features not yet implemented in RustFS.
# They are TEMPORARILY EXCLUDED and should be enabled as features are added.
#
# Unimplemented features:
# - Versioning: Object versioning support
# - Lifecycle: Object lifecycle management
# - S3 Website: Static website hosting
# - Bucket Logging: Access logging
# - SSE-S3: Server-side encryption with S3-managed keys
# - Object Lock: WORM protection
# - IAM: Identity and Access Management roles/users
# - SNS: Event notifications
# - STS: Security Token Service
# - Checksum: Full checksum validation
# - Conditional writes: If-Match/If-None-Match for writes
# - Object ownership: BucketOwnerEnforced/Preferred
#
# Total: all unimplemented S3 feature tests listed below (keep this comment in sync with the list)

test_bucket_create_delete_bucket_ownership
test_bucket_logging_owner
test_bucket_policy_deny_self_denied_policy
test_bucket_policy_deny_self_denied_policy_confirm_header
test_bucket_policy_put_obj_kms_s3
test_bucket_policy_put_obj_s3_kms
test_copy_enc
test_copy_part_enc
test_delete_bucket_encryption_kms
test_delete_bucket_encryption_s3
test_encryption_key_no_sse_c
test_encryption_sse_c_invalid_md5
test_encryption_sse_c_method_head
test_encryption_sse_c_multipart_bad_download
test_encryption_sse_c_no_key
test_encryption_sse_c_no_md5
test_encryption_sse_c_other_key
test_encryption_sse_c_present
test_get_bucket_encryption_kms
test_get_bucket_encryption_s3
test_get_versioned_object_attributes
test_lifecycle_delete
test_lifecycle_expiration_days0
test_lifecycle_expiration_header_put
test_lifecycle_get
test_lifecycle_get_no_id
test_lifecycle_id_too_long
test_lifecycle_invalid_status
test_lifecycle_plain_null_version_current_transition
test_lifecycle_same_id
test_lifecycle_set
test_lifecycle_set_date
test_lifecycle_set_deletemarker
test_lifecycle_set_empty_filter
test_lifecycle_set_filter
test_lifecycle_set_invalid_date
test_lifecycle_set_multipart
test_lifecycle_set_noncurrent
test_lifecycle_set_noncurrent_transition
test_lifecycle_transition_encrypted
test_lifecycle_transition_set_invalid_date
test_object_checksum_crc64nvme
test_object_checksum_sha256
test_object_lock_get_legal_hold_invalid_bucket
test_object_lock_get_obj_lock_invalid_bucket
test_object_lock_get_obj_retention_invalid_bucket
test_object_lock_put_legal_hold_invalid_bucket
test_object_lock_put_obj_lock_enable_after_create
test_object_lock_put_obj_lock_invalid_bucket
test_object_lock_put_obj_retention_invalid_bucket
test_post_object_upload_checksum
test_put_bucket_encryption_kms
test_put_bucket_encryption_s3
test_put_bucket_logging
test_put_bucket_logging_errors
test_put_bucket_logging_permissions
test_put_bucket_logging_policy_wildcard
test_put_obj_enc_conflict_bad_enc_kms
test_put_obj_enc_conflict_c_kms
test_put_obj_enc_conflict_c_s3
test_put_obj_enc_conflict_s3_kms
test_rm_bucket_logging
test_sse_kms_no_key
test_sse_kms_not_declared
test_sse_kms_read_declare
test_versioned_concurrent_object_create_and_remove
test_versioned_concurrent_object_create_concurrent_remove
test_versioned_object_acl
test_versioning_bucket_atomic_upload_return_version_id
test_versioning_bucket_create_suspend
test_versioning_bucket_multipart_upload_return_version_id
test_versioning_concurrent_multi_object_delete
test_versioning_multi_object_delete
test_versioning_multi_object_delete_with_marker
test_versioning_obj_create_read_remove
test_versioning_obj_create_read_remove_head
test_versioning_obj_create_versions_remove_all
test_versioning_obj_create_versions_remove_special_names
test_versioning_obj_list_marker
test_versioning_obj_plain_null_version_overwrite
test_versioning_obj_plain_null_version_overwrite_suspended
test_versioning_obj_plain_null_version_removal
test_versioning_obj_suspend_versions

# Teardown issues (list_object_versions on non-versioned buckets)
# These tests pass but have cleanup issues with list_object_versions
test_bucket_list_delimiter_basic
test_bucket_list_encoding_basic
test_bucket_listv2_delimiter_alt
test_bucket_listv2_delimiter_basic
test_bucket_listv2_delimiter_dot
test_bucket_listv2_delimiter_empty
test_bucket_listv2_delimiter_none
test_bucket_listv2_delimiter_not_exist
test_bucket_listv2_delimiter_percentage
test_bucket_listv2_delimiter_prefix_ends_with_delimiter
test_bucket_listv2_delimiter_unreadable
test_bucket_listv2_delimiter_whitespace
test_bucket_listv2_encoding_basic

# Checksum and atomic write tests (require x-amz-checksum-* support)
test_atomic_dual_write_1mb
test_atomic_dual_write_4mb
test_atomic_dual_write_8mb
test_atomic_multipart_upload_write
test_atomic_read_1mb
test_atomic_read_4mb
test_atomic_read_8mb
test_atomic_write_1mb
test_atomic_write_4mb
test_atomic_write_8mb
test_set_bucket_tagging

# Tests with implementation issues (need investigation)
test_bucket_policy_acl
test_bucket_policy_different_tenant
test_bucketv2_policy_acl
test_multipart_resend_first_finishes_last

# Multipart abort and policy issues
test_abort_multipart_upload
test_bucket_policy_multipart

# Tests with prefix conflicts or ACL/tenant dependencies
test_bucket_policy
test_bucket_policy_allow_notprincipal
test_bucket_policy_another_bucket
test_bucket_policy_put_obj_acl
test_bucket_policy_put_obj_grant
test_bucket_policy_tenanted_bucket
test_bucketv2_policy
test_object_presigned_put_object_with_acl
test_object_presigned_put_object_with_acl_tenant
test_object_put_acl_mtime

# ACL-dependent tests (PutBucketAcl not implemented)
test_block_public_object_canned_acls
test_block_public_put_bucket_acls
test_get_authpublic_acl_bucket_policy_status
test_get_nonpublicpolicy_acl_bucket_policy_status
test_get_public_acl_bucket_policy_status
test_get_publicpolicy_acl_bucket_policy_status
test_ignore_public_acls

# PublicAccessBlock and tag validation tests
test_block_public_policy
test_block_public_policy_with_principal
test_get_obj_head_tagging
test_get_public_block_deny_bucket_policy
test_get_undefined_public_block
test_put_excess_key_tags
test_put_excess_tags
test_put_excess_val_tags
test_put_get_delete_public_block
test_put_public_block
test_set_get_del_bucket_policy

# Object attributes and torrent tests
test_create_bucket_no_ownership_controls
test_get_checksum_object_attributes
test_get_object_torrent