Mirror of https://github.com/rustfs/rustfs.git, synced 2026-01-16 17:20:33 +00:00

lifecycle test

Cargo.lock (generated)
@@ -7614,6 +7614,7 @@ dependencies = [
  "async-trait",
  "chrono",
  "futures",
+ "heed",
  "http 1.4.0",
  "path-clean",
  "rand 0.10.0-rc.5",
@@ -608,7 +608,7 @@ mod serial_tests {

     #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
     #[serial]
-    //#[ignore]
+    #[ignore]
     async fn test_lifecycle_transition_basic() {
         let (_disk_paths, ecstore) = setup_test_env().await;

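For context on the attributes touched here: `#[serial]` (from the serial_test crate) forces the test to run without other tests in parallel, and the hunk switches the commented-out `//#[ignore]` to an active `#[ignore]`, so the test is skipped by a plain `cargo test` and only runs under `cargo test -- --ignored`. A minimal sketch of that attribute combination, with a placeholder setup function standing in for the real `setup_test_env`/ecstore helpers (which are not shown in this diff):

```rust
use serial_test::serial;

// Hypothetical stand-in for the crate's test environment setup.
async fn setup_test_env_stub() -> Vec<String> {
    vec!["/tmp/disk1".to_string()]
}

#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[serial] // run exclusively; lifecycle tests share on-disk state
#[ignore] // skipped by default; run with `cargo test -- --ignored`
async fn test_lifecycle_attributes_sketch() {
    let disk_paths = setup_test_env_stub().await;
    assert!(!disk_paths.is_empty());
}
```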
@@ -621,7 +621,13 @@
         let test_data = b"Hello, this is test data for lifecycle expiry!";

         create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
-        upload_test_object(&ecstore, bucket_name.as_str(), object_name, b"Hello, this is test data for lifecycle expiry 1111-11111111-1111 !").await;
+        upload_test_object(
+            &ecstore,
+            bucket_name.as_str(),
+            object_name,
+            b"Hello, this is test data for lifecycle expiry 1111-11111111-1111 !",
+        )
+        .await;
         //create_test_bucket(&ecstore, bucket_name.as_str()).await;
         upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;

@@ -19,10 +19,7 @@
 #![allow(clippy::all)]

 use rustfs_filemeta::{ReplicationStatusType, VersionPurgeStatusType};
-use s3s::dto::{
-    BucketLifecycleConfiguration, ExpirationStatus, LifecycleExpiration, LifecycleRule, NoncurrentVersionTransition,
-    ObjectLockConfiguration, ObjectLockEnabled, RestoreRequest, Transition,
-};
+use s3s::dto::{BucketLifecycleConfiguration, ExpirationStatus, LifecycleExpiration, LifecycleRule, NoncurrentVersionTransition, ObjectLockConfiguration, ObjectLockEnabled, Prefix, RestoreRequest, Transition};
 use std::cmp::Ordering;
 use std::collections::HashMap;
 use std::env;
@@ -173,44 +170,49 @@ impl Lifecycle for BucketLifecycleConfiguration {
                 continue;
             }

-            let rule_prefix = rule.prefix.as_ref().expect("err!");
+            let rule_prefix = &rule.prefix.clone().unwrap_or_default();
             if prefix.len() > 0 && rule_prefix.len() > 0 && !prefix.starts_with(rule_prefix) && !rule_prefix.starts_with(&prefix)
             {
                 continue;
             }

-            let rule_noncurrent_version_expiration = rule.noncurrent_version_expiration.as_ref().expect("err!");
-            if rule_noncurrent_version_expiration.noncurrent_days.expect("err!") > 0 {
-                return true;
-            }
-            if rule_noncurrent_version_expiration.newer_noncurrent_versions.expect("err!") > 0 {
-                return true;
-            }
-            if !rule.noncurrent_version_transitions.is_none() {
-                return true;
-            }
-            let rule_expiration = rule.expiration.as_ref().expect("err!");
-            if !rule_expiration.date.is_none()
-                && OffsetDateTime::from(rule_expiration.date.clone().expect("err!")).unix_timestamp()
-                    < OffsetDateTime::now_utc().unix_timestamp()
-            {
-                return true;
-            }
-            if !rule_expiration.date.is_none() {
-                return true;
-            }
-            if rule_expiration.expired_object_delete_marker.expect("err!") {
-                return true;
-            }
-            let rule_transitions: &[Transition] = &rule.transitions.as_ref().expect("err!");
-            let rule_transitions_0 = rule_transitions[0].clone();
-            if !rule_transitions_0.date.is_none()
-                && OffsetDateTime::from(rule_transitions_0.date.expect("err!")).unix_timestamp()
-                    < OffsetDateTime::now_utc().unix_timestamp()
-            {
-                return true;
-            }
-            if !rule.transitions.is_none() {
+            if let Some(rule_noncurrent_version_expiration) = &rule.noncurrent_version_expiration {
+                if let Some(noncurrent_days) = rule_noncurrent_version_expiration.noncurrent_days {
+                    if noncurrent_days > 0 {
+                        return true;
+                    }
+                }
+                if let Some(newer_noncurrent_versions) = rule_noncurrent_version_expiration.newer_noncurrent_versions {
+                    if newer_noncurrent_versions > 0 {
+                        return true;
+                    }
+                }
+            }
+            if rule.noncurrent_version_transitions.is_some() {
+                return true;
+            }
+            if let Some(rule_expiration) = &rule.expiration {
+                if let Some(date1) = rule_expiration.date.clone() {
+                    if OffsetDateTime::from(date1).unix_timestamp() < OffsetDateTime::now_utc().unix_timestamp() {
+                        return true;
+                    }
+                }
+                if rule_expiration.date.is_some() {
+                    return true;
+                }
+                if let Some(expired_object_delete_marker) = rule_expiration.expired_object_delete_marker && expired_object_delete_marker {
+                    return true;
+                }
+            }
+            if let Some(rule_transitions) = &rule.transitions {
+                let rule_transitions_0 = rule_transitions[0].clone();
+                if let Some(date1) = rule_transitions_0.date {
+                    if OffsetDateTime::from(date1).unix_timestamp() < OffsetDateTime::now_utc().unix_timestamp() {
+                        return true;
+                    }
+                }
+            }
+            if rule.transitions.is_some() {
                 return true;
             }
         }
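The hunk above replaces panicking `expect("err!")` accesses on optional lifecycle-rule fields with `if let Some(...)` / `is_some()` checks, so a rule that omits a field is treated as "not configured" instead of aborting the scan. A self-contained sketch of the same pattern, using simplified stand-in types rather than the real s3s DTOs:

```rust
// Simplified stand-ins for the s3s lifecycle DTOs, for illustration only.
struct Expiration {
    days: Option<i32>,
    expired_object_delete_marker: Option<bool>,
}

struct Rule {
    expiration: Option<Expiration>,
}

// Old style: panics when a field is absent (the expect("err!") pattern).
fn rule_is_active_panicky(rule: &Rule) -> bool {
    let exp = rule.expiration.as_ref().expect("err!");
    exp.days.expect("err!") > 0 || exp.expired_object_delete_marker.expect("err!")
}

// New style: an absent field just means the feature is not configured.
fn rule_is_active_safe(rule: &Rule) -> bool {
    if let Some(exp) = &rule.expiration {
        if let Some(days) = exp.days {
            if days > 0 {
                return true;
            }
        }
        if exp.expired_object_delete_marker == Some(true) {
            return true;
        }
    }
    false
}

fn main() {
    let bare_rule = Rule { expiration: None };
    // The safe check reports "inactive" instead of aborting the scan.
    assert!(!rule_is_active_safe(&bare_rule));
    // The expect-based check panics on the same input.
    assert!(std::panic::catch_unwind(|| rule_is_active_panicky(&bare_rule)).is_err());
}
```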
@@ -452,6 +452,8 @@ pub fn path2_bucket_object_with_base_path(base_path: &str, path: &str) -> (Strin
         .strip_prefix(SLASH_SEPARATOR)
         .unwrap_or(path);
     // Find the position of the first '/'
+    #[cfg(windows)]
+    let trimmed_path = trimmed_path.replace('\\', "/");
     let Some(pos) = trimmed_path.find(SLASH_SEPARATOR) else {
         return (trimmed_path.to_string(), "".to_string());
     };
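The two added lines normalize Windows path separators before the bucket/object split, so a scanned path like `bucket\object` splits the same way as `bucket/object`. A minimal standalone sketch of that split logic; `split_bucket_object` is an illustrative stand-in and omits the base-path handling the real rustfs helper performs:

```rust
const SLASH_SEPARATOR: &str = "/";

// Split "bucket/dir/object" into ("bucket", "dir/object").
fn split_bucket_object(path: &str) -> (String, String) {
    let trimmed_path = path.strip_prefix(SLASH_SEPARATOR).unwrap_or(path);
    // On Windows, directory walks can yield '\\' separators; normalize them
    // so the '/'-based split below behaves the same on every platform.
    #[cfg(windows)]
    let trimmed_path = trimmed_path.replace('\\', "/");
    let Some(pos) = trimmed_path.find(SLASH_SEPARATOR) else {
        return (trimmed_path.to_string(), String::new());
    };
    (trimmed_path[..pos].to_string(), trimmed_path[pos + 1..].to_string())
}

fn main() {
    assert_eq!(
        split_bucket_object("/mybucket/dir/obj.txt"),
        ("mybucket".to_string(), "dir/obj.txt".to_string())
    );
    assert_eq!(split_bucket_object("mybucket"), ("mybucket".to_string(), String::new()));
}
```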
@@ -59,3 +59,4 @@ tokio-test = { workspace = true }
 tracing-subscriber = { workspace = true }
 tempfile = { workspace = true }
 serial_test = { workspace = true }
+heed = { workspace = true }
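The heed crate (an LMDB wrapper) is added to the dev-dependencies here and to Cargo.lock above; the diff itself does not show how the tests use it. A heavily hedged sketch of basic heed usage, assuming a recent heed release (0.20+, where opening an environment is `unsafe`) and reusing tempfile from the same dev-dependency list; the database, key, and value names are made up for illustration and are not rustfs APIs:

```rust
use heed::types::Str;
use heed::{Database, EnvOpenOptions};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let dir = tempfile::tempdir()?;
    // Caller must guarantee the same path is not opened twice in one process.
    let env = unsafe { EnvOpenOptions::new().map_size(10 * 1024 * 1024).open(dir.path())? };

    // Create an unnamed string-to-string database and write one entry.
    let mut wtxn = env.write_txn()?;
    let db: Database<Str, Str> = env.create_database(&mut wtxn, None)?;
    db.put(&mut wtxn, "mybucket/object", "pending-transition")?;
    wtxn.commit()?;

    // Read it back in a read-only transaction.
    let rtxn = env.read_txn()?;
    assert_eq!(db.get(&rtxn, "mybucket/object")?, Some("pending-transition"));
    Ok(())
}
```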
@@ -44,7 +44,7 @@ use rustfs_ecstore::pools::{path2_bucket_object, path2_bucket_object_with_base_p
 use rustfs_ecstore::store_api::{ObjectInfo, ObjectToDelete};
 use rustfs_ecstore::store_utils::is_reserved_or_invalid_bucket;
 use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, ReplicationStatusType};
-use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
+use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf, path_to_bucket_object_with_base_path};
 use s3s::dto::{BucketLifecycleConfiguration, ObjectLockConfiguration};
 use tokio::select;
 use tokio::sync::mpsc;
@@ -554,9 +554,9 @@ impl FolderScanner {

             let file_path = entry.path().to_string_lossy().to_string();

-            let trim_dir_name = file_path.strip_prefix(&dir_path).unwrap_or(&file_path);
+            //let trim_dir_name = file_path.strip_prefix(&dir_path).unwrap_or(&file_path);

-            let entry_name = path_join_buf(&[&folder.name, trim_dir_name]);
+            let entry_name = path_join_buf(&[&folder.name, &file_name]);

             if entry_name.is_empty() || entry_name == folder.name {
                 debug!("scan_folder: done for now entry_name is empty or equals folder name");
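The scanner now builds `entry_name` from the folder name and the bare file name rather than from a prefix-stripped full path. A small standalone sketch of the kind of separator-aware join this relies on; `join_entry_name` is a hypothetical stand-in, not the actual `path_join_buf` from rustfs_utils:

```rust
const SLASH_SEPARATOR: &str = "/";

// Hypothetical stand-in for path_join_buf: join segments with '/' without
// doubling the separator when a segment already ends with one.
fn join_entry_name(segments: &[&str]) -> String {
    let mut out = String::new();
    for segment in segments {
        if !out.is_empty() && !out.ends_with(SLASH_SEPARATOR) {
            out.push_str(SLASH_SEPARATOR);
        }
        out.push_str(segment);
    }
    out
}

fn main() {
    // Folder name plus bare file name, as in the new scanner code path.
    assert_eq!(join_entry_name(&["mybucket/prefix/", "object.txt"]), "mybucket/prefix/object.txt");
    assert_eq!(join_entry_name(&["mybucket", "object.txt"]), "mybucket/object.txt");
}
```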