refactor: Reimplement bucket replication system with enhanced architecture (#590)

* feat: refactor replication

* use AWS SDK for replication client

* refactor/replication

* merge main

* fix lifecycle test
weisd
2025-09-26 14:27:53 +08:00
committed by GitHub
parent 9b029d18b2
commit 90f21a9102
91 changed files with 10532 additions and 4917 deletions

Cargo.lock (generated)
View File

@@ -1654,6 +1654,15 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6"
[[package]]
name = "convert_case"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baaaa0ecca5b51987b9423ccdc971514dd8b0bb7b4060b983d3664dad3f1f89f"
dependencies = [
"unicode-segmentation",
]
[[package]]
name = "core-foundation"
version = "0.9.4"
@@ -6384,7 +6393,10 @@ dependencies = [
"async-channel",
"async-recursion",
"async-trait",
"aws-credential-types",
"aws-sdk-s3",
"aws-smithy-runtime-api",
"aws-smithy-types",
"base64 0.22.1",
"byteorder",
"bytes",
@@ -6497,6 +6509,7 @@ dependencies = [
"thiserror 2.0.16",
"time",
"tokio",
"tokio-util",
"tracing",
]
@@ -6802,6 +6815,7 @@ dependencies = [
"blake3",
"brotli",
"bytes",
"convert_case",
"crc32fast",
"flate2",
"futures",

View File

@@ -29,6 +29,7 @@ use rustfs_ecstore::{
data_usage::{aggregate_local_snapshots, store_data_usage_in_backend},
};
use rustfs_filemeta::{MetacacheReader, VersionType};
use s3s::dto::{BucketVersioningStatus, VersioningConfiguration};
use tokio::sync::{Mutex, RwLock};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
@@ -50,7 +51,6 @@ use rustfs_common::data_usage::{DataUsageInfo, SizeSummary};
use rustfs_common::metrics::{Metric, Metrics, globalMetrics};
use rustfs_ecstore::bucket::versioning::VersioningApi;
use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
use rustfs_ecstore::cmd::bucket_targets::VersioningConfig;
use rustfs_ecstore::disk::RUSTFS_META_BUCKET;
use uuid;
@@ -300,8 +300,13 @@ impl Scanner {
.map(|(c, _)| Arc::new(c));
// Get bucket versioning configuration
let versioning_config = Arc::new(VersioningConfig {
enabled: bucket_info.versioning,
let versioning_config = Arc::new(VersioningConfiguration {
status: if bucket_info.versioning {
Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED))
} else {
None
},
..Default::default()
});
let records = match bucket_objects_map.get(bucket_name) {
@@ -1825,7 +1830,16 @@ impl Scanner {
}
};
let bucket_info = ecstore.get_bucket_info(bucket, &Default::default()).await.ok();
let versioning_config = bucket_info.map(|bi| Arc::new(VersioningConfig { enabled: bi.versioning }));
let versioning_config = bucket_info.map(|bi| {
Arc::new(VersioningConfiguration {
status: if bi.versioning {
Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED))
} else {
None
},
..Default::default()
})
});
let lifecycle_config = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket)
.await
.ok()
@@ -2651,7 +2665,7 @@ mod tests {
// create ECStore with dynamic port
let port = port.unwrap_or(9000);
let server_addr: SocketAddr = format!("127.0.0.1:{port}").parse().expect("Invalid server address format");
let ecstore = ECStore::new(server_addr, endpoint_pools)
let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
.await
.expect("Failed to create ECStore");

View File

@@ -28,10 +28,9 @@ use rustfs_ecstore::bucket::metadata_sys::get_object_lock_config;
use rustfs_ecstore::bucket::object_lock::objectlock_sys::{BucketObjectLockSys, enforce_retention_for_deletion};
use rustfs_ecstore::bucket::versioning::VersioningApi;
use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
use rustfs_ecstore::cmd::bucket_targets::VersioningConfig;
use rustfs_ecstore::store_api::{ObjectInfo, ObjectToDelete};
use rustfs_filemeta::FileInfo;
use s3s::dto::BucketLifecycleConfiguration as LifecycleConfig;
use s3s::dto::{BucketLifecycleConfiguration as LifecycleConfig, VersioningConfiguration};
use time::OffsetDateTime;
use tracing::info;
@@ -43,11 +42,15 @@ pub struct ScannerItem {
pub bucket: String,
pub object_name: String,
pub lifecycle: Option<Arc<LifecycleConfig>>,
pub versioning: Option<Arc<VersioningConfig>>,
pub versioning: Option<Arc<VersioningConfiguration>>,
}
impl ScannerItem {
pub fn new(bucket: String, lifecycle: Option<Arc<LifecycleConfig>>, versioning: Option<Arc<VersioningConfig>>) -> Self {
pub fn new(
bucket: String,
lifecycle: Option<Arc<LifecycleConfig>>,
versioning: Option<Arc<VersioningConfiguration>>,
) -> Self {
Self {
bucket,
object_name: "".to_string(),
@@ -145,6 +148,7 @@ impl ScannerItem {
to_del.push(ObjectToDelete {
object_name: obj.name,
version_id: obj.version_id,
..Default::default()
});
}
@@ -233,7 +237,7 @@ impl ScannerItem {
IlmAction::DeleteAction => {
info!("apply_lifecycle: Object {} marked for deletion", oi.name);
if let Some(vcfg) = &self.versioning {
if !vcfg.is_enabled() {
if !vcfg.enabled() {
info!("apply_lifecycle: Versioning disabled, setting new_size=0");
new_size = 0;
}
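The lifecycle path above now holds an s3s::dto::VersioningConfiguration and asks vcfg.enabled() via the VersioningApi extension trait instead of the old VersioningConfig::is_enabled(). A minimal sketch (not part of the committed diff) of what that check plausibly reduces to, assuming the trait simply inspects the status field; the real rustfs_ecstore implementation may differ:

use s3s::dto::{BucketVersioningStatus, VersioningConfiguration};

// Sketch only: how an `enabled()` helper over the s3s DTO could look.
// The actual rustfs_ecstore::bucket::versioning::VersioningApi trait may do more.
trait VersioningApiSketch {
    fn enabled(&self) -> bool;
}

impl VersioningApiSketch for VersioningConfiguration {
    fn enabled(&self) -> bool {
        self.status
            .as_ref()
            .is_some_and(|s| *s == BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED))
    }
}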

View File

@@ -444,7 +444,7 @@ mod tests {
let delete_marker = MetaDeleteMarker {
version_id: Some(Uuid::new_v4()),
mod_time: Some(OffsetDateTime::now_utc()),
meta_sys: None,
meta_sys: HashMap::new(),
};
let version = FileMetaVersion {

View File

@@ -18,6 +18,7 @@ use rustfs_ecstore::disk::endpoint::Endpoint;
use rustfs_ecstore::endpoints::{EndpointServerPools, Endpoints, PoolEndpoints};
use std::net::SocketAddr;
use tempfile::TempDir;
use tokio_util::sync::CancellationToken;
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_endpoint_index_settings() -> anyhow::Result<()> {
@@ -73,7 +74,7 @@ async fn test_endpoint_index_settings() -> anyhow::Result<()> {
rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await?;
let server_addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
let ecstore = rustfs_ecstore::store::ECStore::new(server_addr, endpoint_pools).await?;
let ecstore = rustfs_ecstore::store::ECStore::new(server_addr, endpoint_pools, CancellationToken::new()).await?;
println!("ECStore initialized successfully with {} pools", ecstore.pools.len());

View File

@@ -29,6 +29,7 @@ use std::sync::Once;
use std::sync::OnceLock;
use std::{path::PathBuf, sync::Arc, time::Duration};
use tokio::fs;
use tokio_util::sync::CancellationToken;
use tracing::info;
use walkdir::WalkDir;
@@ -98,7 +99,9 @@ async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage
// create ECStore with dynamic port 0 (let OS assign) or fixed 9001 if free
let port = 9001; // for simplicity
let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
let ecstore = ECStore::new(server_addr, endpoint_pools).await.unwrap();
let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
.await
.unwrap();
// init bucket metadata system
let buckets_list = ecstore

View File

@@ -29,6 +29,7 @@ use std::sync::OnceLock;
use std::{path::PathBuf, sync::Arc, time::Duration};
use tokio::fs;
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use tracing::warn;
use tracing::{debug, info};
@@ -99,7 +100,9 @@ async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
// create ECStore with dynamic port 0 (let OS assign) or fixed 9002 if free
let port = 9002; // for simplicity
let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
let ecstore = ECStore::new(server_addr, endpoint_pools).await.unwrap();
let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
.await
.unwrap();
// init bucket metadata system
let buckets_list = ecstore
@@ -124,7 +127,7 @@ async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
}
/// Test helper: Create a test bucket
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
async fn _create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
(**ecstore)
.make_bucket(bucket_name, &Default::default())
.await
@@ -312,7 +315,7 @@ mod serial_tests {
let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
let test_data = b"Hello, this is test data for lifecycle expiry!";
create_test_bucket(&ecstore, bucket_name).await;
create_test_lock_bucket(&ecstore, bucket_name).await;
upload_test_object(&ecstore, bucket_name, object_name, test_data).await;
// Verify object exists initially
@@ -458,7 +461,7 @@ mod serial_tests {
let check_result = object_exists(&ecstore, bucket_name, object_name).await;
println!("Object exists after lifecycle processing: {check_result}");
if !check_result {
if check_result {
println!("❌ Object was not deleted by lifecycle processing");
// Let's try to get object info to see its details
match ecstore
@@ -479,7 +482,7 @@ mod serial_tests {
println!("✅ Object was successfully deleted by lifecycle processing");
}
assert!(check_result);
assert!(!check_result);
println!("✅ Object successfully expired");
// Stop scanner
@@ -501,7 +504,7 @@ mod serial_tests {
let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
let test_data = b"Hello, this is test data for lifecycle expiry!";
create_test_bucket(&ecstore, bucket_name).await;
create_test_lock_bucket(&ecstore, bucket_name).await;
upload_test_object(&ecstore, bucket_name, object_name, test_data).await;
// Verify object exists initially

View File

@@ -14,6 +14,7 @@
use std::{fs, net::SocketAddr, sync::Arc, sync::OnceLock, time::Duration};
use tempfile::TempDir;
use tokio_util::sync::CancellationToken;
use serial_test::serial;
@@ -89,7 +90,9 @@ async fn prepare_test_env(test_dir: Option<&str>, port: Option<u16>) -> (Vec<std
// create ECStore with dynamic port
let port = port.unwrap_or(9000);
let server_addr: SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
let ecstore = ECStore::new(server_addr, endpoint_pools).await.unwrap();
let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
.await
.unwrap();
// init bucket metadata system
let buckets_list = ecstore

View File

@@ -202,22 +202,19 @@ impl AuditMetrics {
// Generate recommendations
if !validation.meets_eps_requirement {
validation.recommendations.push(format!(
"EPS ({:.0}) is below requirement (3000). Consider optimizing target dispatch or adding more target instances.",
eps
"EPS ({eps:.0}) is below requirement (3000). Consider optimizing target dispatch or adding more target instances."
));
}
if !validation.meets_latency_requirement {
validation.recommendations.push(format!(
"Average latency ({:.2}ms) exceeds requirement (30ms). Consider optimizing target responses or increasing timeout values.",
avg_latency_ms
"Average latency ({avg_latency_ms:.2}ms) exceeds requirement (30ms). Consider optimizing target responses or increasing timeout values."
));
}
if !validation.meets_error_rate_requirement {
validation.recommendations.push(format!(
"Error rate ({:.2}%) exceeds recommendation (1%). Check target connectivity and configuration.",
error_rate
"Error rate ({error_rate:.2}%) exceeds recommendation (1%). Check target connectivity and configuration."
));
}
@@ -307,7 +304,7 @@ impl PerformanceValidation {
);
for rec in &self.recommendations {
result.push_str(&format!("\n{}", rec));
result.push_str(&format!("\n{rec}"));
}
result

View File

@@ -303,7 +303,7 @@ async fn create_audit_target(
let target = rustfs_targets::target::mqtt::MQTTTarget::new(id.to_string(), args)?;
Ok(Box::new(target))
}
_ => Err(TargetError::Configuration(format!("Unknown target type: {}", target_type))),
_ => Err(TargetError::Configuration(format!("Unknown target type: {target_type}"))),
}
}
@@ -352,7 +352,7 @@ fn parse_webhook_args(_id: &str, config: &KVS) -> Result<WebhookArgs, TargetErro
.ok_or_else(|| TargetError::Configuration("webhook endpoint is required".to_string()))?;
let endpoint_url =
Url::parse(&endpoint).map_err(|e| TargetError::Configuration(format!("invalid webhook endpoint URL: {}", e)))?;
Url::parse(&endpoint).map_err(|e| TargetError::Configuration(format!("invalid webhook endpoint URL: {e}")))?;
let args = WebhookArgs {
enable: true, // Already validated as enabled
@@ -379,7 +379,7 @@ fn parse_mqtt_args(_id: &str, config: &KVS) -> Result<MQTTArgs, TargetError> {
.filter(|s| !s.is_empty())
.ok_or_else(|| TargetError::Configuration("MQTT broker is required".to_string()))?;
let broker_url = Url::parse(&broker).map_err(|e| TargetError::Configuration(format!("invalid MQTT broker URL: {}", e)))?;
let broker_url = Url::parse(&broker).map_err(|e| TargetError::Configuration(format!("invalid MQTT broker URL: {e}")))?;
let topic = config
.lookup(MQTT_TOPIC)

View File

@@ -461,7 +461,7 @@ impl AuditSystem {
info!(target_id = %target_id, "Target enabled");
Ok(())
} else {
Err(AuditError::Configuration(format!("Target not found: {}", target_id)))
Err(AuditError::Configuration(format!("Target not found: {target_id}")))
}
}
@@ -474,7 +474,7 @@ impl AuditSystem {
info!(target_id = %target_id, "Target disabled");
Ok(())
} else {
Err(AuditError::Configuration(format!("Target not found: {}", target_id)))
Err(AuditError::Configuration(format!("Target not found: {target_id}")))
}
}
@@ -488,7 +488,7 @@ impl AuditSystem {
info!(target_id = %target_id, "Target removed");
Ok(())
} else {
Err(AuditError::Configuration(format!("Target not found: {}", target_id)))
Err(AuditError::Configuration(format!("Target not found: {target_id}")))
}
}

View File

@@ -94,7 +94,7 @@ fn build_rustfs_binary() {
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
panic!("Failed to build RustFS binary. Error: {}", stderr);
panic!("Failed to build RustFS binary. Error: {stderr}");
}
info!("✅ RustFS binary built successfully");
@@ -134,8 +134,8 @@ impl RustFSTestEnvironment {
// Use a unique port for each test environment
let port = Self::find_available_port().await?;
let address = format!("127.0.0.1:{}", port);
let url = format!("http://{}", address);
let address = format!("127.0.0.1:{port}");
let url = format!("http://{address}");
Ok(Self {
temp_dir,
@@ -152,7 +152,7 @@ impl RustFSTestEnvironment {
let temp_dir = format!("/tmp/rustfs_e2e_test_{}", Uuid::new_v4());
fs::create_dir_all(&temp_dir).await?;
let url = format!("http://{}", address);
let url = format!("http://{address}");
Ok(Self {
temp_dir,
@@ -327,7 +327,7 @@ pub async fn execute_awscurl(
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
return Err(format!("awscurl failed: {}", stderr).into());
return Err(format!("awscurl failed: {stderr}").into());
}
let response = String::from_utf8_lossy(&output.stdout).to_string();

View File

@@ -59,7 +59,7 @@ pub async fn configure_kms(
access_key: &str,
secret_key: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/kms/configure", base_url);
let url = format!("{base_url}/rustfs/admin/v3/kms/configure");
awscurl_post(&url, config_json, access_key, secret_key).await?;
info!("KMS configured successfully");
Ok(())
@@ -71,7 +71,7 @@ pub async fn start_kms(
access_key: &str,
secret_key: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/kms/start", base_url);
let url = format!("{base_url}/rustfs/admin/v3/kms/start");
awscurl_post(&url, "{}", access_key, secret_key).await?;
info!("KMS started successfully");
Ok(())
@@ -83,7 +83,7 @@ pub async fn get_kms_status(
access_key: &str,
secret_key: &str,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
let url = format!("{}/rustfs/admin/v3/kms/status", base_url);
let url = format!("{base_url}/rustfs/admin/v3/kms/status");
let status = awscurl_get(&url, access_key, secret_key).await?;
info!("KMS status retrieved: {}", status);
Ok(status)
@@ -101,7 +101,7 @@ pub async fn create_default_key(
})
.to_string();
let url = format!("{}/rustfs/admin/v3/kms/keys", base_url);
let url = format!("{base_url}/rustfs/admin/v3/kms/keys");
let response = awscurl_post(&url, &create_key_body, access_key, secret_key).await?;
// Parse response to get the actual key ID
@@ -141,7 +141,7 @@ pub async fn create_key_with_specific_id(key_dir: &str, key_id: &str) -> Result<
});
// Write the key to file with the specified ID as JSON
let key_path = format!("{}/{}.key", key_dir, key_id);
let key_path = format!("{key_dir}/{key_id}.key");
let content = serde_json::to_vec_pretty(&stored_key)?;
fs::write(&key_path, &content).await?;
@@ -281,13 +281,8 @@ pub async fn test_kms_key_management(
})
.to_string();
let create_response = awscurl_post(
&format!("{}/rustfs/admin/v3/kms/keys", base_url),
&create_key_body,
access_key,
secret_key,
)
.await?;
let create_response =
awscurl_post(&format!("{base_url}/rustfs/admin/v3/kms/keys"), &create_key_body, access_key, secret_key).await?;
let create_result: serde_json::Value = serde_json::from_str(&create_response)?;
let key_id = create_result["key_id"]
@@ -296,8 +291,7 @@ pub async fn test_kms_key_management(
info!("Created key with ID: {}", key_id);
// Test DescribeKey
let describe_response =
awscurl_get(&format!("{}/rustfs/admin/v3/kms/keys/{}", base_url, key_id), access_key, secret_key).await?;
let describe_response = awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys/{key_id}"), access_key, secret_key).await?;
info!("DescribeKey response: {}", describe_response);
let describe_result: serde_json::Value = serde_json::from_str(&describe_response)?;
@@ -306,7 +300,7 @@ pub async fn test_kms_key_management(
info!("Successfully described key: {}", key_id);
// Test ListKeys
let list_response = awscurl_get(&format!("{}/rustfs/admin/v3/kms/keys", base_url), access_key, secret_key).await?;
let list_response = awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys"), access_key, secret_key).await?;
let list_result: serde_json::Value = serde_json::from_str(&list_response)?;
let keys = list_result["keys"]
@@ -412,7 +406,7 @@ impl VaultTestEnvironment {
let port_check = TcpStream::connect(VAULT_ADDRESS).await.is_ok();
if port_check {
// Additional check by making a health request
if let Ok(response) = reqwest::get(&format!("{}/v1/sys/health", VAULT_URL)).await {
if let Ok(response) = reqwest::get(&format!("{VAULT_URL}/v1/sys/health")).await {
if response.status().is_success() {
info!("Vault server is ready after {} seconds", i);
return Ok(());
@@ -438,7 +432,7 @@ impl VaultTestEnvironment {
// Enable transit secrets engine
let enable_response = client
.post(format!("{}/v1/sys/mounts/{}", VAULT_URL, VAULT_TRANSIT_PATH))
.post(format!("{VAULT_URL}/v1/sys/mounts/{VAULT_TRANSIT_PATH}"))
.header("X-Vault-Token", VAULT_TOKEN)
.json(&serde_json::json!({
"type": "transit"
@@ -448,14 +442,14 @@ impl VaultTestEnvironment {
if !enable_response.status().is_success() && enable_response.status() != 400 {
let error_text = enable_response.text().await?;
return Err(format!("Failed to enable transit engine: {}", error_text).into());
return Err(format!("Failed to enable transit engine: {error_text}").into());
}
info!("Creating Vault encryption key");
// Create encryption key
let key_response = client
.post(format!("{}/v1/{}/keys/{}", VAULT_URL, VAULT_TRANSIT_PATH, VAULT_KEY_NAME))
.post(format!("{VAULT_URL}/v1/{VAULT_TRANSIT_PATH}/keys/{VAULT_KEY_NAME}"))
.header("X-Vault-Token", VAULT_TOKEN)
.json(&serde_json::json!({
"type": "aes256-gcm96"
@@ -465,7 +459,7 @@ impl VaultTestEnvironment {
if !key_response.status().is_success() && key_response.status() != 400 {
let error_text = key_response.text().await?;
return Err(format!("Failed to create encryption key: {}", error_text).into());
return Err(format!("Failed to create encryption key: {error_text}").into());
}
info!("Vault transit engine setup completed");
@@ -713,10 +707,10 @@ pub async fn test_all_multipart_encryption_types(
// Test configurations for all encryption types
let test_configs = vec![
MultipartTestConfig::new(format!("{}-no-encryption", base_object_key), part_size, total_parts, EncryptionType::None),
MultipartTestConfig::new(format!("{}-sse-s3", base_object_key), part_size, total_parts, EncryptionType::SSES3),
MultipartTestConfig::new(format!("{}-sse-kms", base_object_key), part_size, total_parts, EncryptionType::SSEKMS),
MultipartTestConfig::new(format!("{}-sse-c", base_object_key), part_size, total_parts, create_sse_c_config()),
MultipartTestConfig::new(format!("{base_object_key}-no-encryption"), part_size, total_parts, EncryptionType::None),
MultipartTestConfig::new(format!("{base_object_key}-sse-s3"), part_size, total_parts, EncryptionType::SSES3),
MultipartTestConfig::new(format!("{base_object_key}-sse-kms"), part_size, total_parts, EncryptionType::SSEKMS),
MultipartTestConfig::new(format!("{base_object_key}-sse-c"), part_size, total_parts, create_sse_c_config()),
];
// Run tests for each encryption type

View File

@@ -33,11 +33,10 @@ fn assert_encryption_metadata(metadata: &HashMap<String, String>, expected_size:
"x-rustfs-encryption-context",
"x-rustfs-encryption-original-size",
] {
assert!(metadata.contains_key(key), "expected managed encryption metadata '{}' to be present", key);
assert!(metadata.contains_key(key), "expected managed encryption metadata '{key}' to be present");
assert!(
!metadata.get(key).unwrap().is_empty(),
"managed encryption metadata '{}' should not be empty",
key
"managed encryption metadata '{key}' should not be empty"
);
}
@@ -84,10 +83,7 @@ fn assert_storage_encrypted(storage_root: &std::path::Path, bucket: &str, key: &
assert!(
scanned > 0,
"Failed to locate stored data files for bucket '{}' and key '{}' under {:?}",
bucket,
key,
storage_root
"Failed to locate stored data files for bucket '{bucket}' and key '{key}' under {storage_root:?}"
);
assert!(plaintext_path.is_none(), "Plaintext detected on disk at {:?}", plaintext_path.unwrap());
}
@@ -220,7 +216,7 @@ async fn test_head_reports_managed_metadata_for_sse_kms_and_copy() -> Result<(),
assert_encryption_metadata(source_metadata, payload.len());
let dest_key = "metadata-sse-kms-object-copy";
let copy_source = format!("{}/{}", TEST_BUCKET, source_key);
let copy_source = format!("{TEST_BUCKET}/{source_key}");
s3_client
.copy_object()

View File

@@ -389,8 +389,8 @@ async fn test_kms_concurrent_encryption() -> Result<(), Box<dyn std::error::Erro
let task = tokio::spawn(async move {
let _permit = sem.acquire().await.unwrap();
let test_data = format!("Concurrent test data {}", i).into_bytes();
let object_key = format!("concurrent-test-{}", i);
let test_data = format!("Concurrent test data {i}").into_bytes();
let object_key = format!("concurrent-test-{i}");
// Alternate between different encryption types
let result = match i % 3 {
@@ -418,7 +418,7 @@ async fn test_kms_concurrent_encryption() -> Result<(), Box<dyn std::error::Erro
}
2 => {
// SSE-C
let key = format!("testkey{:026}", i); // 32-byte key
let key = format!("testkey{i:026}"); // 32-byte key
let key_b64 = base64::engine::general_purpose::STANDARD.encode(&key);
let key_md5 = format!("{:x}", md5::compute(&key));
@@ -459,9 +459,7 @@ async fn test_kms_concurrent_encryption() -> Result<(), Box<dyn std::error::Erro
assert!(
successful_uploads >= num_concurrent - 1,
"Most concurrent uploads should succeed (got {}/{})",
successful_uploads,
num_concurrent
"Most concurrent uploads should succeed (got {successful_uploads}/{num_concurrent})"
);
info!("✅ Successfully completed {}/{} concurrent uploads", successful_uploads, num_concurrent);

View File

@@ -152,7 +152,7 @@ async fn test_kms_corrupted_key_files() -> Result<(), Box<dyn std::error::Error
// Corrupt the default key file
info!("🔧 Corrupting default key file");
let key_file_path = format!("{}/{}.key", kms_env.kms_keys_dir, default_key_id);
let backup_key_path = format!("{}.backup", key_file_path);
let backup_key_path = format!("{key_file_path}.backup");
// Backup the original key file
fs::copy(&key_file_path, &backup_key_path)?;
@@ -417,8 +417,8 @@ async fn test_kms_resource_constraints() -> Result<(), Box<dyn std::error::Error
for i in 0..10 {
let client = s3_client.clone();
let test_data = format!("Rapid test data {}", i).into_bytes();
let object_key = format!("rapid-test-{}", i);
let test_data = format!("Rapid test data {i}").into_bytes();
let object_key = format!("rapid-test-{i}");
let task = tokio::spawn(async move {
let result = client

View File

@@ -740,7 +740,7 @@ async fn test_large_multipart_upload(
// Verify data integrity
for (i, (&actual, &expected)) in downloaded_data.iter().zip(test_data.iter()).enumerate() {
if actual != expected {
panic!("Data mismatch at byte {}: got {}, expected {}", i, actual, expected);
panic!("Data mismatch at byte {i}: got {actual}, expected {expected}");
}
}

View File

@@ -321,13 +321,9 @@ async fn test_vault_kms_key_crud(
})
.to_string();
let create_response = crate::common::awscurl_post(
&format!("{}/rustfs/admin/v3/kms/keys", base_url),
&create_key_body,
access_key,
secret_key,
)
.await?;
let create_response =
crate::common::awscurl_post(&format!("{base_url}/rustfs/admin/v3/kms/keys"), &create_key_body, access_key, secret_key)
.await?;
let create_result: serde_json::Value = serde_json::from_str(&create_response)?;
let key_id = create_result["key_id"]
@@ -337,7 +333,7 @@ async fn test_vault_kms_key_crud(
// Read
let describe_response =
crate::common::awscurl_get(&format!("{}/rustfs/admin/v3/kms/keys/{}", base_url, key_id), access_key, secret_key).await?;
crate::common::awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys/{key_id}"), access_key, secret_key).await?;
let describe_result: serde_json::Value = serde_json::from_str(&describe_response)?;
assert_eq!(describe_result["key_metadata"]["key_id"], key_id);
@@ -380,7 +376,7 @@ async fn test_vault_kms_key_crud(
// Read
let list_response =
crate::common::awscurl_get(&format!("{}/rustfs/admin/v3/kms/keys", base_url), access_key, secret_key).await?;
crate::common::awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys"), access_key, secret_key).await?;
let list_result: serde_json::Value = serde_json::from_str(&list_response)?;
let keys = list_result["keys"]
@@ -407,7 +403,7 @@ async fn test_vault_kms_key_crud(
// Delete
let delete_response = crate::common::execute_awscurl(
&format!("{}/rustfs/admin/v3/kms/keys/delete?keyId={}", base_url, key_id),
&format!("{base_url}/rustfs/admin/v3/kms/keys/delete?keyId={key_id}"),
"DELETE",
None,
access_key,
@@ -422,7 +418,7 @@ async fn test_vault_kms_key_crud(
// Verify key state after deletion
let describe_deleted_response =
crate::common::awscurl_get(&format!("{}/rustfs/admin/v3/kms/keys/{}", base_url, key_id), access_key, secret_key).await?;
crate::common::awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys/{key_id}"), access_key, secret_key).await?;
let describe_result: serde_json::Value = serde_json::from_str(&describe_deleted_response)?;
let key_state = describe_result["key_metadata"]["key_state"]
@@ -439,7 +435,7 @@ async fn test_vault_kms_key_crud(
// Force Delete - Force immediate deletion for PendingDeletion key
let force_delete_response = crate::common::execute_awscurl(
&format!("{}/rustfs/admin/v3/kms/keys/delete?keyId={}&force_immediate=true", base_url, key_id),
&format!("{base_url}/rustfs/admin/v3/kms/keys/delete?keyId={key_id}&force_immediate=true"),
"DELETE",
None,
access_key,
@@ -454,7 +450,7 @@ async fn test_vault_kms_key_crud(
// Verify key no longer exists after force deletion (should return error)
let describe_force_deleted_result =
crate::common::awscurl_get(&format!("{}/rustfs/admin/v3/kms/keys/{}", base_url, key_id), access_key, secret_key).await;
crate::common::awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys/{key_id}"), access_key, secret_key).await;
// After force deletion, key should not be found (GET should fail)
assert!(describe_force_deleted_result.is_err(), "Force deleted key should not be found");

View File

@@ -419,7 +419,7 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
// Verify the data byte by byte (stricter for large files)
for (i, (&actual, &expected)) in downloaded_data.iter().zip(test_data.iter()).enumerate() {
if actual != expected {
panic!("大文件数据在第{}字节不匹配: 实际={}, 期待={}", i, actual, expected);
panic!("大文件数据在第{i}字节不匹配: 实际={actual}, 期待={expected}");
}
}

View File

@@ -476,7 +476,7 @@ async fn test_kms_critical_suite() -> Result<(), Box<dyn std::error::Error + Sen
let failed_count = results.iter().filter(|r| !r.success).count();
if failed_count > 0 {
return Err(format!("Critical test suite failed: {} tests failed", failed_count).into());
return Err(format!("Critical test suite failed: {failed_count} tests failed").into());
}
info!("✅ 所有关键测试通过");
@@ -498,7 +498,7 @@ async fn test_kms_full_suite() -> Result<(), Box<dyn std::error::Error + Send +
// Allow up to 10% failure rate for non-critical tests
if success_rate < 90.0 {
return Err(format!("Test suite success rate too low: {:.1}%", success_rate).into());
return Err(format!("Test suite success rate too low: {success_rate:.1}%").into());
}
info!("✅ 完整测试套件通过");

View File

@@ -101,8 +101,11 @@ rustfs-signer.workspace = true
rustfs-checksums.workspace = true
futures-util.workspace = true
async-recursion.workspace = true
aws-credential-types = "1.2.6"
aws-smithy-types = "1.3.2"
parking_lot = "0.12"
moka = { version = "0.12", features = ["future"] }
aws-smithy-runtime-api = "1.9.0"
[target.'cfg(not(windows))'.dependencies]
nix = { workspace = true }
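The aws-credential-types, aws-smithy-types and aws-smithy-runtime-api additions (alongside the existing aws-sdk-s3 dependency) back the "use AWS SDK for replication client" change; the replication client itself sits in one of the suppressed diffs below. A minimal sketch (not part of the committed diff) of how such a client can be constructed for a remote replication target, with illustrative parameter names rather than the actual BucketTargetSys API:

use aws_credential_types::Credentials;
use aws_sdk_s3::config::{BehaviorVersion, Region};
use aws_sdk_s3::{Client, Config};

// Sketch only: build an S3 client pointed at a replication target endpoint.
fn build_replication_client(endpoint: &str, region: &str, access_key: &str, secret_key: &str) -> Client {
    let creds = Credentials::new(access_key, secret_key, None, None, "replication-target");
    let conf = Config::builder()
        .behavior_version(BehaviorVersion::latest())
        .region(Region::new(region.to_string()))
        .endpoint_url(endpoint)
        .credentials_provider(creds)
        .force_path_style(true) // self-hosted S3 targets typically expect path-style URLs
        .build();
    Client::from_conf(conf)
}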

File diff suppressed because it is too large.

View File

@@ -16,6 +16,10 @@ use super::{quota::BucketQuota, target::BucketTargets};
use super::object_lock::ObjectLockApi;
use super::versioning::VersioningApi;
use crate::bucket::utils::deserialize;
use crate::config::com::{read_config, save_config};
use crate::error::{Error, Result};
use crate::new_object_layer_fn;
use byteorder::{BigEndian, ByteOrder, LittleEndian};
use rmp_serde::Serializer as rmpSerializer;
use rustfs_policy::policy::BucketPolicy;
@@ -30,12 +34,6 @@ use std::sync::Arc;
use time::OffsetDateTime;
use tracing::error;
use crate::bucket::target::BucketTarget;
use crate::bucket::utils::deserialize;
use crate::config::com::{read_config, save_config};
use crate::error::{Error, Result};
use crate::new_object_layer_fn;
use crate::disk::BUCKET_META_PREFIX;
use crate::store::ECStore;
@@ -322,7 +320,9 @@ impl BucketMetadata {
LittleEndian::write_u16(&mut buf[2..4], BUCKET_METADATA_VERSION);
let data = self.marshal_msg()?;
let data = self
.marshal_msg()
.map_err(|e| Error::other(format!("save bucket metadata failed: {e}")))?;
buf.extend_from_slice(&data);
@@ -362,8 +362,8 @@ impl BucketMetadata {
}
//let temp = self.bucket_targets_config_json.clone();
if !self.bucket_targets_config_json.is_empty() {
let arr: Vec<BucketTarget> = serde_json::from_slice(&self.bucket_targets_config_json)?;
self.bucket_target_config = Some(BucketTargets { targets: arr });
let bucket_targets: BucketTargets = serde_json::from_slice(&self.bucket_targets_config_json)?;
self.bucket_target_config = Some(bucket_targets);
} else {
self.bucket_target_config = Some(BucketTargets::default())
}
@@ -451,4 +451,154 @@ mod test {
assert_eq!(bm.name, new.name);
}
#[tokio::test]
async fn marshal_msg_complete_example() {
// Create a complete BucketMetadata with various configurations
let mut bm = BucketMetadata::new("test-bucket");
// Set creation time to current time
bm.created = OffsetDateTime::now_utc();
bm.lock_enabled = true;
// Add policy configuration
let policy_json = r#"{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":"*","Action":"s3:GetObject","Resource":"arn:aws:s3:::test-bucket/*"}]}"#;
bm.policy_config_json = policy_json.as_bytes().to_vec();
bm.policy_config_updated_at = OffsetDateTime::now_utc();
// Add lifecycle configuration
let lifecycle_xml = r#"<LifecycleConfiguration><Rule><ID>rule1</ID><Status>Enabled</Status><Expiration><Days>30</Days></Expiration></Rule></LifecycleConfiguration>"#;
bm.lifecycle_config_xml = lifecycle_xml.as_bytes().to_vec();
bm.lifecycle_config_updated_at = OffsetDateTime::now_utc();
// Add versioning configuration
let versioning_xml = r#"<VersioningConfiguration><Status>Enabled</Status></VersioningConfiguration>"#;
bm.versioning_config_xml = versioning_xml.as_bytes().to_vec();
bm.versioning_config_updated_at = OffsetDateTime::now_utc();
// Add encryption configuration
let encryption_xml = r#"<ServerSideEncryptionConfiguration><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>"#;
bm.encryption_config_xml = encryption_xml.as_bytes().to_vec();
bm.encryption_config_updated_at = OffsetDateTime::now_utc();
// Add tagging configuration
let tagging_xml = r#"<Tagging><TagSet><Tag><Key>Environment</Key><Value>Test</Value></Tag><Tag><Key>Owner</Key><Value>RustFS</Value></Tag></TagSet></Tagging>"#;
bm.tagging_config_xml = tagging_xml.as_bytes().to_vec();
bm.tagging_config_updated_at = OffsetDateTime::now_utc();
// Add quota configuration
let quota_json = r#"{"quota":1073741824,"quotaType":"hard"}"#; // 1GB quota
bm.quota_config_json = quota_json.as_bytes().to_vec();
bm.quota_config_updated_at = OffsetDateTime::now_utc();
// Add object lock configuration
let object_lock_xml = r#"<ObjectLockConfiguration><ObjectLockEnabled>Enabled</ObjectLockEnabled><Rule><DefaultRetention><Mode>GOVERNANCE</Mode><Days>7</Days></DefaultRetention></Rule></ObjectLockConfiguration>"#;
bm.object_lock_config_xml = object_lock_xml.as_bytes().to_vec();
bm.object_lock_config_updated_at = OffsetDateTime::now_utc();
// Add notification configuration
let notification_xml = r#"<NotificationConfiguration><CloudWatchConfiguration><Id>notification1</Id><Event>s3:ObjectCreated:*</Event><CloudWatchConfiguration><LogGroupName>test-log-group</LogGroupName></CloudWatchConfiguration></CloudWatchConfiguration></NotificationConfiguration>"#;
bm.notification_config_xml = notification_xml.as_bytes().to_vec();
bm.notification_config_updated_at = OffsetDateTime::now_utc();
// Add replication configuration
let replication_xml = r#"<ReplicationConfiguration><Role>arn:aws:iam::123456789012:role/replication-role</Role><Rule><ID>rule1</ID><Status>Enabled</Status><Prefix>documents/</Prefix><Destination><Bucket>arn:aws:s3:::destination-bucket</Bucket></Destination></Rule></ReplicationConfiguration>"#;
bm.replication_config_xml = replication_xml.as_bytes().to_vec();
bm.replication_config_updated_at = OffsetDateTime::now_utc();
// Add bucket targets configuration
let bucket_targets_json = r#"[{"endpoint":"http://target1.example.com","credentials":{"accessKey":"key1","secretKey":"secret1"},"targetBucket":"target-bucket-1","region":"us-east-1"},{"endpoint":"http://target2.example.com","credentials":{"accessKey":"key2","secretKey":"secret2"},"targetBucket":"target-bucket-2","region":"us-west-2"}]"#;
bm.bucket_targets_config_json = bucket_targets_json.as_bytes().to_vec();
bm.bucket_targets_config_updated_at = OffsetDateTime::now_utc();
// Add bucket targets meta configuration
let bucket_targets_meta_json = r#"{"replicationId":"repl-123","syncMode":"async","bandwidth":"100MB"}"#;
bm.bucket_targets_config_meta_json = bucket_targets_meta_json.as_bytes().to_vec();
bm.bucket_targets_config_meta_updated_at = OffsetDateTime::now_utc();
// Test serialization
let buf = bm.marshal_msg().unwrap();
assert!(!buf.is_empty(), "Serialized buffer should not be empty");
// Test deserialization
let deserialized_bm = BucketMetadata::unmarshal(&buf).unwrap();
// Verify all fields are correctly serialized and deserialized
assert_eq!(bm.name, deserialized_bm.name);
assert_eq!(bm.created.unix_timestamp(), deserialized_bm.created.unix_timestamp());
assert_eq!(bm.lock_enabled, deserialized_bm.lock_enabled);
// Verify configuration data
assert_eq!(bm.policy_config_json, deserialized_bm.policy_config_json);
assert_eq!(bm.lifecycle_config_xml, deserialized_bm.lifecycle_config_xml);
assert_eq!(bm.versioning_config_xml, deserialized_bm.versioning_config_xml);
assert_eq!(bm.encryption_config_xml, deserialized_bm.encryption_config_xml);
assert_eq!(bm.tagging_config_xml, deserialized_bm.tagging_config_xml);
assert_eq!(bm.quota_config_json, deserialized_bm.quota_config_json);
assert_eq!(bm.object_lock_config_xml, deserialized_bm.object_lock_config_xml);
assert_eq!(bm.notification_config_xml, deserialized_bm.notification_config_xml);
assert_eq!(bm.replication_config_xml, deserialized_bm.replication_config_xml);
assert_eq!(bm.bucket_targets_config_json, deserialized_bm.bucket_targets_config_json);
assert_eq!(bm.bucket_targets_config_meta_json, deserialized_bm.bucket_targets_config_meta_json);
// Verify timestamps (comparing unix timestamps to avoid precision issues)
assert_eq!(
bm.policy_config_updated_at.unix_timestamp(),
deserialized_bm.policy_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.lifecycle_config_updated_at.unix_timestamp(),
deserialized_bm.lifecycle_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.versioning_config_updated_at.unix_timestamp(),
deserialized_bm.versioning_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.encryption_config_updated_at.unix_timestamp(),
deserialized_bm.encryption_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.tagging_config_updated_at.unix_timestamp(),
deserialized_bm.tagging_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.quota_config_updated_at.unix_timestamp(),
deserialized_bm.quota_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.object_lock_config_updated_at.unix_timestamp(),
deserialized_bm.object_lock_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.notification_config_updated_at.unix_timestamp(),
deserialized_bm.notification_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.replication_config_updated_at.unix_timestamp(),
deserialized_bm.replication_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.bucket_targets_config_updated_at.unix_timestamp(),
deserialized_bm.bucket_targets_config_updated_at.unix_timestamp()
);
assert_eq!(
bm.bucket_targets_config_meta_updated_at.unix_timestamp(),
deserialized_bm.bucket_targets_config_meta_updated_at.unix_timestamp()
);
// Test that the serialized data contains expected content
let buf_str = String::from_utf8_lossy(&buf);
assert!(buf_str.contains("test-bucket"), "Serialized data should contain bucket name");
// Verify the buffer size is reasonable (should be larger due to all the config data)
assert!(buf.len() > 1000, "Buffer should be substantial in size due to all configurations");
println!("✅ Complete BucketMetadata serialization test passed");
println!(" - Bucket name: {}", deserialized_bm.name);
println!(" - Lock enabled: {}", deserialized_bm.lock_enabled);
println!(" - Policy config size: {} bytes", deserialized_bm.policy_config_json.len());
println!(" - Lifecycle config size: {} bytes", deserialized_bm.lifecycle_config_xml.len());
println!(" - Serialized buffer size: {} bytes", buf.len());
}
}

View File

@@ -12,19 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::StorageAPI;
use crate::StorageAPI as _;
use crate::bucket::bucket_target_sys::BucketTargetSys;
use crate::bucket::metadata::{BUCKET_LIFECYCLE_CONFIG, load_bucket_metadata_parse};
use crate::bucket::utils::{deserialize, is_meta_bucketname};
use crate::cmd::bucket_targets;
use crate::error::{Error, Result, is_err_bucket_not_found};
use crate::global::{GLOBAL_Endpoints, is_dist_erasure, is_erasure, new_object_layer_fn};
use crate::store::ECStore;
use futures::future::join_all;
use rustfs_common::heal_channel::HealOpts;
use rustfs_policy::policy::BucketPolicy;
use s3s::dto::ReplicationConfiguration;
use s3s::dto::{
BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ReplicationConfiguration,
ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ServerSideEncryptionConfiguration, Tagging,
VersioningConfiguration,
};
use std::collections::HashSet;
use std::sync::OnceLock;
@@ -261,7 +262,8 @@ impl BucketMetadataSys {
if let Some(bucket) = buckets.get(idx) {
let x = Arc::new(res);
mp.insert(bucket.clone(), x.clone());
bucket_targets::init_bucket_targets(bucket, x.clone()).await;
// TODO:EventNotifier,BucketTargetSys
BucketTargetSys::get().set(bucket, &x).await;
}
}
Err(e) => {
@@ -348,6 +350,7 @@ impl BucketMetadataSys {
if !is_erasure().await && !is_dist_erasure().await && is_err_bucket_not_found(&err) {
BucketMetadata::new(bucket)
} else {
error!("load bucket metadata failed: {}", err);
return Err(err);
}
}

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod bucket_target_sys;
pub mod error;
pub mod lifecycle;
pub mod metadata;

View File

@@ -0,0 +1,233 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::ReplicationRuleExt as _;
use crate::bucket::tagging::decode_tags_to_map;
use rustfs_filemeta::ReplicationType;
use s3s::dto::DeleteMarkerReplicationStatus;
use s3s::dto::DeleteReplicationStatus;
use s3s::dto::Destination;
use s3s::dto::{ExistingObjectReplicationStatus, ReplicationConfiguration, ReplicationRuleStatus, ReplicationRules};
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use uuid::Uuid;
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ObjectOpts {
pub name: String,
pub user_tags: String,
pub version_id: Option<Uuid>,
pub delete_marker: bool,
pub ssec: bool,
pub op_type: ReplicationType,
pub replica: bool,
pub existing_object: bool,
pub target_arn: String,
}
pub trait ReplicationConfigurationExt {
fn replicate(&self, opts: &ObjectOpts) -> bool;
fn has_existing_object_replication(&self, arn: &str) -> (bool, bool);
fn filter_actionable_rules(&self, obj: &ObjectOpts) -> ReplicationRules;
fn get_destination(&self) -> Destination;
fn has_active_rules(&self, prefix: &str, recursive: bool) -> bool;
fn filter_target_arns(&self, obj: &ObjectOpts) -> Vec<String>;
}
impl ReplicationConfigurationExt for ReplicationConfiguration {
/// Checks whether existing-object replication rules are present for the given ARN
fn has_existing_object_replication(&self, arn: &str) -> (bool, bool) {
let mut has_arn = false;
for rule in &self.rules {
if rule.destination.bucket == arn || self.role == arn {
if !has_arn {
has_arn = true;
}
if let Some(status) = &rule.existing_object_replication {
if status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::ENABLED) {
return (true, true);
}
}
}
}
(has_arn, false)
}
fn filter_actionable_rules(&self, obj: &ObjectOpts) -> ReplicationRules {
if obj.name.is_empty() && obj.op_type != ReplicationType::Resync && obj.op_type != ReplicationType::All {
return vec![];
}
let mut rules = ReplicationRules::default();
for rule in &self.rules {
if rule.status == ReplicationRuleStatus::from_static(ReplicationRuleStatus::DISABLED) {
continue;
}
if !obj.target_arn.is_empty() && rule.destination.bucket != obj.target_arn && self.role != obj.target_arn {
continue;
}
if obj.op_type == ReplicationType::Resync || obj.op_type == ReplicationType::All {
rules.push(rule.clone());
continue;
}
if let Some(status) = &rule.existing_object_replication {
if obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
continue;
}
}
if !obj.name.starts_with(rule.prefix()) {
continue;
}
if let Some(filter) = &rule.filter {
let object_tags = decode_tags_to_map(&obj.user_tags);
if filter.test_tags(&object_tags) {
rules.push(rule.clone());
}
}
}
rules.sort_by(|a, b| {
if a.destination == b.destination {
a.priority.cmp(&b.priority)
} else {
std::cmp::Ordering::Equal
}
});
rules
}
/// Returns the destination configuration
fn get_destination(&self) -> Destination {
if !self.rules.is_empty() {
self.rules[0].destination.clone()
} else {
Destination {
account: None,
bucket: "".to_string(),
encryption_configuration: None,
metrics: None,
replication_time: None,
access_control_translation: None,
storage_class: None,
}
}
}
/// Determines whether the object should be replicated
fn replicate(&self, obj: &ObjectOpts) -> bool {
let rules = self.filter_actionable_rules(obj);
for rule in rules.iter() {
if rule.status == ReplicationRuleStatus::from_static(ReplicationRuleStatus::DISABLED) {
continue;
}
if let Some(status) = &rule.existing_object_replication {
if obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
return false;
}
}
if obj.op_type == ReplicationType::Delete {
if obj.version_id.is_some() {
return rule
.delete_replication
.clone()
.is_some_and(|d| d.status == DeleteReplicationStatus::from_static(DeleteReplicationStatus::ENABLED));
} else {
return rule.delete_marker_replication.clone().is_some_and(|d| {
d.status == Some(DeleteMarkerReplicationStatus::from_static(DeleteMarkerReplicationStatus::ENABLED))
});
}
}
// Regular object/metadata replication
return rule.metadata_replicate(obj);
}
false
}
/// Checks whether there are active rules.
/// A prefix may optionally be supplied.
/// If recursive is true, the function also returns true when active rules exist at any level under the prefix.
/// If no prefix is specified, recursive is effectively true.
fn has_active_rules(&self, prefix: &str, recursive: bool) -> bool {
if self.rules.is_empty() {
return false;
}
for rule in &self.rules {
if rule.status == ReplicationRuleStatus::from_static(ReplicationRuleStatus::DISABLED) {
continue;
}
if let Some(filter) = &rule.filter {
if let Some(filter_prefix) = &filter.prefix {
if !prefix.is_empty() && !filter_prefix.is_empty() {
// The supplied prefix must fall under the rule prefix
if !recursive && !prefix.starts_with(filter_prefix) {
continue;
}
}
// If recursive, skip this rule when it matches neither the test prefix nor any level beneath it
if recursive && !rule.prefix().starts_with(prefix) && !prefix.starts_with(rule.prefix()) {
continue;
}
}
}
return true;
}
false
}
/// Filters target ARNs, returning the distinct target ARNs found in the configuration
fn filter_target_arns(&self, obj: &ObjectOpts) -> Vec<String> {
let mut arns = Vec::new();
let mut targets_map: HashSet<String> = HashSet::new();
let rules = self.filter_actionable_rules(obj);
for rule in rules {
if rule.status == ReplicationRuleStatus::from_static(ReplicationRuleStatus::DISABLED) {
continue;
}
if !self.role.is_empty() {
arns.push(self.role.clone()); // use the legacy RoleArn if present
return arns;
}
if !targets_map.contains(&rule.destination.bucket) {
targets_map.insert(rule.destination.bucket.clone());
}
}
for arn in targets_map {
arns.push(arn);
}
arns
}
}
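A short usage sketch for the trait above (not part of the committed diff), assuming this module's ObjectOpts and ReplicationConfigurationExt are in scope; the ReplicationType variant used for a plain object write is an assumption for illustration:

use rustfs_filemeta::ReplicationType;
use s3s::dto::ReplicationConfiguration;

// Sketch only: decide whether a newly written object should be queued for
// replication and collect the distinct target ARNs that apply to it.
fn should_replicate(cfg: &ReplicationConfiguration, object_name: &str) -> (bool, Vec<String>) {
    let opts = ObjectOpts {
        name: object_name.to_string(),
        op_type: ReplicationType::Object, // assumed variant name for a regular PUT
        ..Default::default()
    };
    (cfg.replicate(&opts), cfg.filter_target_arns(&opts))
}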

View File

@@ -12,30 +12,36 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Replication status type for x-amz-replication-status header
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum StatusType {
Pending,
Completed,
CompletedLegacy,
Failed,
Replica,
use serde::{Deserialize, Serialize};
use std::fmt;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum ResyncStatusType {
#[default]
NoResync,
ResyncPending,
ResyncCanceled,
ResyncStarted,
ResyncCompleted,
ResyncFailed,
}
impl StatusType {
// Converts the enum variant to its string representation
pub fn as_str(&self) -> &'static str {
match self {
StatusType::Pending => "PENDING",
StatusType::Completed => "COMPLETED",
StatusType::CompletedLegacy => "COMPLETE",
StatusType::Failed => "FAILED",
StatusType::Replica => "REPLICA",
}
}
// Checks if the status is empty (not set)
pub fn is_empty(&self) -> bool {
matches!(self, StatusType::Pending) // Adjust this as needed
impl ResyncStatusType {
pub fn is_valid(&self) -> bool {
*self != ResyncStatusType::NoResync
}
}
impl fmt::Display for ResyncStatusType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s = match self {
ResyncStatusType::ResyncStarted => "Ongoing",
ResyncStatusType::ResyncCompleted => "Completed",
ResyncStatusType::ResyncFailed => "Failed",
ResyncStatusType::ResyncPending => "Pending",
ResyncStatusType::ResyncCanceled => "Canceled",
ResyncStatusType::NoResync => "",
};
write!(f, "{s}")
}
}

View File

@@ -12,4 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod config;
pub mod datatypes;
mod replication_pool;
mod replication_resyncer;
mod replication_state;
mod replication_type;
mod rule;
pub use config::*;
pub use datatypes::*;
pub use replication_pool::*;
pub use replication_resyncer::*;
pub use replication_type::*;
pub use rule::*;

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@@ -0,0 +1,470 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::{Error, Result};
use crate::store_api::ObjectInfo;
use regex::Regex;
use rustfs_filemeta::VersionPurgeStatusType;
use rustfs_filemeta::{ReplicatedInfos, ReplicationType};
use rustfs_filemeta::{ReplicationState, ReplicationStatusType};
use rustfs_utils::http::RESERVED_METADATA_PREFIX_LOWER;
use rustfs_utils::http::RUSTFS_REPLICATION_RESET_STATUS;
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::collections::HashMap;
use std::fmt;
use time::OffsetDateTime;
use uuid::Uuid;
pub const REPLICATION_RESET: &str = "replication-reset";
pub const REPLICATION_STATUS: &str = "replication-status";
// ReplicateQueued - replication being queued trail
pub const REPLICATE_QUEUED: &str = "replicate:queue";
// ReplicateExisting - audit trail for existing objects replication
pub const REPLICATE_EXISTING: &str = "replicate:existing";
// ReplicateExistingDelete - audit trail for delete replication triggered for existing delete markers
pub const REPLICATE_EXISTING_DELETE: &str = "replicate:existing:delete";
// ReplicateMRF - audit trail for replication from Most Recent Failures (MRF) queue
pub const REPLICATE_MRF: &str = "replicate:mrf";
// ReplicateIncoming - audit trail of inline replication
pub const REPLICATE_INCOMING: &str = "replicate:incoming";
// ReplicateIncomingDelete - audit trail of inline replication of deletes.
pub const REPLICATE_INCOMING_DELETE: &str = "replicate:incoming:delete";
// ReplicateHeal - audit trail for healing of failed/pending replications
pub const REPLICATE_HEAL: &str = "replicate:heal";
// ReplicateHealDelete - audit trail of healing of failed/pending delete replications.
pub const REPLICATE_HEAL_DELETE: &str = "replicate:heal:delete";
#[derive(Serialize, Deserialize, Debug)]
pub struct MrfReplicateEntry {
#[serde(rename = "bucket")]
pub bucket: String,
#[serde(rename = "object")]
pub object: String,
#[serde(skip_serializing, skip_deserializing)]
pub version_id: Option<Uuid>,
#[serde(rename = "retryCount")]
pub retry_count: i32,
#[serde(skip_serializing, skip_deserializing)]
pub size: i64,
}
pub trait ReplicationWorkerOperation: Any + Send + Sync {
fn to_mrf_entry(&self) -> MrfReplicateEntry;
fn as_any(&self) -> &dyn Any;
fn get_bucket(&self) -> &str;
fn get_object(&self) -> &str;
fn get_size(&self) -> i64;
fn is_delete_marker(&self) -> bool;
fn get_op_type(&self) -> ReplicationType;
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ReplicateTargetDecision {
pub replicate: bool,
pub synchronous: bool,
pub arn: String,
pub id: String,
}
impl ReplicateTargetDecision {
pub fn new(arn: String, replicate: bool, sync: bool) -> Self {
Self {
replicate,
synchronous: sync,
arn,
id: String::new(),
}
}
}
impl fmt::Display for ReplicateTargetDecision {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{};{};{};{}", self.replicate, self.synchronous, self.arn, self.id)
}
}
/// ReplicateDecision represents replication decision for each target
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplicateDecision {
pub targets_map: HashMap<String, ReplicateTargetDecision>,
}
impl ReplicateDecision {
pub fn new() -> Self {
Self {
targets_map: HashMap::new(),
}
}
/// Returns true if at least one target qualifies for replication
pub fn replicate_any(&self) -> bool {
self.targets_map.values().any(|t| t.replicate)
}
/// Returns true if at least one target qualifies for synchronous replication
pub fn is_synchronous(&self) -> bool {
self.targets_map.values().any(|t| t.synchronous)
}
/// Updates ReplicateDecision with target's replication decision
pub fn set(&mut self, target: ReplicateTargetDecision) {
self.targets_map.insert(target.arn.clone(), target);
}
/// Returns a stringified representation of internal replication status with all targets marked as `PENDING`
pub fn pending_status(&self) -> Option<String> {
let mut result = String::new();
for target in self.targets_map.values() {
if target.replicate {
result.push_str(&format!("{}={};", target.arn, ReplicationStatusType::Pending.as_str()));
}
}
if result.is_empty() { None } else { Some(result) }
}
}
impl fmt::Display for ReplicateDecision {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut result = String::new();
for (key, value) in &self.targets_map {
result.push_str(&format!("{key}={value},"));
}
write!(f, "{}", result.trim_end_matches(','))
}
}
impl Default for ReplicateDecision {
fn default() -> Self {
Self::new()
}
}
// parse k-v pairs of target ARN to stringified ReplicateTargetDecision delimited by ',' into a
// ReplicateDecision struct
pub fn parse_replicate_decision(_bucket: &str, s: &str) -> Result<ReplicateDecision> {
let mut decision = ReplicateDecision::new();
if s.is_empty() {
return Ok(decision);
}
for p in s.split(',') {
if p.is_empty() {
continue;
}
let slc = p.split('=').collect::<Vec<&str>>();
if slc.len() != 2 {
return Err(Error::other(format!("invalid replicate decision format: {s}")));
}
let tgt_str = slc[1].trim_matches('"');
let tgt = tgt_str.split(';').collect::<Vec<&str>>();
if tgt.len() != 4 {
return Err(Error::other(format!("invalid replicate decision format: {s}")));
}
let tgt = ReplicateTargetDecision {
replicate: tgt[0] == "true",
synchronous: tgt[1] == "true",
arn: tgt[2].to_string(),
id: tgt[3].to_string(),
};
decision.targets_map.insert(slc[0].to_string(), tgt);
}
Ok(decision)
// r = ReplicateDecision{
// targetsMap: make(map[string]replicateTargetDecision),
// }
// if len(s) == 0 {
// return
// }
// for _, p := range strings.Split(s, ",") {
// if p == "" {
// continue
// }
// slc := strings.Split(p, "=")
// if len(slc) != 2 {
// return r, errInvalidReplicateDecisionFormat
// }
// tgtStr := strings.TrimSuffix(strings.TrimPrefix(slc[1], `"`), `"`)
// tgt := strings.Split(tgtStr, ";")
// if len(tgt) != 4 {
// return r, errInvalidReplicateDecisionFormat
// }
// r.targetsMap[slc[0]] = replicateTargetDecision{Replicate: tgt[0] == "true", Synchronous: tgt[1] == "true", Arn: tgt[2], ID: tgt[3]}
// }
}
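The string format accepted here mirrors ReplicateTargetDecision's Display output: comma-separated arn="replicate;synchronous;arn;id" entries. An illustrative test (not part of the committed diff) with a made-up ARN:

#[test]
fn parses_single_target_decision() {
    // Made-up ARN; each entry has the form arn="replicate;synchronous;arn;id".
    let s = r#"arn:rustfs:replication::t1:dest="true;false;arn:rustfs:replication::t1:dest;""#;
    let dec = parse_replicate_decision("my-bucket", s).expect("valid decision string");
    assert!(dec.replicate_any());
    assert!(!dec.is_synchronous());
    assert_eq!(dec.targets_map.len(), 1);
}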
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ResyncTargetDecision {
pub replicate: bool,
pub reset_id: String,
pub reset_before_date: Option<OffsetDateTime>,
}
pub fn target_reset_header(arn: &str) -> String {
format!("{RESERVED_METADATA_PREFIX_LOWER}{REPLICATION_RESET}-{arn}")
}
impl ResyncTargetDecision {
pub fn resync_target(
oi: &ObjectInfo,
arn: &str,
reset_id: &str,
reset_before_date: Option<OffsetDateTime>,
status: ReplicationStatusType,
) -> Self {
let rs = oi
.user_defined
.get(target_reset_header(arn).as_str())
.or(oi.user_defined.get(RUSTFS_REPLICATION_RESET_STATUS))
.map(|s| s.to_string());
let mut dec = Self::default();
let mod_time = oi.mod_time.unwrap_or(OffsetDateTime::UNIX_EPOCH);
if rs.is_none() {
let reset_before_date = reset_before_date.unwrap_or(OffsetDateTime::UNIX_EPOCH);
if !reset_id.is_empty() && mod_time < reset_before_date {
dec.replicate = true;
return dec;
}
dec.replicate = status == ReplicationStatusType::Empty;
return dec;
}
if reset_id.is_empty() || reset_before_date.is_none() {
return dec;
}
let rs = rs.unwrap();
let reset_before_date = reset_before_date.unwrap();
let parts: Vec<&str> = rs.splitn(2, ';').collect();
if parts.len() != 2 {
return dec;
}
let new_reset = parts[0] == reset_id;
if !new_reset && status == ReplicationStatusType::Completed {
return dec;
}
dec.replicate = new_reset && mod_time < reset_before_date;
dec
}
}
/// ResyncDecision is a struct representing a map with target's individual resync decisions
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResyncDecision {
pub targets: HashMap<String, ResyncTargetDecision>,
}
impl ResyncDecision {
pub fn new() -> Self {
Self { targets: HashMap::new() }
}
/// Returns true if no target has a resync decision present
pub fn is_empty(&self) -> bool {
self.targets.is_empty()
}
pub fn must_resync(&self) -> bool {
self.targets.values().any(|v| v.replicate)
}
pub fn must_resync_target(&self, tgt_arn: &str) -> bool {
self.targets.get(tgt_arn).map(|v| v.replicate).unwrap_or(false)
}
}
impl Default for ResyncDecision {
fn default() -> Self {
Self::new()
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplicateObjectInfo {
pub name: String,
pub size: i64,
pub actual_size: i64,
pub bucket: String,
pub version_id: Option<Uuid>,
pub etag: Option<String>,
pub mod_time: Option<OffsetDateTime>,
pub replication_status: ReplicationStatusType,
pub replication_status_internal: Option<String>,
pub delete_marker: bool,
pub version_purge_status_internal: Option<String>,
pub version_purge_status: VersionPurgeStatusType,
pub replication_state: Option<ReplicationState>,
pub op_type: ReplicationType,
pub event_type: String,
pub dsc: ReplicateDecision,
pub existing_obj_resync: ResyncDecision,
pub target_statuses: HashMap<String, ReplicationStatusType>,
pub target_purge_statuses: HashMap<String, VersionPurgeStatusType>,
pub replication_timestamp: Option<OffsetDateTime>,
pub ssec: bool,
pub user_tags: String,
pub checksum: Vec<u8>,
pub retry_count: u32,
}
impl ReplicationWorkerOperation for ReplicateObjectInfo {
fn as_any(&self) -> &dyn Any {
self
}
fn to_mrf_entry(&self) -> MrfReplicateEntry {
MrfReplicateEntry {
bucket: self.bucket.clone(),
object: self.name.clone(),
version_id: self.version_id,
retry_count: self.retry_count as i32,
size: self.size,
}
}
fn get_bucket(&self) -> &str {
&self.bucket
}
fn get_object(&self) -> &str {
&self.name
}
fn get_size(&self) -> i64 {
self.size
}
fn is_delete_marker(&self) -> bool {
self.delete_marker
}
fn get_op_type(&self) -> ReplicationType {
self.op_type
}
}
lazy_static::lazy_static! {
static ref REPL_STATUS_REGEX: Regex = Regex::new(r"([^=].*?)=([^,].*?);").unwrap();
}
impl ReplicateObjectInfo {
/// Returns replication status of a target
pub fn target_replication_status(&self, arn: &str) -> ReplicationStatusType {
let binding = self.replication_status_internal.clone().unwrap_or_default();
let captures = REPL_STATUS_REGEX.captures_iter(&binding);
for cap in captures {
if cap.len() == 3 && &cap[1] == arn {
return ReplicationStatusType::from(&cap[2]);
}
}
ReplicationStatusType::default()
}
/// Returns the relevant info needed by MRF
pub fn to_mrf_entry(&self) -> MrfReplicateEntry {
MrfReplicateEntry {
bucket: self.bucket.clone(),
object: self.name.clone(),
version_id: self.version_id,
retry_count: self.retry_count as i32,
size: self.size,
}
}
}
// constructs a replication status map from string representation
pub fn replication_statuses_map(s: &str) -> HashMap<String, ReplicationStatusType> {
let mut targets = HashMap::new();
let rep_stat_matches = REPL_STATUS_REGEX.captures_iter(s).map(|c| c.extract());
for (_, [arn, status]) in rep_stat_matches {
if arn.is_empty() {
continue;
}
let status = ReplicationStatusType::from(status);
targets.insert(arn.to_string(), status);
}
targets
}
// constructs a version purge status map from string representation
pub fn version_purge_statuses_map(s: &str) -> HashMap<String, VersionPurgeStatusType> {
let mut targets = HashMap::new();
let purge_status_matches = REPL_STATUS_REGEX.captures_iter(s).map(|c| c.extract());
for (_, [arn, status]) in purge_status_matches {
if arn.is_empty() {
continue;
}
let status = VersionPurgeStatusType::from(status);
targets.insert(arn.to_string(), status);
}
targets
}
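// Illustrative sketch (not part of the original change): the two helpers above consume the internal
// `arn=STATUS;` status strings; this only checks the splitting by ARN, not any particular status value.
#[cfg(test)]
mod status_map_tests {
    use super::*;

    #[test]
    fn splits_internal_status_strings_by_arn() {
        let repl = replication_statuses_map("arn-a=COMPLETED;arn-b=PENDING;");
        assert_eq!(repl.len(), 2);
        assert!(repl.contains_key("arn-a"));
        assert!(repl.contains_key("arn-b"));

        let purge = version_purge_statuses_map("arn-a=PENDING;");
        assert_eq!(purge.len(), 1);
        assert!(purge.contains_key("arn-a"));
    }
}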
pub fn get_replication_state(rinfos: &ReplicatedInfos, prev_state: &ReplicationState, _vid: Option<String>) -> ReplicationState {
let reset_status_map: Vec<(String, String)> = rinfos
.targets
.iter()
.filter(|v| !v.resync_timestamp.is_empty())
.map(|t| (target_reset_header(t.arn.as_str()), t.resync_timestamp.clone()))
.collect();
let repl_statuses = rinfos.replication_status_internal();
let vpurge_statuses = rinfos.version_purge_status_internal();
let mut reset_statuses_map = prev_state.reset_statuses_map.clone();
for (key, value) in reset_status_map {
reset_statuses_map.insert(key, value);
}
ReplicationState {
replicate_decision_str: prev_state.replicate_decision_str.clone(),
reset_statuses_map,
replica_timestamp: prev_state.replica_timestamp,
replica_status: prev_state.replica_status.clone(),
targets: replication_statuses_map(&repl_statuses.clone().unwrap_or_default()),
replication_status_internal: repl_statuses,
replication_timestamp: rinfos.replication_timestamp,
purge_targets: version_purge_statuses_map(&vpurge_statuses.clone().unwrap_or_default()),
version_purge_status_internal: vpurge_statuses,
..Default::default()
}
}

View File

@@ -0,0 +1,51 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use s3s::dto::ReplicaModificationsStatus;
use s3s::dto::ReplicationRule;
use super::ObjectOpts;
pub trait ReplicationRuleExt {
fn prefix(&self) -> &str;
fn metadata_replicate(&self, obj: &ObjectOpts) -> bool;
}
impl ReplicationRuleExt for ReplicationRule {
fn prefix(&self) -> &str {
if let Some(filter) = &self.filter {
if let Some(prefix) = &filter.prefix {
prefix
} else if let Some(and) = &filter.and {
and.prefix.as_deref().unwrap_or("")
} else {
""
}
} else {
""
}
}
fn metadata_replicate(&self, obj: &ObjectOpts) -> bool {
if !obj.replica {
return true;
}
self.source_selection_criteria.as_ref().is_some_and(|s| {
s.replica_modifications
.clone()
.is_some_and(|r| r.status == ReplicaModificationsStatus::from_static(ReplicaModificationsStatus::ENABLED))
})
}
}

View File

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use s3s::dto::Tag;
use url::form_urlencoded;
@@ -34,6 +36,20 @@ pub fn decode_tags(tags: &str) -> Vec<Tag> {
list
}
pub fn decode_tags_to_map(tags: &str) -> HashMap<String, String> {
let mut list = HashMap::new();
for (k, v) in form_urlencoded::parse(tags.as_bytes()) {
if k.is_empty() || v.is_empty() {
continue;
}
list.insert(k.to_string(), v.to_string());
}
list
}
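// Illustrative sketch (not part of the original change): decode_tags_to_map takes the same
// URL-encoded tag string as decode_tags above and returns a key/value map, dropping entries
// with an empty key or value.
#[cfg(test)]
mod decode_tags_to_map_tests {
    use super::*;

    #[test]
    fn decodes_url_encoded_tags_into_a_map() {
        let m = decode_tags_to_map("env=prod&team=storage&empty=");
        assert_eq!(m.len(), 2);
        assert_eq!(m.get("env").map(String::as_str), Some("prod"));
        assert_eq!(m.get("team").map(String::as_str), Some("storage"));
        assert!(!m.contains_key("empty"));
    }
}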
pub fn encode_tags(tags: Vec<Tag>) -> String {
let mut encoded = form_urlencoded::Serializer::new(String::new());

View File

@@ -0,0 +1,66 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::BucketTargetType;
use std::fmt::Display;
use std::str::FromStr;
pub struct ARN {
pub arn_type: BucketTargetType,
pub id: String,
pub region: String,
pub bucket: String,
}
impl ARN {
pub fn new(arn_type: BucketTargetType, id: String, region: String, bucket: String) -> Self {
Self {
arn_type,
id,
region,
bucket,
}
}
    pub fn is_empty(&self) -> bool {
        !self.arn_type.is_valid()
    }
}
impl Display for ARN {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "arn:rustfs:{}:{}:{}:{}", self.arn_type, self.region, self.id, self.bucket)
}
}
impl FromStr for ARN {
type Err = std::io::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if !s.starts_with("arn:rustfs:") {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Invalid ARN format"));
}
let parts: Vec<&str> = s.split(':').collect();
if parts.len() != 6 {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Invalid ARN format"));
}
        Ok(ARN {
            arn_type: BucketTargetType::from_str(parts[2]).unwrap_or_default(),
            region: parts[3].to_string(),
            id: parts[4].to_string(),
            bucket: parts[5].to_string(),
        })
}
}
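// Illustrative sketch (not part of the original change): Display and FromStr above are intended to
// round-trip the `arn:rustfs:<type>:<region>:<id>:<bucket>` form; the sample values are arbitrary.
#[cfg(test)]
mod arn_tests {
    use super::*;

    #[test]
    fn arn_display_from_str_round_trip() {
        let arn = ARN::new(
            BucketTargetType::ReplicationService,
            "target-id".to_string(),
            "us-east-1".to_string(),
            "dest-bucket".to_string(),
        );
        let s = arn.to_string();
        assert_eq!(s, "arn:rustfs:replication:us-east-1:target-id:dest-bucket");

        let parsed: ARN = s.parse().expect("valid ARN string");
        assert_eq!(parsed.region, "us-east-1");
        assert_eq!(parsed.id, "target-id");
        assert_eq!(parsed.bucket, "dest-bucket");
    }
}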

View File

@@ -0,0 +1,800 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::{Error, Result};
use rmp_serde::Serializer as rmpSerializer;
use serde::{Deserialize, Serialize};
use std::{
fmt::{self, Display},
str::FromStr,
time::Duration,
};
use time::OffsetDateTime;
use url::Url;
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct Credentials {
#[serde(rename = "accessKey")]
pub access_key: String,
#[serde(rename = "secretKey")]
pub secret_key: String,
pub session_token: Option<String>,
pub expiration: Option<chrono::DateTime<chrono::Utc>>,
}
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub enum ServiceType {
#[default]
Replication,
}
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct LatencyStat {
#[serde(with = "duration_milliseconds")]
pub curr: Duration, // Current latency
#[serde(with = "duration_milliseconds")]
pub avg: Duration, // Average latency
#[serde(with = "duration_milliseconds")]
pub max: Duration, // Maximum latency
}
mod duration_milliseconds {
use serde::{Deserialize, Deserializer, Serializer};
use std::time::Duration;
pub fn serialize<S>(duration: &Duration, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_u64(duration.as_millis() as u64)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<Duration, D::Error>
where
D: Deserializer<'de>,
{
let millis = u64::deserialize(deserializer)?;
Ok(Duration::from_millis(millis))
}
}
mod duration_seconds {
use serde::{Deserialize, Deserializer, Serializer};
use std::time::Duration;
pub fn serialize<S>(duration: &Duration, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_u64(duration.as_secs())
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<Duration, D::Error>
where
D: Deserializer<'de>,
{
let secs = u64::deserialize(deserializer)?;
Ok(Duration::from_secs(secs))
}
}
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq)]
pub enum BucketTargetType {
#[default]
None,
#[serde(rename = "replication")]
ReplicationService,
#[serde(rename = "ilm")]
IlmService,
}
impl BucketTargetType {
pub fn is_valid(&self) -> bool {
match self {
BucketTargetType::None => false,
BucketTargetType::ReplicationService | BucketTargetType::IlmService => true,
}
}
}
impl FromStr for BucketTargetType {
type Err = std::io::Error;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
match s {
"replication" => Ok(BucketTargetType::ReplicationService),
"ilm" => Ok(BucketTargetType::IlmService),
_ => Ok(BucketTargetType::None),
}
}
}
impl fmt::Display for BucketTargetType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
BucketTargetType::None => write!(f, ""),
BucketTargetType::ReplicationService => write!(f, "replication"),
BucketTargetType::IlmService => write!(f, "ilm"),
}
}
}
// Define BucketTarget structure
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct BucketTarget {
#[serde(rename = "sourcebucket", default)]
pub source_bucket: String,
#[serde(default)]
pub endpoint: String,
#[serde(default)]
pub credentials: Option<Credentials>,
#[serde(rename = "targetbucket", default)]
pub target_bucket: String,
#[serde(default)]
pub secure: bool,
#[serde(default)]
pub path: String,
#[serde(default)]
pub api: String,
#[serde(default)]
pub arn: String,
#[serde(rename = "type", default)]
pub target_type: BucketTargetType,
#[serde(default)]
pub region: String,
#[serde(alias = "bandwidth", default)]
pub bandwidth_limit: i64,
#[serde(rename = "replicationSync", default)]
pub replication_sync: bool,
#[serde(default)]
pub storage_class: String,
#[serde(rename = "healthCheckDuration", with = "duration_seconds", default)]
pub health_check_duration: Duration,
#[serde(rename = "disableProxy", default)]
pub disable_proxy: bool,
#[serde(rename = "resetBeforeDate", with = "time::serde::rfc3339::option", default)]
pub reset_before_date: Option<OffsetDateTime>,
#[serde(default)]
pub reset_id: String,
#[serde(rename = "totalDowntime", with = "duration_seconds", default)]
pub total_downtime: Duration,
#[serde(rename = "lastOnline", with = "time::serde::rfc3339::option", default)]
pub last_online: Option<OffsetDateTime>,
#[serde(rename = "isOnline", default)]
pub online: bool,
#[serde(default)]
pub latency: LatencyStat,
#[serde(default)]
pub deployment_id: String,
#[serde(default)]
pub edge: bool,
#[serde(rename = "edgeSyncBeforeExpiry", default)]
pub edge_sync_before_expiry: bool,
#[serde(rename = "offlineCount", default)]
pub offline_count: u64,
}
impl BucketTarget {
    pub fn is_empty(&self) -> bool {
        self.target_bucket.is_empty() && self.endpoint.is_empty() && self.arn.is_empty()
    }
pub fn url(&self) -> Result<Url> {
let scheme = if self.secure { "https" } else { "http" };
Url::parse(&format!("{}://{}", scheme, self.endpoint)).map_err(Error::other)
}
}
impl Display for BucketTarget {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{} ", self.endpoint)?;
write!(f, "{}", self.target_bucket.clone())?;
Ok(())
}
}
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct BucketTargets {
pub targets: Vec<BucketTarget>,
}
impl BucketTargets {
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
let mut buf = Vec::new();
self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?;
Ok(buf)
}
pub fn unmarshal(buf: &[u8]) -> Result<Self> {
let t: BucketTargets = rmp_serde::from_slice(buf)?;
Ok(t)
}
pub fn is_empty(&self) -> bool {
if self.targets.is_empty() {
return true;
}
        for target in &self.targets {
            if !target.is_empty() {
                return false;
}
}
true
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
use std::time::Duration;
use time::OffsetDateTime;
#[test]
fn test_bucket_target_json_deserialize() {
let json = r#"
{
"sourcebucket": "source-bucket-name",
"endpoint": "s3.amazonaws.com",
"credentials": {
"accessKey": "test-access-key",
"secretKey": "test-secret-key",
"session_token": "test-session-token",
"expiration": "2024-12-31T23:59:59Z"
},
"targetbucket": "target-bucket-name",
"secure": true,
"path": "/api/v1",
"api": "s3v4",
"arn": "arn:aws:s3:::target-bucket-name",
"type": "replication",
"region": "us-east-1",
"bandwidth_limit": 1000000,
"replicationSync": true,
"storage_class": "STANDARD",
"healthCheckDuration": 30,
"disableProxy": false,
"resetBeforeDate": null,
"reset_id": "reset-123",
"totalDowntime": 3600,
"last_online": null,
"isOnline": true,
"latency": {
"curr": 100,
"avg": 150,
"max": 300
},
"deployment_id": "deployment-456",
"edge": false,
"edgeSyncBeforeExpiry": true,
"offlineCount": 5
}
"#;
let result: std::result::Result<BucketTarget, _> = serde_json::from_str(json);
assert!(result.is_ok(), "Failed to deserialize BucketTarget: {:?}", result.err());
let target = result.unwrap();
// Verify basic fields
assert_eq!(target.source_bucket, "source-bucket-name");
assert_eq!(target.endpoint, "s3.amazonaws.com");
assert_eq!(target.target_bucket, "target-bucket-name");
assert!(target.secure);
assert_eq!(target.path, "/api/v1");
assert_eq!(target.api, "s3v4");
assert_eq!(target.arn, "arn:aws:s3:::target-bucket-name");
assert_eq!(target.target_type, BucketTargetType::ReplicationService);
assert_eq!(target.region, "us-east-1");
assert_eq!(target.bandwidth_limit, 1000000);
assert!(target.replication_sync);
assert_eq!(target.storage_class, "STANDARD");
assert_eq!(target.health_check_duration, Duration::from_secs(30));
assert!(!target.disable_proxy);
assert_eq!(target.reset_id, "reset-123");
assert_eq!(target.total_downtime, Duration::from_secs(3600));
assert!(target.online);
assert_eq!(target.deployment_id, "deployment-456");
assert!(!target.edge);
assert!(target.edge_sync_before_expiry);
assert_eq!(target.offline_count, 5);
// Verify credentials
assert!(target.credentials.is_some());
let credentials = target.credentials.unwrap();
assert_eq!(credentials.access_key, "test-access-key");
assert_eq!(credentials.secret_key, "test-secret-key");
assert_eq!(credentials.session_token, Some("test-session-token".to_string()));
assert!(credentials.expiration.is_some());
// Verify latency statistics
assert_eq!(target.latency.curr, Duration::from_millis(100));
assert_eq!(target.latency.avg, Duration::from_millis(150));
assert_eq!(target.latency.max, Duration::from_millis(300));
// Verify time fields
assert!(target.reset_before_date.is_none());
assert!(target.last_online.is_none());
}
#[test]
fn test_bucket_target_json_serialize_deserialize_roundtrip() {
let original = BucketTarget {
source_bucket: "test-source".to_string(),
endpoint: "rustfs.example.com".to_string(),
credentials: Some(Credentials {
access_key: "rustfsaccess".to_string(),
secret_key: "rustfssecret".to_string(),
session_token: None,
expiration: None,
}),
target_bucket: "test-target".to_string(),
secure: false,
path: "/".to_string(),
api: "s3v4".to_string(),
arn: "arn:rustfs:s3:::test-target".to_string(),
target_type: BucketTargetType::ReplicationService,
region: "us-west-2".to_string(),
bandwidth_limit: 500000,
replication_sync: false,
storage_class: "REDUCED_REDUNDANCY".to_string(),
health_check_duration: Duration::from_secs(60),
disable_proxy: true,
reset_before_date: Some(OffsetDateTime::now_utc()),
reset_id: "reset-456".to_string(),
total_downtime: Duration::from_secs(1800),
last_online: Some(OffsetDateTime::now_utc()),
online: false,
latency: LatencyStat {
curr: Duration::from_millis(250),
avg: Duration::from_millis(200),
max: Duration::from_millis(500),
},
deployment_id: "deploy-789".to_string(),
edge: true,
edge_sync_before_expiry: false,
offline_count: 10,
};
// Serialize to JSON
let json = serde_json::to_string(&original).expect("Failed to serialize to JSON");
// Deserialize from JSON
let deserialized: BucketTarget = serde_json::from_str(&json).expect("Failed to deserialize from JSON");
// Verify key fields are equal
assert_eq!(original.source_bucket, deserialized.source_bucket);
assert_eq!(original.endpoint, deserialized.endpoint);
assert_eq!(original.target_bucket, deserialized.target_bucket);
assert_eq!(original.secure, deserialized.secure);
assert_eq!(original.target_type, deserialized.target_type);
assert_eq!(original.region, deserialized.region);
assert_eq!(original.bandwidth_limit, deserialized.bandwidth_limit);
assert_eq!(original.replication_sync, deserialized.replication_sync);
assert_eq!(original.health_check_duration, deserialized.health_check_duration);
assert_eq!(original.online, deserialized.online);
assert_eq!(original.edge, deserialized.edge);
assert_eq!(original.offline_count, deserialized.offline_count);
}
#[test]
fn test_bucket_target_type_json_deserialize() {
// Test BucketTargetType JSON deserialization
let replication_json = r#""replication""#;
let ilm_json = r#""ilm""#;
let replication_type: BucketTargetType =
serde_json::from_str(replication_json).expect("Failed to deserialize replication type");
let ilm_type: BucketTargetType = serde_json::from_str(ilm_json).expect("Failed to deserialize ilm type");
assert_eq!(replication_type, BucketTargetType::ReplicationService);
assert_eq!(ilm_type, BucketTargetType::IlmService);
// Verify type validity
assert!(replication_type.is_valid());
assert!(ilm_type.is_valid());
assert!(!BucketTargetType::None.is_valid());
}
#[test]
fn test_credentials_json_deserialize() {
let json = r#"
{
"accessKey": "AKIAIOSFODNN7EXAMPLE",
"secretKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
"session_token": "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT",
"expiration": "2024-12-31T23:59:59Z"
}
"#;
let credentials: Credentials = serde_json::from_str(json).expect("Failed to deserialize credentials");
assert_eq!(credentials.access_key, "AKIAIOSFODNN7EXAMPLE");
assert_eq!(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY");
assert_eq!(
credentials.session_token,
Some("AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT".to_string())
);
assert!(credentials.expiration.is_some());
}
#[test]
fn test_latency_stat_json_deserialize() {
let json = r#"
{
"curr": 50,
"avg": 75,
"max": 200
}
"#;
let latency: LatencyStat = serde_json::from_str(json).expect("Failed to deserialize latency stat");
assert_eq!(latency.curr, Duration::from_millis(50));
assert_eq!(latency.avg, Duration::from_millis(75));
assert_eq!(latency.max, Duration::from_millis(200));
}
#[test]
fn test_bucket_targets_json_deserialize() {
let json = r#"
{
"targets": [
{
"sourcebucket": "bucket1",
"endpoint": "s3.amazonaws.com",
"targetbucket": "target1",
"secure": true,
"path": "/",
"api": "s3v4",
"arn": "arn:aws:s3:::target1",
"type": "replication",
"region": "us-east-1",
"bandwidth_limit": 0,
"replicationSync": false,
"storage_class": "",
"healthCheckDuration": 0,
"disableProxy": false,
"resetBeforeDate": null,
"reset_id": "",
"totalDowntime": 0,
"lastOnline": null,
"isOnline": false,
"latency": {
"curr": 0,
"avg": 0,
"max": 0
},
"deployment_id": "",
"edge": false,
"edgeSyncBeforeExpiry": false,
"offlineCount": 0
}
]
}
"#;
let targets: BucketTargets = serde_json::from_str(json).expect("Failed to deserialize bucket targets");
assert_eq!(targets.targets.len(), 1);
assert_eq!(targets.targets[0].source_bucket, "bucket1");
assert_eq!(targets.targets[0].endpoint, "s3.amazonaws.com");
assert_eq!(targets.targets[0].target_bucket, "target1");
assert!(!targets.is_empty());
}
#[test]
fn test_user_provided_json_deserialize() {
// Test the specific JSON provided by the user with missing required fields added
let json = r#"
{
"sourcebucket": "mc-test-bucket-22139",
"endpoint": "localhost:8000",
"credentials": {
"accessKey": "rustfsadmin",
"secretKey": "rustfsadmin",
"expiration": "0001-01-01T00:00:00Z"
},
"targetbucket": "test",
"secure": false,
"path": "auto",
"api": "s3v4",
"type": "replication",
"replicationSync": false,
"healthCheckDuration": 60,
"disableProxy": false,
"resetBeforeDate": "0001-01-01T00:00:00Z",
"totalDowntime": 0,
"lastOnline": "0001-01-01T00:00:00Z",
"isOnline": false,
"latency": {
"curr": 0,
"avg": 0,
"max": 0
},
"deployment_id": "",
"edge": false,
"edgeSyncBeforeExpiry": false,
"offlineCount": 0,
"bandwidth": 107374182400
}
"#;
let target: BucketTarget = serde_json::from_str(json).expect("Failed to deserialize user provided JSON to BucketTarget");
// Verify the deserialized values match the original JSON
assert_eq!(target.source_bucket, "mc-test-bucket-22139");
assert_eq!(target.endpoint, "localhost:8000");
assert_eq!(target.target_bucket, "test");
assert!(!target.secure);
assert_eq!(target.path, "auto");
assert_eq!(target.api, "s3v4");
assert_eq!(target.target_type, BucketTargetType::ReplicationService);
assert!(!target.replication_sync);
assert_eq!(target.health_check_duration, Duration::from_secs(60));
assert!(!target.disable_proxy);
assert!(!target.online);
assert!(!target.edge);
assert!(!target.edge_sync_before_expiry);
assert_eq!(target.bandwidth_limit, 107374182400); // bandwidth field mapped to bandwidth_limit
// Verify credentials
assert!(target.credentials.is_some());
let credentials = target.credentials.unwrap();
assert_eq!(credentials.access_key, "rustfsadmin");
assert_eq!(credentials.secret_key, "rustfsadmin");
// Verify latency statistics
assert_eq!(target.latency.curr, Duration::from_millis(0));
assert_eq!(target.latency.avg, Duration::from_millis(0));
assert_eq!(target.latency.max, Duration::from_millis(0));
        // Verify time fields parsing ("0001-01-01T00:00:00Z" is a valid RFC 3339 timestamp, so both fields parse to Some)
        assert!(target.reset_before_date.is_some());
        assert!(target.last_online.is_some());
println!("✅ User provided JSON successfully deserialized to BucketTarget");
}
#[test]
fn test_user_provided_json_as_bucket_targets() {
// Test wrapping the user JSON in BucketTargets structure
let json = r#"
{
"targets": [
{
"sourcebucket": "mc-test-bucket-22139",
"endpoint": "localhost:8000",
"credentials": {
"accessKey": "rustfsadmin",
"secretKey": "rustfsadmin",
"expiration": "0001-01-01T00:00:00Z"
},
"targetbucket": "test",
"secure": false,
"path": "auto",
"api": "s3v4",
"arn": "",
"type": "replication",
"region": "",
"replicationSync": false,
"storage_class": "",
"healthCheckDuration": 60,
"disableProxy": false,
"resetBeforeDate": "0001-01-01T00:00:00Z",
"reset_id": "",
"totalDowntime": 0,
"lastOnline": "0001-01-01T00:00:00Z",
"isOnline": false,
"latency": {
"curr": 0,
"avg": 0,
"max": 0
},
"deployment_id": "",
"edge": false,
"edgeSyncBeforeExpiry": false,
"offlineCount": 0,
"bandwidth": 107374182400
}
]
}
"#;
let bucket_targets: BucketTargets =
serde_json::from_str(json).expect("Failed to deserialize user provided JSON to BucketTargets");
assert_eq!(bucket_targets.targets.len(), 1);
assert!(!bucket_targets.is_empty());
let target = &bucket_targets.targets[0];
assert_eq!(target.source_bucket, "mc-test-bucket-22139");
assert_eq!(target.endpoint, "localhost:8000");
assert_eq!(target.target_bucket, "test");
assert_eq!(target.bandwidth_limit, 107374182400);
println!("✅ User provided JSON successfully deserialized to BucketTargets");
}
#[test]
fn test_bucket_target_minimal_json_with_defaults() {
// Test that BucketTarget can be deserialized with minimal JSON using defaults
let minimal_json = r#"
{
"sourcebucket": "test-source",
"endpoint": "localhost:9000",
"targetbucket": "test-target"
}
"#;
let target: BucketTarget =
serde_json::from_str(minimal_json).expect("Failed to deserialize minimal JSON to BucketTarget");
// Verify required fields
assert_eq!(target.source_bucket, "test-source");
assert_eq!(target.endpoint, "localhost:9000");
assert_eq!(target.target_bucket, "test-target");
// Verify default values
assert!(!target.secure); // bool default is false
assert_eq!(target.path, ""); // String default is empty
assert_eq!(target.api, ""); // String default is empty
assert_eq!(target.arn, ""); // String default is empty
assert_eq!(target.target_type, BucketTargetType::None); // enum default
assert_eq!(target.region, ""); // String default is empty
assert_eq!(target.bandwidth_limit, 0); // i64 default is 0
assert!(!target.replication_sync); // bool default is false
assert_eq!(target.storage_class, ""); // String default is empty
assert_eq!(target.health_check_duration, Duration::from_secs(0)); // Duration default
assert!(!target.disable_proxy); // bool default is false
assert!(target.reset_before_date.is_none()); // Option default is None
assert_eq!(target.reset_id, ""); // String default is empty
assert_eq!(target.total_downtime, Duration::from_secs(0)); // Duration default
assert!(target.last_online.is_none()); // Option default is None
assert!(!target.online); // bool default is false
assert_eq!(target.latency.curr, Duration::from_millis(0)); // LatencyStat default
assert_eq!(target.latency.avg, Duration::from_millis(0));
assert_eq!(target.latency.max, Duration::from_millis(0));
assert_eq!(target.deployment_id, ""); // String default is empty
assert!(!target.edge); // bool default is false
assert!(!target.edge_sync_before_expiry); // bool default is false
assert_eq!(target.offline_count, 0); // u64 default is 0
assert!(target.credentials.is_none()); // Option default is None
println!("✅ Minimal JSON with defaults successfully deserialized to BucketTarget");
}
#[test]
fn test_bucket_target_empty_json_with_defaults() {
// Test that BucketTarget can be deserialized with completely empty JSON using all defaults
let empty_json = r#"{}"#;
let target: BucketTarget = serde_json::from_str(empty_json).expect("Failed to deserialize empty JSON to BucketTarget");
// Verify all fields use default values
assert_eq!(target.source_bucket, "");
assert_eq!(target.endpoint, "");
assert_eq!(target.target_bucket, "");
assert!(!target.secure);
assert_eq!(target.path, "");
assert_eq!(target.api, "");
assert_eq!(target.arn, "");
assert_eq!(target.target_type, BucketTargetType::None);
assert_eq!(target.region, "");
assert_eq!(target.bandwidth_limit, 0);
assert!(!target.replication_sync);
assert_eq!(target.storage_class, "");
assert_eq!(target.health_check_duration, Duration::from_secs(0));
assert!(!target.disable_proxy);
assert!(target.reset_before_date.is_none());
assert_eq!(target.reset_id, "");
assert_eq!(target.total_downtime, Duration::from_secs(0));
assert!(target.last_online.is_none());
assert!(!target.online);
assert_eq!(target.latency.curr, Duration::from_millis(0));
assert_eq!(target.latency.avg, Duration::from_millis(0));
assert_eq!(target.latency.max, Duration::from_millis(0));
assert_eq!(target.deployment_id, "");
assert!(!target.edge);
assert!(!target.edge_sync_before_expiry);
assert_eq!(target.offline_count, 0);
assert!(target.credentials.is_none());
println!("✅ Empty JSON with all defaults successfully deserialized to BucketTarget");
}
#[test]
fn test_original_user_json_with_defaults() {
// Test the original user JSON without extra required fields
let json = r#"
{
"sourcebucket": "mc-test-bucket-22139",
"endpoint": "localhost:8000",
"credentials": {
"accessKey": "rustfsadmin",
"secretKey": "rustfsadmin",
"expiration": "0001-01-01T00:00:00Z"
},
"targetbucket": "test",
"secure": false,
"path": "auto",
"api": "s3v4",
"type": "replication",
"replicationSync": false,
"healthCheckDuration": 60,
"disableProxy": false,
"resetBeforeDate": "0001-01-01T00:00:00Z",
"totalDowntime": 0,
"lastOnline": "0001-01-01T00:00:00Z",
"isOnline": false,
"latency": {
"curr": 0,
"avg": 0,
"max": 0
},
"edge": false,
"edgeSyncBeforeExpiry": false,
"bandwidth": 107374182400
}
"#;
let target: BucketTarget = serde_json::from_str(json).expect("Failed to deserialize original user JSON to BucketTarget");
// Verify the deserialized values
assert_eq!(target.source_bucket, "mc-test-bucket-22139");
assert_eq!(target.endpoint, "localhost:8000");
assert_eq!(target.target_bucket, "test");
assert!(!target.secure);
assert_eq!(target.path, "auto");
assert_eq!(target.api, "s3v4");
assert_eq!(target.target_type, BucketTargetType::ReplicationService);
assert!(!target.replication_sync);
assert_eq!(target.health_check_duration, Duration::from_secs(60));
assert!(!target.disable_proxy);
assert!(!target.online);
assert!(!target.edge);
assert!(!target.edge_sync_before_expiry);
assert_eq!(target.bandwidth_limit, 107374182400);
// Fields not specified should use defaults
assert_eq!(target.arn, ""); // default empty string
assert_eq!(target.region, ""); // default empty string
assert_eq!(target.storage_class, ""); // default empty string
assert_eq!(target.reset_id, ""); // default empty string
assert_eq!(target.deployment_id, ""); // default empty string
assert_eq!(target.offline_count, 0); // default u64
// Verify credentials
assert!(target.credentials.is_some());
let credentials = target.credentials.unwrap();
assert_eq!(credentials.access_key, "rustfsadmin");
assert_eq!(credentials.secret_key, "rustfsadmin");
println!("✅ Original user JSON with defaults successfully deserialized to BucketTarget");
}
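    // Illustrative sketch (not part of the original change): round-trip BucketTargets through the
    // MessagePack helpers defined above (marshal_msg / unmarshal); the field values are arbitrary sample data.
    #[test]
    fn test_bucket_targets_msgpack_roundtrip() {
        let original = BucketTargets {
            targets: vec![BucketTarget {
                source_bucket: "src".to_string(),
                endpoint: "localhost:9000".to_string(),
                target_bucket: "dst".to_string(),
                target_type: BucketTargetType::ReplicationService,
                ..Default::default()
            }],
        };

        let buf = original.marshal_msg().expect("marshal_msg should succeed");
        let decoded = BucketTargets::unmarshal(&buf).expect("unmarshal should succeed");

        assert_eq!(decoded.targets.len(), 1);
        assert_eq!(decoded.targets[0].source_bucket, "src");
        assert_eq!(decoded.targets[0].target_type, BucketTargetType::ReplicationService);
    }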
}

View File

@@ -12,124 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::Result;
use rmp_serde::Serializer as rmpSerializer;
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;
mod arn;
mod bucket_target;
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct Credentials {
#[serde(rename = "accessKey")]
pub access_key: String,
#[serde(rename = "secretKey")]
pub secret_key: String,
pub session_token: Option<String>,
pub expiration: Option<chrono::DateTime<chrono::Utc>>,
}
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub enum ServiceType {
#[default]
Replication,
}
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct LatencyStat {
curr: u64, // current latency
avg: u64, // average latency
max: u64, // maximum latency
}
// Define BucketTarget struct
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct BucketTarget {
#[serde(rename = "sourcebucket")]
pub source_bucket: String,
pub endpoint: String,
pub credentials: Option<Credentials>,
#[serde(rename = "targetbucket")]
pub target_bucket: String,
secure: bool,
pub path: Option<String>,
api: Option<String>,
pub arn: Option<String>,
#[serde(rename = "type")]
pub type_: Option<String>,
pub region: Option<String>,
bandwidth_limit: Option<i64>,
#[serde(rename = "replicationSync")]
replication_sync: bool,
storage_class: Option<String>,
#[serde(rename = "healthCheckDuration")]
health_check_duration: u64,
#[serde(rename = "disableProxy")]
disable_proxy: bool,
#[serde(rename = "resetBeforeDate")]
reset_before_date: String,
reset_id: Option<String>,
#[serde(rename = "totalDowntime")]
total_downtime: u64,
last_online: Option<OffsetDateTime>,
#[serde(rename = "isOnline")]
online: bool,
latency: Option<LatencyStat>,
deployment_id: Option<String>,
edge: bool,
#[serde(rename = "edgeSyncBeforeExpiry")]
edge_sync_before_expiry: bool,
}
impl BucketTarget {
pub fn is_empty(self) -> bool {
//self.target_bucket.is_empty() && self.endpoint.is_empty() && self.arn.is_empty()
self.target_bucket.is_empty() && self.endpoint.is_empty() && self.arn.is_none()
}
}
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct BucketTargets {
pub targets: Vec<BucketTarget>,
}
impl BucketTargets {
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
let mut buf = Vec::new();
self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?;
Ok(buf)
}
pub fn unmarshal(buf: &[u8]) -> Result<Self> {
let t: BucketTargets = rmp_serde::from_slice(buf)?;
Ok(t)
}
pub fn is_empty(&self) -> bool {
if self.targets.is_empty() {
return true;
}
for target in &self.targets {
if !target.clone().is_empty() {
return false;
}
}
true
}
}
pub use arn::*;
pub use bucket_target::*;

View File

@@ -17,7 +17,8 @@ use crate::disk::{self, DiskAPI, DiskStore, WalkDirOptions};
use futures::future::join_all;
use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetacacheReader, is_io_eof};
use std::{future::Future, pin::Pin, sync::Arc};
use tokio::{spawn, sync::broadcast::Receiver as B_Receiver};
use tokio::spawn;
use tokio_util::sync::CancellationToken;
use tracing::{error, warn};
pub type AgreedFn = Box<dyn Fn(MetaCacheEntry) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
@@ -63,7 +64,7 @@ impl Clone for ListPathRawOptions {
}
}
pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -> disk::error::Result<()> {
pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> disk::error::Result<()> {
if opts.disks.is_empty() {
return Err(DiskError::other("list_path_raw: 0 drives provided"));
}
@@ -72,13 +73,13 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
let mut readers = Vec::with_capacity(opts.disks.len());
let fds = Arc::new(opts.fallback_disks.clone());
let (cancel_tx, cancel_rx) = tokio::sync::broadcast::channel::<bool>(1);
let cancel_rx = CancellationToken::new();
for disk in opts.disks.iter() {
let opdisk = disk.clone();
let opts_clone = opts.clone();
let fds_clone = fds.clone();
let mut cancel_rx_clone = cancel_rx.resubscribe();
let cancel_rx_clone = cancel_rx.clone();
let (rd, mut wr) = tokio::io::duplex(64);
readers.push(MetacacheReader::new(rd));
jobs.push(spawn(async move {
@@ -106,7 +107,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
need_fallback = true;
}
if cancel_rx_clone.try_recv().is_ok() {
if cancel_rx_clone.is_cancelled() {
// warn!("list_path_raw: cancel_rx_clone.try_recv().await.is_ok()");
return Ok(());
}
@@ -173,7 +174,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
// opts.bucket, opts.path, &current.name
// );
if rx.try_recv().is_ok() {
if rx.is_cancelled() {
return Err(DiskError::other("canceled"));
}
@@ -351,7 +352,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
if let Err(err) = revjob.await.map_err(std::io::Error::other)? {
error!("list_path_raw: revjob err {:?}", err);
let _ = cancel_tx.send(true);
cancel_rx.cancel();
return Err(err);
}

View File

@@ -44,6 +44,8 @@ pub struct GetObjectOptions {
pub internal: AdvancedGetOptions,
}
pub type StatObjectOptions = GetObjectOptions;
impl Default for GetObjectOptions {
fn default() -> Self {
Self {

View File

@@ -46,11 +46,11 @@ pub struct RemoveBucketOptions {
#[derive(Debug)]
#[allow(dead_code)]
pub struct AdvancedRemoveOptions {
replication_delete_marker: bool,
replication_status: ReplicationStatus,
replication_mtime: OffsetDateTime,
replication_request: bool,
replication_validity_check: bool,
pub replication_delete_marker: bool,
pub replication_status: ReplicationStatus,
pub replication_mtime: Option<OffsetDateTime>,
pub replication_request: bool,
pub replication_validity_check: bool,
}
impl Default for AdvancedRemoveOptions {
@@ -58,7 +58,7 @@ impl Default for AdvancedRemoveOptions {
Self {
replication_delete_marker: false,
replication_status: ReplicationStatus::from_static(ReplicationStatus::PENDING),
replication_mtime: OffsetDateTime::now_utc(),
replication_mtime: None,
replication_request: false,
replication_validity_check: false,
}
@@ -140,8 +140,7 @@ impl TransitionClient {
}
pub async fn remove_object(&self, bucket_name: &str, object_name: &str, opts: RemoveObjectOptions) -> Option<std::io::Error> {
let res = self.remove_object_inner(bucket_name, object_name, opts).await.expect("err");
res.err
self.remove_object_inner(bucket_name, object_name, opts).await.err()
}
pub async fn remove_object_inner(

View File

@@ -23,6 +23,7 @@ use http::{HeaderMap, HeaderValue};
use rustfs_utils::EMPTY_STRING_SHA256_HASH;
use std::{collections::HashMap, str::FromStr};
use tokio::io::BufReader;
use tracing::warn;
use uuid::Uuid;
use crate::client::{
@@ -30,7 +31,10 @@ use crate::client::{
api_get_options::GetObjectOptions,
transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info},
};
use s3s::header::{X_AMZ_DELETE_MARKER, X_AMZ_VERSION_ID};
use s3s::{
dto::VersioningConfiguration,
header::{X_AMZ_DELETE_MARKER, X_AMZ_VERSION_ID},
};
impl TransitionClient {
pub async fn bucket_exists(&self, bucket_name: &str) -> Result<bool, std::io::Error> {
@@ -58,8 +62,14 @@ impl TransitionClient {
.await;
if let Ok(resp) = resp {
if resp.status() != http::StatusCode::OK {
return Ok(false);
}
let b = resp.body().bytes().expect("err").to_vec();
let resperr = http_resp_to_error_response(&resp, b, bucket_name, "");
warn!("bucket exists, resp: {:?}, resperr: {:?}", resp, resperr);
/*if to_error_response(resperr).code == "NoSuchBucket" {
return Ok(false);
}
@@ -70,6 +80,46 @@ impl TransitionClient {
Ok(true)
}
pub async fn get_bucket_versioning(&self, bucket_name: &str) -> Result<VersioningConfiguration, std::io::Error> {
let mut query_values = HashMap::new();
query_values.insert("versioning".to_string(), "".to_string());
let resp = self
.execute_method(
http::Method::GET,
&mut RequestMetadata {
bucket_name: bucket_name.to_string(),
object_name: "".to_string(),
query_values,
custom_header: HeaderMap::new(),
content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
content_md5_base64: "".to_string(),
content_body: ReaderImpl::Body(Bytes::new()),
content_length: 0,
stream_sha256: false,
trailer: HeaderMap::new(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
},
)
.await;
match resp {
Ok(resp) => {
let b = resp.body().bytes().expect("get bucket versioning err").to_vec();
let resperr = http_resp_to_error_response(&resp, b, bucket_name, "");
warn!("get bucket versioning, resp: {:?}, resperr: {:?}", resp, resperr);
Ok(VersioningConfiguration::default())
}
Err(err) => Err(std::io::Error::other(err)),
}
}
pub async fn stat_object(
&self,
bucket_name: &str,
@@ -131,24 +181,20 @@ impl TransitionClient {
..Default::default()
};
return Ok(ObjectInfo {
version_id: match Uuid::from_str(h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap()) {
Ok(v) => v,
Err(e) => {
return Err(std::io::Error::other(e));
}
},
version_id: h
.get(X_AMZ_VERSION_ID)
.and_then(|v| v.to_str().ok())
.and_then(|s| Uuid::from_str(s).ok()),
is_delete_marker: delete_marker,
..Default::default()
});
//err_resp
}
return Ok(ObjectInfo {
version_id: match Uuid::from_str(h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap()) {
Ok(v) => v,
Err(e) => {
return Err(std::io::Error::other(e));
}
},
version_id: h
.get(X_AMZ_VERSION_ID)
.and_then(|v| v.to_str().ok())
.and_then(|s| Uuid::from_str(s).ok()),
is_delete_marker: delete_marker,
replication_ready: replication_ready,
..Default::default()

View File

@@ -36,6 +36,7 @@ use s3s::S3ErrorCode;
use super::constants::UNSIGNED_PAYLOAD;
use super::credentials::SignatureType;
#[derive(Debug, Clone)]
pub struct BucketLocationCache {
items: HashMap<String, String>,
}

View File

@@ -89,6 +89,7 @@ pub enum ReaderImpl {
pub type ReadCloser = BufReader<Cursor<Vec<u8>>>;
#[derive(Debug)]
pub struct TransitionClient {
pub endpoint_url: Url,
pub creds_provider: Arc<Mutex<Credentials<Static>>>,
@@ -809,6 +810,7 @@ impl TransitionCore {
}
}
#[derive(Debug, Clone, Default)]
pub struct PutObjectPartOptions {
pub md5_base64: String,
pub sha256_hex: String,
@@ -820,23 +822,23 @@ pub struct PutObjectPartOptions {
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ObjectInfo {
pub etag: String,
pub etag: Option<String>,
pub name: String,
pub mod_time: OffsetDateTime,
pub size: usize,
pub mod_time: Option<OffsetDateTime>,
pub size: i64,
pub content_type: Option<String>,
#[serde(skip)]
pub metadata: HeaderMap,
pub user_metadata: HashMap<String, String>,
pub user_tags: String,
pub user_tag_count: i64,
pub user_tag_count: usize,
#[serde(skip)]
pub owner: Owner,
//pub grant: Vec<Grant>,
pub storage_class: String,
pub is_latest: bool,
pub is_delete_marker: bool,
pub version_id: Uuid,
pub version_id: Option<Uuid>,
#[serde(skip, default = "replication_status_default")]
pub replication_status: ReplicationStatus,
@@ -862,9 +864,9 @@ fn replication_status_default() -> ReplicationStatus {
impl Default for ObjectInfo {
fn default() -> Self {
Self {
etag: "".to_string(),
etag: None,
name: "".to_string(),
mod_time: OffsetDateTime::now_utc(),
mod_time: None,
size: 0,
content_type: None,
metadata: HeaderMap::new(),
@@ -875,7 +877,7 @@ impl Default for ObjectInfo {
storage_class: "".to_string(),
is_latest: false,
is_delete_marker: false,
version_id: Uuid::nil(),
version_id: None,
replication_status: ReplicationStatus::from_static(ReplicationStatus::PENDING),
replication_ready: false,
expiration: OffsetDateTime::now_utc(),

File diff suppressed because it is too large

View File

@@ -1,69 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use chrono::{DateTime, Utc};
// Representation of the replication status
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum StatusType {
Pending,
Completed,
CompletedLegacy,
Failed,
Replica,
}
// Representation of version purge status type (customize as needed)
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum VersionPurgeStatusType {
Pending,
Completed,
Failed,
}
// ReplicationState struct definition
#[derive(Debug, Clone)]
pub struct ReplicationState {
// Timestamp when the last replica update was received
pub replica_time_stamp: DateTime<Utc>,
// Replica status
pub replica_status: StatusType,
// Represents DeleteMarker replication state
pub delete_marker: bool,
// Timestamp when the last replication activity happened
pub replication_time_stamp: DateTime<Utc>,
// Stringified representation of all replication activity
pub replication_status_internal: String,
// Stringified representation of all version purge statuses
// Example format: "arn1=PENDING;arn2=COMPLETED;"
pub version_purge_status_internal: String,
// Stringified representation of replication decision for each target
pub replicate_decision_str: String,
// Map of ARN -> replication status for ongoing replication activity
pub targets: HashMap<String, StatusType>,
// Map of ARN -> VersionPurgeStatus for all the targets
pub purge_targets: HashMap<String, VersionPurgeStatusType>,
// Map of ARN -> stringified reset id and timestamp for all the targets
pub reset_statuses_map: HashMap<String, String>,
}

View File

@@ -1,890 +0,0 @@
#![allow(unused_variables)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
use crate::{
StorageAPI,
bucket::{metadata_sys, target::BucketTarget},
endpoints::Node,
rpc::{PeerS3Client, RemotePeerS3Client},
};
use crate::{
bucket::{self, target::BucketTargets},
new_object_layer_fn, store_api,
};
//use tokio::sync::RwLock;
use aws_sdk_s3::Client as S3Client;
use chrono::Utc;
use lazy_static::lazy_static;
use std::sync::Arc;
use std::{
collections::HashMap,
time::{Duration, SystemTime},
};
use thiserror::Error;
use tokio::sync::RwLock;
pub struct TClient {
pub s3cli: S3Client,
pub remote_peer_client: RemotePeerS3Client,
pub arn: String,
}
impl TClient {
pub fn new(s3cli: S3Client, remote_peer_client: RemotePeerS3Client, arn: String) -> Self {
TClient {
s3cli,
remote_peer_client,
arn,
}
}
}
pub struct EpHealth {
pub endpoint: String,
pub scheme: String,
pub online: bool,
pub last_online: SystemTime,
pub last_hc_at: SystemTime,
pub offline_duration: Duration,
pub latency: LatencyStat, // Assuming LatencyStat is a custom struct
}
impl EpHealth {
pub fn new(
endpoint: String,
scheme: String,
online: bool,
last_online: SystemTime,
last_hc_at: SystemTime,
offline_duration: Duration,
latency: LatencyStat,
) -> Self {
EpHealth {
endpoint,
scheme,
online,
last_online,
last_hc_at,
offline_duration,
latency,
}
}
}
pub struct LatencyStat {
// Define the fields of LatencyStat as per your requirements
}
pub struct ArnTarget {
client: TargetClient,
last_refresh: chrono::DateTime<Utc>,
}
impl ArnTarget {
pub fn new(bucket: String, endpoint: String, ak: String, sk: String) -> Self {
Self {
client: TargetClient {
bucket,
storage_class: "STANDARD".to_string(),
disable_proxy: false,
health_check_duration: Duration::from_secs(100),
endpoint,
reset_id: "0".to_string(),
replicate_sync: false,
secure: false,
arn: "".to_string(),
client: reqwest::Client::new(),
ak,
sk,
},
last_refresh: Utc::now(),
}
}
}
// pub fn get_s3client_from_para(
// ak: &str,
// sk: &str,
// url: &str,
// _region: &str,
// ) -> Result<S3Client, Box<dyn Error>> {
// let credentials = Credentials::new(ak, sk, None, None, "");
// let region = Region::new("us-east-1".to_string());
// let config = Config::builder()
// .region(region)
// .endpoint_url(url.to_string())
// .credentials_provider(credentials)
// .behavior_version(BehaviorVersion::latest()) // Adjust as necessary
// .build();
// Ok(S3Client::from_conf(config))
// }
pub struct BucketTargetSys {
arn_remote_map: Arc<RwLock<HashMap<String, ArnTarget>>>,
targets_map: Arc<RwLock<HashMap<String, Vec<bucket::target::BucketTarget>>>>,
hc: HashMap<String, EpHealth>,
//store:Option<Arc<ecstore::store::ECStore>>,
}
lazy_static! {
pub static ref GLOBAL_Bucket_Target_Sys: std::sync::OnceLock<BucketTargetSys> = BucketTargetSys::new().into();
}
//#[derive(Debug)]
// pub enum SetTargetError {
// NotFound,
// }
pub async fn get_bucket_target_client(bucket: &str, arn: &str) -> Result<TargetClient, SetTargetError> {
if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() {
sys.get_remote_target_client2(arn).await
} else {
Err(SetTargetError::TargetNotFound(bucket.to_string()))
}
}
#[derive(Debug)]
pub struct BucketRemoteTargetNotFound {
pub bucket: String,
}
pub async fn init_bucket_targets(bucket: &str, meta: Arc<bucket::metadata::BucketMetadata>) {
println!("140 {bucket}");
if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() {
if let Some(tgts) = meta.bucket_target_config.clone() {
for tgt in tgts.targets {
warn!("ak and sk is:{:?}", tgt.credentials);
let _ = sys.set_target(bucket, &tgt, false, true).await;
//sys.targets_map.
}
}
}
}
pub async fn remove_bucket_target(bucket: &str, arn_str: &str) {
if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() {
let _ = sys.remove_target(bucket, arn_str).await;
}
}
pub async fn list_bucket_targets(bucket: &str) -> Result<BucketTargets, BucketRemoteTargetNotFound> {
if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() {
sys.list_bucket_targets(bucket).await
} else {
Err(BucketRemoteTargetNotFound {
bucket: bucket.to_string(),
})
}
}
impl Default for BucketTargetSys {
fn default() -> Self {
Self::new()
}
}
impl BucketTargetSys {
pub fn new() -> Self {
BucketTargetSys {
arn_remote_map: Arc::new(RwLock::new(HashMap::new())),
targets_map: Arc::new(RwLock::new(HashMap::new())),
hc: HashMap::new(),
}
}
pub async fn list_bucket_targets(&self, bucket: &str) -> Result<BucketTargets, BucketRemoteTargetNotFound> {
let targets_map = self.targets_map.read().await;
if let Some(targets) = targets_map.get(bucket) {
Ok(BucketTargets {
targets: targets.clone(),
})
} else {
Err(BucketRemoteTargetNotFound {
bucket: bucket.to_string(),
})
}
}
pub async fn list_targets(&self, bucket: Option<&str>, _arn_type: Option<&str>) -> Vec<BucketTarget> {
let _ = _arn_type;
//let health_stats = self.health_stats();
let mut targets = Vec::new();
if let Some(bucket_name) = bucket {
if let Ok(ts) = self.list_bucket_targets(bucket_name).await {
for t in ts.targets {
//if arn_type.map_or(true, |arn| t.target_type == arn) {
//if let Some(hs) = health_stats.get(&t.url().host) {
// t.total_downtime = hs.offline_duration;
// t.online = hs.online;
// t.last_online = hs.last_online;
// t.latency = LatencyStat {
// curr: hs.latency.curr,
// avg: hs.latency.avg,
// max: hs.latency.peak,
// };
//}
targets.push(t.clone());
//}
}
}
return targets;
}
// Locking and iterating over all targets in the system
let targets_map = self.targets_map.read().await;
for tgts in targets_map.values() {
for t in tgts {
//if arn_type.map_or(true, |arn| t.target_type == arn) {
// if let Some(hs) = health_stats.get(&t.url().host) {
// t.total_downtime = hs.offline_duration;
// t.online = hs.online;
// t.last_online = hs.last_online;
// t.latency = LatencyStat {
// curr: hs.latency.curr,
// avg: hs.latency.avg,
// max: hs.latency.peak,
// };
// }
targets.push(t.clone());
//}
}
}
targets
}
pub async fn remove_target(&self, bucket: &str, arn_str: &str) -> Result<(), SetTargetError> {
//to do need lock;
let mut targets_map = self.targets_map.write().await;
let tgts = targets_map.get(bucket);
let mut arn_remotes_map = self.arn_remote_map.write().await;
if tgts.is_none() {
//Err(SetTargetError::TargetNotFound(bucket.to_string()));
return Ok(());
}
        let tgts = tgts.unwrap(); // safe to unwrap after the is_none() check above
let mut targets = Vec::with_capacity(tgts.len());
let mut found = false;
        // Iterate over the targets and keep the ones whose ARN does not match
        for tgt in tgts {
            if tgt.arn != Some(arn_str.to_string()) {
                targets.push(tgt.clone()); // keep the non-matching entry
            } else {
                found = true; // found the matching ARN
            }
        }
        // If no matching ARN was found, return early
        if !found {
            return Ok(());
        }
        // Update targets_map
targets_map.insert(bucket.to_string(), targets);
arn_remotes_map.remove(arn_str);
let targets = self.list_targets(Some(bucket), None).await;
println!("targets is {}", targets.len());
match serde_json::to_vec(&targets) {
Ok(json) => {
let _ = metadata_sys::update(bucket, "bucket-targets.json", json).await;
}
Err(e) => {
println!("序列化失败{e}");
}
}
Ok(())
}
pub async fn get_remote_arn(&self, bucket: &str, target: Option<&BucketTarget>, depl_id: &str) -> (Option<String>, bool) {
if target.is_none() {
return (None, false);
}
let target = target.unwrap();
let targets_map = self.targets_map.read().await;
        // Acquire the lock to access arn_remote_map
let mut _arn_remotes_map = self.arn_remote_map.read().await;
if let Some(tgts) = targets_map.get(bucket) {
for tgt in tgts {
if tgt.type_ == target.type_
&& tgt.target_bucket == target.target_bucket
&& tgt.endpoint == target.endpoint
&& tgt.credentials.as_ref().unwrap().access_key == target.credentials.as_ref().unwrap().access_key
{
return (tgt.arn.clone(), true);
}
}
}
// if !target.type_.is_valid() {
// return (None, false);
// }
println!("generate_arn");
(Some(generate_arn(target.clone(), depl_id.to_string())), false)
}
pub async fn get_remote_target_client2(&self, arn: &str) -> Result<TargetClient, SetTargetError> {
let map = self.arn_remote_map.read().await;
info!("get remote target client and arn is: {}", arn);
if let Some(value) = map.get(arn) {
let mut x = value.client.clone();
x.arn = arn.to_string();
Ok(x)
} else {
error!("not find target");
Err(SetTargetError::TargetNotFound(arn.to_string()))
}
}
// pub async fn get_remote_target_client(&self, _tgt: &BucketTarget) -> Result<TargetClient, SetTargetError> {
// // Mocked implementation for obtaining a remote client
// let tcli = TargetClient {
// bucket: _tgt.target_bucket.clone(),
// storage_class: "STANDARD".to_string(),
// disable_proxy: false,
// health_check_duration: Duration::from_secs(100),
// endpoint: _tgt.endpoint.clone(),
// reset_id: "0".to_string(),
// replicate_sync: false,
// secure: false,
// arn: "".to_string(),
// client: reqwest::Client::new(),
// ak: _tgt.
// };
// Ok(tcli)
// }
// pub async fn get_remote_target_client_with_bucket(&self, _bucket: String) -> Result<TargetClient, SetTargetError> {
// // Mocked implementation for obtaining a remote client
// let tcli = TargetClient {
// bucket: _tgt.target_bucket.clone(),
// storage_class: "STANDARD".to_string(),
// disable_proxy: false,
// health_check_duration: Duration::from_secs(100),
// endpoint: _tgt.endpoint.clone(),
// reset_id: "0".to_string(),
// replicate_sync: false,
// secure: false,
// arn: "".to_string(),
// client: reqwest::Client::new(),
// };
// Ok(tcli)
// }
async fn local_is_bucket_versioned(&self, _bucket: &str) -> bool {
let Some(store) = new_object_layer_fn() else {
return false;
};
//store.get_bucket_info(bucket, opts)
// let binfo:BucketInfo = store
// .get_bucket_info(bucket, &ecstore::store_api::BucketOptions::default()).await;
match store.get_bucket_info(_bucket, &store_api::BucketOptions::default()).await {
Ok(info) => {
println!("Bucket Info: {info:?}");
info.versioning
}
Err(err) => {
eprintln!("Error: {err:?}");
false
}
}
}
async fn is_bucket_versioned(&self, _bucket: &str) -> bool {
true
// let url_str = "http://127.0.0.1:9001";
        // // Convert to a Url
// let parsed_url = url::Url::parse(url_str).unwrap();
// let node = Node {
// url: parsed_url,
// pools: vec![],
// is_local: false,
// grid_host: "".to_string(),
// };
// let cli = ecstore::peer::RemotePeerS3Client::new(Some(node), None);
// match cli.get_bucket_info(_bucket, &ecstore::store_api::BucketOptions::default()).await
// {
// Ok(info) => {
// println!("Bucket Info: {:?}", info);
// info.versioning
// }
// Err(err) => {
// eprintln!("Error: {:?}", err);
// return false;
// }
// }
}
pub async fn set_target(&self, bucket: &str, tgt: &BucketTarget, update: bool, fromdisk: bool) -> Result<(), SetTargetError> {
// if !tgt.type_.is_valid() && !update {
// return Err(SetTargetError::InvalidTargetType(bucket.to_string()));
// }
//let client = self.get_remote_target_client(tgt).await?;
if tgt.type_ == Some("replication".to_string()) && !fromdisk {
let versioning_config = self.local_is_bucket_versioned(bucket).await;
if !versioning_config {
// println!("111111111");
return Err(SetTargetError::TargetNotVersioned(bucket.to_string()));
}
}
let url_str = format!("http://{}", tgt.endpoint);
tracing::debug!("remote target endpoint url: {url_str}");
// Convert the endpoint string into a Url
let parsed_url = url::Url::parse(&url_str).unwrap();
let node = Node {
url: parsed_url,
pools: vec![],
is_local: false,
grid_host: "".to_string(),
};
let cli = RemotePeerS3Client::new(Some(node), None);
match cli
.get_bucket_info(&tgt.target_bucket, &store_api::BucketOptions::default())
.await
{
Ok(info) => {
println!("Bucket Info: {info:?}");
if !info.versioning {
return Err(SetTargetError::TargetNotVersioned(tgt.target_bucket.to_string()));
}
}
Err(err) => {
println!("remote bucket 369 is:{}", tgt.target_bucket);
eprintln!("Error: {err:?}");
return Err(SetTargetError::SourceNotVersioned(tgt.target_bucket.to_string()));
}
}
//if tgt.target_type == BucketTargetType::ReplicationService {
// Check if target is a rustfs server and alive
// let hc_result = tokio::time::timeout(Duration::from_secs(3), client.health_check(&tgt.endpoint)).await;
// match hc_result {
// Ok(Ok(true)) => {} // Server is alive
// Ok(Ok(false)) | Ok(Err(_)) | Err(_) => {
// return Err(SetTargetError::HealthCheckFailed(tgt.target_bucket.clone()));
// }
// }
// Lock and update target maps
let mut targets_map = self.targets_map.write().await;
let mut arn_remotes_map = self.arn_remote_map.write().await;
let targets = targets_map.entry(bucket.to_string()).or_default();
let mut found = false;
for existing_target in targets.iter_mut() {
println!("418 exist:{}", existing_target.source_bucket.clone());
if existing_target.type_ == tgt.type_ {
if existing_target.arn == tgt.arn {
if !update {
return Err(SetTargetError::TargetAlreadyExists(existing_target.target_bucket.clone()));
}
*existing_target = tgt.clone();
found = true;
break;
}
if existing_target.endpoint == tgt.endpoint {
println!("endpoint is same:{}", tgt.endpoint.clone());
return Err(SetTargetError::TargetAlreadyExists(existing_target.target_bucket.clone()));
}
}
}
if !found && !update {
println!("437 exist:{}", tgt.arn.clone().unwrap());
targets.push(tgt.clone());
}
let creds = tgt.credentials.clone().unwrap();
let arntgt = ArnTarget::new(tgt.target_bucket.clone(), tgt.endpoint.clone(), creds.access_key, creds.secret_key);
arn_remotes_map.insert(tgt.arn.clone().unwrap(), arntgt);
//self.update_bandwidth_limit(bucket, &tgt.arn, tgt.bandwidth_limit).await;
Ok(())
}
}
#[derive(Clone)]
pub struct TargetClient {
pub client: reqwest::Client, // Using reqwest HTTP client
pub health_check_duration: Duration,
pub bucket: String, // Remote bucket target
pub replicate_sync: bool,
pub storage_class: String, // Storage class on remote
pub disable_proxy: bool,
pub arn: String, // ARN to uniquely identify remote target
pub reset_id: String,
pub endpoint: String,
pub secure: bool,
pub ak: String,
pub sk: String,
}
#[allow(clippy::too_many_arguments)]
impl TargetClient {
#[allow(clippy::too_many_arguments)]
pub fn new(
client: reqwest::Client,
health_check_duration: Duration,
bucket: String,
replicate_sync: bool,
storage_class: String,
disable_proxy: bool,
arn: String,
reset_id: String,
endpoint: String,
secure: bool,
ak: String,
sk: String,
) -> Self {
TargetClient {
client,
health_check_duration,
bucket,
replicate_sync,
storage_class,
disable_proxy,
arn,
reset_id,
endpoint,
secure,
ak,
sk,
}
}
pub async fn bucket_exists(&self, _bucket: &str) -> Result<bool, SetTargetError> {
Ok(true) // Mocked implementation
}
}
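// Illustrative sketch (not part of this change): constructing a `TargetClient`
// by hand with the constructor above. All literal values below (bucket name,
// endpoint, ARN, keys) are hypothetical placeholders.
#[cfg(test)]
mod target_client_example {
    use super::TargetClient;
    use std::time::Duration;

    #[test]
    fn build_target_client() {
        let tc = TargetClient::new(
            reqwest::Client::new(),
            Duration::from_secs(5),       // health_check_duration
            "replica-bucket".to_string(), // remote bucket
            false,                        // replicate_sync
            "STANDARD".to_string(),       // storage class on the remote
            false,                        // disable_proxy
            "arn:rustfs:replication:us-east-1:dep-0001:replica-bucket".to_string(),
            "0".to_string(),              // reset_id
            "127.0.0.1:9000".to_string(), // endpoint
            false,                        // secure
            "access-key".to_string(),
            "secret-key".to_string(),
        );
        assert_eq!(tc.bucket, "replica-bucket");
        assert!(!tc.secure);
    }
}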
use tracing::{error, info, warn};
use uuid::Uuid;
#[derive(Debug, Clone)]
pub struct VersioningConfig {
pub enabled: bool,
}
impl VersioningConfig {
pub fn is_enabled(&self) -> bool {
self.enabled
}
}
#[derive(Debug)]
pub struct Client;
impl Client {
pub async fn bucket_exists(&self, _bucket: &str) -> Result<bool, SetTargetError> {
Ok(true) // Mocked implementation
}
pub async fn get_bucket_versioning(&self, _bucket: &str) -> Result<VersioningConfig, SetTargetError> {
Ok(VersioningConfig { enabled: true })
}
pub async fn health_check(&self, _endpoint: &str) -> Result<bool, SetTargetError> {
Ok(true) // Mocked health check
}
}
#[derive(Debug, PartialEq)]
pub struct ServiceType(String);
impl ServiceType {
pub fn is_valid(&self) -> bool {
!self.0.is_empty() // add more specific validation logic here as needed
}
}
#[derive(Debug, PartialEq)]
pub struct ARN {
pub arn_type: String,
pub id: String,
pub region: String,
pub bucket: String,
}
impl ARN {
/// Check whether the ARN is empty
pub fn is_empty(&self) -> bool {
//!self.arn_type.is_valid()
false
}
// Parse an ARN from a string
pub fn parse(s: &str) -> Result<Self, String> {
// The ARN must have the form arn:rustfs:<Type>:<REGION>:<ID>:<remote-bucket>
if !s.starts_with("arn:rustfs:") {
return Err(format!("Invalid ARN {s}"));
}
let tokens: Vec<&str> = s.split(':').collect();
if tokens.len() != 6 || tokens[4].is_empty() || tokens[5].is_empty() {
return Err(format!("Invalid ARN {s}"));
}
Ok(ARN {
arn_type: tokens[2].to_string(),
region: tokens[3].to_string(),
id: tokens[4].to_string(),
bucket: tokens[5].to_string(),
})
}
}
// Implement the `Display` trait so an ARN can be rendered directly with `format!` or `{}`
impl std::fmt::Display for ARN {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "arn:rustfs:{}:{}:{}:{}", self.arn_type, self.region, self.id, self.bucket)
}
}
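// Illustrative sketch (not part of this change): `ARN::parse` and the `Display`
// impl round-trip the documented format arn:rustfs:<Type>:<REGION>:<ID>:<remote-bucket>.
// The deployment id and bucket name below are hypothetical.
#[cfg(test)]
mod arn_format_example {
    use super::ARN;

    #[test]
    fn parse_and_display_round_trip() {
        let s = "arn:rustfs:replication:us-east-1:dep-0001:replica-bucket";
        let arn = ARN::parse(s).expect("well-formed ARN should parse");
        assert_eq!(arn.arn_type, "replication");
        assert_eq!(arn.region, "us-east-1");
        assert_eq!(arn.id, "dep-0001");
        assert_eq!(arn.bucket, "replica-bucket");
        assert_eq!(arn.to_string(), s);
    }

    #[test]
    fn rejects_malformed_input() {
        // Wrong prefix, and an empty <ID> token, are both rejected.
        assert!(ARN::parse("arn:aws:s3:::bucket").is_err());
        assert!(ARN::parse("arn:rustfs:replication:us-east-1::bucket").is_err());
    }
}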
fn must_get_uuid() -> String {
Uuid::new_v4().to_string()
// match Uuid::new_v4() {
// Ok(uuid) => uuid.to_string(),
// Err(err) => {
// error!("Critical error: {}", err);
// panic!("Failed to generate UUID: {}", err); // Ensures similar behavior as Go's logger.CriticalIf
// }
// }
}
fn generate_arn(target: BucketTarget, depl_id: String) -> String {
let mut uuid: String = depl_id;
if uuid.is_empty() {
uuid = must_get_uuid();
}
let arn = ARN {
arn_type: target.type_.unwrap(),
id: uuid,
region: "us-east-1".to_string(),
bucket: target.target_bucket,
};
arn.to_string()
}
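// Illustrative sketch (not part of this change): when `generate_arn` is given
// an empty deployment id it falls back to `must_get_uuid`, so the <ID> slot of
// the resulting ARN carries a freshly generated UUID; the region is always
// "us-east-1". This only exercises the UUID fallback.
#[cfg(test)]
mod generate_arn_fallback_example {
    #[test]
    fn empty_deployment_id_falls_back_to_uuid() {
        let id = super::must_get_uuid();
        assert!(uuid::Uuid::parse_str(&id).is_ok());
    }
}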
// use std::collections::HashMap;
// use std::sync::{Arc, Mutex, RwLock};
// use std::time::Duration;
// use tokio::time::timeout;
// use tokio::sync::RwLock as AsyncRwLock;
// use serde::Deserialize;
// use thiserror::Error;
// #[derive(Debug, Clone, PartialEq)]
// pub enum BucketTargetType {
// ReplicationService,
// // Add other service types as needed
// }
// impl BucketTargetType {
// pub fn is_valid(&self) -> bool {
// matches!(self, BucketTargetType::ReplicationService)
// }
// }
// #[derive(Debug, Clone)]
// pub struct BucketTarget {
// pub arn: String,
// pub target_bucket: String,
// pub endpoint: String,
// pub credentials: Credentials,
// pub secure: bool,
// pub bandwidth_limit: Option<u64>,
// pub target_type: BucketTargetType,
// }
// #[derive(Debug, Clone)]
// pub struct Credentials {
// pub access_key: String,
// pub secret_key: String,
// }
// #[derive(Debug)]
// pub struct BucketTargetSys {
// targets_map: Arc<RwLock<HashMap<String, Vec<BucketTarget>>>>,
// arn_remotes_map: Arc<Mutex<HashMap<String, ArnTarget>>>,
// }
// impl BucketTargetSys {
// pub fn new() -> Self {
// Self {
// targets_map: Arc::new(RwLock::new(HashMap::new())),
// arn_remotes_map: Arc::new(Mutex::new(HashMap::new())),
// }
// }
// pub async fn set_target(
// &self,
// bucket: &str,
// tgt: &BucketTarget,
// update: bool,
// ) -> Result<(), SetTargetError> {
// if !tgt.target_type.is_valid() && !update {
// return Err(SetTargetError::InvalidTargetType(bucket.to_string()));
// }
// let client = self.get_remote_target_client(tgt).await?;
// // Validate if target credentials are OK
// let exists = client.bucket_exists(&tgt.target_bucket).await?;
// if !exists {
// return Err(SetTargetError::TargetNotFound(tgt.target_bucket.clone()));
// }
// if tgt.target_type == BucketTargetType::ReplicationService {
// if !self.is_bucket_versioned(bucket).await {
// return Err(SetTargetError::SourceNotVersioned(bucket.to_string()));
// }
// let versioning_config = client.get_bucket_versioning(&tgt.target_bucket).await?;
// if !versioning_config.is_enabled() {
// return Err(SetTargetError::TargetNotVersioned(tgt.target_bucket.clone()));
// }
// }
// // Check if target is a rustfs server and alive
// let hc_result = timeout(Duration::from_secs(3), client.health_check(&tgt.endpoint)).await;
// match hc_result {
// Ok(Ok(true)) => {} // Server is alive
// Ok(Ok(false)) | Ok(Err(_)) | Err(_) => {
// return Err(SetTargetError::HealthCheckFailed(tgt.target_bucket.clone()));
// }
// }
// // Lock and update target maps
// let mut targets_map = self.targets_map.write().await;
// let mut arn_remotes_map = self.arn_remotes_map.lock().unwrap();
// let targets = targets_map.entry(bucket.to_string()).or_default();
// let mut found = false;
// for existing_target in targets.iter_mut() {
// if existing_target.target_type == tgt.target_type {
// if existing_target.arn == tgt.arn {
// if !update {
// return Err(SetTargetError::TargetAlreadyExists(existing_target.target_bucket.clone()));
// }
// *existing_target = tgt.clone();
// found = true;
// break;
// }
// if existing_target.endpoint == tgt.endpoint {
// return Err(SetTargetError::TargetAlreadyExists(existing_target.target_bucket.clone()));
// }
// }
// }
// if !found && !update {
// targets.push(tgt.clone());
// }
// arn_remotes_map.insert(tgt.arn.clone(), ArnTarget { client });
// self.update_bandwidth_limit(bucket, &tgt.arn, tgt.bandwidth_limit).await;
// Ok(())
// }
// async fn get_remote_target_client(&self, tgt: &BucketTarget) -> Result<Client, SetTargetError> {
// // Mocked implementation for obtaining a remote client
// Ok(Client {})
// }
// async fn is_bucket_versioned(&self, bucket: &str) -> bool {
// // Mocked implementation for checking if a bucket is versioned
// true
// }
// async fn update_bandwidth_limit(
// &self,
// bucket: &str,
// arn: &str,
// limit: Option<u64>,
// ) {
// // Mocked implementation for updating bandwidth limits
// }
// }
// #[derive(Debug)]
// pub struct Client;
// impl Client {
// pub async fn bucket_exists(&self, _bucket: &str) -> Result<bool, SetTargetError> {
// Ok(true) // Mocked implementation
// }
// pub async fn get_bucket_versioning(
// &self,
// _bucket: &str,
// ) -> Result<VersioningConfig, SetTargetError> {
// Ok(VersioningConfig { enabled: true })
// }
// pub async fn health_check(&self, _endpoint: &str) -> Result<bool, SetTargetError> {
// Ok(true) // Mocked health check
// }
// }
// #[derive(Debug, Clone)]
// pub struct ArnTarget {
// pub client: Client,
// }
#[derive(Debug, Error)]
pub enum SetTargetError {
#[error("Invalid target type for bucket {0}")]
InvalidTargetType(String),
#[error("Target bucket {0} not found")]
TargetNotFound(String),
#[error("Source bucket {0} is not versioned")]
SourceNotVersioned(String),
#[error("Target bucket {0} is not versioned")]
TargetNotVersioned(String),
#[error("Health check failed for bucket {0}")]
HealthCheckFailed(String),
#[error("Target bucket {0} already exists")]
TargetAlreadyExists(String),
}
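// Illustrative sketch (not part of this change): the thiserror-derived Display
// messages above are what callers see when target registration fails. The
// bucket name is a hypothetical placeholder.
#[cfg(test)]
mod set_target_error_example {
    use super::SetTargetError;

    #[test]
    fn error_messages_render_the_bucket_name() {
        let err = SetTargetError::TargetNotVersioned("replica-bucket".to_string());
        assert_eq!(err.to_string(), "Target bucket replica-bucket is not versioned");

        let err = SetTargetError::TargetNotFound("replica-bucket".to_string());
        assert_eq!(err.to_string(), "Target bucket replica-bucket not found");
    }
}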

View File

@@ -1,14 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

View File

@@ -1,16 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod bucket_replication;
pub mod bucket_targets;

View File

@@ -88,7 +88,7 @@ impl LocalUsageSnapshot {
/// Build the snapshot file name `<disk-id>.json`.
pub fn snapshot_file_name(disk_id: &str) -> String {
format!("{}.json", disk_id)
format!("{disk_id}.json")
}
/// Build the object path relative to `RUSTFS_META_BUCKET`, e.g. `datausage/<disk-id>.json`.

View File

@@ -2349,12 +2349,7 @@ impl DiskAPI for LocalDisk {
self.delete_file(&volume_dir, &xl_path, true, false).await
}
#[tracing::instrument(level = "debug", skip(self))]
async fn delete_versions(
&self,
volume: &str,
versions: Vec<FileInfoVersions>,
_opts: DeleteOptions,
) -> Result<Vec<Option<Error>>> {
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, _opts: DeleteOptions) -> Vec<Option<Error>> {
let mut errs = Vec::with_capacity(versions.len());
for _ in 0..versions.len() {
errs.push(None);
@@ -2368,7 +2363,7 @@ impl DiskAPI for LocalDisk {
}
}
Ok(errs)
errs
}
#[tracing::instrument(skip(self))]

View File

@@ -201,12 +201,7 @@ impl DiskAPI for Disk {
}
#[tracing::instrument(skip(self))]
async fn delete_versions(
&self,
volume: &str,
versions: Vec<FileInfoVersions>,
opts: DeleteOptions,
) -> Result<Vec<Option<Error>>> {
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, opts: DeleteOptions) -> Vec<Option<Error>> {
match self {
Disk::Local(local_disk) => local_disk.delete_versions(volume, versions, opts).await,
Disk::Remote(remote_disk) => remote_disk.delete_versions(volume, versions, opts).await,
@@ -448,12 +443,7 @@ pub trait DiskAPI: Debug + Send + Sync + 'static {
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()>;
async fn delete_versions(
&self,
volume: &str,
versions: Vec<FileInfoVersions>,
opts: DeleteOptions,
) -> Result<Vec<Option<Error>>>;
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, opts: DeleteOptions) -> Vec<Option<Error>>;
async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()>;
async fn write_metadata(&self, org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()>;
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()>;

View File

@@ -21,7 +21,6 @@ pub mod bitrot;
pub mod bucket;
pub mod cache_value;
mod chunk_stream;
pub mod cmd;
pub mod compress;
pub mod config;
pub mod data_usage;

View File

@@ -48,7 +48,7 @@ use std::path::PathBuf;
use std::sync::Arc;
use time::{Duration, OffsetDateTime};
use tokio::io::{AsyncReadExt, BufReader};
use tokio::sync::broadcast::Receiver as B_Receiver;
use tokio_util::sync::CancellationToken;
use tracing::{error, info, warn};
pub const POOL_META_NAME: &str = "pool.bin";
@@ -651,7 +651,7 @@ impl ECStore {
}
#[tracing::instrument(skip(self, rx))]
pub async fn decommission(&self, rx: B_Receiver<bool>, indices: Vec<usize>) -> Result<()> {
pub async fn decommission(&self, rx: CancellationToken, indices: Vec<usize>) -> Result<()> {
warn!("decommission: {:?}", indices);
if indices.is_empty() {
return Err(Error::other("InvalidArgument"));
@@ -663,13 +663,14 @@ impl ECStore {
self.start_decommission(indices.clone()).await?;
let rx_clone = rx.clone();
tokio::spawn(async move {
let Some(store) = new_object_layer_fn() else {
error!("store not init");
return;
};
for idx in indices.iter() {
store.do_decommission_in_routine(rx.resubscribe(), *idx).await;
store.do_decommission_in_routine(rx_clone.clone(), *idx).await;
}
});
@@ -891,7 +892,7 @@ impl ECStore {
#[tracing::instrument(skip(self, rx))]
async fn decommission_pool(
self: &Arc<Self>,
rx: B_Receiver<bool>,
rx: CancellationToken,
idx: usize,
pool: Arc<Sets>,
bi: DecomBucketInfo,
@@ -936,20 +937,20 @@ impl ECStore {
});
let set = set.clone();
let mut rx = rx.resubscribe();
let rx_clone = rx.clone();
let bi = bi.clone();
let set_id = set_idx;
let wk_clone = wk.clone();
tokio::spawn(async move {
loop {
if rx.try_recv().is_ok() {
if rx_clone.is_cancelled() {
warn!("decommission_pool: cancel {}", set_id);
break;
}
warn!("decommission_pool: list_objects_to_decommission {} {}", set_id, &bi.name);
match set
.list_objects_to_decommission(rx.resubscribe(), bi.clone(), decommission_entry.clone())
.list_objects_to_decommission(rx_clone.clone(), bi.clone(), decommission_entry.clone())
.await
{
Ok(_) => {
@@ -982,7 +983,7 @@ impl ECStore {
}
#[tracing::instrument(skip(self, rx))]
pub async fn do_decommission_in_routine(self: &Arc<Self>, rx: B_Receiver<bool>, idx: usize) {
pub async fn do_decommission_in_routine(self: &Arc<Self>, rx: CancellationToken, idx: usize) {
if let Err(err) = self.decommission_in_background(rx, idx).await {
error!("decom err {:?}", &err);
if let Err(er) = self.decommission_failed(idx).await {
@@ -1060,7 +1061,7 @@ impl ECStore {
}
#[tracing::instrument(skip(self, rx))]
async fn decommission_in_background(self: &Arc<Self>, rx: B_Receiver<bool>, idx: usize) -> Result<()> {
async fn decommission_in_background(self: &Arc<Self>, rx: CancellationToken, idx: usize) -> Result<()> {
let pool = self.pools[idx].clone();
let pending = {
@@ -1090,10 +1091,7 @@ impl ECStore {
warn!("decommission: currently on bucket {}", &bucket.name);
if let Err(err) = self
.decommission_pool(rx.resubscribe(), idx, pool.clone(), bucket.clone())
.await
{
if let Err(err) = self.decommission_pool(rx.clone(), idx, pool.clone(), bucket.clone()).await {
error!("decommission: decommission_pool err {:?}", &err);
return Err(err);
} else {
@@ -1329,7 +1327,7 @@ impl SetDisks {
#[tracing::instrument(skip(self, rx, cb_func))]
async fn list_objects_to_decommission(
self: &Arc<Self>,
rx: B_Receiver<bool>,
rx: CancellationToken,
bucket_info: DecomBucketInfo,
cb_func: ListCallback,
) -> Result<()> {

View File

@@ -34,8 +34,8 @@ use std::io::Cursor;
use std::sync::Arc;
use time::OffsetDateTime;
use tokio::io::{AsyncReadExt, BufReader};
use tokio::sync::broadcast::{self, Receiver as B_Receiver};
use tokio::time::{Duration, Instant};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
use uuid::Uuid;
@@ -151,7 +151,7 @@ pub struct DiskStat {
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct RebalanceMeta {
#[serde(skip)]
pub cancel: Option<broadcast::Sender<bool>>, // To be invoked on rebalance-stop
pub cancel: Option<CancellationToken>, // To be invoked on rebalance-stop
#[serde(skip)]
pub last_refreshed_at: Option<OffsetDateTime>,
#[serde(rename = "stopTs")]
@@ -493,8 +493,8 @@ impl ECStore {
pub async fn stop_rebalance(self: &Arc<Self>) -> Result<()> {
let rebalance_meta = self.rebalance_meta.read().await;
if let Some(meta) = rebalance_meta.as_ref() {
if let Some(tx) = meta.cancel.as_ref() {
let _ = tx.send(true);
if let Some(cancel_tx) = meta.cancel.as_ref() {
cancel_tx.cancel();
}
}
@@ -506,13 +506,14 @@ impl ECStore {
info!("start_rebalance: start rebalance");
// let rebalance_meta = self.rebalance_meta.read().await;
let (tx, rx) = broadcast::channel::<bool>(1);
let cancel_tx = CancellationToken::new();
let rx = cancel_tx.clone();
{
let mut rebalance_meta = self.rebalance_meta.write().await;
if let Some(meta) = rebalance_meta.as_mut() {
meta.cancel = Some(tx)
meta.cancel = Some(cancel_tx)
} else {
info!("start_rebalance: rebalance_meta is None exit");
return;
@@ -565,9 +566,9 @@ impl ECStore {
let pool_idx = idx;
let store = self.clone();
let rx = rx.resubscribe();
let rx_clone = rx.clone();
tokio::spawn(async move {
if let Err(err) = store.rebalance_buckets(rx, pool_idx).await {
if let Err(err) = store.rebalance_buckets(rx_clone, pool_idx).await {
error!("Rebalance failed for pool {}: {}", pool_idx, err);
} else {
info!("Rebalance completed for pool {}", pool_idx);
@@ -579,7 +580,7 @@ impl ECStore {
}
#[tracing::instrument(skip(self, rx))]
async fn rebalance_buckets(self: &Arc<Self>, mut rx: B_Receiver<bool>, pool_index: usize) -> Result<()> {
async fn rebalance_buckets(self: &Arc<Self>, rx: CancellationToken, pool_index: usize) -> Result<()> {
let (done_tx, mut done_rx) = tokio::sync::mpsc::channel::<Result<()>>(1);
// Save rebalance metadata periodically
@@ -651,7 +652,7 @@ impl ECStore {
info!("Pool {} rebalancing is started", pool_index);
loop {
if let Ok(true) = rx.try_recv() {
if rx.is_cancelled() {
info!("Pool {} rebalancing is stopped", pool_index);
done_tx.send(Err(Error::other("rebalance stopped canceled"))).await.ok();
break;
@@ -660,7 +661,7 @@ impl ECStore {
if let Some(bucket) = self.next_rebal_bucket(pool_index).await? {
info!("Rebalancing bucket: start {}", bucket);
if let Err(err) = self.rebalance_bucket(rx.resubscribe(), bucket.clone(), pool_index).await {
if let Err(err) = self.rebalance_bucket(rx.clone(), bucket.clone(), pool_index).await {
if err.to_string().contains("not initialized") {
info!("rebalance_bucket: rebalance not initialized, continue");
continue;
@@ -1033,7 +1034,7 @@ impl ECStore {
}
#[tracing::instrument(skip(self, rx))]
async fn rebalance_bucket(self: &Arc<Self>, rx: B_Receiver<bool>, bucket: String, pool_index: usize) -> Result<()> {
async fn rebalance_bucket(self: &Arc<Self>, rx: CancellationToken, bucket: String, pool_index: usize) -> Result<()> {
// Placeholder for actual bucket rebalance logic
info!("Rebalancing bucket {} in pool {}", bucket, pool_index);
@@ -1072,7 +1073,7 @@ impl ECStore {
});
let set = set.clone();
let rx = rx.resubscribe();
let rx = rx.clone();
let bucket = bucket.clone();
// let wk = wk.clone();
@@ -1144,7 +1145,7 @@ impl SetDisks {
#[tracing::instrument(skip(self, rx, cb))]
pub async fn list_objects_to_rebalance(
self: &Arc<Self>,
rx: B_Receiver<bool>,
rx: CancellationToken,
bucket: String,
cb: ListCallback,
) -> Result<()> {

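// Illustrative sketch (not part of this change): the decommission/rebalance
// hunks above replace tokio broadcast channels with
// `tokio_util::sync::CancellationToken`. The token is cloned freely into
// workers, cancelled once from the control path, and polled with
// `is_cancelled()` inside loops. Function names and the sleep interval below
// are hypothetical.
use tokio_util::sync::CancellationToken;

#[allow(dead_code)]
async fn worker_loop(cancel: CancellationToken) {
    loop {
        if cancel.is_cancelled() {
            // Equivalent of the old `rx.try_recv().is_ok()` stop check.
            break;
        }
        // ... perform one unit of rebalance work ...
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
    }
}

#[allow(dead_code)]
async fn control_path() {
    let cancel = CancellationToken::new();
    let worker = tokio::spawn(worker_loop(cancel.clone()));
    // Later, e.g. from `stop_rebalance`:
    cancel.cancel();
    let _ = worker.await;
}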
View File

@@ -345,21 +345,43 @@ impl DiskAPI for RemoteDisk {
}
#[tracing::instrument(skip(self))]
async fn delete_versions(
&self,
volume: &str,
versions: Vec<FileInfoVersions>,
opts: DeleteOptions,
) -> Result<Vec<Option<Error>>> {
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, opts: DeleteOptions) -> Vec<Option<Error>> {
info!("delete_versions");
let opts = serde_json::to_string(&opts)?;
let opts = match serde_json::to_string(&opts) {
Ok(opts) => opts,
Err(err) => {
let mut errors = Vec::with_capacity(versions.len());
for _ in 0..versions.len() {
errors.push(Some(Error::other(err.to_string())));
}
return errors;
}
};
let mut versions_str = Vec::with_capacity(versions.len());
for file_info_versions in versions.iter() {
versions_str.push(serde_json::to_string(file_info_versions)?);
versions_str.push(match serde_json::to_string(file_info_versions) {
Ok(versions_str) => versions_str,
Err(err) => {
let mut errors = Vec::with_capacity(versions.len());
for _ in 0..versions.len() {
errors.push(Some(Error::other(err.to_string())));
}
return errors;
}
});
}
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let mut client = match node_service_time_out_client(&self.addr).await {
Ok(client) => client,
Err(err) => {
let mut errors = Vec::with_capacity(versions.len());
for _ in 0..versions.len() {
errors.push(Some(Error::other(err.to_string())));
}
return errors;
}
};
let request = Request::new(DeleteVersionsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -368,11 +390,27 @@ impl DiskAPI for RemoteDisk {
});
// TODO: use Error not string
let response = client.delete_versions(request).await?.into_inner();
let response = match client.delete_versions(request).await {
Ok(response) => response,
Err(err) => {
let mut errors = Vec::with_capacity(versions.len());
for _ in 0..versions.len() {
errors.push(Some(Error::other(err.to_string())));
}
return errors;
}
};
let response = response.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
let mut errors = Vec::with_capacity(versions.len());
for _ in 0..versions.len() {
errors.push(Some(Error::other(response.error.clone().map(|e| e.error_info).unwrap_or_default())));
}
return errors;
}
let errors = response
response
.errors
.iter()
.map(|error| {
@@ -382,9 +420,7 @@ impl DiskAPI for RemoteDisk {
Some(Error::other(error.to_string()))
}
})
.collect();
Ok(errors)
.collect()
}
#[tracing::instrument(skip(self))]

View File

@@ -1301,28 +1301,22 @@ impl Node for NodeService {
}));
}
};
match disk.delete_versions(&request.volume, versions, opts).await {
Ok(errors) => {
let errors = errors
.into_iter()
.map(|error| match error {
Some(e) => e.to_string(),
None => "".to_string(),
})
.collect();
Ok(tonic::Response::new(DeleteVersionsResponse {
success: true,
errors,
error: None,
}))
}
Err(err) => Ok(tonic::Response::new(DeleteVersionsResponse {
success: false,
errors: Vec::new(),
error: Some(err.into()),
})),
}
let errors = disk
.delete_versions(&request.volume, versions, opts)
.await
.into_iter()
.map(|error| match error {
Some(e) => e.to_string(),
None => "".to_string(),
})
.collect();
Ok(tonic::Response::new(DeleteVersionsResponse {
success: true,
errors,
error: None,
}))
} else {
Ok(tonic::Response::new(DeleteVersionsResponse {
success: false,

View File

@@ -18,6 +18,7 @@
use crate::batch_processor::{AsyncBatchProcessor, get_global_processors};
use crate::bitrot::{create_bitrot_reader, create_bitrot_writer};
use crate::bucket::lifecycle::lifecycle::TRANSITION_COMPLETE;
use crate::bucket::replication::check_replicate_delete;
use crate::bucket::versioning::VersioningApi;
use crate::bucket::versioning_sys::BucketVersioningSys;
use crate::client::{object_api_utils::extract_etag, transition_api::ReaderImpl};
@@ -29,11 +30,12 @@ use crate::disk::{
};
use crate::erasure_coding;
use crate::erasure_coding::bitrot_verify;
use crate::error::{Error, Result};
use crate::error::{Error, Result, is_err_version_not_found};
use crate::error::{ObjectApiError, is_err_object_not_found};
use crate::global::{GLOBAL_LocalNodeName, GLOBAL_TierConfigMgr};
use crate::store_api::ListObjectVersionsInfo;
use crate::store_api::{ListPartsInfo, ObjectToDelete};
use crate::store_api::{ListPartsInfo, ObjectOptions, ObjectToDelete};
use crate::store_api::{ObjectInfoOrErr, WalkOptions};
use crate::{
bucket::lifecycle::bucket_lifecycle_ops::{gen_transition_objname, get_transitioned_object_reader, put_restore_opts},
cache_value::metacache_set::{ListPathRawOptions, list_path_raw},
@@ -50,7 +52,7 @@ use crate::{
store_api::{
BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec,
ListMultipartsInfo, ListObjectsV2Info, MakeBucketOptions, MultipartInfo, MultipartUploadResult, ObjectIO, ObjectInfo,
ObjectOptions, PartInfo, PutObjReader, StorageAPI,
PartInfo, PutObjReader, StorageAPI,
},
store_init::load_format_erasure,
};
@@ -64,16 +66,16 @@ use md5::{Digest as Md5Digest, Md5};
use rand::{Rng, seq::SliceRandom};
use regex::Regex;
use rustfs_common::heal_channel::{DriveState, HealChannelPriority, HealItemType, HealOpts, HealScanMode, send_heal_disk};
use rustfs_filemeta::headers::RESERVED_METADATA_PREFIX_LOWER;
use rustfs_filemeta::{
FileInfo, FileMeta, FileMetaShallowVersion, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, ObjectPartInfo,
RawFileInfo, file_info_from_raw,
headers::{AMZ_OBJECT_TAGGING, AMZ_STORAGE_CLASS},
merge_file_meta_versions,
RawFileInfo, ReplicationStatusType, VersionPurgeStatusType, file_info_from_raw, merge_file_meta_versions,
};
use rustfs_lock::fast_lock::types::LockResult;
use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
use rustfs_rio::{EtagResolvable, HashReader, TryGetIndex as _, WarpReader};
use rustfs_utils::http::headers::AMZ_OBJECT_TAGGING;
use rustfs_utils::http::headers::AMZ_STORAGE_CLASS;
use rustfs_utils::http::headers::RESERVED_METADATA_PREFIX_LOWER;
use rustfs_utils::{
HashAlgorithm,
crypto::{base64_decode, base64_encode, hex},
@@ -102,6 +104,7 @@ use tokio::{
sync::mpsc::{self, Sender},
time::interval,
};
use tokio_util::sync::CancellationToken;
use tracing::error;
use tracing::{debug, info, warn};
use uuid::Uuid;
@@ -3810,7 +3813,7 @@ impl ObjectIO for SetDisks {
}
}
fi.is_latest = true;
fi.replication_state_internal = Some(opts.put_replication_state());
// TODO: version support
Ok(ObjectInfo::from_file_info(&fi, bucket, object, opts.versioned || opts.version_suspended))
@@ -3976,12 +3979,12 @@ impl StorageAPI for SetDisks {
}
#[tracing::instrument(skip(self))]
async fn delete_object_version(&self, bucket: &str, object: &str, fi: &FileInfo, force_del_marker: bool) -> Result<()> {
// Guard lock for single object delete-version
let _lock_guard = self
.fast_lock_manager
.acquire_write_lock(bucket, object, self.locker_owner.as_str())
.await
.map_err(|e| Error::other(self.format_lock_error(bucket, object, "write", &e)))?;
// // Guard lock for single object delete-version
// let _lock_guard = self
// .fast_lock_manager
// .acquire_write_lock("", object, self.locker_owner.as_str())
// .await
// .map_err(|_| Error::other("can not get lock. please retry".to_string()))?;
let disks = self.get_disks(0, 0).await?;
let write_quorum = disks.len() / 2 + 1;
@@ -4028,7 +4031,7 @@ impl StorageAPI for SetDisks {
bucket: &str,
objects: Vec<ObjectToDelete>,
opts: ObjectOptions,
) -> Result<(Vec<DeletedObject>, Vec<Option<Error>>)> {
) -> (Vec<DeletedObject>, Vec<Option<Error>>) {
// Default return values
let mut del_objects = vec![DeletedObject::default(); objects.len()];
@@ -4080,6 +4083,7 @@ impl StorageAPI for SetDisks {
name: dobj.object_name.clone(),
version_id: dobj.version_id,
idx: i,
replication_state_internal: Some(dobj.replication_state()),
..Default::default()
};
@@ -4117,15 +4121,17 @@ impl StorageAPI for SetDisks {
if vr.deleted {
del_objects[i] = DeletedObject {
delete_marker: vr.deleted,
delete_marker_version_id: vr.version_id.map(|v| v.to_string()),
delete_marker_version_id: vr.version_id,
delete_marker_mtime: vr.mod_time,
object_name: vr.name.clone(),
replication_state: vr.replication_state_internal.clone(),
..Default::default()
}
} else {
del_objects[i] = DeletedObject {
object_name: vr.name.clone(),
version_id: vr.version_id.map(|v| v.to_string()),
version_id: vr.version_id,
replication_state: vr.replication_state_internal.clone(),
..Default::default()
}
}
@@ -4163,25 +4169,73 @@ impl StorageAPI for SetDisks {
if let Some(disk) = disk {
disk.delete_versions(bucket, vers, DeleteOptions::default()).await
} else {
Err(DiskError::DiskNotFound)
let mut errs = Vec::with_capacity(vers.len());
for _ in 0..vers.len() {
errs.push(Some(DiskError::DiskNotFound));
}
errs
}
});
}
let results = join_all(futures).await;
for errs in results.into_iter().flatten() {
// TODO: handle err reduceWriteQuorumErrs
for err in errs.iter().flatten() {
warn!("result err {:?}", err);
let mut del_obj_errs: Vec<Vec<Option<DiskError>>> = vec![vec![None; objects.len()]; disks.len()];
// For each disk: the results of deleting every object
for (disk_idx, errors) in results.into_iter().enumerate() {
// Deletion results for every object on this disk
for idx in 0..vers.len() {
if errors[idx].is_some() {
for fi in vers[idx].versions.iter() {
del_obj_errs[disk_idx][fi.idx] = errors[idx].clone();
}
}
}
}
Ok((del_objects, del_errs))
for obj_idx in 0..objects.len() {
let mut disk_err = vec![None; disks.len()];
for disk_idx in 0..disks.len() {
if del_obj_errs[disk_idx][obj_idx].is_some() {
disk_err[disk_idx] = del_obj_errs[disk_idx][obj_idx].clone();
}
}
let mut has_err = reduce_write_quorum_errs(&disk_err, OBJECT_OP_IGNORED_ERRS, disks.len() / 2 + 1);
if let Some(err) = has_err.clone() {
let er = err.into();
if (is_err_object_not_found(&er) || is_err_version_not_found(&er)) && !del_objects[obj_idx].delete_marker {
has_err = None;
}
} else {
del_objects[obj_idx].found = true;
}
if let Some(err) = has_err {
if del_objects[obj_idx].version_id.is_some() {
del_errs[obj_idx] = Some(to_object_err(
err.into(),
vec![
bucket,
&objects[obj_idx].object_name.clone(),
&objects[obj_idx].version_id.unwrap_or_default().to_string(),
],
));
} else {
del_errs[obj_idx] = Some(to_object_err(err.into(), vec![bucket, &objects[obj_idx].object_name.clone()]));
}
}
}
// TODO: add_partial
(del_objects, del_errs)
}
#[tracing::instrument(skip(self))]
async fn delete_object(&self, bucket: &str, object: &str, opts: ObjectOptions) -> Result<ObjectInfo> {
async fn delete_object(&self, bucket: &str, object: &str, mut opts: ObjectOptions) -> Result<ObjectInfo> {
// Guard lock for single object delete
let _lock_guard = if !opts.delete_prefix {
Some(
@@ -4201,17 +4255,55 @@ impl StorageAPI for SetDisks {
return Ok(ObjectInfo::default());
}
let (oi, write_quorum) = match self.get_object_info_and_quorum(bucket, object, &opts).await {
Ok((oi, wq)) => (oi, wq),
Err(e) => {
return Err(to_object_err(e, vec![bucket, object]));
}
let (mut goi, write_quorum, gerr) = match self.get_object_info_and_quorum(bucket, object, &opts).await {
Ok((oi, wq)) => (oi, wq, None),
Err(e) => (ObjectInfo::default(), 0, Some(e)),
};
let mark_delete = oi.version_id.is_some();
let otd = ObjectToDelete {
object_name: object.to_string(),
version_id: opts
.version_id
.clone()
.map(|v| Uuid::parse_str(v.as_str()).ok().unwrap_or_default()),
..Default::default()
};
let version_found = if opts.delete_marker { gerr.is_none() } else { true };
let dsc = check_replicate_delete(bucket, &otd, &goi, &opts, gerr.map(|e| e.to_string())).await;
if dsc.replicate_any() {
opts.set_delete_replication_state(dsc);
goi.replication_decision = opts
.delete_replication
.as_ref()
.map(|v| v.replicate_decision_str.clone())
.unwrap_or_default();
}
let mut mark_delete = goi.version_id.is_some();
let mut delete_marker = opts.versioned;
if opts.version_id.is_some() {
if version_found && opts.delete_marker_replication_status() == ReplicationStatusType::Replica {
mark_delete = false;
}
if opts.version_purge_status().is_empty() && opts.delete_marker_replication_status().is_empty() {
mark_delete = false;
}
if opts.version_purge_status() != VersionPurgeStatusType::Complete {
mark_delete = false;
}
if version_found && (goi.version_purge_status.is_empty() || !goi.delete_marker) {
delete_marker = false;
}
}
let mod_time = if let Some(mt) = opts.mod_time {
mt
} else {
@@ -4230,7 +4322,8 @@ impl StorageAPI for SetDisks {
deleted: delete_marker,
mark_deleted: mark_delete,
mod_time: Some(mod_time),
..Default::default() // TODO: replication
replication_state_internal: opts.delete_replication.clone(),
..Default::default() // TODO: Transition
};
fi.set_tier_free_version_id(&find_vid.to_string());
@@ -4257,88 +4350,27 @@ impl StorageAPI for SetDisks {
let version_id = opts.version_id.as_ref().and_then(|v| Uuid::parse_str(v).ok());
// Create a single object deletion request
let mut vr = FileInfo {
let mut dfi = FileInfo {
name: object.to_string(),
version_id: opts.version_id.as_ref().and_then(|v| Uuid::parse_str(v).ok()),
mark_deleted: mark_delete,
deleted: delete_marker,
mod_time: Some(mod_time),
replication_state_internal: opts.delete_replication.clone(),
..Default::default()
};
// Handle versioning
let (suspended, versioned) = (opts.version_suspended, opts.versioned);
if opts.version_id.is_none() && (suspended || versioned) {
vr.mod_time = Some(OffsetDateTime::now_utc());
vr.deleted = true;
if versioned {
vr.version_id = Some(Uuid::new_v4());
}
dfi.set_tier_free_version_id(&find_vid.to_string());
if opts.skip_free_version {
dfi.set_skip_tier_free_version();
}
let vers = vec![FileInfoVersions {
name: vr.name.clone(),
versions: vec![vr.clone()],
..Default::default()
}];
self.delete_object_version(bucket, object, &dfi, opts.delete_marker)
.await
.map_err(|e| to_object_err(e, vec![bucket, object]))?;
let disks = self.disks.read().await;
let disks = disks.clone();
let write_quorum = disks.len() / 2 + 1;
let mut futures = Vec::with_capacity(disks.len());
let mut errs = Vec::with_capacity(disks.len());
for disk in disks.iter() {
let vers = vers.clone();
futures.push(async move {
if let Some(disk) = disk {
disk.delete_versions(bucket, vers, DeleteOptions::default()).await
} else {
Err(DiskError::DiskNotFound)
}
});
}
let results = join_all(futures).await;
for result in results {
match result {
Ok(disk_errs) => {
// Handle errors from disk operations
for err in disk_errs.iter().flatten() {
warn!("delete_object disk error: {:?}", err);
}
errs.push(None);
}
Err(e) => {
errs.push(Some(e));
}
}
}
// Check write quorum
if let Some(err) = reduce_write_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, write_quorum) {
return Err(to_object_err(err.into(), vec![bucket, object]));
}
// Create result ObjectInfo
let result_info = if vr.deleted {
ObjectInfo {
bucket: bucket.to_string(),
name: object.to_string(),
delete_marker: true,
mod_time: vr.mod_time,
version_id: vr.version_id,
..Default::default()
}
} else {
ObjectInfo {
bucket: bucket.to_string(),
name: object.to_string(),
version_id: vr.version_id,
..Default::default()
}
};
Ok(result_info)
Ok(ObjectInfo::from_file_info(&dfi, bucket, object, opts.versioned || opts.version_suspended))
}
#[tracing::instrument(skip(self))]
@@ -4368,6 +4400,17 @@ impl StorageAPI for SetDisks {
unimplemented!()
}
async fn walk(
self: Arc<Self>,
_rx: CancellationToken,
_bucket: &str,
_prefix: &str,
_result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
_opts: WalkOptions,
) -> Result<()> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
// Acquire a shared read-lock to protect consistency during info fetch
@@ -4994,7 +5037,7 @@ impl StorageAPI for SetDisks {
// Extract storage class from metadata, default to STANDARD if not found
let storage_class = fi
.metadata
.get(rustfs_filemeta::headers::AMZ_STORAGE_CLASS)
.get(AMZ_STORAGE_CLASS)
.cloned()
.unwrap_or_else(|| storageclass::STANDARD.to_string());

View File

@@ -17,7 +17,7 @@ use std::{collections::HashMap, sync::Arc};
use crate::disk::error_reduce::count_errs;
use crate::error::{Error, Result};
use crate::store_api::ListPartsInfo;
use crate::store_api::{ListPartsInfo, ObjectInfoOrErr, WalkOptions};
use crate::{
disk::{
DiskAPI, DiskInfo, DiskOption, DiskStore,
@@ -48,6 +48,7 @@ use rustfs_filemeta::FileInfo;
use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
use rustfs_utils::{crc_hash, path::path_join_buf, sip_hash};
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use uuid::Uuid;
use tokio::sync::broadcast::{Receiver, Sender};
@@ -459,6 +460,17 @@ impl StorageAPI for Sets {
unimplemented!()
}
async fn walk(
self: Arc<Self>,
_rx: CancellationToken,
_bucket: &str,
_prefix: &str,
_result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
_opts: WalkOptions,
) -> Result<()> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).get_object_info(bucket, object, opts).await
@@ -543,7 +555,7 @@ impl StorageAPI for Sets {
bucket: &str,
objects: Vec<ObjectToDelete>,
opts: ObjectOptions,
) -> Result<(Vec<DeletedObject>, Vec<Option<Error>>)> {
) -> (Vec<DeletedObject>, Vec<Option<Error>>) {
// Default return value
let mut del_objects = vec![DeletedObject::default(); objects.len()];
@@ -576,38 +588,11 @@ impl StorageAPI for Sets {
}
}
// let semaphore = Arc::new(Semaphore::new(num_cpus::get()));
// let mut jhs = Vec::with_capacity(semaphore.available_permits());
// for (k, v) in set_obj_map {
// let disks = self.get_disks(k);
// let semaphore = semaphore.clone();
// let opts = opts.clone();
// let bucket = bucket.to_string();
// let jh = tokio::spawn(async move {
// let _permit = semaphore.acquire().await.unwrap();
// let objs: Vec<ObjectToDelete> = v.iter().map(|v| v.obj.clone()).collect();
// disks.delete_objects(&bucket, objs, opts).await
// });
// jhs.push(jh);
// }
// let mut results = Vec::with_capacity(jhs.len());
// for jh in jhs {
// results.push(jh.await?.unwrap());
// }
// for (dobjects, errs) in results {
// del_objects.extend(dobjects);
// del_errs.extend(errs);
// }
// TODO: Implement concurrency
// TODO: concurrency
for (k, v) in set_obj_map {
let disks = self.get_disks(k);
let objs: Vec<ObjectToDelete> = v.iter().map(|v| v.obj.clone()).collect();
let (dobjects, errs) = disks.delete_objects(bucket, objs, opts.clone()).await?;
let (dobjects, errs) = disks.delete_objects(bucket, objs, opts.clone()).await;
for (i, err) in errs.into_iter().enumerate() {
let obj = v.get(i).unwrap();
@@ -618,7 +603,7 @@ impl StorageAPI for Sets {
}
}
Ok((del_objects, del_errs))
(del_objects, del_errs)
}
async fn list_object_parts(

View File

@@ -34,7 +34,9 @@ use crate::global::{
use crate::notification_sys::get_global_notification_sys;
use crate::pools::PoolMeta;
use crate::rebalance::RebalanceMeta;
use crate::store_api::{ListMultipartsInfo, ListObjectVersionsInfo, ListPartsInfo, MultipartInfo, ObjectIO};
use crate::store_api::{
ListMultipartsInfo, ListObjectVersionsInfo, ListPartsInfo, MultipartInfo, ObjectIO, ObjectInfoOrErr, WalkOptions,
};
use crate::store_init::{check_disk_fatal_errs, ec_drives_no_config};
use crate::{
bucket::{lifecycle::bucket_lifecycle_ops::TransitionState, metadata::BucketMetadata},
@@ -68,8 +70,9 @@ use std::time::SystemTime;
use std::{collections::HashMap, sync::Arc, time::Duration};
use time::OffsetDateTime;
use tokio::select;
use tokio::sync::{RwLock, broadcast};
use tokio::sync::RwLock;
use tokio::time::sleep;
use tokio_util::sync::CancellationToken;
use tracing::{debug, info};
use tracing::{error, warn};
use uuid::Uuid;
@@ -109,7 +112,7 @@ pub struct ECStore {
impl ECStore {
#[allow(clippy::new_ret_no_self)]
#[tracing::instrument(level = "debug", skip(endpoint_pools))]
pub async fn new(address: SocketAddr, endpoint_pools: EndpointServerPools) -> Result<Arc<Self>> {
pub async fn new(address: SocketAddr, endpoint_pools: EndpointServerPools, ctx: CancellationToken) -> Result<Arc<Self>> {
// let layouts = DisksLayout::from_volumes(endpoints.as_slice())?;
let mut deployment_id = None;
@@ -251,7 +254,7 @@ impl ECStore {
let wait_sec = 5;
let mut exit_count = 0;
loop {
if let Err(err) = ec.init().await {
if let Err(err) = ec.init(ctx.clone()).await {
error!("init err: {}", err);
error!("retry after {} second", wait_sec);
sleep(Duration::from_secs(wait_sec)).await;
@@ -273,7 +276,7 @@ impl ECStore {
Ok(ec)
}
pub async fn init(self: &Arc<Self>) -> Result<()> {
pub async fn init(self: &Arc<Self>, rx: CancellationToken) -> Result<()> {
GLOBAL_BOOT_TIME.get_or_init(|| async { SystemTime::now() }).await;
if self.load_rebalance_meta().await.is_ok() {
@@ -317,18 +320,16 @@ impl ECStore {
if !pool_indices.is_empty() {
let idx = pool_indices[0];
if endpoints.as_ref()[idx].endpoints.as_ref()[0].is_local {
let (_tx, rx) = broadcast::channel(1);
let store = self.clone();
tokio::spawn(async move {
// wait 3 minutes for cluster init
tokio::time::sleep(Duration::from_secs(60 * 3)).await;
if let Err(err) = store.decommission(rx.resubscribe(), pool_indices.clone()).await {
if let Err(err) = store.decommission(rx.clone(), pool_indices.clone()).await {
if err == StorageError::DecommissionAlreadyRunning {
for i in pool_indices.iter() {
store.do_decommission_in_routine(rx.resubscribe(), *i).await;
store.do_decommission_in_routine(rx.clone(), *i).await;
}
return;
}
@@ -700,9 +701,13 @@ impl ECStore {
opts: &ObjectOptions,
) -> Result<(PoolObjInfo, Vec<PoolErr>)> {
let mut futures = Vec::new();
for pool in self.pools.iter() {
futures.push(pool.get_object_info(bucket, object, opts));
let mut pool_opts = opts.clone();
if !pool_opts.metadata_chg {
pool_opts.version_id = None;
}
futures.push(async move { pool.get_object_info(bucket, object, &pool_opts).await });
}
let results = join_all(futures).await;
@@ -1351,6 +1356,17 @@ impl StorageAPI for ECStore {
.await
}
async fn walk(
self: Arc<Self>,
rx: CancellationToken,
bucket: &str,
prefix: &str,
result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
opts: WalkOptions,
) -> Result<()> {
self.walk_internal(rx, bucket, prefix, result, opts).await
}
#[tracing::instrument(skip(self))]
async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
check_object_args(bucket, object)?;
@@ -1450,9 +1466,12 @@ impl StorageAPI for ECStore {
let object = encode_dir_object(object);
let object = object.as_str();
let mut gopts = opts.clone();
gopts.no_lock = true;
// Find which pool holds the object
let (mut pinfo, errs) = self
.get_pool_info_existing_with_opts(bucket, object, &opts)
.get_pool_info_existing_with_opts(bucket, object, &gopts)
.await
.map_err(|e| {
if is_err_read_quorum(&e) {
@@ -1513,7 +1532,7 @@ impl StorageAPI for ECStore {
bucket: &str,
objects: Vec<ObjectToDelete>,
opts: ObjectOptions,
) -> Result<(Vec<DeletedObject>, Vec<Option<Error>>)> {
) -> (Vec<DeletedObject>, Vec<Option<Error>>) {
// encode object name
let objects: Vec<ObjectToDelete> = objects
.iter()
@@ -1534,131 +1553,160 @@ impl StorageAPI for ECStore {
// TODO: nslock
let mut futures = Vec::with_capacity(objects.len());
let mut futures = Vec::with_capacity(self.pools.len());
for obj in objects.iter() {
futures.push(async move {
self.internal_get_pool_info_existing_with_opts(
bucket,
&obj.object_name,
&ObjectOptions {
no_lock: true,
..Default::default()
},
)
.await
});
for pool in self.pools.iter() {
futures.push(pool.delete_objects(bucket, objects.clone(), opts.clone()));
}
let results = join_all(futures).await;
// let mut jhs = Vec::new();
// let semaphore = Arc::new(Semaphore::new(num_cpus::get()));
// let pools = Arc::new(self.pools.clone());
for idx in 0..del_objects.len() {
for (dels, errs) in results.iter() {
if errs[idx].is_none() && dels[idx].found {
del_errs[idx] = None;
del_objects[idx] = dels[idx].clone();
break;
}
if del_errs[idx].is_none() {
del_errs[idx] = errs[idx].clone();
del_objects[idx] = dels[idx].clone();
}
}
}
del_objects.iter_mut().for_each(|v| {
v.object_name = decode_dir_object(&v.object_name);
});
(del_objects, del_errs)
// let mut futures = Vec::with_capacity(objects.len());
// for obj in objects.iter() {
// let (semaphore, pools, bucket, object_name, opt) = (
// semaphore.clone(),
// pools.clone(),
// bucket.to_string(),
// obj.object_name.to_string(),
// ObjectOptions::default(),
// );
// let jh = tokio::spawn(async move {
// let _permit = semaphore.acquire().await.unwrap();
// self.internal_get_pool_info_existing_with_opts(pools.as_ref(), &bucket, &object_name, &opt)
// .await
// futures.push(async move {
// self.internal_get_pool_info_existing_with_opts(
// bucket,
// &obj.object_name,
// &ObjectOptions {
// no_lock: true,
// ..Default::default()
// },
// )
// .await
// });
// jhs.push(jh);
// }
// let mut results = Vec::new();
// for jh in jhs {
// results.push(jh.await.unwrap());
// }
// Record which objects map to which pool: pool_idx -> object indexes
let mut pool_obj_idx_map = HashMap::new();
let mut orig_index_map = HashMap::new();
// let results = join_all(futures).await;
for (i, res) in results.into_iter().enumerate() {
match res {
Ok((pinfo, _)) => {
if let Some(obj) = objects.get(i) {
if pinfo.object_info.delete_marker && obj.version_id.is_none() {
del_objects[i] = DeletedObject {
delete_marker: pinfo.object_info.delete_marker,
delete_marker_version_id: pinfo.object_info.version_id.map(|v| v.to_string()),
object_name: decode_dir_object(&pinfo.object_info.name),
delete_marker_mtime: pinfo.object_info.mod_time,
..Default::default()
};
continue;
}
// // let mut jhs = Vec::new();
// // let semaphore = Arc::new(Semaphore::new(num_cpus::get()));
// // let pools = Arc::new(self.pools.clone());
if !pool_obj_idx_map.contains_key(&pinfo.index) {
pool_obj_idx_map.insert(pinfo.index, vec![obj.clone()]);
} else if let Some(val) = pool_obj_idx_map.get_mut(&pinfo.index) {
val.push(obj.clone());
}
// // for obj in objects.iter() {
// // let (semaphore, pools, bucket, object_name, opt) = (
// // semaphore.clone(),
// // pools.clone(),
// // bucket.to_string(),
// // obj.object_name.to_string(),
// // ObjectOptions::default(),
// // );
if !orig_index_map.contains_key(&pinfo.index) {
orig_index_map.insert(pinfo.index, vec![i]);
} else if let Some(val) = orig_index_map.get_mut(&pinfo.index) {
val.push(i);
}
}
}
Err(e) => {
if !is_err_object_not_found(&e) && is_err_version_not_found(&e) {
del_errs[i] = Some(e)
}
// // let jh = tokio::spawn(async move {
// // let _permit = semaphore.acquire().await.unwrap();
// // self.internal_get_pool_info_existing_with_opts(pools.as_ref(), &bucket, &object_name, &opt)
// // .await
// // });
// // jhs.push(jh);
// // }
// // let mut results = Vec::new();
// // for jh in jhs {
// // results.push(jh.await.unwrap());
// // }
if let Some(obj) = objects.get(i) {
del_objects[i] = DeletedObject {
object_name: decode_dir_object(&obj.object_name),
version_id: obj.version_id.map(|v| v.to_string()),
..Default::default()
}
}
}
}
}
// // Record which objects map to which pool: pool_idx -> object indexes
// let mut pool_obj_idx_map = HashMap::new();
// let mut orig_index_map = HashMap::new();
if !pool_obj_idx_map.is_empty() {
for (i, sets) in self.pools.iter().enumerate() {
// Take the object indexes that belong to this pool idx
if let Some(objs) = pool_obj_idx_map.get(&i) {
// Fetch the corresponding obj; in theory it is never None
// let objs: Vec<ObjectToDelete> = obj_idxs.iter().filter_map(|&idx| objects.get(idx).cloned()).collect();
// for (i, res) in results.into_iter().enumerate() {
// match res {
// Ok((pinfo, _)) => {
// if let Some(obj) = objects.get(i) {
// if pinfo.object_info.delete_marker && obj.version_id.is_none() {
// del_objects[i] = DeletedObject {
// delete_marker: pinfo.object_info.delete_marker,
// delete_marker_version_id: pinfo.object_info.version_id.map(|v| v.to_string()),
// object_name: decode_dir_object(&pinfo.object_info.name),
// delete_marker_mtime: pinfo.object_info.mod_time,
// ..Default::default()
// };
// continue;
// }
if objs.is_empty() {
continue;
}
// if !pool_obj_idx_map.contains_key(&pinfo.index) {
// pool_obj_idx_map.insert(pinfo.index, vec![obj.clone()]);
// } else if let Some(val) = pool_obj_idx_map.get_mut(&pinfo.index) {
// val.push(obj.clone());
// }
let (pdel_objs, perrs) = sets.delete_objects(bucket, objs.clone(), opts.clone()).await?;
// if !orig_index_map.contains_key(&pinfo.index) {
// orig_index_map.insert(pinfo.index, vec![i]);
// } else if let Some(val) = orig_index_map.get_mut(&pinfo.index) {
// val.push(i);
// }
// }
// }
// Err(e) => {
// if !is_err_object_not_found(&e) && is_err_version_not_found(&e) {
// del_errs[i] = Some(e)
// }
// Populated together with pool_obj_idx_map, so this cannot be None
let org_indexes = orig_index_map.get(&i).unwrap();
// if let Some(obj) = objects.get(i) {
// del_objects[i] = DeletedObject {
// object_name: decode_dir_object(&obj.object_name),
// version_id: obj.version_id.map(|v| v.to_string()),
// ..Default::default()
// }
// }
// }
// }
// }
// The order of perrs should, in theory, match the order of obj_idxs
for (i, err) in perrs.into_iter().enumerate() {
let obj_idx = org_indexes[i];
// if !pool_obj_idx_map.is_empty() {
// for (i, sets) in self.pools.iter().enumerate() {
// // Take the object indexes that belong to this pool idx
// // Fetch the corresponding obj; in theory it is never None
// // 取对应 obj理论上不会 none
// // let objs: Vec<ObjectToDelete> = obj_idxs.iter().filter_map(|&idx| objects.get(idx).cloned()).collect();
if err.is_some() {
del_errs[obj_idx] = err;
}
// if objs.is_empty() {
// continue;
// }
let mut dobj = pdel_objs.get(i).unwrap().clone();
dobj.object_name = decode_dir_object(&dobj.object_name);
// let (pdel_objs, perrs) = sets.delete_objects(bucket, objs.clone(), opts.clone()).await?;
del_objects[obj_idx] = dobj;
}
}
}
}
// // Populated together with pool_obj_idx_map, so this cannot be None
// let org_indexes = orig_index_map.get(&i).unwrap();
Ok((del_objects, del_errs))
// // The order of perrs should, in theory, match the order of obj_idxs
// for (i, err) in perrs.into_iter().enumerate() {
// let obj_idx = org_indexes[i];
// if err.is_some() {
// del_errs[obj_idx] = err;
// }
// let mut dobj = pdel_objs.get(i).unwrap().clone();
// dobj.object_name = decode_dir_object(&dobj.object_name);
// del_objects[obj_idx] = dobj;
// }
// }
// }
// }
// Ok((del_objects, del_errs))
}
#[tracing::instrument(skip(self))]

View File

@@ -13,8 +13,10 @@
// limitations under the License.
use crate::bucket::metadata_sys::get_versioning_config;
use crate::bucket::replication::REPLICATION_RESET;
use crate::bucket::replication::REPLICATION_STATUS;
use crate::bucket::replication::{ReplicateDecision, replication_statuses_map, version_purge_statuses_map};
use crate::bucket::versioning::VersioningApi as _;
use crate::cmd::bucket_replication::{ReplicationStatusType, VersionPurgeStatusType};
use crate::disk::DiskStore;
use crate::error::{Error, Result};
use crate::store_utils::clean_metadata;
@@ -25,20 +27,25 @@ use crate::{
};
use http::{HeaderMap, HeaderValue};
use rustfs_common::heal_channel::HealOpts;
use rustfs_filemeta::headers::RESERVED_METADATA_PREFIX_LOWER;
use rustfs_filemeta::{FileInfo, MetaCacheEntriesSorted, ObjectPartInfo, headers::AMZ_OBJECT_TAGGING};
use rustfs_filemeta::{
FileInfo, MetaCacheEntriesSorted, ObjectPartInfo, ReplicationState, ReplicationStatusType, VersionPurgeStatusType,
};
use rustfs_madmin::heal_commands::HealResultItem;
use rustfs_rio::{DecompressReader, HashReader, LimitReader, WarpReader};
use rustfs_utils::CompressionAlgorithm;
use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX_LOWER};
use rustfs_utils::path::decode_dir_object;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::Debug;
use std::io::Cursor;
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;
use std::task::{Context, Poll};
use time::OffsetDateTime;
use tokio::io::{AsyncRead, AsyncReadExt};
use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};
use tokio_util::sync::CancellationToken;
use tracing::warn;
use uuid::Uuid;
@@ -221,6 +228,12 @@ impl GetObjectReader {
}
}
impl AsyncRead for GetObjectReader {
fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<std::io::Result<()>> {
Pin::new(&mut self.stream).poll_read(cx, buf)
}
}
#[derive(Debug, Clone)]
pub struct HTTPRangeSpec {
pub is_suffix_length: bool,
@@ -326,6 +339,7 @@ pub struct ObjectOptions {
pub skip_decommissioned: bool,
pub skip_rebalancing: bool,
pub skip_free_version: bool,
pub data_movement: bool,
pub src_pool_idx: usize,
@@ -334,11 +348,10 @@ pub struct ObjectOptions {
pub metadata_chg: bool,
pub http_preconditions: Option<HTTPPreconditions>,
pub delete_replication: Option<ReplicationState>,
pub replication_request: bool,
pub delete_marker: bool,
pub skip_free_version: bool,
pub transition: TransitionOptions,
pub expiration: ExpirationOptions,
pub lifecycle_audit_event: LcAuditEvent,
@@ -346,15 +359,66 @@ pub struct ObjectOptions {
pub eval_metadata: Option<HashMap<String, String>>,
}
// impl Default for ObjectOptions {
// fn default() -> Self {
// Self {
// max_parity: Default::default(),
// mod_time: OffsetDateTime::UNIX_EPOCH,
// part_number: Default::default(),
// }
// }
// }
impl ObjectOptions {
pub fn set_delete_replication_state(&mut self, dsc: ReplicateDecision) {
let mut rs = ReplicationState {
replicate_decision_str: dsc.to_string(),
..Default::default()
};
if self.version_id.is_none() {
rs.replication_status_internal = dsc.pending_status();
rs.targets = replication_statuses_map(rs.replication_status_internal.as_deref().unwrap_or_default());
} else {
rs.version_purge_status_internal = dsc.pending_status();
rs.purge_targets = version_purge_statuses_map(rs.version_purge_status_internal.as_deref().unwrap_or_default());
}
self.delete_replication = Some(rs)
}
pub fn set_replica_status(&mut self, status: ReplicationStatusType) {
if let Some(rs) = self.delete_replication.as_mut() {
rs.replica_status = status;
rs.replica_timestamp = Some(OffsetDateTime::now_utc());
} else {
self.delete_replication = Some(ReplicationState {
replica_status: status,
replica_timestamp: Some(OffsetDateTime::now_utc()),
..Default::default()
});
}
}
pub fn version_purge_status(&self) -> VersionPurgeStatusType {
self.delete_replication
.as_ref()
.map(|v| v.composite_version_purge_status())
.unwrap_or(VersionPurgeStatusType::Empty)
}
pub fn delete_marker_replication_status(&self) -> ReplicationStatusType {
self.delete_replication
.as_ref()
.map(|v| v.composite_replication_status())
.unwrap_or(ReplicationStatusType::Empty)
}
pub fn put_replication_state(&self) -> ReplicationState {
let rs = match self
.user_defined
.get(format!("{RESERVED_METADATA_PREFIX_LOWER}{REPLICATION_STATUS}").as_str())
{
Some(v) => v.to_string(),
None => return ReplicationState::default(),
};
ReplicationState {
replication_status_internal: Some(rs.to_string()),
targets: replication_statuses_map(rs.as_str()),
..Default::default()
}
}
}
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct BucketOptions {
@@ -423,6 +487,7 @@ pub struct ObjectInfo {
pub is_latest: bool,
pub content_type: Option<String>,
pub content_encoding: Option<String>,
pub expires: Option<OffsetDateTime>,
pub num_versions: usize,
pub successor_mod_time: Option<OffsetDateTime>,
pub put_object_reader: Option<PutObjReader>,
@@ -430,10 +495,11 @@ pub struct ObjectInfo {
pub inlined: bool,
pub metadata_only: bool,
pub version_only: bool,
pub replication_status_internal: String,
pub replication_status_internal: Option<String>,
pub replication_status: ReplicationStatusType,
pub version_purge_status_internal: String,
pub version_purge_status_internal: Option<String>,
pub version_purge_status: VersionPurgeStatusType,
pub replication_decision: String,
pub checksum: Vec<u8>,
}
@@ -470,7 +536,9 @@ impl Clone for ObjectInfo {
replication_status: self.replication_status.clone(),
version_purge_status_internal: self.version_purge_status_internal.clone(),
version_purge_status: self.version_purge_status.clone(),
replication_decision: self.replication_decision.clone(),
checksum: Default::default(),
expires: self.expires,
}
}
}
@@ -665,7 +733,10 @@ impl ObjectInfo {
};
for fi in versions.iter() {
// TODO:VersionPurgeStatus
if !fi.version_purge_status().is_empty() {
continue;
}
let versioned = vcfg.clone().map(|v| v.0.versioned(&entry.name)).unwrap_or_default();
objects.push(ObjectInfo::from_file_info(fi, bucket, &entry.name, versioned));
}
@@ -770,6 +841,32 @@ impl ObjectInfo {
objects
}
pub fn replication_state(&self) -> ReplicationState {
ReplicationState {
replication_status_internal: self.replication_status_internal.clone(),
version_purge_status_internal: self.version_purge_status_internal.clone(),
replicate_decision_str: self.replication_decision.clone(),
targets: replication_statuses_map(self.replication_status_internal.clone().unwrap_or_default().as_str()),
purge_targets: version_purge_statuses_map(self.version_purge_status_internal.clone().unwrap_or_default().as_str()),
reset_statuses_map: self
.user_defined
.iter()
.filter_map(|(k, v)| {
if k.starts_with(&format!("{RESERVED_METADATA_PREFIX_LOWER}{REPLICATION_RESET}")) {
Some((
k.trim_start_matches(&format!("{RESERVED_METADATA_PREFIX_LOWER}{REPLICATION_RESET}-"))
.to_string(),
v.clone(),
))
} else {
None
}
})
.collect(),
..Default::default()
}
}
}
#[derive(Debug, Default)]
@@ -927,17 +1024,52 @@ pub struct ListPartsInfo {
pub struct ObjectToDelete {
pub object_name: String,
pub version_id: Option<Uuid>,
pub delete_marker_replication_status: Option<String>,
pub version_purge_status: Option<VersionPurgeStatusType>,
pub version_purge_statuses: Option<String>,
pub replicate_decision_str: Option<String>,
}
impl ObjectToDelete {
pub fn replication_state(&self) -> ReplicationState {
ReplicationState {
replication_status_internal: self.delete_marker_replication_status.clone(),
version_purge_status_internal: self.version_purge_statuses.clone(),
replicate_decision_str: self.replicate_decision_str.clone().unwrap_or_default(),
targets: replication_statuses_map(self.delete_marker_replication_status.as_deref().unwrap_or_default()),
purge_targets: version_purge_statuses_map(self.version_purge_statuses.as_deref().unwrap_or_default()),
..Default::default()
}
}
}
#[derive(Debug, Default, Clone)]
pub struct DeletedObject {
pub delete_marker: bool,
pub delete_marker_version_id: Option<String>,
pub delete_marker_version_id: Option<Uuid>,
pub object_name: String,
pub version_id: Option<String>,
pub version_id: Option<Uuid>,
// MTime of DeleteMarker on source that needs to be propagated to replica
pub delete_marker_mtime: Option<OffsetDateTime>,
// to support delete marker replication
// pub replication_state: ReplicationState,
pub replication_state: Option<ReplicationState>,
pub found: bool,
}
impl DeletedObject {
pub fn version_purge_status(&self) -> VersionPurgeStatusType {
self.replication_state
.as_ref()
.map(|v| v.composite_version_purge_status())
.unwrap_or(VersionPurgeStatusType::Empty)
}
pub fn delete_marker_replication_status(&self) -> ReplicationStatusType {
self.replication_state
.as_ref()
.map(|v| v.composite_replication_status())
.unwrap_or(ReplicationStatusType::Empty)
}
}
#[derive(Debug, Default, Clone)]
@@ -949,8 +1081,33 @@ pub struct ListObjectVersionsInfo {
pub prefixes: Vec<String>,
}
type WalkFilter = fn(&FileInfo) -> bool;
#[derive(Clone, Default)]
pub struct WalkOptions {
pub filter: Option<WalkFilter>, // optional filter callback; WalkFilter returns true/false for each FileInfo
pub marker: Option<String>, // set to skip until this object
pub latest_only: bool, // returns only latest versions for all matching objects
pub ask_disks: String, // dictates how many disks are being listed
pub versions_sort: WalkVersionsSortOrder, // sort order for versions of the same object; default: Ascending order in ModTime
pub limit: usize, // maximum number of items, 0 means no limit
}
#[derive(Clone, Default, PartialEq, Eq)]
pub enum WalkVersionsSortOrder {
#[default]
Ascending,
Descending,
}
#[derive(Debug)]
pub struct ObjectInfoOrErr {
pub item: Option<ObjectInfo>,
pub err: Option<Error>,
}
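// Illustrative usage sketch for the walk API declared below (not part of this change;
// `store` is assumed to be an Arc<impl StorageAPI>, the bucket and prefix are hypothetical):
//
//     let (tx, mut rx) = tokio::sync::mpsc::channel::<ObjectInfoOrErr>(100);
//     let cancel = CancellationToken::new();
//     tokio::spawn({
//         let (store, cancel) = (store.clone(), cancel.clone());
//         async move {
//             let _ = store.walk(cancel, "my-bucket", "prefix/", tx, WalkOptions::default()).await;
//         }
//     });
//     while let Some(entry) = rx.recv().await {
//         if let Some(err) = entry.err {
//             cancel.cancel();
//             break;
//         }
//         if let Some(_info) = entry.item { /* process ObjectInfo */ }
//     }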
#[async_trait::async_trait]
pub trait ObjectIO: Send + Sync + 'static {
pub trait ObjectIO: Send + Sync + Debug + 'static {
// GetObjectNInfo FIXME:
async fn get_object_reader(
&self,
@@ -966,7 +1123,7 @@ pub trait ObjectIO: Send + Sync + 'static {
#[async_trait::async_trait]
#[allow(clippy::too_many_arguments)]
pub trait StorageAPI: ObjectIO {
pub trait StorageAPI: ObjectIO + Debug {
// NewNSLock TODO:
// Shutdown TODO:
// NSScanner TODO:
@@ -1000,7 +1157,15 @@ pub trait StorageAPI: ObjectIO {
delimiter: Option<String>,
max_keys: i32,
) -> Result<ListObjectVersionsInfo>;
// Walk TODO:
async fn walk(
self: Arc<Self>,
rx: CancellationToken,
bucket: &str,
prefix: &str,
result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
opts: WalkOptions,
) -> Result<()>;
async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
async fn verify_object_integrity(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
@@ -1021,7 +1186,7 @@ pub trait StorageAPI: ObjectIO {
bucket: &str,
objects: Vec<ObjectToDelete>,
opts: ObjectOptions,
) -> Result<(Vec<DeletedObject>, Vec<Option<Error>>)>;
) -> (Vec<DeletedObject>, Vec<Option<Error>>);
// TransitionObject TODO:
// RestoreTransitionedObject TODO:

View File

@@ -23,20 +23,23 @@ use crate::error::{
};
use crate::set_disk::SetDisks;
use crate::store::check_list_objs_args;
use crate::store_api::{ListObjectVersionsInfo, ListObjectsInfo, ObjectInfo, ObjectOptions};
use crate::store_api::{
ListObjectVersionsInfo, ListObjectsInfo, ObjectInfo, ObjectInfoOrErr, ObjectOptions, WalkOptions, WalkVersionsSortOrder,
};
use crate::store_utils::is_reserved_or_invalid_bucket;
use crate::{store::ECStore, store_api::ListObjectsV2Info};
use futures::future::join_all;
use rand::seq::SliceRandom;
use rustfs_filemeta::{
FileInfo, MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams,
MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams,
merge_file_meta_versions,
};
use rustfs_utils::path::{self, SLASH_SEPARATOR, base_dir_from_prefix};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::broadcast::{self, Receiver as B_Receiver};
use tokio::sync::broadcast::{self};
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
use uuid::Uuid;
@@ -529,14 +532,15 @@ impl ECStore {
}
// cancel channel
let (cancel_tx, cancel_rx) = broadcast::channel(1);
let cancel = CancellationToken::new();
let (err_tx, mut err_rx) = broadcast::channel::<Arc<Error>>(1);
let (sender, recv) = mpsc::channel(o.limit as usize);
let store = self.clone();
let opts = o.clone();
let cancel_rx1 = cancel_rx.resubscribe();
let cancel_rx1 = cancel.clone();
let err_tx1 = err_tx.clone();
let job1 = tokio::spawn(async move {
let mut opts = opts;
@@ -547,7 +551,7 @@ impl ECStore {
}
});
let cancel_rx2 = cancel_rx.resubscribe();
let cancel_rx2 = cancel.clone();
let (result_tx, mut result_rx) = mpsc::channel(1);
let err_tx2 = err_tx.clone();
@@ -559,7 +563,7 @@ impl ECStore {
}
// cancel call exit spawns
let _ = cancel_tx.send(true);
cancel.cancel();
});
let mut result = {
@@ -615,7 +619,7 @@ impl ECStore {
// Read all
async fn list_merged(
&self,
rx: B_Receiver<bool>,
rx: CancellationToken,
opts: ListPathOptions,
sender: Sender<MetaCacheEntry>,
) -> Result<Vec<ObjectInfo>> {
@@ -631,9 +635,8 @@ impl ECStore {
inputs.push(recv);
let opts = opts.clone();
let rx = rx.resubscribe();
futures.push(set.list_path(rx, opts, send));
let rx_clone = rx.clone();
futures.push(set.list_path(rx_clone, opts, send));
}
}
@@ -695,9 +698,9 @@ impl ECStore {
}
#[allow(unused_assignments)]
pub async fn walk(
pub async fn walk_internal(
self: Arc<Self>,
rx: B_Receiver<bool>,
rx: CancellationToken,
bucket: &str,
prefix: &str,
result: Sender<ObjectInfoOrErr>,
@@ -711,11 +714,11 @@ impl ECStore {
for eset in self.pools.iter() {
for set in eset.disk_set.iter() {
let (mut disks, infos, _) = set.get_online_disks_with_healing_and_info(true).await;
let rx = rx.resubscribe();
let opts = opts.clone();
let (sender, list_out_rx) = mpsc::channel::<MetaCacheEntry>(1);
inputs.push(list_out_rx);
let rx_clone = rx.clone();
futures.push(async move {
let mut ask_disks = get_list_quorum(&opts.ask_disks, set.set_drive_count as i32);
if ask_disks == -1 {
@@ -770,7 +773,7 @@ impl ECStore {
let tx2 = sender.clone();
list_path_raw(
rx.resubscribe(),
rx_clone,
ListPathRawOptions {
disks: disks.iter().cloned().map(Some).collect(),
fallback_disks: fallback_disks.iter().cloned().map(Some).collect(),
@@ -936,33 +939,8 @@ impl ECStore {
}
}
type WalkFilter = fn(&FileInfo) -> bool;
#[derive(Clone, Default)]
pub struct WalkOptions {
pub filter: Option<WalkFilter>, // return WalkFilter returns 'true/false'
pub marker: Option<String>, // set to skip until this object
pub latest_only: bool, // returns only latest versions for all matching objects
pub ask_disks: String, // dictates how many disks are being listed
pub versions_sort: WalkVersionsSortOrder, // sort order for versions of the same object; default: Ascending order in ModTime
pub limit: usize, // maximum number of items, 0 means no limit
}
#[derive(Clone, Default, PartialEq, Eq)]
pub enum WalkVersionsSortOrder {
#[default]
Ascending,
Descending,
}
#[derive(Debug)]
pub struct ObjectInfoOrErr {
pub item: Option<ObjectInfo>,
pub err: Option<Error>,
}
async fn gather_results(
_rx: B_Receiver<bool>,
_rx: CancellationToken,
opts: ListPathOptions,
recv: Receiver<MetaCacheEntry>,
results_tx: Sender<MetaCacheEntriesSortedResult>,
@@ -1067,12 +1045,11 @@ async fn select_from(
// TODO: exit when cancel
async fn merge_entry_channels(
rx: B_Receiver<bool>,
rx: CancellationToken,
in_channels: Vec<Receiver<MetaCacheEntry>>,
out_channel: Sender<MetaCacheEntry>,
read_quorum: usize,
) -> Result<()> {
let mut rx = rx;
let mut in_channels = in_channels;
if in_channels.len() == 1 {
loop {
@@ -1085,7 +1062,7 @@ async fn merge_entry_channels(
return Ok(())
}
},
_ = rx.recv()=>{
_ = rx.cancelled()=>{
info!("merge_entry_channels rx.recv() cancel");
return Ok(())
},
@@ -1228,7 +1205,7 @@ async fn merge_entry_channels(
}
impl SetDisks {
pub async fn list_path(&self, rx: B_Receiver<bool>, opts: ListPathOptions, sender: Sender<MetaCacheEntry>) -> Result<()> {
pub async fn list_path(&self, rx: CancellationToken, opts: ListPathOptions, sender: Sender<MetaCacheEntry>) -> Result<()> {
let (mut disks, infos, _) = self.get_online_disks_with_healing_and_info(true).await;
let mut ask_disks = get_list_quorum(&opts.ask_disks, self.set_drive_count as i32);

View File

@@ -15,8 +15,8 @@
use crate::config::storageclass::STANDARD;
use crate::disk::RUSTFS_META_BUCKET;
use regex::Regex;
use rustfs_filemeta::headers::AMZ_OBJECT_TAGGING;
use rustfs_filemeta::headers::AMZ_STORAGE_CLASS;
use rustfs_utils::http::headers::AMZ_OBJECT_TAGGING;
use rustfs_utils::http::headers::AMZ_STORAGE_CLASS;
use std::collections::HashMap;
use std::io::{Error, Result};

View File

@@ -35,7 +35,7 @@ uuid = { workspace = true, features = ["v4", "fast-rng", "serde"] }
tokio = { workspace = true, features = ["io-util", "macros", "sync"] }
xxhash-rust = { workspace = true, features = ["xxh64"] }
bytes.workspace = true
rustfs-utils = { workspace = true, features = ["hash"] }
rustfs-utils = { workspace = true, features = ["hash","http"] }
byteorder = { workspace = true }
tracing.workspace = true
thiserror.workspace = true

View File

@@ -13,11 +13,11 @@
// limitations under the License.
use crate::error::{Error, Result};
use crate::headers::RESERVED_METADATA_PREFIX_LOWER;
use crate::headers::RUSTFS_HEALING;
use crate::{ReplicationState, ReplicationStatusType, VersionPurgeStatusType};
use bytes::Bytes;
use rmp_serde::Serializer;
use rustfs_utils::HashAlgorithm;
use rustfs_utils::http::headers::{RESERVED_METADATA_PREFIX_LOWER, RUSTFS_HEALING};
use serde::Deserialize;
use serde::Serialize;
use std::collections::HashMap;
@@ -202,7 +202,7 @@ pub struct FileInfo {
// MarkDeleted marks this version as deleted
pub mark_deleted: bool,
// ReplicationState - Internal replication state to be passed back in ObjectInfo
// pub replication_state: Option<ReplicationState>, // TODO: implement ReplicationState
pub replication_state_internal: Option<ReplicationState>,
pub data: Option<Bytes>,
pub num_versions: usize,
pub successor_mod_time: Option<OffsetDateTime>,
@@ -471,6 +471,29 @@ impl FileInfo {
// TODO: Add replication_state comparison when implemented
// && self.replication_state == other.replication_state
}
pub fn version_purge_status(&self) -> VersionPurgeStatusType {
self.replication_state_internal
.as_ref()
.map(|v| v.composite_version_purge_status())
.unwrap_or(VersionPurgeStatusType::Empty)
}
pub fn replication_status(&self) -> ReplicationStatusType {
self.replication_state_internal
.as_ref()
.map(|v| v.composite_replication_status())
.unwrap_or(ReplicationStatusType::Empty)
}
pub fn delete_marker_replication_status(&self) -> ReplicationStatusType {
if self.deleted {
self.replication_state_internal
.as_ref()
.map(|v| v.composite_replication_status())
.unwrap_or(ReplicationStatusType::Empty)
} else {
ReplicationStatusType::Empty
}
}
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]

View File

@@ -15,12 +15,13 @@
use crate::error::{Error, Result};
use crate::fileinfo::{ErasureAlgo, ErasureInfo, FileInfo, FileInfoVersions, ObjectPartInfo, RawFileInfo};
use crate::filemeta_inline::InlineData;
use crate::headers::{
use crate::{ReplicationStatusType, VersionPurgeStatusType};
use byteorder::ByteOrder;
use bytes::Bytes;
use rustfs_utils::http::headers::{
self, AMZ_META_UNENCRYPTED_CONTENT_LENGTH, AMZ_META_UNENCRYPTED_CONTENT_MD5, AMZ_STORAGE_CLASS, RESERVED_METADATA_PREFIX,
RESERVED_METADATA_PREFIX_LOWER, VERSION_PURGE_STATUS_KEY,
};
use byteorder::ByteOrder;
use bytes::Bytes;
use s3s::header::X_AMZ_RESTORE;
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
@@ -30,6 +31,7 @@ use std::io::{Read, Write};
use std::{collections::HashMap, io::Cursor};
use time::OffsetDateTime;
use tokio::io::AsyncRead;
use tracing::error;
use uuid::Uuid;
use xxhash_rust::xxh64;
@@ -159,39 +161,57 @@ impl FileMeta {
let i = buf.len() as u64;
// check version, buf = buf[8..]
let (buf, _, _) = Self::check_xl2_v1(buf)?;
let (buf, _, _) = Self::check_xl2_v1(buf).map_err(|e| {
error!("failed to check XL2 v1 format: {}", e);
e
})?;
let (mut size_buf, buf) = buf.split_at(5);
// Get meta data, buf = crc + data
let bin_len = rmp::decode::read_bin_len(&mut size_buf)?;
let bin_len = rmp::decode::read_bin_len(&mut size_buf).map_err(|e| {
error!("failed to read binary length for metadata: {}", e);
Error::other(format!("failed to read binary length for metadata: {e}"))
})?;
if buf.len() < bin_len as usize {
error!("insufficient data for metadata: expected {} bytes, got {} bytes", bin_len, buf.len());
return Err(Error::other("insufficient data for metadata"));
}
let (meta, buf) = buf.split_at(bin_len as usize);
if buf.len() < 5 {
error!("insufficient data for CRC: expected 5 bytes, got {} bytes", buf.len());
return Err(Error::other("insufficient data for CRC"));
}
let (mut crc_buf, buf) = buf.split_at(5);
// crc check
let crc = rmp::decode::read_u32(&mut crc_buf)?;
let crc = rmp::decode::read_u32(&mut crc_buf).map_err(|e| {
error!("failed to read CRC value: {}", e);
Error::other(format!("failed to read CRC value: {e}"))
})?;
let meta_crc = xxh64::xxh64(meta, XXHASH_SEED) as u32;
if crc != meta_crc {
error!("xl file crc check failed: expected CRC {:#x}, got {:#x}", meta_crc, crc);
return Err(Error::other("xl file crc check failed"));
}
if !buf.is_empty() {
self.data.update(buf);
self.data.validate()?;
self.data.validate().map_err(|e| {
error!("data validation failed: {}", e);
e
})?;
}
// Parse meta
if !meta.is_empty() {
let (versions_len, _, meta_ver, meta) = Self::decode_xl_headers(meta)?;
let (versions_len, _, meta_ver, meta) = Self::decode_xl_headers(meta).map_err(|e| {
error!("failed to decode XL headers: {}", e);
e
})?;
// let (_, meta) = meta.split_at(read_size as usize);
@@ -201,24 +221,30 @@ impl FileMeta {
let mut cur: Cursor<&[u8]> = Cursor::new(meta);
for _ in 0..versions_len {
let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize;
let start = cur.position() as usize;
let end = start + bin_len;
let header_buf = &meta[start..end];
let bin_len = rmp::decode::read_bin_len(&mut cur).map_err(|e| {
error!("failed to read binary length for version header: {}", e);
Error::other(format!("failed to read binary length for version header: {e}"))
})? as usize;
let mut header_buf = vec![0u8; bin_len];
cur.read_exact(&mut header_buf)?;
let mut ver = FileMetaShallowVersion::default();
ver.header.unmarshal_msg(header_buf)?;
ver.header.unmarshal_msg(&header_buf).map_err(|e| {
error!("failed to unmarshal version header: {}", e);
e
})?;
cur.set_position(end as u64);
let bin_len = rmp::decode::read_bin_len(&mut cur).map_err(|e| {
error!("failed to read binary length for version metadata: {}", e);
Error::other(format!("failed to read binary length for version metadata: {e}"))
})? as usize;
let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize;
let start = cur.position() as usize;
let end = start + bin_len;
let ver_meta_buf = &meta[start..end];
let mut ver_meta_buf = vec![0u8; bin_len];
cur.read_exact(&mut ver_meta_buf)?;
ver.meta.extend_from_slice(ver_meta_buf);
cur.set_position(end as u64);
ver.meta.extend_from_slice(&ver_meta_buf);
self.versions.push(ver);
}
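// Layout sketch recovered from the parsing above (illustrative summary, not a change to the format):
//   [xl2 v1 header, consumed by check_xl2_v1]
//   [msgpack bin header, 5 bytes -> metadata length]
//   [metadata bytes, integrity-checked with xxh64(meta, XXHASH_SEED)]
//   [msgpack u32, 5 bytes -> expected CRC]
//   [optional inline data, validated via self.data.validate()]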
@@ -487,39 +513,39 @@ impl FileMeta {
let version = FileMetaVersion::from(fi);
self.add_version_filemata(version)
}
pub fn add_version_filemata(&mut self, version: FileMetaVersion) -> Result<()> {
if !version.valid() {
return Err(Error::other("file meta version invalid"));
}
// should replace
for (idx, ver) in self.versions.iter().enumerate() {
if ver.header.version_id != vid {
continue;
}
return self.set_idx(idx, version);
// 1000 is the limit of versions TODO: make it configurable
if self.versions.len() + 1 > 1000 {
return Err(Error::other(
"You've exceeded the limit on the number of versions you can create on this object",
));
}
// TODO: version count limit !
if self.versions.is_empty() {
self.versions.push(FileMetaShallowVersion::try_from(version)?);
return Ok(());
}
let vid = version.get_version_id();
if let Some(fidx) = self.versions.iter().position(|v| v.header.version_id == vid) {
return self.set_idx(fidx, version);
}
let mod_time = version.get_mod_time();
// push a placeholder with a -1 mod_time value, so we can replace it below
self.versions.push(FileMetaShallowVersion {
header: FileMetaVersionHeader {
mod_time: Some(OffsetDateTime::from_unix_timestamp(-1)?),
..Default::default()
},
..Default::default()
});
for (idx, exist) in self.versions.iter().enumerate() {
if let Some(ref ex_mt) = exist.header.mod_time {
if let Some(ref in_md) = mod_time {
if ex_mt <= in_md {
// insert
self.versions.insert(idx, FileMetaShallowVersion::try_from(version)?);
self.versions.pop();
return Ok(());
}
}
@@ -527,35 +553,33 @@ impl FileMeta {
}
Err(Error::other("add_version failed"))
}
pub fn add_version_filemata(&mut self, ver: FileMetaVersion) -> Result<()> {
if !ver.valid() {
return Err(Error::other("attempted to add invalid version"));
}
// if !ver.valid() {
// return Err(Error::other("attempted to add invalid version"));
// }
if self.versions.len() + 1 >= 100 {
return Err(Error::other(
"You've exceeded the limit on the number of versions you can create on this object",
));
}
// if self.versions.len() + 1 >= 100 {
// return Err(Error::other(
// "You've exceeded the limit on the number of versions you can create on this object",
// ));
// }
let mod_time = ver.get_mod_time();
let encoded = ver.marshal_msg()?;
let new_version = FileMetaShallowVersion {
header: ver.header(),
meta: encoded,
};
// let mod_time = ver.get_mod_time();
// let encoded = ver.marshal_msg()?;
// let new_version = FileMetaShallowVersion {
// header: ver.header(),
// meta: encoded,
// };
// Find the insertion position: insert before the first element with mod_time >= new mod_time
// This maintains descending order by mod_time (newest first)
let insert_pos = self
.versions
.iter()
.position(|existing| existing.header.mod_time <= mod_time)
.unwrap_or(self.versions.len());
self.versions.insert(insert_pos, new_version);
Ok(())
// // Find the insertion position: insert before the first element with mod_time >= new mod_time
// // This maintains descending order by mod_time (newest first)
// let insert_pos = self
// .versions
// .iter()
// .position(|existing| existing.header.mod_time <= mod_time)
// .unwrap_or(self.versions.len());
// self.versions.insert(insert_pos, new_version);
// Ok(())
}
// delete_version deletes version, returns data_dir
@@ -575,10 +599,97 @@ impl FileMeta {
}
let mut update_version = fi.mark_deleted;
/*if fi.version_purge_status().is_empty()
if fi.version_purge_status().is_empty()
&& (fi.delete_marker_replication_status() == ReplicationStatusType::Replica
|| fi.delete_marker_replication_status() == ReplicationStatusType::Empty)
{
update_version = fi.mark_deleted;
}*/
} else {
if fi.deleted
&& fi.version_purge_status() != VersionPurgeStatusType::Complete
&& (!fi.version_purge_status().is_empty() || fi.delete_marker_replication_status().is_empty())
{
update_version = true;
}
if !fi.version_purge_status().is_empty() && fi.version_purge_status() != VersionPurgeStatusType::Complete {
update_version = true;
}
}
if fi.deleted {
if !fi.delete_marker_replication_status().is_empty() {
if let Some(delete_marker) = ventry.delete_marker.as_mut() {
if fi.delete_marker_replication_status() == ReplicationStatusType::Replica {
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-status"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replica_status.clone())
.unwrap_or_default()
.as_str()
.as_bytes()
.to_vec(),
);
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-timestamp"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replica_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
} else {
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replication_status_internal.clone().unwrap_or_default())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replication_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
}
}
}
if !fi.version_purge_status().is_empty() {
if let Some(delete_marker) = ventry.delete_marker.as_mut() {
delete_marker.meta_sys.insert(
VERSION_PURGE_STATUS_KEY.to_string(),
fi.replication_state_internal
.as_ref()
.map(|v| v.version_purge_status_internal.clone().unwrap_or_default())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
}
}
if let Some(delete_marker) = ventry.delete_marker.as_mut() {
for (k, v) in fi
.replication_state_internal
.as_ref()
.map(|v| v.reset_statuses_map.clone())
.unwrap_or_default()
{
delete_marker.meta_sys.insert(k.clone(), v.clone().as_bytes().to_vec());
}
}
}
// ???
if fi.transition_status == TRANSITION_COMPLETE {
update_version = false;
}
@@ -591,22 +702,111 @@ impl FileMeta {
match ver.header.version_type {
VersionType::Invalid | VersionType::Legacy => return Err(Error::other("invalid file meta version")),
VersionType::Delete => {
self.versions.remove(i);
if fi.deleted && fi.version_id.is_none() {
self.add_version_filemata(ventry)?;
if update_version {
let mut v = self.get_idx(i)?;
if v.delete_marker.is_none() {
v.delete_marker = Some(MetaDeleteMarker {
version_id: fi.version_id,
mod_time: fi.mod_time,
meta_sys: HashMap::new(),
});
}
if let Some(delete_marker) = v.delete_marker.as_mut() {
if !fi.delete_marker_replication_status().is_empty() {
if fi.delete_marker_replication_status() == ReplicationStatusType::Replica {
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-status"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replica_status.clone())
.unwrap_or_default()
.as_str()
.as_bytes()
.to_vec(),
);
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-timestamp"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replica_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
} else {
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replication_status_internal.clone().unwrap_or_default())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replication_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
}
}
for (k, v) in fi
.replication_state_internal
.as_ref()
.map(|v| v.reset_statuses_map.clone())
.unwrap_or_default()
{
delete_marker.meta_sys.insert(k.clone(), v.clone().as_bytes().to_vec());
}
}
self.set_idx(i, v)?;
return Ok(None);
}
self.versions.remove(i);
if (fi.mark_deleted && fi.version_purge_status() != VersionPurgeStatusType::Complete)
|| (fi.deleted && fi.version_id.is_none())
{
self.add_version_filemata(ventry)?;
}
return Ok(None);
}
VersionType::Object => {
if update_version && !fi.deleted {
let v = self.get_idx(i)?;
let mut v = self.get_idx(i)?;
self.versions.remove(i);
if let Some(obj) = v.object.as_mut() {
obj.meta_sys.insert(
VERSION_PURGE_STATUS_KEY.to_string(),
fi.replication_state_internal
.as_ref()
.map(|v| v.version_purge_status_internal.clone().unwrap_or_default())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
for (k, v) in fi
.replication_state_internal
.as_ref()
.map(|v| v.reset_statuses_map.clone())
.unwrap_or_default()
{
obj.meta_sys.insert(k.clone(), v.clone().as_bytes().to_vec());
}
}
let a = v.object.map(|v| v.data_dir).unwrap_or_default();
return Ok(a);
let old_dir = v.object.as_ref().map(|v| v.data_dir).unwrap_or_default();
self.set_idx(i, v)?;
return Ok(old_dir);
}
}
}
@@ -641,31 +841,37 @@ impl FileMeta {
let obj_version_id = obj.version_id;
let obj_data_dir = obj.data_dir;
if fi.expire_restored {
let mut err = if fi.expire_restored {
obj.remove_restore_hdrs();
self.set_idx(i, ver)?;
self.set_idx(i, ver).err()
} else if fi.transition_status == TRANSITION_COMPLETE {
obj.set_transition(fi);
obj.reset_inline_data();
self.set_idx(i, ver)?;
self.set_idx(i, ver).err()
} else {
self.versions.remove(i);
let (free_version, to_free) = obj.init_free_version(fi);
if to_free {
self.add_version_filemata(free_version)?;
self.add_version_filemata(free_version).err()
} else {
None
}
}
};
if fi.deleted {
self.add_version_filemata(ventry)?;
err = self.add_version_filemata(ventry).err();
}
if self.shared_data_dir_count(obj_version_id, obj_data_dir) > 0 {
return Ok(None);
}
if let Some(e) = err {
return Err(e);
}
Ok(obj_data_dir)
}
@@ -1642,17 +1848,15 @@ impl MetaObject {
free_entry.delete_marker = Some(MetaDeleteMarker {
version_id: Some(vid),
mod_time: self.mod_time,
meta_sys: Some(HashMap::<String, Vec<u8>>::new()),
meta_sys: HashMap::<String, Vec<u8>>::new(),
});
free_entry
.delete_marker
.as_mut()
.unwrap()
let delete_marker = free_entry.delete_marker.as_mut().unwrap();
delete_marker
.meta_sys
.as_mut()
.unwrap()
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{FREE_VERSION}"), vec![]);
let tier_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITION_TIER}");
let tier_obj_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_OBJECTNAME}");
let tier_obj_vid_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_VERSION_ID}");
@@ -1660,14 +1864,7 @@ impl MetaObject {
let aa = [tier_key, tier_obj_key, tier_obj_vid_key];
for (k, v) in &self.meta_sys {
if aa.contains(k) {
free_entry
.delete_marker
.as_mut()
.unwrap()
.meta_sys
.as_mut()
.unwrap()
.insert(k.clone(), v.clone());
delete_marker.meta_sys.insert(k.clone(), v.clone());
}
}
return (free_entry, true);
@@ -1737,19 +1934,16 @@ pub struct MetaDeleteMarker {
#[serde(rename = "MTime")]
pub mod_time: Option<OffsetDateTime>, // Object delete marker modified time
#[serde(rename = "MetaSys")]
pub meta_sys: Option<HashMap<String, Vec<u8>>>, // Delete marker internal metadata
pub meta_sys: HashMap<String, Vec<u8>>, // Delete marker internal metadata
}
impl MetaDeleteMarker {
pub fn free_version(&self) -> bool {
self.meta_sys
.as_ref()
.map(|v| v.get(FREE_VERSION_META_HEADER).is_some())
.unwrap_or_default()
self.meta_sys.contains_key(FREE_VERSION_META_HEADER)
}
pub fn into_fileinfo(&self, volume: &str, path: &str, _all_parts: bool) -> FileInfo {
let metadata = self.meta_sys.clone().unwrap_or_default();
let metadata = self.meta_sys.clone();
FileInfo {
version_id: self.version_id.filter(|&vid| !vid.is_nil()),
@@ -1895,7 +2089,7 @@ impl From<FileInfo> for MetaDeleteMarker {
Self {
version_id: value.version_id,
mod_time: value.mod_time,
meta_sys: None,
meta_sys: HashMap::new(),
}
}
}
@@ -2794,7 +2988,7 @@ mod test {
let delete_marker = MetaDeleteMarker {
version_id: Some(Uuid::new_v4()),
mod_time: Some(OffsetDateTime::now_utc()),
meta_sys: None,
meta_sys: HashMap::new(),
};
let delete_version = FileMetaVersion {

View File

@@ -16,8 +16,9 @@ mod error;
pub mod fileinfo;
mod filemeta;
mod filemeta_inline;
pub mod headers;
pub mod metacache;
// pub mod headers;
mod metacache;
mod replication;
pub mod test_data;
@@ -26,3 +27,4 @@ pub use fileinfo::*;
pub use filemeta::*;
pub use filemeta_inline::*;
pub use metacache::*;
pub use replication::*;

View File

@@ -0,0 +1,494 @@
use core::fmt;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;
use time::OffsetDateTime;
/// Replication status type used for the x-amz-replication-status header
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default, Hash)]
pub enum ReplicationStatusType {
/// Pending - replication is pending.
Pending,
/// Completed - replication completed ok.
Completed,
/// CompletedLegacy was called "COMPLETE" incorrectly.
CompletedLegacy,
/// Failed - replication failed.
Failed,
/// Replica - this is a replica.
Replica,
#[default]
Empty,
}
impl ReplicationStatusType {
/// Returns string representation of status
pub fn as_str(&self) -> &'static str {
match self {
ReplicationStatusType::Pending => "PENDING",
ReplicationStatusType::Completed => "COMPLETED",
ReplicationStatusType::CompletedLegacy => "COMPLETE",
ReplicationStatusType::Failed => "FAILED",
ReplicationStatusType::Replica => "REPLICA",
ReplicationStatusType::Empty => "",
}
}
pub fn is_empty(&self) -> bool {
matches!(self, ReplicationStatusType::Empty)
}
}
impl fmt::Display for ReplicationStatusType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl From<&str> for ReplicationStatusType {
fn from(s: &str) -> Self {
match s {
"PENDING" => ReplicationStatusType::Pending,
"COMPLETED" => ReplicationStatusType::Completed,
"COMPLETE" => ReplicationStatusType::CompletedLegacy,
"FAILED" => ReplicationStatusType::Failed,
"REPLICA" => ReplicationStatusType::Replica,
_ => ReplicationStatusType::Empty,
}
}
}
impl From<VersionPurgeStatusType> for ReplicationStatusType {
fn from(status: VersionPurgeStatusType) -> Self {
match status {
VersionPurgeStatusType::Pending => ReplicationStatusType::Pending,
VersionPurgeStatusType::Complete => ReplicationStatusType::Completed,
VersionPurgeStatusType::Failed => ReplicationStatusType::Failed,
VersionPurgeStatusType::Empty => ReplicationStatusType::Empty,
}
}
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum VersionPurgeStatusType {
Pending,
Complete,
Failed,
#[default]
Empty,
}
impl VersionPurgeStatusType {
/// Returns string representation of version purge status
pub fn as_str(&self) -> &'static str {
match self {
VersionPurgeStatusType::Pending => "PENDING",
VersionPurgeStatusType::Complete => "COMPLETE",
VersionPurgeStatusType::Failed => "FAILED",
VersionPurgeStatusType::Empty => "",
}
}
/// Returns true if the version purge is still pending or has failed (i.e. not yet complete).
pub fn is_pending(&self) -> bool {
matches!(self, VersionPurgeStatusType::Pending | VersionPurgeStatusType::Failed)
}
pub fn is_empty(&self) -> bool {
matches!(self, VersionPurgeStatusType::Empty)
}
}
impl fmt::Display for VersionPurgeStatusType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl From<&str> for VersionPurgeStatusType {
fn from(s: &str) -> Self {
match s {
"PENDING" => VersionPurgeStatusType::Pending,
"COMPLETE" => VersionPurgeStatusType::Complete,
"FAILED" => VersionPurgeStatusType::Failed,
_ => VersionPurgeStatusType::Empty,
}
}
}
/// Type - replication type enum
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum ReplicationType {
#[default]
Unset,
Object,
Delete,
Metadata,
Heal,
ExistingObject,
Resync,
All,
}
impl ReplicationType {
pub fn as_str(&self) -> &'static str {
match self {
ReplicationType::Unset => "",
ReplicationType::Object => "OBJECT",
ReplicationType::Delete => "DELETE",
ReplicationType::Metadata => "METADATA",
ReplicationType::Heal => "HEAL",
ReplicationType::ExistingObject => "EXISTING_OBJECT",
ReplicationType::Resync => "RESYNC",
ReplicationType::All => "ALL",
}
}
pub fn is_valid(&self) -> bool {
matches!(
self,
ReplicationType::Object
| ReplicationType::Delete
| ReplicationType::Metadata
| ReplicationType::Heal
| ReplicationType::ExistingObject
| ReplicationType::Resync
| ReplicationType::All
)
}
pub fn is_data_replication(&self) -> bool {
matches!(self, ReplicationType::Object | ReplicationType::Delete | ReplicationType::Heal)
}
}
impl fmt::Display for ReplicationType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl From<&str> for ReplicationType {
fn from(s: &str) -> Self {
match s {
"UNSET" => ReplicationType::Unset,
"OBJECT" => ReplicationType::Object,
"DELETE" => ReplicationType::Delete,
"METADATA" => ReplicationType::Metadata,
"HEAL" => ReplicationType::Heal,
"EXISTING_OBJECT" => ReplicationType::ExistingObject,
"RESYNC" => ReplicationType::Resync,
"ALL" => ReplicationType::All,
_ => ReplicationType::Unset,
}
}
}
/// ReplicationState represents internal replication state
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq)]
pub struct ReplicationState {
pub replica_timestamp: Option<OffsetDateTime>,
pub replica_status: ReplicationStatusType,
pub delete_marker: bool,
pub replication_timestamp: Option<OffsetDateTime>,
pub replication_status_internal: Option<String>,
pub version_purge_status_internal: Option<String>,
pub replicate_decision_str: String,
pub targets: HashMap<String, ReplicationStatusType>,
pub purge_targets: HashMap<String, VersionPurgeStatusType>,
pub reset_statuses_map: HashMap<String, String>,
}
impl ReplicationState {
pub fn new() -> Self {
Self::default()
}
/// Returns true if replication state is identical for version purge statuses and replication statuses
pub fn equal(&self, other: &ReplicationState) -> bool {
self.replica_status == other.replica_status
&& self.replication_status_internal == other.replication_status_internal
&& self.version_purge_status_internal == other.version_purge_status_internal
}
/// Returns overall replication status for the object version being replicated
pub fn composite_replication_status(&self) -> ReplicationStatusType {
if let Some(replication_status_internal) = &self.replication_status_internal {
match ReplicationStatusType::from(replication_status_internal.as_str()) {
ReplicationStatusType::Pending
| ReplicationStatusType::Completed
| ReplicationStatusType::Failed
| ReplicationStatusType::Replica => {
return ReplicationStatusType::from(replication_status_internal.as_str());
}
_ => {
let repl_status = get_composite_replication_status(&self.targets);
if self.replica_timestamp.is_none() {
return repl_status;
}
if repl_status == ReplicationStatusType::Completed {
if let (Some(replica_timestamp), Some(replication_timestamp)) =
(self.replica_timestamp, self.replication_timestamp)
{
if replica_timestamp > replication_timestamp {
return self.replica_status.clone();
}
}
}
return repl_status;
}
}
} else if self.replica_status != ReplicationStatusType::default() {
return self.replica_status.clone();
}
ReplicationStatusType::default()
}
/// Returns overall replication purge status for the permanent delete being replicated
pub fn composite_version_purge_status(&self) -> VersionPurgeStatusType {
match VersionPurgeStatusType::from(self.version_purge_status_internal.clone().unwrap_or_default().as_str()) {
VersionPurgeStatusType::Pending | VersionPurgeStatusType::Complete | VersionPurgeStatusType::Failed => {
VersionPurgeStatusType::from(self.version_purge_status_internal.clone().unwrap_or_default().as_str())
}
_ => get_composite_version_purge_status(&self.purge_targets),
}
}
/// Returns a ReplicatedTargetInfo initialized with the previous replication state for the given target ARN
pub fn target_state(&self, arn: &str) -> ReplicatedTargetInfo {
ReplicatedTargetInfo {
arn: arn.to_string(),
prev_replication_status: self.targets.get(arn).cloned().unwrap_or_default(),
version_purge_status: self.purge_targets.get(arn).cloned().unwrap_or_default(),
resync_timestamp: self.reset_statuses_map.get(arn).cloned().unwrap_or_default(),
..Default::default()
}
}
}
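// Illustrative sketch (not part of this change): a directly parseable internal status takes
// precedence over the per-target roll-up in composite_replication_status().
//
//     let state = ReplicationState {
//         replication_status_internal: Some("PENDING".to_string()),
//         ..Default::default()
//     };
//     assert_eq!(state.composite_replication_status(), ReplicationStatusType::Pending);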
pub fn get_composite_replication_status(targets: &HashMap<String, ReplicationStatusType>) -> ReplicationStatusType {
if targets.is_empty() {
return ReplicationStatusType::Empty;
}
let mut completed = 0;
for status in targets.values() {
match status {
ReplicationStatusType::Failed => return ReplicationStatusType::Failed,
ReplicationStatusType::Completed => completed += 1,
_ => {}
}
}
if completed == targets.len() {
ReplicationStatusType::Completed
} else {
ReplicationStatusType::Pending
}
}
pub fn get_composite_version_purge_status(targets: &HashMap<String, VersionPurgeStatusType>) -> VersionPurgeStatusType {
if targets.is_empty() {
return VersionPurgeStatusType::default();
}
let mut completed = 0;
for status in targets.values() {
match status {
VersionPurgeStatusType::Failed => return VersionPurgeStatusType::Failed,
VersionPurgeStatusType::Complete => completed += 1,
_ => {}
}
}
if completed == targets.len() {
VersionPurgeStatusType::Complete
} else {
VersionPurgeStatusType::Pending
}
}
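// Small test sketch for the roll-up rules above (illustrative, not part of this change;
// the target ARNs are hypothetical): any FAILED target wins, all COMPLETED wins, else PENDING.
#[cfg(test)]
mod composite_status_sketch {
    use super::*;
    use std::collections::HashMap;

    #[test]
    fn rolls_up_target_statuses() {
        let mut targets = HashMap::new();
        targets.insert("arn:rustfs:replication::target-a".to_string(), ReplicationStatusType::Completed);
        targets.insert("arn:rustfs:replication::target-b".to_string(), ReplicationStatusType::Pending);
        // one target still pending -> composite is Pending
        assert_eq!(get_composite_replication_status(&targets), ReplicationStatusType::Pending);

        targets.insert("arn:rustfs:replication::target-b".to_string(), ReplicationStatusType::Failed);
        // any failed target -> composite is Failed
        assert_eq!(get_composite_replication_status(&targets), ReplicationStatusType::Failed);
    }
}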
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum ReplicationAction {
/// Replicate all data
All,
/// Replicate only metadata
Metadata,
/// Do not replicate
#[default]
None,
}
impl ReplicationAction {
/// Returns string representation of replication action
pub fn as_str(&self) -> &'static str {
match self {
ReplicationAction::All => "all",
ReplicationAction::Metadata => "metadata",
ReplicationAction::None => "none",
}
}
}
impl fmt::Display for ReplicationAction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl From<&str> for ReplicationAction {
fn from(s: &str) -> Self {
match s {
"all" => ReplicationAction::All,
"metadata" => ReplicationAction::Metadata,
"none" => ReplicationAction::None,
_ => ReplicationAction::None,
}
}
}
/// ReplicatedTargetInfo struct represents replication info on a target
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ReplicatedTargetInfo {
pub arn: String,
pub size: i64,
pub duration: Duration,
pub replication_action: ReplicationAction,
pub op_type: ReplicationType,
pub replication_status: ReplicationStatusType,
pub prev_replication_status: ReplicationStatusType,
pub version_purge_status: VersionPurgeStatusType,
pub resync_timestamp: String,
pub replication_resynced: bool,
pub endpoint: String,
pub secure: bool,
pub error: Option<String>,
}
impl ReplicatedTargetInfo {
/// Returns true for a target if arn is empty
pub fn is_empty(&self) -> bool {
self.arn.is_empty()
}
}
/// ReplicatedInfos struct contains replication information for multiple targets
#[derive(Debug, Clone)]
pub struct ReplicatedInfos {
pub replication_timestamp: Option<OffsetDateTime>,
pub targets: Vec<ReplicatedTargetInfo>,
}
impl ReplicatedInfos {
/// Returns the total size of completed replications
pub fn completed_size(&self) -> i64 {
let mut sz = 0i64;
for target in &self.targets {
if target.is_empty() {
continue;
}
if target.replication_status == ReplicationStatusType::Completed
&& target.prev_replication_status != ReplicationStatusType::Completed
{
sz += target.size;
}
}
sz
}
/// Returns true if a replication resync was recorded on any of the targets for the queued object version
pub fn replication_resynced(&self) -> bool {
for target in &self.targets {
if target.is_empty() || !target.replication_resynced {
continue;
}
return true;
}
false
}
/// Returns internal representation of replication status for all targets
pub fn replication_status_internal(&self) -> Option<String> {
let mut result = String::new();
for target in &self.targets {
if target.is_empty() {
continue;
}
result.push_str(&format!("{}={};", target.arn, target.replication_status));
}
if result.is_empty() { None } else { Some(result) }
}
/// Returns overall replication status across all targets
pub fn replication_status(&self) -> ReplicationStatusType {
if self.targets.is_empty() {
return ReplicationStatusType::Empty;
}
let mut completed = 0;
for target in &self.targets {
match target.replication_status {
ReplicationStatusType::Failed => return ReplicationStatusType::Failed,
ReplicationStatusType::Completed => completed += 1,
_ => {}
}
}
if completed == self.targets.len() {
ReplicationStatusType::Completed
} else {
ReplicationStatusType::Pending
}
}
/// Returns overall version purge status across all targets
pub fn version_purge_status(&self) -> VersionPurgeStatusType {
if self.targets.is_empty() {
return VersionPurgeStatusType::Empty;
}
let mut completed = 0;
for target in &self.targets {
match target.version_purge_status {
VersionPurgeStatusType::Failed => return VersionPurgeStatusType::Failed,
VersionPurgeStatusType::Complete => completed += 1,
_ => {}
}
}
if completed == self.targets.len() {
VersionPurgeStatusType::Complete
} else {
VersionPurgeStatusType::Pending
}
}
/// Returns internal representation of version purge status for all targets
pub fn version_purge_status_internal(&self) -> Option<String> {
let mut result = String::new();
for target in &self.targets {
if target.is_empty() || target.version_purge_status.is_empty() {
continue;
}
result.push_str(&format!("{}={};", target.arn, target.version_purge_status));
}
if result.is_empty() { None } else { Some(result) }
}
/// Returns replication action based on target that actually performed replication
pub fn action(&self) -> ReplicationAction {
for target in &self.targets {
if target.is_empty() {
continue;
}
// rely on replication action from target that actually performed replication now.
if target.prev_replication_status != ReplicationStatusType::Completed {
return target.replication_action;
}
}
ReplicationAction::None
}
}
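// Illustrative sketch (not part of this change; the ARN is hypothetical): the "arn=STATUS;"
// string returned by replication_status_internal() and the overall status roll-up.
#[cfg(test)]
mod replicated_infos_sketch {
    use super::*;

    #[test]
    fn internal_status_encoding() {
        let infos = ReplicatedInfos {
            replication_timestamp: None,
            targets: vec![ReplicatedTargetInfo {
                arn: "arn:rustfs:replication::target-a".to_string(),
                replication_status: ReplicationStatusType::Completed,
                ..Default::default()
            }],
        };
        assert_eq!(
            infos.replication_status_internal().as_deref(),
            Some("arn:rustfs:replication::target-a=COMPLETED;")
        );
        assert_eq!(infos.replication_status(), ReplicationStatusType::Completed);
    }
}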

View File

@@ -67,7 +67,7 @@ pub fn create_real_xlmeta() -> Result<Vec<u8>> {
let delete_marker = MetaDeleteMarker {
version_id: Some(delete_version_id),
mod_time: Some(OffsetDateTime::from_unix_timestamp(1705312260)?), // 1 minute later
meta_sys: None,
meta_sys: HashMap::new(),
};
let delete_file_version = FileMetaVersion {
@@ -151,7 +151,7 @@ pub fn create_complex_xlmeta() -> Result<Vec<u8>> {
let delete_marker = MetaDeleteMarker {
version_id: Some(delete_version_id),
mod_time: Some(OffsetDateTime::from_unix_timestamp(1705312200 + i * 60 + 30)?),
meta_sys: None,
meta_sys: HashMap::new(),
};
let delete_file_version = FileMetaVersion {

View File

@@ -46,5 +46,6 @@ jsonwebtoken = { workspace = true }
tracing.workspace = true
rustfs-madmin.workspace = true
rustfs-utils = { workspace = true, features = ["path"] }
tokio-util.workspace = true
[dev-dependencies]

View File

@@ -20,6 +20,8 @@ use crate::{
manager::{extract_jwt_claims, get_default_policyes},
};
use futures::future::join_all;
use rustfs_ecstore::StorageAPI as _;
use rustfs_ecstore::store_api::{ObjectInfoOrErr, WalkOptions};
use rustfs_ecstore::{
config::{
RUSTFS_CONFIG_PREFIX,
@@ -28,15 +30,14 @@ use rustfs_ecstore::{
global::get_global_action_cred,
store::ECStore,
store_api::{ObjectInfo, ObjectOptions},
store_list_objects::{ObjectInfoOrErr, WalkOptions},
};
use rustfs_policy::{auth::UserIdentity, policy::PolicyDoc};
use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
use serde::{Serialize, de::DeserializeOwned};
use std::sync::LazyLock;
use std::{collections::HashMap, sync::Arc};
use tokio::sync::broadcast::{self, Receiver as B_Receiver};
use tokio::sync::mpsc::{self, Sender};
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};
pub static IAM_CONFIG_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam"));
@@ -144,7 +145,7 @@ impl ObjectStore {
Ok((Self::decrypt_data(&data)?, obj))
}
async fn list_iam_config_items(&self, prefix: &str, ctx_rx: B_Receiver<bool>, sender: Sender<StringOrErr>) {
async fn list_iam_config_items(&self, prefix: &str, ctx: CancellationToken, sender: Sender<StringOrErr>) {
// debug!("list iam config items, prefix: {}", &prefix);
// implemented via StorageAPI::walk below
@@ -156,7 +157,11 @@ impl ObjectStore {
let (tx, mut rx) = mpsc::channel::<ObjectInfoOrErr>(100);
let path = prefix.to_owned();
tokio::spawn(async move { store.walk(ctx_rx, Self::BUCKET_NAME, &path, tx, WalkOptions::default()).await });
tokio::spawn(async move {
store
.walk(ctx.clone(), Self::BUCKET_NAME, &path, tx, WalkOptions::default())
.await
});
let prefix = prefix.to_owned();
tokio::spawn(async move {
@@ -190,10 +195,11 @@ impl ObjectStore {
}
async fn list_all_iamconfig_items(&self) -> Result<HashMap<String, Vec<String>>> {
let (ctx_tx, ctx_rx) = broadcast::channel(1);
let (tx, mut rx) = mpsc::channel::<StringOrErr>(100);
self.list_iam_config_items(format!("{}/", *IAM_CONFIG_PREFIX).as_str(), ctx_rx, tx)
let ctx = CancellationToken::new();
self.list_iam_config_items(format!("{}/", *IAM_CONFIG_PREFIX).as_str(), ctx.clone(), tx)
.await;
let mut res = HashMap::new();
@@ -201,7 +207,7 @@ impl ObjectStore {
while let Some(v) = rx.recv().await {
if let Some(err) = v.err {
warn!("list_iam_config_items {:?}", err);
let _ = ctx_tx.send(true);
ctx.cancel();
return Err(err);
}
@@ -215,7 +221,7 @@ impl ObjectStore {
}
}
let _ = ctx_tx.send(true);
ctx.cancel();
Ok(res)
}
@@ -477,15 +483,15 @@ impl Store for ObjectStore {
UserType::None => "",
};
let (ctx_tx, ctx_rx) = broadcast::channel(1);
let ctx = CancellationToken::new();
let (tx, mut rx) = mpsc::channel::<StringOrErr>(100);
self.list_iam_config_items(base_prefix, ctx_rx, tx).await;
self.list_iam_config_items(base_prefix, ctx.clone(), tx).await;
while let Some(v) = rx.recv().await {
if let Some(err) = v.err {
warn!("list_iam_config_items {:?}", err);
let _ = ctx_tx.send(true);
ctx.cancel();
return Err(err);
}
@@ -495,7 +501,7 @@ impl Store for ObjectStore {
self.load_user(&name, user_type, m).await?;
}
}
let _ = ctx_tx.send(true);
ctx.cancel();
Ok(())
}
async fn load_secret_key(&self, name: &str, user_type: UserType) -> Result<String> {
@@ -539,15 +545,15 @@ impl Store for ObjectStore {
Ok(())
}
async fn load_groups(&self, m: &mut HashMap<String, GroupInfo>) -> Result<()> {
let (ctx_tx, ctx_rx) = broadcast::channel(1);
let ctx = CancellationToken::new();
let (tx, mut rx) = mpsc::channel::<StringOrErr>(100);
self.list_iam_config_items(&IAM_CONFIG_GROUPS_PREFIX, ctx_rx, tx).await;
self.list_iam_config_items(&IAM_CONFIG_GROUPS_PREFIX, ctx.clone(), tx).await;
while let Some(v) = rx.recv().await {
if let Some(err) = v.err {
warn!("list_iam_config_items {:?}", err);
let _ = ctx_tx.send(true);
ctx.cancel();
return Err(err);
}
@@ -557,7 +563,7 @@ impl Store for ObjectStore {
self.load_group(&name, m).await?;
}
}
let _ = ctx_tx.send(true);
ctx.cancel();
Ok(())
}
@@ -603,15 +609,15 @@ impl Store for ObjectStore {
Ok(())
}
async fn load_policy_docs(&self, m: &mut HashMap<String, PolicyDoc>) -> Result<()> {
let (ctx_tx, ctx_rx) = broadcast::channel(1);
let ctx = CancellationToken::new();
let (tx, mut rx) = mpsc::channel::<StringOrErr>(100);
self.list_iam_config_items(&IAM_CONFIG_POLICIES_PREFIX, ctx_rx, tx).await;
self.list_iam_config_items(&IAM_CONFIG_POLICIES_PREFIX, ctx.clone(), tx).await;
while let Some(v) = rx.recv().await {
if let Some(err) = v.err {
warn!("list_iam_config_items {:?}", err);
let _ = ctx_tx.send(true);
ctx.cancel();
return Err(err);
}
@@ -621,7 +627,7 @@ impl Store for ObjectStore {
self.load_policy_doc(&name, m).await?;
}
}
let _ = ctx_tx.send(true);
ctx.cancel();
Ok(())
}
@@ -678,15 +684,15 @@ impl Store for ObjectStore {
}
}
};
let (ctx_tx, ctx_rx) = broadcast::channel(1);
let ctx = CancellationToken::new();
let (tx, mut rx) = mpsc::channel::<StringOrErr>(100);
self.list_iam_config_items(base_path, ctx_rx, tx).await;
self.list_iam_config_items(base_path, ctx.clone(), tx).await;
while let Some(v) = rx.recv().await {
if let Some(err) = v.err {
warn!("list_iam_config_items {:?}", err);
let _ = ctx_tx.send(true);
ctx.cancel();
return Err(err);
}
@@ -696,7 +702,7 @@ impl Store for ObjectStore {
self.load_mapped_policy(name, user_type, is_group, m).await?;
}
}
let _ = ctx_tx.send(true);
ctx.cancel(); // TODO: check if this is needed
Ok(())
}

View File

@@ -111,7 +111,7 @@ impl LocalKmsClient {
/// Get the file path for a master key
fn master_key_path(&self, key_id: &str) -> PathBuf {
self.config.key_dir.join(format!("{}.key", key_id))
self.config.key_dir.join(format!("{key_id}.key"))
}
/// Load a master key from disk
@@ -334,12 +334,11 @@ impl KmsClient for LocalKmsClient {
if let Some(actual_value) = envelope.encryption_context.get(key) {
if actual_value != expected_value {
return Err(KmsError::context_mismatch(format!(
"Context mismatch for key '{}': expected '{}', got '{}'",
key, expected_value, actual_value
"Context mismatch for key '{key}': expected '{expected_value}', got '{actual_value}'"
)));
}
} else {
return Err(KmsError::context_mismatch(format!("Missing context key '{}'", key)));
return Err(KmsError::context_mismatch(format!("Missing context key '{key}'")));
}
}
}
@@ -720,14 +719,14 @@ impl KmsBackend for LocalKmsBackend {
.client
.load_master_key(key_id)
.await
.map_err(|_| crate::error::KmsError::key_not_found(format!("Key {} not found", key_id)))?;
.map_err(|_| crate::error::KmsError::key_not_found(format!("Key {key_id} not found")))?;
let (deletion_date_str, deletion_date_dt) = if request.force_immediate.unwrap_or(false) {
// For immediate deletion, actually delete the key from filesystem
let key_path = self.client.master_key_path(key_id);
tokio::fs::remove_file(&key_path)
.await
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to delete key file: {}", e)))?;
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to delete key file: {e}")))?;
// Remove from cache
let mut cache = self.client.key_cache.write().await;
@@ -773,9 +772,9 @@ impl KmsBackend for LocalKmsBackend {
let key_path = self.client.master_key_path(key_id);
let content = tokio::fs::read(&key_path)
.await
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to read key file: {}", e)))?;
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to read key file: {e}")))?;
let stored_key: crate::backends::local::StoredMasterKey = serde_json::from_slice(&content)
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to parse stored key: {}", e)))?;
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to parse stored key: {e}")))?;
// Decrypt the existing key material to preserve it
let existing_key_material = if let Some(ref cipher) = self.client.master_cipher {
@@ -821,13 +820,10 @@ impl KmsBackend for LocalKmsBackend {
.client
.load_master_key(key_id)
.await
.map_err(|_| crate::error::KmsError::key_not_found(format!("Key {} not found", key_id)))?;
.map_err(|_| crate::error::KmsError::key_not_found(format!("Key {key_id} not found")))?;
if master_key.status != KeyStatus::PendingDeletion {
return Err(crate::error::KmsError::invalid_key_state(format!(
"Key {} is not pending deletion",
key_id
)));
return Err(crate::error::KmsError::invalid_key_state(format!("Key {key_id} is not pending deletion")));
}
// Cancel the deletion by resetting the state

View File

@@ -89,10 +89,10 @@ impl VaultKmsClient {
let settings = settings_builder
.build()
.map_err(|e| KmsError::backend_error(format!("Failed to build Vault client settings: {}", e)))?;
.map_err(|e| KmsError::backend_error(format!("Failed to build Vault client settings: {e}")))?;
let client =
VaultClient::new(settings).map_err(|e| KmsError::backend_error(format!("Failed to create Vault client: {}", e)))?;
VaultClient::new(settings).map_err(|e| KmsError::backend_error(format!("Failed to create Vault client: {e}")))?;
info!("Successfully connected to Vault at {}", config.address);
@@ -144,7 +144,7 @@ impl VaultKmsClient {
kv2::set(&self.client, &self.kv_mount, &path, key_data)
.await
.map_err(|e| KmsError::backend_error(format!("Failed to store key in Vault: {}", e)))?;
.map_err(|e| KmsError::backend_error(format!("Failed to store key in Vault: {e}")))?;
debug!("Stored key {} in Vault at path {}", key_id, path);
Ok(())
@@ -176,7 +176,7 @@ impl VaultKmsClient {
let secret: VaultKeyData = kv2::read(&self.client, &self.kv_mount, &path).await.map_err(|e| match e {
vaultrs::error::ClientError::ResponseWrapError => KmsError::key_not_found(key_id),
vaultrs::error::ClientError::APIError { code: 404, .. } => KmsError::key_not_found(key_id),
_ => KmsError::backend_error(format!("Failed to read key from Vault: {}", e)),
_ => KmsError::backend_error(format!("Failed to read key from Vault: {e}")),
})?;
debug!("Retrieved key {} from Vault, tags: {:?}", key_id, secret.tags);
@@ -200,7 +200,7 @@ impl VaultKmsClient {
debug!("Key path doesn't exist in Vault (404), returning empty list");
Ok(Vec::new())
}
Err(e) => Err(KmsError::backend_error(format!("Failed to list keys in Vault: {}", e))),
Err(e) => Err(KmsError::backend_error(format!("Failed to list keys in Vault: {e}"))),
}
}
@@ -214,7 +214,7 @@ impl VaultKmsClient {
.await
.map_err(|e| match e {
vaultrs::error::ClientError::APIError { code: 404, .. } => KmsError::key_not_found(key_id),
_ => KmsError::backend_error(format!("Failed to delete key metadata from Vault: {}", e)),
_ => KmsError::backend_error(format!("Failed to delete key metadata from Vault: {e}")),
})?;
debug!("Permanently deleted key {} metadata from Vault at path {}", key_id, path);
@@ -649,7 +649,7 @@ impl KmsBackend for VaultKmsBackend {
let mut key_metadata = match self.describe_key(describe_request).await {
Ok(response) => response.key_metadata,
Err(_) => {
return Err(crate::error::KmsError::key_not_found(format!("Key {} not found", key_id)));
return Err(crate::error::KmsError::key_not_found(format!("Key {key_id} not found")));
}
};
@@ -705,15 +705,12 @@ impl KmsBackend for VaultKmsBackend {
let mut key_metadata = match self.describe_key(describe_request).await {
Ok(response) => response.key_metadata,
Err(_) => {
return Err(crate::error::KmsError::key_not_found(format!("Key {} not found", key_id)));
return Err(crate::error::KmsError::key_not_found(format!("Key {key_id} not found")));
}
};
if key_metadata.key_state != KeyState::PendingDeletion {
return Err(crate::error::KmsError::invalid_key_state(format!(
"Key {} is not pending deletion",
key_id
)));
return Err(crate::error::KmsError::invalid_key_state(format!("Key {key_id} is not pending deletion")));
}
// Cancel the deletion by resetting the state

View File

@@ -315,7 +315,7 @@ impl KmsConfig {
config.backend = match backend_type.to_lowercase().as_str() {
"local" => KmsBackend::Local,
"vault" => KmsBackend::Vault,
_ => return Err(KmsError::configuration_error(format!("Unknown KMS backend: {}", backend_type))),
_ => return Err(KmsError::configuration_error(format!("Unknown KMS backend: {backend_type}"))),
};
}

View File

@@ -227,7 +227,7 @@ impl ObjectEncryptionService {
self.kms_manager
.create_key(create_req)
.await
.map_err(|e| KmsError::backend_error(format!("Failed to auto-create SSE-S3 key {}: {}", actual_key_id, e)))?;
.map_err(|e| KmsError::backend_error(format!("Failed to auto-create SSE-S3 key {actual_key_id}: {e}")))?;
}
} else {
// For SSE-KMS, key must exist
@@ -235,7 +235,7 @@ impl ObjectEncryptionService {
key_id: actual_key_id.to_string(),
};
self.kms_manager.describe_key(describe_req).await.map_err(|_| {
KmsError::invalid_operation(format!("SSE-KMS key '{}' not found. Please create it first.", actual_key_id))
KmsError::invalid_operation(format!("SSE-KMS key '{actual_key_id}' not found. Please create it first."))
})?;
}
@@ -250,7 +250,7 @@ impl ObjectEncryptionService {
.kms_manager
.generate_data_key(request)
.await
.map_err(|e| KmsError::backend_error(format!("Failed to generate data key: {}", e)))?;
.map_err(|e| KmsError::backend_error(format!("Failed to generate data key: {e}")))?;
let plaintext_key = data_key.plaintext_key;
@@ -325,7 +325,7 @@ impl ObjectEncryptionService {
.kms_manager
.decrypt(decrypt_request)
.await
.map_err(|e| KmsError::backend_error(format!("Failed to decrypt data key: {}", e)))?;
.map_err(|e| KmsError::backend_error(format!("Failed to decrypt data key: {e}")))?;
// Create cipher
let cipher = create_cipher(&algorithm, &decrypt_response.plaintext)?;
@@ -379,7 +379,7 @@ impl ObjectEncryptionService {
// Validate key MD5 if provided
if let Some(expected_md5) = customer_key_md5 {
let actual_md5 = md5::compute(customer_key);
let actual_md5_hex = format!("{:x}", actual_md5);
let actual_md5_hex = format!("{actual_md5:x}");
if actual_md5_hex != expected_md5.to_lowercase() {
return Err(KmsError::validation_error("Customer key MD5 mismatch"));
}
@@ -487,12 +487,11 @@ impl ObjectEncryptionService {
Some(actual_value) if actual_value == expected_value => continue,
Some(actual_value) => {
return Err(KmsError::context_mismatch(format!(
"Context mismatch for '{}': expected '{}', got '{}'",
key, expected_value, actual_value
"Context mismatch for '{key}': expected '{expected_value}', got '{actual_value}'"
)));
}
None => {
return Err(KmsError::context_mismatch(format!("Missing context key '{}'", key)));
return Err(KmsError::context_mismatch(format!("Missing context key '{key}'")));
}
}
}
@@ -562,13 +561,13 @@ impl ObjectEncryptionService {
.ok_or_else(|| KmsError::validation_error("Missing IV header"))?;
let iv = base64::engine::general_purpose::STANDARD
.decode(iv)
.map_err(|e| KmsError::validation_error(format!("Invalid IV: {}", e)))?;
.map_err(|e| KmsError::validation_error(format!("Invalid IV: {e}")))?;
let tag = if let Some(tag_str) = headers.get("x-rustfs-encryption-tag") {
Some(
base64::engine::general_purpose::STANDARD
.decode(tag_str)
.map_err(|e| KmsError::validation_error(format!("Invalid tag: {}", e)))?,
.map_err(|e| KmsError::validation_error(format!("Invalid tag: {e}")))?,
)
} else {
None
@@ -577,14 +576,14 @@ impl ObjectEncryptionService {
let encrypted_data_key = if let Some(key_str) = headers.get("x-rustfs-encryption-key") {
base64::engine::general_purpose::STANDARD
.decode(key_str)
.map_err(|e| KmsError::validation_error(format!("Invalid encrypted key: {}", e)))?
.map_err(|e| KmsError::validation_error(format!("Invalid encrypted key: {e}")))?
} else {
Vec::new() // Empty for SSE-C
};
let encryption_context = if let Some(context_str) = headers.get("x-rustfs-encryption-context") {
serde_json::from_str(context_str)
.map_err(|e| KmsError::validation_error(format!("Invalid encryption context: {}", e)))?
.map_err(|e| KmsError::validation_error(format!("Invalid encryption context: {e}")))?
} else {
HashMap::new()
};

View File

@@ -225,7 +225,7 @@ impl KmsError {
impl From<url::ParseError> for KmsError {
fn from(error: url::ParseError) -> Self {
Self::ConfigurationError {
message: format!("Invalid URL: {}", error),
message: format!("Invalid URL: {error}"),
}
}
}
@@ -233,7 +233,7 @@ impl From<url::ParseError> for KmsError {
impl From<reqwest::Error> for KmsError {
fn from(error: reqwest::Error) -> Self {
Self::BackendError {
message: format!("HTTP request failed: {}", error),
message: format!("HTTP request failed: {error}"),
}
}
}

View File

@@ -137,7 +137,7 @@ impl KmsServiceManager {
Ok(())
}
Err(e) => {
let err_msg = format!("Failed to create KMS backend: {}", e);
let err_msg = format!("Failed to create KMS backend: {e}");
error!("{}", err_msg);
let mut status = self.status.write().await;
*status = KmsServiceStatus::Error(err_msg.clone());
@@ -218,7 +218,7 @@ impl KmsServiceManager {
error!("KMS health check error: {}", e);
// Update status to error
let mut status = self.status.write().await;
*status = KmsServiceStatus::Error(format!("Health check failed: {}", e));
*status = KmsServiceStatus::Error(format!("Health check failed: {e}"));
Err(e)
}
}

View File

@@ -106,6 +106,10 @@ impl FastObjectLockManager {
object: impl Into<Arc<str>>,
owner: impl Into<Arc<str>>,
) -> Result<FastLockGuard, LockResult> {
// let bucket = bucket.into();
// let object = object.into();
// let owner = owner.into();
// error!("acquire_write_lock: bucket={:?}, object={:?}, owner={:?}", bucket, object, owner);
let request = ObjectLockRequest::new_write(bucket, object, owner);
self.acquire_lock(request).await
}

View File

@@ -296,11 +296,11 @@ impl S3Client {
.context(format!("Failed to resolve file path: {local_path}"))?;
if !canonical_path.exists() {
anyhow::bail!("File does not exist: {}", local_path);
anyhow::bail!("File does not exist: {local_path}");
}
if !canonical_path.is_file() {
anyhow::bail!("Path is not a file: {}", local_path);
anyhow::bail!("Path is not a file: {local_path}");
}
let metadata = tokio::fs::metadata(&canonical_path)
@@ -432,7 +432,7 @@ impl S3Client {
while let Some(bytes_result) = byte_stream.try_next().await.context("Failed to read object content")? {
if total_read + bytes_result.len() > max_size {
anyhow::bail!("Object size exceeds maximum allowed size of {} bytes", max_size);
anyhow::bail!("Object size exceeds maximum allowed size of {max_size} bytes");
}
content.extend_from_slice(&bytes_result);
total_read += bytes_result.len();

View File

@@ -82,6 +82,7 @@ pub trait HashReaderDetector {
impl Reader for crate::HashReader {}
impl Reader for crate::HardLimitReader {}
impl Reader for crate::EtagReader {}
impl<R> Reader for crate::LimitReader<R> where R: Reader {}
impl<R> Reader for crate::CompressReader<R> where R: Reader {}
impl<R> Reader for crate::EncryptReader<R> where R: Reader {}
impl<R> Reader for crate::DecryptReader<R> where R: Reader {}

View File

@@ -37,7 +37,7 @@ use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
use crate::{EtagResolvable, HashReaderDetector, HashReaderMut};
use crate::{EtagResolvable, HashReaderDetector, HashReaderMut, TryGetIndex};
pin_project! {
#[derive(Debug)]
@@ -118,6 +118,8 @@ where
}
}
impl<R> TryGetIndex for LimitReader<R> where R: AsyncRead + Unpin + Send + Sync {}
#[cfg(test)]
mod tests {
use std::io::Cursor;

View File

@@ -53,6 +53,7 @@ s3s = { workspace = true, optional = true }
serde = { workspace = true, optional = true }
sha1 = { workspace = true, optional = true }
sha2 = { workspace = true, optional = true }
convert_case = "0.8.0"
siphasher = { workspace = true, optional = true }
snap = { workspace = true, optional = true }
sysinfo = { workspace = true, optional = true }
@@ -90,4 +91,5 @@ hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde", "dep:s
os = ["dep:nix", "dep:tempfile", "winapi"] # operating system utilities
integration = [] # integration test features
sys = ["dep:sysinfo"] # system information features
full = ["ip", "tls", "net", "io", "hash", "os", "integration", "path", "crypto", "string", "compress", "sys", "notify"] # all features
http = []
full = ["ip", "tls", "net", "io", "hash", "os", "integration", "path", "crypto", "string", "compress", "sys", "notify","http"] # all features

View File

@@ -109,6 +109,8 @@ use siphasher::sip::SipHasher;
pub const EMPTY_STRING_SHA256_HASH: &str = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
pub const DEFAULT_SIP_HASH_KEY: [u8; 16] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
pub fn sip_hash(key: &str, cardinality: usize, id: &[u8; 16]) -> usize {
// The SipHash key (`id`) must be exactly 16 bytes

View File

@@ -0,0 +1,277 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use convert_case::{Case, Casing};
use std::collections::HashMap;
use std::sync::LazyLock;
pub const LAST_MODIFIED: &str = "Last-Modified";
pub const DATE: &str = "Date";
pub const ETAG: &str = "ETag";
pub const CONTENT_TYPE: &str = "Content-Type";
pub const CONTENT_MD5: &str = "Content-Md5";
pub const CONTENT_ENCODING: &str = "Content-Encoding";
pub const EXPIRES: &str = "Expires";
pub const CONTENT_LENGTH: &str = "Content-Length";
pub const CONTENT_LANGUAGE: &str = "Content-Language";
pub const CONTENT_RANGE: &str = "Content-Range";
pub const CONNECTION: &str = "Connection";
pub const ACCEPT_RANGES: &str = "Accept-Ranges";
pub const AMZ_BUCKET_REGION: &str = "X-Amz-Bucket-Region";
pub const SERVER_INFO: &str = "Server";
pub const RETRY_AFTER: &str = "Retry-After";
pub const LOCATION: &str = "Location";
pub const CACHE_CONTROL: &str = "Cache-Control";
pub const CONTENT_DISPOSITION: &str = "Content-Disposition";
pub const AUTHORIZATION: &str = "Authorization";
pub const ACTION: &str = "Action";
pub const RANGE: &str = "Range";
// S3 storage class
pub const AMZ_STORAGE_CLASS: &str = "x-amz-storage-class";
// S3 object version ID
pub const AMZ_VERSION_ID: &str = "x-amz-version-id";
pub const AMZ_DELETE_MARKER: &str = "x-amz-delete-marker";
// S3 object tagging
pub const AMZ_OBJECT_TAGGING: &str = "X-Amz-Tagging";
pub const AMZ_TAG_COUNT: &str = "x-amz-tagging-count";
pub const AMZ_TAG_DIRECTIVE: &str = "X-Amz-Tagging-Directive";
// S3 transition restore
pub const AMZ_RESTORE: &str = "x-amz-restore";
pub const AMZ_RESTORE_EXPIRY_DAYS: &str = "X-Amz-Restore-Expiry-Days";
pub const AMZ_RESTORE_REQUEST_DATE: &str = "X-Amz-Restore-Request-Date";
pub const AMZ_RESTORE_OUTPUT_PATH: &str = "x-amz-restore-output-path";
// S3 extensions
pub const AMZ_COPY_SOURCE_IF_MODIFIED_SINCE: &str = "x-amz-copy-source-if-modified-since";
pub const AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE: &str = "x-amz-copy-source-if-unmodified-since";
pub const AMZ_COPY_SOURCE_IF_NONE_MATCH: &str = "x-amz-copy-source-if-none-match";
pub const AMZ_COPY_SOURCE_IF_MATCH: &str = "x-amz-copy-source-if-match";
pub const AMZ_COPY_SOURCE: &str = "X-Amz-Copy-Source";
pub const AMZ_COPY_SOURCE_VERSION_ID: &str = "X-Amz-Copy-Source-Version-Id";
pub const AMZ_COPY_SOURCE_RANGE: &str = "X-Amz-Copy-Source-Range";
pub const AMZ_METADATA_DIRECTIVE: &str = "X-Amz-Metadata-Directive";
pub const AMZ_OBJECT_LOCK_MODE: &str = "X-Amz-Object-Lock-Mode";
pub const AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE: &str = "X-Amz-Object-Lock-Retain-Until-Date";
pub const AMZ_OBJECT_LOCK_LEGAL_HOLD: &str = "X-Amz-Object-Lock-Legal-Hold";
pub const AMZ_OBJECT_LOCK_BYPASS_GOVERNANCE: &str = "X-Amz-Bypass-Governance-Retention";
pub const AMZ_BUCKET_REPLICATION_STATUS: &str = "X-Amz-Replication-Status";
// AmzSnowballExtract triggers unpacking of archive content
pub const AMZ_SNOWBALL_EXTRACT: &str = "X-Amz-Meta-Snowball-Auto-Extract";
// Object lock enabled
pub const AMZ_OBJECT_LOCK_ENABLED: &str = "x-amz-bucket-object-lock-enabled";
// Multipart parts count
pub const AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
// Object date/time of expiration
pub const AMZ_EXPIRATION: &str = "x-amz-expiration";
// Dummy putBucketACL
pub const AMZ_ACL: &str = "x-amz-acl";
// Signature V4 related constants.
pub const AMZ_CONTENT_SHA256: &str = "X-Amz-Content-Sha256";
pub const AMZ_DATE: &str = "X-Amz-Date";
pub const AMZ_ALGORITHM: &str = "X-Amz-Algorithm";
pub const AMZ_EXPIRES: &str = "X-Amz-Expires";
pub const AMZ_SIGNED_HEADERS: &str = "X-Amz-SignedHeaders";
pub const AMZ_SIGNATURE: &str = "X-Amz-Signature";
pub const AMZ_CREDENTIAL: &str = "X-Amz-Credential";
pub const AMZ_SECURITY_TOKEN: &str = "X-Amz-Security-Token";
pub const AMZ_DECODED_CONTENT_LENGTH: &str = "X-Amz-Decoded-Content-Length";
pub const AMZ_TRAILER: &str = "X-Amz-Trailer";
pub const AMZ_MAX_PARTS: &str = "X-Amz-Max-Parts";
pub const AMZ_PART_NUMBER_MARKER: &str = "X-Amz-Part-Number-Marker";
// Constants used for GetObjectAttributes and GetObjectVersionAttributes
pub const AMZ_OBJECT_ATTRIBUTES: &str = "X-Amz-Object-Attributes";
// AWS server-side encryption headers for SSE-S3, SSE-KMS and SSE-C.
pub const AMZ_SERVER_SIDE_ENCRYPTION: &str = "X-Amz-Server-Side-Encryption";
pub const AMZ_SERVER_SIDE_ENCRYPTION_KMS_ID: &str = "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id";
pub const AMZ_SERVER_SIDE_ENCRYPTION_KMS_CONTEXT: &str = "X-Amz-Server-Side-Encryption-Context";
pub const AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: &str = "X-Amz-Server-Side-Encryption-Customer-Algorithm";
pub const AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: &str = "X-Amz-Server-Side-Encryption-Customer-Key";
pub const AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: &str = "X-Amz-Server-Side-Encryption-Customer-Key-Md5";
pub const AMZ_SERVER_SIDE_ENCRYPTION_COPY_CUSTOMER_ALGORITHM: &str =
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm";
pub const AMZ_SERVER_SIDE_ENCRYPTION_COPY_CUSTOMER_KEY: &str = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key";
pub const AMZ_SERVER_SIDE_ENCRYPTION_COPY_CUSTOMER_KEY_MD5: &str = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5";
pub const AMZ_ENCRYPTION_AES: &str = "AES256";
pub const AMZ_ENCRYPTION_KMS: &str = "aws:kms";
// Signature v2 related constants
pub const AMZ_SIGNATURE_V2: &str = "Signature";
pub const AMZ_ACCESS_KEY_ID: &str = "AWSAccessKeyId";
// Response request id.
pub const AMZ_REQUEST_ID: &str = "x-amz-request-id";
pub const AMZ_REQUEST_HOST_ID: &str = "x-amz-id-2";
// Content Checksums
pub const AMZ_CHECKSUM_ALGO: &str = "x-amz-checksum-algorithm";
pub const AMZ_CHECKSUM_CRC32: &str = "x-amz-checksum-crc32";
pub const AMZ_CHECKSUM_CRC32C: &str = "x-amz-checksum-crc32c";
pub const AMZ_CHECKSUM_SHA1: &str = "x-amz-checksum-sha1";
pub const AMZ_CHECKSUM_SHA256: &str = "x-amz-checksum-sha256";
pub const AMZ_CHECKSUM_CRC64NVME: &str = "x-amz-checksum-crc64nvme";
pub const AMZ_CHECKSUM_MODE: &str = "x-amz-checksum-mode";
pub const AMZ_CHECKSUM_TYPE: &str = "x-amz-checksum-type";
pub const AMZ_CHECKSUM_TYPE_FULL_OBJECT: &str = "FULL_OBJECT";
pub const AMZ_CHECKSUM_TYPE_COMPOSITE: &str = "COMPOSITE";
// Post Policy related
pub const AMZ_META_UUID: &str = "X-Amz-Meta-Uuid";
pub const AMZ_META_NAME: &str = "X-Amz-Meta-Name";
pub const AMZ_META_UNENCRYPTED_CONTENT_LENGTH: &str = "X-Amz-Meta-X-Amz-Unencrypted-Content-Length";
pub const AMZ_META_UNENCRYPTED_CONTENT_MD5: &str = "X-Amz-Meta-X-Amz-Unencrypted-Content-Md5";
pub const RESERVED_METADATA_PREFIX: &str = "X-RustFS-Internal-";
pub const RESERVED_METADATA_PREFIX_LOWER: &str = "x-rustfs-internal-";
pub const RUSTFS_HEALING: &str = "X-Rustfs-Internal-healing";
// pub const RUSTFS_DATA_MOVE: &str = "X-Rustfs-Internal-data-mov";
// pub const X_RUSTFS_INLINE_DATA: &str = "x-rustfs-inline-data";
pub const VERSION_PURGE_STATUS_KEY: &str = "X-Rustfs-Internal-purgestatus";
pub const X_RUSTFS_HEALING: &str = "X-Rustfs-Internal-healing";
pub const X_RUSTFS_DATA_MOV: &str = "X-Rustfs-Internal-data-mov";
pub const AMZ_TAGGING_DIRECTIVE: &str = "X-Amz-Tagging-Directive";
pub const RUSTFS_DATA_MOVE: &str = "X-Rustfs-Internal-data-mov";
pub const RUSTFS_FORCE_DELETE: &str = "X-Rustfs-Force-Delete";
pub const RUSTFS_REPLICATION_RESET_STATUS: &str = "X-Rustfs-Replication-Reset-Status";
pub const RUSTFS_REPLICATION_AUTUAL_OBJECT_SIZE: &str = "X-Rustfs-Replication-Actual-Object-Size";
pub const RUSTFS_BUCKET_SOURCE_VERSION_ID: &str = "X-Rustfs-Source-Version-Id";
pub const RUSTFS_BUCKET_SOURCE_MTIME: &str = "X-Rustfs-Source-Mtime";
pub const RUSTFS_BUCKET_SOURCE_ETAG: &str = "X-Rustfs-Source-Etag";
pub const RUSTFS_BUCKET_REPLICATION_DELETE_MARKER: &str = "X-Rustfs-Source-DeleteMarker";
pub const RUSTFS_BUCKET_REPLICATION_PROXY_REQUEST: &str = "X-Rustfs-Source-Proxy-Request";
pub const RUSTFS_BUCKET_REPLICATION_REQUEST: &str = "X-Rustfs-Source-Replication-Request";
pub const RUSTFS_BUCKET_REPLICATION_CHECK: &str = "X-Rustfs-Source-Replication-Check";
// SSEC encryption header constants
pub const SSEC_ALGORITHM_HEADER: &str = "x-amz-server-side-encryption-customer-algorithm";
pub const SSEC_KEY_HEADER: &str = "x-amz-server-side-encryption-customer-key";
pub const SSEC_KEY_MD5_HEADER: &str = "x-amz-server-side-encryption-customer-key-md5";
pub const AMZ_WEBSITE_REDIRECT_LOCATION: &str = "x-amz-website-redirect-location";
pub trait HeaderExt {
fn lookup(&self, s: &str) -> Option<&str>;
}
impl HeaderExt for HashMap<String, String> {
fn lookup(&self, s: &str) -> Option<&str> {
let train = s.to_case(Case::Train);
let lower = s.to_ascii_lowercase();
let keys = [s, lower.as_str(), train.as_str()];
for key in keys {
if let Some(v) = self.get(key) {
return Some(v);
}
}
None
}
}
static SUPPORTED_QUERY_VALUES: LazyLock<HashMap<String, bool>> = LazyLock::new(|| {
let mut m = HashMap::new();
m.insert("attributes".to_string(), true);
m.insert("partNumber".to_string(), true);
m.insert("versionId".to_string(), true);
m.insert("response-cache-control".to_string(), true);
m.insert("response-content-disposition".to_string(), true);
m.insert("response-content-encoding".to_string(), true);
m.insert("response-content-language".to_string(), true);
m.insert("response-content-type".to_string(), true);
m.insert("response-expires".to_string(), true);
m
});
static SUPPORTED_HEADERS: LazyLock<HashMap<String, bool>> = LazyLock::new(|| {
let mut m = HashMap::new();
m.insert("content-type".to_string(), true);
m.insert("cache-control".to_string(), true);
m.insert("content-encoding".to_string(), true);
m.insert("content-disposition".to_string(), true);
m.insert("content-language".to_string(), true);
m.insert("x-amz-website-redirect-location".to_string(), true);
m.insert("x-amz-object-lock-mode".to_string(), true);
m.insert("x-amz-metadata-directive".to_string(), true);
m.insert("x-amz-object-lock-retain-until-date".to_string(), true);
m.insert("expires".to_string(), true);
m.insert("x-amz-replication-status".to_string(), true);
m
});
static SSE_HEADERS: LazyLock<HashMap<String, bool>> = LazyLock::new(|| {
let mut m = HashMap::new();
m.insert("x-amz-server-side-encryption".to_string(), true);
m.insert("x-amz-server-side-encryption-aws-kms-key-id".to_string(), true);
m.insert("x-amz-server-side-encryption-context".to_string(), true);
m.insert("x-amz-server-side-encryption-customer-algorithm".to_string(), true);
m.insert("x-amz-server-side-encryption-customer-key".to_string(), true);
m.insert("x-amz-server-side-encryption-customer-key-md5".to_string(), true);
m
});
pub fn is_standard_query_value(qs_key: &str) -> bool {
*SUPPORTED_QUERY_VALUES.get(qs_key).unwrap_or(&false)
}
pub fn is_storageclass_header(header_key: &str) -> bool {
header_key.to_lowercase() == AMZ_STORAGE_CLASS.to_lowercase()
}
pub fn is_standard_header(header_key: &str) -> bool {
*SUPPORTED_HEADERS.get(&header_key.to_lowercase()).unwrap_or(&false)
}
pub fn is_sse_header(header_key: &str) -> bool {
*SSE_HEADERS.get(&header_key.to_lowercase()).unwrap_or(&false)
}
pub fn is_amz_header(header_key: &str) -> bool {
let key = header_key.to_lowercase();
key.starts_with("x-amz-meta-")
|| key.starts_with("x-amz-grant-")
|| key == "x-amz-acl"
|| is_sse_header(header_key)
|| key.starts_with("x-amz-checksum-")
}
pub fn is_rustfs_header(header_key: &str) -> bool {
header_key.to_lowercase().starts_with("x-rustfs-")
}
pub fn is_minio_header(header_key: &str) -> bool {
header_key.to_lowercase().starts_with("x-minio-")
}
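
A minimal usage sketch of the new helpers (illustrative only, not part of the diff; it assumes the `HeaderExt` trait and the helper functions above are in scope):

use std::collections::HashMap;

fn header_helpers_example() {
    let mut meta: HashMap<String, String> = HashMap::new();
    meta.insert("content-type".to_string(), "application/json".to_string());

    // `lookup` tries the key as given, lowercased, and in Train-Case.
    assert_eq!(meta.lookup("Content-Type"), Some("application/json"));

    // The classification helpers are case-insensitive.
    assert!(is_standard_header("Content-Type"));
    assert!(is_amz_header("x-amz-meta-owner"));
    assert!(is_rustfs_header("X-Rustfs-Force-Delete"));
    assert!(!is_standard_query_value("unknown"));
}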

View File

@@ -0,0 +1,3 @@
pub mod headers;
pub use headers::*;

View File

@@ -21,6 +21,9 @@ pub mod ip;
#[cfg(feature = "net")]
pub mod net;
#[cfg(feature = "http")]
pub mod http;
#[cfg(feature = "net")]
pub use dns_resolver::*;
#[cfg(feature = "net")]

View File

@@ -354,6 +354,17 @@ pub fn gen_secret_key(length: usize) -> Result<String> {
Ok(key_str)
}
/// Tests whether the string s begins with prefix ignoring case
pub fn strings_has_prefix_fold(s: &str, prefix: &str) -> bool {
if s.len() < prefix.len() {
return false;
}
let s_prefix = &s[..prefix.len()];
// Try an exact (case-sensitive) match first, then fall back to a case-insensitive comparison
s_prefix == prefix || s_prefix.to_lowercase() == prefix.to_lowercase()
}
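
A minimal sketch of the expected behavior (illustrative, not part of the diff):

fn prefix_fold_example() {
    // Exact and case-insensitive prefix matches both succeed.
    assert!(strings_has_prefix_fold("X-Amz-Meta-Owner", "x-amz-meta-"));
    assert!(strings_has_prefix_fold("x-amz-meta-owner", "X-Amz-Meta-"));
    // A string shorter than the prefix never matches.
    assert!(!strings_has_prefix_fold("x-amz", "x-amz-meta-"));
}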
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -23,18 +23,19 @@ use futures::{Stream, StreamExt};
use http::{HeaderMap, Uri};
use hyper::StatusCode;
use matchit::Params;
use percent_encoding::{AsciiSet, CONTROLS, percent_encode};
use rustfs_common::heal_channel::HealOpts;
use rustfs_ecstore::admin_server_info::get_server_info;
use rustfs_ecstore::bucket::metadata_sys::{self, get_replication_config};
use rustfs_ecstore::bucket::bucket_target_sys::BucketTargetSys;
use rustfs_ecstore::bucket::metadata::BUCKET_TARGETS_FILE;
use rustfs_ecstore::bucket::metadata_sys;
use rustfs_ecstore::bucket::target::BucketTarget;
use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
use rustfs_ecstore::cmd::bucket_targets::{self, GLOBAL_Bucket_Target_Sys};
use rustfs_ecstore::data_usage::{
aggregate_local_snapshots, compute_bucket_usage, load_data_usage_from_backend, store_data_usage_in_backend,
};
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::global::get_global_action_cred;
use rustfs_ecstore::global::global_rustfs_port;
use rustfs_ecstore::metrics_realtime::{CollectMetricsOpts, MetricType, collect_local_metrics};
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::pools::{get_total_usable_capacity, get_total_usable_capacity_free};
@@ -57,7 +58,6 @@ use s3s::stream::{ByteStream, DynByteStream};
use s3s::{Body, S3Error, S3Request, S3Response, S3Result, s3_error};
use s3s::{S3ErrorCode, StdError};
use serde::{Deserialize, Serialize};
// use serde_json::to_vec;
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::pin::Pin;
@@ -70,6 +70,7 @@ use tokio::{select, spawn};
use tokio_stream::wrappers::ReceiverStream;
use tracing::debug;
use tracing::{error, info, warn};
use url::Host;
// use url::UrlQuery;
pub mod bucket_meta;
@@ -88,7 +89,6 @@ pub mod trace;
pub mod user;
#[cfg(not(target_os = "windows"))]
use pprof::protos::Message;
use urlencoding::decode;
#[allow(dead_code)]
#[derive(Debug, Serialize, Default)]
@@ -971,128 +971,135 @@ impl Operation for GetReplicationMetricsHandler {
pub struct SetRemoteTargetHandler {}
#[async_trait::async_trait]
impl Operation for SetRemoteTargetHandler {
async fn call(&self, mut _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
//return Ok(S3Response::new((StatusCode::OK, Body::from("OK".to_string()))));
debug!("Processing SetRemoteTargetHandler request");
info!("SetRemoteTargetHandler credentials: {:?}", _req.credentials);
let queries = extract_query_params(&_req.uri);
let Some(_cred) = _req.credentials else {
error!("credentials null");
return Err(s3_error!(InvalidRequest, "get cred failed"));
async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
let queries = extract_query_params(&req.uri);
let Some(bucket) = queries.get("bucket") else {
return Err(s3_error!(InvalidRequest, "bucket is required"));
};
let _is_owner = true; // Treat as true for now, decide based on request later
let body = _req.input.store_all_unlimited().await.unwrap();
debug!("Request body received, size: {} bytes", body.len());
if let Some(bucket) = queries.get("bucket") {
if bucket.is_empty() {
info!("have bucket: {}", bucket);
return Err(S3Error::with_message(S3ErrorCode::InternalError, "No buckets found".to_string()));
let update = queries.get("update").is_some_and(|v| v == "true");
warn!("set remote target, bucket: {}, update: {}", bucket, update);
if bucket.is_empty() {
return Err(s3_error!(InvalidRequest, "bucket is required"));
}
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
store
.get_bucket_info(bucket, &rustfs_ecstore::store_api::BucketOptions::default())
.await
.map_err(ApiError::from)?;
let mut input = req.input;
let body = match input.store_all_unlimited().await {
Ok(b) => b,
Err(e) => {
warn!("get body failed, e: {:?}", e);
return Err(s3_error!(InvalidRequest, "get body failed"));
}
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
};
// let binfo:BucketInfo = store
// .get_bucket_info(bucket, &rustfs_ecstore::store_api::BucketOptions::default()).await;
match store
.get_bucket_info(bucket, &rustfs_ecstore::store_api::BucketOptions::default())
.await
{
Ok(info) => {
info!("Bucket Info: {:?}", info);
if !info.versioning {
return Ok(S3Response::new((StatusCode::FORBIDDEN, Body::from("bucket need versioned".to_string()))));
}
}
Err(err) => {
error!("Error: {:?}", err);
return Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::from("empty bucket".to_string()))));
}
}
let mut remote_target: BucketTarget = serde_json::from_slice(&body).map_err(|e| {
tracing::error!("Failed to parse BucketTarget from body: {}", e);
ApiError::other(e)
})?;
tracing::debug!("body is: {}", std::str::from_utf8(&body).unwrap_or("Invalid UTF-8"));
let Ok(target_url) = remote_target.url() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Invalid target url".to_string()));
};
let mut remote_target: BucketTarget = serde_json::from_slice(&body).map_err(|e| {
tracing::error!("Failed to parse BucketTarget from body: {}", e);
ApiError::other(e)
})?;
remote_target.source_bucket = bucket.clone();
let same_target = rustfs_utils::net::is_local_host(
target_url.host().unwrap_or(Host::Domain("localhost")),
target_url.port().unwrap_or(80),
global_rustfs_port(),
)
.unwrap_or_default();
info!("remote target {} And arn is:", remote_target.source_bucket.clone());
if same_target && bucket == &remote_target.target_bucket {
return Err(S3Error::with_message(S3ErrorCode::IncorrectEndpoint, "Same target".to_string()));
}
if let Some(val) = remote_target.arn.clone() {
info!("arn is {}", val);
}
remote_target.source_bucket = bucket.clone();
if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() {
let (arn, exist) = sys.get_remote_arn(bucket, Some(&remote_target), "").await;
info!("exist: {} {}", exist, arn.clone().unwrap_or_default());
if exist && arn.is_some() {
let jsonarn = serde_json::to_string(&arn).expect("failed to serialize");
//Ok(S3Response::new)
return Ok(S3Response::new((StatusCode::OK, Body::from(jsonarn))));
} else {
remote_target.arn = arn;
match sys.set_target(bucket, &remote_target, false, false).await {
Ok(_) => {
{
//todo various persistence work
let targets = sys.list_targets(Some(bucket), None).await;
info!("targets is {}", targets.len());
match serde_json::to_vec(&targets) {
Ok(json) => {
debug!("Serialized targets configuration, size: {} bytes", json.len());
//metadata_sys::GLOBAL_BucketMetadataSys::
//BUCKET_TARGETS_FILE: &str = "bucket-targets.json"
let _ = metadata_sys::update(bucket, "bucket-targets.json", json).await;
// if let Err(err) = metadata_sys::GLOBAL_BucketMetadataSys.get().
// .update(ctx, bucket, "bucketTargetsFile", tgt_bytes)
// .await
// {
// write_error_response(ctx, &err)?;
// return Err(err);
// }
}
Err(e) => {
error!("Serialization failed: {}", e);
}
}
}
let bucket_target_sys = BucketTargetSys::get();
let jsonarn = serde_json::to_string(&remote_target.arn.clone()).expect("failed to serialize");
return Ok(S3Response::new((StatusCode::OK, Body::from(jsonarn))));
}
Err(e) => {
error!("set target error {}", e);
return Ok(S3Response::new((
StatusCode::BAD_REQUEST,
Body::from("remote target not ready".to_string()),
)));
}
}
}
} else {
error!("GLOBAL_BUCKET _TARGET_SYS is not initialized");
return Err(S3Error::with_message(
S3ErrorCode::InternalError,
"GLOBAL_BUCKET_TARGET_SYS is not initialized".to_string(),
));
if !update {
let (arn, exist) = bucket_target_sys.get_remote_arn(bucket, Some(&remote_target), "").await;
remote_target.arn = arn.clone();
if exist && !arn.is_empty() {
let arn_str = serde_json::to_string(&arn).unwrap_or_default();
warn!("return exists, arn: {}", arn_str);
return Ok(S3Response::new((StatusCode::OK, Body::from(arn_str))));
}
}
// return Err(s3_error!(InvalidArgument));
return Ok(S3Response::new((StatusCode::OK, Body::from("Ok".to_string()))));
if remote_target.arn.is_empty() {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "ARN is empty".to_string()));
}
if update {
let Some(mut target) = bucket_target_sys
.get_remote_bucket_target_by_arn(bucket, &remote_target.arn)
.await
else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Target not found".to_string()));
};
target.credentials = remote_target.credentials;
target.endpoint = remote_target.endpoint;
target.secure = remote_target.secure;
target.target_bucket = remote_target.target_bucket;
target.path = remote_target.path;
target.replication_sync = remote_target.replication_sync;
target.bandwidth_limit = remote_target.bandwidth_limit;
target.health_check_duration = remote_target.health_check_duration;
warn!("update target, target: {:?}", target);
remote_target = target;
}
let arn = remote_target.arn.clone();
bucket_target_sys
.set_target(bucket, &remote_target, update)
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
let targets = bucket_target_sys.list_bucket_targets(bucket).await.map_err(|e| {
error!("Failed to list bucket targets: {}", e);
S3Error::with_message(S3ErrorCode::InternalError, "Failed to list bucket targets".to_string())
})?;
let json_targets = serde_json::to_vec(&targets).map_err(|e| {
error!("Serialization error: {}", e);
S3Error::with_message(S3ErrorCode::InternalError, "Failed to serialize targets".to_string())
})?;
metadata_sys::update(bucket, BUCKET_TARGETS_FILE, json_targets)
.await
.map_err(|e| {
error!("Failed to update bucket targets: {}", e);
S3Error::with_message(S3ErrorCode::InternalError, format!("Failed to update bucket targets: {e}"))
})?;
let arn_str = serde_json::to_string(&arn).unwrap_or_default();
Ok(S3Response::new((StatusCode::OK, Body::from(arn_str))))
}
}
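
For reference, the rewritten handler reads the bucket name and the optional `update` flag from the query string and a JSON-encoded `BucketTarget` from the request body. A hedged client-side sketch (the admin route and the serde field names are assumptions for illustration; only struct fields visible in the code above are used):

// Hypothetical request payload; the real field names depend on BucketTarget's serde attributes.
let payload = serde_json::json!({
    "targetbucket": "photos-replica",
    "endpoint": "replica.example.com:9000",
    "secure": false,
    "credentials": { "accessKey": "<key>", "secretKey": "<secret>" },
    "replicationSync": false
});
// Sent as e.g. PUT <admin-endpoint>?bucket=photos&update=false with `payload` as the body (route assumed);
// the handler fills in source_bucket from the `bucket` query parameter.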
pub struct ListRemoteTargetHandler {}
#[async_trait::async_trait]
impl Operation for ListRemoteTargetHandler {
async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
warn!("list GetRemoteTargetHandler, params: {:?}", _req.credentials);
let queries = extract_query_params(&_req.uri);
let Some(_cred) = _req.credentials else {
async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
let queries = extract_query_params(&req.uri);
let Some(_cred) = req.credentials else {
error!("credentials null");
return Err(s3_error!(InvalidRequest, "get cred failed"));
};
@@ -1110,65 +1117,48 @@ impl Operation for ListRemoteTargetHandler {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not initialized".to_string()));
};
match store
if let Err(err) = store
.get_bucket_info(bucket, &rustfs_ecstore::store_api::BucketOptions::default())
.await
{
Ok(info) => {
info!("Bucket Info: {:?}", info);
if !info.versioning {
return Ok(S3Response::new((
StatusCode::FORBIDDEN,
Body::from("Bucket needs versioning".to_string()),
)));
}
}
Err(err) => {
error!("Error fetching bucket info: {:?}", err);
return Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::from("Invalid bucket".to_string()))));
}
error!("Error fetching bucket info: {:?}", err);
return Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::from("Invalid bucket".to_string()))));
}
if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() {
let targets = sys.list_targets(Some(bucket), None).await;
info!("target sys len {}", targets.len());
if targets.is_empty() {
return Ok(S3Response::new((
StatusCode::NOT_FOUND,
Body::from("No remote targets found".to_string()),
)));
}
let sys = BucketTargetSys::get();
let targets = sys.list_targets(bucket, "").await;
let json_targets = serde_json::to_string(&targets).map_err(|e| {
error!("Serialization error: {}", e);
S3Error::with_message(S3ErrorCode::InternalError, "Failed to serialize targets".to_string())
})?;
let json_targets = serde_json::to_vec(&targets).map_err(|e| {
error!("Serialization error: {}", e);
S3Error::with_message(S3ErrorCode::InternalError, "Failed to serialize targets".to_string())
})?;
return Ok(S3Response::new((StatusCode::OK, Body::from(json_targets))));
} else {
error!("GLOBAL_BUCKET_TARGET_SYS is not initialized");
return Err(S3Error::with_message(
S3ErrorCode::InternalError,
"GLOBAL_BUCKET_TARGET_SYS is not initialized".to_string(),
));
}
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
return Ok(S3Response::with_headers((StatusCode::OK, Body::from(json_targets)), header));
}
warn!("Bucket parameter is missing in request");
Ok(S3Response::new((
StatusCode::BAD_REQUEST,
Body::from("Bucket parameter is required".to_string()),
)))
//return Err(s3_error!(NotImplemented));
let targets: Vec<BucketTarget> = Vec::new();
let json_targets = serde_json::to_vec(&targets).map_err(|e| {
error!("Serialization error: {}", e);
S3Error::with_message(S3ErrorCode::InternalError, "Failed to serialize targets".to_string())
})?;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
return Ok(S3Response::with_headers((StatusCode::OK, Body::from(json_targets)), header));
}
}
const COLON: AsciiSet = CONTROLS.add(b':');
pub struct RemoveRemoteTargetHandler {}
#[async_trait::async_trait]
impl Operation for RemoveRemoteTargetHandler {
async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
debug!("remove remote target called");
let queries = extract_query_params(&_req.uri);
let queries = extract_query_params(&req.uri);
let Some(bucket) = queries.get("bucket") else {
return Ok(S3Response::new((
StatusCode::BAD_REQUEST,
@@ -1176,54 +1166,45 @@ impl Operation for RemoveRemoteTargetHandler {
)));
};
let mut need_delete = true;
let Some(arn_str) = queries.get("arn") else {
return Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::from("ARN is required".to_string()))));
};
if let Some(arnstr) = queries.get("arn") {
let _arn = bucket_targets::ARN::parse(arnstr);
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not initialized".to_string()));
};
match get_replication_config(bucket).await {
Ok((conf, _ts)) => {
for ru in conf.rules {
let encoded = percent_encode(ru.destination.bucket.as_bytes(), &COLON);
let encoded_str = encoded.to_string();
if *arnstr == encoded_str {
//error!("target in use");
//return Ok(S3Response::new((StatusCode::OK, Body::from("Ok".to_string()))));
need_delete = false;
break;
}
//info!("bucket: {} and arn str is {} ", encoded_str, arnstr);
}
}
Err(err) => {
error!("get replication config err: {}", err);
return Ok(S3Response::new((StatusCode::NOT_FOUND, Body::from(err.to_string()))));
}
}
if need_delete {
info!("arn {} is in use, cannot delete", arnstr);
let decoded_str = decode(arnstr).unwrap();
error!("need delete target is {}", decoded_str);
bucket_targets::remove_bucket_target(bucket, arnstr).await;
}
if let Err(err) = store
.get_bucket_info(bucket, &rustfs_ecstore::store_api::BucketOptions::default())
.await
{
error!("Error fetching bucket info: {:?}", err);
return Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::from("Invalid bucket".to_string()))));
}
// List bucket targets and return as JSON to client
// match bucket_targets::list_bucket_targets(bucket).await {
// Ok(targets) => {
// let json_targets = serde_json::to_string(&targets).map_err(|e| {
// error!("Serialization error: {}", e);
// S3Error::with_message(S3ErrorCode::InternalError, "Failed to serialize targets".to_string())
// })?;
// return Ok(S3Response::new((StatusCode::OK, Body::from(json_targets))));
// }
// Err(e) => {
// error!("list bucket targets failed: {:?}", e);
// return Err(S3Error::with_message(
// S3ErrorCode::InternalError,
// "list bucket targets failed".to_string(),
// ));
// }
// }
let sys = BucketTargetSys::get();
sys.remove_target(bucket, arn_str).await.map_err(|e| {
error!("Failed to remove target: {}", e);
S3Error::with_message(S3ErrorCode::InternalError, "Failed to remove target".to_string())
})?;
let targets = sys.list_bucket_targets(bucket).await.map_err(|e| {
error!("Failed to list bucket targets: {}", e);
S3Error::with_message(S3ErrorCode::InternalError, "Failed to list bucket targets".to_string())
})?;
let json_targets = serde_json::to_vec(&targets).map_err(|e| {
error!("Serialization error: {}", e);
S3Error::with_message(S3ErrorCode::InternalError, "Failed to serialize targets".to_string())
})?;
metadata_sys::update(bucket, BUCKET_TARGETS_FILE, json_targets)
.await
.map_err(|e| {
error!("Failed to update bucket targets: {}", e);
S3Error::with_message(S3ErrorCode::InternalError, format!("Failed to update bucket targets: {e}"))
})?;
return Ok(S3Response::new((StatusCode::NO_CONTENT, Body::from("".to_string()))));
}

View File

@@ -19,7 +19,7 @@ use rustfs_policy::policy::action::{Action, AdminAction};
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use serde::Deserialize;
use serde_urlencoded::from_bytes;
use tokio::sync::broadcast;
use tokio_util::sync::CancellationToken;
use tracing::warn;
use crate::{
@@ -232,8 +232,7 @@ impl Operation for StartDecommission {
let pools: Vec<&str> = query.pool.split(",").collect();
let mut pools_indices = Vec::with_capacity(pools.len());
// TODO: ctx
let (_ctx_tx, ctx_rx) = broadcast::channel::<bool>(1);
let ctx = CancellationToken::new();
for pool in pools.iter() {
let idx = {
@@ -264,7 +263,7 @@ impl Operation for StartDecommission {
}
if !pools_indices.is_empty() {
store.decommission(ctx_rx, pools_indices).await.map_err(ApiError::from)?;
store.decommission(ctx.clone(), pools_indices).await.map_err(ApiError::from)?;
}
Ok(S3Response::new((StatusCode::OK, Body::default())))
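
The broadcast channel is replaced by a `tokio_util::sync::CancellationToken`, which can be cloned freely and awaited from any task. A minimal sketch of the pattern (illustrative, not part of the diff):

use tokio_util::sync::CancellationToken;

async fn cancellation_token_example() {
    let ctx = CancellationToken::new();

    // Each long-running task holds its own clone of the token.
    let worker_ctx = ctx.clone();
    let worker = tokio::spawn(async move {
        tokio::select! {
            _ = worker_ctx.cancelled() => { /* stop decommission / replication work */ }
            _ = async { /* do work */ } => {}
        }
    });

    // Cancelling the original token signals every clone.
    ctx.cancel();
    let _ = worker.await;
}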

View File

@@ -39,21 +39,22 @@ use rustfs_ahm::{
scanner::data_scanner::ScannerConfig, shutdown_ahm_services,
};
use rustfs_common::globals::set_global_addr;
use rustfs_config::{DEFAULT_UPDATE_CHECK, ENV_UPDATE_CHECK};
use rustfs_config::DEFAULT_UPDATE_CHECK;
use rustfs_config::ENV_UPDATE_CHECK;
use rustfs_ecstore::bucket::metadata_sys;
use rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys;
use rustfs_ecstore::bucket::replication::{GLOBAL_REPLICATION_POOL, init_background_replication};
use rustfs_ecstore::config as ecconfig;
use rustfs_ecstore::config::GLOBAL_CONFIG_SYS;
use rustfs_ecstore::store_api::BucketOptions;
use rustfs_ecstore::{
StorageAPI,
bucket::metadata_sys,
bucket::metadata_sys::init_bucket_metadata_sys,
cmd::bucket_replication::init_bucket_replication_pool,
config as ecconfig,
config::GLOBAL_CONFIG_SYS,
endpoints::EndpointServerPools,
global::{set_global_rustfs_port, shutdown_background_services},
notification_sys::new_global_notification_sys,
set_global_endpoints,
store::ECStore,
store::init_local_disks,
store_api::BucketOptions,
update_erasure_type,
};
use rustfs_iam::init_iam_sys;
@@ -65,6 +66,7 @@ use s3s::s3_error;
use std::io::{Error, Result};
use std::str::FromStr;
use std::sync::Arc;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, instrument, warn};
#[cfg(all(target_os = "linux", target_env = "gnu"))]
@@ -276,15 +278,21 @@ async fn run(opt: config::Opt) -> Result<()> {
// Initialize the local disk
init_local_disks(endpoint_pools.clone()).await.map_err(Error::other)?;
let ctx = CancellationToken::new();
// init store
let store = ECStore::new(server_addr, endpoint_pools.clone()).await.inspect_err(|err| {
error!("ECStore::new {:?}", err);
})?;
let store = ECStore::new(server_addr, endpoint_pools.clone(), ctx.clone())
.await
.inspect_err(|err| {
error!("ECStore::new {:?}", err);
})?;
ecconfig::init();
// config system configuration
GLOBAL_CONFIG_SYS.init(store.clone()).await?;
// init replication_pool
init_background_replication(store.clone()).await;
// Initialize KMS system if enabled
init_kms_system(&opt).await?;
@@ -307,6 +315,10 @@ async fn run(opt: config::Opt) -> Result<()> {
// Collect bucket names into a vector
let buckets: Vec<String> = buckets_list.into_iter().map(|v| v.name).collect();
if let Some(pool) = GLOBAL_REPLICATION_POOL.get() {
pool.clone().init_resync(ctx.clone(), buckets.clone()).await?;
}
init_bucket_metadata_sys(store.clone(), buckets.clone()).await;
init_iam_sys(store.clone()).await.map_err(Error::other)?;
@@ -358,8 +370,6 @@ async fn run(opt: config::Opt) -> Result<()> {
// print server info
print_server_info();
// initialize bucket replication pool
init_bucket_replication_pool().await;
init_update_check();
@@ -369,11 +379,11 @@ async fn run(opt: config::Opt) -> Result<()> {
match wait_for_shutdown().await {
#[cfg(unix)]
ShutdownSignal::CtrlC | ShutdownSignal::Sigint | ShutdownSignal::Sigterm => {
handle_shutdown(&state_manager, &shutdown_tx).await;
handle_shutdown(&state_manager, &shutdown_tx, ctx.clone()).await;
}
#[cfg(not(unix))]
ShutdownSignal::CtrlC => {
handle_shutdown(&state_manager, &shutdown_tx).await;
handle_shutdown(&state_manager, &shutdown_tx, ctx.clone()).await;
}
}
@@ -393,7 +403,13 @@ fn parse_bool_env_var(var_name: &str, default: bool) -> bool {
}
/// Handles the shutdown process of the server
async fn handle_shutdown(state_manager: &ServiceStateManager, shutdown_tx: &tokio::sync::broadcast::Sender<()>) {
async fn handle_shutdown(
state_manager: &ServiceStateManager,
shutdown_tx: &tokio::sync::broadcast::Sender<()>,
ctx: CancellationToken,
) {
ctx.cancel();
info!(
target: "rustfs::main::handle_shutdown",
"Shutdown signal received in main thread"
@@ -630,13 +646,13 @@ async fn init_kms_system(opt: &config::Opt) -> Result<()> {
service_manager
.configure(kms_config)
.await
.map_err(|e| Error::other(format!("Failed to configure KMS: {}", e)))?;
.map_err(|e| Error::other(format!("Failed to configure KMS: {e}")))?;
// Start the KMS service
service_manager
.start()
.await
.map_err(|e| Error::other(format!("Failed to start KMS: {}", e)))?;
.map_err(|e| Error::other(format!("Failed to start KMS: {e}")))?;
info!("KMS service configured and started successfully");
} else {

View File

@@ -29,6 +29,33 @@ use chrono::Utc;
use datafusion::arrow::csv::WriterBuilder as CsvWriterBuilder;
use datafusion::arrow::json::WriterBuilder as JsonWriterBuilder;
use datafusion::arrow::json::writer::JsonArray;
use http::StatusCode;
use rustfs_ecstore::bucket::metadata_sys::get_replication_config;
use rustfs_ecstore::bucket::object_lock::objectlock_sys::BucketObjectLockSys;
use rustfs_ecstore::bucket::replication::DeletedObjectReplicationInfo;
use rustfs_ecstore::bucket::replication::REPLICATE_INCOMING_DELETE;
use rustfs_ecstore::bucket::replication::ReplicationConfigurationExt;
use rustfs_ecstore::bucket::replication::check_replicate_delete;
use rustfs_ecstore::bucket::replication::get_must_replicate_options;
use rustfs_ecstore::bucket::replication::must_replicate;
use rustfs_ecstore::bucket::replication::schedule_replication;
use rustfs_ecstore::bucket::replication::schedule_replication_delete;
use rustfs_ecstore::bucket::versioning::VersioningApi;
use rustfs_ecstore::disk::error::DiskError;
use rustfs_ecstore::disk::error_reduce::is_all_buckets_not_found;
use rustfs_ecstore::error::is_err_bucket_not_found;
use rustfs_ecstore::error::is_err_object_not_found;
use rustfs_ecstore::error::is_err_version_not_found;
use rustfs_ecstore::set_disk::MAX_PARTS_COUNT;
use rustfs_ecstore::store_api::ObjectInfo;
use rustfs_filemeta::ReplicationStatusType;
use rustfs_filemeta::ReplicationType;
use rustfs_filemeta::VersionPurgeStatusType;
use rustfs_s3select_api::object_store::bytes_stream;
use rustfs_s3select_api::query::Context;
use rustfs_s3select_api::query::Query;
use rustfs_s3select_query::get_global_db;
// use rustfs_ecstore::store_api::RESERVED_METADATA_PREFIX;
use base64::{Engine, engine::general_purpose::STANDARD as BASE64_STANDARD};
use futures::StreamExt;
@@ -49,16 +76,10 @@ use rustfs_ecstore::bucket::tagging::decode_tags;
use rustfs_ecstore::bucket::tagging::encode_tags;
use rustfs_ecstore::bucket::utils::serialize;
use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
use rustfs_ecstore::cmd::bucket_replication::ReplicationStatusType;
use rustfs_ecstore::cmd::bucket_replication::ReplicationType;
use rustfs_ecstore::cmd::bucket_replication::get_must_replicate_options;
use rustfs_ecstore::cmd::bucket_replication::must_replicate;
use rustfs_ecstore::cmd::bucket_replication::schedule_replication;
use rustfs_ecstore::compress::MIN_COMPRESSIBLE_SIZE;
use rustfs_ecstore::compress::is_compressible;
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::set_disk::MAX_PARTS_COUNT;
use rustfs_ecstore::set_disk::{DEFAULT_READ_BUFFER_SIZE, is_valid_storage_class};
use rustfs_ecstore::store_api::BucketOptions;
use rustfs_ecstore::store_api::CompletePart;
@@ -72,8 +93,6 @@ use rustfs_ecstore::store_api::ObjectToDelete;
use rustfs_ecstore::store_api::PutObjReader;
use rustfs_ecstore::store_api::StorageAPI;
use rustfs_filemeta::fileinfo::ObjectPartInfo;
use rustfs_filemeta::headers::RESERVED_METADATA_PREFIX_LOWER;
use rustfs_filemeta::headers::{AMZ_DECODED_CONTENT_LENGTH, AMZ_OBJECT_TAGGING};
use rustfs_kms::DataKey;
use rustfs_kms::service_manager::get_global_encryption_service;
use rustfs_kms::types::{EncryptionMetadata, ObjectEncryptionContext};
@@ -88,13 +107,13 @@ use rustfs_rio::HashReader;
use rustfs_rio::Reader;
use rustfs_rio::WarpReader;
use rustfs_rio::{DecryptReader, EncryptReader, HardLimitReader};
use rustfs_s3select_api::object_store::bytes_stream;
use rustfs_s3select_api::query::Context;
use rustfs_s3select_api::query::Query;
use rustfs_s3select_query::get_global_db;
use rustfs_targets::EventName;
use rustfs_targets::arn::{TargetID, TargetIDError};
use rustfs_utils::CompressionAlgorithm;
use rustfs_utils::http::AMZ_BUCKET_REPLICATION_STATUS;
use rustfs_utils::http::headers::RESERVED_METADATA_PREFIX_LOWER;
use rustfs_utils::http::headers::{AMZ_DECODED_CONTENT_LENGTH, AMZ_OBJECT_TAGGING};
use rustfs_utils::path::is_dir_object;
use rustfs_utils::path::path_join_buf;
use rustfs_zip::CompressionFormat;
use s3s::S3;
@@ -188,7 +207,7 @@ async fn create_managed_encryption_material(
let (data_key, encrypted_data_key) = service
.create_data_key(&kms_key_candidate, &context)
.await
.map_err(|e| ApiError::from(StorageError::other(format!("Failed to create data key: {}", e))))?;
.map_err(|e| ApiError::from(StorageError::other(format!("Failed to create data key: {e}"))))?;
let metadata = EncryptionMetadata {
algorithm: algorithm_str.to_string(),
@@ -227,7 +246,7 @@ async fn decrypt_managed_encryption_key(
let parsed = service
.headers_to_metadata(metadata)
.map_err(|e| ApiError::from(StorageError::other(format!("Failed to parse encryption metadata: {}", e))))?;
.map_err(|e| ApiError::from(StorageError::other(format!("Failed to parse encryption metadata: {e}"))))?;
if parsed.iv.len() != 12 {
return Err(ApiError::from(StorageError::other("Invalid encryption nonce length; expected 12 bytes")));
@@ -237,7 +256,7 @@ async fn decrypt_managed_encryption_key(
let data_key = service
.decrypt_data_key(&parsed.encrypted_data_key, &context)
.await
.map_err(|e| ApiError::from(StorageError::other(format!("Failed to decrypt data key: {}", e))))?;
.map_err(|e| ApiError::from(StorageError::other(format!("Failed to decrypt data key: {e}"))))?;
let key_bytes = data_key.plaintext_key;
let mut nonce = [0u8; 12];
@@ -967,67 +986,120 @@ impl S3 for FS {
/// Delete an object
#[tracing::instrument(level = "debug", skip(self, req))]
async fn delete_object(&self, req: S3Request<DeleteObjectInput>) -> S3Result<S3Response<DeleteObjectOutput>> {
async fn delete_object(&self, mut req: S3Request<DeleteObjectInput>) -> S3Result<S3Response<DeleteObjectOutput>> {
let DeleteObjectInput {
bucket, key, version_id, ..
} = req.input.clone();
let replica = req
.headers
.get(AMZ_BUCKET_REPLICATION_STATUS)
.map(|v| v.to_str().unwrap_or_default() == ReplicationStatusType::Replica.as_str())
.unwrap_or_default();
if replica {
authorize_request(&mut req, Action::S3Action(S3Action::ReplicateDeleteAction)).await?;
}
let metadata = extract_metadata(&req.headers);
let opts: ObjectOptions = del_opts(&bucket, &key, version_id, &req.headers, metadata)
let mut opts: ObjectOptions = del_opts(&bucket, &key, version_id, &req.headers, metadata)
.await
.map_err(ApiError::from)?;
let version_id = opts.version_id.as_ref().map(|v| Uuid::parse_str(v).ok()).unwrap_or_default();
let dobj = ObjectToDelete {
object_name: key.clone(),
version_id,
};
// TODO: check object lock
let objects: Vec<ObjectToDelete> = vec![dobj];
let lock_cfg = BucketObjectLockSys::get(&bucket).await;
if lock_cfg.is_some() && opts.delete_prefix {
return Err(S3Error::with_message(
S3ErrorCode::Custom("force-delete is forbidden on Object Locking enabled buckets".into()),
"force-delete is forbidden on Object Locking enabled buckets",
));
}
// let mut vid = opts.version_id.clone();
if replica {
opts.set_replica_status(ReplicationStatusType::Replica);
// if opts.version_purge_status().is_empty() {
// vid = None;
// }
}
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
let (dobjs, _errs) = store.delete_objects(&bucket, objects, opts).await.map_err(ApiError::from)?;
// TODO: let errors;
let obj_info = {
match store.delete_object(&bucket, &key, opts).await {
Ok(obj) => obj,
Err(err) => {
if is_err_bucket_not_found(&err) {
return Err(S3Error::with_message(S3ErrorCode::NoSuchBucket, "Bucket not found".to_string()));
}
let (delete_marker, version_id) = {
if let Some((a, b)) = dobjs
.iter()
.map(|v| {
let delete_marker = { if v.delete_marker { Some(true) } else { None } };
if is_err_object_not_found(&err) || is_err_version_not_found(&err) {
// TODO: send event
let version_id = v.version_id.clone();
return Ok(S3Response::with_status(DeleteObjectOutput::default(), StatusCode::NO_CONTENT));
}
(delete_marker, version_id)
})
.next()
{
(a, b)
} else {
(None, None)
return Err(ApiError::from(err).into());
}
}
};
let del_version_id = version_id.as_ref().map(|v| v.to_string()).unwrap_or_default();
if obj_info.name.is_empty() {
return Ok(S3Response::with_status(DeleteObjectOutput::default(), StatusCode::NO_CONTENT));
}
if obj_info.replication_status == ReplicationStatusType::Replica
|| obj_info.version_purge_status == VersionPurgeStatusType::Pending
{
schedule_replication_delete(DeletedObjectReplicationInfo {
delete_object: rustfs_ecstore::store_api::DeletedObject {
delete_marker: obj_info.delete_marker,
delete_marker_version_id: if obj_info.delete_marker { obj_info.version_id } else { None },
object_name: key.clone(),
version_id: if obj_info.delete_marker { None } else { obj_info.version_id },
delete_marker_mtime: obj_info.mod_time,
replication_state: Some(obj_info.replication_state()),
..Default::default()
},
bucket: bucket.clone(),
event_type: REPLICATE_INCOMING_DELETE.to_string(),
..Default::default()
})
.await;
}
let delete_marker = obj_info.delete_marker;
let version_id = obj_info.version_id;
let output = DeleteObjectOutput {
delete_marker,
version_id,
delete_marker: Some(delete_marker),
version_id: version_id.map(|v| v.to_string()),
..Default::default()
};
let event_name = if delete_marker {
EventName::ObjectRemovedDeleteMarkerCreated
} else {
EventName::ObjectRemovedDelete
};
let event_args = rustfs_notify::event::EventArgs {
event_name: EventName::ObjectRemovedDelete,
event_name,
bucket_name: bucket.clone(),
object: rustfs_ecstore::store_api::ObjectInfo {
name: key,
bucket,
name: key.clone(),
bucket: bucket.clone(),
..Default::default()
},
req_params: rustfs_utils::extract_req_params_header(&req.headers),
resp_elements: rustfs_utils::extract_resp_elements(&S3Response::new(DeleteBucketOutput {})),
version_id: del_version_id,
version_id: version_id.map(|v| v.to_string()).unwrap_or_default(),
host: rustfs_utils::get_request_host(&req.headers),
user_agent: rustfs_utils::get_request_user_agent(&req.headers),
};
@@ -1043,54 +1115,228 @@ impl S3 for FS {
/// Delete multiple objects
#[tracing::instrument(level = "debug", skip(self, req))]
async fn delete_objects(&self, req: S3Request<DeleteObjectsInput>) -> S3Result<S3Response<DeleteObjectsOutput>> {
// info!("delete_objects args {:?}", req.input);
let DeleteObjectsInput { bucket, delete, .. } = req.input;
let objects: Vec<ObjectToDelete> = delete
.objects
.iter()
.map(|v| {
let version_id = v.version_id.as_ref().map(|v| Uuid::parse_str(v).ok()).unwrap_or_default();
ObjectToDelete {
if delete.objects.is_empty() || delete.objects.len() > 1000 {
return Err(S3Error::with_message(
S3ErrorCode::InvalidArgument,
"No objects to delete or too many objects to delete".to_string(),
));
}
let replicate_deletes = has_replication_rules(
&bucket,
&delete
.objects
.iter()
.map(|v| ObjectToDelete {
object_name: v.key.clone(),
version_id,
}
})
.collect();
..Default::default()
})
.collect::<Vec<ObjectToDelete>>(),
)
.await;
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
let metadata = extract_metadata(&req.headers);
let has_lock_enable = BucketObjectLockSys::get(&bucket).await.is_some();
let opts: ObjectOptions = del_opts(&bucket, "", None, &req.headers, metadata)
.await
.map_err(ApiError::from)?;
let version_cfg = BucketVersioningSys::get(&bucket).await.unwrap_or_default();
let (dobjs, errs) = store.delete_objects(&bucket, objects, opts).await.map_err(ApiError::from)?;
#[derive(Default, Clone)]
struct DeleteResult {
delete_object: Option<rustfs_ecstore::store_api::DeletedObject>,
error: Option<Error>,
}
let deleted = dobjs
let mut delete_results = vec![DeleteResult::default(); delete.objects.len()];
let mut object_to_delete = Vec::new();
let mut object_to_delete_index = HashMap::new();
for (idx, object) in delete.objects.iter().enumerate() {
// TODO: check auth
if let Some(version_id) = object.version_id.clone() {
let _vid = match Uuid::parse_str(&version_id) {
Ok(v) => v,
Err(err) => {
delete_results[idx].error = Some(Error {
code: Some("NoSuchVersion".to_string()),
key: Some(object.key.clone()),
message: Some(err.to_string()),
version_id: Some(version_id),
});
continue;
}
};
};
let mut object = ObjectToDelete {
object_name: object.key.clone(),
version_id: object.version_id.clone().map(|v| Uuid::parse_str(&v).unwrap()),
..Default::default()
};
let opts = ObjectOptions {
version_id: object.version_id.map(|v| v.to_string()),
versioned: version_cfg.prefix_enabled(&object.object_name),
version_suspended: version_cfg.suspended(),
..Default::default()
};
let mut goi = ObjectInfo::default();
let mut gerr = None;
if replicate_deletes || object.version_id.is_some() && has_lock_enable {
(goi, gerr) = match store.get_object_info(&bucket, &object.object_name, &opts).await {
Ok(res) => (res, None),
Err(e) => (ObjectInfo::default(), Some(e.to_string())),
};
}
if is_dir_object(&object.object_name) && object.version_id.is_none() {
object.version_id = Some(Uuid::nil());
}
if replicate_deletes {
let dsc = check_replicate_delete(
&bucket,
&ObjectToDelete {
object_name: object.object_name.clone(),
version_id: object.version_id,
..Default::default()
},
&goi,
&opts,
gerr.clone(),
)
.await;
if dsc.replicate_any() {
if object.version_id.is_some() {
object.version_purge_status = Some(VersionPurgeStatusType::Pending);
object.version_purge_statuses = dsc.pending_status();
} else {
object.delete_marker_replication_status = dsc.pending_status();
}
object.replicate_decision_str = Some(dsc.to_string());
}
}
// TODO: Retention
object_to_delete_index.insert(object.object_name.clone(), idx);
object_to_delete.push(object);
}
let (mut dobjs, errs) = {
store
.delete_objects(
&bucket,
object_to_delete.clone(),
ObjectOptions {
version_suspended: version_cfg.suspended(),
..Default::default()
},
)
.await
};
if is_all_buckets_not_found(
&errs
.iter()
.map(|v| v.as_ref().map(|v| v.clone().into()))
.collect::<Vec<Option<DiskError>>>() as &[Option<DiskError>],
) {
return Err(S3Error::with_message(S3ErrorCode::NoSuchBucket, "Bucket not found".to_string()));
}
for (i, err) in errs.into_iter().enumerate() {
let obj = dobjs[i].clone();
// let replication_state = obj.replication_state.clone().unwrap_or_default();
// let obj_to_del = ObjectToDelete {
// object_name: decode_dir_object(dobjs[i].object_name.as_str()),
// version_id: obj.version_id,
// delete_marker_replication_status: replication_state.replication_status_internal.clone(),
// version_purge_status: Some(obj.version_purge_status()),
// version_purge_statuses: replication_state.version_purge_status_internal.clone(),
// replicate_decision_str: Some(replication_state.replicate_decision_str.clone()),
// };
let Some(didx) = object_to_delete_index.get(&obj.object_name) else {
continue;
};
if err.is_none()
|| err
.clone()
.is_some_and(|v| is_err_object_not_found(&v) || is_err_version_not_found(&v))
{
if replicate_deletes {
dobjs[i].replication_state = Some(object_to_delete[i].replication_state());
}
delete_results[*didx].delete_object = Some(dobjs[i].clone());
continue;
}
if let Some(err) = err {
delete_results[*didx].error = Some(Error {
code: Some(err.to_string()),
key: Some(object_to_delete[i].object_name.clone()),
message: Some(err.to_string()),
version_id: object_to_delete[i].version_id.map(|v| v.to_string()),
});
}
}
let deleted = delete_results
.iter()
.filter_map(|v| v.delete_object.clone())
.map(|v| DeletedObject {
delete_marker: { if v.delete_marker { Some(true) } else { None } },
delete_marker_version_id: v.delete_marker_version_id.clone(),
delete_marker_version_id: v.delete_marker_version_id.map(|v| v.to_string()),
key: Some(v.object_name.clone()),
version_id: v.version_id.clone(),
version_id: if is_dir_object(v.object_name.as_str()) && v.version_id == Some(Uuid::nil()) {
None
} else {
v.version_id.map(|v| v.to_string())
},
})
.collect();
// TODO: let errors;
for err in errs.iter().flatten() {
warn!("delete_objects err {:?}", err);
}
let errors = delete_results.iter().filter_map(|v| v.error.clone()).collect::<Vec<Error>>();
let output = DeleteObjectsOutput {
deleted: Some(deleted),
// errors,
errors: Some(errors),
..Default::default()
};
for dobjs in delete_results.iter() {
if let Some(dobj) = &dobjs.delete_object {
if replicate_deletes
&& (dobj.delete_marker_replication_status() == ReplicationStatusType::Pending
|| dobj.version_purge_status() == VersionPurgeStatusType::Pending)
{
let mut dobj = dobj.clone();
if is_dir_object(dobj.object_name.as_str()) && dobj.version_id.is_none() {
dobj.version_id = Some(Uuid::nil());
}
let deleted_object = DeletedObjectReplicationInfo {
delete_object: dobj,
bucket: bucket.clone(),
event_type: REPLICATE_INCOMING_DELETE.to_string(),
..Default::default()
};
schedule_replication_delete(deleted_object).await;
}
}
}
// Asynchronous call; it will not block the response to the current request
tokio::spawn(async move {
for dobj in dobjs {
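The hunk above fans one DeleteObjects request out to the storage layer and then writes per-object results back in request order via an index map. A minimal, self-contained sketch of that pattern (the `DeleteResult` fields here are simplified stand-ins for the real `DeletedObject` and `Error` types, not the commit's types):

```rust
use std::collections::HashMap;

#[derive(Default, Clone, Debug)]
struct DeleteResult {
    deleted: Option<String>, // stand-in for rustfs_ecstore::store_api::DeletedObject
    error: Option<String>,   // stand-in for the s3s Error type
}

/// Write storage-layer results back into the slots of the original request,
/// mirroring `object_to_delete_index` / `delete_results` in the handler above.
fn map_results_back(requested: &[&str], results: Vec<(String, Option<String>)>) -> Vec<DeleteResult> {
    let mut out = vec![DeleteResult::default(); requested.len()];
    let index: HashMap<&str, usize> = requested.iter().enumerate().map(|(i, k)| (*k, i)).collect();
    for (key, err) in results {
        let Some(&i) = index.get(key.as_str()) else { continue };
        match err {
            None => out[i].deleted = Some(key),
            Some(e) => out[i].error = Some(e),
        }
    }
    out
}

fn main() {
    let requested = ["a.txt", "b.txt"];
    // Results may come back in any order; the index map restores request order.
    let results = vec![
        ("b.txt".to_string(), None),
        ("a.txt".to_string(), Some("NoSuchKey".to_string())),
    ];
    println!("{:?}", map_results_back(&requested, results));
}
```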
@@ -1330,7 +1576,7 @@ impl S3 for FS {
// Decode the base64 key
let key_bytes = BASE64_STANDARD
.decode(sse_key)
.map_err(|e| ApiError::from(StorageError::other(format!("Invalid SSE-C key: {}", e))))?;
.map_err(|e| ApiError::from(StorageError::other(format!("Invalid SSE-C key: {e}"))))?;
// Verify key length (should be 32 bytes for AES-256)
if key_bytes.len() != 32 {
@@ -1349,7 +1595,7 @@ impl S3 for FS {
// Generate the same deterministic nonce from object key
let mut nonce = [0u8; 12];
let nonce_source = format!("{}-{}", bucket, key);
let nonce_source = format!("{bucket}-{key}");
let nonce_hash = md5::compute(nonce_source.as_bytes());
nonce.copy_from_slice(&nonce_hash.0[..12]);
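The deterministic nonce scheme is worth isolating: because the nonce is derived purely from the bucket and object key, a later GET can rebuild it without the nonce ever being persisted. A small sketch of that derivation, using the same `md5` crate as the handler (the `main` only demonstrates the round-trip property):

```rust
/// Derive the 12-byte nonce from "{bucket}-{key}", the scheme used above:
/// MD5 the joined string and keep the first 12 bytes of the digest.
fn derive_nonce(bucket: &str, key: &str) -> [u8; 12] {
    let mut nonce = [0u8; 12];
    let digest = md5::compute(format!("{bucket}-{key}"));
    nonce.copy_from_slice(&digest.0[..12]);
    nonce
}

fn main() {
    // Same (bucket, key) pair, same nonce on every call, so decryption does not
    // need the nonce stored next to the ciphertext.
    assert_eq!(derive_nonce("photos", "2024/cat.png"), derive_nonce("photos", "2024/cat.png"));
}
```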
@@ -1999,7 +2245,7 @@ impl S3 for FS {
// Decode the base64 key
let key_bytes = BASE64_STANDARD
.decode(sse_key)
.map_err(|e| ApiError::from(StorageError::other(format!("Invalid SSE-C key: {}", e))))?;
.map_err(|e| ApiError::from(StorageError::other(format!("Invalid SSE-C key: {e}"))))?;
// Verify key length (should be 32 bytes for AES-256)
if key_bytes.len() != 32 {
@@ -2025,7 +2271,7 @@ impl S3 for FS {
// Generate a deterministic nonce from object key for consistency
let mut nonce = [0u8; 12];
let nonce_source = format!("{}-{}", bucket, key);
let nonce_source = format!("{bucket}-{key}");
let nonce_hash = md5::compute(nonce_source.as_bytes());
nonce.copy_from_slice(&nonce_hash.0[..12]);
@@ -2070,17 +2316,17 @@ impl S3 for FS {
.map_err(ApiError::from)?;
let repoptions =
get_must_replicate_options(&mt2, "", ReplicationStatusType::Unknown, ReplicationType::ObjectReplicationType, &opts);
get_must_replicate_options(&mt2, "".to_string(), ReplicationStatusType::Empty, ReplicationType::Object, opts.clone());
let dsc = must_replicate(&bucket, &key, repoptions).await;
let dsc = must_replicate(&bucket, &key, &repoptions).await;
// warn!("dsc {}", &dsc.replicate_any().clone());
if dsc.replicate_any() {
let k = format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp");
let now: DateTime<Utc> = Utc::now();
let formatted_time = now.to_rfc3339();
opts.user_defined.insert(k, formatted_time);
let k = format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status");
opts.user_defined.insert(k, dsc.pending_status());
opts.user_defined.insert(k, dsc.pending_status().unwrap_or_default());
}
let obj_info = store
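When `dsc.replicate_any()` is true, the PUT path stamps a pending replication status and a timestamp into the object's user-defined metadata. A hedged sketch of that step in isolation; the prefix value and the "PENDING" literal are assumptions for illustration, since the real values come from `RESERVED_METADATA_PREFIX_LOWER` in the ecstore crate and from `dsc.pending_status()`:

```rust
use chrono::Utc;
use std::collections::HashMap;

// Assumed value, for illustration only; the handler uses the
// RESERVED_METADATA_PREFIX_LOWER constant from rustfs_ecstore.
const RESERVED_METADATA_PREFIX_LOWER: &str = "x-rustfs-internal-";

/// Record that replication is pending for this object, mirroring the two
/// `user_defined.insert` calls in the PUT path above.
fn stamp_replication_metadata(user_defined: &mut HashMap<String, String>, pending_status: Option<String>) {
    user_defined.insert(
        format!("{RESERVED_METADATA_PREFIX_LOWER}replication-timestamp"),
        Utc::now().to_rfc3339(),
    );
    user_defined.insert(
        format!("{RESERVED_METADATA_PREFIX_LOWER}replication-status"),
        pending_status.unwrap_or_default(),
    );
}

fn main() {
    let mut md = HashMap::new();
    stamp_replication_metadata(&mut md, Some("PENDING".to_string()));
    println!("{md:?}");
}
```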
@@ -2091,13 +2337,12 @@ impl S3 for FS {
let e_tag = obj_info.etag.clone();
let repoptions =
get_must_replicate_options(&mt2, "", ReplicationStatusType::Unknown, ReplicationType::ObjectReplicationType, &opts);
get_must_replicate_options(&mt2, "".to_string(), ReplicationStatusType::Empty, ReplicationType::Object, opts);
let dsc = must_replicate(&bucket, &key, &repoptions).await;
let dsc = must_replicate(&bucket, &key, repoptions).await;
if dsc.replicate_any() {
let objectlayer = new_object_layer_fn();
schedule_replication(obj_info, objectlayer.unwrap(), dsc, 1).await;
schedule_replication(obj_info, store, dsc, ReplicationType::Object).await;
}
let output = PutObjectOutput {
@@ -2853,6 +3098,7 @@ impl S3 for FS {
);
let obj_info = store
.clone()
.complete_multipart_upload(&bucket, &key, &upload_id, uploaded_parts, opts)
.await
.map_err(ApiError::from)?;
@@ -2879,14 +3125,13 @@ impl S3 for FS {
let mt2 = HashMap::new();
let repoptions =
get_must_replicate_options(&mt2, "", ReplicationStatusType::Unknown, ReplicationType::ObjectReplicationType, opts);
get_must_replicate_options(&mt2, "".to_string(), ReplicationStatusType::Empty, ReplicationType::Object, opts.clone());
let dsc = must_replicate(&bucket, &key, &repoptions).await;
let dsc = must_replicate(&bucket, &key, repoptions).await;
if dsc.replicate_any() {
warn!("need multipart replication");
let objectlayer = new_object_layer_fn();
schedule_replication(obj_info, objectlayer.unwrap(), dsc, 1).await;
schedule_replication(obj_info, store, dsc, ReplicationType::Object).await;
}
tracing::info!(
"TDD: About to return S3Response with output: SSE={:?}, KMS={:?}",
@@ -4371,6 +4616,22 @@ pub(crate) fn process_lambda_configurations<F>(
}
}
pub(crate) async fn has_replication_rules(bucket: &str, objects: &[ObjectToDelete]) -> bool {
let (cfg, _created) = match get_replication_config(bucket).await {
Ok(replication_config) => replication_config,
Err(_err) => {
return false;
}
};
for object in objects {
if cfg.has_active_rules(&object.object_name, true) {
return true;
}
}
false
}
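A hedged sketch of how `has_replication_rules` could be exercised from the test module below. It relies only on behaviour visible above, namely that the helper returns `false` whenever `get_replication_config` fails (for example, a bucket with no replication configuration); the bucket name and the test harness are assumptions, not part of this commit:

```rust
#[tokio::test]
async fn has_replication_rules_is_false_without_config() {
    // No replication config can be loaded for this bucket, so the helper must
    // answer `false` and the delete path skips all replication bookkeeping.
    let objects = vec![ObjectToDelete {
        object_name: "a.txt".to_string(),
        ..Default::default()
    }];
    assert!(!has_replication_rules("bucket-without-replication", &objects).await);
}
```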
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -18,10 +18,13 @@ use rustfs_ecstore::error::Result;
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::store_api::{HTTPPreconditions, HTTPRangeSpec, ObjectOptions};
use rustfs_utils::http::RUSTFS_BUCKET_REPLICATION_DELETE_MARKER;
use rustfs_utils::http::RUSTFS_BUCKET_SOURCE_VERSION_ID;
use rustfs_utils::path::is_dir_object;
use s3s::{S3Result, s3_error};
use std::collections::HashMap;
use std::sync::LazyLock;
use tracing::error;
use uuid::Uuid;
/// Creates options for deleting an object in a bucket.
@@ -35,22 +38,32 @@ pub async fn del_opts(
let versioned = BucketVersioningSys::prefix_enabled(bucket, object).await;
let version_suspended = BucketVersioningSys::suspended(bucket).await;
// TODO: delete_prefix
let vid = if vid.is_none() {
headers
.get(RUSTFS_BUCKET_SOURCE_VERSION_ID)
.map(|v| v.to_str().unwrap().to_owned())
} else {
vid
};
let vid = vid.map(|v| v.as_str().trim().to_owned());
if let Some(ref id) = vid {
if let Err(_err) = Uuid::parse_str(id.as_str()) {
if let Err(err) = Uuid::parse_str(id.as_str()) {
error!("del_opts: invalid version id: {} error: {}", id, err);
return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone()));
}
if !versioned {
error!("del_opts: object not versioned: {}", object);
return Err(StorageError::InvalidArgument(bucket.to_owned(), object.to_owned(), id.clone()));
}
}
let mut opts = put_opts_from_headers(headers, metadata.clone())
.map_err(|err| StorageError::InvalidArgument(bucket.to_owned(), object.to_owned(), err.to_string()))?;
let mut opts = put_opts_from_headers(headers, metadata.clone()).map_err(|err| {
error!("del_opts: invalid argument: {} error: {}", object, err);
StorageError::InvalidArgument(bucket.to_owned(), object.to_owned(), err.to_string())
})?;
opts.version_id = {
if is_dir_object(object) && vid.is_none() {
@@ -62,6 +75,11 @@ pub async fn del_opts(
opts.version_suspended = version_suspended;
opts.versioned = versioned;
opts.delete_marker = headers
.get(RUSTFS_BUCKET_REPLICATION_DELETE_MARKER)
.map(|v| v.to_str().unwrap() == "true")
.unwrap_or_default();
Ok(opts)
}
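The version-id handling in `del_opts` (and in `put_opts` below) follows one pattern: take the explicit id if the caller supplied one, otherwise fall back to the replication source-version-id header, trim it, and require a valid UUID. A self-contained sketch of that resolution; the literal header name is an assumption, since the handler reads it through `RUSTFS_BUCKET_SOURCE_VERSION_ID` from rustfs_utils, and this sketch ignores non-UTF-8 header values instead of panicking:

```rust
use http::HeaderMap;
use uuid::Uuid;

// Assumed header name for illustration; see RUSTFS_BUCKET_SOURCE_VERSION_ID.
const SOURCE_VERSION_ID_HEADER: &str = "x-rustfs-source-version-id";

/// Resolve the effective version id: prefer the explicit id, fall back to the
/// replication source header, then validate the trimmed value as a UUID.
fn resolve_version_id(explicit: Option<String>, headers: &HeaderMap) -> Result<Option<String>, String> {
    let vid = explicit.or_else(|| {
        headers
            .get(SOURCE_VERSION_ID_HEADER)
            .and_then(|v| v.to_str().ok())
            .map(|v| v.to_owned())
    });
    let vid = vid.map(|v| v.trim().to_owned());
    if let Some(ref id) = vid {
        Uuid::parse_str(id).map_err(|e| format!("invalid version id {id}: {e}"))?;
    }
    Ok(vid)
}

fn main() {
    let mut headers = HeaderMap::new();
    headers.insert(SOURCE_VERSION_ID_HEADER, "0f8fad5b-d9cb-469f-a165-70867728950e".parse().unwrap());
    // No explicit id on the request, so the replication header wins.
    assert!(resolve_version_id(None, &headers).unwrap().is_some());
}
```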
@@ -144,6 +162,14 @@ pub async fn put_opts(
let versioned = BucketVersioningSys::prefix_enabled(bucket, object).await;
let version_suspended = BucketVersioningSys::prefix_suspended(bucket, object).await;
let vid = if vid.is_none() {
headers
.get(RUSTFS_BUCKET_SOURCE_VERSION_ID)
.map(|v| v.to_str().unwrap().to_owned())
} else {
vid
};
let vid = vid.map(|v| v.as_str().trim().to_owned());
if let Some(ref id) = vid {