Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 01:30:33 +00:00)

Compare commits: 1.0.0-alph… → feat/net-m… (20 commits)
| SHA1 |
|---|
| 7df97fb266 |
| 8f310cd4a8 |
| e99da872ac |
| 8ed01a3e06 |
| 9e1739ed8d |
| 7abbfc9c2c |
| 639bf0c233 |
| ad99019749 |
| aac9b1edb7 |
| 5689311cff |
| 007d9c0b21 |
| 626c7ed34a |
| 0e680eae31 |
| 7622b37f7b |
| f1dd3a982e |
| 4f73760a45 |
| be66cf8bd3 |
| 23b40d398f |
| 90f21a9102 |
| 9b029d18b2 |
Cargo.lock (generated, 1267 changes) — file diff suppressed because it is too large.
Cargo.toml (65 changes)

```diff
@@ -99,10 +99,12 @@ async-recursion = "1.1.1"
 async-trait = "0.1.89"
 async-compression = { version = "0.4.19" }
 atomic_enum = "0.3.0"
-aws-config = { version = "1.8.6" }
-aws-sdk-s3 = { version = "1.106.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
-axum = "0.8.4"
-axum-extra = "0.10.1"
+aws-config = { version = "1.8.8" }
+aws-credential-types = { version = "1.2.8" }
+aws-smithy-types = { version = "1.3.3" }
+aws-sdk-s3 = { version = "1.108.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
+axum = "0.8.6"
+axum-extra = "0.10.3"
 axum-server = { version = "0.7.2", features = ["tls-rustls-no-provider"], default-features = false }
 base64-simd = "0.8.0"
 base64 = "0.22.1"
@@ -111,6 +113,7 @@ bytes = { version = "1.10.1", features = ["serde"] }
 bytesize = "2.1.0"
 byteorder = "1.5.0"
 cfg-if = "1.0.3"
 convert_case = "0.8.0"
+crc-fast = "1.3.0"
 chacha20poly1305 = { version = "0.10.1" }
 chrono = { version = "0.4.42", features = ["serde"] }
@@ -119,18 +122,18 @@ const-str = { version = "0.7.0", features = ["std", "proc"] }
 crc32fast = "1.5.0"
 criterion = { version = "0.7", features = ["html_reports"] }
 crossbeam-queue = "0.3.12"
 dashmap = "6.1.0"
-datafusion = "50.0.0"
+datafusion = "50.1.0"
 derive_builder = "0.20.2"
 enumset = "1.1.10"
 flatbuffers = "25.9.23"
-flate2 = "1.1.2"
-flexi_logger = { version = "0.31.4", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv"] }
+flate2 = "1.1.4"
+flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv"] }
 form_urlencoded = "1.2.2"
 futures = "0.3.31"
 futures-core = "0.3.31"
 futures-util = "0.3.31"
 glob = "0.3.3"
 hashbrown = { version = "0.16.0", features = ["serde", "rayon"] }
 hex-simd = "0.8.0"
 highway = { version = "1.3.0" }
 hickory-resolver = { version = "0.25.2", features = ["tls-ring"] }
@@ -146,8 +149,9 @@ http = "1.3.1"
 http-body = "1.0.1"
 humantime = "2.3.0"
 ipnetwork = { version = "0.21.1", features = ["serde"] }
-jsonwebtoken = "9.3.1"
+jsonwebtoken = { version = "10.0.0", features = ["rust_crypto"] }
 lazy_static = "1.5.0"
 libc = "0.2.177"
+libsystemd = { version = "0.7.2" }
 local-ip-address = "0.6.5"
 lz4 = "1.28.1"
@@ -158,39 +162,39 @@ mime_guess = "2.0.5"
 moka = { version = "0.12.11", features = ["future"] }
 netif = "0.1.6"
 nix = { version = "0.30.1", features = ["fs"] }
-nu-ansi-term = "0.50.1"
+nu-ansi-term = "0.50.3"
 num_cpus = { version = "1.17.0" }
 nvml-wrapper = "0.11.0"
 object_store = "0.12.4"
 once_cell = "1.21.3"
-opentelemetry = { version = "0.30.0" }
-opentelemetry-appender-tracing = { version = "0.30.1", features = [
+opentelemetry = { version = "0.31.0" }
+opentelemetry-appender-tracing = { version = "0.31.1", features = [
     "experimental_use_tracing_span_context",
     "experimental_metadata_attributes",
     "spec_unstable_logs_enabled"
 ] }
-opentelemetry_sdk = { version = "0.30.0" }
-opentelemetry-stdout = { version = "0.30.0" }
-opentelemetry-otlp = { version = "0.30.0", default-features = false, features = [
+opentelemetry_sdk = { version = "0.31.0" }
+opentelemetry-stdout = { version = "0.31.0" }
+opentelemetry-otlp = { version = "0.31.0", default-features = false, features = [
     "grpc-tonic", "gzip-tonic", "trace", "metrics", "logs", "internal-logs"
 ] }
-opentelemetry-semantic-conventions = { version = "0.30.0", features = [
+opentelemetry-semantic-conventions = { version = "0.31.0", features = [
     "semconv_experimental",
 ] }
-parking_lot = "0.12.4"
+parking_lot = "0.12.5"
 path-absolutize = "3.1.1"
 path-clean = "1.0.1"
 blake3 = { version = "1.8.2" }
 pbkdf2 = "0.12.2"
 percent-encoding = "2.3.2"
 pin-project-lite = "0.2.16"
 prost = "0.14.1"
 pretty_assertions = "1.4.1"
 quick-xml = "0.38.3"
 rand = "0.9.2"
 rayon = "1.11.0"
 rdkafka = { version = "0.38.0", features = ["tokio"] }
 reed-solomon-simd = { version = "3.0.1" }
-regex = { version = "1.11.2" }
+regex = { version = "1.12.1" }
 reqwest = { version = "0.12.23", default-features = false, features = [
     "rustls-tls-webpki-roots",
     "charset",
@@ -200,38 +204,39 @@ reqwest = { version = "0.12.23", default-features = false, features = [
     "json",
     "blocking",
 ] }
-rmcp = { version = "0.6.4" }
+rmcp = { version = "0.8.1" }
 rmp = "0.8.14"
 rmp-serde = "1.3.0"
 rsa = "0.9.8"
 rumqttc = { version = "0.25.0" }
 rust-embed = { version = "8.7.2" }
 rustfs-rsc = "2025.506.1"
 rustc-hash = { version = "2.1.1" }
 rustls = { version = "0.23.32", features = ["ring", "logging", "std", "tls12"], default-features = false }
 rustls-pki-types = "1.12.0"
 rustls-pemfile = "2.2.0"
-s3s = { version = "0.12.0-minio-preview.3" }
+s3s = { version = "0.12.0-rc.2", features = ["minio"] }
 schemars = "1.0.4"
-serde = { version = "1.0.226", features = ["derive"] }
+serde = { version = "1.0.228", features = ["derive"] }
 serde_json = { version = "1.0.145", features = ["raw_value"] }
 serde_urlencoded = "0.7.1"
 serial_test = "3.2.0"
 sha1 = "0.10.6"
 sha2 = "0.10.9"
-shadow-rs = { version = "1.3.0", default-features = false }
+shadow-rs = { version = "1.4.0", default-features = false }
 siphasher = "1.0.1"
 smallvec = { version = "1.15.1", features = ["serde"] }
 smartstring = "1.0.1"
 snafu = "0.8.9"
 snap = "1.1.1"
 socket2 = "0.6.0"
+starshard = { version = "0.5.0", features = ["rayon", "async", "serde"] }
 strum = { version = "0.27.2", features = ["derive"] }
-sysinfo = "0.37.0"
+sysinfo = "0.37.1"
 sysctl = "0.7.1"
 tempfile = "3.23.0"
 temp-env = "0.3.6"
 test-case = "3.3.1"
-thiserror = "2.0.16"
+thiserror = "2.0.17"
 time = { version = "0.3.44", features = [
     "std",
     "parsing",
@@ -240,7 +245,7 @@ time = { version = "0.3.44", features = [
     "serde",
 ] }
 tokio = { version = "1.47.1", features = ["fs", "rt-multi-thread"] }
-tokio-rustls = { version = "0.26.3", default-features = false, features = ["logging", "tls12", "ring"] }
+tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
 tokio-stream = { version = "0.1.17" }
 tokio-tar = "0.3.1"
 tokio-test = "0.4.4"
@@ -253,7 +258,7 @@ tower-http = { version = "0.6.6", features = ["cors"] }
 tracing = "0.1.41"
 tracing-core = "0.1.34"
 tracing-error = "0.2.1"
-tracing-opentelemetry = "0.31.0"
+tracing-opentelemetry = "0.32.0"
 tracing-subscriber = { version = "0.3.20", features = ["env-filter", "time"] }
 transform-stream = "0.3.1"
 url = "2.5.7"
@@ -266,10 +271,10 @@ uuid = { version = "1.18.1", features = [
 vaultrs = { version = "0.7.4" }
 walkdir = "2.5.0"
 wildmatch = { version = "2.5.0", features = ["serde"] }
-zeroize = { version = "1.8.1", features = ["derive"] }
+zeroize = { version = "1.8.2", features = ["derive"] }
 winapi = { version = "0.3.9" }
 xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
-zip = "5.1.1"
+zip = "6.0.0"
 zstd = "0.13.3"
```
README.md (10 changes)

```diff
@@ -172,8 +172,18 @@ RustFS is a community-driven project, and we appreciate all contributions. Check
   <img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
 </a>

+## Github Trending Top
+
+🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending top charts.
+
+<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
+
 ## License

 [Apache 2.0](https://opensource.org/licenses/Apache-2.0)

 **RustFS** is a trademark of RustFS, Inc. All other trademarks are the property of their respective owners.
```

The second hunk applies the same section to the Chinese README (translated here):

```diff
@@ -122,6 +122,14 @@ RustFS is a community-driven project, and we appreciate all contributions. See
   <img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
 </a>

+## GitHub Trending Top
+
+🚀 RustFS is loved by open-source enthusiasts and enterprise users worldwide, and has repeatedly topped the global GitHub Trending charts.
+
+<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
+
 ## License

 [Apache 2.0](https://opensource.org/licenses/Apache-2.0)
```
```diff
@@ -29,6 +29,7 @@ use rustfs_ecstore::{
     data_usage::{aggregate_local_snapshots, store_data_usage_in_backend},
 };
 use rustfs_filemeta::{MetacacheReader, VersionType};
+use s3s::dto::{BucketVersioningStatus, VersioningConfiguration};
 use tokio::sync::{Mutex, RwLock};
 use tokio_util::sync::CancellationToken;
 use tracing::{debug, error, info, warn};
@@ -50,7 +51,6 @@ use rustfs_common::data_usage::{DataUsageInfo, SizeSummary};
 use rustfs_common::metrics::{Metric, Metrics, globalMetrics};
 use rustfs_ecstore::bucket::versioning::VersioningApi;
 use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
-use rustfs_ecstore::cmd::bucket_targets::VersioningConfig;
 use rustfs_ecstore::disk::RUSTFS_META_BUCKET;
 use uuid;

@@ -300,8 +300,13 @@ impl Scanner {
             .map(|(c, _)| Arc::new(c));

         // Get bucket versioning configuration
-        let versioning_config = Arc::new(VersioningConfig {
-            enabled: bucket_info.versioning,
+        let versioning_config = Arc::new(VersioningConfiguration {
+            status: if bucket_info.versioning {
+                Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED))
+            } else {
+                None
+            },
+            ..Default::default()
         });

         let records = match bucket_objects_map.get(bucket_name) {
@@ -1825,7 +1830,16 @@ impl Scanner {
             }
         };
         let bucket_info = ecstore.get_bucket_info(bucket, &Default::default()).await.ok();
-        let versioning_config = bucket_info.map(|bi| Arc::new(VersioningConfig { enabled: bi.versioning }));
+        let versioning_config = bucket_info.map(|bi| {
+            Arc::new(VersioningConfiguration {
+                status: if bi.versioning {
+                    Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED))
+                } else {
+                    None
+                },
+                ..Default::default()
+            })
+        });
         let lifecycle_config = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket)
             .await
             .ok()
@@ -2651,7 +2665,7 @@ mod tests {
         // create ECStore with dynamic port
         let port = port.unwrap_or(9000);
         let server_addr: SocketAddr = format!("127.0.0.1:{port}").parse().expect("Invalid server address format");
-        let ecstore = ECStore::new(server_addr, endpoint_pools)
+        let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
            .await
            .expect("Failed to create ECStore");
```
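The scanner hunks above drop the internal `VersioningConfig { enabled: bool }` wrapper in favor of s3s's `VersioningConfiguration` DTO, which models versioning as an optional status rather than a boolean. A minimal sketch of the mapping, using only the types and the constructor pattern visible in the diff:

```rust
use s3s::dto::{BucketVersioningStatus, VersioningConfiguration};

// Map a plain bool flag onto the s3s DTO, exactly as the diff does inline.
fn versioning_from_flag(enabled: bool) -> VersioningConfiguration {
    VersioningConfiguration {
        status: if enabled {
            Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED))
        } else {
            None // an unset status means versioning was never enabled
        },
        ..Default::default()
    }
}
```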
```diff
@@ -28,10 +28,9 @@ use rustfs_ecstore::bucket::metadata_sys::get_object_lock_config;
 use rustfs_ecstore::bucket::object_lock::objectlock_sys::{BucketObjectLockSys, enforce_retention_for_deletion};
 use rustfs_ecstore::bucket::versioning::VersioningApi;
 use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
-use rustfs_ecstore::cmd::bucket_targets::VersioningConfig;
 use rustfs_ecstore::store_api::{ObjectInfo, ObjectToDelete};
 use rustfs_filemeta::FileInfo;
-use s3s::dto::BucketLifecycleConfiguration as LifecycleConfig;
+use s3s::dto::{BucketLifecycleConfiguration as LifecycleConfig, VersioningConfiguration};
 use time::OffsetDateTime;
 use tracing::info;

@@ -43,11 +42,15 @@ pub struct ScannerItem {
     pub bucket: String,
     pub object_name: String,
     pub lifecycle: Option<Arc<LifecycleConfig>>,
-    pub versioning: Option<Arc<VersioningConfig>>,
+    pub versioning: Option<Arc<VersioningConfiguration>>,
 }

 impl ScannerItem {
-    pub fn new(bucket: String, lifecycle: Option<Arc<LifecycleConfig>>, versioning: Option<Arc<VersioningConfig>>) -> Self {
+    pub fn new(
+        bucket: String,
+        lifecycle: Option<Arc<LifecycleConfig>>,
+        versioning: Option<Arc<VersioningConfiguration>>,
+    ) -> Self {
         Self {
             bucket,
             object_name: "".to_string(),
@@ -145,6 +148,7 @@ impl ScannerItem {
             to_del.push(ObjectToDelete {
                 object_name: obj.name,
                 version_id: obj.version_id,
+                ..Default::default()
             });
         }

@@ -233,7 +237,7 @@ impl ScannerItem {
             IlmAction::DeleteAction => {
                 info!("apply_lifecycle: Object {} marked for deletion", oi.name);
                 if let Some(vcfg) = &self.versioning {
-                    if !vcfg.is_enabled() {
+                    if !vcfg.enabled() {
                         info!("apply_lifecycle: Versioning disabled, setting new_size=0");
                         new_size = 0;
                     }
```
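The `..Default::default()` added to the `ObjectToDelete` literal above is what lets existing construction sites survive new fields. A minimal sketch with a stand-in type (the extra field is hypothetical, purely to illustrate why functional-update syntax keeps the call site compiling):

```rust
#[derive(Default)]
struct ObjectToDelete {
    object_name: String,
    version_id: Option<String>,
    replication_state: Option<String>, // hypothetical field added later
}

fn to_delete(name: &str) -> ObjectToDelete {
    ObjectToDelete {
        object_name: name.to_string(),
        version_id: None,
        // Fields this site does not name fall back to their defaults,
        // so adding `replication_state` did not break this constructor.
        ..Default::default()
    }
}
```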
```diff
@@ -444,7 +444,7 @@ mod tests {
         let delete_marker = MetaDeleteMarker {
             version_id: Some(Uuid::new_v4()),
             mod_time: Some(OffsetDateTime::now_utc()),
-            meta_sys: None,
+            meta_sys: HashMap::new(),
         };

         let version = FileMetaVersion {
```
```diff
@@ -18,6 +18,7 @@ use rustfs_ecstore::disk::endpoint::Endpoint;
 use rustfs_ecstore::endpoints::{EndpointServerPools, Endpoints, PoolEndpoints};
 use std::net::SocketAddr;
 use tempfile::TempDir;
+use tokio_util::sync::CancellationToken;

 #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
 async fn test_endpoint_index_settings() -> anyhow::Result<()> {
@@ -73,7 +74,7 @@ async fn test_endpoint_index_settings() -> anyhow::Result<()> {
     rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await?;

     let server_addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
-    let ecstore = rustfs_ecstore::store::ECStore::new(server_addr, endpoint_pools).await?;
+    let ecstore = rustfs_ecstore::store::ECStore::new(server_addr, endpoint_pools, CancellationToken::new()).await?;

     println!("ECStore initialized successfully with {} pools", ecstore.pools.len());
```
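Every `ECStore::new` call in this compare now threads through a `CancellationToken` from `tokio-util`, giving background tasks a cooperative shutdown signal. A self-contained sketch of the pattern; the worker loop is illustrative, not RustFS code:

```rust
use std::time::Duration;
use tokio_util::sync::CancellationToken;

async fn background_worker(token: CancellationToken) {
    loop {
        tokio::select! {
            _ = token.cancelled() => break, // the owner requested shutdown
            _ = tokio::time::sleep(Duration::from_secs(1)) => {
                // periodic work, e.g. a scanner or heal tick
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let token = CancellationToken::new();
    let worker = tokio::spawn(background_worker(token.clone()));
    // Tests can pass a fresh token and cancel it on teardown.
    token.cancel();
    worker.await.unwrap();
}
```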
```diff
@@ -29,6 +29,7 @@ use std::sync::Once;
 use std::sync::OnceLock;
 use std::{path::PathBuf, sync::Arc, time::Duration};
 use tokio::fs;
+use tokio_util::sync::CancellationToken;
 use tracing::info;
 use walkdir::WalkDir;

@@ -98,7 +99,9 @@ async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage
     // create ECStore with dynamic port 0 (let OS assign) or fixed 9001 if free
     let port = 9001; // for simplicity
     let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
-    let ecstore = ECStore::new(server_addr, endpoint_pools).await.unwrap();
+    let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
+        .await
+        .unwrap();

     // init bucket metadata system
     let buckets_list = ecstore
```
```diff
@@ -29,6 +29,7 @@ use std::sync::OnceLock;
 use std::{path::PathBuf, sync::Arc, time::Duration};
 use tokio::fs;
 use tokio::sync::RwLock;
+use tokio_util::sync::CancellationToken;
 use tracing::warn;
 use tracing::{debug, info};

@@ -99,7 +100,9 @@ async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
     // create ECStore with dynamic port 0 (let OS assign) or fixed 9002 if free
     let port = 9002; // for simplicity
     let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
-    let ecstore = ECStore::new(server_addr, endpoint_pools).await.unwrap();
+    let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
+        .await
+        .unwrap();

     // init bucket metadata system
     let buckets_list = ecstore
@@ -124,7 +127,7 @@ async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
 }

 /// Test helper: Create a test bucket
-async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
+async fn _create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
     (**ecstore)
         .make_bucket(bucket_name, &Default::default())
         .await
@@ -222,7 +225,7 @@ async fn set_bucket_lifecycle_transition(bucket_name: &str) -> Result<(), Box<dy
         </Rule>
         <Rule>
             <ID>test-rule2</ID>
-            <Status>Desabled</Status>
+            <Status>Disabled</Status>
             <Filter>
                 <Prefix>test/</Prefix>
             </Filter>
@@ -299,6 +302,22 @@ async fn object_is_transitioned(ecstore: &Arc<ECStore>, bucket: &str, object: &s
     }
 }

+async fn wait_for_object_absence(ecstore: &Arc<ECStore>, bucket: &str, object: &str, timeout: Duration) -> bool {
+    let deadline = tokio::time::Instant::now() + timeout;
+
+    loop {
+        if !object_exists(ecstore, bucket, object).await {
+            return true;
+        }
+
+        if tokio::time::Instant::now() >= deadline {
+            return false;
+        }
+
+        tokio::time::sleep(Duration::from_millis(200)).await;
+    }
+}
+
 mod serial_tests {
     use super::*;
```
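The new `wait_for_object_absence` helper replaces fixed `sleep(5s)` pauses with deadline-based polling, so the expiry tests pass as soon as the object disappears and only pay the full timeout on failure. A usage sketch, assuming the `setup_test_env` and helper definitions from this same test file:

```rust
async fn example_wait() {
    let (_disk_paths, ecstore) = setup_test_env().await;
    // Poll every 200ms until the object is gone or the 5-second deadline passes.
    let gone = wait_for_object_absence(&ecstore, "my-bucket", "test/object.txt", Duration::from_secs(5)).await;
    assert!(gone, "object should expire within the 5-second deadline");
}
```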
```diff
@@ -308,25 +327,26 @@ mod serial_tests {
         let (_disk_paths, ecstore) = setup_test_env().await;

         // Create test bucket and object
-        let bucket_name = "test-lifecycle-expiry-basic-bucket";
+        let suffix = uuid::Uuid::new_v4().simple().to_string();
+        let bucket_name = format!("test-lc-expiry-basic-{}", &suffix[..8]);
         let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
         let test_data = b"Hello, this is test data for lifecycle expiry!";

-        create_test_bucket(&ecstore, bucket_name).await;
-        upload_test_object(&ecstore, bucket_name, object_name, test_data).await;
+        create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
+        upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;

         // Verify object exists initially
-        assert!(object_exists(&ecstore, bucket_name, object_name).await);
+        assert!(object_exists(&ecstore, bucket_name.as_str(), object_name).await);
         println!("✅ Object exists before lifecycle processing");

         // Set lifecycle configuration with very short expiry (0 days = immediate expiry)
-        set_bucket_lifecycle(bucket_name)
+        set_bucket_lifecycle(bucket_name.as_str())
             .await
             .expect("Failed to set lifecycle configuration");
-        println!("✅ Lifecycle configuration set for bucket: {bucket_name}");
+        println!("✅ Lifecycle configuration set for bucket: {}", bucket_name);

         // Verify lifecycle configuration was set
-        match rustfs_ecstore::bucket::metadata_sys::get(bucket_name).await {
+        match rustfs_ecstore::bucket::metadata_sys::get(bucket_name.as_str()).await {
             Ok(bucket_meta) => {
                 assert!(bucket_meta.lifecycle_config.is_some());
                 println!("✅ Bucket metadata retrieved successfully");
```
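Switching from fixed bucket names to UUID-suffixed ones isolates serial test runs from state left behind by earlier failures. A sketch of the naming scheme the diff adopts:

```rust
fn unique_bucket_name(prefix: &str) -> String {
    // `simple()` renders the UUID without hyphens; eight hex chars keep the
    // result well under the 63-character S3 bucket-name limit.
    let suffix = uuid::Uuid::new_v4().simple().to_string();
    format!("{prefix}-{}", &suffix[..8])
}
```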
```diff
@@ -357,20 +377,60 @@ mod serial_tests {
         scanner.scan_cycle().await.expect("Failed to trigger scan cycle");
         println!("✅ Manual scan cycle completed");

-        // Wait a bit more for background workers to process expiry tasks
-        tokio::time::sleep(Duration::from_secs(5)).await;
+        let mut expired = false;
+        for attempt in 0..3 {
+            if attempt > 0 {
+                scanner.scan_cycle().await.expect("Failed to trigger scan cycle on retry");
+            }
+            expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(5)).await;
+            if expired {
+                break;
+            }
+        }

         // Check if object has been expired (delete_marker)
-        let check_result = object_exists(&ecstore, bucket_name, object_name).await;
-        println!("Object is_delete_marker after lifecycle processing: {check_result}");
+        println!("Object is_delete_marker after lifecycle processing: {}", !expired);

-        if check_result {
-            println!("❌ Object was not deleted by lifecycle processing");
+        if !expired {
+            let pending = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::GLOBAL_ExpiryState
+                .read()
+                .await
+                .pending_tasks()
+                .await;
+            println!("Pending expiry tasks: {pending}");
+
+            if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
+                if let Ok(object_info) = ecstore
+                    .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
+                    .await
+                {
+                    let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
+                        &lc_config,
+                        None,
+                        None,
+                        &object_info,
+                    )
+                    .await;
+
+                    rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
+                        ecstore.clone(),
+                        &object_info,
+                        &event,
+                        &rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
+                    )
+                    .await;
+
+                    expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
+                }
+            }
+
+            if !expired {
+                println!("❌ Object was not deleted by lifecycle processing");
+            }
         } else {
             println!("✅ Object was successfully deleted by lifecycle processing");
             // Let's try to get object info to see its details
             match ecstore
-                .get_object_info(bucket_name, object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
+                .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
                 .await
             {
                 Ok(obj_info) => {
@@ -385,7 +445,7 @@ mod serial_tests {
             }
         }

-        assert!(!check_result);
+        assert!(expired);
         println!("✅ Object successfully expired");

         // Stop scanner
@@ -401,25 +461,26 @@ mod serial_tests {
         let (_disk_paths, ecstore) = setup_test_env().await;

         // Create test bucket and object
-        let bucket_name = "test-lifecycle-expiry-deletemarker-bucket";
+        let suffix = uuid::Uuid::new_v4().simple().to_string();
+        let bucket_name = format!("test-lc-expiry-marker-{}", &suffix[..8]);
         let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
         let test_data = b"Hello, this is test data for lifecycle expiry!";

-        create_test_lock_bucket(&ecstore, bucket_name).await;
-        upload_test_object(&ecstore, bucket_name, object_name, test_data).await;
+        create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
+        upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;

         // Verify object exists initially
-        assert!(object_exists(&ecstore, bucket_name, object_name).await);
+        assert!(object_exists(&ecstore, bucket_name.as_str(), object_name).await);
         println!("✅ Object exists before lifecycle processing");

         // Set lifecycle configuration with very short expiry (0 days = immediate expiry)
-        set_bucket_lifecycle_deletemarker(bucket_name)
+        set_bucket_lifecycle_deletemarker(bucket_name.as_str())
             .await
             .expect("Failed to set lifecycle configuration");
-        println!("✅ Lifecycle configuration set for bucket: {bucket_name}");
+        println!("✅ Lifecycle configuration set for bucket: {}", bucket_name);

         // Verify lifecycle configuration was set
-        match rustfs_ecstore::bucket::metadata_sys::get(bucket_name).await {
+        match rustfs_ecstore::bucket::metadata_sys::get(bucket_name.as_str()).await {
             Ok(bucket_meta) => {
                 assert!(bucket_meta.lifecycle_config.is_some());
                 println!("✅ Bucket metadata retrieved successfully");
@@ -450,36 +511,64 @@ mod serial_tests {
         scanner.scan_cycle().await.expect("Failed to trigger scan cycle");
         println!("✅ Manual scan cycle completed");

-        // Wait a bit more for background workers to process expiry tasks
-        tokio::time::sleep(Duration::from_secs(5)).await;
+        let mut deleted = false;
+        for attempt in 0..3 {
+            if attempt > 0 {
+                scanner.scan_cycle().await.expect("Failed to trigger scan cycle on retry");
+            }
+            deleted = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(5)).await;
+            if deleted {
+                break;
+            }
+        }

         // Check if object has been expired (deleted)
         //let check_result = object_is_delete_marker(&ecstore, bucket_name, object_name).await;
-        let check_result = object_exists(&ecstore, bucket_name, object_name).await;
-        println!("Object exists after lifecycle processing: {check_result}");
+        println!("Object exists after lifecycle processing: {}", !deleted);

-        if !check_result {
-            println!("❌ Object was not deleted by lifecycle processing");
-            // Let's try to get object info to see its details
-            match ecstore
-                .get_object_info(bucket_name, object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
+        if !deleted {
+            let pending = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::GLOBAL_ExpiryState
+                .read()
                 .await
-            {
-                Ok(obj_info) => {
-                    println!(
-                        "Object info: name={}, size={}, mod_time={:?}",
-                        obj_info.name, obj_info.size, obj_info.mod_time
-                    );
-                }
-                Err(e) => {
-                    println!("Error getting object info: {e:?}");
+                .pending_tasks()
+                .await;
+            println!("Pending expiry tasks: {pending}");
+
+            if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
+                if let Ok(obj_info) = ecstore
+                    .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
+                    .await
+                {
+                    let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
+                        &lc_config, None, None, &obj_info,
+                    )
+                    .await;
+
+                    rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
+                        ecstore.clone(),
+                        &obj_info,
+                        &event,
+                        &rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
+                    )
+                    .await;
+
+                    deleted = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
+
+                    if !deleted {
+                        println!(
+                            "Object info: name={}, size={}, mod_time={:?}",
+                            obj_info.name, obj_info.size, obj_info.mod_time
+                        );
+                    }
                 }
             }
+
+            if !deleted {
+                println!("❌ Object was not deleted by lifecycle processing");
+            }
         } else {
             println!("✅ Object was successfully deleted by lifecycle processing");
         }

-        assert!(check_result);
+        assert!(deleted);
         println!("✅ Object successfully expired");

         // Stop scanner
@@ -497,15 +586,16 @@ mod serial_tests {
         //create_test_tier().await;

         // Create test bucket and object
-        let bucket_name = "test-lifecycle-transition-basic-bucket";
+        let suffix = uuid::Uuid::new_v4().simple().to_string();
+        let bucket_name = format!("test-lc-transition-{}", &suffix[..8]);
         let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
         let test_data = b"Hello, this is test data for lifecycle expiry!";

-        create_test_bucket(&ecstore, bucket_name).await;
-        upload_test_object(&ecstore, bucket_name, object_name, test_data).await;
+        create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
+        upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;

         // Verify object exists initially
-        assert!(object_exists(&ecstore, bucket_name, object_name).await);
+        assert!(object_exists(&ecstore, bucket_name.as_str(), object_name).await);
         println!("✅ Object exists before lifecycle processing");

         // Set lifecycle configuration with very short expiry (0 days = immediate expiry)
@@ -551,14 +641,14 @@ mod serial_tests {

         // Check if object has been expired (deleted)
         //let check_result = object_is_transitioned(&ecstore, bucket_name, object_name).await;
-        let check_result = object_exists(&ecstore, bucket_name, object_name).await;
+        let check_result = object_exists(&ecstore, bucket_name.as_str(), object_name).await;
         println!("Object exists after lifecycle processing: {check_result}");

         if check_result {
             println!("✅ Object was not deleted by lifecycle processing");
             // Let's try to get object info to see its details
             match ecstore
-                .get_object_info(bucket_name, object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
+                .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
                 .await
             {
                 Ok(obj_info) => {
```
```diff
@@ -14,6 +14,7 @@

 use std::{fs, net::SocketAddr, sync::Arc, sync::OnceLock, time::Duration};
 use tempfile::TempDir;
+use tokio_util::sync::CancellationToken;

 use serial_test::serial;

@@ -89,7 +90,9 @@ async fn prepare_test_env(test_dir: Option<&str>, port: Option<u16>) -> (Vec<std
     // create ECStore with dynamic port
     let port = port.unwrap_or(9000);
     let server_addr: SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
-    let ecstore = ECStore::new(server_addr, endpoint_pools).await.unwrap();
+    let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
+        .await
+        .unwrap();

     // init bucket metadata system
     let buckets_list = ecstore
```
```diff
@@ -12,9 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use crate::AuditEntry;
-use crate::AuditResult;
-use crate::AuditSystem;
+use crate::{AuditEntry, AuditResult, AuditSystem};
 use once_cell::sync::OnceCell;
 use rustfs_ecstore::config::Config;
 use std::sync::Arc;

@@ -202,22 +202,19 @@ impl AuditMetrics {
         // Generate recommendations
         if !validation.meets_eps_requirement {
             validation.recommendations.push(format!(
-                "EPS ({:.0}) is below requirement (3000). Consider optimizing target dispatch or adding more target instances.",
-                eps
+                "EPS ({eps:.0}) is below requirement (3000). Consider optimizing target dispatch or adding more target instances."
             ));
         }

         if !validation.meets_latency_requirement {
             validation.recommendations.push(format!(
-                "Average latency ({:.2}ms) exceeds requirement (30ms). Consider optimizing target responses or increasing timeout values.",
-                avg_latency_ms
+                "Average latency ({avg_latency_ms:.2}ms) exceeds requirement (30ms). Consider optimizing target responses or increasing timeout values."
             ));
         }

         if !validation.meets_error_rate_requirement {
             validation.recommendations.push(format!(
-                "Error rate ({:.2}%) exceeds recommendation (1%). Check target connectivity and configuration.",
-                error_rate
+                "Error rate ({error_rate:.2}%) exceeds recommendation (1%). Check target connectivity and configuration."
             ));
         }

@@ -307,7 +304,7 @@ impl PerformanceValidation {
         );

         for rec in &self.recommendations {
-            result.push_str(&format!("\n• {}", rec));
+            result.push_str(&format!("\n• {rec}"));
         }

         result
```
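Most of the remaining audit and test hunks are mechanical migrations to inline format arguments, stable since Rust 1.58 and flagged by clippy's `uninlined_format_args` lint. The two forms produce identical output:

```rust
fn main() {
    let target_id = "webhook-1";
    let old = format!("Target not found: {}", target_id); // positional argument
    let new = format!("Target not found: {target_id}");   // inline capture
    assert_eq!(old, new);
}
```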
```diff
@@ -303,7 +303,7 @@ async fn create_audit_target(
             let target = rustfs_targets::target::mqtt::MQTTTarget::new(id.to_string(), args)?;
             Ok(Box::new(target))
         }
-        _ => Err(TargetError::Configuration(format!("Unknown target type: {}", target_type))),
+        _ => Err(TargetError::Configuration(format!("Unknown target type: {target_type}"))),
     }
 }

@@ -352,7 +352,7 @@ fn parse_webhook_args(_id: &str, config: &KVS) -> Result<WebhookArgs, TargetErro
         .ok_or_else(|| TargetError::Configuration("webhook endpoint is required".to_string()))?;

     let endpoint_url =
-        Url::parse(&endpoint).map_err(|e| TargetError::Configuration(format!("invalid webhook endpoint URL: {}", e)))?;
+        Url::parse(&endpoint).map_err(|e| TargetError::Configuration(format!("invalid webhook endpoint URL: {e}")))?;

     let args = WebhookArgs {
         enable: true, // Already validated as enabled
@@ -379,7 +379,7 @@ fn parse_mqtt_args(_id: &str, config: &KVS) -> Result<MQTTArgs, TargetError> {
         .filter(|s| !s.is_empty())
         .ok_or_else(|| TargetError::Configuration("MQTT broker is required".to_string()))?;

-    let broker_url = Url::parse(&broker).map_err(|e| TargetError::Configuration(format!("invalid MQTT broker URL: {}", e)))?;
+    let broker_url = Url::parse(&broker).map_err(|e| TargetError::Configuration(format!("invalid MQTT broker URL: {e}")))?;

     let topic = config
         .lookup(MQTT_TOPIC)
```
```diff
@@ -17,9 +17,11 @@ use crate::AuditRegistry;
 use crate::observability;
 use crate::{AuditError, AuditResult};
 use rustfs_ecstore::config::Config;
-use rustfs_targets::store::{Key, Store};
-use rustfs_targets::target::EntityTarget;
-use rustfs_targets::{StoreError, Target, TargetError};
+use rustfs_targets::{
+    StoreError, Target, TargetError,
+    store::{Key, Store},
+    target::EntityTarget,
+};
 use std::sync::Arc;
 use tokio::sync::{Mutex, RwLock};
 use tracing::{error, info, warn};
@@ -257,7 +259,7 @@ impl AuditSystem {
         let target_id_clone = target_id.clone();

         // Create EntityTarget for the audit log entry
-        let entity_target = rustfs_targets::target::EntityTarget {
+        let entity_target = EntityTarget {
             object_name: entry.api.name.clone().unwrap_or_default(),
             bucket_name: entry.api.bucket.clone().unwrap_or_default(),
             event_name: rustfs_targets::EventName::ObjectCreatedPut, // Default, should be derived from entry
@@ -337,7 +339,7 @@ impl AuditSystem {
         let mut success_count = 0;
         let mut errors = Vec::new();
         for entry in entries_clone {
-            let entity_target = rustfs_targets::target::EntityTarget {
+            let entity_target = EntityTarget {
                 object_name: entry.api.name.clone().unwrap_or_default(),
                 bucket_name: entry.api.bucket.clone().unwrap_or_default(),
                 event_name: rustfs_targets::EventName::ObjectCreatedPut,
@@ -461,7 +463,7 @@ impl AuditSystem {
             info!(target_id = %target_id, "Target enabled");
             Ok(())
         } else {
-            Err(AuditError::Configuration(format!("Target not found: {}", target_id)))
+            Err(AuditError::Configuration(format!("Target not found: {target_id}")))
         }
     }

@@ -474,7 +476,7 @@ impl AuditSystem {
             info!(target_id = %target_id, "Target disabled");
             Ok(())
         } else {
-            Err(AuditError::Configuration(format!("Target not found: {}", target_id)))
+            Err(AuditError::Configuration(format!("Target not found: {target_id}")))
         }
     }

@@ -488,7 +490,7 @@ impl AuditSystem {
             info!(target_id = %target_id, "Target removed");
             Ok(())
         } else {
-            Err(AuditError::Configuration(format!("Target not found: {}", target_id)))
+            Err(AuditError::Configuration(format!("Target not found: {target_id}")))
         }
     }
```
```diff
@@ -94,7 +94,7 @@ fn build_rustfs_binary() {

     if !output.status.success() {
         let stderr = String::from_utf8_lossy(&output.stderr);
-        panic!("Failed to build RustFS binary. Error: {}", stderr);
+        panic!("Failed to build RustFS binary. Error: {stderr}");
     }

     info!("✅ RustFS binary built successfully");
@@ -134,8 +134,8 @@ impl RustFSTestEnvironment {

     // Use a unique port for each test environment
     let port = Self::find_available_port().await?;
-        let address = format!("127.0.0.1:{}", port);
-        let url = format!("http://{}", address);
+        let address = format!("127.0.0.1:{port}");
+        let url = format!("http://{address}");

         Ok(Self {
             temp_dir,
@@ -152,7 +152,7 @@ impl RustFSTestEnvironment {
         let temp_dir = format!("/tmp/rustfs_e2e_test_{}", Uuid::new_v4());
         fs::create_dir_all(&temp_dir).await?;

-        let url = format!("http://{}", address);
+        let url = format!("http://{address}");

         Ok(Self {
             temp_dir,
@@ -327,7 +327,7 @@ pub async fn execute_awscurl(

     if !output.status.success() {
         let stderr = String::from_utf8_lossy(&output.stderr);
-        return Err(format!("awscurl failed: {}", stderr).into());
+        return Err(format!("awscurl failed: {stderr}").into());
     }

     let response = String::from_utf8_lossy(&output.stdout).to_string();

@@ -388,23 +388,11 @@ async fn test_bucket_default_encryption_multipart_upload() -> Result<(), Box<dyn
         complete_multipart_response.ssekms_key_id()
     );

     // Verify: complete_multipart_upload response should contain encryption information
-    // KNOWN BUG: s3s library bug where CompleteMultipartUploadOutput encryption fields serialize as None
-    // even when properly set. Our server implementation is correct (see server logs above).
-    // TODO: Remove this workaround when s3s library is fixed
-    warn!("KNOWN BUG: s3s library - complete_multipart_upload response encryption fields return None even when set");
-
-    if complete_multipart_response.server_side_encryption().is_some() {
-        // If s3s library is fixed, verify the encryption info
-        assert_eq!(
-            complete_multipart_response.server_side_encryption(),
-            Some(&ServerSideEncryption::AwsKms),
-            "complete_multipart_upload response should contain SSE-KMS encryption information"
-        );
-    } else {
-        // Expected behavior due to s3s library bug - log and continue
-        warn!("Skipping assertion due to known s3s library bug - server logs confirm correct encryption handling");
-    }
+    assert_eq!(
+        complete_multipart_response.server_side_encryption(),
+        Some(&ServerSideEncryption::AwsKms),
+        "complete_multipart_upload response should contain SSE-KMS encryption information"
+    );

     // Step 4: Download file and verify encryption status
     info!("Downloading file and verifying encryption status");
```
```diff
@@ -59,7 +59,7 @@ pub async fn configure_kms(
     access_key: &str,
     secret_key: &str,
 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
-    let url = format!("{}/rustfs/admin/v3/kms/configure", base_url);
+    let url = format!("{base_url}/rustfs/admin/v3/kms/configure");
     awscurl_post(&url, config_json, access_key, secret_key).await?;
     info!("KMS configured successfully");
     Ok(())
@@ -71,7 +71,7 @@ pub async fn start_kms(
     access_key: &str,
     secret_key: &str,
 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
-    let url = format!("{}/rustfs/admin/v3/kms/start", base_url);
+    let url = format!("{base_url}/rustfs/admin/v3/kms/start");
     awscurl_post(&url, "{}", access_key, secret_key).await?;
     info!("KMS started successfully");
     Ok(())
@@ -83,7 +83,7 @@ pub async fn get_kms_status(
     access_key: &str,
     secret_key: &str,
 ) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
-    let url = format!("{}/rustfs/admin/v3/kms/status", base_url);
+    let url = format!("{base_url}/rustfs/admin/v3/kms/status");
     let status = awscurl_get(&url, access_key, secret_key).await?;
     info!("KMS status retrieved: {}", status);
     Ok(status)
@@ -101,7 +101,7 @@ pub async fn create_default_key(
     })
     .to_string();

-    let url = format!("{}/rustfs/admin/v3/kms/keys", base_url);
+    let url = format!("{base_url}/rustfs/admin/v3/kms/keys");
     let response = awscurl_post(&url, &create_key_body, access_key, secret_key).await?;

     // Parse response to get the actual key ID
@@ -141,7 +141,7 @@ pub async fn create_key_with_specific_id(key_dir: &str, key_id: &str) -> Result<
     });

     // Write the key to file with the specified ID as JSON
-    let key_path = format!("{}/{}.key", key_dir, key_id);
+    let key_path = format!("{key_dir}/{key_id}.key");
     let content = serde_json::to_vec_pretty(&stored_key)?;
     fs::write(&key_path, &content).await?;

@@ -281,13 +281,8 @@ pub async fn test_kms_key_management(
     })
     .to_string();

-    let create_response = awscurl_post(
-        &format!("{}/rustfs/admin/v3/kms/keys", base_url),
-        &create_key_body,
-        access_key,
-        secret_key,
-    )
-    .await?;
+    let create_response =
+        awscurl_post(&format!("{base_url}/rustfs/admin/v3/kms/keys"), &create_key_body, access_key, secret_key).await?;

     let create_result: serde_json::Value = serde_json::from_str(&create_response)?;
     let key_id = create_result["key_id"]
@@ -296,8 +291,7 @@ pub async fn test_kms_key_management(
     info!("Created key with ID: {}", key_id);

     // Test DescribeKey
-    let describe_response =
-        awscurl_get(&format!("{}/rustfs/admin/v3/kms/keys/{}", base_url, key_id), access_key, secret_key).await?;
+    let describe_response = awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys/{key_id}"), access_key, secret_key).await?;

     info!("DescribeKey response: {}", describe_response);
     let describe_result: serde_json::Value = serde_json::from_str(&describe_response)?;
@@ -306,7 +300,7 @@ pub async fn test_kms_key_management(
     info!("Successfully described key: {}", key_id);

     // Test ListKeys
-    let list_response = awscurl_get(&format!("{}/rustfs/admin/v3/kms/keys", base_url), access_key, secret_key).await?;
+    let list_response = awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys"), access_key, secret_key).await?;

     let list_result: serde_json::Value = serde_json::from_str(&list_response)?;
     let keys = list_result["keys"]
@@ -412,7 +406,7 @@ impl VaultTestEnvironment {
         let port_check = TcpStream::connect(VAULT_ADDRESS).await.is_ok();
         if port_check {
             // Additional check by making a health request
-            if let Ok(response) = reqwest::get(&format!("{}/v1/sys/health", VAULT_URL)).await {
+            if let Ok(response) = reqwest::get(&format!("{VAULT_URL}/v1/sys/health")).await {
                 if response.status().is_success() {
                     info!("Vault server is ready after {} seconds", i);
                     return Ok(());
@@ -438,7 +432,7 @@ impl VaultTestEnvironment {

         // Enable transit secrets engine
         let enable_response = client
-            .post(format!("{}/v1/sys/mounts/{}", VAULT_URL, VAULT_TRANSIT_PATH))
+            .post(format!("{VAULT_URL}/v1/sys/mounts/{VAULT_TRANSIT_PATH}"))
             .header("X-Vault-Token", VAULT_TOKEN)
             .json(&serde_json::json!({
                 "type": "transit"
@@ -448,14 +442,14 @@ impl VaultTestEnvironment {

         if !enable_response.status().is_success() && enable_response.status() != 400 {
             let error_text = enable_response.text().await?;
-            return Err(format!("Failed to enable transit engine: {}", error_text).into());
+            return Err(format!("Failed to enable transit engine: {error_text}").into());
         }

         info!("Creating Vault encryption key");

         // Create encryption key
         let key_response = client
-            .post(format!("{}/v1/{}/keys/{}", VAULT_URL, VAULT_TRANSIT_PATH, VAULT_KEY_NAME))
+            .post(format!("{VAULT_URL}/v1/{VAULT_TRANSIT_PATH}/keys/{VAULT_KEY_NAME}"))
             .header("X-Vault-Token", VAULT_TOKEN)
             .json(&serde_json::json!({
                 "type": "aes256-gcm96"
@@ -465,7 +459,7 @@ impl VaultTestEnvironment {

         if !key_response.status().is_success() && key_response.status() != 400 {
             let error_text = key_response.text().await?;
-            return Err(format!("Failed to create encryption key: {}", error_text).into());
+            return Err(format!("Failed to create encryption key: {error_text}").into());
         }

         info!("Vault transit engine setup completed");
@@ -713,10 +707,10 @@ pub async fn test_all_multipart_encryption_types(

     // Test configurations for all encryption types
     let test_configs = vec![
-        MultipartTestConfig::new(format!("{}-no-encryption", base_object_key), part_size, total_parts, EncryptionType::None),
-        MultipartTestConfig::new(format!("{}-sse-s3", base_object_key), part_size, total_parts, EncryptionType::SSES3),
-        MultipartTestConfig::new(format!("{}-sse-kms", base_object_key), part_size, total_parts, EncryptionType::SSEKMS),
-        MultipartTestConfig::new(format!("{}-sse-c", base_object_key), part_size, total_parts, create_sse_c_config()),
+        MultipartTestConfig::new(format!("{base_object_key}-no-encryption"), part_size, total_parts, EncryptionType::None),
+        MultipartTestConfig::new(format!("{base_object_key}-sse-s3"), part_size, total_parts, EncryptionType::SSES3),
+        MultipartTestConfig::new(format!("{base_object_key}-sse-kms"), part_size, total_parts, EncryptionType::SSEKMS),
+        MultipartTestConfig::new(format!("{base_object_key}-sse-c"), part_size, total_parts, create_sse_c_config()),
     ];

     // Run tests for each encryption type
```
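The KMS helpers above share one shape: build an admin URL under `/rustfs/admin/v3/kms/...`, send it through the signing `awscurl_get`/`awscurl_post` wrappers, and parse the JSON reply. A composition sketch; the `keys[].key_id` response shape is an assumption extrapolated from the fields the tests above already read:

```rust
// Sketch only: `awscurl_get` is the helper defined in this module, and the
// `keys`/`key_id` field names are assumptions based on the parsing above.
async fn list_kms_key_ids(
    base_url: &str,
    access_key: &str,
    secret_key: &str,
) -> Result<Vec<String>, Box<dyn std::error::Error + Send + Sync>> {
    let response = awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys"), access_key, secret_key).await?;
    let parsed: serde_json::Value = serde_json::from_str(&response)?;
    Ok(parsed["keys"]
        .as_array()
        .map(|keys| keys.iter().filter_map(|k| k["key_id"].as_str().map(str::to_string)).collect())
        .unwrap_or_default())
}
```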
```diff
@@ -33,11 +33,10 @@ fn assert_encryption_metadata(metadata: &HashMap<String, String>, expected_size:
         "x-rustfs-encryption-context",
         "x-rustfs-encryption-original-size",
     ] {
-        assert!(metadata.contains_key(key), "expected managed encryption metadata '{}' to be present", key);
+        assert!(metadata.contains_key(key), "expected managed encryption metadata '{key}' to be present");
         assert!(
             !metadata.get(key).unwrap().is_empty(),
-            "managed encryption metadata '{}' should not be empty",
-            key
+            "managed encryption metadata '{key}' should not be empty"
         );
     }

@@ -84,10 +83,7 @@ fn assert_storage_encrypted(storage_root: &std::path::Path, bucket: &str, key: &

     assert!(
         scanned > 0,
-        "Failed to locate stored data files for bucket '{}' and key '{}' under {:?}",
-        bucket,
-        key,
-        storage_root
+        "Failed to locate stored data files for bucket '{bucket}' and key '{key}' under {storage_root:?}"
     );
     assert!(plaintext_path.is_none(), "Plaintext detected on disk at {:?}", plaintext_path.unwrap());
 }
@@ -220,7 +216,7 @@ async fn test_head_reports_managed_metadata_for_sse_kms_and_copy() -> Result<(),
     assert_encryption_metadata(source_metadata, payload.len());

     let dest_key = "metadata-sse-kms-object-copy";
-    let copy_source = format!("{}/{}", TEST_BUCKET, source_key);
+    let copy_source = format!("{TEST_BUCKET}/{source_key}");

     s3_client
         .copy_object()
```
```diff
@@ -389,8 +389,8 @@ async fn test_kms_concurrent_encryption() -> Result<(), Box<dyn std::error::Erro
         let task = tokio::spawn(async move {
             let _permit = sem.acquire().await.unwrap();

-            let test_data = format!("Concurrent test data {}", i).into_bytes();
-            let object_key = format!("concurrent-test-{}", i);
+            let test_data = format!("Concurrent test data {i}").into_bytes();
+            let object_key = format!("concurrent-test-{i}");

             // Alternate between different encryption types
             let result = match i % 3 {
@@ -418,7 +418,7 @@ async fn test_kms_concurrent_encryption() -> Result<(), Box<dyn std::error::Erro
                 }
                 2 => {
                     // SSE-C
-                    let key = format!("testkey{:026}", i); // 32-byte key
+                    let key = format!("testkey{i:026}"); // 32-byte key
                     let key_b64 = base64::engine::general_purpose::STANDARD.encode(&key);
                     let key_md5 = format!("{:x}", md5::compute(&key));

@@ -459,9 +459,7 @@ async fn test_kms_concurrent_encryption() -> Result<(), Box<dyn std::error::Erro

     assert!(
         successful_uploads >= num_concurrent - 1,
-        "Most concurrent uploads should succeed (got {}/{})",
-        successful_uploads,
-        num_concurrent
+        "Most concurrent uploads should succeed (got {successful_uploads}/{num_concurrent})"
     );

     info!("✅ Successfully completed {}/{} concurrent uploads", successful_uploads, num_concurrent);
```
```diff
@@ -152,7 +152,7 @@ async fn test_kms_corrupted_key_files() -> Result<(), Box<dyn std::error::Error
     // Corrupt the default key file
     info!("🔧 Corrupting default key file");
     let key_file_path = format!("{}/{}.key", kms_env.kms_keys_dir, default_key_id);
-    let backup_key_path = format!("{}.backup", key_file_path);
+    let backup_key_path = format!("{key_file_path}.backup");

     // Backup the original key file
     fs::copy(&key_file_path, &backup_key_path)?;
@@ -417,8 +417,8 @@ async fn test_kms_resource_constraints() -> Result<(), Box<dyn std::error::Error

     for i in 0..10 {
         let client = s3_client.clone();
-        let test_data = format!("Rapid test data {}", i).into_bytes();
-        let object_key = format!("rapid-test-{}", i);
+        let test_data = format!("Rapid test data {i}").into_bytes();
+        let object_key = format!("rapid-test-{i}");

         let task = tokio::spawn(async move {
             let result = client
@@ -740,7 +740,7 @@ async fn test_large_multipart_upload(
     // Verify data integrity
     for (i, (&actual, &expected)) in downloaded_data.iter().zip(test_data.iter()).enumerate() {
         if actual != expected {
-            panic!("Data mismatch at byte {}: got {}, expected {}", i, actual, expected);
+            panic!("Data mismatch at byte {i}: got {actual}, expected {expected}");
         }
     }
```
```diff
@@ -321,13 +321,9 @@ async fn test_vault_kms_key_crud(
     })
     .to_string();

-    let create_response = crate::common::awscurl_post(
-        &format!("{}/rustfs/admin/v3/kms/keys", base_url),
-        &create_key_body,
-        access_key,
-        secret_key,
-    )
-    .await?;
+    let create_response =
+        crate::common::awscurl_post(&format!("{base_url}/rustfs/admin/v3/kms/keys"), &create_key_body, access_key, secret_key)
+            .await?;

     let create_result: serde_json::Value = serde_json::from_str(&create_response)?;
     let key_id = create_result["key_id"]
@@ -337,7 +333,7 @@ async fn test_vault_kms_key_crud(

     // Read
     let describe_response =
-        crate::common::awscurl_get(&format!("{}/rustfs/admin/v3/kms/keys/{}", base_url, key_id), access_key, secret_key).await?;
+        crate::common::awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys/{key_id}"), access_key, secret_key).await?;

     let describe_result: serde_json::Value = serde_json::from_str(&describe_response)?;
     assert_eq!(describe_result["key_metadata"]["key_id"], key_id);
@@ -380,7 +376,7 @@ async fn test_vault_kms_key_crud(

     // Read
     let list_response =
-        crate::common::awscurl_get(&format!("{}/rustfs/admin/v3/kms/keys", base_url), access_key, secret_key).await?;
+        crate::common::awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys"), access_key, secret_key).await?;

     let list_result: serde_json::Value = serde_json::from_str(&list_response)?;
     let keys = list_result["keys"]
@@ -407,7 +403,7 @@ async fn test_vault_kms_key_crud(

     // Delete
     let delete_response = crate::common::execute_awscurl(
-        &format!("{}/rustfs/admin/v3/kms/keys/delete?keyId={}", base_url, key_id),
+        &format!("{base_url}/rustfs/admin/v3/kms/keys/delete?keyId={key_id}"),
         "DELETE",
         None,
         access_key,
@@ -422,7 +418,7 @@ async fn test_vault_kms_key_crud(

     // Verify key state after deletion
     let describe_deleted_response =
-        crate::common::awscurl_get(&format!("{}/rustfs/admin/v3/kms/keys/{}", base_url, key_id), access_key, secret_key).await?;
+        crate::common::awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys/{key_id}"), access_key, secret_key).await?;

     let describe_result: serde_json::Value = serde_json::from_str(&describe_deleted_response)?;
     let key_state = describe_result["key_metadata"]["key_state"]
@@ -439,7 +435,7 @@ async fn test_vault_kms_key_crud(

     // Force Delete - Force immediate deletion for PendingDeletion key
     let force_delete_response = crate::common::execute_awscurl(
-        &format!("{}/rustfs/admin/v3/kms/keys/delete?keyId={}&force_immediate=true", base_url, key_id),
+        &format!("{base_url}/rustfs/admin/v3/kms/keys/delete?keyId={key_id}&force_immediate=true"),
         "DELETE",
         None,
         access_key,
@@ -454,7 +450,7 @@ async fn test_vault_kms_key_crud(

     // Verify key no longer exists after force deletion (should return error)
     let describe_force_deleted_result =
-        crate::common::awscurl_get(&format!("{}/rustfs/admin/v3/kms/keys/{}", base_url, key_id), access_key, secret_key).await;
+        crate::common::awscurl_get(&format!("{base_url}/rustfs/admin/v3/kms/keys/{key_id}"), access_key, secret_key).await;

     // After force deletion, key should not be found (GET should fail)
     assert!(describe_force_deleted_result.is_err(), "Force deleted key should not be found");
@@ -419,7 +419,7 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
     // Verify the data byte by byte (stricter for large files)
     for (i, (&actual, &expected)) in downloaded_data.iter().zip(test_data.iter()).enumerate() {
         if actual != expected {
-            panic!("Large-file data mismatch at byte {}: actual={}, expected={}", i, actual, expected);
+            panic!("Large-file data mismatch at byte {i}: actual={actual}, expected={expected}");
         }
     }
@@ -476,7 +476,7 @@ async fn test_kms_critical_suite() -> Result<(), Box<dyn std::error::Error + Sen

     let failed_count = results.iter().filter(|r| !r.success).count();
     if failed_count > 0 {
-        return Err(format!("Critical test suite failed: {} tests failed", failed_count).into());
+        return Err(format!("Critical test suite failed: {failed_count} tests failed").into());
     }

     info!("✅ All critical tests passed");
@@ -498,7 +498,7 @@ async fn test_kms_full_suite() -> Result<(), Box<dyn std::error::Error + Send +

     // Allow up to 10% failure rate for non-critical tests
     if success_rate < 90.0 {
-        return Err(format!("Test suite success rate too low: {:.1}%", success_rate).into());
+        return Err(format!("Test suite success rate too low: {success_rate:.1}%").into());
     }

     info!("✅ Full test suite passed");
```
@@ -14,14 +14,14 @@
// limitations under the License.

use async_trait::async_trait;
-use rustfs_ecstore::{disk::endpoint::Endpoint, lock_utils::create_unique_clients};
-use rustfs_lock::client::{LockClient, local::LocalClient};
+use rustfs_ecstore::disk::endpoint::Endpoint;
+use rustfs_lock::client::{LockClient, local::LocalClient, remote::RemoteClient};
use rustfs_lock::types::{LockInfo, LockResponse, LockStats};
use rustfs_lock::{LockId, LockMetadata, LockPriority, LockType};
use rustfs_lock::{LockRequest, NamespaceLock, NamespaceLockManager};
use rustfs_protos::{node_service_time_out_client, proto_gen::node_service::GenerallyLockRequest};
use serial_test::serial;
-use std::{error::Error, sync::Arc, time::Duration};
+use std::{collections::HashMap, error::Error, sync::Arc, time::Duration};
use tokio::time::sleep;
use tonic::Request;
use url::Url;

@@ -38,6 +38,34 @@ fn get_cluster_endpoints() -> Vec<Endpoint> {
    }]
}

+async fn create_unique_clients(endpoints: &[Endpoint]) -> Result<Vec<Arc<dyn LockClient>>, Box<dyn Error>> {
+    let mut unique_endpoints: HashMap<String, &Endpoint> = HashMap::new();
+
+    for endpoint in endpoints {
+        if endpoint.is_local {
+            unique_endpoints.insert("local".to_string(), endpoint);
+        } else {
+            let host_port = format!(
+                "{}:{}",
+                endpoint.url.host_str().unwrap_or("localhost"),
+                endpoint.url.port().unwrap_or(9000)
+            );
+            unique_endpoints.insert(host_port, endpoint);
+        }
+    }
+
+    let mut clients = Vec::new();
+    for (_key, endpoint) in unique_endpoints {
+        if endpoint.is_local {
+            clients.push(Arc::new(LocalClient::new()) as Arc<dyn LockClient>);
+        } else {
+            clients.push(Arc::new(RemoteClient::new(endpoint.url.to_string())) as Arc<dyn LockClient>);
+        }
+    }
+
+    Ok(clients)
+}

#[tokio::test]
#[serial]
#[ignore = "requires running RustFS server at localhost:9000"]
@@ -75,7 +75,6 @@ hyper-util.workspace = true
hyper-rustls.workspace = true
rustls.workspace = true
tokio = { workspace = true, features = ["io-util", "sync", "signal"] }
tokio-stream = { workspace = true }
tonic.workspace = true
xxhash-rust = { workspace = true, features = ["xxh64", "xxh3"] }
tower.workspace = true

@@ -89,8 +88,6 @@ rustfs-madmin.workspace = true
rustfs-workers.workspace = true
reqwest = { workspace = true }
aws-sdk-s3 = { workspace = true }
once_cell = { workspace = true }
rustfs-rsc = { workspace = true }
urlencoding = { workspace = true }
smallvec = { workspace = true }
shadow-rs.workspace = true

@@ -99,10 +96,11 @@ rustfs-utils = { workspace = true, features = ["full"] }
rustfs-rio.workspace = true
rustfs-signer.workspace = true
rustfs-checksums.workspace = true
futures-util.workspace = true
async-recursion.workspace = true
-parking_lot = "0.12"
-moka = { version = "0.12", features = ["future"] }
+aws-credential-types = { workspace = true }
+aws-smithy-types = { workspace = true }
+parking_lot = { workspace = true }
+moka = { workspace = true }

[target.'cfg(not(windows))'.dependencies]
nix = { workspace = true }
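The dependency churn above follows cargo's workspace-inheritance pattern: versions are declared once at the workspace root and member crates opt in. A minimal sketch of the layout (crate names reused from above, file boundaries marked in comments):

# Root Cargo.toml: single source of truth for versions and default features.
[workspace.dependencies]
parking_lot = "0.12"
moka = { version = "0.12", features = ["future"] }

# Member crate Cargo.toml: inherit the pinned version instead of repeating it.
[dependencies]
parking_lot = { workspace = true }
moka = { workspace = true }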
1400  crates/ecstore/src/bucket/bucket_target_sys.rs  Normal file
File diff suppressed because it is too large
@@ -16,6 +16,10 @@ use super::{quota::BucketQuota, target::BucketTargets};
use super::object_lock::ObjectLockApi;
use super::versioning::VersioningApi;
use crate::bucket::utils::deserialize;
use crate::config::com::{read_config, save_config};
use crate::error::{Error, Result};
use crate::new_object_layer_fn;
use byteorder::{BigEndian, ByteOrder, LittleEndian};
use rmp_serde::Serializer as rmpSerializer;
use rustfs_policy::policy::BucketPolicy;

@@ -30,12 +34,6 @@ use std::sync::Arc;
use time::OffsetDateTime;
use tracing::error;
-use crate::bucket::target::BucketTarget;
-use crate::bucket::utils::deserialize;
-use crate::config::com::{read_config, save_config};
-use crate::error::{Error, Result};
-use crate::new_object_layer_fn;
use crate::disk::BUCKET_META_PREFIX;
use crate::store::ECStore;

@@ -322,7 +320,9 @@ impl BucketMetadata {
        LittleEndian::write_u16(&mut buf[2..4], BUCKET_METADATA_VERSION);

-        let data = self.marshal_msg()?;
+        let data = self
+            .marshal_msg()
+            .map_err(|e| Error::other(format!("save bucket metadata failed: {e}")))?;

        buf.extend_from_slice(&data);

@@ -362,8 +362,8 @@ impl BucketMetadata {
        }
        //let temp = self.bucket_targets_config_json.clone();
        if !self.bucket_targets_config_json.is_empty() {
-            let arr: Vec<BucketTarget> = serde_json::from_slice(&self.bucket_targets_config_json)?;
-            self.bucket_target_config = Some(BucketTargets { targets: arr });
+            let bucket_targets: BucketTargets = serde_json::from_slice(&self.bucket_targets_config_json)?;
+            self.bucket_target_config = Some(bucket_targets);
        } else {
            self.bucket_target_config = Some(BucketTargets::default())
        }
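For context on the framing around `marshal_msg`: `save` writes a small fixed-size header ahead of the msgpack payload. A hedged sketch of reading that frame back, assuming bytes 0..2 carry a format tag (only the version write to `buf[2..4]` is visible in this hunk):

use byteorder::{ByteOrder, LittleEndian};

/// Splits the assumed 4-byte header from the msgpack payload.
fn split_bucket_metadata(buf: &[u8]) -> Option<(u16, u16, &[u8])> {
    if buf.len() < 4 {
        return None;
    }
    let format = LittleEndian::read_u16(&buf[0..2]); // assumed format tag
    let version = LittleEndian::read_u16(&buf[2..4]); // BUCKET_METADATA_VERSION
    Some((format, version, &buf[4..]))
}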
@@ -451,4 +451,154 @@ mod test {
        assert_eq!(bm.name, new.name);
    }

    #[tokio::test]
    async fn marshal_msg_complete_example() {
        // Create a complete BucketMetadata with various configurations
        let mut bm = BucketMetadata::new("test-bucket");

        // Set creation time to current time
        bm.created = OffsetDateTime::now_utc();
        bm.lock_enabled = true;

        // Add policy configuration
        let policy_json = r#"{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":"*","Action":"s3:GetObject","Resource":"arn:aws:s3:::test-bucket/*"}]}"#;
        bm.policy_config_json = policy_json.as_bytes().to_vec();
        bm.policy_config_updated_at = OffsetDateTime::now_utc();

        // Add lifecycle configuration
        let lifecycle_xml = r#"<LifecycleConfiguration><Rule><ID>rule1</ID><Status>Enabled</Status><Expiration><Days>30</Days></Expiration></Rule></LifecycleConfiguration>"#;
        bm.lifecycle_config_xml = lifecycle_xml.as_bytes().to_vec();
        bm.lifecycle_config_updated_at = OffsetDateTime::now_utc();

        // Add versioning configuration
        let versioning_xml = r#"<VersioningConfiguration><Status>Enabled</Status></VersioningConfiguration>"#;
        bm.versioning_config_xml = versioning_xml.as_bytes().to_vec();
        bm.versioning_config_updated_at = OffsetDateTime::now_utc();

        // Add encryption configuration
        let encryption_xml = r#"<ServerSideEncryptionConfiguration><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>"#;
        bm.encryption_config_xml = encryption_xml.as_bytes().to_vec();
        bm.encryption_config_updated_at = OffsetDateTime::now_utc();

        // Add tagging configuration
        let tagging_xml = r#"<Tagging><TagSet><Tag><Key>Environment</Key><Value>Test</Value></Tag><Tag><Key>Owner</Key><Value>RustFS</Value></Tag></TagSet></Tagging>"#;
        bm.tagging_config_xml = tagging_xml.as_bytes().to_vec();
        bm.tagging_config_updated_at = OffsetDateTime::now_utc();

        // Add quota configuration
        let quota_json = r#"{"quota":1073741824,"quotaType":"hard"}"#; // 1GB quota
        bm.quota_config_json = quota_json.as_bytes().to_vec();
        bm.quota_config_updated_at = OffsetDateTime::now_utc();

        // Add object lock configuration
        let object_lock_xml = r#"<ObjectLockConfiguration><ObjectLockEnabled>Enabled</ObjectLockEnabled><Rule><DefaultRetention><Mode>GOVERNANCE</Mode><Days>7</Days></DefaultRetention></Rule></ObjectLockConfiguration>"#;
        bm.object_lock_config_xml = object_lock_xml.as_bytes().to_vec();
        bm.object_lock_config_updated_at = OffsetDateTime::now_utc();

        // Add notification configuration
        let notification_xml = r#"<NotificationConfiguration><CloudWatchConfiguration><Id>notification1</Id><Event>s3:ObjectCreated:*</Event><CloudWatchConfiguration><LogGroupName>test-log-group</LogGroupName></CloudWatchConfiguration></CloudWatchConfiguration></NotificationConfiguration>"#;
        bm.notification_config_xml = notification_xml.as_bytes().to_vec();
        bm.notification_config_updated_at = OffsetDateTime::now_utc();

        // Add replication configuration
        let replication_xml = r#"<ReplicationConfiguration><Role>arn:aws:iam::123456789012:role/replication-role</Role><Rule><ID>rule1</ID><Status>Enabled</Status><Prefix>documents/</Prefix><Destination><Bucket>arn:aws:s3:::destination-bucket</Bucket></Destination></Rule></ReplicationConfiguration>"#;
        bm.replication_config_xml = replication_xml.as_bytes().to_vec();
        bm.replication_config_updated_at = OffsetDateTime::now_utc();

        // Add bucket targets configuration
        let bucket_targets_json = r#"[{"endpoint":"http://target1.example.com","credentials":{"accessKey":"key1","secretKey":"secret1"},"targetBucket":"target-bucket-1","region":"us-east-1"},{"endpoint":"http://target2.example.com","credentials":{"accessKey":"key2","secretKey":"secret2"},"targetBucket":"target-bucket-2","region":"us-west-2"}]"#;
        bm.bucket_targets_config_json = bucket_targets_json.as_bytes().to_vec();
        bm.bucket_targets_config_updated_at = OffsetDateTime::now_utc();

        // Add bucket targets meta configuration
        let bucket_targets_meta_json = r#"{"replicationId":"repl-123","syncMode":"async","bandwidth":"100MB"}"#;
        bm.bucket_targets_config_meta_json = bucket_targets_meta_json.as_bytes().to_vec();
        bm.bucket_targets_config_meta_updated_at = OffsetDateTime::now_utc();

        // Test serialization
        let buf = bm.marshal_msg().unwrap();
        assert!(!buf.is_empty(), "Serialized buffer should not be empty");

        // Test deserialization
        let deserialized_bm = BucketMetadata::unmarshal(&buf).unwrap();

        // Verify all fields are correctly serialized and deserialized
        assert_eq!(bm.name, deserialized_bm.name);
        assert_eq!(bm.created.unix_timestamp(), deserialized_bm.created.unix_timestamp());
        assert_eq!(bm.lock_enabled, deserialized_bm.lock_enabled);

        // Verify configuration data
        assert_eq!(bm.policy_config_json, deserialized_bm.policy_config_json);
        assert_eq!(bm.lifecycle_config_xml, deserialized_bm.lifecycle_config_xml);
        assert_eq!(bm.versioning_config_xml, deserialized_bm.versioning_config_xml);
        assert_eq!(bm.encryption_config_xml, deserialized_bm.encryption_config_xml);
        assert_eq!(bm.tagging_config_xml, deserialized_bm.tagging_config_xml);
        assert_eq!(bm.quota_config_json, deserialized_bm.quota_config_json);
        assert_eq!(bm.object_lock_config_xml, deserialized_bm.object_lock_config_xml);
        assert_eq!(bm.notification_config_xml, deserialized_bm.notification_config_xml);
        assert_eq!(bm.replication_config_xml, deserialized_bm.replication_config_xml);
        assert_eq!(bm.bucket_targets_config_json, deserialized_bm.bucket_targets_config_json);
        assert_eq!(bm.bucket_targets_config_meta_json, deserialized_bm.bucket_targets_config_meta_json);

        // Verify timestamps (comparing unix timestamps to avoid precision issues)
        assert_eq!(
            bm.policy_config_updated_at.unix_timestamp(),
            deserialized_bm.policy_config_updated_at.unix_timestamp()
        );
        assert_eq!(
            bm.lifecycle_config_updated_at.unix_timestamp(),
            deserialized_bm.lifecycle_config_updated_at.unix_timestamp()
        );
        assert_eq!(
            bm.versioning_config_updated_at.unix_timestamp(),
            deserialized_bm.versioning_config_updated_at.unix_timestamp()
        );
        assert_eq!(
            bm.encryption_config_updated_at.unix_timestamp(),
            deserialized_bm.encryption_config_updated_at.unix_timestamp()
        );
        assert_eq!(
            bm.tagging_config_updated_at.unix_timestamp(),
            deserialized_bm.tagging_config_updated_at.unix_timestamp()
        );
        assert_eq!(
            bm.quota_config_updated_at.unix_timestamp(),
            deserialized_bm.quota_config_updated_at.unix_timestamp()
        );
        assert_eq!(
            bm.object_lock_config_updated_at.unix_timestamp(),
            deserialized_bm.object_lock_config_updated_at.unix_timestamp()
        );
        assert_eq!(
            bm.notification_config_updated_at.unix_timestamp(),
            deserialized_bm.notification_config_updated_at.unix_timestamp()
        );
        assert_eq!(
            bm.replication_config_updated_at.unix_timestamp(),
            deserialized_bm.replication_config_updated_at.unix_timestamp()
        );
        assert_eq!(
            bm.bucket_targets_config_updated_at.unix_timestamp(),
            deserialized_bm.bucket_targets_config_updated_at.unix_timestamp()
        );
        assert_eq!(
            bm.bucket_targets_config_meta_updated_at.unix_timestamp(),
            deserialized_bm.bucket_targets_config_meta_updated_at.unix_timestamp()
        );

        // Test that the serialized data contains expected content
        let buf_str = String::from_utf8_lossy(&buf);
        assert!(buf_str.contains("test-bucket"), "Serialized data should contain bucket name");

        // Verify the buffer size is reasonable (should be larger due to all the config data)
        assert!(buf.len() > 1000, "Buffer should be substantial in size due to all configurations");

        println!("✅ Complete BucketMetadata serialization test passed");
        println!("   - Bucket name: {}", deserialized_bm.name);
        println!("   - Lock enabled: {}", deserialized_bm.lock_enabled);
        println!("   - Policy config size: {} bytes", deserialized_bm.policy_config_json.len());
        println!("   - Lifecycle config size: {} bytes", deserialized_bm.lifecycle_config_xml.len());
        println!("   - Serialized buffer size: {} bytes", buf.len());
    }
}
@@ -12,19 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use crate::StorageAPI;
+use crate::StorageAPI as _;
+use crate::bucket::bucket_target_sys::BucketTargetSys;
use crate::bucket::metadata::{BUCKET_LIFECYCLE_CONFIG, load_bucket_metadata_parse};
use crate::bucket::utils::{deserialize, is_meta_bucketname};
use crate::cmd::bucket_targets;
use crate::error::{Error, Result, is_err_bucket_not_found};
use crate::global::{GLOBAL_Endpoints, is_dist_erasure, is_erasure, new_object_layer_fn};
use crate::store::ECStore;
use futures::future::join_all;
use rustfs_common::heal_channel::HealOpts;
use rustfs_policy::policy::BucketPolicy;
+use s3s::dto::ReplicationConfiguration;
use s3s::dto::{
-    BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ReplicationConfiguration,
-    ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
+    BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ServerSideEncryptionConfiguration, Tagging,
+    VersioningConfiguration,
};
use std::collections::HashSet;
use std::sync::OnceLock;

@@ -261,7 +262,8 @@ impl BucketMetadataSys {
                if let Some(bucket) = buckets.get(idx) {
                    let x = Arc::new(res);
                    mp.insert(bucket.clone(), x.clone());
                    bucket_targets::init_bucket_targets(bucket, x.clone()).await;
                    // TODO:EventNotifier,BucketTargetSys
+                    BucketTargetSys::get().set(bucket, &x).await;
                }
            }
            Err(e) => {

@@ -348,6 +350,7 @@ impl BucketMetadataSys {
            if !is_erasure().await && !is_dist_erasure().await && is_err_bucket_not_found(&err) {
                BucketMetadata::new(bucket)
            } else {
                error!("load bucket metadata failed: {}", err);
                return Err(err);
            }
        }
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+pub mod bucket_target_sys;
pub mod error;
pub mod lifecycle;
pub mod metadata;
233  crates/ecstore/src/bucket/replication/config.rs  Normal file
@@ -0,0 +1,233 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use super::ReplicationRuleExt as _;
use crate::bucket::tagging::decode_tags_to_map;
use rustfs_filemeta::ReplicationType;
use s3s::dto::DeleteMarkerReplicationStatus;
use s3s::dto::DeleteReplicationStatus;
use s3s::dto::Destination;
use s3s::dto::{ExistingObjectReplicationStatus, ReplicationConfiguration, ReplicationRuleStatus, ReplicationRules};
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use uuid::Uuid;

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ObjectOpts {
    pub name: String,
    pub user_tags: String,
    pub version_id: Option<Uuid>,
    pub delete_marker: bool,
    pub ssec: bool,
    pub op_type: ReplicationType,
    pub replica: bool,
    pub existing_object: bool,
    pub target_arn: String,
}

pub trait ReplicationConfigurationExt {
    fn replicate(&self, opts: &ObjectOpts) -> bool;
    fn has_existing_object_replication(&self, arn: &str) -> (bool, bool);
    fn filter_actionable_rules(&self, obj: &ObjectOpts) -> ReplicationRules;
    fn get_destination(&self) -> Destination;
    fn has_active_rules(&self, prefix: &str, recursive: bool) -> bool;
    fn filter_target_arns(&self, obj: &ObjectOpts) -> Vec<String>;
}

impl ReplicationConfigurationExt for ReplicationConfiguration {
    /// Checks whether any rule replicates existing objects for the given ARN
    fn has_existing_object_replication(&self, arn: &str) -> (bool, bool) {
        let mut has_arn = false;

        for rule in &self.rules {
            if rule.destination.bucket == arn || self.role == arn {
                if !has_arn {
                    has_arn = true;
                }
                if let Some(status) = &rule.existing_object_replication {
                    if status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::ENABLED) {
                        return (true, true);
                    }
                }
            }
        }
        (has_arn, false)
    }

    fn filter_actionable_rules(&self, obj: &ObjectOpts) -> ReplicationRules {
        if obj.name.is_empty() && obj.op_type != ReplicationType::Resync && obj.op_type != ReplicationType::All {
            return vec![];
        }

        let mut rules = ReplicationRules::default();

        for rule in &self.rules {
            if rule.status == ReplicationRuleStatus::from_static(ReplicationRuleStatus::DISABLED) {
                continue;
            }

            if !obj.target_arn.is_empty() && rule.destination.bucket != obj.target_arn && self.role != obj.target_arn {
                continue;
            }

            if obj.op_type == ReplicationType::Resync || obj.op_type == ReplicationType::All {
                rules.push(rule.clone());
                continue;
            }

            if let Some(status) = &rule.existing_object_replication {
                if obj.existing_object
                    && status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
                {
                    continue;
                }
            }

            if !obj.name.starts_with(rule.prefix()) {
                continue;
            }

            if let Some(filter) = &rule.filter {
                let object_tags = decode_tags_to_map(&obj.user_tags);
                if filter.test_tags(&object_tags) {
                    rules.push(rule.clone());
                }
            }
        }

        rules.sort_by(|a, b| {
            if a.destination == b.destination {
                a.priority.cmp(&b.priority)
            } else {
                std::cmp::Ordering::Equal
            }
        });

        rules
    }

    /// Returns the destination configuration
    fn get_destination(&self) -> Destination {
        if !self.rules.is_empty() {
            self.rules[0].destination.clone()
        } else {
            Destination {
                account: None,
                bucket: "".to_string(),
                encryption_configuration: None,
                metrics: None,
                replication_time: None,
                access_control_translation: None,
                storage_class: None,
            }
        }
    }

    /// Decides whether the object should be replicated
    fn replicate(&self, obj: &ObjectOpts) -> bool {
        let rules = self.filter_actionable_rules(obj);

        for rule in rules.iter() {
            if rule.status == ReplicationRuleStatus::from_static(ReplicationRuleStatus::DISABLED) {
                continue;
            }

            if let Some(status) = &rule.existing_object_replication {
                if obj.existing_object
                    && status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
                {
                    return false;
                }
            }

            if obj.op_type == ReplicationType::Delete {
                if obj.version_id.is_some() {
                    return rule
                        .delete_replication
                        .clone()
                        .is_some_and(|d| d.status == DeleteReplicationStatus::from_static(DeleteReplicationStatus::ENABLED));
                } else {
                    return rule.delete_marker_replication.clone().is_some_and(|d| {
                        d.status == Some(DeleteMarkerReplicationStatus::from_static(DeleteMarkerReplicationStatus::ENABLED))
                    });
                }
            }

            // Regular object / metadata replication
            return rule.metadata_replicate(obj);
        }
        false
    }

    /// Checks whether any active rules exist.
    /// A prefix may optionally be supplied.
    /// If `recursive` is true, the function also returns true when an active rule exists at any level under the prefix.
    /// With no prefix supplied, `recursive` is effectively true.
    fn has_active_rules(&self, prefix: &str, recursive: bool) -> bool {
        if self.rules.is_empty() {
            return false;
        }

        for rule in &self.rules {
            if rule.status == ReplicationRuleStatus::from_static(ReplicationRuleStatus::DISABLED) {
                continue;
            }

            if let Some(filter) = &rule.filter {
                if let Some(filter_prefix) = &filter.prefix {
                    if !prefix.is_empty() && !filter_prefix.is_empty() {
                        // The supplied prefix must fall under the rule prefix
                        if !recursive && !prefix.starts_with(filter_prefix) {
                            continue;
                        }
                    }

                    // When recursive, skip this rule if it matches neither the test prefix nor any level under it
                    if recursive && !rule.prefix().starts_with(prefix) && !prefix.starts_with(rule.prefix()) {
                        continue;
                    }
                }
            }
            return true;
        }
        false
    }

    /// Filters target ARNs, returning the distinct target ARNs in the configuration
    fn filter_target_arns(&self, obj: &ObjectOpts) -> Vec<String> {
        let mut arns = Vec::new();
        let mut targets_map: HashSet<String> = HashSet::new();
        let rules = self.filter_actionable_rules(obj);

        for rule in rules {
            if rule.status == ReplicationRuleStatus::from_static(ReplicationRuleStatus::DISABLED) {
                continue;
            }

            if !self.role.is_empty() {
                arns.push(self.role.clone()); // Use the legacy RoleArn if present
                return arns;
            }

            if !targets_map.contains(&rule.destination.bucket) {
                targets_map.insert(rule.destination.bucket.clone());
            }
        }

        for arn in targets_map {
            arns.push(arn);
        }
        arns
    }
}
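A minimal usage sketch (illustrative, not part of the diff): given a `ReplicationConfiguration` deserialized from the bucket's replication config, the extension trait drives the per-object decision; the object key and tags below are hypothetical.

fn should_replicate(cfg: &ReplicationConfiguration) -> bool {
    let opts = ObjectOpts {
        name: "documents/report.pdf".to_string(), // object key under test
        user_tags: "env=prod".to_string(),        // URL-encoded object tags
        ..Default::default()
    };
    // True when an enabled, actionable rule matches the object's prefix/tags.
    cfg.replicate(&opts)
}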
@@ -12,30 +12,36 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-// Replication status type for x-amz-replication-status header
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum StatusType {
-    Pending,
-    Completed,
-    CompletedLegacy,
-    Failed,
-    Replica,
-}
+use serde::{Deserialize, Serialize};
+use std::fmt;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
+pub enum ResyncStatusType {
+    #[default]
+    NoResync,
+    ResyncPending,
+    ResyncCanceled,
+    ResyncStarted,
+    ResyncCompleted,
+    ResyncFailed,
+}

-impl StatusType {
-    // Converts the enum variant to its string representation
-    pub fn as_str(&self) -> &'static str {
-        match self {
-            StatusType::Pending => "PENDING",
-            StatusType::Completed => "COMPLETED",
-            StatusType::CompletedLegacy => "COMPLETE",
-            StatusType::Failed => "FAILED",
-            StatusType::Replica => "REPLICA",
-        }
-    }
-
-    // Checks if the status is empty (not set)
-    pub fn is_empty(&self) -> bool {
-        matches!(self, StatusType::Pending) // Adjust this as needed
+impl ResyncStatusType {
+    pub fn is_valid(&self) -> bool {
+        *self != ResyncStatusType::NoResync
    }
}

+impl fmt::Display for ResyncStatusType {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let s = match self {
+            ResyncStatusType::ResyncStarted => "Ongoing",
+            ResyncStatusType::ResyncCompleted => "Completed",
+            ResyncStatusType::ResyncFailed => "Failed",
+            ResyncStatusType::ResyncPending => "Pending",
+            ResyncStatusType::ResyncCanceled => "Canceled",
+            ResyncStatusType::NoResync => "",
+        };
+        write!(f, "{s}")
+    }
+}
@@ -12,4 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+mod config;
+pub mod datatypes;
+mod replication_pool;
+mod replication_resyncer;
+mod replication_state;
+mod replication_type;
+mod rule;
+
+pub use config::*;
+pub use datatypes::*;
+pub use replication_pool::*;
+pub use replication_resyncer::*;
+pub use replication_type::*;
+pub use rule::*;
1035  crates/ecstore/src/bucket/replication/replication_pool.rs  Normal file
File diff suppressed because it is too large
2407  crates/ecstore/src/bucket/replication/replication_resyncer.rs  Normal file
File diff suppressed because it is too large
1201  crates/ecstore/src/bucket/replication/replication_state.rs  Normal file
File diff suppressed because it is too large
470  crates/ecstore/src/bucket/replication/replication_type.rs  Normal file
@@ -0,0 +1,470 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, Result};
use crate::store_api::ObjectInfo;

use regex::Regex;

use rustfs_filemeta::VersionPurgeStatusType;
use rustfs_filemeta::{ReplicatedInfos, ReplicationType};
use rustfs_filemeta::{ReplicationState, ReplicationStatusType};
use rustfs_utils::http::RESERVED_METADATA_PREFIX_LOWER;
use rustfs_utils::http::RUSTFS_REPLICATION_RESET_STATUS;
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::collections::HashMap;
use std::fmt;
use time::OffsetDateTime;
use uuid::Uuid;

pub const REPLICATION_RESET: &str = "replication-reset";
pub const REPLICATION_STATUS: &str = "replication-status";

// ReplicateQueued - replication being queued trail
pub const REPLICATE_QUEUED: &str = "replicate:queue";

// ReplicateExisting - audit trail for existing objects replication
pub const REPLICATE_EXISTING: &str = "replicate:existing";
// ReplicateExistingDelete - audit trail for delete replication triggered for existing delete markers
pub const REPLICATE_EXISTING_DELETE: &str = "replicate:existing:delete";

// ReplicateMRF - audit trail for replication from Most Recent Failures (MRF) queue
pub const REPLICATE_MRF: &str = "replicate:mrf";
// ReplicateIncoming - audit trail of inline replication
pub const REPLICATE_INCOMING: &str = "replicate:incoming";
// ReplicateIncomingDelete - audit trail of inline replication of deletes.
pub const REPLICATE_INCOMING_DELETE: &str = "replicate:incoming:delete";

// ReplicateHeal - audit trail for healing of failed/pending replications
pub const REPLICATE_HEAL: &str = "replicate:heal";
// ReplicateHealDelete - audit trail of healing of failed/pending delete replications.
pub const REPLICATE_HEAL_DELETE: &str = "replicate:heal:delete";

#[derive(Serialize, Deserialize, Debug)]
pub struct MrfReplicateEntry {
    #[serde(rename = "bucket")]
    pub bucket: String,

    #[serde(rename = "object")]
    pub object: String,

    #[serde(skip_serializing, skip_deserializing)]
    pub version_id: Option<Uuid>,

    #[serde(rename = "retryCount")]
    pub retry_count: i32,

    #[serde(skip_serializing, skip_deserializing)]
    pub size: i64,
}

pub trait ReplicationWorkerOperation: Any + Send + Sync {
    fn to_mrf_entry(&self) -> MrfReplicateEntry;
    fn as_any(&self) -> &dyn Any;
    fn get_bucket(&self) -> &str;
    fn get_object(&self) -> &str;
    fn get_size(&self) -> i64;
    fn is_delete_marker(&self) -> bool;
    fn get_op_type(&self) -> ReplicationType;
}

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ReplicateTargetDecision {
    pub replicate: bool,
    pub synchronous: bool,
    pub arn: String,
    pub id: String,
}

impl ReplicateTargetDecision {
    pub fn new(arn: String, replicate: bool, sync: bool) -> Self {
        Self {
            replicate,
            synchronous: sync,
            arn,
            id: String::new(),
        }
    }
}

impl fmt::Display for ReplicateTargetDecision {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{};{};{};{}", self.replicate, self.synchronous, self.arn, self.id)
    }
}

/// ReplicateDecision represents the replication decision for each target
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplicateDecision {
    pub targets_map: HashMap<String, ReplicateTargetDecision>,
}

impl ReplicateDecision {
    pub fn new() -> Self {
        Self {
            targets_map: HashMap::new(),
        }
    }

    /// Returns true if at least one target qualifies for replication
    pub fn replicate_any(&self) -> bool {
        self.targets_map.values().any(|t| t.replicate)
    }

    /// Returns true if at least one target qualifies for synchronous replication
    pub fn is_synchronous(&self) -> bool {
        self.targets_map.values().any(|t| t.synchronous)
    }

    /// Updates ReplicateDecision with target's replication decision
    pub fn set(&mut self, target: ReplicateTargetDecision) {
        self.targets_map.insert(target.arn.clone(), target);
    }

    /// Returns a stringified representation of internal replication status with all targets marked as `PENDING`
    pub fn pending_status(&self) -> Option<String> {
        let mut result = String::new();
        for target in self.targets_map.values() {
            if target.replicate {
                result.push_str(&format!("{}={};", target.arn, ReplicationStatusType::Pending.as_str()));
            }
        }
        if result.is_empty() { None } else { Some(result) }
    }
}

impl fmt::Display for ReplicateDecision {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut result = String::new();
        for (key, value) in &self.targets_map {
            result.push_str(&format!("{key}={value},"));
        }
        write!(f, "{}", result.trim_end_matches(','))
    }
}

impl Default for ReplicateDecision {
    fn default() -> Self {
        Self::new()
    }
}
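A minimal usage sketch for `ReplicateDecision` (illustrative; the ARN is hypothetical, and this assumes `rustfs_filemeta`'s Pending status stringifies as "PENDING", matching the legacy StatusType mapping replaced in datatypes.rs above):

let mut dec = ReplicateDecision::new();
dec.set(ReplicateTargetDecision::new("arn:rustfs:replication::t1:bucket".to_string(), true, false));
assert!(dec.replicate_any() && !dec.is_synchronous());
assert_eq!(dec.pending_status().as_deref(), Some("arn:rustfs:replication::t1:bucket=PENDING;"));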
// parse k-v pairs of target ARN to stringified ReplicateTargetDecision delimited by ',' into a
// ReplicateDecision struct
pub fn parse_replicate_decision(_bucket: &str, s: &str) -> Result<ReplicateDecision> {
    let mut decision = ReplicateDecision::new();

    if s.is_empty() {
        return Ok(decision);
    }

    for p in s.split(',') {
        if p.is_empty() {
            continue;
        }

        let slc = p.split('=').collect::<Vec<&str>>();
        if slc.len() != 2 {
            return Err(Error::other(format!("invalid replicate decision format: {s}")));
        }

        let tgt_str = slc[1].trim_matches('"');
        let tgt = tgt_str.split(';').collect::<Vec<&str>>();
        if tgt.len() != 4 {
            return Err(Error::other(format!("invalid replicate decision format: {s}")));
        }

        let tgt = ReplicateTargetDecision {
            replicate: tgt[0] == "true",
            synchronous: tgt[1] == "true",
            arn: tgt[2].to_string(),
            id: tgt[3].to_string(),
        };
        decision.targets_map.insert(slc[0].to_string(), tgt);
    }

    Ok(decision)
}
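An illustrative round trip for the format described in the comment above (ARN and rule id hypothetical): each comma-separated pair maps a target ARN to a quoted `replicate;synchronous;arn;id` tuple.

let arn = "arn:rustfs:replication::t1:dest-bucket";
let s = format!("{arn}=\"true;false;{arn};rule-1\"");
let dec = parse_replicate_decision("src-bucket", &s).unwrap();
assert!(dec.targets_map[arn].replicate);
assert!(!dec.targets_map[arn].synchronous);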
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ResyncTargetDecision {
    pub replicate: bool,
    pub reset_id: String,
    pub reset_before_date: Option<OffsetDateTime>,
}

pub fn target_reset_header(arn: &str) -> String {
    format!("{RESERVED_METADATA_PREFIX_LOWER}{REPLICATION_RESET}-{arn}")
}

impl ResyncTargetDecision {
    pub fn resync_target(
        oi: &ObjectInfo,
        arn: &str,
        reset_id: &str,
        reset_before_date: Option<OffsetDateTime>,
        status: ReplicationStatusType,
    ) -> Self {
        let rs = oi
            .user_defined
            .get(target_reset_header(arn).as_str())
            .or(oi.user_defined.get(RUSTFS_REPLICATION_RESET_STATUS))
            .map(|s| s.to_string());

        let mut dec = Self::default();

        let mod_time = oi.mod_time.unwrap_or(OffsetDateTime::UNIX_EPOCH);

        if rs.is_none() {
            let reset_before_date = reset_before_date.unwrap_or(OffsetDateTime::UNIX_EPOCH);
            if !reset_id.is_empty() && mod_time < reset_before_date {
                dec.replicate = true;
                return dec;
            }

            dec.replicate = status == ReplicationStatusType::Empty;

            return dec;
        }

        if reset_id.is_empty() || reset_before_date.is_none() {
            return dec;
        }

        let rs = rs.unwrap();
        let reset_before_date = reset_before_date.unwrap();

        let parts: Vec<&str> = rs.splitn(2, ';').collect();

        if parts.len() != 2 {
            return dec;
        }

        let new_reset = parts[0] == reset_id;

        if !new_reset && status == ReplicationStatusType::Completed {
            return dec;
        }

        dec.replicate = new_reset && mod_time < reset_before_date;

        dec
    }
}

/// ResyncDecision is a struct representing a map with target's individual resync decisions
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResyncDecision {
    pub targets: HashMap<String, ResyncTargetDecision>,
}

impl ResyncDecision {
    pub fn new() -> Self {
        Self { targets: HashMap::new() }
    }

    /// Returns true if no targets with resync decision present
    pub fn is_empty(&self) -> bool {
        self.targets.is_empty()
    }

    pub fn must_resync(&self) -> bool {
        self.targets.values().any(|v| v.replicate)
    }

    pub fn must_resync_target(&self, tgt_arn: &str) -> bool {
        self.targets.get(tgt_arn).map(|v| v.replicate).unwrap_or(false)
    }
}

impl Default for ResyncDecision {
    fn default() -> Self {
        Self::new()
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplicateObjectInfo {
    pub name: String,
    pub size: i64,
    pub actual_size: i64,
    pub bucket: String,
    pub version_id: Option<Uuid>,
    pub etag: Option<String>,
    pub mod_time: Option<OffsetDateTime>,
    pub replication_status: ReplicationStatusType,
    pub replication_status_internal: Option<String>,
    pub delete_marker: bool,
    pub version_purge_status_internal: Option<String>,
    pub version_purge_status: VersionPurgeStatusType,
    pub replication_state: Option<ReplicationState>,
    pub op_type: ReplicationType,
    pub event_type: String,
    pub dsc: ReplicateDecision,
    pub existing_obj_resync: ResyncDecision,
    pub target_statuses: HashMap<String, ReplicationStatusType>,
    pub target_purge_statuses: HashMap<String, VersionPurgeStatusType>,
    pub replication_timestamp: Option<OffsetDateTime>,
    pub ssec: bool,
    pub user_tags: String,
    pub checksum: Vec<u8>,
    pub retry_count: u32,
}

impl ReplicationWorkerOperation for ReplicateObjectInfo {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn to_mrf_entry(&self) -> MrfReplicateEntry {
        MrfReplicateEntry {
            bucket: self.bucket.clone(),
            object: self.name.clone(),
            version_id: self.version_id,
            retry_count: self.retry_count as i32,
            size: self.size,
        }
    }

    fn get_bucket(&self) -> &str {
        &self.bucket
    }

    fn get_object(&self) -> &str {
        &self.name
    }

    fn get_size(&self) -> i64 {
        self.size
    }

    fn is_delete_marker(&self) -> bool {
        self.delete_marker
    }

    fn get_op_type(&self) -> ReplicationType {
        self.op_type
    }
}

lazy_static::lazy_static! {
    static ref REPL_STATUS_REGEX: Regex = Regex::new(r"([^=].*?)=([^,].*?);").unwrap();
}

impl ReplicateObjectInfo {
    /// Returns replication status of a target
    pub fn target_replication_status(&self, arn: &str) -> ReplicationStatusType {
        let binding = self.replication_status_internal.clone().unwrap_or_default();
        let captures = REPL_STATUS_REGEX.captures_iter(&binding);
        for cap in captures {
            if cap.len() == 3 && &cap[1] == arn {
                return ReplicationStatusType::from(&cap[2]);
            }
        }
        ReplicationStatusType::default()
    }

    /// Returns the relevant info needed by MRF
    pub fn to_mrf_entry(&self) -> MrfReplicateEntry {
        MrfReplicateEntry {
            bucket: self.bucket.clone(),
            object: self.name.clone(),
            version_id: self.version_id,
            retry_count: self.retry_count as i32,
            size: self.size,
        }
    }
}

// constructs a replication status map from string representation
pub fn replication_statuses_map(s: &str) -> HashMap<String, ReplicationStatusType> {
    let mut targets = HashMap::new();
    let rep_stat_matches = REPL_STATUS_REGEX.captures_iter(s).map(|c| c.extract());
    for (_, [arn, status]) in rep_stat_matches {
        if arn.is_empty() {
            continue;
        }
        let status = ReplicationStatusType::from(status);
        targets.insert(arn.to_string(), status);
    }
    targets
}
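Illustrative (hypothetical ARNs): the internal status string packs one `arn=STATUS;` pair per target, which the regex above splits back into a typed map.

let m = replication_statuses_map("arn:rustfs:replication::t1:b=PENDING;arn:rustfs:replication::t2:b=COMPLETED;");
assert_eq!(m.len(), 2);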
// constructs a version purge status map from string representation
pub fn version_purge_statuses_map(s: &str) -> HashMap<String, VersionPurgeStatusType> {
    let mut targets = HashMap::new();
    let purge_status_matches = REPL_STATUS_REGEX.captures_iter(s).map(|c| c.extract());
    for (_, [arn, status]) in purge_status_matches {
        if arn.is_empty() {
            continue;
        }
        let status = VersionPurgeStatusType::from(status);
        targets.insert(arn.to_string(), status);
    }
    targets
}

pub fn get_replication_state(rinfos: &ReplicatedInfos, prev_state: &ReplicationState, _vid: Option<String>) -> ReplicationState {
    let reset_status_map: Vec<(String, String)> = rinfos
        .targets
        .iter()
        .filter(|v| !v.resync_timestamp.is_empty())
        .map(|t| (target_reset_header(t.arn.as_str()), t.resync_timestamp.clone()))
        .collect();

    let repl_statuses = rinfos.replication_status_internal();
    let vpurge_statuses = rinfos.version_purge_status_internal();

    let mut reset_statuses_map = prev_state.reset_statuses_map.clone();
    for (key, value) in reset_status_map {
        reset_statuses_map.insert(key, value);
    }

    ReplicationState {
        replicate_decision_str: prev_state.replicate_decision_str.clone(),
        reset_statuses_map,
        replica_timestamp: prev_state.replica_timestamp,
        replica_status: prev_state.replica_status.clone(),
        targets: replication_statuses_map(&repl_statuses.clone().unwrap_or_default()),
        replication_status_internal: repl_statuses,
        replication_timestamp: rinfos.replication_timestamp,
        purge_targets: version_purge_statuses_map(&vpurge_statuses.clone().unwrap_or_default()),
        version_purge_status_internal: vpurge_statuses,
        ..Default::default()
    }
}
51  crates/ecstore/src/bucket/replication/rule.rs  Normal file
@@ -0,0 +1,51 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use s3s::dto::ReplicaModificationsStatus;
use s3s::dto::ReplicationRule;

use super::ObjectOpts;

pub trait ReplicationRuleExt {
    fn prefix(&self) -> &str;
    fn metadata_replicate(&self, obj: &ObjectOpts) -> bool;
}

impl ReplicationRuleExt for ReplicationRule {
    fn prefix(&self) -> &str {
        if let Some(filter) = &self.filter {
            if let Some(prefix) = &filter.prefix {
                prefix
            } else if let Some(and) = &filter.and {
                and.prefix.as_deref().unwrap_or("")
            } else {
                ""
            }
        } else {
            ""
        }
    }

    fn metadata_replicate(&self, obj: &ObjectOpts) -> bool {
        if !obj.replica {
            return true;
        }

        self.source_selection_criteria.as_ref().is_some_and(|s| {
            s.replica_modifications
                .clone()
                .is_some_and(|r| r.status == ReplicaModificationsStatus::from_static(ReplicaModificationsStatus::ENABLED))
        })
    }
}
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use std::collections::HashMap;
+
use s3s::dto::Tag;
use url::form_urlencoded;

@@ -34,6 +36,20 @@ pub fn decode_tags(tags: &str) -> Vec<Tag> {
    list
}

+pub fn decode_tags_to_map(tags: &str) -> HashMap<String, String> {
+    let mut list = HashMap::new();
+
+    for (k, v) in form_urlencoded::parse(tags.as_bytes()) {
+        if k.is_empty() || v.is_empty() {
+            continue;
+        }
+
+        list.insert(k.to_string(), v.to_string());
+    }
+
+    list
+}
+
pub fn encode_tags(tags: Vec<Tag>) -> String {
    let mut encoded = form_urlencoded::Serializer::new(String::new());
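Illustrative: object tags travel as a URL-encoded query string, so the new helper turns a string like `env=prod&team=core` into a plain map (keys and values are examples):

let m = decode_tags_to_map("env=prod&team=core");
assert_eq!(m.get("env").map(String::as_str), Some("prod"));
assert_eq!(m.len(), 2);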
66  crates/ecstore/src/bucket/target/arn.rs  Normal file
@@ -0,0 +1,66 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use super::BucketTargetType;
use std::fmt::Display;
use std::str::FromStr;

pub struct ARN {
    pub arn_type: BucketTargetType,
    pub id: String,
    pub region: String,
    pub bucket: String,
}

impl ARN {
    pub fn new(arn_type: BucketTargetType, id: String, region: String, bucket: String) -> Self {
        Self {
            arn_type,
            id,
            region,
            bucket,
        }
    }

    pub fn is_empty(&self) -> bool {
        // An ARN is empty when its type is not a valid service type.
        !self.arn_type.is_valid()
    }
}

impl Display for ARN {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "arn:rustfs:{}:{}:{}:{}", self.arn_type, self.region, self.id, self.bucket)
    }
}

impl FromStr for ARN {
    type Err = std::io::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if !s.starts_with("arn:rustfs:") {
            return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Invalid ARN format"));
        }

        let parts: Vec<&str> = s.split(':').collect();
        if parts.len() != 6 {
            return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Invalid ARN format"));
        }
        // Parse in the order Display writes: arn:rustfs:<type>:<region>:<id>:<bucket>
        Ok(ARN {
            arn_type: BucketTargetType::from_str(parts[2]).unwrap_or_default(),
            region: parts[3].to_string(),
            id: parts[4].to_string(),
            bucket: parts[5].to_string(),
        })
    }
}
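An illustrative round trip (values hypothetical); this only holds with `Display` and `FromStr` agreeing on the `type:region:id:bucket` field order as noted above:

let s = "arn:rustfs:replication:us-east-1:id-123:my-bucket";
let arn: ARN = s.parse().unwrap();
assert_eq!(arn.to_string(), s);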
800  crates/ecstore/src/bucket/target/bucket_target.rs  Normal file
@@ -0,0 +1,800 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, Result};
use rmp_serde::Serializer as rmpSerializer;
use serde::{Deserialize, Serialize};
use std::{
    fmt::{self, Display},
    str::FromStr,
    time::Duration,
};
use time::OffsetDateTime;
use url::Url;

#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct Credentials {
    #[serde(rename = "accessKey")]
    pub access_key: String,
    #[serde(rename = "secretKey")]
    pub secret_key: String,
    pub session_token: Option<String>,
    pub expiration: Option<chrono::DateTime<chrono::Utc>>,
}

#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub enum ServiceType {
    #[default]
    Replication,
}

#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct LatencyStat {
    #[serde(with = "duration_milliseconds")]
    pub curr: Duration, // Current latency
    #[serde(with = "duration_milliseconds")]
    pub avg: Duration, // Average latency
    #[serde(with = "duration_milliseconds")]
    pub max: Duration, // Maximum latency
}

mod duration_milliseconds {
    use serde::{Deserialize, Deserializer, Serializer};
    use std::time::Duration;

    pub fn serialize<S>(duration: &Duration, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_u64(duration.as_millis() as u64)
    }

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Duration, D::Error>
    where
        D: Deserializer<'de>,
    {
        let millis = u64::deserialize(deserializer)?;
        Ok(Duration::from_millis(millis))
    }
}

mod duration_seconds {
    use serde::{Deserialize, Deserializer, Serializer};
    use std::time::Duration;

    pub fn serialize<S>(duration: &Duration, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_u64(duration.as_secs())
    }

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Duration, D::Error>
    where
        D: Deserializer<'de>,
    {
        let secs = u64::deserialize(deserializer)?;
        Ok(Duration::from_secs(secs))
    }
}
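Illustrative: with the `duration_milliseconds` helper above, `LatencyStat` serializes to bare integer milliseconds, the same shape the JSON test at the end of this file feeds in:

let stat = LatencyStat {
    curr: Duration::from_millis(100),
    avg: Duration::from_millis(150),
    max: Duration::from_millis(300),
};
assert_eq!(serde_json::to_string(&stat).unwrap(), r#"{"curr":100,"avg":150,"max":300}"#);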
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq)]
|
||||
pub enum BucketTargetType {
|
||||
#[default]
|
||||
None,
|
||||
#[serde(rename = "replication")]
|
||||
ReplicationService,
|
||||
#[serde(rename = "ilm")]
|
||||
IlmService,
|
||||
}
|
||||
|
||||
impl BucketTargetType {
|
||||
pub fn is_valid(&self) -> bool {
|
||||
match self {
|
||||
BucketTargetType::None => false,
|
||||
BucketTargetType::ReplicationService | BucketTargetType::IlmService => true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for BucketTargetType {
|
||||
type Err = std::io::Error;
|
||||
|
||||
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||
match s {
|
||||
"replication" => Ok(BucketTargetType::ReplicationService),
|
||||
"ilm" => Ok(BucketTargetType::IlmService),
|
||||
_ => Ok(BucketTargetType::None),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for BucketTargetType {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
BucketTargetType::None => write!(f, ""),
|
||||
BucketTargetType::ReplicationService => write!(f, "replication"),
|
||||
BucketTargetType::IlmService => write!(f, "ilm"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Define BucketTarget structure
|
||||
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
|
||||
pub struct BucketTarget {
|
||||
#[serde(rename = "sourcebucket", default)]
|
||||
pub source_bucket: String,
|
||||
|
||||
#[serde(default)]
|
||||
pub endpoint: String,
|
||||
|
||||
#[serde(default)]
|
||||
pub credentials: Option<Credentials>,
|
||||
#[serde(rename = "targetbucket", default)]
|
||||
pub target_bucket: String,
|
||||
|
||||
#[serde(default)]
|
||||
pub secure: bool,
|
||||
#[serde(default)]
|
||||
pub path: String,
|
||||
#[serde(default)]
|
||||
pub api: String,
|
||||
#[serde(default)]
|
||||
pub arn: String,
|
||||
#[serde(rename = "type", default)]
|
||||
pub target_type: BucketTargetType,
|
||||
|
||||
#[serde(default)]
|
||||
pub region: String,
|
||||
|
||||
#[serde(alias = "bandwidth", default)]
|
||||
pub bandwidth_limit: i64,
|
||||
|
||||
#[serde(rename = "replicationSync", default)]
|
||||
pub replication_sync: bool,
|
||||
#[serde(default)]
|
||||
pub storage_class: String,
|
||||
#[serde(rename = "healthCheckDuration", with = "duration_seconds", default)]
|
||||
pub health_check_duration: Duration,
|
||||
#[serde(rename = "disableProxy", default)]
|
||||
pub disable_proxy: bool,
|
||||
|
||||
#[serde(rename = "resetBeforeDate", with = "time::serde::rfc3339::option", default)]
|
||||
pub reset_before_date: Option<OffsetDateTime>,
|
||||
#[serde(default)]
|
||||
pub reset_id: String,
|
||||
#[serde(rename = "totalDowntime", with = "duration_seconds", default)]
|
||||
pub total_downtime: Duration,
|
||||
|
||||
#[serde(rename = "lastOnline", with = "time::serde::rfc3339::option", default)]
|
||||
pub last_online: Option<OffsetDateTime>,
|
||||
#[serde(rename = "isOnline", default)]
|
||||
pub online: bool,
|
||||
|
||||
#[serde(default)]
|
||||
pub latency: LatencyStat,
|
||||
|
||||
#[serde(default)]
|
||||
pub deployment_id: String,
|
||||
|
||||
#[serde(default)]
|
||||
pub edge: bool,
|
||||
#[serde(rename = "edgeSyncBeforeExpiry", default)]
|
||||
pub edge_sync_before_expiry: bool,
|
||||
#[serde(rename = "offlineCount", default)]
|
||||
pub offline_count: u64,
|
||||
}

impl BucketTarget {
    pub fn is_empty(self) -> bool {
        self.target_bucket.is_empty() && self.endpoint.is_empty() && self.arn.is_empty()
    }
    pub fn url(&self) -> Result<Url> {
        let scheme = if self.secure { "https" } else { "http" };
        Url::parse(&format!("{}://{}", scheme, self.endpoint)).map_err(Error::other)
    }
}

impl Display for BucketTarget {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{} ", self.endpoint)?;
        write!(f, "{}", self.target_bucket)?;
        Ok(())
    }
}

#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct BucketTargets {
    pub targets: Vec<BucketTarget>,
}

impl BucketTargets {
    pub fn marshal_msg(&self) -> Result<Vec<u8>> {
        let mut buf = Vec::new();

        self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?;

        Ok(buf)
    }

    pub fn unmarshal(buf: &[u8]) -> Result<Self> {
        let t: BucketTargets = rmp_serde::from_slice(buf)?;
        Ok(t)
    }

    pub fn is_empty(&self) -> bool {
        if self.targets.is_empty() {
            return true;
        }

        for target in &self.targets {
            if !target.clone().is_empty() {
                return false;
            }
        }

        true
    }
}
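
// Illustrative only (not part of this diff): round-tripping the target list
// through its MessagePack form via the two methods above.
//
// let targets = BucketTargets { targets: vec![BucketTarget::default()] };
// let buf = targets.marshal_msg()?;
// let decoded = BucketTargets::unmarshal(&buf)?;
// assert!(decoded.is_empty()); // a single default target still counts as empty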

#[cfg(test)]
mod tests {
    use super::*;
    use serde_json;
    use std::time::Duration;
    use time::OffsetDateTime;

    #[test]
    fn test_bucket_target_json_deserialize() {
        let json = r#"
        {
            "sourcebucket": "source-bucket-name",
            "endpoint": "s3.amazonaws.com",
            "credentials": {
                "accessKey": "test-access-key",
                "secretKey": "test-secret-key",
                "session_token": "test-session-token",
                "expiration": "2024-12-31T23:59:59Z"
            },
            "targetbucket": "target-bucket-name",
            "secure": true,
            "path": "/api/v1",
            "api": "s3v4",
            "arn": "arn:aws:s3:::target-bucket-name",
            "type": "replication",
            "region": "us-east-1",
            "bandwidth_limit": 1000000,
            "replicationSync": true,
            "storage_class": "STANDARD",
            "healthCheckDuration": 30,
            "disableProxy": false,
            "resetBeforeDate": null,
            "reset_id": "reset-123",
            "totalDowntime": 3600,
            "last_online": null,
            "isOnline": true,
            "latency": {
                "curr": 100,
                "avg": 150,
                "max": 300
            },
            "deployment_id": "deployment-456",
            "edge": false,
            "edgeSyncBeforeExpiry": true,
            "offlineCount": 5
        }
        "#;

        let result: std::result::Result<BucketTarget, _> = serde_json::from_str(json);
        assert!(result.is_ok(), "Failed to deserialize BucketTarget: {:?}", result.err());

        let target = result.unwrap();

        // Verify basic fields
        assert_eq!(target.source_bucket, "source-bucket-name");
        assert_eq!(target.endpoint, "s3.amazonaws.com");
        assert_eq!(target.target_bucket, "target-bucket-name");
        assert!(target.secure);
        assert_eq!(target.path, "/api/v1");
        assert_eq!(target.api, "s3v4");
        assert_eq!(target.arn, "arn:aws:s3:::target-bucket-name");
        assert_eq!(target.target_type, BucketTargetType::ReplicationService);
        assert_eq!(target.region, "us-east-1");
        assert_eq!(target.bandwidth_limit, 1000000);
        assert!(target.replication_sync);
        assert_eq!(target.storage_class, "STANDARD");
        assert_eq!(target.health_check_duration, Duration::from_secs(30));
        assert!(!target.disable_proxy);
        assert_eq!(target.reset_id, "reset-123");
        assert_eq!(target.total_downtime, Duration::from_secs(3600));
        assert!(target.online);
        assert_eq!(target.deployment_id, "deployment-456");
        assert!(!target.edge);
        assert!(target.edge_sync_before_expiry);
        assert_eq!(target.offline_count, 5);

        // Verify credentials
        assert!(target.credentials.is_some());
        let credentials = target.credentials.unwrap();
        assert_eq!(credentials.access_key, "test-access-key");
        assert_eq!(credentials.secret_key, "test-secret-key");
        assert_eq!(credentials.session_token, Some("test-session-token".to_string()));
        assert!(credentials.expiration.is_some());

        // Verify latency statistics
        assert_eq!(target.latency.curr, Duration::from_millis(100));
        assert_eq!(target.latency.avg, Duration::from_millis(150));
        assert_eq!(target.latency.max, Duration::from_millis(300));

        // Verify time fields
        assert!(target.reset_before_date.is_none());
        assert!(target.last_online.is_none());
    }

    #[test]
    fn test_bucket_target_json_serialize_deserialize_roundtrip() {
        let original = BucketTarget {
            source_bucket: "test-source".to_string(),
            endpoint: "rustfs.example.com".to_string(),
            credentials: Some(Credentials {
                access_key: "rustfsaccess".to_string(),
                secret_key: "rustfssecret".to_string(),
                session_token: None,
                expiration: None,
            }),
            target_bucket: "test-target".to_string(),
            secure: false,
            path: "/".to_string(),
            api: "s3v4".to_string(),
            arn: "arn:rustfs:s3:::test-target".to_string(),
            target_type: BucketTargetType::ReplicationService,
            region: "us-west-2".to_string(),
            bandwidth_limit: 500000,
            replication_sync: false,
            storage_class: "REDUCED_REDUNDANCY".to_string(),
            health_check_duration: Duration::from_secs(60),
            disable_proxy: true,
            reset_before_date: Some(OffsetDateTime::now_utc()),
            reset_id: "reset-456".to_string(),
            total_downtime: Duration::from_secs(1800),
            last_online: Some(OffsetDateTime::now_utc()),
            online: false,
            latency: LatencyStat {
                curr: Duration::from_millis(250),
                avg: Duration::from_millis(200),
                max: Duration::from_millis(500),
            },
            deployment_id: "deploy-789".to_string(),
            edge: true,
            edge_sync_before_expiry: false,
            offline_count: 10,
        };

        // Serialize to JSON
        let json = serde_json::to_string(&original).expect("Failed to serialize to JSON");

        // Deserialize from JSON
        let deserialized: BucketTarget = serde_json::from_str(&json).expect("Failed to deserialize from JSON");

        // Verify key fields are equal
        assert_eq!(original.source_bucket, deserialized.source_bucket);
        assert_eq!(original.endpoint, deserialized.endpoint);
        assert_eq!(original.target_bucket, deserialized.target_bucket);
        assert_eq!(original.secure, deserialized.secure);
        assert_eq!(original.target_type, deserialized.target_type);
        assert_eq!(original.region, deserialized.region);
        assert_eq!(original.bandwidth_limit, deserialized.bandwidth_limit);
        assert_eq!(original.replication_sync, deserialized.replication_sync);
        assert_eq!(original.health_check_duration, deserialized.health_check_duration);
        assert_eq!(original.online, deserialized.online);
        assert_eq!(original.edge, deserialized.edge);
        assert_eq!(original.offline_count, deserialized.offline_count);
    }

    #[test]
    fn test_bucket_target_type_json_deserialize() {
        // Test BucketTargetType JSON deserialization
        let replication_json = r#""replication""#;
        let ilm_json = r#""ilm""#;

        let replication_type: BucketTargetType =
            serde_json::from_str(replication_json).expect("Failed to deserialize replication type");
        let ilm_type: BucketTargetType = serde_json::from_str(ilm_json).expect("Failed to deserialize ilm type");

        assert_eq!(replication_type, BucketTargetType::ReplicationService);
        assert_eq!(ilm_type, BucketTargetType::IlmService);

        // Verify type validity
        assert!(replication_type.is_valid());
        assert!(ilm_type.is_valid());
        assert!(!BucketTargetType::None.is_valid());
    }

    #[test]
    fn test_credentials_json_deserialize() {
        let json = r#"
        {
            "accessKey": "AKIAIOSFODNN7EXAMPLE",
            "secretKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
            "session_token": "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT",
            "expiration": "2024-12-31T23:59:59Z"
        }
        "#;

        let credentials: Credentials = serde_json::from_str(json).expect("Failed to deserialize credentials");

        assert_eq!(credentials.access_key, "AKIAIOSFODNN7EXAMPLE");
        assert_eq!(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY");
        assert_eq!(
            credentials.session_token,
            Some("AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT".to_string())
        );
        assert!(credentials.expiration.is_some());
    }

    #[test]
    fn test_latency_stat_json_deserialize() {
        let json = r#"
        {
            "curr": 50,
            "avg": 75,
            "max": 200
        }
        "#;

        let latency: LatencyStat = serde_json::from_str(json).expect("Failed to deserialize latency stat");

        assert_eq!(latency.curr, Duration::from_millis(50));
        assert_eq!(latency.avg, Duration::from_millis(75));
        assert_eq!(latency.max, Duration::from_millis(200));
    }

    #[test]
    fn test_bucket_targets_json_deserialize() {
        let json = r#"
        {
            "targets": [
                {
                    "sourcebucket": "bucket1",
                    "endpoint": "s3.amazonaws.com",
                    "targetbucket": "target1",
                    "secure": true,
                    "path": "/",
                    "api": "s3v4",
                    "arn": "arn:aws:s3:::target1",
                    "type": "replication",
                    "region": "us-east-1",
                    "bandwidth_limit": 0,
                    "replicationSync": false,
                    "storage_class": "",
                    "healthCheckDuration": 0,
                    "disableProxy": false,
                    "resetBeforeDate": null,
                    "reset_id": "",
                    "totalDowntime": 0,
                    "lastOnline": null,
                    "isOnline": false,
                    "latency": {
                        "curr": 0,
                        "avg": 0,
                        "max": 0
                    },
                    "deployment_id": "",
                    "edge": false,
                    "edgeSyncBeforeExpiry": false,
                    "offlineCount": 0
                }
            ]
        }
        "#;

        let targets: BucketTargets = serde_json::from_str(json).expect("Failed to deserialize bucket targets");

        assert_eq!(targets.targets.len(), 1);
        assert_eq!(targets.targets[0].source_bucket, "bucket1");
        assert_eq!(targets.targets[0].endpoint, "s3.amazonaws.com");
        assert_eq!(targets.targets[0].target_bucket, "target1");
        assert!(!targets.is_empty());
    }

    #[test]
    fn test_user_provided_json_deserialize() {
        // Test the specific JSON provided by the user with missing required fields added
        let json = r#"
        {
            "sourcebucket": "mc-test-bucket-22139",
            "endpoint": "localhost:8000",
            "credentials": {
                "accessKey": "rustfsadmin",
                "secretKey": "rustfsadmin",
                "expiration": "0001-01-01T00:00:00Z"
            },
            "targetbucket": "test",
            "secure": false,
            "path": "auto",
            "api": "s3v4",
            "type": "replication",
            "replicationSync": false,
            "healthCheckDuration": 60,
            "disableProxy": false,
            "resetBeforeDate": "0001-01-01T00:00:00Z",
            "totalDowntime": 0,
            "lastOnline": "0001-01-01T00:00:00Z",
            "isOnline": false,
            "latency": {
                "curr": 0,
                "avg": 0,
                "max": 0
            },
            "deployment_id": "",
            "edge": false,
            "edgeSyncBeforeExpiry": false,
            "offlineCount": 0,
            "bandwidth": 107374182400
        }
        "#;

        let target: BucketTarget = serde_json::from_str(json).expect("Failed to deserialize user provided JSON to BucketTarget");

        // Verify the deserialized values match the original JSON
        assert_eq!(target.source_bucket, "mc-test-bucket-22139");
        assert_eq!(target.endpoint, "localhost:8000");
        assert_eq!(target.target_bucket, "test");
        assert!(!target.secure);
        assert_eq!(target.path, "auto");
        assert_eq!(target.api, "s3v4");
        assert_eq!(target.target_type, BucketTargetType::ReplicationService);
        assert!(!target.replication_sync);
        assert_eq!(target.health_check_duration, Duration::from_secs(60));
        assert!(!target.disable_proxy);
        assert!(!target.online);
        assert!(!target.edge);
        assert!(!target.edge_sync_before_expiry);
        assert_eq!(target.bandwidth_limit, 107374182400); // bandwidth field mapped to bandwidth_limit

        // Verify credentials
        assert!(target.credentials.is_some());
        let credentials = target.credentials.unwrap();
        assert_eq!(credentials.access_key, "rustfsadmin");
        assert_eq!(credentials.secret_key, "rustfsadmin");

        // Verify latency statistics
        assert_eq!(target.latency.curr, Duration::from_millis(0));
        assert_eq!(target.latency.avg, Duration::from_millis(0));
        assert_eq!(target.latency.max, Duration::from_millis(0));

        // Verify time fields parsing: the zero time "0001-01-01T00:00:00Z" still parses to Some(..)
        assert!(target.reset_before_date.is_some());
        assert!(target.last_online.is_some());

        println!("✅ User provided JSON successfully deserialized to BucketTarget");
    }

    #[test]
    fn test_user_provided_json_as_bucket_targets() {
        // Test wrapping the user JSON in BucketTargets structure
        let json = r#"
        {
            "targets": [
                {
                    "sourcebucket": "mc-test-bucket-22139",
                    "endpoint": "localhost:8000",
                    "credentials": {
                        "accessKey": "rustfsadmin",
                        "secretKey": "rustfsadmin",
                        "expiration": "0001-01-01T00:00:00Z"
                    },
                    "targetbucket": "test",
                    "secure": false,
                    "path": "auto",
                    "api": "s3v4",
                    "arn": "",
                    "type": "replication",
                    "region": "",
                    "replicationSync": false,
                    "storage_class": "",
                    "healthCheckDuration": 60,
                    "disableProxy": false,
                    "resetBeforeDate": "0001-01-01T00:00:00Z",
                    "reset_id": "",
                    "totalDowntime": 0,
                    "lastOnline": "0001-01-01T00:00:00Z",
                    "isOnline": false,
                    "latency": {
                        "curr": 0,
                        "avg": 0,
                        "max": 0
                    },
                    "deployment_id": "",
                    "edge": false,
                    "edgeSyncBeforeExpiry": false,
                    "offlineCount": 0,
                    "bandwidth": 107374182400
                }
            ]
        }
        "#;

        let bucket_targets: BucketTargets =
            serde_json::from_str(json).expect("Failed to deserialize user provided JSON to BucketTargets");

        assert_eq!(bucket_targets.targets.len(), 1);
        assert!(!bucket_targets.is_empty());

        let target = &bucket_targets.targets[0];
        assert_eq!(target.source_bucket, "mc-test-bucket-22139");
        assert_eq!(target.endpoint, "localhost:8000");
        assert_eq!(target.target_bucket, "test");
        assert_eq!(target.bandwidth_limit, 107374182400);

        println!("✅ User provided JSON successfully deserialized to BucketTargets");
    }

    #[test]
    fn test_bucket_target_minimal_json_with_defaults() {
        // Test that BucketTarget can be deserialized with minimal JSON using defaults
        let minimal_json = r#"
        {
            "sourcebucket": "test-source",
            "endpoint": "localhost:9000",
            "targetbucket": "test-target"
        }
        "#;

        let target: BucketTarget =
            serde_json::from_str(minimal_json).expect("Failed to deserialize minimal JSON to BucketTarget");

        // Verify required fields
        assert_eq!(target.source_bucket, "test-source");
        assert_eq!(target.endpoint, "localhost:9000");
        assert_eq!(target.target_bucket, "test-target");

        // Verify default values
        assert!(!target.secure); // bool default is false
        assert_eq!(target.path, ""); // String default is empty
        assert_eq!(target.api, ""); // String default is empty
        assert_eq!(target.arn, ""); // String default is empty
        assert_eq!(target.target_type, BucketTargetType::None); // enum default
        assert_eq!(target.region, ""); // String default is empty
        assert_eq!(target.bandwidth_limit, 0); // i64 default is 0
        assert!(!target.replication_sync); // bool default is false
        assert_eq!(target.storage_class, ""); // String default is empty
        assert_eq!(target.health_check_duration, Duration::from_secs(0)); // Duration default
        assert!(!target.disable_proxy); // bool default is false
        assert!(target.reset_before_date.is_none()); // Option default is None
        assert_eq!(target.reset_id, ""); // String default is empty
        assert_eq!(target.total_downtime, Duration::from_secs(0)); // Duration default
        assert!(target.last_online.is_none()); // Option default is None
        assert!(!target.online); // bool default is false
        assert_eq!(target.latency.curr, Duration::from_millis(0)); // LatencyStat default
        assert_eq!(target.latency.avg, Duration::from_millis(0));
        assert_eq!(target.latency.max, Duration::from_millis(0));
        assert_eq!(target.deployment_id, ""); // String default is empty
        assert!(!target.edge); // bool default is false
        assert!(!target.edge_sync_before_expiry); // bool default is false
        assert_eq!(target.offline_count, 0); // u64 default is 0
        assert!(target.credentials.is_none()); // Option default is None

        println!("✅ Minimal JSON with defaults successfully deserialized to BucketTarget");
    }

    #[test]
    fn test_bucket_target_empty_json_with_defaults() {
        // Test that BucketTarget can be deserialized with completely empty JSON using all defaults
        let empty_json = r#"{}"#;

        let target: BucketTarget = serde_json::from_str(empty_json).expect("Failed to deserialize empty JSON to BucketTarget");

        // Verify all fields use default values
        assert_eq!(target.source_bucket, "");
        assert_eq!(target.endpoint, "");
        assert_eq!(target.target_bucket, "");
        assert!(!target.secure);
        assert_eq!(target.path, "");
        assert_eq!(target.api, "");
        assert_eq!(target.arn, "");
        assert_eq!(target.target_type, BucketTargetType::None);
        assert_eq!(target.region, "");
        assert_eq!(target.bandwidth_limit, 0);
        assert!(!target.replication_sync);
        assert_eq!(target.storage_class, "");
        assert_eq!(target.health_check_duration, Duration::from_secs(0));
        assert!(!target.disable_proxy);
        assert!(target.reset_before_date.is_none());
        assert_eq!(target.reset_id, "");
        assert_eq!(target.total_downtime, Duration::from_secs(0));
        assert!(target.last_online.is_none());
        assert!(!target.online);
        assert_eq!(target.latency.curr, Duration::from_millis(0));
        assert_eq!(target.latency.avg, Duration::from_millis(0));
        assert_eq!(target.latency.max, Duration::from_millis(0));
        assert_eq!(target.deployment_id, "");
        assert!(!target.edge);
        assert!(!target.edge_sync_before_expiry);
        assert_eq!(target.offline_count, 0);
        assert!(target.credentials.is_none());

        println!("✅ Empty JSON with all defaults successfully deserialized to BucketTarget");
    }

    #[test]
    fn test_original_user_json_with_defaults() {
        // Test the original user JSON without extra required fields
        let json = r#"
        {
            "sourcebucket": "mc-test-bucket-22139",
            "endpoint": "localhost:8000",
            "credentials": {
                "accessKey": "rustfsadmin",
                "secretKey": "rustfsadmin",
                "expiration": "0001-01-01T00:00:00Z"
            },
            "targetbucket": "test",
            "secure": false,
            "path": "auto",
            "api": "s3v4",
            "type": "replication",
            "replicationSync": false,
            "healthCheckDuration": 60,
            "disableProxy": false,
            "resetBeforeDate": "0001-01-01T00:00:00Z",
            "totalDowntime": 0,
            "lastOnline": "0001-01-01T00:00:00Z",
            "isOnline": false,
            "latency": {
                "curr": 0,
                "avg": 0,
                "max": 0
            },
            "edge": false,
            "edgeSyncBeforeExpiry": false,
            "bandwidth": 107374182400
        }
        "#;

        let target: BucketTarget = serde_json::from_str(json).expect("Failed to deserialize original user JSON to BucketTarget");

        // Verify the deserialized values
        assert_eq!(target.source_bucket, "mc-test-bucket-22139");
        assert_eq!(target.endpoint, "localhost:8000");
        assert_eq!(target.target_bucket, "test");
        assert!(!target.secure);
        assert_eq!(target.path, "auto");
        assert_eq!(target.api, "s3v4");
        assert_eq!(target.target_type, BucketTargetType::ReplicationService);
        assert!(!target.replication_sync);
        assert_eq!(target.health_check_duration, Duration::from_secs(60));
        assert!(!target.disable_proxy);
        assert!(!target.online);
        assert!(!target.edge);
        assert!(!target.edge_sync_before_expiry);
        assert_eq!(target.bandwidth_limit, 107374182400);

        // Fields not specified should use defaults
        assert_eq!(target.arn, ""); // default empty string
        assert_eq!(target.region, ""); // default empty string
        assert_eq!(target.storage_class, ""); // default empty string
        assert_eq!(target.reset_id, ""); // default empty string
        assert_eq!(target.deployment_id, ""); // default empty string
        assert_eq!(target.offline_count, 0); // default u64

        // Verify credentials
        assert!(target.credentials.is_some());
        let credentials = target.credentials.unwrap();
        assert_eq!(credentials.access_key, "rustfsadmin");
        assert_eq!(credentials.secret_key, "rustfsadmin");

        println!("✅ Original user JSON with defaults successfully deserialized to BucketTarget");
    }
}

@@ -12,124 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::Result;
use rmp_serde::Serializer as rmpSerializer;
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;
mod arn;
mod bucket_target;

#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct Credentials {
    #[serde(rename = "accessKey")]
    pub access_key: String,
    #[serde(rename = "secretKey")]
    pub secret_key: String,
    pub session_token: Option<String>,
    pub expiration: Option<chrono::DateTime<chrono::Utc>>,
}

#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub enum ServiceType {
    #[default]
    Replication,
}

#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct LatencyStat {
    curr: u64, // current latency
    avg: u64,  // average latency
    max: u64,  // maximum latency
}

// Define BucketTarget struct
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct BucketTarget {
    #[serde(rename = "sourcebucket")]
    pub source_bucket: String,

    pub endpoint: String,

    pub credentials: Option<Credentials>,
    #[serde(rename = "targetbucket")]
    pub target_bucket: String,

    secure: bool,
    pub path: Option<String>,

    api: Option<String>,

    pub arn: Option<String>,
    #[serde(rename = "type")]
    pub type_: Option<String>,

    pub region: Option<String>,

    bandwidth_limit: Option<i64>,

    #[serde(rename = "replicationSync")]
    replication_sync: bool,

    storage_class: Option<String>,
    #[serde(rename = "healthCheckDuration")]
    health_check_duration: u64,
    #[serde(rename = "disableProxy")]
    disable_proxy: bool,

    #[serde(rename = "resetBeforeDate")]
    reset_before_date: String,
    reset_id: Option<String>,
    #[serde(rename = "totalDowntime")]
    total_downtime: u64,

    last_online: Option<OffsetDateTime>,
    #[serde(rename = "isOnline")]
    online: bool,

    latency: Option<LatencyStat>,

    deployment_id: Option<String>,

    edge: bool,
    #[serde(rename = "edgeSyncBeforeExpiry")]
    edge_sync_before_expiry: bool,
}

impl BucketTarget {
    pub fn is_empty(self) -> bool {
        //self.target_bucket.is_empty() && self.endpoint.is_empty() && self.arn.is_empty()
        self.target_bucket.is_empty() && self.endpoint.is_empty() && self.arn.is_none()
    }
}

#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct BucketTargets {
    pub targets: Vec<BucketTarget>,
}

impl BucketTargets {
    pub fn marshal_msg(&self) -> Result<Vec<u8>> {
        let mut buf = Vec::new();

        self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?;

        Ok(buf)
    }

    pub fn unmarshal(buf: &[u8]) -> Result<Self> {
        let t: BucketTargets = rmp_serde::from_slice(buf)?;
        Ok(t)
    }

    pub fn is_empty(&self) -> bool {
        if self.targets.is_empty() {
            return true;
        }

        for target in &self.targets {
            if !target.clone().is_empty() {
                return false;
            }
        }

        true
    }
}
pub use arn::*;
pub use bucket_target::*;

@@ -17,7 +17,8 @@ use crate::disk::{self, DiskAPI, DiskStore, WalkDirOptions};
use futures::future::join_all;
use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetacacheReader, is_io_eof};
use std::{future::Future, pin::Pin, sync::Arc};
-use tokio::{spawn, sync::broadcast::Receiver as B_Receiver};
+use tokio::spawn;
+use tokio_util::sync::CancellationToken;
use tracing::{error, warn};

pub type AgreedFn = Box<dyn Fn(MetaCacheEntry) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
@@ -63,7 +64,7 @@ impl Clone for ListPathRawOptions {
    }
}

-pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -> disk::error::Result<()> {
+pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> disk::error::Result<()> {
    if opts.disks.is_empty() {
        return Err(DiskError::other("list_path_raw: 0 drives provided"));
    }
@@ -72,13 +73,13 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
    let mut readers = Vec::with_capacity(opts.disks.len());
    let fds = Arc::new(opts.fallback_disks.clone());

-    let (cancel_tx, cancel_rx) = tokio::sync::broadcast::channel::<bool>(1);
+    let cancel_rx = CancellationToken::new();

    for disk in opts.disks.iter() {
        let opdisk = disk.clone();
        let opts_clone = opts.clone();
        let fds_clone = fds.clone();
-        let mut cancel_rx_clone = cancel_rx.resubscribe();
+        let cancel_rx_clone = cancel_rx.clone();
        let (rd, mut wr) = tokio::io::duplex(64);
        readers.push(MetacacheReader::new(rd));
        jobs.push(spawn(async move {
@@ -106,7 +107,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
                need_fallback = true;
            }

-            if cancel_rx_clone.try_recv().is_ok() {
+            if cancel_rx_clone.is_cancelled() {
                // warn!("list_path_raw: cancel_rx_clone.try_recv().await.is_ok()");
                return Ok(());
            }
@@ -173,7 +174,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
            // opts.bucket, opts.path, &current.name
            // );

-            if rx.try_recv().is_ok() {
+            if rx.is_cancelled() {
                return Err(DiskError::other("canceled"));
            }

@@ -351,7 +352,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -

    if let Err(err) = revjob.await.map_err(std::io::Error::other)? {
        error!("list_path_raw: revjob err {:?}", err);
-        let _ = cancel_tx.send(true);
+        cancel_rx.cancel();

        return Err(err);
    }
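
// The hunks above swap a tokio broadcast channel for tokio_util's
// CancellationToken. A minimal standalone sketch of the pattern (illustrative
// only; the names here are hypothetical, not from the diff):
//
// use tokio_util::sync::CancellationToken;
//
// #[tokio::main]
// async fn main() {
//     let token = CancellationToken::new();
//     let child = token.clone(); // clone() replaces broadcast resubscribe()
//     let worker = tokio::spawn(async move {
//         loop {
//             if child.is_cancelled() {
//                 return; // cooperative exit, as in list_path_raw
//             }
//             tokio::task::yield_now().await;
//         }
//     });
//     token.cancel(); // replaces cancel_tx.send(true)
//     worker.await.unwrap();
// }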

@@ -44,6 +44,8 @@ pub struct GetObjectOptions {
    pub internal: AdvancedGetOptions,
}

+pub type StatObjectOptions = GetObjectOptions;
+
impl Default for GetObjectOptions {
    fn default() -> Self {
        Self {

@@ -46,11 +46,11 @@ pub struct RemoveBucketOptions {
#[derive(Debug)]
#[allow(dead_code)]
pub struct AdvancedRemoveOptions {
-    replication_delete_marker: bool,
-    replication_status: ReplicationStatus,
-    replication_mtime: OffsetDateTime,
-    replication_request: bool,
-    replication_validity_check: bool,
+    pub replication_delete_marker: bool,
+    pub replication_status: ReplicationStatus,
+    pub replication_mtime: Option<OffsetDateTime>,
+    pub replication_request: bool,
+    pub replication_validity_check: bool,
}

impl Default for AdvancedRemoveOptions {
@@ -58,7 +58,7 @@ impl Default for AdvancedRemoveOptions {
        Self {
            replication_delete_marker: false,
            replication_status: ReplicationStatus::from_static(ReplicationStatus::PENDING),
-            replication_mtime: OffsetDateTime::now_utc(),
+            replication_mtime: None,
            replication_request: false,
            replication_validity_check: false,
        }
@@ -140,8 +140,7 @@ impl TransitionClient {
    }

    pub async fn remove_object(&self, bucket_name: &str, object_name: &str, opts: RemoveObjectOptions) -> Option<std::io::Error> {
-        let res = self.remove_object_inner(bucket_name, object_name, opts).await.expect("err");
-        res.err
+        self.remove_object_inner(bucket_name, object_name, opts).await.err()
    }

    pub async fn remove_object_inner(

@@ -23,6 +23,7 @@ use http::{HeaderMap, HeaderValue};
use rustfs_utils::EMPTY_STRING_SHA256_HASH;
use std::{collections::HashMap, str::FromStr};
use tokio::io::BufReader;
+use tracing::warn;
use uuid::Uuid;

use crate::client::{
@@ -30,7 +31,10 @@ use crate::client::{
    api_get_options::GetObjectOptions,
    transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info},
};
-use s3s::header::{X_AMZ_DELETE_MARKER, X_AMZ_VERSION_ID};
+use s3s::{
+    dto::VersioningConfiguration,
+    header::{X_AMZ_DELETE_MARKER, X_AMZ_VERSION_ID},
+};

impl TransitionClient {
    pub async fn bucket_exists(&self, bucket_name: &str) -> Result<bool, std::io::Error> {
@@ -58,8 +62,14 @@ impl TransitionClient {
            .await;

        if let Ok(resp) = resp {
            if resp.status() != http::StatusCode::OK {
                return Ok(false);
            }

            let b = resp.body().bytes().expect("err").to_vec();
            let resperr = http_resp_to_error_response(&resp, b, bucket_name, "");

            warn!("bucket exists, resp: {:?}, resperr: {:?}", resp, resperr);
            /*if to_error_response(resperr).code == "NoSuchBucket" {
                return Ok(false);
            }
@@ -70,6 +80,46 @@ impl TransitionClient {
        Ok(true)
    }

    pub async fn get_bucket_versioning(&self, bucket_name: &str) -> Result<VersioningConfiguration, std::io::Error> {
        let mut query_values = HashMap::new();
        query_values.insert("versioning".to_string(), "".to_string());
        let resp = self
            .execute_method(
                http::Method::GET,
                &mut RequestMetadata {
                    bucket_name: bucket_name.to_string(),
                    object_name: "".to_string(),
                    query_values,
                    custom_header: HeaderMap::new(),
                    content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
                    content_md5_base64: "".to_string(),
                    content_body: ReaderImpl::Body(Bytes::new()),
                    content_length: 0,
                    stream_sha256: false,
                    trailer: HeaderMap::new(),
                    pre_sign_url: Default::default(),
                    add_crc: Default::default(),
                    extra_pre_sign_header: Default::default(),
                    bucket_location: Default::default(),
                    expires: Default::default(),
                },
            )
            .await;

        match resp {
            Ok(resp) => {
                let b = resp.body().bytes().expect("get bucket versioning err").to_vec();
                let resperr = http_resp_to_error_response(&resp, b, bucket_name, "");

                warn!("get bucket versioning, resp: {:?}, resperr: {:?}", resp, resperr);

                Ok(VersioningConfiguration::default())
            }

            Err(err) => Err(std::io::Error::other(err)),
        }
    }
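
// Illustrative only (not part of this diff): the new helper issues a GET on
// the `?versioning` subresource. Note that in its current form it logs the
// response and always returns the default (empty) configuration rather than
// parsing the body.
//
// let cfg = client.get_bucket_versioning("my-bucket").await?;
// println!("versioning config: {cfg:?}");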

    pub async fn stat_object(
        &self,
        bucket_name: &str,
@@ -131,24 +181,20 @@ impl TransitionClient {
                ..Default::default()
            };
            return Ok(ObjectInfo {
-                version_id: match Uuid::from_str(h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap()) {
-                    Ok(v) => v,
-                    Err(e) => {
-                        return Err(std::io::Error::other(e));
-                    }
-                },
+                version_id: h
+                    .get(X_AMZ_VERSION_ID)
+                    .and_then(|v| v.to_str().ok())
+                    .and_then(|s| Uuid::from_str(s).ok()),
                is_delete_marker: delete_marker,
                ..Default::default()
            });
            //err_resp
        }
        return Ok(ObjectInfo {
-            version_id: match Uuid::from_str(h.get(X_AMZ_VERSION_ID).unwrap().to_str().unwrap()) {
-                Ok(v) => v,
-                Err(e) => {
-                    return Err(std::io::Error::other(e));
-                }
-            },
+            version_id: h
+                .get(X_AMZ_VERSION_ID)
+                .and_then(|v| v.to_str().ok())
+                .and_then(|s| Uuid::from_str(s).ok()),
            is_delete_marker: delete_marker,
            replication_ready: replication_ready,
            ..Default::default()

@@ -36,6 +36,7 @@ use s3s::S3ErrorCode;
use super::constants::UNSIGNED_PAYLOAD;
use super::credentials::SignatureType;

+#[derive(Debug, Clone)]
pub struct BucketLocationCache {
    items: HashMap<String, String>,
}

@@ -148,10 +148,75 @@ pub fn new_getobjectreader(
    Ok((get_fn, off as i64, length as i64))
}

/// Format an ETag value according to HTTP standards (wrap with quotes if not already wrapped)
pub fn format_etag(etag: &str) -> String {
    if etag.starts_with('"') && etag.ends_with('"') {
        // Already properly formatted
        etag.to_string()
    } else if etag.starts_with("W/\"") && etag.ends_with('"') {
        // Already a weak ETag, properly formatted
        etag.to_string()
    } else {
        // Need to wrap with quotes
        format!("\"{}\"", etag)
    }
}

pub fn extract_etag(metadata: &HashMap<String, String>) -> String {
-    if let Some(etag) = metadata.get("etag") {
+    let etag = if let Some(etag) = metadata.get("etag") {
        etag.clone()
    } else {
        metadata["md5Sum"].clone()
    };
    format_etag(&etag)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_format_etag() {
        // Test unquoted ETag - should add quotes
        assert_eq!(format_etag("6af8d12c0c74b78094884349f3c8a079"), "\"6af8d12c0c74b78094884349f3c8a079\"");

        // Test already quoted ETag - should not double quote
        assert_eq!(
            format_etag("\"6af8d12c0c74b78094884349f3c8a079\""),
            "\"6af8d12c0c74b78094884349f3c8a079\""
        );

        // Test weak ETag - should keep as is
        assert_eq!(
            format_etag("W/\"6af8d12c0c74b78094884349f3c8a079\""),
            "W/\"6af8d12c0c74b78094884349f3c8a079\""
        );

        // Test empty ETag - should add quotes
        assert_eq!(format_etag(""), "\"\"");

        // Test malformed quote (only starting quote) - should wrap properly
        assert_eq!(format_etag("\"incomplete"), "\"\"incomplete\"");

        // Test malformed quote (only ending quote) - should wrap properly
        assert_eq!(format_etag("incomplete\""), "\"incomplete\"\"");
    }

    #[test]
    fn test_extract_etag() {
        let mut metadata = HashMap::new();

        // Test with etag field
        metadata.insert("etag".to_string(), "abc123".to_string());
        assert_eq!(extract_etag(&metadata), "\"abc123\"");

        // Test with already quoted etag field
        metadata.insert("etag".to_string(), "\"def456\"".to_string());
        assert_eq!(extract_etag(&metadata), "\"def456\"");

        // Test fallback to md5Sum
        metadata.remove("etag");
        metadata.insert("md5Sum".to_string(), "xyz789".to_string());
        assert_eq!(extract_etag(&metadata), "\"xyz789\"");
    }
}

@@ -89,6 +89,7 @@ pub enum ReaderImpl {

pub type ReadCloser = BufReader<Cursor<Vec<u8>>>;

+#[derive(Debug)]
pub struct TransitionClient {
    pub endpoint_url: Url,
    pub creds_provider: Arc<Mutex<Credentials<Static>>>,
@@ -809,6 +810,7 @@ impl TransitionCore {
    }
}

+#[derive(Debug, Clone, Default)]
pub struct PutObjectPartOptions {
    pub md5_base64: String,
    pub sha256_hex: String,
@@ -820,23 +822,23 @@ pub struct PutObjectPartOptions {

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ObjectInfo {
-    pub etag: String,
+    pub etag: Option<String>,
    pub name: String,
-    pub mod_time: OffsetDateTime,
-    pub size: usize,
+    pub mod_time: Option<OffsetDateTime>,
+    pub size: i64,
    pub content_type: Option<String>,
    #[serde(skip)]
    pub metadata: HeaderMap,
    pub user_metadata: HashMap<String, String>,
    pub user_tags: String,
-    pub user_tag_count: i64,
+    pub user_tag_count: usize,
    #[serde(skip)]
    pub owner: Owner,
    //pub grant: Vec<Grant>,
    pub storage_class: String,
    pub is_latest: bool,
    pub is_delete_marker: bool,
-    pub version_id: Uuid,
+    pub version_id: Option<Uuid>,

    #[serde(skip, default = "replication_status_default")]
    pub replication_status: ReplicationStatus,
@@ -862,9 +864,9 @@ fn replication_status_default() -> ReplicationStatus {
impl Default for ObjectInfo {
    fn default() -> Self {
        Self {
-            etag: "".to_string(),
+            etag: None,
            name: "".to_string(),
-            mod_time: OffsetDateTime::now_utc(),
+            mod_time: None,
            size: 0,
            content_type: None,
            metadata: HeaderMap::new(),
@@ -875,7 +877,7 @@ impl Default for ObjectInfo {
            storage_class: "".to_string(),
            is_latest: false,
            is_delete_marker: false,
-            version_id: Uuid::nil(),
+            version_id: None,
            replication_status: ReplicationStatus::from_static(ReplicationStatus::PENDING),
            replication_ready: false,
            expiration: OffsetDateTime::now_utc(),

File diff suppressed because it is too large
@@ -1,69 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use chrono::{DateTime, Utc};

// Representation of the replication status
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum StatusType {
    Pending,
    Completed,
    CompletedLegacy,
    Failed,
    Replica,
}

// Representation of version purge status type (customize as needed)
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum VersionPurgeStatusType {
    Pending,
    Completed,
    Failed,
}

// ReplicationState struct definition
#[derive(Debug, Clone)]
pub struct ReplicationState {
    // Timestamp when the last replica update was received
    pub replica_time_stamp: DateTime<Utc>,

    // Replica status
    pub replica_status: StatusType,

    // Represents DeleteMarker replication state
    pub delete_marker: bool,

    // Timestamp when the last replication activity happened
    pub replication_time_stamp: DateTime<Utc>,

    // Stringified representation of all replication activity
    pub replication_status_internal: String,

    // Stringified representation of all version purge statuses
    // Example format: "arn1=PENDING;arn2=COMPLETED;"
    pub version_purge_status_internal: String,

    // Stringified representation of replication decision for each target
    pub replicate_decision_str: String,

    // Map of ARN -> replication status for ongoing replication activity
    pub targets: HashMap<String, StatusType>,

    // Map of ARN -> VersionPurgeStatus for all the targets
    pub purge_targets: HashMap<String, VersionPurgeStatusType>,

    // Map of ARN -> stringified reset id and timestamp for all the targets
    pub reset_statuses_map: HashMap<String, String>,
}
|
||||
@@ -1,890 +0,0 @@
|
||||
#![allow(unused_variables)]
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(dead_code)]
|
||||
use crate::{
|
||||
StorageAPI,
|
||||
bucket::{metadata_sys, target::BucketTarget},
|
||||
endpoints::Node,
|
||||
rpc::{PeerS3Client, RemotePeerS3Client},
|
||||
};
|
||||
use crate::{
|
||||
bucket::{self, target::BucketTargets},
|
||||
new_object_layer_fn, store_api,
|
||||
};
|
||||
//use tokio::sync::RwLock;
|
||||
use aws_sdk_s3::Client as S3Client;
|
||||
use chrono::Utc;
|
||||
use lazy_static::lazy_static;
|
||||
use std::sync::Arc;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
use thiserror::Error;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
pub struct TClient {
|
||||
pub s3cli: S3Client,
|
||||
pub remote_peer_client: RemotePeerS3Client,
|
||||
pub arn: String,
|
||||
}
|
||||
impl TClient {
|
||||
pub fn new(s3cli: S3Client, remote_peer_client: RemotePeerS3Client, arn: String) -> Self {
|
||||
TClient {
|
||||
s3cli,
|
||||
remote_peer_client,
|
||||
arn,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct EpHealth {
|
||||
pub endpoint: String,
|
||||
pub scheme: String,
|
||||
pub online: bool,
|
||||
pub last_online: SystemTime,
|
||||
pub last_hc_at: SystemTime,
|
||||
pub offline_duration: Duration,
|
||||
pub latency: LatencyStat, // Assuming LatencyStat is a custom struct
|
||||
}
|
||||
|
||||
impl EpHealth {
|
||||
pub fn new(
|
||||
endpoint: String,
|
||||
scheme: String,
|
||||
online: bool,
|
||||
last_online: SystemTime,
|
||||
last_hc_at: SystemTime,
|
||||
offline_duration: Duration,
|
||||
latency: LatencyStat,
|
||||
) -> Self {
|
||||
EpHealth {
|
||||
endpoint,
|
||||
scheme,
|
||||
online,
|
||||
last_online,
|
||||
last_hc_at,
|
||||
offline_duration,
|
||||
latency,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct LatencyStat {
|
||||
// Define the fields of LatencyStat as per your requirements
|
||||
}
|
||||
|
||||
pub struct ArnTarget {
|
||||
client: TargetClient,
|
||||
last_refresh: chrono::DateTime<Utc>,
|
||||
}
|
||||
impl ArnTarget {
|
||||
pub fn new(bucket: String, endpoint: String, ak: String, sk: String) -> Self {
|
||||
Self {
|
||||
client: TargetClient {
|
||||
bucket,
|
||||
storage_class: "STANDARD".to_string(),
|
||||
disable_proxy: false,
|
||||
health_check_duration: Duration::from_secs(100),
|
||||
endpoint,
|
||||
reset_id: "0".to_string(),
|
||||
replicate_sync: false,
|
||||
secure: false,
|
||||
arn: "".to_string(),
|
||||
client: reqwest::Client::new(),
|
||||
ak,
|
||||
sk,
|
||||
},
|
||||
last_refresh: Utc::now(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// pub fn get_s3client_from_para(
|
||||
// ak: &str,
|
||||
// sk: &str,
|
||||
// url: &str,
|
||||
// _region: &str,
|
||||
// ) -> Result<S3Client, Box<dyn Error>> {
|
||||
// let credentials = Credentials::new(ak, sk, None, None, "");
|
||||
// let region = Region::new("us-east-1".to_string());
|
||||
|
||||
// let config = Config::builder()
|
||||
// .region(region)
|
||||
// .endpoint_url(url.to_string())
|
||||
// .credentials_provider(credentials)
|
||||
// .behavior_version(BehaviorVersion::latest()) // Adjust as necessary
|
||||
// .build();
|
||||
// Ok(S3Client::from_conf(config))
|
||||
// }
|
||||
|
||||
pub struct BucketTargetSys {
|
||||
arn_remote_map: Arc<RwLock<HashMap<String, ArnTarget>>>,
|
||||
targets_map: Arc<RwLock<HashMap<String, Vec<bucket::target::BucketTarget>>>>,
|
||||
hc: HashMap<String, EpHealth>,
|
||||
//store:Option<Arc<ecstore::store::ECStore>>,
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
pub static ref GLOBAL_Bucket_Target_Sys: std::sync::OnceLock<BucketTargetSys> = BucketTargetSys::new().into();
|
||||
}
|
||||
|
||||
//#[derive(Debug)]
|
||||
// pub enum SetTargetError {
|
||||
// NotFound,
|
||||
// }
|
||||
|
||||
pub async fn get_bucket_target_client(bucket: &str, arn: &str) -> Result<TargetClient, SetTargetError> {
|
||||
if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() {
|
||||
sys.get_remote_target_client2(arn).await
|
||||
} else {
|
||||
Err(SetTargetError::TargetNotFound(bucket.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BucketRemoteTargetNotFound {
|
||||
pub bucket: String,
|
||||
}
|
||||
|
||||
pub async fn init_bucket_targets(bucket: &str, meta: Arc<bucket::metadata::BucketMetadata>) {
|
||||
println!("140 {bucket}");
|
||||
if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() {
|
||||
if let Some(tgts) = meta.bucket_target_config.clone() {
|
||||
for tgt in tgts.targets {
|
||||
warn!("ak and sk is:{:?}", tgt.credentials);
|
||||
let _ = sys.set_target(bucket, &tgt, false, true).await;
|
||||
//sys.targets_map.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn remove_bucket_target(bucket: &str, arn_str: &str) {
|
||||
if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() {
|
||||
let _ = sys.remove_target(bucket, arn_str).await;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn list_bucket_targets(bucket: &str) -> Result<BucketTargets, BucketRemoteTargetNotFound> {
|
||||
if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() {
|
||||
sys.list_bucket_targets(bucket).await
|
||||
} else {
|
||||
Err(BucketRemoteTargetNotFound {
|
||||
bucket: bucket.to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for BucketTargetSys {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl BucketTargetSys {
|
||||
pub fn new() -> Self {
|
||||
BucketTargetSys {
|
||||
arn_remote_map: Arc::new(RwLock::new(HashMap::new())),
|
||||
targets_map: Arc::new(RwLock::new(HashMap::new())),
|
||||
hc: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn list_bucket_targets(&self, bucket: &str) -> Result<BucketTargets, BucketRemoteTargetNotFound> {
|
||||
let targets_map = self.targets_map.read().await;
|
||||
if let Some(targets) = targets_map.get(bucket) {
|
||||
Ok(BucketTargets {
|
||||
targets: targets.clone(),
|
||||
})
|
||||
} else {
|
||||
Err(BucketRemoteTargetNotFound {
|
||||
bucket: bucket.to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn list_targets(&self, bucket: Option<&str>, _arn_type: Option<&str>) -> Vec<BucketTarget> {
|
||||
let _ = _arn_type;
|
||||
//let health_stats = self.health_stats();
|
||||
|
||||
let mut targets = Vec::new();
|
||||
|
||||
if let Some(bucket_name) = bucket {
|
||||
if let Ok(ts) = self.list_bucket_targets(bucket_name).await {
|
||||
for t in ts.targets {
|
||||
//if arn_type.map_or(true, |arn| t.target_type == arn) {
|
||||
//if let Some(hs) = health_stats.get(&t.url().host) {
|
||||
// t.total_downtime = hs.offline_duration;
|
||||
// t.online = hs.online;
|
||||
// t.last_online = hs.last_online;
|
||||
// t.latency = LatencyStat {
|
||||
// curr: hs.latency.curr,
|
||||
// avg: hs.latency.avg,
|
||||
// max: hs.latency.peak,
|
||||
// };
|
||||
//}
|
||||
targets.push(t.clone());
|
||||
//}
|
||||
}
|
||||
}
|
||||
return targets;
|
||||
}
|
||||
|
||||
// Locking and iterating over all targets in the system
|
||||
let targets_map = self.targets_map.read().await;
|
||||
for tgts in targets_map.values() {
|
||||
for t in tgts {
|
||||
//if arn_type.map_or(true, |arn| t.target_type == arn) {
|
||||
// if let Some(hs) = health_stats.get(&t.url().host) {
|
||||
// t.total_downtime = hs.offline_duration;
|
||||
// t.online = hs.online;
|
||||
// t.last_online = hs.last_online;
|
||||
// t.latency = LatencyStat {
|
||||
// curr: hs.latency.curr,
|
||||
// avg: hs.latency.avg,
|
||||
// max: hs.latency.peak,
|
||||
// };
|
||||
// }
|
||||
targets.push(t.clone());
|
||||
//}
|
||||
}
|
||||
}
|
||||
|
||||
targets
|
||||
}
|
||||
|
||||
pub async fn remove_target(&self, bucket: &str, arn_str: &str) -> Result<(), SetTargetError> {
|
||||
//to do need lock;
|
||||
let mut targets_map = self.targets_map.write().await;
|
||||
let tgts = targets_map.get(bucket);
|
||||
let mut arn_remotes_map = self.arn_remote_map.write().await;
|
||||
if tgts.is_none() {
|
||||
//Err(SetTargetError::TargetNotFound(bucket.to_string()));
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let tgts = tgts.unwrap(); // 安全解引用
|
||||
let mut targets = Vec::with_capacity(tgts.len());
|
||||
let mut found = false;
|
||||
|
||||
// 遍历 targets,找出不匹配的 ARN
|
||||
for tgt in tgts {
|
||||
if tgt.arn != Some(arn_str.to_string()) {
|
||||
targets.push(tgt.clone()); // 克隆符合条件的项
|
||||
} else {
|
||||
found = true; // 找到匹配的 ARN
|
||||
}
|
||||
}
|
||||
|
||||
// 如果没有找到匹配的 ARN,则返回错误
|
||||
if !found {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// 更新 targets_map
|
||||
targets_map.insert(bucket.to_string(), targets);
|
||||
arn_remotes_map.remove(arn_str);
|
||||
|
||||
let targets = self.list_targets(Some(bucket), None).await;
|
||||
println!("targets is {}", targets.len());
|
||||
match serde_json::to_vec(&targets) {
|
||||
Ok(json) => {
|
||||
let _ = metadata_sys::update(bucket, "bucket-targets.json", json).await;
|
||||
}
|
||||
Err(e) => {
|
||||
println!("序列化失败{e}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn get_remote_arn(&self, bucket: &str, target: Option<&BucketTarget>, depl_id: &str) -> (Option<String>, bool) {
|
||||
if target.is_none() {
|
||||
return (None, false);
|
||||
}
|
||||
|
||||
let target = target.unwrap();
|
||||
|
||||
let targets_map = self.targets_map.read().await;
|
||||
|
||||
// 获取锁以访问 arn_remote_map
|
||||
let mut _arn_remotes_map = self.arn_remote_map.read().await;
|
||||
if let Some(tgts) = targets_map.get(bucket) {
|
||||
for tgt in tgts {
|
||||
if tgt.type_ == target.type_
|
||||
&& tgt.target_bucket == target.target_bucket
|
||||
&& tgt.endpoint == target.endpoint
|
||||
&& tgt.credentials.as_ref().unwrap().access_key == target.credentials.as_ref().unwrap().access_key
|
||||
{
|
||||
return (tgt.arn.clone(), true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if !target.type_.is_valid() {
|
||||
// return (None, false);
|
||||
// }
|
||||
|
||||
println!("generate_arn");
|
||||
|
||||
(Some(generate_arn(target.clone(), depl_id.to_string())), false)
|
||||
}
|
||||
|
||||
pub async fn get_remote_target_client2(&self, arn: &str) -> Result<TargetClient, SetTargetError> {
|
||||
let map = self.arn_remote_map.read().await;
|
||||
info!("get remote target client and arn is: {}", arn);
|
||||
if let Some(value) = map.get(arn) {
|
||||
let mut x = value.client.clone();
|
||||
x.arn = arn.to_string();
|
||||
Ok(x)
|
||||
} else {
|
||||
error!("not find target");
|
||||
Err(SetTargetError::TargetNotFound(arn.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
// pub async fn get_remote_target_client(&self, _tgt: &BucketTarget) -> Result<TargetClient, SetTargetError> {
|
||||
// // Mocked implementation for obtaining a remote client
|
||||
// let tcli = TargetClient {
|
||||
// bucket: _tgt.target_bucket.clone(),
|
||||
// storage_class: "STANDARD".to_string(),
|
||||
// disable_proxy: false,
|
||||
// health_check_duration: Duration::from_secs(100),
|
||||
// endpoint: _tgt.endpoint.clone(),
|
||||
// reset_id: "0".to_string(),
|
||||
// replicate_sync: false,
|
||||
// secure: false,
|
||||
// arn: "".to_string(),
|
||||
// client: reqwest::Client::new(),
|
||||
// ak: _tgt.
|
||||
|
||||
// };
|
||||
// Ok(tcli)
|
||||
// }
|
||||
// pub async fn get_remote_target_client_with_bucket(&self, _bucket: String) -> Result<TargetClient, SetTargetError> {
|
||||
// // Mocked implementation for obtaining a remote client
|
||||
// let tcli = TargetClient {
|
||||
// bucket: _tgt.target_bucket.clone(),
|
||||
// storage_class: "STANDARD".to_string(),
|
||||
// disable_proxy: false,
|
||||
// health_check_duration: Duration::from_secs(100),
|
||||
// endpoint: _tgt.endpoint.clone(),
|
||||
// reset_id: "0".to_string(),
|
||||
// replicate_sync: false,
|
||||
// secure: false,
|
||||
// arn: "".to_string(),
|
||||
// client: reqwest::Client::new(),
|
||||
// };
|
||||
// Ok(tcli)
|
||||
// }
|
||||
|
||||
async fn local_is_bucket_versioned(&self, _bucket: &str) -> bool {
|
||||
let Some(store) = new_object_layer_fn() else {
|
||||
return false;
|
||||
};
|
||||
//store.get_bucket_info(bucket, opts)
|
||||
|
||||
// let binfo:BucketInfo = store
|
||||
// .get_bucket_info(bucket, &ecstore::store_api::BucketOptions::default()).await;
|
||||
match store.get_bucket_info(_bucket, &store_api::BucketOptions::default()).await {
|
||||
Ok(info) => {
|
||||
println!("Bucket Info: {info:?}");
|
||||
info.versioning
|
||||
}
|
||||
Err(err) => {
|
||||
eprintln!("Error: {err:?}");
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn is_bucket_versioned(&self, _bucket: &str) -> bool {
|
||||
true
|
||||
// let url_str = "http://127.0.0.1:9001";
|
||||
|
||||
// // 转换为 Url 类型
|
||||
// let parsed_url = url::Url::parse(url_str).unwrap();
|
||||
|
||||
// let node = Node {
|
||||
// url: parsed_url,
|
||||
// pools: vec![],
|
||||
// is_local: false,
|
||||
// grid_host: "".to_string(),
|
||||
// };
|
||||
// let cli = ecstore::peer::RemotePeerS3Client::new(Some(node), None);
|
||||
|
||||
// match cli.get_bucket_info(_bucket, &ecstore::store_api::BucketOptions::default()).await
|
||||
// {
|
||||
// Ok(info) => {
|
||||
// println!("Bucket Info: {:?}", info);
|
||||
// info.versioning
|
||||
// }
|
||||
// Err(err) => {
|
||||
// eprintln!("Error: {:?}", err);
|
||||
// return false;
|
||||
// }
|
||||
// }
|
||||
}
|
||||
|
||||
pub async fn set_target(&self, bucket: &str, tgt: &BucketTarget, update: bool, fromdisk: bool) -> Result<(), SetTargetError> {
    // if !tgt.type_.is_valid() && !update {
    //     return Err(SetTargetError::InvalidTargetType(bucket.to_string()));
    // }

    // let client = self.get_remote_target_client(tgt).await?;

    // Replication targets require the source bucket to be versioned.
    if tgt.type_ == Some("replication".to_string()) && !fromdisk {
        let versioned = self.local_is_bucket_versioned(bucket).await;
        if !versioned {
            return Err(SetTargetError::TargetNotVersioned(bucket.to_string()));
        }
    }

    let url_str = format!("http://{}", tgt.endpoint);
    info!("target url is {url_str}");

    // Convert to the Url type.
    // NOTE: panics on a malformed endpoint; `SetTargetError` has no parse-error variant yet.
    let parsed_url = url::Url::parse(&url_str).unwrap();

    let node = Node {
        url: parsed_url,
        pools: vec![],
        is_local: false,
        grid_host: "".to_string(),
    };

    let cli = RemotePeerS3Client::new(Some(node), None);

    // The remote target bucket must exist and be versioned as well.
    match cli
        .get_bucket_info(&tgt.target_bucket, &store_api::BucketOptions::default())
        .await
    {
        Ok(info) => {
            info!("Bucket Info: {info:?}");
            if !info.versioning {
                return Err(SetTargetError::TargetNotVersioned(tgt.target_bucket.to_string()));
            }
        }
        Err(err) => {
            error!("remote bucket {} lookup failed: {err:?}", tgt.target_bucket);
            return Err(SetTargetError::SourceNotVersioned(tgt.target_bucket.to_string()));
        }
    }

    // if tgt.target_type == BucketTargetType::ReplicationService {
    //     // Check if the target is a rustfs server and alive
    //     let hc_result = tokio::time::timeout(Duration::from_secs(3), client.health_check(&tgt.endpoint)).await;
    //     match hc_result {
    //         Ok(Ok(true)) => {} // Server is alive
    //         Ok(Ok(false)) | Ok(Err(_)) | Err(_) => {
    //             return Err(SetTargetError::HealthCheckFailed(tgt.target_bucket.clone()));
    //         }
    //     }
    // }

    // Lock and update the target maps.
    let mut targets_map = self.targets_map.write().await;
    let mut arn_remotes_map = self.arn_remote_map.write().await;

    let targets = targets_map.entry(bucket.to_string()).or_default();
    let mut found = false;

    for existing_target in targets.iter_mut() {
        if existing_target.type_ == tgt.type_ {
            if existing_target.arn == tgt.arn {
                if !update {
                    return Err(SetTargetError::TargetAlreadyExists(existing_target.target_bucket.clone()));
                }
                *existing_target = tgt.clone();
                found = true;
                break;
            }

            if existing_target.endpoint == tgt.endpoint {
                return Err(SetTargetError::TargetAlreadyExists(existing_target.target_bucket.clone()));
            }
        }
    }

    if !found && !update {
        targets.push(tgt.clone());
    }

    let credentials = tgt.credentials.clone().unwrap();
    let arntgt = ArnTarget::new(
        tgt.target_bucket.clone(),
        tgt.endpoint.clone(),
        credentials.access_key,
        credentials.secret_key,
    );

    arn_remotes_map.insert(tgt.arn.clone().unwrap(), arntgt);
    // self.update_bandwidth_limit(bucket, &tgt.arn, tgt.bandwidth_limit).await;

    Ok(())
}
}
|
||||
|
||||
#[derive(Clone)]
pub struct TargetClient {
    pub client: reqwest::Client, // Using reqwest HTTP client
    pub health_check_duration: Duration,
    pub bucket: String, // Remote bucket target
    pub replicate_sync: bool,
    pub storage_class: String, // Storage class on remote
    pub disable_proxy: bool,
    pub arn: String, // ARN to uniquely identify remote target
    pub reset_id: String,
    pub endpoint: String,
    pub secure: bool,
    pub ak: String,
    pub sk: String,
}
|
||||
|
||||
impl TargetClient {
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        client: reqwest::Client,
        health_check_duration: Duration,
        bucket: String,
        replicate_sync: bool,
        storage_class: String,
        disable_proxy: bool,
        arn: String,
        reset_id: String,
        endpoint: String,
        secure: bool,
        ak: String,
        sk: String,
    ) -> Self {
        TargetClient {
            client,
            health_check_duration,
            bucket,
            replicate_sync,
            storage_class,
            disable_proxy,
            arn,
            reset_id,
            endpoint,
            secure,
            ak,
            sk,
        }
    }

    pub async fn bucket_exists(&self, _bucket: &str) -> Result<bool, SetTargetError> {
        Ok(true) // Mocked implementation
    }
}
|
||||
use tracing::{error, info, warn};
use uuid::Uuid;
|
||||
#[derive(Debug, Clone)]
pub struct VersioningConfig {
    pub enabled: bool,
}

impl VersioningConfig {
    pub fn is_enabled(&self) -> bool {
        self.enabled
    }
}

#[derive(Debug)]
pub struct Client;

impl Client {
    pub async fn bucket_exists(&self, _bucket: &str) -> Result<bool, SetTargetError> {
        Ok(true) // Mocked implementation
    }

    pub async fn get_bucket_versioning(&self, _bucket: &str) -> Result<VersioningConfig, SetTargetError> {
        Ok(VersioningConfig { enabled: true })
    }

    pub async fn health_check(&self, _endpoint: &str) -> Result<bool, SetTargetError> {
        Ok(true) // Mocked health check
    }
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
pub struct ServiceType(String);

impl ServiceType {
    pub fn is_valid(&self) -> bool {
        !self.0.is_empty() // Add concrete validation logic here as needed
    }
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
pub struct ARN {
    pub arn_type: String,
    pub id: String,
    pub region: String,
    pub bucket: String,
}

impl ARN {
    /// Check whether the ARN is empty.
    pub fn is_empty(&self) -> bool {
        // !self.arn_type.is_valid()
        false
    }

    /// Parse an ARN from a string.
    pub fn parse(s: &str) -> Result<Self, String> {
        // An ARN must have the format arn:rustfs:<Type>:<REGION>:<ID>:<remote-bucket>
        if !s.starts_with("arn:rustfs:") {
            return Err(format!("Invalid ARN {s}"));
        }

        let tokens: Vec<&str> = s.split(':').collect();
        if tokens.len() != 6 || tokens[4].is_empty() || tokens[5].is_empty() {
            return Err(format!("Invalid ARN {s}"));
        }

        Ok(ARN {
            arn_type: tokens[2].to_string(),
            region: tokens[3].to_string(),
            id: tokens[4].to_string(),
            bucket: tokens[5].to_string(),
        })
    }
}

// Implement `Display` so an ARN can be rendered directly with `format!` or `{}`.
impl std::fmt::Display for ARN {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "arn:rustfs:{}:{}:{}:{}", self.arn_type, self.region, self.id, self.bucket)
    }
}
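
// Hedged example (not part of the original source): a small round-trip test sketch for
// `ARN::parse` and the `Display` impl above; the sample ARN string is hypothetical.
#[cfg(test)]
mod arn_round_trip_tests {
    use super::ARN;

    #[test]
    fn parse_then_display_round_trips() {
        let s = "arn:rustfs:replication:us-east-1:1234:remote-bucket";
        let arn = ARN::parse(s).expect("valid ARN");
        assert_eq!(arn.arn_type, "replication");
        assert_eq!(arn.region, "us-east-1");
        assert_eq!(arn.id, "1234");
        assert_eq!(arn.bucket, "remote-bucket");
        // `Display` reassembles the same six-token form.
        assert_eq!(arn.to_string(), s);
    }
}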
|
||||
|
||||
fn must_get_uuid() -> String {
    // `Uuid::new_v4()` is infallible, so no error handling is needed here.
    Uuid::new_v4().to_string()
}
|
||||
fn generate_arn(target: BucketTarget, depl_id: String) -> String {
    let mut uuid: String = depl_id;
    if uuid.is_empty() {
        uuid = must_get_uuid();
    }

    let arn = ARN {
        arn_type: target.type_.unwrap(),
        id: uuid,
        region: "us-east-1".to_string(),
        bucket: target.target_bucket,
    };
    arn.to_string()
}
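
// Hedged example (not part of the original source): with a replication-typed target and
// an empty deployment id, `generate_arn` produces something like
// `arn:rustfs:replication:us-east-1:<random-uuid>:<target-bucket>`.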
|
||||
|
||||
#[derive(Debug, Error)]
pub enum SetTargetError {
    #[error("Invalid target type for bucket {0}")]
    InvalidTargetType(String),

    #[error("Target bucket {0} not found")]
    TargetNotFound(String),

    #[error("Source bucket {0} is not versioned")]
    SourceNotVersioned(String),

    #[error("Target bucket {0} is not versioned")]
    TargetNotVersioned(String),

    #[error("Health check failed for bucket {0}")]
    HealthCheckFailed(String),

    #[error("Target bucket {0} already exists")]
    TargetAlreadyExists(String),
}
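
// Hedged example (not part of the original source): the `thiserror` derive turns each
// `#[error(...)]` annotation into the variant's `Display` output, e.g.:
//
//     let err = SetTargetError::TargetNotVersioned("photos".to_string());
//     assert_eq!(err.to_string(), "Target bucket photos is not versioned");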
|
||||
@@ -1,14 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod bucket_replication;
|
||||
pub mod bucket_targets;
|
||||
@@ -88,7 +88,7 @@ impl LocalUsageSnapshot {
|
||||
|
||||
/// Build the snapshot file name `<disk-id>.json`.
|
||||
pub fn snapshot_file_name(disk_id: &str) -> String {
|
||||
format!("{}.json", disk_id)
|
||||
format!("{disk_id}.json")
|
||||
}
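
    // Note (not part of the diff): this is the inline format-args style stabilized in
    // Rust 1.58; `format!("{disk_id}.json")` is equivalent to `format!("{}.json", disk_id)`.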
|
||||
|
||||
/// Build the object path relative to `RUSTFS_META_BUCKET`, e.g. `datausage/<disk-id>.json`.
|
||||
|
||||
@@ -2349,12 +2349,7 @@ impl DiskAPI for LocalDisk {
|
||||
self.delete_file(&volume_dir, &xl_path, true, false).await
|
||||
}
|
||||
#[tracing::instrument(level = "debug", skip(self))]
|
||||
async fn delete_versions(
|
||||
&self,
|
||||
volume: &str,
|
||||
versions: Vec<FileInfoVersions>,
|
||||
_opts: DeleteOptions,
|
||||
) -> Result<Vec<Option<Error>>> {
|
||||
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, _opts: DeleteOptions) -> Vec<Option<Error>> {
|
||||
let mut errs = Vec::with_capacity(versions.len());
|
||||
for _ in 0..versions.len() {
|
||||
errs.push(None);
|
||||
@@ -2368,7 +2363,7 @@ impl DiskAPI for LocalDisk {
|
||||
}
|
||||
}
|
||||
|
||||
Ok(errs)
|
||||
errs
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
|
||||
@@ -201,12 +201,7 @@ impl DiskAPI for Disk {
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn delete_versions(
|
||||
&self,
|
||||
volume: &str,
|
||||
versions: Vec<FileInfoVersions>,
|
||||
opts: DeleteOptions,
|
||||
) -> Result<Vec<Option<Error>>> {
|
||||
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, opts: DeleteOptions) -> Vec<Option<Error>> {
|
||||
match self {
|
||||
Disk::Local(local_disk) => local_disk.delete_versions(volume, versions, opts).await,
|
||||
Disk::Remote(remote_disk) => remote_disk.delete_versions(volume, versions, opts).await,
|
||||
@@ -448,12 +443,7 @@ pub trait DiskAPI: Debug + Send + Sync + 'static {
|
||||
force_del_marker: bool,
|
||||
opts: DeleteOptions,
|
||||
) -> Result<()>;
|
||||
async fn delete_versions(
|
||||
&self,
|
||||
volume: &str,
|
||||
versions: Vec<FileInfoVersions>,
|
||||
opts: DeleteOptions,
|
||||
) -> Result<Vec<Option<Error>>>;
|
||||
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, opts: DeleteOptions) -> Vec<Option<Error>>;
|
||||
async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()>;
|
||||
async fn write_metadata(&self, org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()>;
|
||||
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()>;
|
||||
|
||||
@@ -21,7 +21,6 @@ pub mod bitrot;
|
||||
pub mod bucket;
|
||||
pub mod cache_value;
|
||||
mod chunk_stream;
|
||||
pub mod cmd;
|
||||
pub mod compress;
|
||||
pub mod config;
|
||||
pub mod data_usage;
|
||||
@@ -32,7 +31,6 @@ pub mod erasure_coding;
|
||||
pub mod error;
|
||||
pub mod file_cache;
|
||||
pub mod global;
|
||||
pub mod lock_utils;
|
||||
pub mod metrics_realtime;
|
||||
pub mod notification_sys;
|
||||
pub mod pools;
|
||||
|
||||
@@ -1,136 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::disk::endpoint::Endpoint;
|
||||
use crate::error::Result;
|
||||
use rustfs_lock::client::{LockClient, local::LocalClient, remote::RemoteClient};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Create unique lock clients from endpoints
|
||||
/// This function creates one client per unique host:port combination
|
||||
/// to avoid duplicate connections to the same server
|
||||
pub async fn create_unique_clients(endpoints: &[Endpoint]) -> Result<Vec<Arc<dyn LockClient>>> {
|
||||
let mut unique_endpoints: HashMap<String, &Endpoint> = HashMap::new();
|
||||
|
||||
// Collect unique endpoints based on host:port
|
||||
for endpoint in endpoints {
|
||||
if endpoint.is_local {
|
||||
// For local endpoints, use "local" as the key
|
||||
unique_endpoints.insert("local".to_string(), endpoint);
|
||||
} else {
|
||||
// For remote endpoints, use host:port as the key
|
||||
let host_port = format!(
|
||||
"{}:{}",
|
||||
endpoint.url.host_str().unwrap_or("localhost"),
|
||||
endpoint.url.port().unwrap_or(9000)
|
||||
);
|
||||
unique_endpoints.insert(host_port, endpoint);
|
||||
}
|
||||
}
|
||||
|
||||
let mut clients = Vec::new();
|
||||
|
||||
// Create clients for unique endpoints
|
||||
for (_key, endpoint) in unique_endpoints {
|
||||
if endpoint.is_local {
|
||||
// For local endpoints, create a local lock client
|
||||
let local_client = LocalClient::new();
|
||||
clients.push(Arc::new(local_client) as Arc<dyn LockClient>);
|
||||
} else {
|
||||
// For remote endpoints, create a remote lock client
|
||||
let remote_client = RemoteClient::new(endpoint.url.to_string());
|
||||
clients.push(Arc::new(remote_client) as Arc<dyn LockClient>);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(clients)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use url::Url;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_create_unique_clients_local() {
|
||||
let endpoints = vec![
|
||||
Endpoint {
|
||||
url: Url::parse("http://localhost:9000").unwrap(),
|
||||
is_local: true,
|
||||
pool_idx: 0,
|
||||
set_idx: 0,
|
||||
disk_idx: 0,
|
||||
},
|
||||
Endpoint {
|
||||
url: Url::parse("http://localhost:9000").unwrap(),
|
||||
is_local: true,
|
||||
pool_idx: 0,
|
||||
set_idx: 0,
|
||||
disk_idx: 1,
|
||||
},
|
||||
];
|
||||
|
||||
let clients = create_unique_clients(&endpoints).await.unwrap();
|
||||
// Should only create one client for local endpoints
|
||||
assert_eq!(clients.len(), 1);
|
||||
assert!(clients[0].is_local().await);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_create_unique_clients_mixed() {
|
||||
let endpoints = vec![
|
||||
Endpoint {
|
||||
url: Url::parse("http://localhost:9000").unwrap(),
|
||||
is_local: true,
|
||||
pool_idx: 0,
|
||||
set_idx: 0,
|
||||
disk_idx: 0,
|
||||
},
|
||||
Endpoint {
|
||||
url: Url::parse("http://remote1:9000").unwrap(),
|
||||
is_local: false,
|
||||
pool_idx: 0,
|
||||
set_idx: 0,
|
||||
disk_idx: 1,
|
||||
},
|
||||
Endpoint {
|
||||
url: Url::parse("http://remote1:9000").unwrap(),
|
||||
is_local: false,
|
||||
pool_idx: 0,
|
||||
set_idx: 0,
|
||||
disk_idx: 2,
|
||||
},
|
||||
Endpoint {
|
||||
url: Url::parse("http://remote2:9000").unwrap(),
|
||||
is_local: false,
|
||||
pool_idx: 0,
|
||||
set_idx: 0,
|
||||
disk_idx: 3,
|
||||
},
|
||||
];
|
||||
|
||||
let clients = create_unique_clients(&endpoints).await.unwrap();
|
||||
// Should create 3 clients: 1 local + 2 unique remote
|
||||
assert_eq!(clients.len(), 3);
|
||||
|
||||
// Check that we have one local client
|
||||
let local_count = clients.iter().filter(|c| futures::executor::block_on(c.is_local())).count();
|
||||
assert_eq!(local_count, 1);
|
||||
|
||||
// Check that we have two remote clients
|
||||
let remote_count = clients.iter().filter(|c| !futures::executor::block_on(c.is_local())).count();
|
||||
assert_eq!(remote_count, 2);
|
||||
}
|
||||
}
|
||||
@@ -16,11 +16,17 @@ use crate::StorageAPI;
|
||||
use crate::admin_server_info::get_commit_id;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::global::{GLOBAL_BOOT_TIME, get_global_endpoints};
|
||||
use crate::metrics_realtime::{CollectMetricsOpts, MetricType};
|
||||
use crate::rpc::PeerRestClient;
|
||||
use crate::{endpoints::EndpointServerPools, new_object_layer_fn};
|
||||
use futures::future::join_all;
|
||||
use lazy_static::lazy_static;
|
||||
use rustfs_madmin::health::{Cpus, MemInfo, OsInfo, Partitions, ProcInfo, SysConfig, SysErrors, SysService};
|
||||
use rustfs_madmin::metrics::RealtimeMetrics;
|
||||
use rustfs_madmin::net::NetInfo;
|
||||
use rustfs_madmin::{ItemState, ServerProperties};
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::{Hash, Hasher};
|
||||
use std::sync::OnceLock;
|
||||
use std::time::SystemTime;
|
||||
use tracing::{error, warn};
|
||||
@@ -62,21 +68,122 @@ pub struct NotificationPeerErr {
|
||||
}
|
||||
|
||||
impl NotificationSys {
|
||||
pub fn rest_client_from_hash(&self, _s: &str) -> Option<PeerRestClient> {
|
||||
None
|
||||
}
|
||||
pub async fn delete_policy(&self) -> Vec<NotificationPeerErr> {
|
||||
unimplemented!()
|
||||
}
|
||||
pub async fn load_policy(&self) -> Vec<NotificationPeerErr> {
|
||||
unimplemented!()
|
||||
pub fn rest_client_from_hash(&self, s: &str) -> Option<PeerRestClient> {
|
||||
if self.all_peer_clients.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let mut hasher = DefaultHasher::new();
|
||||
s.hash(&mut hasher);
|
||||
let idx = (hasher.finish() as usize) % self.all_peer_clients.len();
|
||||
self.all_peer_clients[idx].clone()
|
||||
}
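
    // Hedged note (not part of the diff): `DefaultHasher::new()` is deterministic within
    // a process, so a given key string always selects the same peer slot for a fixed
    // peer list, e.g. two calls with "bucket/object" return the same entry.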
|
||||
|
||||
pub async fn load_policy_mapping(&self) -> Vec<NotificationPeerErr> {
|
||||
unimplemented!()
|
||||
pub async fn delete_policy(&self, policy_name: &str) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
let policy = policy_name.to_string();
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.delete_policy(&policy).await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
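
    // Hedged refactoring sketch (not part of the diff): the notification methods below
    // all repeat the same per-peer fan-out. Assuming `PeerRestClient` stays cloneable, a
    // helper with the hypothetical name `fan_out` could centralize the pattern:
    //
    //     async fn fan_out<F, Fut>(&self, op: F) -> Vec<NotificationPeerErr>
    //     where
    //         F: Fn(PeerRestClient) -> Fut,
    //         Fut: std::future::Future<Output = Result<(), Error>>,
    //     {
    //         let futures = self.peer_clients.iter().cloned().map(|client| {
    //             let op = &op;
    //             async move {
    //                 match client {
    //                     Some(client) => {
    //                         let host = client.host.to_string();
    //                         let err = op(client).await.err();
    //                         NotificationPeerErr { host, err }
    //                     }
    //                     None => NotificationPeerErr {
    //                         host: String::new(),
    //                         err: Some(Error::other("peer is not reachable")),
    //                     },
    //                 }
    //             }
    //         });
    //         join_all(futures).await
    //     }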
|
||||
pub async fn delete_user(&self) -> Vec<NotificationPeerErr> {
|
||||
unimplemented!()
|
||||
|
||||
pub async fn load_policy(&self, policy_name: &str) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
let policy = policy_name.to_string();
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.load_policy(&policy).await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn load_policy_mapping(&self, user_or_group: &str, user_type: u64, is_group: bool) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
let uog = user_or_group.to_string();
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.load_policy_mapping(&uog, user_type, is_group).await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn delete_user(&self, access_key: &str) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
let ak = access_key.to_string();
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.delete_user(&ak).await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn storage_info<S: StorageAPI>(&self, api: &S) -> rustfs_madmin::StorageInfo {
|
||||
@@ -140,6 +247,114 @@ impl NotificationSys {
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn load_user(&self, access_key: &str, temp: bool) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
let ak = access_key.to_string();
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.load_user(&ak, temp).await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn load_group(&self, group: &str) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
let gname = group.to_string();
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.load_group(&gname).await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn delete_service_account(&self, access_key: &str) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
let ak = access_key.to_string();
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.delete_service_account(&ak).await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn load_service_account(&self, access_key: &str) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
let ak = access_key.to_string();
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.load_service_account(&ak).await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn reload_pool_meta(&self) {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter().flatten() {
|
||||
@@ -202,6 +417,281 @@ impl NotificationSys {
|
||||
let _ = store.stop_rebalance().await;
|
||||
warn!("notification stop_rebalance stop_rebalance done");
|
||||
}
|
||||
|
||||
pub async fn load_bucket_metadata(&self, bucket: &str) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
let b = bucket.to_string();
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.load_bucket_metadata(&b).await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn delete_bucket_metadata(&self, bucket: &str) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
let b = bucket.to_string();
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.delete_bucket_metadata(&b).await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn start_profiling(&self, profiler: &str) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
let pf = profiler.to_string();
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.start_profiling(&pf).await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn get_cpus(&self) -> Vec<Cpus> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter().cloned() {
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
client.get_cpus().await.unwrap_or_default()
|
||||
} else {
|
||||
Cpus::default()
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
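
    // Hedged sketch (not part of the diff): the system-info getters below share a
    // "query peer or fall back to Default" shape; a generic helper (hypothetical name
    // `collect_or_default`) could express it once:
    //
    //     async fn collect_or_default<T, F, Fut>(&self, op: F) -> Vec<T>
    //     where
    //         T: Default,
    //         F: Fn(PeerRestClient) -> Fut,
    //         Fut: std::future::Future<Output = Result<T, Error>>,
    //     {
    //         let futures = self.peer_clients.iter().cloned().map(|client| {
    //             let op = &op;
    //             async move {
    //                 match client {
    //                     Some(client) => op(client).await.unwrap_or_default(),
    //                     None => T::default(),
    //                 }
    //             }
    //         });
    //         join_all(futures).await
    //     }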
|
||||
|
||||
pub async fn get_net_info(&self) -> Vec<NetInfo> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter().cloned() {
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
client.get_net_info().await.unwrap_or_default()
|
||||
} else {
|
||||
NetInfo::default()
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn get_partitions(&self) -> Vec<Partitions> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter().cloned() {
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
client.get_partitions().await.unwrap_or_default()
|
||||
} else {
|
||||
Partitions::default()
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn get_os_info(&self) -> Vec<OsInfo> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter().cloned() {
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
client.get_os_info().await.unwrap_or_default()
|
||||
} else {
|
||||
OsInfo::default()
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn get_sys_services(&self) -> Vec<SysService> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter().cloned() {
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
client.get_se_linux_info().await.unwrap_or_default()
|
||||
} else {
|
||||
SysService::default()
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn get_sys_config(&self) -> Vec<SysConfig> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter().cloned() {
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
client.get_sys_config().await.unwrap_or_default()
|
||||
} else {
|
||||
SysConfig::default()
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn get_sys_errors(&self) -> Vec<SysErrors> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter().cloned() {
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
client.get_sys_errors().await.unwrap_or_default()
|
||||
} else {
|
||||
SysErrors::default()
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn get_mem_info(&self) -> Vec<MemInfo> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter().cloned() {
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
client.get_mem_info().await.unwrap_or_default()
|
||||
} else {
|
||||
MemInfo::default()
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn get_proc_info(&self) -> Vec<ProcInfo> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter().cloned() {
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
client.get_proc_info().await.unwrap_or_default()
|
||||
} else {
|
||||
ProcInfo::default()
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn get_metrics(&self, t: MetricType, opts: &CollectMetricsOpts) -> Vec<RealtimeMetrics> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter().cloned() {
|
||||
let t_clone = t;
|
||||
let opts_clone = opts;
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
client.get_metrics(t_clone, opts_clone).await.unwrap_or_default()
|
||||
} else {
|
||||
RealtimeMetrics::default()
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn reload_site_replication_config(&self) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.reload_site_replication_config().await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
|
||||
pub async fn load_transition_tier_config(&self) -> Vec<NotificationPeerErr> {
|
||||
let mut futures = Vec::with_capacity(self.peer_clients.len());
|
||||
for client in self.peer_clients.iter() {
|
||||
futures.push(async move {
|
||||
if let Some(client) = client {
|
||||
match client.load_transition_tier_config().await {
|
||||
Ok(_) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: None,
|
||||
},
|
||||
Err(e) => NotificationPeerErr {
|
||||
host: client.host.to_string(),
|
||||
err: Some(e),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
NotificationPeerErr {
|
||||
host: "".to_string(),
|
||||
err: Some(Error::other("peer is not reachable")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
join_all(futures).await
|
||||
}
|
||||
}
|
||||
|
||||
fn get_offline_disks(offline_host: &str, endpoints: &EndpointServerPools) -> Vec<rustfs_madmin::Disk> {
|
||||
|
||||
@@ -48,7 +48,7 @@ use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use time::{Duration, OffsetDateTime};
|
||||
use tokio::io::{AsyncReadExt, BufReader};
|
||||
use tokio::sync::broadcast::Receiver as B_Receiver;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
pub const POOL_META_NAME: &str = "pool.bin";
|
||||
@@ -651,7 +651,7 @@ impl ECStore {
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self, rx))]
|
||||
pub async fn decommission(&self, rx: B_Receiver<bool>, indices: Vec<usize>) -> Result<()> {
|
||||
pub async fn decommission(&self, rx: CancellationToken, indices: Vec<usize>) -> Result<()> {
|
||||
warn!("decommission: {:?}", indices);
|
||||
if indices.is_empty() {
|
||||
return Err(Error::other("InvalidArgument"));
|
||||
@@ -663,13 +663,14 @@ impl ECStore {
|
||||
|
||||
self.start_decommission(indices.clone()).await?;
|
||||
|
||||
let rx_clone = rx.clone();
|
||||
tokio::spawn(async move {
|
||||
let Some(store) = new_object_layer_fn() else {
|
||||
error!("store not init");
|
||||
return;
|
||||
};
|
||||
for idx in indices.iter() {
|
||||
store.do_decommission_in_routine(rx.resubscribe(), *idx).await;
|
||||
store.do_decommission_in_routine(rx_clone.clone(), *idx).await;
|
||||
}
|
||||
});
|
||||
|
||||
@@ -891,7 +892,7 @@ impl ECStore {
|
||||
#[tracing::instrument(skip(self, rx))]
|
||||
async fn decommission_pool(
|
||||
self: &Arc<Self>,
|
||||
rx: B_Receiver<bool>,
|
||||
rx: CancellationToken,
|
||||
idx: usize,
|
||||
pool: Arc<Sets>,
|
||||
bi: DecomBucketInfo,
|
||||
@@ -936,20 +937,20 @@ impl ECStore {
|
||||
});
|
||||
|
||||
let set = set.clone();
|
||||
let mut rx = rx.resubscribe();
|
||||
let rx_clone = rx.clone();
|
||||
let bi = bi.clone();
|
||||
let set_id = set_idx;
|
||||
let wk_clone = wk.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
if rx.try_recv().is_ok() {
|
||||
if rx_clone.is_cancelled() {
|
||||
warn!("decommission_pool: cancel {}", set_id);
|
||||
break;
|
||||
}
|
||||
warn!("decommission_pool: list_objects_to_decommission {} {}", set_id, &bi.name);
|
||||
|
||||
match set
|
||||
.list_objects_to_decommission(rx.resubscribe(), bi.clone(), decommission_entry.clone())
|
||||
.list_objects_to_decommission(rx_clone.clone(), bi.clone(), decommission_entry.clone())
|
||||
.await
|
||||
{
|
||||
Ok(_) => {
|
||||
@@ -982,7 +983,7 @@ impl ECStore {
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self, rx))]
|
||||
pub async fn do_decommission_in_routine(self: &Arc<Self>, rx: B_Receiver<bool>, idx: usize) {
|
||||
pub async fn do_decommission_in_routine(self: &Arc<Self>, rx: CancellationToken, idx: usize) {
|
||||
if let Err(err) = self.decommission_in_background(rx, idx).await {
|
||||
error!("decom err {:?}", &err);
|
||||
if let Err(er) = self.decommission_failed(idx).await {
|
||||
@@ -1060,7 +1061,7 @@ impl ECStore {
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self, rx))]
|
||||
async fn decommission_in_background(self: &Arc<Self>, rx: B_Receiver<bool>, idx: usize) -> Result<()> {
|
||||
async fn decommission_in_background(self: &Arc<Self>, rx: CancellationToken, idx: usize) -> Result<()> {
|
||||
let pool = self.pools[idx].clone();
|
||||
|
||||
let pending = {
|
||||
@@ -1090,10 +1091,7 @@ impl ECStore {
|
||||
|
||||
warn!("decommission: currently on bucket {}", &bucket.name);
|
||||
|
||||
if let Err(err) = self
|
||||
.decommission_pool(rx.resubscribe(), idx, pool.clone(), bucket.clone())
|
||||
.await
|
||||
{
|
||||
if let Err(err) = self.decommission_pool(rx.clone(), idx, pool.clone(), bucket.clone()).await {
|
||||
error!("decommission: decommission_pool err {:?}", &err);
|
||||
return Err(err);
|
||||
} else {
|
||||
@@ -1329,7 +1327,7 @@ impl SetDisks {
|
||||
#[tracing::instrument(skip(self, rx, cb_func))]
|
||||
async fn list_objects_to_decommission(
|
||||
self: &Arc<Self>,
|
||||
rx: B_Receiver<bool>,
|
||||
rx: CancellationToken,
|
||||
bucket_info: DecomBucketInfo,
|
||||
cb_func: ListCallback,
|
||||
) -> Result<()> {
|
||||
|
||||
@@ -34,8 +34,8 @@ use std::io::Cursor;
|
||||
use std::sync::Arc;
|
||||
use time::OffsetDateTime;
|
||||
use tokio::io::{AsyncReadExt, BufReader};
|
||||
use tokio::sync::broadcast::{self, Receiver as B_Receiver};
|
||||
use tokio::time::{Duration, Instant};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{error, info};
|
||||
use uuid::Uuid;
|
||||
|
||||
@@ -151,7 +151,7 @@ pub struct DiskStat {
|
||||
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
|
||||
pub struct RebalanceMeta {
|
||||
#[serde(skip)]
|
||||
pub cancel: Option<broadcast::Sender<bool>>, // To be invoked on rebalance-stop
|
||||
pub cancel: Option<CancellationToken>, // To be invoked on rebalance-stop
|
||||
#[serde(skip)]
|
||||
pub last_refreshed_at: Option<OffsetDateTime>,
|
||||
#[serde(rename = "stopTs")]
|
||||
@@ -493,8 +493,8 @@ impl ECStore {
|
||||
pub async fn stop_rebalance(self: &Arc<Self>) -> Result<()> {
|
||||
let rebalance_meta = self.rebalance_meta.read().await;
|
||||
if let Some(meta) = rebalance_meta.as_ref() {
|
||||
if let Some(tx) = meta.cancel.as_ref() {
|
||||
let _ = tx.send(true);
|
||||
if let Some(cancel_tx) = meta.cancel.as_ref() {
|
||||
cancel_tx.cancel();
|
||||
}
|
||||
}
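
    // Hedged migration note (not part of the diff): unlike a broadcast channel, a
    // `CancellationToken` is latched and cheap to clone, so workers poll `is_cancelled()`
    // (or await `cancelled()`) instead of calling `try_recv()`/`resubscribe()`. A minimal
    // sketch of the pattern adopted below:
    //
    //     use tokio_util::sync::CancellationToken;
    //
    //     let token = CancellationToken::new();
    //     let worker = token.clone();
    //     tokio::spawn(async move {
    //         loop {
    //             if worker.is_cancelled() {
    //                 break; // stop was requested
    //             }
    //             // ... one unit of work ...
    //         }
    //     });
    //     token.cancel(); // every clone observes the cancellation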
|
||||
|
||||
@@ -506,13 +506,14 @@ impl ECStore {
|
||||
info!("start_rebalance: start rebalance");
|
||||
// let rebalance_meta = self.rebalance_meta.read().await;
|
||||
|
||||
let (tx, rx) = broadcast::channel::<bool>(1);
|
||||
let cancel_tx = CancellationToken::new();
|
||||
let rx = cancel_tx.clone();
|
||||
|
||||
{
|
||||
let mut rebalance_meta = self.rebalance_meta.write().await;
|
||||
|
||||
if let Some(meta) = rebalance_meta.as_mut() {
|
||||
meta.cancel = Some(tx)
|
||||
meta.cancel = Some(cancel_tx)
|
||||
} else {
|
||||
info!("start_rebalance: rebalance_meta is None exit");
|
||||
return;
|
||||
@@ -565,9 +566,9 @@ impl ECStore {
|
||||
|
||||
let pool_idx = idx;
|
||||
let store = self.clone();
|
||||
let rx = rx.resubscribe();
|
||||
let rx_clone = rx.clone();
|
||||
tokio::spawn(async move {
|
||||
if let Err(err) = store.rebalance_buckets(rx, pool_idx).await {
|
||||
if let Err(err) = store.rebalance_buckets(rx_clone, pool_idx).await {
|
||||
error!("Rebalance failed for pool {}: {}", pool_idx, err);
|
||||
} else {
|
||||
info!("Rebalance completed for pool {}", pool_idx);
|
||||
@@ -579,7 +580,7 @@ impl ECStore {
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self, rx))]
|
||||
async fn rebalance_buckets(self: &Arc<Self>, mut rx: B_Receiver<bool>, pool_index: usize) -> Result<()> {
|
||||
async fn rebalance_buckets(self: &Arc<Self>, rx: CancellationToken, pool_index: usize) -> Result<()> {
|
||||
let (done_tx, mut done_rx) = tokio::sync::mpsc::channel::<Result<()>>(1);
|
||||
|
||||
// Save rebalance metadata periodically
|
||||
@@ -651,7 +652,7 @@ impl ECStore {
|
||||
info!("Pool {} rebalancing is started", pool_index);
|
||||
|
||||
loop {
|
||||
if let Ok(true) = rx.try_recv() {
|
||||
if rx.is_cancelled() {
|
||||
info!("Pool {} rebalancing is stopped", pool_index);
|
||||
done_tx.send(Err(Error::other("rebalance stopped: canceled"))).await.ok();
|
||||
break;
|
||||
@@ -660,7 +661,7 @@ impl ECStore {
|
||||
if let Some(bucket) = self.next_rebal_bucket(pool_index).await? {
|
||||
info!("Rebalancing bucket: start {}", bucket);
|
||||
|
||||
if let Err(err) = self.rebalance_bucket(rx.resubscribe(), bucket.clone(), pool_index).await {
|
||||
if let Err(err) = self.rebalance_bucket(rx.clone(), bucket.clone(), pool_index).await {
|
||||
if err.to_string().contains("not initialized") {
|
||||
info!("rebalance_bucket: rebalance not initialized, continue");
|
||||
continue;
|
||||
@@ -1033,7 +1034,7 @@ impl ECStore {
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self, rx))]
|
||||
async fn rebalance_bucket(self: &Arc<Self>, rx: B_Receiver<bool>, bucket: String, pool_index: usize) -> Result<()> {
|
||||
async fn rebalance_bucket(self: &Arc<Self>, rx: CancellationToken, bucket: String, pool_index: usize) -> Result<()> {
|
||||
// Placeholder for actual bucket rebalance logic
|
||||
info!("Rebalancing bucket {} in pool {}", bucket, pool_index);
|
||||
|
||||
@@ -1072,7 +1073,7 @@ impl ECStore {
|
||||
});
|
||||
|
||||
let set = set.clone();
|
||||
let rx = rx.resubscribe();
|
||||
let rx = rx.clone();
|
||||
let bucket = bucket.clone();
|
||||
// let wk = wk.clone();
|
||||
|
||||
@@ -1144,7 +1145,7 @@ impl SetDisks {
|
||||
#[tracing::instrument(skip(self, rx, cb))]
|
||||
pub async fn list_objects_to_rebalance(
|
||||
self: &Arc<Self>,
|
||||
rx: B_Receiver<bool>,
|
||||
rx: CancellationToken,
|
||||
bucket: String,
|
||||
cb: ListCallback,
|
||||
) -> Result<()> {
|
||||
|
||||
@@ -16,10 +16,8 @@ mod http_auth;
|
||||
mod peer_rest_client;
|
||||
mod peer_s3_client;
|
||||
mod remote_disk;
|
||||
mod tonic_service;
|
||||
|
||||
pub use http_auth::{build_auth_headers, verify_rpc_signature};
|
||||
pub use peer_rest_client::PeerRestClient;
|
||||
pub use peer_s3_client::{LocalPeerS3Client, PeerS3Client, RemotePeerS3Client, S3PeerSys};
|
||||
pub use remote_disk::RemoteDisk;
|
||||
pub use tonic_service::{NodeService, make_server};
|
||||
|
||||
@@ -345,21 +345,43 @@ impl DiskAPI for RemoteDisk {
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn delete_versions(
|
||||
&self,
|
||||
volume: &str,
|
||||
versions: Vec<FileInfoVersions>,
|
||||
opts: DeleteOptions,
|
||||
) -> Result<Vec<Option<Error>>> {
|
||||
async fn delete_versions(&self, volume: &str, versions: Vec<FileInfoVersions>, opts: DeleteOptions) -> Vec<Option<Error>> {
|
||||
info!("delete_versions");
|
||||
let opts = serde_json::to_string(&opts)?;
|
||||
|
||||
let opts = match serde_json::to_string(&opts) {
|
||||
Ok(opts) => opts,
|
||||
Err(err) => {
|
||||
let mut errors = Vec::with_capacity(versions.len());
|
||||
for _ in 0..versions.len() {
|
||||
errors.push(Some(Error::other(err.to_string())));
|
||||
}
|
||||
return errors;
|
||||
}
|
||||
};
|
||||
let mut versions_str = Vec::with_capacity(versions.len());
|
||||
for file_info_versions in versions.iter() {
|
||||
versions_str.push(serde_json::to_string(file_info_versions)?);
|
||||
versions_str.push(match serde_json::to_string(file_info_versions) {
|
||||
Ok(versions_str) => versions_str,
|
||||
Err(err) => {
|
||||
let mut errors = Vec::with_capacity(versions.len());
|
||||
for _ in 0..versions.len() {
|
||||
errors.push(Some(Error::other(err.to_string())));
|
||||
}
|
||||
return errors;
|
||||
}
|
||||
});
|
||||
}
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let mut client = match node_service_time_out_client(&self.addr).await {
|
||||
Ok(client) => client,
|
||||
Err(err) => {
|
||||
let mut errors = Vec::with_capacity(versions.len());
|
||||
for _ in 0..versions.len() {
|
||||
errors.push(Some(Error::other(err.to_string())));
|
||||
}
|
||||
return errors;
|
||||
}
|
||||
};
|
||||
|
||||
let request = Request::new(DeleteVersionsRequest {
|
||||
disk: self.endpoint.to_string(),
|
||||
volume: volume.to_string(),
|
||||
@@ -368,11 +390,27 @@ impl DiskAPI for RemoteDisk {
|
||||
});
|
||||
|
||||
// TODO: use Error not string
|
||||
let response = client.delete_versions(request).await?.into_inner();
|
||||
|
||||
let response = match client.delete_versions(request).await {
|
||||
Ok(response) => response,
|
||||
Err(err) => {
|
||||
let mut errors = Vec::with_capacity(versions.len());
|
||||
for _ in 0..versions.len() {
|
||||
errors.push(Some(Error::other(err.to_string())));
|
||||
}
|
||||
return errors;
|
||||
}
|
||||
};
|
||||
|
||||
let response = response.into_inner();
|
||||
if !response.success {
|
||||
return Err(response.error.unwrap_or_default().into());
|
||||
let mut errors = Vec::with_capacity(versions.len());
|
||||
for _ in 0..versions.len() {
|
||||
errors.push(Some(Error::other(response.error.clone().map(|e| e.error_info).unwrap_or_default())));
|
||||
}
|
||||
return errors;
|
||||
}
|
||||
let errors = response
|
||||
response
|
||||
.errors
|
||||
.iter()
|
||||
.map(|error| {
|
||||
@@ -382,9 +420,7 @@ impl DiskAPI for RemoteDisk {
|
||||
Some(Error::other(error.to_string()))
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(errors)
|
||||
.collect()
|
||||
}
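
    // Hedged sketch (not part of the diff): the early-return branches above all build
    // the same per-version error vector; a small helper (hypothetical name
    // `errs_for_all`) would remove the repetition:
    //
    //     fn errs_for_all(n: usize, err: impl ToString) -> Vec<Option<Error>> {
    //         let msg = err.to_string();
    //         (0..n).map(|_| Some(Error::other(msg.clone()))).collect()
    //     }
    //
    //     // e.g. `return errs_for_all(versions.len(), err);`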
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
use crate::batch_processor::{AsyncBatchProcessor, get_global_processors};
|
||||
use crate::bitrot::{create_bitrot_reader, create_bitrot_writer};
|
||||
use crate::bucket::lifecycle::lifecycle::TRANSITION_COMPLETE;
|
||||
use crate::bucket::replication::check_replicate_delete;
|
||||
use crate::bucket::versioning::VersioningApi;
|
||||
use crate::bucket::versioning_sys::BucketVersioningSys;
|
||||
use crate::client::{object_api_utils::extract_etag, transition_api::ReaderImpl};
|
||||
@@ -29,11 +30,12 @@ use crate::disk::{
|
||||
};
|
||||
use crate::erasure_coding;
|
||||
use crate::erasure_coding::bitrot_verify;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::error::{Error, Result, is_err_version_not_found};
|
||||
use crate::error::{ObjectApiError, is_err_object_not_found};
|
||||
use crate::global::{GLOBAL_LocalNodeName, GLOBAL_TierConfigMgr};
|
||||
use crate::store_api::ListObjectVersionsInfo;
|
||||
use crate::store_api::{ListPartsInfo, ObjectToDelete};
|
||||
use crate::store_api::{ListPartsInfo, ObjectOptions, ObjectToDelete};
|
||||
use crate::store_api::{ObjectInfoOrErr, WalkOptions};
|
||||
use crate::{
|
||||
bucket::lifecycle::bucket_lifecycle_ops::{gen_transition_objname, get_transitioned_object_reader, put_restore_opts},
|
||||
cache_value::metacache_set::{ListPathRawOptions, list_path_raw},
|
||||
@@ -50,7 +52,7 @@ use crate::{
|
||||
store_api::{
|
||||
BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec,
|
||||
ListMultipartsInfo, ListObjectsV2Info, MakeBucketOptions, MultipartInfo, MultipartUploadResult, ObjectIO, ObjectInfo,
|
||||
ObjectOptions, PartInfo, PutObjReader, StorageAPI,
|
||||
PartInfo, PutObjReader, StorageAPI,
|
||||
},
|
||||
store_init::load_format_erasure,
|
||||
};
|
||||
@@ -64,16 +66,16 @@ use md5::{Digest as Md5Digest, Md5};
|
||||
use rand::{Rng, seq::SliceRandom};
|
||||
use regex::Regex;
|
||||
use rustfs_common::heal_channel::{DriveState, HealChannelPriority, HealItemType, HealOpts, HealScanMode, send_heal_disk};
|
||||
use rustfs_filemeta::headers::RESERVED_METADATA_PREFIX_LOWER;
|
||||
use rustfs_filemeta::{
|
||||
FileInfo, FileMeta, FileMetaShallowVersion, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, ObjectPartInfo,
|
||||
RawFileInfo, file_info_from_raw,
|
||||
headers::{AMZ_OBJECT_TAGGING, AMZ_STORAGE_CLASS},
|
||||
merge_file_meta_versions,
|
||||
RawFileInfo, ReplicationStatusType, VersionPurgeStatusType, file_info_from_raw, merge_file_meta_versions,
|
||||
};
|
||||
use rustfs_lock::NamespaceLockManager;
|
||||
use rustfs_lock::fast_lock::types::LockResult;
|
||||
use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
|
||||
use rustfs_rio::{EtagResolvable, HashReader, TryGetIndex as _, WarpReader};
|
||||
use rustfs_utils::http::headers::AMZ_OBJECT_TAGGING;
|
||||
use rustfs_utils::http::headers::AMZ_STORAGE_CLASS;
|
||||
use rustfs_utils::http::headers::RESERVED_METADATA_PREFIX_LOWER;
|
||||
use rustfs_utils::{
|
||||
HashAlgorithm,
|
||||
crypto::{base64_decode, base64_encode, hex},
|
||||
@@ -102,6 +104,7 @@ use tokio::{
|
||||
sync::mpsc::{self, Sender},
|
||||
time::interval,
|
||||
};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::error;
|
||||
use tracing::{debug, info, warn};
|
||||
use uuid::Uuid;
|
||||
@@ -147,6 +150,21 @@ impl SetDisks {
|
||||
set_endpoints,
|
||||
})
|
||||
}
|
||||
fn format_lock_error(&self, bucket: &str, object: &str, mode: &str, err: &LockResult) -> String {
|
||||
match err {
|
||||
LockResult::Timeout => {
|
||||
format!("{mode} lock acquisition timed out on {bucket}/{object} (owner={})", self.locker_owner)
|
||||
}
|
||||
LockResult::Conflict {
|
||||
current_owner,
|
||||
current_mode,
|
||||
} => format!(
|
||||
"{mode} lock conflicted on {bucket}/{object}: held by {current_owner} as {:?}",
|
||||
current_mode
|
||||
),
|
||||
LockResult::Acquired => format!("unexpected lock state while acquiring {mode} lock on {bucket}/{object}"),
|
||||
}
|
||||
}
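
    // Hedged usage note (not part of the diff): `format_lock_error` is meant for the
    // lock-acquisition failure paths below, e.g.
    //
    //     .map_err(|e| {
    //         let message = self.format_lock_error(bucket, object, "write", &e);
    //         error!("{message}");
    //         DiskError::other(message)
    //     })?;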
|
||||
async fn get_disks_internal(&self) -> Vec<Option<DiskStore>> {
|
||||
let rl = self.disks.read().await;
|
||||
|
||||
@@ -2461,25 +2479,38 @@ impl SetDisks {

            // Check if lock is already held
            let key = rustfs_lock::fast_lock::types::ObjectKey::new(bucket, object);
            let mut reuse_existing_lock = false;
            if let Some(lock_info) = self.fast_lock_manager.get_lock_info(&key) {
                warn!("Lock already exists for object {}: {:?}", object, lock_info);
                if lock_info.owner.as_ref() == self.locker_owner.as_str()
                    && matches!(lock_info.mode, rustfs_lock::fast_lock::types::LockMode::Exclusive)
                {
                    reuse_existing_lock = true;
                    debug!("Reusing existing exclusive lock for object {} held by {}", object, self.locker_owner);
                } else {
                    warn!("Lock already exists for object {}: {:?}", object, lock_info);
                }
            } else {
                info!("No existing lock found for object {}", object);
            }

            let start_time = std::time::Instant::now();
            let lock_result = self
                .fast_lock_manager
                .acquire_write_lock(bucket, object, self.locker_owner.as_str())
                .await
                .map_err(|e| {
                    let elapsed = start_time.elapsed();
                    error!("Failed to acquire write lock for heal operation after {:?}: {:?}", elapsed, e);
                    DiskError::other(format!("Failed to acquire write lock for heal operation: {e:?}"))
                })?;
            let elapsed = start_time.elapsed();
            info!("Successfully acquired write lock for object: {} in {:?}", object, elapsed);
            Some(lock_result)
            if reuse_existing_lock {
                None
            } else {
                let start_time = std::time::Instant::now();
                let lock_result = self
                    .fast_lock_manager
                    .acquire_write_lock(bucket, object, self.locker_owner.as_str())
                    .await
                    .map_err(|e| {
                        let elapsed = start_time.elapsed();
                        let message = self.format_lock_error(bucket, object, "write", &e);
                        error!("Failed to acquire write lock for heal operation after {:?}: {}", elapsed, message);
                        DiskError::other(message)
                    })?;
                let elapsed = start_time.elapsed();
                info!("Successfully acquired write lock for object: {} in {:?}", object, elapsed);
                Some(lock_result)
            }
        } else {
            info!("Skipping lock acquisition (no_lock=true)");
            None
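The heal path now checks whether it already holds an exclusive lock for the same owner and, if so, skips the second acquisition instead of deadlocking against itself. A toy sketch of that check-then-acquire idea, with a plain HashMap standing in for the fast lock manager (LockTable and its fields are illustrative):

use std::collections::HashMap;

// Toy stand-in for the fast lock manager: maps (bucket, object) -> owner of an
// exclusive lock. The real manager also tracks lock mode and shared holders.
struct LockTable { held: HashMap<(String, String), String> }

impl LockTable {
    // Returns true when the caller may proceed: either it already held the
    // exclusive lock (reuse, no second acquire) or a fresh acquire succeeded.
    fn reuse_or_acquire(&mut self, bucket: &str, object: &str, owner: &str) -> bool {
        let key = (bucket.to_string(), object.to_string());
        match self.held.get(&key) {
            Some(current) if current == owner => true, // reuse existing exclusive lock
            Some(_) => false,                          // conflicting owner: caller must retry
            None => { self.held.insert(key, owner.to_string()); true }
        }
    }
}

fn main() {
    let mut t = LockTable { held: HashMap::new() };
    assert!(t.reuse_or_acquire("b", "o", "node-1"));  // fresh acquire
    assert!(t.reuse_or_acquire("b", "o", "node-1"));  // reused, no self-deadlock
    assert!(!t.reuse_or_acquire("b", "o", "node-2")); // conflict reported to caller
}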
@@ -3079,19 +3110,14 @@ impl SetDisks {
        }
    }

    async fn heal_object_dir(
    /// Heal directory metadata assuming caller already holds the write lock for `(bucket, object)`.
    async fn heal_object_dir_locked(
        &self,
        bucket: &str,
        object: &str,
        dry_run: bool,
        remove: bool,
    ) -> Result<(HealResultItem, Option<DiskError>)> {
        let _write_lock_guard = self
            .fast_lock_manager
            .acquire_write_lock("", object, self.locker_owner.as_str())
            .await
            .map_err(|e| DiskError::other(format!("Failed to acquire write lock for heal directory operation: {e:?}")))?;

        let disks = {
            let disks = self.disks.read().await;
            disks.clone()
@@ -3186,6 +3212,27 @@ impl SetDisks {
        Ok((result, None))
    }

    #[allow(dead_code)]
    /// Heal directory metadata after acquiring the necessary write lock.
    async fn heal_object_dir(
        &self,
        bucket: &str,
        object: &str,
        dry_run: bool,
        remove: bool,
    ) -> Result<(HealResultItem, Option<DiskError>)> {
        let _write_lock_guard = self
            .fast_lock_manager
            .acquire_write_lock(bucket, object, self.locker_owner.as_str())
            .await
            .map_err(|e| {
                let message = self.format_lock_error(bucket, object, "write", &e);
                DiskError::other(message)
            })?;

        self.heal_object_dir_locked(bucket, object, dry_run, remove).await
    }

    async fn default_heal_result(
        &self,
        lfi: FileInfo,
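Splitting heal_object_dir into a locking wrapper plus a *_locked body is the usual way to serve both callers that already hold the guard and callers that do not, without acquiring twice. A minimal runnable sketch of the pattern, with a tokio Mutex standing in for the fast lock manager (Healer and its methods are illustrative):

use std::sync::Arc;
use tokio::sync::Mutex;

struct Healer { lock: Arc<Mutex<()>> }

impl Healer {
    // Body that assumes the caller already holds the lock.
    async fn heal_locked(&self, object: &str) -> String {
        format!("healed {object}")
    }

    // Wrapper that acquires the lock, then delegates to the locked body.
    async fn heal(&self, object: &str) -> String {
        let _guard = self.lock.lock().await;
        self.heal_locked(object).await
    }
}

#[tokio::main]
async fn main() {
    let h = Healer { lock: Arc::new(Mutex::new(())) };
    // Callers that already hold the guard call heal_locked directly;
    // everyone else goes through heal().
    println!("{}", h.heal("dir/").await);
}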
@@ -3450,9 +3497,9 @@ impl ObjectIO for SetDisks {
        let _read_lock_guard = if !opts.no_lock {
            Some(
                self.fast_lock_manager
                    .acquire_read_lock("", object, self.locker_owner.as_str())
                    .acquire_read_lock(bucket, object, self.locker_owner.as_str())
                    .await
                    .map_err(|_| Error::other("can not get lock. please retry".to_string()))?,
                    .map_err(|e| Error::other(self.format_lock_error(bucket, object, "read", &e)))?,
            )
        } else {
            None
@@ -3539,9 +3586,9 @@ impl ObjectIO for SetDisks {
        let _object_lock_guard = if !opts.no_lock {
            Some(
                self.fast_lock_manager
                    .acquire_write_lock("", object, self.locker_owner.as_str())
                    .acquire_write_lock(bucket, object, self.locker_owner.as_str())
                    .await
                    .map_err(|_| Error::other("can not get lock. please retry".to_string()))?,
                    .map_err(|e| Error::other(self.format_lock_error(bucket, object, "write", &e)))?,
            )
        } else {
            None
@@ -3766,7 +3813,7 @@ impl ObjectIO for SetDisks {
        }
    }

    fi.is_latest = true;
    fi.replication_state_internal = Some(opts.put_replication_state());

    // TODO: version support
    Ok(ObjectInfo::from_file_info(&fi, bucket, object, opts.versioned || opts.version_suspended))
@@ -3835,9 +3882,9 @@ impl StorageAPI for SetDisks {
        // Guard lock for source object metadata update
        let _lock_guard = self
            .fast_lock_manager
            .acquire_write_lock("", src_object, self.locker_owner.as_str())
            .acquire_write_lock(src_bucket, src_object, self.locker_owner.as_str())
            .await
            .map_err(|_| Error::other("can not get lock. please retry".to_string()))?;
            .map_err(|e| Error::other(self.format_lock_error(src_bucket, src_object, "write", &e)))?;

        let disks = self.get_disks_internal().await;

@@ -3932,12 +3979,12 @@ impl StorageAPI for SetDisks {
    }
    #[tracing::instrument(skip(self))]
    async fn delete_object_version(&self, bucket: &str, object: &str, fi: &FileInfo, force_del_marker: bool) -> Result<()> {
        // Guard lock for single object delete-version
        let _lock_guard = self
            .fast_lock_manager
            .acquire_write_lock("", object, self.locker_owner.as_str())
            .await
            .map_err(|_| Error::other("can not get lock. please retry".to_string()))?;
        // // Guard lock for single object delete-version
        // let _lock_guard = self
        //     .fast_lock_manager
        //     .acquire_write_lock("", object, self.locker_owner.as_str())
        //     .await
        //     .map_err(|_| Error::other("can not get lock. please retry".to_string()))?;
        let disks = self.get_disks(0, 0).await?;
        let write_quorum = disks.len() / 2 + 1;

@@ -3984,7 +4031,7 @@ impl StorageAPI for SetDisks {
        bucket: &str,
        objects: Vec<ObjectToDelete>,
        opts: ObjectOptions,
    ) -> Result<(Vec<DeletedObject>, Vec<Option<Error>>)> {
    ) -> (Vec<DeletedObject>, Vec<Option<Error>>) {
        // Default return values
        let mut del_objects = vec![DeletedObject::default(); objects.len()];

@@ -3994,33 +4041,34 @@ impl StorageAPI for SetDisks {
            del_errs.push(None)
        }

        // Use fast batch locking to acquire all locks atomically
        let mut _guards: HashMap<String, rustfs_lock::FastLockGuard> = HashMap::new();
        // Acquire locks in batch mode (best effort, matching previous behavior)
        let mut batch = rustfs_lock::BatchLockRequest::new(self.locker_owner.as_str()).with_all_or_nothing(false);
        let mut unique_objects: std::collections::HashSet<String> = std::collections::HashSet::new();

        // Collect unique object names
        for dobj in &objects {
            unique_objects.insert(dobj.object_name.clone());
            if unique_objects.insert(dobj.object_name.clone()) {
                batch = batch.add_write_lock(bucket, dobj.object_name.clone());
            }
        }

        // Acquire all locks in batch to prevent deadlocks
        for object_name in unique_objects {
            match self
                .fast_lock_manager
                .acquire_write_lock("", object_name.as_str(), self.locker_owner.as_str())
                .await
            {
                Ok(guard) => {
                    _guards.insert(object_name, guard);
                }
                Err(_) => {
                    // Mark all operations on this object as failed
                    for (i, dobj) in objects.iter().enumerate() {
                        if dobj.object_name == object_name {
                            del_errs[i] = Some(Error::other("can not get lock. please retry"));
                        }
                    }
                }
        let batch_result = self.fast_lock_manager.acquire_locks_batch(batch).await;
        let locked_objects: HashSet<String> = batch_result
            .successful_locks
            .iter()
            .map(|key| key.object.as_ref().to_string())
            .collect();
        let _lock_guards = batch_result.guards;

        let failed_map: HashMap<(String, String), rustfs_lock::fast_lock::LockResult> = batch_result
            .failed_locks
            .into_iter()
            .map(|(key, err)| ((key.bucket.as_ref().to_string(), key.object.as_ref().to_string()), err))
            .collect();

        // Mark failures for objects that could not be locked
        for (i, dobj) in objects.iter().enumerate() {
            if let Some(err) = failed_map.get(&(bucket.to_string(), dobj.object_name.clone())) {
                let message = self.format_lock_error(bucket, dobj.object_name.as_str(), "write", err);
                del_errs[i] = Some(Error::other(message));
            }
        }

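The per-object acquire loop is replaced by one best-effort batch request (with_all_or_nothing(false)): the manager deduplicates keys, takes them in a consistent order to avoid lock-order deadlocks, and reports successes and failures per key. A toy sketch of that contract, using a BTreeSet for the ordering and a HashMap as the lock table (all names here are illustrative, not the rustfs_lock API):

use std::collections::{BTreeSet, HashMap};

// Toy batch lock: keys are locked in sorted order (deadlock avoidance) and the
// request is best-effort, mirroring with_all_or_nothing(false) in the diff:
// failures are reported per key instead of aborting the whole batch.
fn acquire_locks_batch<'a>(
    held: &mut HashMap<String, String>, // key -> current owner
    owner: &str,
    keys: &[&'a str],
) -> (Vec<&'a str>, Vec<&'a str>) {
    let unique: BTreeSet<&str> = keys.iter().copied().collect(); // dedup + total order
    let (mut ok, mut failed) = (Vec::new(), Vec::new());
    for key in unique {
        match held.get(key) {
            Some(o) if o != owner => failed.push(key),
            _ => {
                held.insert(key.to_string(), owner.to_string());
                ok.push(key);
            }
        }
    }
    (ok, failed)
}

fn main() {
    let mut held = HashMap::new();
    held.insert("obj-b".to_string(), "node-2".to_string());
    let (ok, failed) = acquire_locks_batch(&mut held, "node-1", &["obj-b", "obj-a", "obj-a"]);
    assert_eq!(ok, vec!["obj-a"]);
    assert_eq!(failed, vec!["obj-b"]); // caller marks these objects' deletes as failed
}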
@@ -4035,6 +4083,7 @@ impl StorageAPI for SetDisks {
                name: dobj.object_name.clone(),
                version_id: dobj.version_id,
                idx: i,
                replication_state_internal: Some(dobj.replication_state()),
                ..Default::default()
            };

@@ -4072,21 +4121,23 @@ impl StorageAPI for SetDisks {
            if vr.deleted {
                del_objects[i] = DeletedObject {
                    delete_marker: vr.deleted,
                    delete_marker_version_id: vr.version_id.map(|v| v.to_string()),
                    delete_marker_version_id: vr.version_id,
                    delete_marker_mtime: vr.mod_time,
                    object_name: vr.name.clone(),
                    replication_state: vr.replication_state_internal.clone(),
                    ..Default::default()
                }
            } else {
                del_objects[i] = DeletedObject {
                    object_name: vr.name.clone(),
                    version_id: vr.version_id.map(|v| v.to_string()),
                    version_id: vr.version_id,
                    replication_state: vr.replication_state_internal.clone(),
                    ..Default::default()
                }
            }

            // Only add to vers_map if we hold the lock
            if _guards.contains_key(&dobj.object_name) {
            if locked_objects.contains(&dobj.object_name) {
                vers_map.insert(&dobj.object_name, v);
            }
        }
@@ -4118,32 +4169,80 @@ impl StorageAPI for SetDisks {
                if let Some(disk) = disk {
                    disk.delete_versions(bucket, vers, DeleteOptions::default()).await
                } else {
                    Err(DiskError::DiskNotFound)
                    let mut errs = Vec::with_capacity(vers.len());
                    for _ in 0..vers.len() {
                        errs.push(Some(DiskError::DiskNotFound));
                    }
                    errs
                }
            });
        }

        let results = join_all(futures).await;

        for errs in results.into_iter().flatten() {
            // TODO: handle err reduceWriteQuorumErrs
            for err in errs.iter().flatten() {
                warn!("result err {:?}", err);
        let mut del_obj_errs: Vec<Vec<Option<DiskError>>> = vec![vec![None; objects.len()]; disks.len()];

        // For each disk, delete all objects
        for (disk_idx, errors) in results.into_iter().enumerate() {
            // Collect the deletion results for every object
            for idx in 0..vers.len() {
                if errors[idx].is_some() {
                    for fi in vers[idx].versions.iter() {
                        del_obj_errs[disk_idx][fi.idx] = errors[idx].clone();
                    }
                }
            }
        }

        Ok((del_objects, del_errs))
        for obj_idx in 0..objects.len() {
            let mut disk_err = vec![None; disks.len()];

            for disk_idx in 0..disks.len() {
                if del_obj_errs[disk_idx][obj_idx].is_some() {
                    disk_err[disk_idx] = del_obj_errs[disk_idx][obj_idx].clone();
                }
            }

            let mut has_err = reduce_write_quorum_errs(&disk_err, OBJECT_OP_IGNORED_ERRS, disks.len() / 2 + 1);
            if let Some(err) = has_err.clone() {
                let er = err.into();
                if (is_err_object_not_found(&er) || is_err_version_not_found(&er)) && !del_objects[obj_idx].delete_marker {
                    has_err = None;
                }
            } else {
                del_objects[obj_idx].found = true;
            }

            if let Some(err) = has_err {
                if del_objects[obj_idx].version_id.is_some() {
                    del_errs[obj_idx] = Some(to_object_err(
                        err.into(),
                        vec![
                            bucket,
                            &objects[obj_idx].object_name.clone(),
                            &objects[obj_idx].version_id.unwrap_or_default().to_string(),
                        ],
                    ));
                } else {
                    del_errs[obj_idx] = Some(to_object_err(err.into(), vec![bucket, &objects[obj_idx].object_name.clone()]));
                }
            }
        }

        // TODO: add_partial

        (del_objects, del_errs)
    }

    #[tracing::instrument(skip(self))]
    async fn delete_object(&self, bucket: &str, object: &str, opts: ObjectOptions) -> Result<ObjectInfo> {
    async fn delete_object(&self, bucket: &str, object: &str, mut opts: ObjectOptions) -> Result<ObjectInfo> {
        // Guard lock for single object delete
        let _lock_guard = if !opts.delete_prefix {
            Some(
                self.fast_lock_manager
                    .acquire_write_lock("", object, self.locker_owner.as_str())
                    .acquire_write_lock(bucket, object, self.locker_owner.as_str())
                    .await
                    .map_err(|_| Error::other("can not get lock. please retry".to_string()))?,
                    .map_err(|e| Error::other(self.format_lock_error(bucket, object, "write", &e)))?,
            )
        } else {
            None
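reduce_write_quorum_errs collapses one error slot per disk into a single verdict: the delete stands when at least write_quorum = disks/2 + 1 disks succeeded, and otherwise the dominant non-ignorable error surfaces. A self-contained sketch of that reduction, using string errors in place of DiskError (the ignored list models OBJECT_OP_IGNORED_ERRS):

// Return None when enough disks succeeded to satisfy the write quorum;
// otherwise surface the first error that is not on the ignore list.
fn reduce_write_quorum_errs(errs: &[Option<String>], ignored: &[&str], quorum: usize) -> Option<String> {
    let ok = errs.iter().filter(|e| e.is_none()).count();
    if ok >= quorum {
        return None;
    }
    errs.iter()
        .filter_map(|e| e.clone())
        .find(|e| !ignored.contains(&e.as_str()))
}

fn main() {
    let disks = 4;              // 4 disks in the erasure set
    let quorum = disks / 2 + 1; // write quorum = 3
    let io = Some("io error".to_string());

    // Three successes out of four meet quorum; the lone failure is absorbed.
    assert_eq!(reduce_write_quorum_errs(&[None, None, None, io.clone()], &[], quorum), None);

    // Two failures drop success below quorum; the error surfaces to the caller.
    assert!(reduce_write_quorum_errs(&[None, None, io.clone(), io], &[], quorum).is_some());
}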
@@ -4156,17 +4255,55 @@ impl StorageAPI for SetDisks {
            return Ok(ObjectInfo::default());
        }

        let (oi, write_quorum) = match self.get_object_info_and_quorum(bucket, object, &opts).await {
            Ok((oi, wq)) => (oi, wq),
            Err(e) => {
                return Err(to_object_err(e, vec![bucket, object]));
            }
        let (mut goi, write_quorum, gerr) = match self.get_object_info_and_quorum(bucket, object, &opts).await {
            Ok((oi, wq)) => (oi, wq, None),
            Err(e) => (ObjectInfo::default(), 0, Some(e)),
        };

        let mark_delete = oi.version_id.is_some();
        let otd = ObjectToDelete {
            object_name: object.to_string(),
            version_id: opts
                .version_id
                .clone()
                .map(|v| Uuid::parse_str(v.as_str()).ok().unwrap_or_default()),
            ..Default::default()
        };

        let version_found = if opts.delete_marker { gerr.is_none() } else { true };

        let dsc = check_replicate_delete(bucket, &otd, &goi, &opts, gerr.map(|e| e.to_string())).await;

        if dsc.replicate_any() {
            opts.set_delete_replication_state(dsc);
            goi.replication_decision = opts
                .delete_replication
                .as_ref()
                .map(|v| v.replicate_decision_str.clone())
                .unwrap_or_default();
        }

        let mut mark_delete = goi.version_id.is_some();

        let mut delete_marker = opts.versioned;

        if opts.version_id.is_some() {
            if version_found && opts.delete_marker_replication_status() == ReplicationStatusType::Replica {
                mark_delete = false;
            }

            if opts.version_purge_status().is_empty() && opts.delete_marker_replication_status().is_empty() {
                mark_delete = false;
            }

            if opts.version_purge_status() != VersionPurgeStatusType::Complete {
                mark_delete = false;
            }

            if version_found && (goi.version_purge_status.is_empty() || !goi.delete_marker) {
                delete_marker = false;
            }
        }

        let mod_time = if let Some(mt) = opts.mod_time {
            mt
        } else {
@@ -4185,7 +4322,8 @@ impl StorageAPI for SetDisks {
            deleted: delete_marker,
            mark_deleted: mark_delete,
            mod_time: Some(mod_time),
            ..Default::default() // TODO: replication
            replication_state_internal: opts.delete_replication.clone(),
            ..Default::default() // TODO: Transition
        };

        fi.set_tier_free_version_id(&find_vid.to_string());
@@ -4212,88 +4350,27 @@ impl StorageAPI for SetDisks {
        let version_id = opts.version_id.as_ref().and_then(|v| Uuid::parse_str(v).ok());

        // Create a single object deletion request
        let mut vr = FileInfo {
        let mut dfi = FileInfo {
            name: object.to_string(),
            version_id: opts.version_id.as_ref().and_then(|v| Uuid::parse_str(v).ok()),
            mark_deleted: mark_delete,
            deleted: delete_marker,
            mod_time: Some(mod_time),
            replication_state_internal: opts.delete_replication.clone(),
            ..Default::default()
        };

        // Handle versioning
        let (suspended, versioned) = (opts.version_suspended, opts.versioned);
        if opts.version_id.is_none() && (suspended || versioned) {
            vr.mod_time = Some(OffsetDateTime::now_utc());
            vr.deleted = true;
            if versioned {
                vr.version_id = Some(Uuid::new_v4());
            }
        dfi.set_tier_free_version_id(&find_vid.to_string());

        if opts.skip_free_version {
            dfi.set_skip_tier_free_version();
        }

        let vers = vec![FileInfoVersions {
            name: vr.name.clone(),
            versions: vec![vr.clone()],
            ..Default::default()
        }];
        self.delete_object_version(bucket, object, &dfi, opts.delete_marker)
            .await
            .map_err(|e| to_object_err(e, vec![bucket, object]))?;

        let disks = self.disks.read().await;
        let disks = disks.clone();
        let write_quorum = disks.len() / 2 + 1;

        let mut futures = Vec::with_capacity(disks.len());
        let mut errs = Vec::with_capacity(disks.len());

        for disk in disks.iter() {
            let vers = vers.clone();
            futures.push(async move {
                if let Some(disk) = disk {
                    disk.delete_versions(bucket, vers, DeleteOptions::default()).await
                } else {
                    Err(DiskError::DiskNotFound)
                }
            });
        }

        let results = join_all(futures).await;

        for result in results {
            match result {
                Ok(disk_errs) => {
                    // Handle errors from disk operations
                    for err in disk_errs.iter().flatten() {
                        warn!("delete_object disk error: {:?}", err);
                    }
                    errs.push(None);
                }
                Err(e) => {
                    errs.push(Some(e));
                }
            }
        }

        // Check write quorum
        if let Some(err) = reduce_write_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, write_quorum) {
            return Err(to_object_err(err.into(), vec![bucket, object]));
        }

        // Create result ObjectInfo
        let result_info = if vr.deleted {
            ObjectInfo {
                bucket: bucket.to_string(),
                name: object.to_string(),
                delete_marker: true,
                mod_time: vr.mod_time,
                version_id: vr.version_id,
                ..Default::default()
            }
        } else {
            ObjectInfo {
                bucket: bucket.to_string(),
                name: object.to_string(),
                version_id: vr.version_id,
                ..Default::default()
            }
        };

        Ok(result_info)
        Ok(ObjectInfo::from_file_info(&dfi, bucket, object, opts.versioned || opts.version_suspended))
    }

    #[tracing::instrument(skip(self))]
@@ -4323,15 +4400,26 @@ impl StorageAPI for SetDisks {
        unimplemented!()
    }

    async fn walk(
        self: Arc<Self>,
        _rx: CancellationToken,
        _bucket: &str,
        _prefix: &str,
        _result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
        _opts: WalkOptions,
    ) -> Result<()> {
        unimplemented!()
    }

    #[tracing::instrument(skip(self))]
    async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
        // Acquire a shared read-lock to protect consistency during info fetch
        let _read_lock_guard = if !opts.no_lock {
            Some(
                self.fast_lock_manager
                    .acquire_read_lock("", object, self.locker_owner.as_str())
                    .acquire_read_lock(bucket, object, self.locker_owner.as_str())
                    .await
                    .map_err(|_| Error::other("can not get lock. please retry".to_string()))?,
                    .map_err(|e| Error::other(self.format_lock_error(bucket, object, "read", &e)))?,
            )
        } else {
            None
@@ -4371,9 +4459,9 @@ impl StorageAPI for SetDisks {
        let _lock_guard = if !opts.no_lock {
            Some(
                self.fast_lock_manager
                    .acquire_write_lock("", object, self.locker_owner.as_str())
                    .acquire_write_lock(bucket, object, self.locker_owner.as_str())
                    .await
                    .map_err(|_| Error::other("can not get lock. please retry".to_string()))?,
                    .map_err(|e| Error::other(self.format_lock_error(bucket, object, "write", &e)))?,
            )
        } else {
            None
@@ -4470,7 +4558,6 @@ impl StorageAPI for SetDisks {
        };

        // Acquire write-lock early; hold for the whole transition operation scope
        // let mut _lock_guard: Option<rustfs_lock::LockGuard> = None;
        // if !opts.no_lock {
        //     let guard_opt = self
        //         .namespace_lock
@@ -4492,8 +4579,11 @@ impl StorageAPI for SetDisks {
        }
        return Err(to_object_err(ERR_METHOD_NOT_ALLOWED, vec![bucket, object]));
        }*/
        // Normalize ETags by removing quotes before comparison (PR #592 compatibility)
        let transition_etag = rustfs_utils::path::trim_etag(&opts.transition.etag);
        let stored_etag = rustfs_utils::path::trim_etag(&extract_etag(&fi.metadata));
        if !opts.mod_time.expect("err").unix_timestamp() == fi.mod_time.as_ref().expect("err").unix_timestamp()
            || opts.transition.etag != extract_etag(&fi.metadata)
            || transition_etag != stored_etag
        {
            return Err(to_object_err(Error::from(DiskError::FileNotFound), vec![bucket, object]));
        }
@@ -4599,7 +4689,6 @@ impl StorageAPI for SetDisks {
    #[tracing::instrument(level = "debug", skip(self))]
    async fn restore_transitioned_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
        // Acquire write-lock early for the restore operation
        // let mut _lock_guard: Option<rustfs_lock::LockGuard> = None;
        // if !opts.no_lock {
        //     let guard_opt = self
        //         .namespace_lock
@@ -4684,7 +4773,6 @@ impl StorageAPI for SetDisks {
    #[tracing::instrument(level = "debug", skip(self))]
    async fn put_object_tags(&self, bucket: &str, object: &str, tags: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
        // Acquire write-lock for tag update (metadata write)
        // let mut _lock_guard: Option<rustfs_lock::LockGuard> = None;
        // if !opts.no_lock {
        //     let guard_opt = self
        //         .namespace_lock
@@ -4949,7 +5037,7 @@ impl StorageAPI for SetDisks {
        // Extract storage class from metadata, default to STANDARD if not found
        let storage_class = fi
            .metadata
            .get(rustfs_filemeta::headers::AMZ_STORAGE_CLASS)
            .get(AMZ_STORAGE_CLASS)
            .cloned()
            .unwrap_or_else(|| storageclass::STANDARD.to_string());

@@ -5345,7 +5433,6 @@ impl StorageAPI for SetDisks {
        // let disks = Self::shuffle_disks(&disks, &fi.erasure.distribution);

        // Acquire per-object exclusive lock via RAII guard. It auto-releases asynchronously on drop.
        // let mut _object_lock_guard: Option<rustfs_lock::LockGuard> = None;
        if let Some(http_preconditions) = opts.http_preconditions.clone() {
            // if !opts.no_lock {
            //     let guard_opt = self
@@ -5425,10 +5512,13 @@ impl StorageAPI for SetDisks {
            let ext_part = &curr_fi.parts[i];
            tracing::info!(target:"rustfs_ecstore::set_disk", part_number = p.part_num, part_size = ext_part.size, part_actual_size = ext_part.actual_size, "Completing multipart part");

            if p.etag != Some(ext_part.etag.clone()) {
            // Normalize ETags by removing quotes before comparison (PR #592 compatibility)
            let client_etag = p.etag.as_ref().map(|e| rustfs_utils::path::trim_etag(e));
            let stored_etag = Some(rustfs_utils::path::trim_etag(&ext_part.etag));
            if client_etag != stored_etag {
                error!(
                    "complete_multipart_upload etag err {:?}, part_id={}, bucket={}, object={}",
                    p.etag, p.part_num, bucket, object
                    "complete_multipart_upload etag err client={:?}, stored={:?}, part_id={}, bucket={}, object={}",
                    p.etag, ext_part.etag, p.part_num, bucket, object
                );
                return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
            }
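Clients may send an ETag either quoted ("abc") or bare (abc), so both sides are normalized before the multipart-complete comparison. A minimal runnable sketch, assuming trim_etag simply strips surrounding double quotes (the real rustfs_utils::path::trim_etag may do more):

// Assumed behavior of trim_etag for this sketch: strip surrounding quotes.
fn trim_etag(etag: &str) -> &str {
    etag.trim_matches('"')
}

fn main() {
    let client = Some("\"9bb58f26192e4ba00f01e2e7b136bbd8\"".to_string());
    let stored = "9bb58f26192e4ba00f01e2e7b136bbd8".to_string();

    // Raw comparison fails on the quoting difference alone...
    assert_ne!(client.as_deref(), Some(stored.as_str()));

    // ...while the normalized comparison matches, as in the multipart-complete path.
    let client_etag = client.as_deref().map(trim_etag);
    let stored_etag = Some(trim_etag(&stored));
    assert_eq!(client_etag, stored_etag);
}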
@@ -5640,18 +5730,36 @@ impl StorageAPI for SetDisks {
        opts: &HealOpts,
    ) -> Result<(HealResultItem, Option<Error>)> {
        let _write_lock_guard = if !opts.no_lock {
            Some(
                self.fast_lock_manager
                    .acquire_write_lock("", object, self.locker_owner.as_str())
                    .await
                    .map_err(|e| Error::other(format!("Failed to acquire write lock for heal operation: {e:?}")))?,
            )
            let key = rustfs_lock::fast_lock::types::ObjectKey::new(bucket, object);
            let mut skip_lock = false;
            if let Some(lock_info) = self.fast_lock_manager.get_lock_info(&key) {
                if lock_info.owner.as_ref() == self.locker_owner.as_str()
                    && matches!(lock_info.mode, rustfs_lock::fast_lock::types::LockMode::Exclusive)
                {
                    debug!(
                        "Reusing existing exclusive lock for heal operation on {}/{} held by {}",
                        bucket, object, self.locker_owner
                    );
                    skip_lock = true;
                }
            }

            if skip_lock {
                None
            } else {
                Some(
                    self.fast_lock_manager
                        .acquire_write_lock(bucket, object, self.locker_owner.as_str())
                        .await
                        .map_err(|e| Error::other(self.format_lock_error(bucket, object, "write", &e)))?,
                )
            }
        } else {
            None
        };

        if has_suffix(object, SLASH_SEPARATOR) {
            let (result, err) = self.heal_object_dir(bucket, object, opts.dry_run, opts.remove).await?;
            let (result, err) = self.heal_object_dir_locked(bucket, object, opts.dry_run, opts.remove).await?;
            return Ok((result, err.map(|e| e.into())));
        }

@@ -17,7 +17,7 @@ use std::{collections::HashMap, sync::Arc};

use crate::disk::error_reduce::count_errs;
use crate::error::{Error, Result};
use crate::store_api::ListPartsInfo;
use crate::store_api::{ListPartsInfo, ObjectInfoOrErr, WalkOptions};
use crate::{
    disk::{
        DiskAPI, DiskInfo, DiskOption, DiskStore,
@@ -48,6 +48,7 @@ use rustfs_filemeta::FileInfo;
use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
use rustfs_utils::{crc_hash, path::path_join_buf, sip_hash};
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use uuid::Uuid;

use tokio::sync::broadcast::{Receiver, Sender};
@@ -55,8 +56,6 @@ use tokio::time::Duration;
use tracing::warn;
use tracing::{error, info};

use crate::lock_utils::create_unique_clients;

#[derive(Debug, Clone)]
pub struct Sets {
    pub id: Uuid,
@@ -163,8 +162,6 @@ impl Sets {
            }
        }

        let _lock_clients = create_unique_clients(&set_endpoints).await?;

        // Note: write_quorum was used for the old lock system, no longer needed with FastLock
        let _write_quorum = set_drive_count - parity_count;
        // Create fast lock manager for high performance
@@ -459,6 +456,17 @@ impl StorageAPI for Sets {
        unimplemented!()
    }

    async fn walk(
        self: Arc<Self>,
        _rx: CancellationToken,
        _bucket: &str,
        _prefix: &str,
        _result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
        _opts: WalkOptions,
    ) -> Result<()> {
        unimplemented!()
    }

    #[tracing::instrument(skip(self))]
    async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
        self.get_disks_by_key(object).get_object_info(bucket, object, opts).await
@@ -543,7 +551,7 @@ impl StorageAPI for Sets {
        bucket: &str,
        objects: Vec<ObjectToDelete>,
        opts: ObjectOptions,
    ) -> Result<(Vec<DeletedObject>, Vec<Option<Error>>)> {
    ) -> (Vec<DeletedObject>, Vec<Option<Error>>) {
        // Default return value
        let mut del_objects = vec![DeletedObject::default(); objects.len()];

@@ -576,38 +584,11 @@ impl StorageAPI for Sets {
            }
        }

        // let semaphore = Arc::new(Semaphore::new(num_cpus::get()));
        // let mut jhs = Vec::with_capacity(semaphore.available_permits());

        // for (k, v) in set_obj_map {
        //     let disks = self.get_disks(k);
        //     let semaphore = semaphore.clone();
        //     let opts = opts.clone();
        //     let bucket = bucket.to_string();

        //     let jh = tokio::spawn(async move {
        //         let _permit = semaphore.acquire().await.unwrap();
        //         let objs: Vec<ObjectToDelete> = v.iter().map(|v| v.obj.clone()).collect();
        //         disks.delete_objects(&bucket, objs, opts).await
        //     });
        //     jhs.push(jh);
        // }

        // let mut results = Vec::with_capacity(jhs.len());
        // for jh in jhs {
        //     results.push(jh.await?.unwrap());
        // }

        // for (dobjects, errs) in results {
        //     del_objects.extend(dobjects);
        //     del_errs.extend(errs);
        // }

        // TODO: Implement concurrency
        // TODO: concurrency
        for (k, v) in set_obj_map {
            let disks = self.get_disks(k);
            let objs: Vec<ObjectToDelete> = v.iter().map(|v| v.obj.clone()).collect();
            let (dobjects, errs) = disks.delete_objects(bucket, objs, opts.clone()).await?;
            let (dobjects, errs) = disks.delete_objects(bucket, objs, opts.clone()).await;

            for (i, err) in errs.into_iter().enumerate() {
                let obj = v.get(i).unwrap();
@@ -618,7 +599,7 @@ impl StorageAPI for Sets {
            }
        }

        Ok((del_objects, del_errs))
        (del_objects, del_errs)
    }

    async fn list_object_parts(

@@ -34,7 +34,9 @@ use crate::global::{
use crate::notification_sys::get_global_notification_sys;
use crate::pools::PoolMeta;
use crate::rebalance::RebalanceMeta;
use crate::store_api::{ListMultipartsInfo, ListObjectVersionsInfo, ListPartsInfo, MultipartInfo, ObjectIO};
use crate::store_api::{
    ListMultipartsInfo, ListObjectVersionsInfo, ListPartsInfo, MultipartInfo, ObjectIO, ObjectInfoOrErr, WalkOptions,
};
use crate::store_init::{check_disk_fatal_errs, ec_drives_no_config};
use crate::{
    bucket::{lifecycle::bucket_lifecycle_ops::TransitionState, metadata::BucketMetadata},
@@ -68,8 +70,9 @@ use std::time::SystemTime;
use std::{collections::HashMap, sync::Arc, time::Duration};
use time::OffsetDateTime;
use tokio::select;
use tokio::sync::{RwLock, broadcast};
use tokio::sync::RwLock;
use tokio::time::sleep;
use tokio_util::sync::CancellationToken;
use tracing::{debug, info};
use tracing::{error, warn};
use uuid::Uuid;
@@ -109,7 +112,7 @@ pub struct ECStore {
impl ECStore {
    #[allow(clippy::new_ret_no_self)]
    #[tracing::instrument(level = "debug", skip(endpoint_pools))]
    pub async fn new(address: SocketAddr, endpoint_pools: EndpointServerPools) -> Result<Arc<Self>> {
    pub async fn new(address: SocketAddr, endpoint_pools: EndpointServerPools, ctx: CancellationToken) -> Result<Arc<Self>> {
        // let layouts = DisksLayout::from_volumes(endpoints.as_slice())?;

        let mut deployment_id = None;
@@ -251,7 +254,7 @@ impl ECStore {
        let wait_sec = 5;
        let mut exit_count = 0;
        loop {
            if let Err(err) = ec.init().await {
            if let Err(err) = ec.init(ctx.clone()).await {
                error!("init err: {}", err);
                error!("retry after {} second", wait_sec);
                sleep(Duration::from_secs(wait_sec)).await;
@@ -273,7 +276,7 @@ impl ECStore {
        Ok(ec)
    }

    pub async fn init(self: &Arc<Self>) -> Result<()> {
    pub async fn init(self: &Arc<Self>, rx: CancellationToken) -> Result<()> {
        GLOBAL_BOOT_TIME.get_or_init(|| async { SystemTime::now() }).await;

        if self.load_rebalance_meta().await.is_ok() {
@@ -317,18 +320,16 @@ impl ECStore {
        if !pool_indices.is_empty() {
            let idx = pool_indices[0];
            if endpoints.as_ref()[idx].endpoints.as_ref()[0].is_local {
                let (_tx, rx) = broadcast::channel(1);

                let store = self.clone();

                tokio::spawn(async move {
                    // wait 3 minutes for cluster init
                    tokio::time::sleep(Duration::from_secs(60 * 3)).await;

                    if let Err(err) = store.decommission(rx.resubscribe(), pool_indices.clone()).await {
                    if let Err(err) = store.decommission(rx.clone(), pool_indices.clone()).await {
                        if err == StorageError::DecommissionAlreadyRunning {
                            for i in pool_indices.iter() {
                                store.do_decommission_in_routine(rx.resubscribe(), *i).await;
                                store.do_decommission_in_routine(rx.clone(), *i).await;
                            }
                            return;
                        }
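ECStore::new and init now take a CancellationToken from the caller instead of fabricating an internal broadcast channel; the token is cloned into each background task and cancelled once at shutdown. A runnable sketch of the same wiring, using only tokio and tokio_util:

use std::time::Duration;
use tokio_util::sync::CancellationToken;

// Background job that exits promptly when the shared token is cancelled,
// mirroring how the decommission task receives ctx.clone().
async fn background_job(ctx: CancellationToken) {
    loop {
        tokio::select! {
            _ = ctx.cancelled() => {
                println!("job: shutting down");
                return;
            }
            _ = tokio::time::sleep(Duration::from_millis(50)) => {
                println!("job: tick");
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let ctx = CancellationToken::new();
    let handle = tokio::spawn(background_job(ctx.clone()));

    tokio::time::sleep(Duration::from_millis(120)).await;
    ctx.cancel(); // one cancel fans out to every clone, no resubscribe needed
    handle.await.unwrap();
}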
@@ -700,9 +701,13 @@ impl ECStore {
        opts: &ObjectOptions,
    ) -> Result<(PoolObjInfo, Vec<PoolErr>)> {
        let mut futures = Vec::new();

        for pool in self.pools.iter() {
            futures.push(pool.get_object_info(bucket, object, opts));
            let mut pool_opts = opts.clone();
            if !pool_opts.metadata_chg {
                pool_opts.version_id = None;
            }

            futures.push(async move { pool.get_object_info(bucket, object, &pool_opts).await });
        }

        let results = join_all(futures).await;
@@ -1351,6 +1356,17 @@ impl StorageAPI for ECStore {
        .await
    }

    async fn walk(
        self: Arc<Self>,
        rx: CancellationToken,
        bucket: &str,
        prefix: &str,
        result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
        opts: WalkOptions,
    ) -> Result<()> {
        self.walk_internal(rx, bucket, prefix, result, opts).await
    }

    #[tracing::instrument(skip(self))]
    async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
        check_object_args(bucket, object)?;
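walk streams its results through an mpsc channel and stops when the CancellationToken fires or the receiver hangs up. A minimal consumer-plus-producer sketch of that shape, with a stand-in producer in place of walk_internal (Item stands in for ObjectInfoOrErr):

use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;

#[derive(Debug)]
struct Item(u32); // stand-in for ObjectInfoOrErr

// Producer in the shape of walk(): push items until cancelled or the receiver closes.
async fn walk(rx: CancellationToken, result: mpsc::Sender<Item>) {
    for i in 0.. {
        tokio::select! {
            _ = rx.cancelled() => break,
            sent = result.send(Item(i)) => {
                if sent.is_err() { break; } // consumer hung up
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let cancel = CancellationToken::new();
    let (tx, mut rx) = mpsc::channel(16);
    tokio::spawn(walk(cancel.clone(), tx));

    // Consume a bounded prefix of the stream, then cancel the walker.
    while let Some(item) = rx.recv().await {
        if item.0 >= 4 {
            cancel.cancel();
            break;
        }
        println!("visited {item:?}");
    }
}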
@@ -1450,9 +1466,12 @@ impl StorageAPI for ECStore {
        let object = encode_dir_object(object);
        let object = object.as_str();

        let mut gopts = opts.clone();
        gopts.no_lock = true;

        // Look up which pool the object lives in
        let (mut pinfo, errs) = self
            .get_pool_info_existing_with_opts(bucket, object, &opts)
            .get_pool_info_existing_with_opts(bucket, object, &gopts)
            .await
            .map_err(|e| {
                if is_err_read_quorum(&e) {
@@ -1534,131 +1553,160 @@ impl StorageAPI for ECStore {

        // TODO: nslock

        let mut futures = Vec::with_capacity(objects.len());
        let mut futures = Vec::with_capacity(self.pools.len());

        for obj in objects.iter() {
            futures.push(async move {
                self.internal_get_pool_info_existing_with_opts(
                    bucket,
                    &obj.object_name,
                    &ObjectOptions {
                        no_lock: true,
                        ..Default::default()
                    },
                )
                .await
            });
        for pool in self.pools.iter() {
            futures.push(pool.delete_objects(bucket, objects.clone(), opts.clone()));
        }

        let results = join_all(futures).await;

        // let mut jhs = Vec::new();
        // let semaphore = Arc::new(Semaphore::new(num_cpus::get()));
        // let pools = Arc::new(self.pools.clone());
        for idx in 0..del_objects.len() {
            for (dels, errs) in results.iter() {
                if errs[idx].is_none() && dels[idx].found {
                    del_errs[idx] = None;
                    del_objects[idx] = dels[idx].clone();
                    break;
                }

                if del_errs[idx].is_none() {
                    del_errs[idx] = errs[idx].clone();
                    del_objects[idx] = dels[idx].clone();
                }
            }
        }

        del_objects.iter_mut().for_each(|v| {
            v.object_name = decode_dir_object(&v.object_name);
        });

        (del_objects, del_errs)

        // let mut futures = Vec::with_capacity(objects.len());

        // for obj in objects.iter() {
        //     let (semaphore, pools, bucket, object_name, opt) = (
        //         semaphore.clone(),
        //         pools.clone(),
        //         bucket.to_string(),
        //         obj.object_name.to_string(),
        //         ObjectOptions::default(),
        //     );

        //     let jh = tokio::spawn(async move {
        //         let _permit = semaphore.acquire().await.unwrap();
        //         self.internal_get_pool_info_existing_with_opts(pools.as_ref(), &bucket, &object_name, &opt)
        //             .await
        //     futures.push(async move {
        //         self.internal_get_pool_info_existing_with_opts(
        //             bucket,
        //             &obj.object_name,
        //             &ObjectOptions {
        //                 no_lock: true,
        //                 ..Default::default()
        //             },
        //         )
        //         .await
        //     });
        //     jhs.push(jh);
        // }
        // let mut results = Vec::new();
        // for jh in jhs {
        //     results.push(jh.await.unwrap());
        // }

        // Record which objects belong to which pool (pool_idx -> object indexes)
        let mut pool_obj_idx_map = HashMap::new();
        let mut orig_index_map = HashMap::new();
        // let results = join_all(futures).await;

        for (i, res) in results.into_iter().enumerate() {
            match res {
                Ok((pinfo, _)) => {
                    if let Some(obj) = objects.get(i) {
                        if pinfo.object_info.delete_marker && obj.version_id.is_none() {
                            del_objects[i] = DeletedObject {
                                delete_marker: pinfo.object_info.delete_marker,
                                delete_marker_version_id: pinfo.object_info.version_id.map(|v| v.to_string()),
                                object_name: decode_dir_object(&pinfo.object_info.name),
                                delete_marker_mtime: pinfo.object_info.mod_time,
                                ..Default::default()
                            };
                            continue;
                        }
        // // let mut jhs = Vec::new();
        // // let semaphore = Arc::new(Semaphore::new(num_cpus::get()));
        // // let pools = Arc::new(self.pools.clone());

                        if !pool_obj_idx_map.contains_key(&pinfo.index) {
                            pool_obj_idx_map.insert(pinfo.index, vec![obj.clone()]);
                        } else if let Some(val) = pool_obj_idx_map.get_mut(&pinfo.index) {
                            val.push(obj.clone());
                        }
        // // for obj in objects.iter() {
        // //     let (semaphore, pools, bucket, object_name, opt) = (
        // //         semaphore.clone(),
        // //         pools.clone(),
        // //         bucket.to_string(),
        // //         obj.object_name.to_string(),
        // //         ObjectOptions::default(),
        // //     );

                        if !orig_index_map.contains_key(&pinfo.index) {
                            orig_index_map.insert(pinfo.index, vec![i]);
                        } else if let Some(val) = orig_index_map.get_mut(&pinfo.index) {
                            val.push(i);
                        }
                    }
                }
                Err(e) => {
                    if !is_err_object_not_found(&e) && is_err_version_not_found(&e) {
                        del_errs[i] = Some(e)
                    }
        // //     let jh = tokio::spawn(async move {
        // //         let _permit = semaphore.acquire().await.unwrap();
        // //         self.internal_get_pool_info_existing_with_opts(pools.as_ref(), &bucket, &object_name, &opt)
        // //             .await
        // //     });
        // //     jhs.push(jh);
        // // }
        // // let mut results = Vec::new();
        // // for jh in jhs {
        // //     results.push(jh.await.unwrap());
        // // }

                    if let Some(obj) = objects.get(i) {
                        del_objects[i] = DeletedObject {
                            object_name: decode_dir_object(&obj.object_name),
                            version_id: obj.version_id.map(|v| v.to_string()),
                            ..Default::default()
                        }
                    }
                }
            }
        }
        // // Record which objects belong to which pool (pool_idx -> object indexes)
        // let mut pool_obj_idx_map = HashMap::new();
        // let mut orig_index_map = HashMap::new();

        if !pool_obj_idx_map.is_empty() {
            for (i, sets) in self.pools.iter().enumerate() {
                // Fetch the object indexes that belong to this pool index
                if let Some(objs) = pool_obj_idx_map.get(&i) {
                    // Look up the matching obj; in theory this is never none
                    // let objs: Vec<ObjectToDelete> = obj_idxs.iter().filter_map(|&idx| objects.get(idx).cloned()).collect();
        // for (i, res) in results.into_iter().enumerate() {
        //     match res {
        //         Ok((pinfo, _)) => {
        //             if let Some(obj) = objects.get(i) {
        //                 if pinfo.object_info.delete_marker && obj.version_id.is_none() {
        //                     del_objects[i] = DeletedObject {
        //                         delete_marker: pinfo.object_info.delete_marker,
        //                         delete_marker_version_id: pinfo.object_info.version_id.map(|v| v.to_string()),
        //                         object_name: decode_dir_object(&pinfo.object_info.name),
        //                         delete_marker_mtime: pinfo.object_info.mod_time,
        //                         ..Default::default()
        //                     };
        //                     continue;
        //                 }

                    if objs.is_empty() {
                        continue;
                    }
        //                 if !pool_obj_idx_map.contains_key(&pinfo.index) {
        //                     pool_obj_idx_map.insert(pinfo.index, vec![obj.clone()]);
        //                 } else if let Some(val) = pool_obj_idx_map.get_mut(&pinfo.index) {
        //                     val.push(obj.clone());
        //                 }

                    let (pdel_objs, perrs) = sets.delete_objects(bucket, objs.clone(), opts.clone()).await?;
        //                 if !orig_index_map.contains_key(&pinfo.index) {
        //                     orig_index_map.insert(pinfo.index, vec![i]);
        //                 } else if let Some(val) = orig_index_map.get_mut(&pinfo.index) {
        //                     val.push(i);
        //                 }
        //             }
        //         }
        //         Err(e) => {
        //             if !is_err_object_not_found(&e) && is_err_version_not_found(&e) {
        //                 del_errs[i] = Some(e)
        //             }

                    // Both maps are written together, so this lookup can never be none
                    let org_indexes = orig_index_map.get(&i).unwrap();
        //             if let Some(obj) = objects.get(i) {
        //                 del_objects[i] = DeletedObject {
        //                     object_name: decode_dir_object(&obj.object_name),
        //                     version_id: obj.version_id.map(|v| v.to_string()),
        //                     ..Default::default()
        //                 }
        //             }
        //         }
        //     }
        // }

                    // perrs is expected to line up one-to-one with obj_idxs
                    for (i, err) in perrs.into_iter().enumerate() {
                        let obj_idx = org_indexes[i];
        // if !pool_obj_idx_map.is_empty() {
        //     for (i, sets) in self.pools.iter().enumerate() {
        //         // Fetch the object indexes that belong to this pool index
        //         if let Some(objs) = pool_obj_idx_map.get(&i) {
        //             // Look up the matching obj; in theory this is never none
        //             // let objs: Vec<ObjectToDelete> = obj_idxs.iter().filter_map(|&idx| objects.get(idx).cloned()).collect();

                        if err.is_some() {
                            del_errs[obj_idx] = err;
                        }
        //             if objs.is_empty() {
        //                 continue;
        //             }

                        let mut dobj = pdel_objs.get(i).unwrap().clone();
                        dobj.object_name = decode_dir_object(&dobj.object_name);
        //             let (pdel_objs, perrs) = sets.delete_objects(bucket, objs.clone(), opts.clone()).await?;

                        del_objects[obj_idx] = dobj;
                    }
                }
            }
        }
        // // Both maps are written together, so this lookup can never be none
        // let org_indexes = orig_index_map.get(&i).unwrap();

        Ok((del_objects, del_errs))
        // // perrs is expected to line up one-to-one with obj_idxs
        // for (i, err) in perrs.into_iter().enumerate() {
        //     let obj_idx = org_indexes[i];

        //     if err.is_some() {
        //         del_errs[obj_idx] = err;
        //     }

        //     let mut dobj = pdel_objs.get(i).unwrap().clone();
        //     dobj.object_name = decode_dir_object(&dobj.object_name);

        //     del_objects[obj_idx] = dobj;
        // }
        // }
        // }
        // }

        // Ok((del_objects, del_errs))
    }

    #[tracing::instrument(skip(self))]

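The rewritten ECStore::delete_objects fans the whole batch out to every pool, then merges per object: the first pool that actually found and deleted the object wins, otherwise an earlier pool's verdict is kept. A simplified sketch of that merge rule over plain tuples (Deleted and the fallback-to-first-pool rule are illustrative simplifications of the diff's logic):

#[derive(Clone, Debug, PartialEq)]
struct Deleted { name: &'static str, found: bool }

// Per object: prefer the first pool that deleted it (found && no error);
// otherwise fall back to the first pool's result, errors and all.
fn merge_pools(
    per_pool: &[Vec<(Deleted, Option<&'static str>)>],
    objects: usize,
) -> Vec<(Deleted, Option<&'static str>)> {
    (0..objects)
        .map(|idx| {
            if let Some(pool) = per_pool.iter().find(|p| p[idx].1.is_none() && p[idx].0.found) {
                return pool[idx].clone();
            }
            per_pool[0][idx].clone()
        })
        .collect()
}

fn main() {
    let pool_a = vec![(Deleted { name: "x", found: false }, Some("not found"))];
    let pool_b = vec![(Deleted { name: "x", found: true }, None)];
    let merged = merge_pools(&[pool_a, pool_b], 1);
    assert!(merged[0].0.found && merged[0].1.is_none());
}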
@@ -13,8 +13,10 @@
// limitations under the License.

use crate::bucket::metadata_sys::get_versioning_config;
use crate::bucket::replication::REPLICATION_RESET;
use crate::bucket::replication::REPLICATION_STATUS;
use crate::bucket::replication::{ReplicateDecision, replication_statuses_map, version_purge_statuses_map};
use crate::bucket::versioning::VersioningApi as _;
use crate::cmd::bucket_replication::{ReplicationStatusType, VersionPurgeStatusType};
use crate::disk::DiskStore;
use crate::error::{Error, Result};
use crate::store_utils::clean_metadata;
@@ -25,20 +27,25 @@ use crate::{
};
use http::{HeaderMap, HeaderValue};
use rustfs_common::heal_channel::HealOpts;
use rustfs_filemeta::headers::RESERVED_METADATA_PREFIX_LOWER;
use rustfs_filemeta::{FileInfo, MetaCacheEntriesSorted, ObjectPartInfo, headers::AMZ_OBJECT_TAGGING};
use rustfs_filemeta::{
    FileInfo, MetaCacheEntriesSorted, ObjectPartInfo, ReplicationState, ReplicationStatusType, VersionPurgeStatusType,
};
use rustfs_madmin::heal_commands::HealResultItem;
use rustfs_rio::{DecompressReader, HashReader, LimitReader, WarpReader};
use rustfs_utils::CompressionAlgorithm;
use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX_LOWER};
use rustfs_utils::path::decode_dir_object;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::Debug;
use std::io::Cursor;
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;
use std::task::{Context, Poll};
use time::OffsetDateTime;
use tokio::io::{AsyncRead, AsyncReadExt};
use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};
use tokio_util::sync::CancellationToken;
use tracing::warn;
use uuid::Uuid;

@@ -221,6 +228,12 @@ impl GetObjectReader {
    }
}

impl AsyncRead for GetObjectReader {
    fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.stream).poll_read(cx, buf)
    }
}

#[derive(Debug, Clone)]
pub struct HTTPRangeSpec {
    pub is_suffix_length: bool,
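The AsyncRead impl simply forwards poll_read to the inner stream, which is all it takes to make GetObjectReader usable with tokio's read utilities. A self-contained sketch of the same delegation pattern (Wrapper is an illustrative stand-in for GetObjectReader):

use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};

// Wrapper that owns a boxed inner reader, like GetObjectReader owns its stream.
struct Wrapper {
    stream: Box<dyn AsyncRead + Send + Unpin>,
}

impl AsyncRead for Wrapper {
    fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<std::io::Result<()>> {
        // Delegate straight to the inner stream; Unpin makes the re-pin trivial.
        Pin::new(&mut self.stream).poll_read(cx, buf)
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut r = Wrapper { stream: Box::new(&b"hello"[..]) };
    let mut out = String::new();
    r.read_to_string(&mut out).await?; // works because Wrapper: AsyncRead
    assert_eq!(out, "hello");
    Ok(())
}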
@@ -326,6 +339,7 @@ pub struct ObjectOptions {

    pub skip_decommissioned: bool,
    pub skip_rebalancing: bool,
    pub skip_free_version: bool,

    pub data_movement: bool,
    pub src_pool_idx: usize,
@@ -334,11 +348,10 @@ pub struct ObjectOptions {
    pub metadata_chg: bool,
    pub http_preconditions: Option<HTTPPreconditions>,

    pub delete_replication: Option<ReplicationState>,
    pub replication_request: bool,
    pub delete_marker: bool,

    pub skip_free_version: bool,

    pub transition: TransitionOptions,
    pub expiration: ExpirationOptions,
    pub lifecycle_audit_event: LcAuditEvent,
@@ -346,15 +359,66 @@ pub struct ObjectOptions {
    pub eval_metadata: Option<HashMap<String, String>>,
}

// impl Default for ObjectOptions {
//     fn default() -> Self {
//         Self {
//             max_parity: Default::default(),
//             mod_time: OffsetDateTime::UNIX_EPOCH,
//             part_number: Default::default(),
//         }
//     }
// }
impl ObjectOptions {
    pub fn set_delete_replication_state(&mut self, dsc: ReplicateDecision) {
        let mut rs = ReplicationState {
            replicate_decision_str: dsc.to_string(),
            ..Default::default()
        };
        if self.version_id.is_none() {
            rs.replication_status_internal = dsc.pending_status();
            rs.targets = replication_statuses_map(rs.replication_status_internal.as_deref().unwrap_or_default());
        } else {
            rs.version_purge_status_internal = dsc.pending_status();
            rs.purge_targets = version_purge_statuses_map(rs.version_purge_status_internal.as_deref().unwrap_or_default());
        }

        self.delete_replication = Some(rs)
    }

    pub fn set_replica_status(&mut self, status: ReplicationStatusType) {
        if let Some(rs) = self.delete_replication.as_mut() {
            rs.replica_status = status;
            rs.replica_timestamp = Some(OffsetDateTime::now_utc());
        } else {
            self.delete_replication = Some(ReplicationState {
                replica_status: status,
                replica_timestamp: Some(OffsetDateTime::now_utc()),
                ..Default::default()
            });
        }
    }

    pub fn version_purge_status(&self) -> VersionPurgeStatusType {
        self.delete_replication
            .as_ref()
            .map(|v| v.composite_version_purge_status())
            .unwrap_or(VersionPurgeStatusType::Empty)
    }

    pub fn delete_marker_replication_status(&self) -> ReplicationStatusType {
        self.delete_replication
            .as_ref()
            .map(|v| v.composite_replication_status())
            .unwrap_or(ReplicationStatusType::Empty)
    }

    pub fn put_replication_state(&self) -> ReplicationState {
        let rs = match self
            .user_defined
            .get(format!("{RESERVED_METADATA_PREFIX_LOWER}{REPLICATION_STATUS}").as_str())
        {
            Some(v) => v.to_string(),
            None => return ReplicationState::default(),
        };

        ReplicationState {
            replication_status_internal: Some(rs.to_string()),
            targets: replication_statuses_map(rs.as_str()),
            ..Default::default()
        }
    }
}

#[derive(Debug, Default, Serialize, Deserialize)]
pub struct BucketOptions {
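version_purge_status and delete_marker_replication_status both reduce the optional replication state to a concrete status with a typed Empty default, so call sites never branch on Option. A tiny runnable sketch of that accessor pattern (Status, State, and Options are illustrative stand-ins):

#[derive(Debug, PartialEq, Default, Clone)]
enum Status {
    #[default]
    Empty,
    Complete,
}

struct State { purge: Status }

struct Options { delete_replication: Option<State> }

impl Options {
    // Same shape as ObjectOptions::version_purge_status: map over the Option
    // and fall back to the typed Empty value instead of returning Option<Status>.
    fn version_purge_status(&self) -> Status {
        self.delete_replication
            .as_ref()
            .map(|s| s.purge.clone())
            .unwrap_or(Status::Empty)
    }
}

fn main() {
    let none = Options { delete_replication: None };
    assert_eq!(none.version_purge_status(), Status::Empty);

    let some = Options { delete_replication: Some(State { purge: Status::Complete }) };
    assert_eq!(some.version_purge_status(), Status::Complete);
}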
@@ -423,6 +487,7 @@ pub struct ObjectInfo {
    pub is_latest: bool,
    pub content_type: Option<String>,
    pub content_encoding: Option<String>,
    pub expires: Option<OffsetDateTime>,
    pub num_versions: usize,
    pub successor_mod_time: Option<OffsetDateTime>,
    pub put_object_reader: Option<PutObjReader>,
@@ -430,10 +495,11 @@ pub struct ObjectInfo {
    pub inlined: bool,
    pub metadata_only: bool,
    pub version_only: bool,
    pub replication_status_internal: String,
    pub replication_status_internal: Option<String>,
    pub replication_status: ReplicationStatusType,
    pub version_purge_status_internal: String,
    pub version_purge_status_internal: Option<String>,
    pub version_purge_status: VersionPurgeStatusType,
    pub replication_decision: String,
    pub checksum: Vec<u8>,
}

@@ -470,7 +536,9 @@ impl Clone for ObjectInfo {
            replication_status: self.replication_status.clone(),
            version_purge_status_internal: self.version_purge_status_internal.clone(),
            version_purge_status: self.version_purge_status.clone(),
            replication_decision: self.replication_decision.clone(),
            checksum: Default::default(),
            expires: self.expires,
        }
    }
}
@@ -665,7 +733,10 @@ impl ObjectInfo {
        };

        for fi in versions.iter() {
            // TODO:VersionPurgeStatus
            if !fi.version_purge_status().is_empty() {
                continue;
            }

            let versioned = vcfg.clone().map(|v| v.0.versioned(&entry.name)).unwrap_or_default();
            objects.push(ObjectInfo::from_file_info(fi, bucket, &entry.name, versioned));
        }
@@ -770,6 +841,32 @@ impl ObjectInfo {

        objects
    }

    pub fn replication_state(&self) -> ReplicationState {
        ReplicationState {
            replication_status_internal: self.replication_status_internal.clone(),
            version_purge_status_internal: self.version_purge_status_internal.clone(),
            replicate_decision_str: self.replication_decision.clone(),
            targets: replication_statuses_map(self.replication_status_internal.clone().unwrap_or_default().as_str()),
            purge_targets: version_purge_statuses_map(self.version_purge_status_internal.clone().unwrap_or_default().as_str()),
            reset_statuses_map: self
                .user_defined
                .iter()
                .filter_map(|(k, v)| {
                    if k.starts_with(&format!("{RESERVED_METADATA_PREFIX_LOWER}{REPLICATION_RESET}")) {
                        Some((
                            k.trim_start_matches(&format!("{RESERVED_METADATA_PREFIX_LOWER}{REPLICATION_RESET}-"))
                                .to_string(),
                            v.clone(),
                        ))
                    } else {
                        None
                    }
                })
                .collect(),
            ..Default::default()
        }
    }
}

#[derive(Debug, Default)]
@@ -927,17 +1024,52 @@ pub struct ListPartsInfo {
pub struct ObjectToDelete {
    pub object_name: String,
    pub version_id: Option<Uuid>,
    pub delete_marker_replication_status: Option<String>,
    pub version_purge_status: Option<VersionPurgeStatusType>,
    pub version_purge_statuses: Option<String>,
    pub replicate_decision_str: Option<String>,
}

impl ObjectToDelete {
    pub fn replication_state(&self) -> ReplicationState {
        ReplicationState {
            replication_status_internal: self.delete_marker_replication_status.clone(),
            version_purge_status_internal: self.version_purge_statuses.clone(),
            replicate_decision_str: self.replicate_decision_str.clone().unwrap_or_default(),
            targets: replication_statuses_map(self.delete_marker_replication_status.as_deref().unwrap_or_default()),
            purge_targets: version_purge_statuses_map(self.version_purge_statuses.as_deref().unwrap_or_default()),
            ..Default::default()
        }
    }
}

#[derive(Debug, Default, Clone)]
pub struct DeletedObject {
    pub delete_marker: bool,
    pub delete_marker_version_id: Option<String>,
    pub delete_marker_version_id: Option<Uuid>,
    pub object_name: String,
    pub version_id: Option<String>,
    pub version_id: Option<Uuid>,
    // MTime of DeleteMarker on source that needs to be propagated to replica
    pub delete_marker_mtime: Option<OffsetDateTime>,
    // to support delete marker replication
    // pub replication_state: ReplicationState,
    pub replication_state: Option<ReplicationState>,
    pub found: bool,
}

impl DeletedObject {
    pub fn version_purge_status(&self) -> VersionPurgeStatusType {
        self.replication_state
            .as_ref()
            .map(|v| v.composite_version_purge_status())
            .unwrap_or(VersionPurgeStatusType::Empty)
    }

    pub fn delete_marker_replication_status(&self) -> ReplicationStatusType {
        self.replication_state
            .as_ref()
            .map(|v| v.composite_replication_status())
            .unwrap_or(ReplicationStatusType::Empty)
    }
}

#[derive(Debug, Default, Clone)]
@@ -949,8 +1081,33 @@ pub struct ListObjectVersionsInfo {
    pub prefixes: Vec<String>,
}

type WalkFilter = fn(&FileInfo) -> bool;

#[derive(Clone, Default)]
pub struct WalkOptions {
    pub filter: Option<WalkFilter>,           // WalkFilter returns true to keep an entry, false to skip it
    pub marker: Option<String>,               // set to skip until this object
    pub latest_only: bool,                    // returns only latest versions for all matching objects
    pub ask_disks: String,                    // dictates how many disks are being listed
    pub versions_sort: WalkVersionsSortOrder, // sort order for versions of the same object; default: Ascending order in ModTime
    pub limit: usize,                         // maximum number of items, 0 means no limit
}

#[derive(Clone, Default, PartialEq, Eq)]
pub enum WalkVersionsSortOrder {
    #[default]
    Ascending,
    Descending,
}

#[derive(Debug)]
pub struct ObjectInfoOrErr {
    pub item: Option<ObjectInfo>,
    pub err: Option<Error>,
}

#[async_trait::async_trait]
pub trait ObjectIO: Send + Sync + 'static {
pub trait ObjectIO: Send + Sync + Debug + 'static {
    // GetObjectNInfo FIXME:
    async fn get_object_reader(
        &self,
@@ -966,7 +1123,7 @@ pub trait ObjectIO: Send + Sync + 'static {

#[async_trait::async_trait]
#[allow(clippy::too_many_arguments)]
pub trait StorageAPI: ObjectIO {
pub trait StorageAPI: ObjectIO + Debug {
    // NewNSLock TODO:
    // Shutdown TODO:
    // NSScanner TODO:
@@ -1000,7 +1157,15 @@ pub trait StorageAPI: ObjectIO {
        delimiter: Option<String>,
        max_keys: i32,
    ) -> Result<ListObjectVersionsInfo>;
    // Walk TODO:

    async fn walk(
        self: Arc<Self>,
        rx: CancellationToken,
        bucket: &str,
        prefix: &str,
        result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
        opts: WalkOptions,
    ) -> Result<()>;

    async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
    async fn verify_object_integrity(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
@@ -1021,7 +1186,7 @@ pub trait StorageAPI: ObjectIO {
|
||||
bucket: &str,
|
||||
objects: Vec<ObjectToDelete>,
|
||||
opts: ObjectOptions,
|
||||
) -> Result<(Vec<DeletedObject>, Vec<Option<Error>>)>;
|
||||
) -> (Vec<DeletedObject>, Vec<Option<Error>>);
|
||||
|
||||
// TransitionObject TODO:
|
||||
// RestoreTransitionedObject TODO:
|
||||
|
||||
@@ -23,20 +23,23 @@ use crate::error::{
|
||||
};
|
||||
use crate::set_disk::SetDisks;
|
||||
use crate::store::check_list_objs_args;
|
||||
use crate::store_api::{ListObjectVersionsInfo, ListObjectsInfo, ObjectInfo, ObjectOptions};
|
||||
use crate::store_api::{
|
||||
ListObjectVersionsInfo, ListObjectsInfo, ObjectInfo, ObjectInfoOrErr, ObjectOptions, WalkOptions, WalkVersionsSortOrder,
|
||||
};
|
||||
use crate::store_utils::is_reserved_or_invalid_bucket;
|
||||
use crate::{store::ECStore, store_api::ListObjectsV2Info};
|
||||
use futures::future::join_all;
|
||||
use rand::seq::SliceRandom;
|
||||
use rustfs_filemeta::{
|
||||
FileInfo, MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams,
|
||||
MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams,
|
||||
merge_file_meta_versions,
|
||||
};
|
||||
use rustfs_utils::path::{self, SLASH_SEPARATOR, base_dir_from_prefix};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::broadcast::{self, Receiver as B_Receiver};
|
||||
use tokio::sync::broadcast::{self};
|
||||
use tokio::sync::mpsc::{self, Receiver, Sender};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{error, info};
|
||||
use uuid::Uuid;
|
||||
|
||||
@@ -529,14 +532,15 @@ impl ECStore {
|
||||
}
|
||||
|
||||
// cancel channel
|
||||
let (cancel_tx, cancel_rx) = broadcast::channel(1);
|
||||
let cancel = CancellationToken::new();
|
||||
|
||||
let (err_tx, mut err_rx) = broadcast::channel::<Arc<Error>>(1);
|
||||
|
||||
let (sender, recv) = mpsc::channel(o.limit as usize);
|
||||
|
||||
let store = self.clone();
|
||||
let opts = o.clone();
|
||||
let cancel_rx1 = cancel_rx.resubscribe();
|
||||
let cancel_rx1 = cancel.clone();
|
||||
let err_tx1 = err_tx.clone();
|
||||
let job1 = tokio::spawn(async move {
|
||||
let mut opts = opts;
|
||||
@@ -547,7 +551,7 @@ impl ECStore {
|
||||
}
|
||||
});
|
||||
|
||||
let cancel_rx2 = cancel_rx.resubscribe();
|
||||
let cancel_rx2 = cancel.clone();
|
||||
|
||||
let (result_tx, mut result_rx) = mpsc::channel(1);
|
||||
let err_tx2 = err_tx.clone();
|
||||
@@ -559,7 +563,7 @@ impl ECStore {
|
||||
}
|
||||
|
||||
// cancel call exit spawns
|
||||
let _ = cancel_tx.send(true);
|
||||
cancel.cancel();
|
||||
});
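
The hunks above swap the one-shot broadcast channel for tokio_util's CancellationToken; a standalone sketch of that pattern (do_work is a hypothetical async fn):

    use tokio_util::sync::CancellationToken;

    let cancel = CancellationToken::new();
    let child = cancel.clone(); // clones observe the same token; no resubscribe() needed

    let worker = tokio::spawn(async move {
        tokio::select! {
            _ = child.cancelled() => { /* shut down cooperatively */ }
            _ = do_work() => {}
        }
    });

    cancel.cancel(); // wakes every clone, unlike a single broadcast send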

        let mut result = {
@@ -615,7 +619,7 @@ impl ECStore {
    // Read all
    async fn list_merged(
        &self,
        rx: B_Receiver<bool>,
        rx: CancellationToken,
        opts: ListPathOptions,
        sender: Sender<MetaCacheEntry>,
    ) -> Result<Vec<ObjectInfo>> {
@@ -631,9 +635,8 @@ impl ECStore {

                inputs.push(recv);
                let opts = opts.clone();

                let rx = rx.resubscribe();
                futures.push(set.list_path(rx, opts, send));
                let rx_clone = rx.clone();
                futures.push(set.list_path(rx_clone, opts, send));
            }
        }

@@ -695,9 +698,9 @@ impl ECStore {
    }

    #[allow(unused_assignments)]
    pub async fn walk(
    pub async fn walk_internal(
        self: Arc<Self>,
        rx: B_Receiver<bool>,
        rx: CancellationToken,
        bucket: &str,
        prefix: &str,
        result: Sender<ObjectInfoOrErr>,
@@ -711,11 +714,11 @@ impl ECStore {
        for eset in self.pools.iter() {
            for set in eset.disk_set.iter() {
                let (mut disks, infos, _) = set.get_online_disks_with_healing_and_info(true).await;
                let rx = rx.resubscribe();
                let opts = opts.clone();

                let (sender, list_out_rx) = mpsc::channel::<MetaCacheEntry>(1);
                inputs.push(list_out_rx);
                let rx_clone = rx.clone();
                futures.push(async move {
                    let mut ask_disks = get_list_quorum(&opts.ask_disks, set.set_drive_count as i32);
                    if ask_disks == -1 {
@@ -770,7 +773,7 @@ impl ECStore {
                    let tx2 = sender.clone();

                    list_path_raw(
                        rx.resubscribe(),
                        rx_clone,
                        ListPathRawOptions {
                            disks: disks.iter().cloned().map(Some).collect(),
                            fallback_disks: fallback_disks.iter().cloned().map(Some).collect(),
@@ -936,33 +939,8 @@ impl ECStore {
        }
    }

type WalkFilter = fn(&FileInfo) -> bool;

#[derive(Clone, Default)]
pub struct WalkOptions {
    pub filter: Option<WalkFilter>,           // WalkFilter returns 'true/false'
    pub marker: Option<String>,               // set to skip until this object
    pub latest_only: bool,                    // returns only latest versions for all matching objects
    pub ask_disks: String,                    // dictates how many disks are being listed
    pub versions_sort: WalkVersionsSortOrder, // sort order for versions of the same object; default: Ascending order in ModTime
    pub limit: usize,                         // maximum number of items, 0 means no limit
}

#[derive(Clone, Default, PartialEq, Eq)]
pub enum WalkVersionsSortOrder {
    #[default]
    Ascending,
    Descending,
}

#[derive(Debug)]
pub struct ObjectInfoOrErr {
    pub item: Option<ObjectInfo>,
    pub err: Option<Error>,
}

async fn gather_results(
    _rx: B_Receiver<bool>,
    _rx: CancellationToken,
    opts: ListPathOptions,
    recv: Receiver<MetaCacheEntry>,
    results_tx: Sender<MetaCacheEntriesSortedResult>,
@@ -1067,12 +1045,11 @@ async fn select_from(

// TODO: exit when cancel
async fn merge_entry_channels(
    rx: B_Receiver<bool>,
    rx: CancellationToken,
    in_channels: Vec<Receiver<MetaCacheEntry>>,
    out_channel: Sender<MetaCacheEntry>,
    read_quorum: usize,
) -> Result<()> {
    let mut rx = rx;
    let mut in_channels = in_channels;
    if in_channels.len() == 1 {
        loop {
@@ -1085,7 +1062,7 @@ async fn merge_entry_channels(
                        return Ok(())
                    }
                },
                _ = rx.recv()=>{
                _ = rx.cancelled()=>{
                    info!("merge_entry_channels rx.recv() cancel");
                    return Ok(())
                },
@@ -1228,7 +1205,7 @@ async fn merge_entry_channels(
}

impl SetDisks {
    pub async fn list_path(&self, rx: B_Receiver<bool>, opts: ListPathOptions, sender: Sender<MetaCacheEntry>) -> Result<()> {
    pub async fn list_path(&self, rx: CancellationToken, opts: ListPathOptions, sender: Sender<MetaCacheEntry>) -> Result<()> {
        let (mut disks, infos, _) = self.get_online_disks_with_healing_and_info(true).await;

        let mut ask_disks = get_list_quorum(&opts.ask_disks, self.set_drive_count as i32);

@@ -15,8 +15,8 @@
use crate::config::storageclass::STANDARD;
use crate::disk::RUSTFS_META_BUCKET;
use regex::Regex;
use rustfs_filemeta::headers::AMZ_OBJECT_TAGGING;
use rustfs_filemeta::headers::AMZ_STORAGE_CLASS;
use rustfs_utils::http::headers::AMZ_OBJECT_TAGGING;
use rustfs_utils::http::headers::AMZ_STORAGE_CLASS;
use std::collections::HashMap;
use std::io::{Error, Result};


@@ -35,7 +35,7 @@ uuid = { workspace = true, features = ["v4", "fast-rng", "serde"] }
tokio = { workspace = true, features = ["io-util", "macros", "sync"] }
xxhash-rust = { workspace = true, features = ["xxh64"] }
bytes.workspace = true
rustfs-utils = { workspace = true, features = ["hash"] }
rustfs-utils = { workspace = true, features = ["hash", "http"] }
byteorder = { workspace = true }
tracing.workspace = true
thiserror.workspace = true

@@ -13,11 +13,11 @@
// limitations under the License.

use crate::error::{Error, Result};
use crate::headers::RESERVED_METADATA_PREFIX_LOWER;
use crate::headers::RUSTFS_HEALING;
use crate::{ReplicationState, ReplicationStatusType, VersionPurgeStatusType};
use bytes::Bytes;
use rmp_serde::Serializer;
use rustfs_utils::HashAlgorithm;
use rustfs_utils::http::headers::{RESERVED_METADATA_PREFIX_LOWER, RUSTFS_HEALING};
use serde::Deserialize;
use serde::Serialize;
use std::collections::HashMap;
@@ -202,7 +202,7 @@ pub struct FileInfo {
    // MarkDeleted marks this version as deleted
    pub mark_deleted: bool,
    // ReplicationState - Internal replication state to be passed back in ObjectInfo
    // pub replication_state: Option<ReplicationState>, // TODO: implement ReplicationState
    pub replication_state_internal: Option<ReplicationState>,
    pub data: Option<Bytes>,
    pub num_versions: usize,
    pub successor_mod_time: Option<OffsetDateTime>,
@@ -471,6 +471,29 @@ impl FileInfo {
        // TODO: Add replication_state comparison when implemented
        // && self.replication_state == other.replication_state
    }

    pub fn version_purge_status(&self) -> VersionPurgeStatusType {
        self.replication_state_internal
            .as_ref()
            .map(|v| v.composite_version_purge_status())
            .unwrap_or(VersionPurgeStatusType::Empty)
    }
    pub fn replication_status(&self) -> ReplicationStatusType {
        self.replication_state_internal
            .as_ref()
            .map(|v| v.composite_replication_status())
            .unwrap_or(ReplicationStatusType::Empty)
    }
    pub fn delete_marker_replication_status(&self) -> ReplicationStatusType {
        if self.deleted {
            self.replication_state_internal
                .as_ref()
                .map(|v| v.composite_replication_status())
                .unwrap_or(ReplicationStatusType::Empty)
        } else {
            ReplicationStatusType::Empty
        }
    }
}

#[derive(Debug, Default, Clone, Serialize, Deserialize)]

@@ -15,12 +15,13 @@
use crate::error::{Error, Result};
use crate::fileinfo::{ErasureAlgo, ErasureInfo, FileInfo, FileInfoVersions, ObjectPartInfo, RawFileInfo};
use crate::filemeta_inline::InlineData;
use crate::headers::{
use crate::{ReplicationStatusType, VersionPurgeStatusType};
use byteorder::ByteOrder;
use bytes::Bytes;
use rustfs_utils::http::headers::{
    self, AMZ_META_UNENCRYPTED_CONTENT_LENGTH, AMZ_META_UNENCRYPTED_CONTENT_MD5, AMZ_STORAGE_CLASS, RESERVED_METADATA_PREFIX,
    RESERVED_METADATA_PREFIX_LOWER, VERSION_PURGE_STATUS_KEY,
};
use byteorder::ByteOrder;
use bytes::Bytes;
use s3s::header::X_AMZ_RESTORE;
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
@@ -30,6 +31,7 @@ use std::io::{Read, Write};
use std::{collections::HashMap, io::Cursor};
use time::OffsetDateTime;
use tokio::io::AsyncRead;
use tracing::error;
use uuid::Uuid;
use xxhash_rust::xxh64;

@@ -159,39 +161,57 @@ impl FileMeta {
        let i = buf.len() as u64;

        // check version, buf = buf[8..]
        let (buf, _, _) = Self::check_xl2_v1(buf)?;
        let (buf, _, _) = Self::check_xl2_v1(buf).map_err(|e| {
            error!("failed to check XL2 v1 format: {}", e);
            e
        })?;

        let (mut size_buf, buf) = buf.split_at(5);

        // Get meta data, buf = crc + data
        let bin_len = rmp::decode::read_bin_len(&mut size_buf)?;
        let bin_len = rmp::decode::read_bin_len(&mut size_buf).map_err(|e| {
            error!("failed to read binary length for metadata: {}", e);
            Error::other(format!("failed to read binary length for metadata: {e}"))
        })?;

        if buf.len() < bin_len as usize {
            error!("insufficient data for metadata: expected {} bytes, got {} bytes", bin_len, buf.len());
            return Err(Error::other("insufficient data for metadata"));
        }
        let (meta, buf) = buf.split_at(bin_len as usize);

        if buf.len() < 5 {
            error!("insufficient data for CRC: expected 5 bytes, got {} bytes", buf.len());
            return Err(Error::other("insufficient data for CRC"));
        }
        let (mut crc_buf, buf) = buf.split_at(5);

        // crc check
        let crc = rmp::decode::read_u32(&mut crc_buf)?;
        let crc = rmp::decode::read_u32(&mut crc_buf).map_err(|e| {
            error!("failed to read CRC value: {}", e);
            Error::other(format!("failed to read CRC value: {e}"))
        })?;
        let meta_crc = xxh64::xxh64(meta, XXHASH_SEED) as u32;

        if crc != meta_crc {
            error!("xl file crc check failed: expected CRC {:#x}, got {:#x}", meta_crc, crc);
            return Err(Error::other("xl file crc check failed"));
        }
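
The check mirrors how the trailing CRC is produced; a self-contained sketch of the xxh64-based verification, with XXHASH_SEED standing in for the crate's real constant:

    use xxhash_rust::xxh64;

    const XXHASH_SEED: u64 = 0; // placeholder; the actual seed lives in the crate

    fn verify_meta_crc(meta: &[u8], stored_crc: u32) -> Result<(), String> {
        // The on-disk CRC is the low 32 bits of the 64-bit xxHash of the metadata block.
        let computed = xxh64::xxh64(meta, XXHASH_SEED) as u32;
        if computed != stored_crc {
            return Err(format!("xl file crc check failed: expected {computed:#x}, got {stored_crc:#x}"));
        }
        Ok(())
    }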

        if !buf.is_empty() {
            self.data.update(buf);
            self.data.validate()?;
            self.data.validate().map_err(|e| {
                error!("data validation failed: {}", e);
                e
            })?;
        }

        // Parse meta
        if !meta.is_empty() {
            let (versions_len, _, meta_ver, meta) = Self::decode_xl_headers(meta)?;
            let (versions_len, _, meta_ver, meta) = Self::decode_xl_headers(meta).map_err(|e| {
                error!("failed to decode XL headers: {}", e);
                e
            })?;

            // let (_, meta) = meta.split_at(read_size as usize);

@@ -201,24 +221,30 @@ impl FileMeta {

            let mut cur: Cursor<&[u8]> = Cursor::new(meta);
            for _ in 0..versions_len {
                let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize;
                let start = cur.position() as usize;
                let end = start + bin_len;
                let header_buf = &meta[start..end];
                let bin_len = rmp::decode::read_bin_len(&mut cur).map_err(|e| {
                    error!("failed to read binary length for version header: {}", e);
                    Error::other(format!("failed to read binary length for version header: {e}"))
                })? as usize;

                let mut header_buf = vec![0u8; bin_len];

                cur.read_exact(&mut header_buf)?;

                let mut ver = FileMetaShallowVersion::default();
                ver.header.unmarshal_msg(header_buf)?;
                ver.header.unmarshal_msg(&header_buf).map_err(|e| {
                    error!("failed to unmarshal version header: {}", e);
                    e
                })?;

                cur.set_position(end as u64);
                let bin_len = rmp::decode::read_bin_len(&mut cur).map_err(|e| {
                    error!("failed to read binary length for version metadata: {}", e);
                    Error::other(format!("failed to read binary length for version metadata: {e}"))
                })? as usize;

                let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize;
                let start = cur.position() as usize;
                let end = start + bin_len;
                let ver_meta_buf = &meta[start..end];
                let mut ver_meta_buf = vec![0u8; bin_len];
                cur.read_exact(&mut ver_meta_buf)?;

                ver.meta.extend_from_slice(ver_meta_buf);

                cur.set_position(end as u64);
                ver.meta.extend_from_slice(&ver_meta_buf);

                self.versions.push(ver);
            }
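
The rewrite above replaces manual slice index bookkeeping with read_exact on the cursor; a reduced sketch of the length-prefixed read it performs (function name is illustrative):

    use std::io::{Cursor, Read};

    fn read_bin_block(cur: &mut Cursor<&[u8]>) -> std::io::Result<Vec<u8>> {
        // read_bin_len consumes the msgpack bin header and returns the payload size.
        let len = rmp::decode::read_bin_len(cur)
            .map_err(|e| std::io::Error::other(format!("bad bin header: {e}")))? as usize;
        let mut buf = vec![0u8; len];
        cur.read_exact(&mut buf)?; // advances the cursor, so no set_position() is needed
        Ok(buf)
    }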
@@ -487,39 +513,39 @@ impl FileMeta {

        let version = FileMetaVersion::from(fi);

        self.add_version_filemata(version)
    }

    pub fn add_version_filemata(&mut self, version: FileMetaVersion) -> Result<()> {
        if !version.valid() {
            return Err(Error::other("file meta version invalid"));
        }

        // should replace
        for (idx, ver) in self.versions.iter().enumerate() {
            if ver.header.version_id != vid {
                continue;
            }

            return self.set_idx(idx, version);
        // 1000 is the limit of versions TODO: make it configurable
        if self.versions.len() + 1 > 1000 {
            return Err(Error::other(
                "You've exceeded the limit on the number of versions you can create on this object",
            ));
        }

        // TODO: version count limit !
        if self.versions.is_empty() {
            self.versions.push(FileMetaShallowVersion::try_from(version)?);
            return Ok(());
        }

        let vid = version.get_version_id();

        if let Some(fidx) = self.versions.iter().position(|v| v.header.version_id == vid) {
            return self.set_idx(fidx, version);
        }

        let mod_time = version.get_mod_time();

        // push a -1 mod time value, so we can replace this
        self.versions.push(FileMetaShallowVersion {
            header: FileMetaVersionHeader {
                mod_time: Some(OffsetDateTime::from_unix_timestamp(-1)?),
                ..Default::default()
            },
            ..Default::default()
        });

        for (idx, exist) in self.versions.iter().enumerate() {
            if let Some(ref ex_mt) = exist.header.mod_time {
                if let Some(ref in_md) = mod_time {
                    if ex_mt <= in_md {
                        // insert
                        self.versions.insert(idx, FileMetaShallowVersion::try_from(version)?);
                        self.versions.pop();
                        return Ok(());
                    }
                }
@@ -527,35 +553,33 @@ impl FileMeta {
        }

        Err(Error::other("add_version failed"))
    }

    pub fn add_version_filemata(&mut self, ver: FileMetaVersion) -> Result<()> {
        if !ver.valid() {
            return Err(Error::other("attempted to add invalid version"));
        }
        // if !ver.valid() {
        //     return Err(Error::other("attempted to add invalid version"));
        // }

        if self.versions.len() + 1 >= 100 {
            return Err(Error::other(
                "You've exceeded the limit on the number of versions you can create on this object",
            ));
        }
        // if self.versions.len() + 1 >= 100 {
        //     return Err(Error::other(
        //         "You've exceeded the limit on the number of versions you can create on this object",
        //     ));
        // }

        let mod_time = ver.get_mod_time();
        let encoded = ver.marshal_msg()?;
        let new_version = FileMetaShallowVersion {
            header: ver.header(),
            meta: encoded,
        };
        // let mod_time = ver.get_mod_time();
        // let encoded = ver.marshal_msg()?;
        // let new_version = FileMetaShallowVersion {
        //     header: ver.header(),
        //     meta: encoded,
        // };

        // Find the insertion position: insert before the first element with mod_time >= new mod_time
        // This maintains descending order by mod_time (newest first)
        let insert_pos = self
            .versions
            .iter()
            .position(|existing| existing.header.mod_time <= mod_time)
            .unwrap_or(self.versions.len());
        self.versions.insert(insert_pos, new_version);
        Ok(())
        // // Find the insertion position: insert before the first element with mod_time >= new mod_time
        // // This maintains descending order by mod_time (newest first)
        // let insert_pos = self
        //     .versions
        //     .iter()
        //     .position(|existing| existing.header.mod_time <= mod_time)
        //     .unwrap_or(self.versions.len());
        // self.versions.insert(insert_pos, new_version);
        // Ok(())
    }
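
A toy model of the newest-first insertion that the commented-out code described, using plain timestamps instead of version headers:

    // Keep a list sorted newest-first; insert before the first element not newer than `new`.
    fn insert_sorted_desc(versions: &mut Vec<i64>, new: i64) {
        let pos = versions
            .iter()
            .position(|&existing| existing <= new)
            .unwrap_or(versions.len());
        versions.insert(pos, new);
    }

    // With versions = [30, 20, 10], inserting 25 yields [30, 25, 20, 10].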

    // delete_version deletes version, returns data_dir
@@ -575,10 +599,97 @@ impl FileMeta {
        }

        let mut update_version = fi.mark_deleted;
        /*if fi.version_purge_status().is_empty()
        if fi.version_purge_status().is_empty()
            && (fi.delete_marker_replication_status() == ReplicationStatusType::Replica
                || fi.delete_marker_replication_status() == ReplicationStatusType::Empty)
        {
            update_version = fi.mark_deleted;
        }*/
        } else {
            if fi.deleted
                && fi.version_purge_status() != VersionPurgeStatusType::Complete
                && (!fi.version_purge_status().is_empty() || fi.delete_marker_replication_status().is_empty())
            {
                update_version = true;
            }

            if !fi.version_purge_status().is_empty() && fi.version_purge_status() != VersionPurgeStatusType::Complete {
                update_version = true;
            }
        }

        if fi.deleted {
            if !fi.delete_marker_replication_status().is_empty() {
                if let Some(delete_marker) = ventry.delete_marker.as_mut() {
                    if fi.delete_marker_replication_status() == ReplicationStatusType::Replica {
                        delete_marker.meta_sys.insert(
                            format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-status"),
                            fi.replication_state_internal
                                .as_ref()
                                .map(|v| v.replica_status.clone())
                                .unwrap_or_default()
                                .as_str()
                                .as_bytes()
                                .to_vec(),
                        );
                        delete_marker.meta_sys.insert(
                            format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-timestamp"),
                            fi.replication_state_internal
                                .as_ref()
                                .map(|v| v.replica_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
                                .unwrap_or_default()
                                .as_bytes()
                                .to_vec(),
                        );
                    } else {
                        delete_marker.meta_sys.insert(
                            format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status"),
                            fi.replication_state_internal
                                .as_ref()
                                .map(|v| v.replication_status_internal.clone().unwrap_or_default())
                                .unwrap_or_default()
                                .as_bytes()
                                .to_vec(),
                        );
                        delete_marker.meta_sys.insert(
                            format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp"),
                            fi.replication_state_internal
                                .as_ref()
                                .map(|v| v.replication_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
                                .unwrap_or_default()
                                .as_bytes()
                                .to_vec(),
                        );
                    }
                }
            }

            if !fi.version_purge_status().is_empty() {
                if let Some(delete_marker) = ventry.delete_marker.as_mut() {
                    delete_marker.meta_sys.insert(
                        VERSION_PURGE_STATUS_KEY.to_string(),
                        fi.replication_state_internal
                            .as_ref()
                            .map(|v| v.version_purge_status_internal.clone().unwrap_or_default())
                            .unwrap_or_default()
                            .as_bytes()
                            .to_vec(),
                    );
                }
            }

            if let Some(delete_marker) = ventry.delete_marker.as_mut() {
                for (k, v) in fi
                    .replication_state_internal
                    .as_ref()
                    .map(|v| v.reset_statuses_map.clone())
                    .unwrap_or_default()
                {
                    delete_marker.meta_sys.insert(k.clone(), v.clone().as_bytes().to_vec());
                }
            }
        }

        // ???
        if fi.transition_status == TRANSITION_COMPLETE {
            update_version = false;
        }
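
The delete-marker branches above all funnel replication state into meta_sys under prefixed keys; a small sketch of that key/value shape (the prefix value below is a placeholder — the real constant comes from rustfs_utils):

    use std::collections::HashMap;

    const RESERVED_METADATA_PREFIX_LOWER: &str = "x-rustfs-internal-"; // placeholder value

    fn record_replica_status(meta_sys: &mut HashMap<String, Vec<u8>>, status: &str) {
        // Keys look like "<reserved-prefix>replica-status"; values are raw UTF-8 bytes.
        meta_sys.insert(
            format!("{RESERVED_METADATA_PREFIX_LOWER}replica-status"),
            status.as_bytes().to_vec(),
        );
    }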
@@ -591,22 +702,111 @@ impl FileMeta {
        match ver.header.version_type {
            VersionType::Invalid | VersionType::Legacy => return Err(Error::other("invalid file meta version")),
            VersionType::Delete => {
                self.versions.remove(i);
                if fi.deleted && fi.version_id.is_none() {
                    self.add_version_filemata(ventry)?;
                if update_version {
                    let mut v = self.get_idx(i)?;
                    if v.delete_marker.is_none() {
                        v.delete_marker = Some(MetaDeleteMarker {
                            version_id: fi.version_id,
                            mod_time: fi.mod_time,
                            meta_sys: HashMap::new(),
                        });
                    }

                    if let Some(delete_marker) = v.delete_marker.as_mut() {
                        if !fi.delete_marker_replication_status().is_empty() {
                            if fi.delete_marker_replication_status() == ReplicationStatusType::Replica {
                                delete_marker.meta_sys.insert(
                                    format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-status"),
                                    fi.replication_state_internal
                                        .as_ref()
                                        .map(|v| v.replica_status.clone())
                                        .unwrap_or_default()
                                        .as_str()
                                        .as_bytes()
                                        .to_vec(),
                                );
                                delete_marker.meta_sys.insert(
                                    format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-timestamp"),
                                    fi.replication_state_internal
                                        .as_ref()
                                        .map(|v| v.replica_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
                                        .unwrap_or_default()
                                        .as_bytes()
                                        .to_vec(),
                                );
                            } else {
                                delete_marker.meta_sys.insert(
                                    format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status"),
                                    fi.replication_state_internal
                                        .as_ref()
                                        .map(|v| v.replication_status_internal.clone().unwrap_or_default())
                                        .unwrap_or_default()
                                        .as_bytes()
                                        .to_vec(),
                                );
                                delete_marker.meta_sys.insert(
                                    format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp"),
                                    fi.replication_state_internal
                                        .as_ref()
                                        .map(|v| v.replication_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
                                        .unwrap_or_default()
                                        .as_bytes()
                                        .to_vec(),
                                );
                            }
                        }

                        for (k, v) in fi
                            .replication_state_internal
                            .as_ref()
                            .map(|v| v.reset_statuses_map.clone())
                            .unwrap_or_default()
                        {
                            delete_marker.meta_sys.insert(k.clone(), v.clone().as_bytes().to_vec());
                        }
                    }

                    self.set_idx(i, v)?;
                    return Ok(None);
                }
                self.versions.remove(i);

                if (fi.mark_deleted && fi.version_purge_status() != VersionPurgeStatusType::Complete)
                    || (fi.deleted && fi.version_id.is_none())
                {
                    self.add_version_filemata(ventry)?;
                }

                return Ok(None);
            }
            VersionType::Object => {
                if update_version && !fi.deleted {
                    let v = self.get_idx(i)?;
                    let mut v = self.get_idx(i)?;

                    self.versions.remove(i);
                    if let Some(obj) = v.object.as_mut() {
                        obj.meta_sys.insert(
                            VERSION_PURGE_STATUS_KEY.to_string(),
                            fi.replication_state_internal
                                .as_ref()
                                .map(|v| v.version_purge_status_internal.clone().unwrap_or_default())
                                .unwrap_or_default()
                                .as_bytes()
                                .to_vec(),
                        );
                        for (k, v) in fi
                            .replication_state_internal
                            .as_ref()
                            .map(|v| v.reset_statuses_map.clone())
                            .unwrap_or_default()
                        {
                            obj.meta_sys.insert(k.clone(), v.clone().as_bytes().to_vec());
                        }
                    }

                    let a = v.object.map(|v| v.data_dir).unwrap_or_default();
                    return Ok(a);
                    let old_dir = v.object.as_ref().map(|v| v.data_dir).unwrap_or_default();
                    self.set_idx(i, v)?;

                    return Ok(old_dir);
                }
            }
        }
@@ -641,31 +841,37 @@ impl FileMeta {
        let obj_version_id = obj.version_id;
        let obj_data_dir = obj.data_dir;

        if fi.expire_restored {
        let mut err = if fi.expire_restored {
            obj.remove_restore_hdrs();
            self.set_idx(i, ver)?;
            self.set_idx(i, ver).err()
        } else if fi.transition_status == TRANSITION_COMPLETE {
            obj.set_transition(fi);
            obj.reset_inline_data();
            self.set_idx(i, ver)?;
            self.set_idx(i, ver).err()
        } else {
            self.versions.remove(i);

            let (free_version, to_free) = obj.init_free_version(fi);

            if to_free {
                self.add_version_filemata(free_version)?;
                self.add_version_filemata(free_version).err()
            } else {
                None
            }
        };

        if fi.deleted {
            self.add_version_filemata(ventry)?;
            err = self.add_version_filemata(ventry).err();
        }

        if self.shared_data_dir_count(obj_version_id, obj_data_dir) > 0 {
            return Ok(None);
        }

        if let Some(e) = err {
            return Err(e);
        }

        Ok(obj_data_dir)
    }

@@ -1642,17 +1848,15 @@ impl MetaObject {
        free_entry.delete_marker = Some(MetaDeleteMarker {
            version_id: Some(vid),
            mod_time: self.mod_time,
            meta_sys: Some(HashMap::<String, Vec<u8>>::new()),
            meta_sys: HashMap::<String, Vec<u8>>::new(),
        });

        free_entry
            .delete_marker
            .as_mut()
            .unwrap()
        let delete_marker = free_entry.delete_marker.as_mut().unwrap();

        delete_marker
            .meta_sys
            .as_mut()
            .unwrap()
            .insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{FREE_VERSION}"), vec![]);

        let tier_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITION_TIER}");
        let tier_obj_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_OBJECTNAME}");
        let tier_obj_vid_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_VERSION_ID}");
@@ -1660,14 +1864,7 @@ impl MetaObject {
        let aa = [tier_key, tier_obj_key, tier_obj_vid_key];
        for (k, v) in &self.meta_sys {
            if aa.contains(k) {
                free_entry
                    .delete_marker
                    .as_mut()
                    .unwrap()
                    .meta_sys
                    .as_mut()
                    .unwrap()
                    .insert(k.clone(), v.clone());
                delete_marker.meta_sys.insert(k.clone(), v.clone());
            }
        }
        return (free_entry, true);
@@ -1737,19 +1934,16 @@ pub struct MetaDeleteMarker {
    #[serde(rename = "MTime")]
    pub mod_time: Option<OffsetDateTime>, // Object delete marker modified time
    #[serde(rename = "MetaSys")]
    pub meta_sys: Option<HashMap<String, Vec<u8>>>, // Delete marker internal metadata
    pub meta_sys: HashMap<String, Vec<u8>>, // Delete marker internal metadata
}

impl MetaDeleteMarker {
    pub fn free_version(&self) -> bool {
        self.meta_sys
            .as_ref()
            .map(|v| v.get(FREE_VERSION_META_HEADER).is_some())
            .unwrap_or_default()
        self.meta_sys.contains_key(FREE_VERSION_META_HEADER)
    }

    pub fn into_fileinfo(&self, volume: &str, path: &str, _all_parts: bool) -> FileInfo {
        let metadata = self.meta_sys.clone().unwrap_or_default();
        let metadata = self.meta_sys.clone();

        FileInfo {
            version_id: self.version_id.filter(|&vid| !vid.is_nil()),
@@ -1895,7 +2089,7 @@ impl From<FileInfo> for MetaDeleteMarker {
        Self {
            version_id: value.version_id,
            mod_time: value.mod_time,
            meta_sys: None,
            meta_sys: HashMap::new(),
        }
    }
}
@@ -2794,7 +2988,7 @@ mod test {
        let delete_marker = MetaDeleteMarker {
            version_id: Some(Uuid::new_v4()),
            mod_time: Some(OffsetDateTime::now_utc()),
            meta_sys: None,
            meta_sys: HashMap::new(),
        };

        let delete_version = FileMetaVersion {

@@ -169,6 +169,9 @@ impl InlineData {
    }
    pub fn remove(&mut self, remove_keys: Vec<Uuid>) -> Result<bool> {
        let buf = self.after_version();
        if buf.is_empty() {
            return Ok(false);
        }
        let mut cur = Cursor::new(buf);

        let mut fields_len = rmp::decode::read_map_len(&mut cur)? as usize;

@@ -16,8 +16,9 @@ mod error;
pub mod fileinfo;
mod filemeta;
mod filemeta_inline;
pub mod headers;
pub mod metacache;
// pub mod headers;
mod metacache;
mod replication;

pub mod test_data;

@@ -26,3 +27,4 @@ pub use fileinfo::*;
pub use filemeta::*;
pub use filemeta_inline::*;
pub use metacache::*;
pub use replication::*;

494 crates/filemeta/src/replication.rs Normal file
@@ -0,0 +1,494 @@
use core::fmt;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;
use time::OffsetDateTime;

/// StatusType of Replication for x-amz-replication-status header
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default, Hash)]
pub enum ReplicationStatusType {
    /// Pending - replication is pending.
    Pending,
    /// Completed - replication completed ok.
    Completed,
    /// CompletedLegacy was called "COMPLETE" incorrectly.
    CompletedLegacy,
    /// Failed - replication failed.
    Failed,
    /// Replica - this is a replica.
    Replica,
    #[default]
    Empty,
}

impl ReplicationStatusType {
    /// Returns string representation of status
    pub fn as_str(&self) -> &'static str {
        match self {
            ReplicationStatusType::Pending => "PENDING",
            ReplicationStatusType::Completed => "COMPLETED",
            ReplicationStatusType::CompletedLegacy => "COMPLETE",
            ReplicationStatusType::Failed => "FAILED",
            ReplicationStatusType::Replica => "REPLICA",
            ReplicationStatusType::Empty => "",
        }
    }
    pub fn is_empty(&self) -> bool {
        matches!(self, ReplicationStatusType::Empty)
    }
}

impl fmt::Display for ReplicationStatusType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}

impl From<&str> for ReplicationStatusType {
    fn from(s: &str) -> Self {
        match s {
            "PENDING" => ReplicationStatusType::Pending,
            "COMPLETED" => ReplicationStatusType::Completed,
            "COMPLETE" => ReplicationStatusType::CompletedLegacy,
            "FAILED" => ReplicationStatusType::Failed,
            "REPLICA" => ReplicationStatusType::Replica,
            _ => ReplicationStatusType::Empty,
        }
    }
}
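
Round-tripping through as_str/From<&str> is exact for every variant, including the legacy spelling; a quick illustration:

    let s: ReplicationStatusType = "COMPLETE".into();
    assert_eq!(s, ReplicationStatusType::CompletedLegacy); // legacy spelling preserved on parse
    assert_eq!(ReplicationStatusType::from(s.as_str()), ReplicationStatusType::CompletedLegacy);
    assert_eq!(ReplicationStatusType::from("bogus"), ReplicationStatusType::Empty); // unknown -> Empty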

impl From<VersionPurgeStatusType> for ReplicationStatusType {
    fn from(status: VersionPurgeStatusType) -> Self {
        match status {
            VersionPurgeStatusType::Pending => ReplicationStatusType::Pending,
            VersionPurgeStatusType::Complete => ReplicationStatusType::Completed,
            VersionPurgeStatusType::Failed => ReplicationStatusType::Failed,
            VersionPurgeStatusType::Empty => ReplicationStatusType::Empty,
        }
    }
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum VersionPurgeStatusType {
    Pending,
    Complete,
    Failed,
    #[default]
    Empty,
}

impl VersionPurgeStatusType {
    /// Returns string representation of version purge status
    pub fn as_str(&self) -> &'static str {
        match self {
            VersionPurgeStatusType::Pending => "PENDING",
            VersionPurgeStatusType::Complete => "COMPLETE",
            VersionPurgeStatusType::Failed => "FAILED",
            VersionPurgeStatusType::Empty => "",
        }
    }

    /// Returns true if the version is pending purge.
    pub fn is_pending(&self) -> bool {
        matches!(self, VersionPurgeStatusType::Pending | VersionPurgeStatusType::Failed)
    }

    pub fn is_empty(&self) -> bool {
        matches!(self, VersionPurgeStatusType::Empty)
    }
}

impl fmt::Display for VersionPurgeStatusType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}

impl From<&str> for VersionPurgeStatusType {
    fn from(s: &str) -> Self {
        match s {
            "PENDING" => VersionPurgeStatusType::Pending,
            "COMPLETE" => VersionPurgeStatusType::Complete,
            "FAILED" => VersionPurgeStatusType::Failed,
            _ => VersionPurgeStatusType::Empty,
        }
    }
}

/// Type - replication type enum
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum ReplicationType {
    #[default]
    Unset,
    Object,
    Delete,
    Metadata,
    Heal,
    ExistingObject,
    Resync,
    All,
}

impl ReplicationType {
    pub fn as_str(&self) -> &'static str {
        match self {
            ReplicationType::Unset => "",
            ReplicationType::Object => "OBJECT",
            ReplicationType::Delete => "DELETE",
            ReplicationType::Metadata => "METADATA",
            ReplicationType::Heal => "HEAL",
            ReplicationType::ExistingObject => "EXISTING_OBJECT",
            ReplicationType::Resync => "RESYNC",
            ReplicationType::All => "ALL",
        }
    }

    pub fn is_valid(&self) -> bool {
        matches!(
            self,
            ReplicationType::Object
                | ReplicationType::Delete
                | ReplicationType::Metadata
                | ReplicationType::Heal
                | ReplicationType::ExistingObject
                | ReplicationType::Resync
                | ReplicationType::All
        )
    }

    pub fn is_data_replication(&self) -> bool {
        matches!(self, ReplicationType::Object | ReplicationType::Delete | ReplicationType::Heal)
    }
}

impl fmt::Display for ReplicationType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}

impl From<&str> for ReplicationType {
    fn from(s: &str) -> Self {
        match s {
            "UNSET" => ReplicationType::Unset,
            "OBJECT" => ReplicationType::Object,
            "DELETE" => ReplicationType::Delete,
            "METADATA" => ReplicationType::Metadata,
            "HEAL" => ReplicationType::Heal,
            "EXISTING_OBJECT" => ReplicationType::ExistingObject,
            "RESYNC" => ReplicationType::Resync,
            "ALL" => ReplicationType::All,
            _ => ReplicationType::Unset,
        }
    }
}

/// ReplicationState represents internal replication state
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq)]
pub struct ReplicationState {
    pub replica_timestamp: Option<OffsetDateTime>,
    pub replica_status: ReplicationStatusType,
    pub delete_marker: bool,
    pub replication_timestamp: Option<OffsetDateTime>,
    pub replication_status_internal: Option<String>,
    pub version_purge_status_internal: Option<String>,
    pub replicate_decision_str: String,
    pub targets: HashMap<String, ReplicationStatusType>,
    pub purge_targets: HashMap<String, VersionPurgeStatusType>,
    pub reset_statuses_map: HashMap<String, String>,
}

impl ReplicationState {
    pub fn new() -> Self {
        Self::default()
    }

    /// Returns true if replication state is identical for version purge statuses and replication statuses
    pub fn equal(&self, other: &ReplicationState) -> bool {
        self.replica_status == other.replica_status
            && self.replication_status_internal == other.replication_status_internal
            && self.version_purge_status_internal == other.version_purge_status_internal
    }

    /// Returns overall replication status for the object version being replicated
    pub fn composite_replication_status(&self) -> ReplicationStatusType {
        if let Some(replication_status_internal) = &self.replication_status_internal {
            match ReplicationStatusType::from(replication_status_internal.as_str()) {
                ReplicationStatusType::Pending
                | ReplicationStatusType::Completed
                | ReplicationStatusType::Failed
                | ReplicationStatusType::Replica => {
                    return ReplicationStatusType::from(replication_status_internal.as_str());
                }
                _ => {
                    let repl_status = get_composite_replication_status(&self.targets);

                    if self.replica_timestamp.is_none() {
                        return repl_status;
                    }

                    if repl_status == ReplicationStatusType::Completed {
                        if let (Some(replica_timestamp), Some(replication_timestamp)) =
                            (self.replica_timestamp, self.replication_timestamp)
                        {
                            if replica_timestamp > replication_timestamp {
                                return self.replica_status.clone();
                            }
                        }
                    }

                    return repl_status;
                }
            }
        } else if self.replica_status != ReplicationStatusType::default() {
            return self.replica_status.clone();
        }

        ReplicationStatusType::default()
    }

    /// Returns overall replication purge status for the permanent delete being replicated
    pub fn composite_version_purge_status(&self) -> VersionPurgeStatusType {
        match VersionPurgeStatusType::from(self.version_purge_status_internal.clone().unwrap_or_default().as_str()) {
            VersionPurgeStatusType::Pending | VersionPurgeStatusType::Complete | VersionPurgeStatusType::Failed => {
                VersionPurgeStatusType::from(self.version_purge_status_internal.clone().unwrap_or_default().as_str())
            }
            _ => get_composite_version_purge_status(&self.purge_targets),
        }
    }

    /// Returns replicatedInfos struct initialized with the previous state of replication
    pub fn target_state(&self, arn: &str) -> ReplicatedTargetInfo {
        ReplicatedTargetInfo {
            arn: arn.to_string(),
            prev_replication_status: self.targets.get(arn).cloned().unwrap_or_default(),
            version_purge_status: self.purge_targets.get(arn).cloned().unwrap_or_default(),
            resync_timestamp: self.reset_statuses_map.get(arn).cloned().unwrap_or_default(),
            ..Default::default()
        }
    }
}

pub fn get_composite_replication_status(targets: &HashMap<String, ReplicationStatusType>) -> ReplicationStatusType {
    if targets.is_empty() {
        return ReplicationStatusType::Empty;
    }

    let mut completed = 0;
    for status in targets.values() {
        match status {
            ReplicationStatusType::Failed => return ReplicationStatusType::Failed,
            ReplicationStatusType::Completed => completed += 1,
            _ => {}
        }
    }

    if completed == targets.len() {
        ReplicationStatusType::Completed
    } else {
        ReplicationStatusType::Pending
    }
}
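
The folding rule is: any Failed wins, all Completed means Completed, anything else is Pending; for example:

    use std::collections::HashMap;

    let mut targets = HashMap::new();
    targets.insert("arn:a".to_string(), ReplicationStatusType::Completed);
    targets.insert("arn:b".to_string(), ReplicationStatusType::Pending);
    // One target still pending -> the composite is Pending.
    assert_eq!(get_composite_replication_status(&targets), ReplicationStatusType::Pending);

    targets.insert("arn:b".to_string(), ReplicationStatusType::Failed);
    // Any failure dominates the composite result.
    assert_eq!(get_composite_replication_status(&targets), ReplicationStatusType::Failed);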

pub fn get_composite_version_purge_status(targets: &HashMap<String, VersionPurgeStatusType>) -> VersionPurgeStatusType {
    if targets.is_empty() {
        return VersionPurgeStatusType::default();
    }

    let mut completed = 0;
    for status in targets.values() {
        match status {
            VersionPurgeStatusType::Failed => return VersionPurgeStatusType::Failed,
            VersionPurgeStatusType::Complete => completed += 1,
            _ => {}
        }
    }

    if completed == targets.len() {
        VersionPurgeStatusType::Complete
    } else {
        VersionPurgeStatusType::Pending
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum ReplicationAction {
    /// Replicate all data
    All,
    /// Replicate only metadata
    Metadata,
    /// Do not replicate
    #[default]
    None,
}

impl ReplicationAction {
    /// Returns string representation of replication action
    pub fn as_str(&self) -> &'static str {
        match self {
            ReplicationAction::All => "all",
            ReplicationAction::Metadata => "metadata",
            ReplicationAction::None => "none",
        }
    }
}

impl fmt::Display for ReplicationAction {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_str())
    }
}

impl From<&str> for ReplicationAction {
    fn from(s: &str) -> Self {
        match s {
            "all" => ReplicationAction::All,
            "metadata" => ReplicationAction::Metadata,
            "none" => ReplicationAction::None,
            _ => ReplicationAction::None,
        }
    }
}

/// ReplicatedTargetInfo struct represents replication info on a target
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ReplicatedTargetInfo {
    pub arn: String,
    pub size: i64,
    pub duration: Duration,
    pub replication_action: ReplicationAction,
    pub op_type: ReplicationType,
    pub replication_status: ReplicationStatusType,
    pub prev_replication_status: ReplicationStatusType,
    pub version_purge_status: VersionPurgeStatusType,
    pub resync_timestamp: String,
    pub replication_resynced: bool,
    pub endpoint: String,
    pub secure: bool,
    pub error: Option<String>,
}

impl ReplicatedTargetInfo {
    /// Returns true for a target if arn is empty
    pub fn is_empty(&self) -> bool {
        self.arn.is_empty()
    }
}

/// ReplicatedInfos struct contains replication information for multiple targets
#[derive(Debug, Clone)]
pub struct ReplicatedInfos {
    pub replication_timestamp: Option<OffsetDateTime>,
    pub targets: Vec<ReplicatedTargetInfo>,
}

impl ReplicatedInfos {
    /// Returns the total size of completed replications
    pub fn completed_size(&self) -> i64 {
        let mut sz = 0i64;
        for target in &self.targets {
            if target.is_empty() {
                continue;
            }
            if target.replication_status == ReplicationStatusType::Completed
                && target.prev_replication_status != ReplicationStatusType::Completed
            {
                sz += target.size;
            }
        }
        sz
    }

    /// Returns true if replication was attempted on any of the targets for the object version queued
    pub fn replication_resynced(&self) -> bool {
        for target in &self.targets {
            if target.is_empty() || !target.replication_resynced {
                continue;
            }
            return true;
        }
        false
    }

    /// Returns internal representation of replication status for all targets
    pub fn replication_status_internal(&self) -> Option<String> {
        let mut result = String::new();
        for target in &self.targets {
            if target.is_empty() {
                continue;
            }
            result.push_str(&format!("{}={};", target.arn, target.replication_status));
        }
        if result.is_empty() { None } else { Some(result) }
    }
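
The internal string is a simple "arn=STATUS;" concatenation; a hypothetical sketch of the inverse parse that replication_statuses_map (used earlier by ObjectToDelete::replication_state) presumably performs — the real parser lives elsewhere in the crate:

    use std::collections::HashMap;

    fn parse_statuses(s: &str) -> HashMap<String, ReplicationStatusType> {
        s.split(';')
            .filter(|pair| !pair.is_empty())
            .filter_map(|pair| pair.split_once('='))
            .map(|(arn, status)| (arn.to_string(), ReplicationStatusType::from(status)))
            .collect()
    }

    // parse_statuses("arn:a=COMPLETED;arn:b=PENDING;") yields two entries.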

    /// Returns overall replication status across all targets
    pub fn replication_status(&self) -> ReplicationStatusType {
        if self.targets.is_empty() {
            return ReplicationStatusType::Empty;
        }

        let mut completed = 0;
        for target in &self.targets {
            match target.replication_status {
                ReplicationStatusType::Failed => return ReplicationStatusType::Failed,
                ReplicationStatusType::Completed => completed += 1,
                _ => {}
            }
        }

        if completed == self.targets.len() {
            ReplicationStatusType::Completed
        } else {
            ReplicationStatusType::Pending
        }
    }

    /// Returns overall version purge status across all targets
    pub fn version_purge_status(&self) -> VersionPurgeStatusType {
        if self.targets.is_empty() {
            return VersionPurgeStatusType::Empty;
        }

        let mut completed = 0;
        for target in &self.targets {
            match target.version_purge_status {
                VersionPurgeStatusType::Failed => return VersionPurgeStatusType::Failed,
                VersionPurgeStatusType::Complete => completed += 1,
                _ => {}
            }
        }

        if completed == self.targets.len() {
            VersionPurgeStatusType::Complete
        } else {
            VersionPurgeStatusType::Pending
        }
    }

    /// Returns internal representation of version purge status for all targets
    pub fn version_purge_status_internal(&self) -> Option<String> {
        let mut result = String::new();
        for target in &self.targets {
            if target.is_empty() || target.version_purge_status.is_empty() {
                continue;
            }
            result.push_str(&format!("{}={};", target.arn, target.version_purge_status));
        }
        if result.is_empty() { None } else { Some(result) }
    }

    /// Returns replication action based on target that actually performed replication
    pub fn action(&self) -> ReplicationAction {
        for target in &self.targets {
            if target.is_empty() {
                continue;
            }
            // rely on replication action from target that actually performed replication now.
            if target.prev_replication_status != ReplicationStatusType::Completed {
                return target.replication_action;
            }
        }
        ReplicationAction::None
    }
}
@@ -67,7 +67,7 @@ pub fn create_real_xlmeta() -> Result<Vec<u8>> {
    let delete_marker = MetaDeleteMarker {
        version_id: Some(delete_version_id),
        mod_time: Some(OffsetDateTime::from_unix_timestamp(1705312260)?), // 1 minute later
        meta_sys: None,
        meta_sys: HashMap::new(),
    };

    let delete_file_version = FileMetaVersion {
@@ -151,7 +151,7 @@ pub fn create_complex_xlmeta() -> Result<Vec<u8>> {
    let delete_marker = MetaDeleteMarker {
        version_id: Some(delete_version_id),
        mod_time: Some(OffsetDateTime::from_unix_timestamp(1705312200 + i * 60 + 30)?),
        meta_sys: None,
        meta_sys: HashMap::new(),
    };

    let delete_file_version = FileMetaVersion {

@@ -46,5 +46,6 @@ jsonwebtoken = { workspace = true }
tracing.workspace = true
rustfs-madmin.workspace = true
rustfs-utils = { workspace = true, features = ["path"] }
tokio-util.workspace = true

[dev-dependencies]

@@ -43,3 +43,7 @@ pub async fn init_iam_sys(ecstore: Arc<ECStore>) -> Result<()> {
pub fn get() -> Result<Arc<IamSys<ObjectStore>>> {
    IAM_SYS.get().map(Arc::clone).ok_or(Error::IamSysNotInitialized)
}

pub fn get_global_iam_sys() -> Option<Arc<IamSys<ObjectStore>>> {
    IAM_SYS.get().cloned()
}

@@ -23,6 +23,7 @@ use time::OffsetDateTime;

#[async_trait::async_trait]
pub trait Store: Clone + Send + Sync + 'static {
    fn has_watcher(&self) -> bool;
    async fn save_iam_config<Item: Serialize + Send>(&self, item: Item, path: impl AsRef<str> + Send) -> Result<()>;
    async fn load_iam_config<Item: DeserializeOwned>(&self, path: impl AsRef<str> + Send) -> Result<Item>;
    async fn delete_iam_config(&self, path: impl AsRef<str> + Send) -> Result<()>;
@@ -89,6 +90,24 @@ impl UserType {
            UserType::None => "",
        }
    }
    pub fn to_u64(&self) -> u64 {
        match self {
            UserType::Svc => 1,
            UserType::Sts => 2,
            UserType::Reg => 3,
            UserType::None => 0,
        }
    }

    pub fn from_u64(u64: u64) -> Option<Self> {
        match u64 {
            1 => Some(UserType::Svc),
            2 => Some(UserType::Sts),
            3 => Some(UserType::Reg),
            0 => Some(UserType::None),
            _ => None,
        }
    }
}
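
to_u64 and from_u64 are mutual inverses over the four variants; a quick property sketch (using matches! so no extra derives on UserType are assumed):

    assert!(matches!(UserType::from_u64(UserType::Svc.to_u64()), Some(UserType::Svc)));
    assert!(matches!(UserType::from_u64(UserType::None.to_u64()), Some(UserType::None)));
    assert!(matches!(UserType::from_u64(42), None)); // anything outside 0..=3 is rejected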
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone)]
|
||||
|
||||
@@ -20,6 +20,8 @@ use crate::{
|
||||
manager::{extract_jwt_claims, get_default_policyes},
|
||||
};
|
||||
use futures::future::join_all;
|
||||
use rustfs_ecstore::StorageAPI as _;
|
||||
use rustfs_ecstore::store_api::{ObjectInfoOrErr, WalkOptions};
|
||||
use rustfs_ecstore::{
|
||||
config::{
|
||||
RUSTFS_CONFIG_PREFIX,
|
||||
@@ -28,15 +30,14 @@ use rustfs_ecstore::{
|
||||
global::get_global_action_cred,
|
||||
store::ECStore,
|
||||
store_api::{ObjectInfo, ObjectOptions},
|
||||
store_list_objects::{ObjectInfoOrErr, WalkOptions},
|
||||
};
|
||||
use rustfs_policy::{auth::UserIdentity, policy::PolicyDoc};
|
||||
use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
use std::sync::LazyLock;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use tokio::sync::broadcast::{self, Receiver as B_Receiver};
|
||||
use tokio::sync::mpsc::{self, Sender};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
pub static IAM_CONFIG_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam"));
|
||||
@@ -144,7 +145,7 @@ impl ObjectStore {
|
||||
Ok((Self::decrypt_data(&data)?, obj))
|
||||
}
|
||||
|
||||
async fn list_iam_config_items(&self, prefix: &str, ctx_rx: B_Receiver<bool>, sender: Sender<StringOrErr>) {
|
||||
async fn list_iam_config_items(&self, prefix: &str, ctx: CancellationToken, sender: Sender<StringOrErr>) {
|
||||
// debug!("list iam config items, prefix: {}", &prefix);
|
||||
|
||||
// TODO: Implement walk, use walk
|
||||
@@ -156,7 +157,11 @@ impl ObjectStore {
|
||||
let (tx, mut rx) = mpsc::channel::<ObjectInfoOrErr>(100);
|
||||
|
||||
let path = prefix.to_owned();
|
||||
tokio::spawn(async move { store.walk(ctx_rx, Self::BUCKET_NAME, &path, tx, WalkOptions::default()).await });
|
||||
tokio::spawn(async move {
|
||||
store
|
||||
.walk(ctx.clone(), Self::BUCKET_NAME, &path, tx, WalkOptions::default())
.await
});

let prefix = prefix.to_owned();
tokio::spawn(async move {
@@ -190,10 +195,11 @@ impl ObjectStore {
}

async fn list_all_iamconfig_items(&self) -> Result<HashMap<String, Vec<String>>> {
let (ctx_tx, ctx_rx) = broadcast::channel(1);
let (tx, mut rx) = mpsc::channel::<StringOrErr>(100);

self.list_iam_config_items(format!("{}/", *IAM_CONFIG_PREFIX).as_str(), ctx_rx, tx)
let ctx = CancellationToken::new();

self.list_iam_config_items(format!("{}/", *IAM_CONFIG_PREFIX).as_str(), ctx.clone(), tx)
.await;

let mut res = HashMap::new();
@@ -201,7 +207,7 @@ impl ObjectStore {
while let Some(v) = rx.recv().await {
if let Some(err) = v.err {
warn!("list_iam_config_items {:?}", err);
let _ = ctx_tx.send(true);
ctx.cancel();

return Err(err);
}
@@ -215,7 +221,7 @@ impl ObjectStore {
}
}

let _ = ctx_tx.send(true);
ctx.cancel();

Ok(res)
}
@@ -374,6 +380,9 @@ impl ObjectStore {

#[async_trait::async_trait]
impl Store for ObjectStore {
fn has_watcher(&self) -> bool {
false
}
async fn load_iam_config<Item: DeserializeOwned>(&self, path: impl AsRef<str> + Send) -> Result<Item> {
let mut data = read_config(self.object_api.clone(), path.as_ref()).await?;

@@ -477,15 +486,15 @@ impl Store for ObjectStore {
UserType::None => "",
};

let (ctx_tx, ctx_rx) = broadcast::channel(1);
let ctx = CancellationToken::new();
let (tx, mut rx) = mpsc::channel::<StringOrErr>(100);

self.list_iam_config_items(base_prefix, ctx_rx, tx).await;
self.list_iam_config_items(base_prefix, ctx.clone(), tx).await;

while let Some(v) = rx.recv().await {
if let Some(err) = v.err {
warn!("list_iam_config_items {:?}", err);
let _ = ctx_tx.send(true);
let _ = ctx.cancel();

return Err(err);
}
@@ -495,7 +504,7 @@ impl Store for ObjectStore {
self.load_user(&name, user_type, m).await?;
}
}
let _ = ctx_tx.send(true);
let _ = ctx.cancel();
Ok(())
}
async fn load_secret_key(&self, name: &str, user_type: UserType) -> Result<String> {
@@ -539,15 +548,15 @@ impl Store for ObjectStore {
Ok(())
}
async fn load_groups(&self, m: &mut HashMap<String, GroupInfo>) -> Result<()> {
let (ctx_tx, ctx_rx) = broadcast::channel(1);
let ctx = CancellationToken::new();
let (tx, mut rx) = mpsc::channel::<StringOrErr>(100);

self.list_iam_config_items(&IAM_CONFIG_GROUPS_PREFIX, ctx_rx, tx).await;
self.list_iam_config_items(&IAM_CONFIG_GROUPS_PREFIX, ctx.clone(), tx).await;

while let Some(v) = rx.recv().await {
if let Some(err) = v.err {
warn!("list_iam_config_items {:?}", err);
let _ = ctx_tx.send(true);
let _ = ctx.cancel();

return Err(err);
}
@@ -557,7 +566,7 @@ impl Store for ObjectStore {
self.load_group(&name, m).await?;
}
}
let _ = ctx_tx.send(true);
let _ = ctx.cancel();
Ok(())
}

@@ -603,15 +612,15 @@ impl Store for ObjectStore {
Ok(())
}
async fn load_policy_docs(&self, m: &mut HashMap<String, PolicyDoc>) -> Result<()> {
let (ctx_tx, ctx_rx) = broadcast::channel(1);
let ctx = CancellationToken::new();
let (tx, mut rx) = mpsc::channel::<StringOrErr>(100);

self.list_iam_config_items(&IAM_CONFIG_POLICIES_PREFIX, ctx_rx, tx).await;
self.list_iam_config_items(&IAM_CONFIG_POLICIES_PREFIX, ctx.clone(), tx).await;

while let Some(v) = rx.recv().await {
if let Some(err) = v.err {
warn!("list_iam_config_items {:?}", err);
let _ = ctx_tx.send(true);
let _ = ctx.cancel();

return Err(err);
}
@@ -621,7 +630,7 @@ impl Store for ObjectStore {
self.load_policy_doc(&name, m).await?;
}
}
let _ = ctx_tx.send(true);
let _ = ctx.cancel();
Ok(())
}

@@ -678,15 +687,15 @@ impl Store for ObjectStore {
}
}
};
let (ctx_tx, ctx_rx) = broadcast::channel(1);
let ctx = CancellationToken::new();
let (tx, mut rx) = mpsc::channel::<StringOrErr>(100);

self.list_iam_config_items(base_path, ctx_rx, tx).await;
self.list_iam_config_items(base_path, ctx.clone(), tx).await;

while let Some(v) = rx.recv().await {
if let Some(err) = v.err {
warn!("list_iam_config_items {:?}", err);
let _ = ctx_tx.send(true);
let _ = ctx.cancel();

return Err(err);
}
@@ -696,7 +705,7 @@ impl Store for ObjectStore {
self.load_mapped_policy(name, user_type, is_group, m).await?;
}
}
let _ = ctx_tx.send(true);
let _ = ctx.cancel(); // TODO: check if this is needed
Ok(())
}
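The hunks above swap the one-shot `broadcast::channel(1)` stop signal for `tokio_util::sync::CancellationToken`. A minimal sketch of the consumer-cancels-producer pattern, using only tokio and tokio-util APIs (the item loop is illustrative, not rustfs code):

```rust
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;

#[tokio::main]
async fn main() {
    let ctx = CancellationToken::new();
    let (tx, mut rx) = mpsc::channel::<String>(100);

    // Producer: stops as soon as the token is cancelled or the receiver is gone.
    let worker_ctx = ctx.clone();
    tokio::spawn(async move {
        for i in 0.. {
            tokio::select! {
                _ = worker_ctx.cancelled() => break,
                res = tx.send(format!("item-{i}")) => {
                    if res.is_err() {
                        break; // receiver dropped
                    }
                }
            }
        }
    });

    // Consumer: cancel mid-stream instead of draining everything.
    while let Some(item) = rx.recv().await {
        if item.ends_with("-3") {
            ctx.cancel(); // infallible and idempotent
            break;
        }
        println!("{item}");
    }
}
```

`cancel()` returns `()` and is idempotent, which is why the old `let _ =` wrappers become unnecessary; the leftover `let _ = ctx.cancel();` at several call sites above still compiles, it just discards a unit value.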
@@ -25,6 +25,7 @@ use crate::store::Store;
use crate::store::UserType;
use crate::utils::extract_claims;
use rustfs_ecstore::global::get_global_action_cred;
use rustfs_ecstore::notification_sys::get_global_notification_sys;
use rustfs_madmin::AddOrUpdateUserReq;
use rustfs_madmin::GroupDesc;
use rustfs_policy::arn::ARN;
@@ -41,6 +42,7 @@ use serde_json::json;
use std::collections::HashMap;
use std::sync::Arc;
use time::OffsetDateTime;
use tracing::warn;

pub const MAX_SVCSESSION_POLICY_SIZE: usize = 4096;

@@ -63,6 +65,9 @@ impl<T: Store> IamSys<T> {
roles_map: HashMap::new(),
}
}
pub fn has_watcher(&self) -> bool {
self.store.api.has_watcher()
}

pub async fn load_group(&self, name: &str) -> Result<()> {
self.store.group_notification_handler(name).await
@@ -104,8 +109,17 @@ impl<T: Store> IamSys<T> {

self.store.delete_policy(name, notify).await?;

if notify {
// TODO: implement notification
if !notify || self.has_watcher() {
return Ok(());
}

if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.delete_policy(name).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify delete_policy failed: {}", err);
}
}
}

Ok(())
@@ -142,9 +156,20 @@ impl<T: Store> IamSys<T> {
}

pub async fn set_policy(&self, name: &str, policy: Policy) -> Result<OffsetDateTime> {
self.store.set_policy(name, policy).await
let updated_at = self.store.set_policy(name, policy).await?;

// TODO: notification
if !self.has_watcher() {
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_policy(name).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_policy failed: {}", err);
}
}
}
}

Ok(updated_at)
}

pub async fn get_role_policy(&self, arn_str: &str) -> Result<(ARN, String)> {
@@ -159,9 +184,51 @@ impl<T: Store> IamSys<T> {
Ok((arn, policy.clone()))
}

pub async fn delete_user(&self, name: &str, _notify: bool) -> Result<()> {
self.store.delete_user(name, UserType::Reg).await
// TODO: notification
pub async fn delete_user(&self, name: &str, notify: bool) -> Result<()> {
self.store.delete_user(name, UserType::Reg).await?;

if notify && !self.has_watcher() {
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.delete_user(name).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify delete_user failed: {}", err);
}
}
}
}

Ok(())
}

async fn notify_for_user(&self, name: &str, is_temp: bool) {
if self.has_watcher() {
return;
}

if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_user(name, is_temp).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_user failed: {}", err);
}
}
}
}

async fn notify_for_service_account(&self, name: &str) {
if self.has_watcher() {
return;
}

if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_service_account(name).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_service_account failed: {}", err);
}
}
}
}

pub async fn current_policies(&self, name: &str) -> String {
@@ -177,8 +244,11 @@ impl<T: Store> IamSys<T> {
}

pub async fn set_temp_user(&self, name: &str, cred: &Credentials, policy_name: Option<&str>) -> Result<OffsetDateTime> {
self.store.set_temp_user(name, cred, policy_name).await
// TODO: notification
let updated_at = self.store.set_temp_user(name, cred, policy_name).await?;

self.notify_for_user(&cred.access_key, true).await;

Ok(updated_at)
}

pub async fn is_temp_user(&self, name: &str) -> Result<(bool, String)> {
@@ -208,8 +278,11 @@ impl<T: Store> IamSys<T> {
}

pub async fn set_user_status(&self, name: &str, status: rustfs_madmin::AccountStatus) -> Result<OffsetDateTime> {
self.store.set_user_status(name, status).await
// TODO: notification
let updated_at = self.store.set_user_status(name, status).await?;

self.notify_for_user(name, false).await;

Ok(updated_at)
}

pub async fn new_service_account(
@@ -294,14 +367,17 @@ impl<T: Store> IamSys<T> {

let create_at = self.store.add_service_account(cred.clone()).await?;

self.notify_for_service_account(&cred.access_key).await;

Ok((cred, create_at))
// TODO: notification
}

pub async fn update_service_account(&self, name: &str, opts: UpdateServiceAccountOpts) -> Result<OffsetDateTime> {
self.store.update_service_account(name, opts).await
let updated_at = self.store.update_service_account(name, opts).await?;

// TODO: notification
self.notify_for_service_account(name).await;

Ok(updated_at)
}

pub async fn list_service_accounts(&self, access_key: &str) -> Result<Vec<Credentials>> {
@@ -424,7 +500,7 @@ impl<T: Store> IamSys<T> {
extract_jwt_claims(&u)
}

pub async fn delete_service_account(&self, access_key: &str, _notify: bool) -> Result<()> {
pub async fn delete_service_account(&self, access_key: &str, notify: bool) -> Result<()> {
let Some(u) = self.store.get_user(access_key).await else {
return Ok(());
};
@@ -433,9 +509,35 @@ impl<T: Store> IamSys<T> {
return Ok(());
}

self.store.delete_user(access_key, UserType::Svc).await
self.store.delete_user(access_key, UserType::Svc).await?;

// TODO: notification
if notify && !self.has_watcher() {
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.delete_service_account(access_key).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify delete_service_account failed: {}", err);
}
}
}
}

Ok(())
}

async fn notify_for_group(&self, group: &str) {
if self.has_watcher() {
return;
}

if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_group(group).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_group failed: {}", err);
}
}
}
}

pub async fn create_user(&self, access_key: &str, args: &AddOrUpdateUserReq) -> Result<OffsetDateTime> {
@@ -451,8 +553,11 @@ impl<T: Store> IamSys<T> {
return Err(IamError::InvalidSecretKeyLength);
}

self.store.add_user(access_key, args).await
// TODO: notification
let updated_at = self.store.add_user(access_key, args).await?;

self.notify_for_user(access_key, false).await;

Ok(updated_at)
}

pub async fn set_user_secret_key(&self, access_key: &str, secret_key: &str) -> Result<()> {
@@ -495,18 +600,27 @@ impl<T: Store> IamSys<T> {
if contains_reserved_chars(group) {
return Err(IamError::GroupNameContainsReservedChars);
}
self.store.add_users_to_group(group, users).await
// TODO: notification
let updated_at = self.store.add_users_to_group(group, users).await?;

self.notify_for_group(group).await;

Ok(updated_at)
}

pub async fn remove_users_from_group(&self, group: &str, users: Vec<String>) -> Result<OffsetDateTime> {
self.store.remove_users_from_group(group, users).await
// TODO: notification
let updated_at = self.store.remove_users_from_group(group, users).await?;

self.notify_for_group(group).await;

Ok(updated_at)
}

pub async fn set_group_status(&self, group: &str, enable: bool) -> Result<OffsetDateTime> {
self.store.set_group_status(group, enable).await
// TODO: notification
let updated_at = self.store.set_group_status(group, enable).await?;

self.notify_for_group(group).await;

Ok(updated_at)
}
pub async fn get_group_description(&self, group: &str) -> Result<GroupDesc> {
self.store.get_group_description(group).await
@@ -517,8 +631,20 @@ impl<T: Store> IamSys<T> {
}

pub async fn policy_db_set(&self, name: &str, user_type: UserType, is_group: bool, policy: &str) -> Result<OffsetDateTime> {
self.store.policy_db_set(name, user_type, is_group, policy).await
// TODO: notification
let updated_at = self.store.policy_db_set(name, user_type, is_group, policy).await?;

if !self.has_watcher() {
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_policy_mapping(name, user_type.to_u64(), is_group).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_policy failed: {}", err);
}
}
}
}

Ok(updated_at)
}

pub async fn policy_db_get(&self, name: &str, groups: &Option<Vec<String>>) -> Result<Vec<String>> {
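A recurring shape in the IamSys hunks above: mutate through the store, then, when no watcher is active, fan the change out to peers and log (but never propagate) per-peer failures. A condensed sketch with hypothetical stand-in types (`NotificationPeerResponse` and `fan_out_reload` are not rustfs names):

```rust
struct NotificationPeerResponse {
    host: String,
    err: Option<String>,
}

// Stand-in for the RPC fan-out; the real system calls each peer over the wire.
async fn fan_out_reload(peers: &[String], _item: &str) -> Vec<NotificationPeerResponse> {
    peers
        .iter()
        .map(|host| NotificationPeerResponse { host: host.clone(), err: None })
        .collect()
}

async fn set_policy_and_notify(has_watcher: bool, peers: &[String], name: &str) {
    // 1. Persist the change through the store (elided here).
    // 2. Fan out a reload only when peers won't learn of it via a watcher.
    if !has_watcher {
        for r in fan_out_reload(peers, name).await {
            if let Some(err) = r.err {
                // Per-peer failures are logged, never returned: the local
                // write already succeeded and is not rolled back.
                eprintln!("notify load_policy on {} failed: {err}", r.host);
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let peers = vec!["node-2".to_string(), "node-3".to_string()];
    set_policy_and_notify(false, &peers, "readonly-policy").await;
}
```

The design choice worth noting: notification failures never fail the caller, since the authoritative state in the object store has already been updated; peers that missed an event converge on the next full reload.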
@@ -87,7 +87,7 @@ pub fn generate_jwt<T: Serialize>(claims: &T, secret: &str) -> std::result::Resu
jsonwebtoken::encode(&header, &claims, &EncodingKey::from_secret(secret.as_bytes()))
}

pub fn extract_claims<T: DeserializeOwned>(
pub fn extract_claims<T: DeserializeOwned + Clone>(
token: &str,
secret: &str,
) -> std::result::Result<jsonwebtoken::TokenData<T>, jsonwebtoken::errors::Error> {
@@ -193,7 +193,7 @@ mod tests {
assert_eq!(error.to_string(), "secret key length is too short");
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
struct Claims {
sub: String,
company: String,
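The `Clone` bound added to `extract_claims` lets callers keep a copy of the decoded claims (the test's `Claims` derive gains `Clone` to match). A minimal encode/decode round-trip; hedged in that this sketch follows the long-standing jsonwebtoken v9-style API, and the version this branch pins may differ in defaults:

```rust
use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize, Clone)]
struct Claims {
    sub: String,
    exp: usize, // expiry is validated by default
}

fn main() -> Result<(), jsonwebtoken::errors::Error> {
    let secret = "secret";
    let claims = Claims { sub: "alice".into(), exp: 2_000_000_000 };

    let token = encode(&Header::default(), &claims, &EncodingKey::from_secret(secret.as_bytes()))?;
    let data = decode::<Claims>(
        &token,
        &DecodingKey::from_secret(secret.as_bytes()),
        &Validation::new(Algorithm::HS256),
    )?;

    let kept: Claims = data.claims.clone(); // possible because of the new Clone bound
    assert_eq!(kept.sub, "alice");
    Ok(())
}
```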
@@ -111,7 +111,7 @@ impl LocalKmsClient {

/// Get the file path for a master key
fn master_key_path(&self, key_id: &str) -> PathBuf {
self.config.key_dir.join(format!("{}.key", key_id))
self.config.key_dir.join(format!("{key_id}.key"))
}

/// Load a master key from disk
@@ -334,12 +334,11 @@ impl KmsClient for LocalKmsClient {
if let Some(actual_value) = envelope.encryption_context.get(key) {
if actual_value != expected_value {
return Err(KmsError::context_mismatch(format!(
"Context mismatch for key '{}': expected '{}', got '{}'",
key, expected_value, actual_value
"Context mismatch for key '{key}': expected '{expected_value}', got '{actual_value}'"
)));
}
} else {
return Err(KmsError::context_mismatch(format!("Missing context key '{}'", key)));
return Err(KmsError::context_mismatch(format!("Missing context key '{key}'")));
}
}
}
@@ -720,14 +719,14 @@ impl KmsBackend for LocalKmsBackend {
.client
.load_master_key(key_id)
.await
.map_err(|_| crate::error::KmsError::key_not_found(format!("Key {} not found", key_id)))?;
.map_err(|_| crate::error::KmsError::key_not_found(format!("Key {key_id} not found")))?;

let (deletion_date_str, deletion_date_dt) = if request.force_immediate.unwrap_or(false) {
// For immediate deletion, actually delete the key from filesystem
let key_path = self.client.master_key_path(key_id);
tokio::fs::remove_file(&key_path)
.await
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to delete key file: {}", e)))?;
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to delete key file: {e}")))?;

// Remove from cache
let mut cache = self.client.key_cache.write().await;
@@ -773,9 +772,9 @@ impl KmsBackend for LocalKmsBackend {
let key_path = self.client.master_key_path(key_id);
let content = tokio::fs::read(&key_path)
.await
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to read key file: {}", e)))?;
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to read key file: {e}")))?;
let stored_key: crate::backends::local::StoredMasterKey = serde_json::from_slice(&content)
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to parse stored key: {}", e)))?;
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to parse stored key: {e}")))?;

// Decrypt the existing key material to preserve it
let existing_key_material = if let Some(ref cipher) = self.client.master_cipher {
@@ -821,13 +820,10 @@ impl KmsBackend for LocalKmsBackend {
.client
.load_master_key(key_id)
.await
.map_err(|_| crate::error::KmsError::key_not_found(format!("Key {} not found", key_id)))?;
.map_err(|_| crate::error::KmsError::key_not_found(format!("Key {key_id} not found")))?;

if master_key.status != KeyStatus::PendingDeletion {
return Err(crate::error::KmsError::invalid_key_state(format!(
"Key {} is not pending deletion",
key_id
)));
return Err(crate::error::KmsError::invalid_key_state(format!("Key {key_id} is not pending deletion")));
}

// Cancel the deletion by resetting the state
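The KMS hunks here and below are mechanical applications of Rust's inlined format arguments (stable since 1.58, and what clippy's `uninlined_format_args` lint suggests). Only bare identifiers can be captured; expressions still need a positional argument:

```rust
fn main() {
    let key_id = "master-1";
    // Before / after the style applied throughout this diff:
    let old = format!("{}.key", key_id);
    let new = format!("{key_id}.key");
    assert_eq!(old, new);

    // Captures work with format specs too, e.g. the MD5 hex case later in this diff:
    let byte = 0xABu8;
    assert_eq!(format!("{byte:x}"), "ab");

    // But only identifiers are captured; expressions stay positional:
    let path = std::path::Path::new("a/b.key");
    let msg = format!("missing: {}", path.display());
    assert!(msg.contains("b.key"));
}
```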
@@ -89,10 +89,10 @@ impl VaultKmsClient {

let settings = settings_builder
.build()
.map_err(|e| KmsError::backend_error(format!("Failed to build Vault client settings: {}", e)))?;
.map_err(|e| KmsError::backend_error(format!("Failed to build Vault client settings: {e}")))?;

let client =
VaultClient::new(settings).map_err(|e| KmsError::backend_error(format!("Failed to create Vault client: {}", e)))?;
VaultClient::new(settings).map_err(|e| KmsError::backend_error(format!("Failed to create Vault client: {e}")))?;

info!("Successfully connected to Vault at {}", config.address);

@@ -144,7 +144,7 @@ impl VaultKmsClient {

kv2::set(&self.client, &self.kv_mount, &path, key_data)
.await
.map_err(|e| KmsError::backend_error(format!("Failed to store key in Vault: {}", e)))?;
.map_err(|e| KmsError::backend_error(format!("Failed to store key in Vault: {e}")))?;

debug!("Stored key {} in Vault at path {}", key_id, path);
Ok(())
@@ -176,7 +176,7 @@ impl VaultKmsClient {
let secret: VaultKeyData = kv2::read(&self.client, &self.kv_mount, &path).await.map_err(|e| match e {
vaultrs::error::ClientError::ResponseWrapError => KmsError::key_not_found(key_id),
vaultrs::error::ClientError::APIError { code: 404, .. } => KmsError::key_not_found(key_id),
_ => KmsError::backend_error(format!("Failed to read key from Vault: {}", e)),
_ => KmsError::backend_error(format!("Failed to read key from Vault: {e}")),
})?;

debug!("Retrieved key {} from Vault, tags: {:?}", key_id, secret.tags);
@@ -200,7 +200,7 @@ impl VaultKmsClient {
debug!("Key path doesn't exist in Vault (404), returning empty list");
Ok(Vec::new())
}
Err(e) => Err(KmsError::backend_error(format!("Failed to list keys in Vault: {}", e))),
Err(e) => Err(KmsError::backend_error(format!("Failed to list keys in Vault: {e}"))),
}
}

@@ -214,7 +214,7 @@ impl VaultKmsClient {
.await
.map_err(|e| match e {
vaultrs::error::ClientError::APIError { code: 404, .. } => KmsError::key_not_found(key_id),
_ => KmsError::backend_error(format!("Failed to delete key metadata from Vault: {}", e)),
_ => KmsError::backend_error(format!("Failed to delete key metadata from Vault: {e}")),
})?;

debug!("Permanently deleted key {} metadata from Vault at path {}", key_id, path);
@@ -649,7 +649,7 @@ impl KmsBackend for VaultKmsBackend {
let mut key_metadata = match self.describe_key(describe_request).await {
Ok(response) => response.key_metadata,
Err(_) => {
return Err(crate::error::KmsError::key_not_found(format!("Key {} not found", key_id)));
return Err(crate::error::KmsError::key_not_found(format!("Key {key_id} not found")));
}
};

@@ -705,15 +705,12 @@ impl KmsBackend for VaultKmsBackend {
let mut key_metadata = match self.describe_key(describe_request).await {
Ok(response) => response.key_metadata,
Err(_) => {
return Err(crate::error::KmsError::key_not_found(format!("Key {} not found", key_id)));
return Err(crate::error::KmsError::key_not_found(format!("Key {key_id} not found")));
}
};

if key_metadata.key_state != KeyState::PendingDeletion {
return Err(crate::error::KmsError::invalid_key_state(format!(
"Key {} is not pending deletion",
key_id
)));
return Err(crate::error::KmsError::invalid_key_state(format!("Key {key_id} is not pending deletion")));
}

// Cancel the deletion by resetting the state

@@ -315,7 +315,7 @@ impl KmsConfig {
config.backend = match backend_type.to_lowercase().as_str() {
"local" => KmsBackend::Local,
"vault" => KmsBackend::Vault,
_ => return Err(KmsError::configuration_error(format!("Unknown KMS backend: {}", backend_type))),
_ => return Err(KmsError::configuration_error(format!("Unknown KMS backend: {backend_type}"))),
};
}

@@ -227,7 +227,7 @@ impl ObjectEncryptionService {
self.kms_manager
.create_key(create_req)
.await
.map_err(|e| KmsError::backend_error(format!("Failed to auto-create SSE-S3 key {}: {}", actual_key_id, e)))?;
.map_err(|e| KmsError::backend_error(format!("Failed to auto-create SSE-S3 key {actual_key_id}: {e}")))?;
}
} else {
// For SSE-KMS, key must exist
@@ -235,7 +235,7 @@ impl ObjectEncryptionService {
key_id: actual_key_id.to_string(),
};
self.kms_manager.describe_key(describe_req).await.map_err(|_| {
KmsError::invalid_operation(format!("SSE-KMS key '{}' not found. Please create it first.", actual_key_id))
KmsError::invalid_operation(format!("SSE-KMS key '{actual_key_id}' not found. Please create it first."))
})?;
}

@@ -250,7 +250,7 @@ impl ObjectEncryptionService {
.kms_manager
.generate_data_key(request)
.await
.map_err(|e| KmsError::backend_error(format!("Failed to generate data key: {}", e)))?;
.map_err(|e| KmsError::backend_error(format!("Failed to generate data key: {e}")))?;

let plaintext_key = data_key.plaintext_key;

@@ -325,7 +325,7 @@ impl ObjectEncryptionService {
.kms_manager
.decrypt(decrypt_request)
.await
.map_err(|e| KmsError::backend_error(format!("Failed to decrypt data key: {}", e)))?;
.map_err(|e| KmsError::backend_error(format!("Failed to decrypt data key: {e}")))?;

// Create cipher
let cipher = create_cipher(&algorithm, &decrypt_response.plaintext)?;
@@ -379,7 +379,7 @@ impl ObjectEncryptionService {
// Validate key MD5 if provided
if let Some(expected_md5) = customer_key_md5 {
let actual_md5 = md5::compute(customer_key);
let actual_md5_hex = format!("{:x}", actual_md5);
let actual_md5_hex = format!("{actual_md5:x}");
if actual_md5_hex != expected_md5.to_lowercase() {
return Err(KmsError::validation_error("Customer key MD5 mismatch"));
}
@@ -487,12 +487,11 @@ impl ObjectEncryptionService {
Some(actual_value) if actual_value == expected_value => continue,
Some(actual_value) => {
return Err(KmsError::context_mismatch(format!(
"Context mismatch for '{}': expected '{}', got '{}'",
key, expected_value, actual_value
"Context mismatch for '{key}': expected '{expected_value}', got '{actual_value}'"
)));
}
None => {
return Err(KmsError::context_mismatch(format!("Missing context key '{}'", key)));
return Err(KmsError::context_mismatch(format!("Missing context key '{key}'")));
}
}
}
@@ -562,13 +561,13 @@ impl ObjectEncryptionService {
.ok_or_else(|| KmsError::validation_error("Missing IV header"))?;
let iv = base64::engine::general_purpose::STANDARD
.decode(iv)
.map_err(|e| KmsError::validation_error(format!("Invalid IV: {}", e)))?;
.map_err(|e| KmsError::validation_error(format!("Invalid IV: {e}")))?;

let tag = if let Some(tag_str) = headers.get("x-rustfs-encryption-tag") {
Some(
base64::engine::general_purpose::STANDARD
.decode(tag_str)
.map_err(|e| KmsError::validation_error(format!("Invalid tag: {}", e)))?,
.map_err(|e| KmsError::validation_error(format!("Invalid tag: {e}")))?,
)
} else {
None
@@ -577,14 +576,14 @@ impl ObjectEncryptionService {
let encrypted_data_key = if let Some(key_str) = headers.get("x-rustfs-encryption-key") {
base64::engine::general_purpose::STANDARD
.decode(key_str)
.map_err(|e| KmsError::validation_error(format!("Invalid encrypted key: {}", e)))?
.map_err(|e| KmsError::validation_error(format!("Invalid encrypted key: {e}")))?
} else {
Vec::new() // Empty for SSE-C
};

let encryption_context = if let Some(context_str) = headers.get("x-rustfs-encryption-context") {
serde_json::from_str(context_str)
.map_err(|e| KmsError::validation_error(format!("Invalid encryption context: {}", e)))?
.map_err(|e| KmsError::validation_error(format!("Invalid encryption context: {e}")))?
} else {
HashMap::new()
};
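The SSE-C hunk above checks the customer-supplied key against its MD5 digest before use. A self-contained sketch of that comparison using the same `md5::compute` call (the hex form matches the diff; S3 proper transmits the key MD5 base64-encoded):

```rust
fn validate_customer_key(customer_key: &[u8], expected_md5_hex: &str) -> Result<(), String> {
    // md5::compute returns a Digest that formats as lowercase hex via {:x}.
    let actual_md5 = md5::compute(customer_key);
    let actual_md5_hex = format!("{actual_md5:x}");
    if actual_md5_hex != expected_md5_hex.to_lowercase() {
        return Err("Customer key MD5 mismatch".into());
    }
    Ok(())
}

fn main() {
    // Known test vector: MD5("abc") = 900150983cd24fb0d6963f7d28e17f72
    assert!(validate_customer_key(b"abc", "900150983CD24FB0D6963F7D28E17F72").is_ok());
    assert!(validate_customer_key(b"abc", "deadbeef").is_err());
}
```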
@@ -225,7 +225,7 @@ impl KmsError {
impl From<url::ParseError> for KmsError {
fn from(error: url::ParseError) -> Self {
Self::ConfigurationError {
message: format!("Invalid URL: {}", error),
message: format!("Invalid URL: {error}"),
}
}
}
@@ -233,7 +233,7 @@ impl From<url::ParseError> for KmsError {
impl From<reqwest::Error> for KmsError {
fn from(error: reqwest::Error) -> Self {
Self::BackendError {
message: format!("HTTP request failed: {}", error),
message: format!("HTTP request failed: {error}"),
}
}
}
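The two `From` impls above are what allow `?` to convert `url::ParseError` and `reqwest::Error` into `KmsError` at call sites. The same shape with a pared-down error type and a std error for illustration:

```rust
#[derive(Debug)]
enum KmsError {
    ConfigurationError { message: String },
}

impl From<std::num::ParseIntError> for KmsError {
    fn from(error: std::num::ParseIntError) -> Self {
        Self::ConfigurationError { message: format!("Invalid number: {error}") }
    }
}

fn parse_port(raw: &str) -> Result<u16, KmsError> {
    // `?` applies From<ParseIntError> for KmsError automatically.
    Ok(raw.parse::<u16>()?)
}

fn main() {
    assert!(parse_port("8200").is_ok());
    assert!(matches!(parse_port("nope"), Err(KmsError::ConfigurationError { .. })));
}
```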
@@ -137,7 +137,7 @@ impl KmsServiceManager {
Ok(())
}
Err(e) => {
let err_msg = format!("Failed to create KMS backend: {}", e);
let err_msg = format!("Failed to create KMS backend: {e}");
error!("{}", err_msg);
let mut status = self.status.write().await;
*status = KmsServiceStatus::Error(err_msg.clone());
@@ -218,7 +218,7 @@ impl KmsServiceManager {
error!("KMS health check error: {}", e);
// Update status to error
let mut status = self.status.write().await;
*status = KmsServiceStatus::Error(format!("Health check failed: {}", e));
*status = KmsServiceStatus::Error(format!("Health check failed: {e}"));
Err(e)
}
}

@@ -98,12 +98,18 @@ impl DisabledLockManager {

/// Always succeeds - all locks acquired
pub async fn acquire_locks_batch(&self, batch_request: BatchLockRequest) -> BatchLockResult {
let successful_locks: Vec<ObjectKey> = batch_request.requests.into_iter().map(|req| req.key).collect();
let successful_locks: Vec<ObjectKey> = batch_request.requests.iter().map(|req| req.key.clone()).collect();
let guards = batch_request
.requests
.into_iter()
.map(|req| FastLockGuard::new_disabled(req.key, req.mode, req.owner))
.collect();

BatchLockResult {
successful_locks,
failed_locks: Vec::new(),
all_acquired: true,
guards,
}
}
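The `DisabledLockManager` change splits the batch into two passes: clone the keys by reference first, then consume the requests to build guards, because constructing a guard moves `req.key` out of the request. A reduced model of why the order matters:

```rust
#[derive(Clone, Debug)]
struct Key(String);
struct Request { key: Key }
struct Guard { key: Key }

fn batch(requests: Vec<Request>) -> (Vec<Key>, Vec<Guard>) {
    // Pass 1: borrow, clone the keys we will report as successful.
    let successful: Vec<Key> = requests.iter().map(|r| r.key.clone()).collect();
    // Pass 2: consume the requests; each guard takes ownership of its key.
    let guards: Vec<Guard> = requests.into_iter().map(|r| Guard { key: r.key }).collect();
    (successful, guards)
}

fn main() {
    let (ok, guards) = batch(vec![Request { key: Key("a".into()) }]);
    assert_eq!(ok.len(), guards.len());
}
```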
@@ -106,6 +106,10 @@ impl FastObjectLockManager {
object: impl Into<Arc<str>>,
owner: impl Into<Arc<str>>,
) -> Result<FastLockGuard, LockResult> {
// let bucket = bucket.into();
// let object = object.into();
// let owner = owner.into();
// error!("acquire_write_lock: bucket={:?}, object={:?}, owner={:?}", bucket, object, owner);
let request = ObjectLockRequest::new_write(bucket, object, owner);
self.acquire_lock(request).await
}
@@ -213,20 +217,33 @@ impl FastObjectLockManager {
) -> BatchLockResult {
let mut all_successful = Vec::new();
let mut all_failed = Vec::new();
let mut guards = Vec::new();

for (&shard_id, requests) in shard_groups {
let shard = &self.shards[shard_id];
let shard = self.shards[shard_id].clone();

// Try fast path first for each request
for request in requests {
if shard.try_fast_path_only(request) {
all_successful.push(request.key.clone());
let key = request.key.clone();
let owner = request.owner.clone();
let mode = request.mode;

let acquired = if shard.try_fast_path_only(request) {
true
} else {
// Fallback to slow path
match shard.acquire_lock(request).await {
Ok(()) => all_successful.push(request.key.clone()),
Err(err) => all_failed.push((request.key.clone(), err)),
Ok(()) => true,
Err(err) => {
all_failed.push((key.clone(), err));
false
}
}
};

if acquired {
let guard = FastLockGuard::new(key.clone(), mode, owner.clone(), shard.clone());
shard.register_guard(guard.guard_id());
all_successful.push(key);
guards.push(guard);
}
}
}
@@ -236,6 +253,7 @@ impl FastObjectLockManager {
successful_locks: all_successful,
failed_locks: all_failed,
all_acquired,
guards,
}
}

@@ -245,16 +263,18 @@ impl FastObjectLockManager {
shard_groups: &std::collections::HashMap<usize, Vec<ObjectLockRequest>>,
) -> BatchLockResult {
// Phase 1: Try to acquire all locks
let mut acquired_locks = Vec::new();
let mut acquired_guards = Vec::new();
let mut failed_locks = Vec::new();

'outer: for (&shard_id, requests) in shard_groups {
let shard = &self.shards[shard_id];
let shard = self.shards[shard_id].clone();

for request in requests {
match shard.acquire_lock(request).await {
Ok(()) => {
acquired_locks.push((request.key.clone(), request.mode, request.owner.clone()));
let guard = FastLockGuard::new(request.key.clone(), request.mode, request.owner.clone(), shard.clone());
shard.register_guard(guard.guard_id());
acquired_guards.push(guard);
}
Err(err) => {
failed_locks.push((request.key.clone(), err));
@@ -266,35 +286,22 @@ impl FastObjectLockManager {

// Phase 2: If any failed, release all acquired locks with error tracking
if !failed_locks.is_empty() {
let mut cleanup_failures = 0;
for (key, mode, owner) in acquired_locks {
let shard = self.get_shard(&key);
if !shard.release_lock(&key, &owner, mode) {
cleanup_failures += 1;
tracing::warn!(
"Failed to release lock during batch cleanup: bucket={}, object={}",
key.bucket,
key.object
);
}
}

if cleanup_failures > 0 {
tracing::error!("Batch lock cleanup had {} failures", cleanup_failures);
}

// Drop guards to release any acquired locks.
drop(acquired_guards);
return BatchLockResult {
successful_locks: Vec::new(),
failed_locks,
all_acquired: false,
guards: Vec::new(),
};
}

// All successful
let successful_locks = acquired_guards.iter().map(|guard| guard.key().clone()).collect();
BatchLockResult {
successful_locks: acquired_locks.into_iter().map(|(key, _, _)| key).collect(),
successful_locks,
failed_locks: Vec::new(),
all_acquired: true,
guards: acquired_guards,
}
}
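Both batch paths now wrap every acquired lock in a `FastLockGuard`, so the all-or-nothing variant can roll back simply by dropping the guards instead of hand-releasing `(key, mode, owner)` triples. A compact RAII sketch of rollback-by-drop (simplified types, not the real shard machinery):

```rust
use std::sync::{Arc, Mutex};

struct Guard(Arc<Mutex<Vec<String>>>, String);

impl Drop for Guard {
    fn drop(&mut self) {
        // Release happens here, so early returns cannot leak locks.
        self.0.lock().unwrap().retain(|k| k != &self.1);
    }
}

fn acquire(held: &Arc<Mutex<Vec<String>>>, key: &str) -> Option<Guard> {
    let mut h = held.lock().unwrap();
    if h.iter().any(|k| k == key) {
        return None; // already locked by someone else
    }
    h.push(key.to_string());
    Some(Guard(held.clone(), key.to_string()))
}

fn acquire_all(held: &Arc<Mutex<Vec<String>>>, keys: &[&str]) -> Option<Vec<Guard>> {
    let mut guards = Vec::new();
    for key in keys {
        match acquire(held, key) {
            Some(g) => guards.push(g),
            // Dropping `guards` here releases everything acquired so far.
            None => return None,
        }
    }
    Some(guards)
}

fn main() {
    let held = Arc::new(Mutex::new(vec!["b".to_string()])); // "b" is already taken
    assert!(acquire_all(&held, &["a", "b"]).is_none());
    assert_eq!(held.lock().unwrap().len(), 1); // "a" was rolled back
}
```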
@@ -111,6 +111,9 @@ impl ObjectLockState {
#[cfg(test)]
mod tests {
use super::*;
use crate::fast_lock::state::{ExclusiveOwnerInfo, SharedOwnerEntry};
use std::sync::Arc;
use std::time::{Duration, SystemTime};

#[test]
fn test_object_pool() {
@@ -142,8 +145,17 @@ mod tests {
let mut state = ObjectLockState::new();

// Modify state
*state.current_owner.write() = Some("test_owner".into());
state.shared_owners.write().push("shared_owner".into());
*state.current_owner.write() = Some(ExclusiveOwnerInfo {
owner: Arc::from("test_owner"),
acquired_at: SystemTime::now(),
lock_timeout: Duration::from_secs(30),
});
state.shared_owners.write().push(SharedOwnerEntry {
owner: Arc::from("shared_owner"),
count: 1,
acquired_at: SystemTime::now(),
lock_timeout: Duration::from_secs(30),
});

// Reset
state.reset_for_reuse();

@@ -88,8 +88,8 @@ impl LockShard {

// Try atomic acquisition
let success = match request.mode {
LockMode::Shared => state.try_acquire_shared_fast(&request.owner),
LockMode::Exclusive => state.try_acquire_exclusive_fast(&request.owner),
LockMode::Shared => state.try_acquire_shared_fast(&request.owner, request.lock_timeout),
LockMode::Exclusive => state.try_acquire_exclusive_fast(&request.owner, request.lock_timeout),
};

if success {
@@ -108,14 +108,14 @@ impl LockShard {
let state = state.clone();
drop(objects);

if state.try_acquire_exclusive_fast(&request.owner) {
if state.try_acquire_exclusive_fast(&request.owner, request.lock_timeout) {
return Some(state);
}
} else {
// Create new state from pool and acquire immediately
let state_box = self.object_pool.acquire();
let state = Arc::new(*state_box);
if state.try_acquire_exclusive_fast(&request.owner) {
if state.try_acquire_exclusive_fast(&request.owner, request.lock_timeout) {
objects.insert(request.key.clone(), state.clone());
return Some(state);
}
@@ -151,8 +151,8 @@ impl LockShard {

// Try acquisition again
let success = match request.mode {
LockMode::Shared => state.try_acquire_shared_fast(&request.owner),
LockMode::Exclusive => state.try_acquire_exclusive_fast(&request.owner),
LockMode::Shared => state.try_acquire_shared_fast(&request.owner, request.lock_timeout),
LockMode::Exclusive => state.try_acquire_exclusive_fast(&request.owner, request.lock_timeout),
};

if success {
@@ -443,22 +443,24 @@ impl LockShard {
let objects = self.objects.read();
if let Some(state) = objects.get(key) {
if let Some(mode) = state.current_mode() {
let owner = match mode {
let (owner, acquired_at, lock_timeout) = match mode {
LockMode::Exclusive => {
let current_owner = state.current_owner.read();
current_owner.clone()?
let info = current_owner.clone()?;
(info.owner, info.acquired_at, info.lock_timeout)
}
LockMode::Shared => {
let shared_owners = state.shared_owners.read();
shared_owners.first()?.clone()
let entry = shared_owners.first()?.clone();
(entry.owner, entry.acquired_at, entry.lock_timeout)
}
};

let priority = *state.priority.read();

// Estimate acquisition time (approximate)
let acquired_at = SystemTime::now() - Duration::from_secs(60);
let expires_at = acquired_at + Duration::from_secs(300);
let expires_at = acquired_at
.checked_add(lock_timeout)
.unwrap_or_else(|| acquired_at + crate::fast_lock::DEFAULT_LOCK_TIMEOUT);

return Some(crate::fast_lock::types::ObjectLockInfo {
key: key.clone(),

@@ -308,13 +308,28 @@ pub struct ObjectLockState {

// Third cache line: Less frequently accessed data
/// Current owner of exclusive lock (if any)
pub current_owner: parking_lot::RwLock<Option<Arc<str>>>,
pub current_owner: parking_lot::RwLock<Option<ExclusiveOwnerInfo>>,
/// Shared owners - optimized for small number of readers
pub shared_owners: parking_lot::RwLock<smallvec::SmallVec<[Arc<str>; 4]>>,
pub shared_owners: parking_lot::RwLock<smallvec::SmallVec<[SharedOwnerEntry; 4]>>,
/// Lock priority for conflict resolution
pub priority: parking_lot::RwLock<LockPriority>,
}

#[derive(Clone, Debug)]
pub struct ExclusiveOwnerInfo {
pub owner: Arc<str>,
pub acquired_at: SystemTime,
pub lock_timeout: Duration,
}

#[derive(Clone, Debug)]
pub struct SharedOwnerEntry {
pub owner: Arc<str>,
pub count: u32,
pub acquired_at: SystemTime,
pub lock_timeout: Duration,
}

impl Default for ObjectLockState {
fn default() -> Self {
Self::new()
@@ -335,60 +350,87 @@ impl ObjectLockState {
}

/// Try fast path shared lock acquisition
pub fn try_acquire_shared_fast(&self, owner: &Arc<str>) -> bool {
if self.atomic_state.try_acquire_shared() {
self.atomic_state.update_access_time();
let mut shared = self.shared_owners.write();
if !shared.contains(owner) {
shared.push(owner.clone());
}
true
} else {
false
pub fn try_acquire_shared_fast(&self, owner: &Arc<str>, lock_timeout: Duration) -> bool {
if !self.atomic_state.try_acquire_shared() {
return false;
}

self.atomic_state.update_access_time();
let mut shared = self.shared_owners.write();
if let Some(entry) = shared.iter_mut().find(|entry| entry.owner.as_ref() == owner.as_ref()) {
entry.count = entry.count.saturating_add(1);
entry.acquired_at = SystemTime::now();
entry.lock_timeout = lock_timeout;
} else {
shared.push(SharedOwnerEntry {
owner: owner.clone(),
count: 1,
acquired_at: SystemTime::now(),
lock_timeout,
});
}
true
}

/// Try fast path exclusive lock acquisition
pub fn try_acquire_exclusive_fast(&self, owner: &Arc<str>) -> bool {
if self.atomic_state.try_acquire_exclusive() {
self.atomic_state.update_access_time();
let mut current = self.current_owner.write();
*current = Some(owner.clone());
true
} else {
false
pub fn try_acquire_exclusive_fast(&self, owner: &Arc<str>, lock_timeout: Duration) -> bool {
if !self.atomic_state.try_acquire_exclusive() {
return false;
}

self.atomic_state.update_access_time();
let mut current = self.current_owner.write();
*current = Some(ExclusiveOwnerInfo {
owner: owner.clone(),
acquired_at: SystemTime::now(),
lock_timeout,
});
true
}

/// Release shared lock
pub fn release_shared(&self, owner: &Arc<str>) -> bool {
let mut shared = self.shared_owners.write();
if let Some(pos) = shared.iter().position(|x| x.as_ref() == owner.as_ref()) {
shared.remove(pos);
if let Some(pos) = shared.iter().position(|entry| entry.owner.as_ref() == owner.as_ref()) {
let original_entry = shared[pos].clone();
let removed_entry = if shared[pos].count > 1 {
shared[pos].count -= 1;
None
} else {
Some(shared.remove(pos))
};
if self.atomic_state.release_shared() {
// Notify waiting writers if no more readers
if shared.is_empty() {
drop(shared);
self.optimized_notify.notify_writer();
}
true
} else {
// Inconsistency detected - atomic state shows no shared lock but owner was found
tracing::warn!(
"Atomic state inconsistency during shared lock release: owner={}, remaining_owners={}",
"Atomic state inconsistency during shared lock release: owner={}, remaining_entries={}",
owner,
shared.len()
);
// Re-add owner to maintain consistency
shared.push(owner.clone());
// Re-add owner entry to maintain consistency when release failed
match removed_entry {
Some(entry) => {
shared.push(entry);
}
None => {
if let Some(existing) = shared.iter_mut().find(|existing| existing.owner.as_ref() == owner.as_ref()) {
existing.count = existing.count.saturating_add(1);
} else {
shared.push(original_entry);
}
}
}
false
}
} else {
// Owner not found in shared owners list
tracing::debug!(
"Shared lock release failed - owner not found: owner={}, current_owners={:?}",
"Shared lock release failed - owner not found: owner={}, current_entries={:?}",
owner,
shared.iter().map(|s| s.as_ref()).collect::<Vec<_>>()
shared.iter().map(|s| s.owner.as_ref()).collect::<Vec<_>>()
);
false
}
@@ -397,7 +439,7 @@ impl ObjectLockState {
/// Release exclusive lock
pub fn release_exclusive(&self, owner: &Arc<str>) -> bool {
let mut current = self.current_owner.write();
if current.as_ref() == Some(owner) {
if current.as_ref().is_some_and(|info| info.owner.as_ref() == owner.as_ref()) {
if self.atomic_state.release_exclusive() {
*current = None;
drop(current);
@@ -426,7 +468,7 @@ impl ObjectLockState {
tracing::debug!(
"Exclusive lock release failed - owner mismatch: expected_owner={}, actual_owner={:?}",
owner,
current.as_ref().map(|s| s.as_ref())
current.as_ref().map(|s| s.owner.as_ref())
);
false
}
@@ -483,16 +525,18 @@ mod tests {
let owner2 = Arc::from("owner2");

// Test shared locks
assert!(state.try_acquire_shared_fast(&owner1));
assert!(state.try_acquire_shared_fast(&owner2));
assert!(!state.try_acquire_exclusive_fast(&owner1));
let timeout = Duration::from_secs(30);

assert!(state.try_acquire_shared_fast(&owner1, timeout));
assert!(state.try_acquire_shared_fast(&owner2, timeout));
assert!(!state.try_acquire_exclusive_fast(&owner1, timeout));

assert!(state.release_shared(&owner1));
assert!(state.release_shared(&owner2));

// Test exclusive lock
assert!(state.try_acquire_exclusive_fast(&owner1));
assert!(!state.try_acquire_shared_fast(&owner2));
assert!(state.try_acquire_exclusive_fast(&owner1, timeout));
assert!(!state.try_acquire_shared_fast(&owner2, timeout));
assert!(state.release_exclusive(&owner1));
}
}
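`SharedOwnerEntry` turns the shared-owner list from a flat `Arc<str>` vector into per-owner records with a `count`, making repeated acquisition by the same owner reentrant: acquire bumps the count, release decrements it and removes the entry only at zero. A toy model of that bookkeeping:

```rust
#[derive(Clone, Debug)]
struct SharedOwner { owner: String, count: u32 }

fn acquire(owners: &mut Vec<SharedOwner>, who: &str) {
    if let Some(e) = owners.iter_mut().find(|e| e.owner == who) {
        e.count = e.count.saturating_add(1); // reentrant: same owner, bump the count
    } else {
        owners.push(SharedOwner { owner: who.to_string(), count: 1 });
    }
}

/// Returns true when the owner actually held the lock.
fn release(owners: &mut Vec<SharedOwner>, who: &str) -> bool {
    if let Some(pos) = owners.iter().position(|e| e.owner == who) {
        if owners[pos].count > 1 {
            owners[pos].count -= 1;
        } else {
            owners.remove(pos); // last reference gone, entry disappears
        }
        true
    } else {
        false
    }
}

fn main() {
    let mut owners = Vec::new();
    acquire(&mut owners, "a");
    acquire(&mut owners, "a"); // count == 2, still one entry
    assert_eq!(owners.len(), 1);
    assert!(release(&mut owners, "a"));
    assert!(release(&mut owners, "a"));
    assert!(!release(&mut owners, "a")); // nothing left to release
}
```

The `acquired_at`/`lock_timeout` fields feed the `expires_at` computation in the `LockShard` hunk, replacing the old hard-coded 60 s / 300 s estimate with the per-lock timeout.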
@@ -19,6 +19,8 @@ use std::hash::{Hash, Hasher};
use std::sync::Arc;
use std::time::{Duration, SystemTime};

use crate::fast_lock::guard::FastLockGuard;

/// Object key for version-aware locking
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct ObjectKey {
@@ -340,6 +342,7 @@ pub struct BatchLockResult {
pub successful_locks: Vec<ObjectKey>,
pub failed_locks: Vec<(ObjectKey, LockResult)>,
pub all_acquired: bool,
pub guards: Vec<FastLockGuard>,
}

#[cfg(test)]

@@ -296,11 +296,11 @@ impl S3Client {
.context(format!("Failed to resolve file path: {local_path}"))?;

if !canonical_path.exists() {
anyhow::bail!("File does not exist: {}", local_path);
anyhow::bail!("File does not exist: {local_path}");
}

if !canonical_path.is_file() {
anyhow::bail!("Path is not a file: {}", local_path);
anyhow::bail!("Path is not a file: {local_path}");
}

let metadata = tokio::fs::metadata(&canonical_path)
@@ -432,7 +432,7 @@ impl S3Client {

while let Some(bytes_result) = byte_stream.try_next().await.context("Failed to read object content")? {
if total_read + bytes_result.len() > max_size {
anyhow::bail!("Object size exceeds maximum allowed size of {} bytes", max_size);
anyhow::bail!("Object size exceeds maximum allowed size of {max_size} bytes");
}
content.extend_from_slice(&bytes_result);
total_read += bytes_result.len();
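The capped download loop above prevents unbounded buffering when streaming an object body. The shape of the check, independent of the AWS SDK types (`chunks` stands in for the byte stream):

```rust
fn read_capped(chunks: impl IntoIterator<Item = Vec<u8>>, max_size: usize) -> Result<Vec<u8>, String> {
    let mut content = Vec::new();
    let mut total_read = 0usize;
    for chunk in chunks {
        // Check before extending, so we never buffer past the cap.
        if total_read + chunk.len() > max_size {
            return Err(format!("Object size exceeds maximum allowed size of {max_size} bytes"));
        }
        content.extend_from_slice(&chunk);
        total_read += chunk.len();
    }
    Ok(content)
}

fn main() {
    assert!(read_capped([vec![0u8; 4], vec![0u8; 4]], 8).is_ok());
    assert!(read_capped([vec![0u8; 4], vec![0u8; 5]], 8).is_err());
}
```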
@@ -32,14 +32,17 @@ rustfs-utils = { workspace = true, features = ["path", "sys"] }
rustfs-targets = { workspace = true }
async-trait = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
dashmap = { workspace = true }
futures = { workspace = true }
form_urlencoded = { workspace = true }
hashbrown = { workspace = true }
once_cell = { workspace = true }
quick-xml = { workspace = true, features = ["serialize", "async-tokio"] }
rayon = { workspace = true }
rumqttc = { workspace = true }
rustc-hash = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
starshard = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["rt-multi-thread", "sync", "time"] }
tracing = { workspace = true }

@@ -13,9 +13,9 @@
// limitations under the License.

use chrono::{DateTime, Utc};
use hashbrown::HashMap;
use rustfs_targets::EventName;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use url::form_urlencoded;

/// Represents the identity of the user who triggered the event

@@ -14,6 +14,7 @@
use crate::Event;
use async_trait::async_trait;
use hashbrown::HashSet;
use rumqttc::QoS;
use rustfs_config::notify::{ENV_NOTIFY_MQTT_KEYS, ENV_NOTIFY_WEBHOOK_KEYS, NOTIFY_MQTT_KEYS, NOTIFY_WEBHOOK_KEYS};
use rustfs_config::{
@@ -27,7 +28,6 @@ use rustfs_targets::{
error::TargetError,
target::{mqtt::MQTTArgs, webhook::WebhookArgs},
};
use std::collections::HashSet;
use std::time::Duration;
use tracing::{debug, warn};
use url::Url;
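The final hunks swap `std::collections::{HashMap, HashSet}` for their `hashbrown` counterparts in the notify crate. The common API is identical (std's hash tables are themselves built on hashbrown; the external crate mainly differs in its default hasher), so the change is usually just the import:

```rust
// Drop-in at the call sites touched by this diff:
use hashbrown::{HashMap, HashSet};

fn main() {
    let mut m: HashMap<String, u32> = HashMap::new();
    m.insert("events".into(), 3);
    let mut s: HashSet<&str> = HashSet::new();
    s.insert("webhook");
    assert_eq!(m.get("events"), Some(&3));
    assert!(s.contains("webhook"));
}
```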
Some files were not shown because too many files have changed in this diff.