From 32baa7f14684bd2a6294de460559f6e34757edb5 Mon Sep 17 00:00:00 2001
From: weisd
Date: Tue, 1 Oct 2024 04:35:25 +0800
Subject: [PATCH 01/20] stash

---
 TODO.md                                       |  14 +-
 ecstore/src/bucket/encryption/mod.rs          |   6 +-
 ecstore/src/bucket/event/mod.rs               |  20 +-
 ecstore/src/bucket/lifecycle/and.rs           |   2 +-
 ecstore/src/bucket/lifecycle/delmarker.rs     |   2 +-
 ecstore/src/bucket/lifecycle/expiration.rs    |   8 +-
 ecstore/src/bucket/lifecycle/fileter.rs       |   2 +-
 ecstore/src/bucket/lifecycle/lifecycle.rs     |   2 +-
 .../src/bucket/lifecycle/noncurrentversion.rs |   4 +-
 ecstore/src/bucket/lifecycle/prefix.rs        |   2 +-
 ecstore/src/bucket/lifecycle/rule.rs          |   4 +-
 ecstore/src/bucket/lifecycle/tag.rs           |   2 +-
 ecstore/src/bucket/lifecycle/transition.rs    |   4 +-
 ecstore/src/bucket/metadata.rs                | 177 +++++++++++-------
 ecstore/src/bucket/metadata_sys.rs            |  82 ++++++++
 ecstore/src/bucket/mod.rs                     |   5 +-
 ecstore/src/bucket/objectlock/mod.rs          |   8 +-
 ecstore/src/bucket/policy/action.rs           |   2 +-
 ecstore/src/bucket/policy/bucket_policy.rs    |   6 +-
 .../src/bucket/policy/condition/function.rs   |   4 +-
 ecstore/src/bucket/policy/resource.rs         |   4 +-
 ecstore/src/bucket/quota/mod.rs               |   2 +-
 ecstore/src/bucket/replication/and.rs         |   2 +-
 ecstore/src/bucket/replication/filter.rs      |   2 +-
 ecstore/src/bucket/replication/mod.rs         |   2 +-
 ecstore/src/bucket/replication/rule.rs        |  16 +-
 ecstore/src/bucket/replication/tag.rs         |   2 +-
 ecstore/src/bucket/tags/mod.rs                |  21 ++-
 ecstore/src/bucket/target/mod.rs              |  10 +-
 ecstore/src/bucket/versioning/mod.rs          |   4 +-
 ecstore/src/bucket_meta.rs                    |  11 +-
 ecstore/src/config/common.rs                  |  54 ++++++
 ecstore/src/config/error.rs                   |  15 ++
 ecstore/src/config/mod.rs                     |   2 +
 ecstore/src/lib.rs                            |   1 +
 ecstore/src/sets.rs                           |  39 ++--
 ecstore/src/store.rs                          |  63 ++++---
 ecstore/src/store_api.rs                      |  58 ++++--
 rustfs/src/storage/ecfs.rs                    |  87 +++++----
 39 files changed, 513 insertions(+), 238 deletions(-)
 create mode 100644 ecstore/src/bucket/metadata_sys.rs
 create mode 100644 ecstore/src/config/common.rs
 create mode 100644 ecstore/src/config/error.rs
 create mode 100644 ecstore/src/config/mod.rs

diff --git a/TODO.md b/TODO.md
index cbcab874..b158b0aa 100644
--- a/TODO.md
+++ b/TODO.md
@@ -2,18 +2,18 @@
 
 ## Core storage
 
-- [ ] EC read/write quorum checks (Read/WriteQuorum)
-- [ ] Optimize concurrent execution: consume while reading, interruptible
+- [x] EC read/write quorum checks (Read/WriteQuorum)
+- [ ] Optimize background concurrent execution, interruptible
 - [ ] Store small files in the metafile (inline data)
 - [ ] Flesh out bucketmeta
-- [ ] Object lock
-- [ ] Code cleanup: use generics?
-- [ ] Abstract out metafile storage
-- [ ] Hash while reading/writing
+- [x] Object lock
+- [ ] Hash while reading/writing via nested readers (see the sketch below)
 - [x] Remote RPC
 - [x] Error-type checks: how to match error types in the program and unify errors
 - [x] Optimize xlmeta with a custom msgpack data structure
-- [x] appendFile, createFile, readFile, walk_dir sync io
+- [ ] Optimize io.Reader (see GetObjectNInfo) to simplify io copy; if writes are async, rebalance
+- [ ] Code cleanup: use generics?
+- [ ] Abstract out metafile storage
 
 ## Core features
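The "hash while reading/writing via nested readers" item above amounts to a reader wrapper that updates a digest as bytes stream through it. A minimal sketch, assuming the `sha2` crate and synchronous `std::io::Read` (the RustFS code would likely wrap an async reader instead):

use sha2::{Digest, Sha256};
use std::io::{self, Read};

/// Wraps another reader and hashes every byte it hands out.
struct HashReader<R: Read> {
    inner: R,
    hasher: Sha256,
}

impl<R: Read> HashReader<R> {
    fn new(inner: R) -> Self {
        Self { inner, hasher: Sha256::new() }
    }

    /// Consume the wrapper and return the digest of everything read so far.
    fn finalize(self) -> [u8; 32] {
        self.hasher.finalize().into()
    }
}

impl<R: Read> Read for HashReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let n = self.inner.read(buf)?;
        self.hasher.update(&buf[..n]);
        Ok(n)
    }
}

Because the wrapper is itself a `Read`, such readers nest: an erasure-coding reader can wrap a `HashReader` that wraps the network body, computing the hash without a second pass over the data.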
diff --git a/ecstore/src/bucket/encryption/mod.rs b/ecstore/src/bucket/encryption/mod.rs
index f5fee7c7..20e7d315 100644
--- a/ecstore/src/bucket/encryption/mod.rs
+++ b/ecstore/src/bucket/encryption/mod.rs
@@ -21,20 +21,20 @@ impl std::str::FromStr for Algorithm {
 }
 
 // Defines the EncryptionAction struct
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct EncryptionAction {
     algorithm: Option<Algorithm>,
     master_key_id: Option<String>,
 }
 
 // Defines the Rule struct
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct Rule {
     default_encryption_action: EncryptionAction,
 }
 
 // Defines the BucketSSEConfig struct
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct BucketSSEConfig {
     xml_ns: String,
     xml_name: String,
diff --git a/ecstore/src/bucket/event/mod.rs b/ecstore/src/bucket/event/mod.rs
index 48944fee..d1cb49fb 100644
--- a/ecstore/src/bucket/event/mod.rs
+++ b/ecstore/src/bucket/event/mod.rs
@@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize};
 use name::Name;
 
 // Defines the Common struct
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 struct Common {
     pub id: String,
     pub filter: S3Key,
@@ -13,57 +13,57 @@ struct Common {
 }
 
 // Defines the Queue struct
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 struct Queue {
     pub common: Common,
     pub arn: ARN,
 }
 
 // Defines the ARN struct
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct ARN {
     pub target_id: TargetID,
     pub region: String,
 }
 
 // Defines the TargetID struct
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct TargetID {
     pub id: String,
     pub name: String,
 }
 
 // Defines the FilterRule struct
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct FilterRule {
     pub name: String,
     pub value: String,
 }
 
 // Defines the FilterRuleList struct
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct FilterRuleList {
     pub rules: Vec<FilterRule>,
 }
 
 // Defines the S3Key struct
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct S3Key {
     pub rule_list: FilterRuleList,
 }
 
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct Lambda {
     arn: String,
 }
 
 // Defines the Topic struct
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct Topic {
     arn: String,
 }
 
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct Config {
     queue_list: Vec<Queue>,
     lambda_list: Vec<Lambda>,
diff --git a/ecstore/src/bucket/lifecycle/and.rs b/ecstore/src/bucket/lifecycle/and.rs
index 38bee917..70b36607 100644
--- a/ecstore/src/bucket/lifecycle/and.rs
+++ b/ecstore/src/bucket/lifecycle/and.rs
@@ -1,6 +1,6 @@
 use super::{prefix::Prefix, tag::Tag};
 use serde::{Deserialize, Serialize};
 
-#[derive(Debug, Deserialize, Serialize, Default)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct And {
     pub object_size_greater_than: i64,
     pub object_size_less_than: i64,
diff --git a/ecstore/src/bucket/lifecycle/delmarker.rs b/ecstore/src/bucket/lifecycle/delmarker.rs
index 06102f21..5ea1d3bb 100644
--- a/ecstore/src/bucket/lifecycle/delmarker.rs
+++ b/ecstore/src/bucket/lifecycle/delmarker.rs
@@ -1,6 +1,6 @@
 use serde::{Deserialize, Serialize};
 
-#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct DelMarkerExpiration { pub days: usize, } diff --git a/ecstore/src/bucket/lifecycle/expiration.rs b/ecstore/src/bucket/lifecycle/expiration.rs index 8fa2e08c..490b454b 100644 --- a/ecstore/src/bucket/lifecycle/expiration.rs +++ b/ecstore/src/bucket/lifecycle/expiration.rs @@ -3,21 +3,21 @@ use time::OffsetDateTime; // ExpirationDays is a type alias to unmarshal Days in Expiration pub type ExpirationDays = usize; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct ExpirationDate(Option); -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct ExpireDeleteMarker { pub marker: Boolean, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Boolean { pub val: bool, pub set: bool, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Expiration { pub days: Option, pub date: Option, diff --git a/ecstore/src/bucket/lifecycle/fileter.rs b/ecstore/src/bucket/lifecycle/fileter.rs index 3604384b..e7b35be8 100644 --- a/ecstore/src/bucket/lifecycle/fileter.rs +++ b/ecstore/src/bucket/lifecycle/fileter.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use super::{and::And, prefix::Prefix, tag::Tag}; use serde::{Deserialize, Serialize}; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Filter { pub set: bool, diff --git a/ecstore/src/bucket/lifecycle/lifecycle.rs b/ecstore/src/bucket/lifecycle/lifecycle.rs index b4ef834c..e4f2fa6f 100644 --- a/ecstore/src/bucket/lifecycle/lifecycle.rs +++ b/ecstore/src/bucket/lifecycle/lifecycle.rs @@ -2,7 +2,7 @@ use super::rule::Rule; use serde::{Deserialize, Serialize}; use time::OffsetDateTime; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Lifecycle { pub rules: Vec, pub expiry_updated_at: Option, diff --git a/ecstore/src/bucket/lifecycle/noncurrentversion.rs b/ecstore/src/bucket/lifecycle/noncurrentversion.rs index 5f39d7d8..3441746f 100644 --- a/ecstore/src/bucket/lifecycle/noncurrentversion.rs +++ b/ecstore/src/bucket/lifecycle/noncurrentversion.rs @@ -1,14 +1,14 @@ use super::{expiration::ExpirationDays, transition::TransitionDays}; use serde::{Deserialize, Serialize}; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct NoncurrentVersionExpiration { pub noncurrent_days: ExpirationDays, pub newer_noncurrent_versions: usize, set: bool, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct NoncurrentVersionTransition { pub noncurrent_days: TransitionDays, pub storage_class: String, diff --git a/ecstore/src/bucket/lifecycle/prefix.rs b/ecstore/src/bucket/lifecycle/prefix.rs index abc09320..657038dc 100644 --- a/ecstore/src/bucket/lifecycle/prefix.rs +++ b/ecstore/src/bucket/lifecycle/prefix.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Prefix { pub val: String, pub set: bool, diff --git a/ecstore/src/bucket/lifecycle/rule.rs b/ecstore/src/bucket/lifecycle/rule.rs index 4c69c7bb..27f6e729 100644 
--- a/ecstore/src/bucket/lifecycle/rule.rs +++ b/ecstore/src/bucket/lifecycle/rule.rs @@ -8,14 +8,14 @@ use super::{ }; use serde::{Deserialize, Serialize}; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub enum Status { #[default] Enabled, Disabled, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Rule { pub id: String, pub status: Status, diff --git a/ecstore/src/bucket/lifecycle/tag.rs b/ecstore/src/bucket/lifecycle/tag.rs index 95687a49..5d80bb2c 100644 --- a/ecstore/src/bucket/lifecycle/tag.rs +++ b/ecstore/src/bucket/lifecycle/tag.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Tag { pub key: String, pub value: String, diff --git a/ecstore/src/bucket/lifecycle/transition.rs b/ecstore/src/bucket/lifecycle/transition.rs index 5950de29..54534a4a 100644 --- a/ecstore/src/bucket/lifecycle/transition.rs +++ b/ecstore/src/bucket/lifecycle/transition.rs @@ -3,10 +3,10 @@ use time::OffsetDateTime; pub type TransitionDays = usize; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct TransitionDate(Option); -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Transition { pub days: Option, pub date: Option, diff --git a/ecstore/src/bucket/metadata.rs b/ecstore/src/bucket/metadata.rs index 316e5f5b..251ff304 100644 --- a/ecstore/src/bucket/metadata.rs +++ b/ecstore/src/bucket/metadata.rs @@ -1,32 +1,34 @@ -use byteorder::{BigEndian, ByteOrder}; +use super::{ + encryption::BucketSSEConfig, event, lifecycle::lifecycle::Lifecycle, objectlock, policy::bucket_policy::BucketPolicy, + quota::BucketQuota, replication, tags::Tags, target::BucketTargets, versioning::Versioning, +}; +use byteorder::{BigEndian, ByteOrder, LittleEndian}; use rmp_serde::Serializer as rmpSerializer; use serde::Serializer; use serde::{Deserialize, Deserializer, Serialize}; use std::collections::HashMap; use std::fmt::Display; use std::str::FromStr; -use time::macros::datetime; use time::OffsetDateTime; +use tracing::error; -use super::{ - encryption::BucketSSEConfig, event, lifecycle::lifecycle::Lifecycle, objectlock, policy::bucket_policy::BucketPolicy, - quota::BucketQuota, replication, tags::Tags, target::BucketTargets, versioning::Versioning, -}; - -use crate::error::Result; +use crate::bucket::tags; +use crate::config::common::{read_config, save_config}; +use crate::config::error::ConfigError; +use crate::error::{Error, Result}; use crate::disk::BUCKET_META_PREFIX; -use crate::utils::crypto::hex; +use crate::store::ECStore; +use crate::store_api::StorageAPI; pub const BUCKET_METADATA_FILE: &str = ".metadata.bin"; pub const BUCKET_METADATA_FORMAT: u16 = 1; pub const BUCKET_METADATA_VERSION: u16 = 1; -#[derive(Debug, Deserialize, Serialize)] +#[derive(Debug, Deserialize, Serialize, Clone)] #[serde(rename_all = "PascalCase", default)] pub struct BucketMetadata { pub name: String, - #[serde(serialize_with = "write_times")] pub created: OffsetDateTime, pub lock_enabled: bool, // 虽然标记为不使用,但可能需要保留 pub policy_config_json: Vec, @@ -41,27 +43,16 @@ pub struct BucketMetadata { pub bucket_targets_config_json: Vec, pub bucket_targets_config_meta_json: Vec, - #[serde(serialize_with = "write_times")] pub policy_config_updated_at: OffsetDateTime, - 
#[serde(serialize_with = "write_times")] pub object_lock_config_updated_at: OffsetDateTime, - #[serde(serialize_with = "write_times")] pub encryption_config_updated_at: OffsetDateTime, - #[serde(serialize_with = "write_times")] pub tagging_config_updated_at: OffsetDateTime, - #[serde(serialize_with = "write_times")] pub quota_config_updated_at: OffsetDateTime, - #[serde(serialize_with = "write_times")] pub replication_config_updated_at: OffsetDateTime, - #[serde(serialize_with = "write_times")] pub versioning_config_updated_at: OffsetDateTime, - #[serde(serialize_with = "write_times")] pub lifecycle_config_updated_at: OffsetDateTime, - #[serde(serialize_with = "write_times")] pub notification_config_updated_at: OffsetDateTime, - #[serde(serialize_with = "write_times")] pub bucket_targets_config_updated_at: OffsetDateTime, - #[serde(serialize_with = "write_times")] pub bucket_targets_config_meta_updated_at: OffsetDateTime, #[serde(skip)] @@ -147,9 +138,9 @@ impl BucketMetadata { format!("{}/{}/{}", BUCKET_META_PREFIX, self.name.as_str(), BUCKET_METADATA_FILE) } - fn msg_size(&self) -> usize { - unimplemented!() - } + // fn msg_size(&self) -> usize { + // unimplemented!() + // } pub fn marshal_msg(&self) -> Result> { let mut buf = Vec::new(); @@ -159,12 +150,99 @@ impl BucketMetadata { Ok(buf) } - pub fn unmarshal(_buf: &[u8]) -> Result { - unimplemented!() + pub fn unmarshal(buf: &[u8]) -> Result { + let t: BucketMetadata = rmp_serde::from_slice(buf)?; + Ok(t) + } + + pub fn check_header(buf: &[u8]) -> Result<()> { + if buf.len() <= 4 { + return Err(Error::msg("read_bucket_metadata: data invalid")); + } + + // TODO: check version + Ok(()) + } + + fn default_timestamps(&mut self) { + if self.tagging_config_updated_at == OffsetDateTime::UNIX_EPOCH { + self.tagging_config_updated_at = self.created + } + } + + async fn save(&mut self, api: &ECStore) -> Result<()> { + self.parse_all_configs(api)?; + + let mut buf: Vec = vec![0; 4]; + + LittleEndian::write_u16(&mut buf[0..2], BUCKET_METADATA_FORMAT); + + LittleEndian::write_u16(&mut buf[2..4], BUCKET_METADATA_VERSION); + + let data = self.marshal_msg()?; + + buf.extend_from_slice(&data); + + save_config(api, self.save_file_path().as_str(), &buf).await?; + + Ok(()) + } + + fn parse_all_configs(&mut self, _api: &ECStore) -> Result<()> { + if !self.tagging_config_xml.is_empty() { + self.tagging_config = Some(tags::Tags::unmarshal(&self.tagging_config_xml)?); + } + + Ok(()) } } -fn deserialize_from_str<'de, S, D>(deserializer: D) -> core::result::Result +pub async fn load_bucket_metadata(api: &ECStore, bucket: &str) -> Result { + load_bucket_metadata_parse(api, bucket, true).await +} + +async fn load_bucket_metadata_parse(api: &ECStore, bucket: &str, parse: bool) -> Result { + let mut bm = match read_bucket_metadata(api, bucket).await { + Ok(res) => res, + Err(err) => { + if let Some(e) = err.downcast_ref::() { + if !ConfigError::is_not_found(&e) { + return Err(err); + } + } + + BucketMetadata::new(bucket) + } + }; + + bm.default_timestamps(); + + if parse { + bm.parse_all_configs(api)?; + } + + // TODO: parse_all_configs + + Ok(bm) +} + +async fn read_bucket_metadata(api: &ECStore, bucket: &str) -> Result { + if bucket.is_empty() { + error!("bucket name empty"); + return Err(Error::msg("invalid argument")); + } + + let bm = BucketMetadata::new(&bucket); + let file_path = bm.save_file_path(); + + let data = read_config(api, &file_path).await?; + + BucketMetadata::check_header(&data)?; + + BucketMetadata::unmarshal(&data[4..]) +} + +fn 
_deserialize_from_str<'de, S, D>(deserializer: D) -> core::result::Result<S, D::Error>
 where
     S: FromStr,
     S::Err: Display,
     D: Deserializer<'de>,
@@ -175,7 +253,7 @@ where
     unimplemented!()
 }
 
-fn write_times<S>(t: &OffsetDateTime, s: S) -> Result<S::Ok, S::Error>
+fn _write_time<S>(t: &OffsetDateTime, s: S) -> Result<S::Ok, S::Error>
 where
     S: Serializer,
 {
@@ -183,47 +261,16 @@ where
     let sec = t.unix_timestamp() - 62135596800;
     let nsec = t.nanosecond();
 
-    buf[0] = 0xc7;
-    buf[1] = 0x0c;
-    buf[2] = 0x05;
+    buf[0] = 0xc7; // ext8
+    buf[1] = 0x0c; // length
+    buf[2] = 0x05; // time extension type
     BigEndian::write_u64(&mut buf[3..], sec as u64);
     BigEndian::write_u32(&mut buf[11..], nsec as u32);
     s.serialize_bytes(&buf)
 }
 
-fn write_time(t: OffsetDateTime) -> Result<(), String> {
-    // let t = t.saturating_sub(0); // convert to time since UNIX_EPOCH
-    println!("t:{:?}", t);
-    println!("offset:{:?}", datetime!(0-01-01 0:00 UTC));
-
-    let mut buf = vec![0x0; 15];
-
-    let sec = t.unix_timestamp() - 62135596800;
-    let nsec = t.nanosecond();
-
-    println!("sec:{:?}", sec);
-    println!("nsec:{:?}", nsec);
-
-    buf[0] = 0xc7;
-    buf[1] = 0x0c;
-    buf[2] = 0x05;
-    // extension-format time type
-    // buf.push(0xc7); // ext8
-    // buf.push(12); // length
-    // buf.push(0x05); // time extension type
-
-    // write the Unix timestamp and nanosecond parts into the buffer
-    BigEndian::write_u64(&mut buf[3..], sec as u64);
-    BigEndian::write_u32(&mut buf[11..], nsec as u32);
-
-    println!("hex:{:?}", hex(buf));
-
-    Ok(())
-}
-
 #[cfg(test)]
 mod test {
-    use crate::utils::crypto::hex;
 
     use super::*;
 
@@ -235,6 +282,8 @@ mod test {
 
         let buf = bm.marshal_msg().unwrap();
 
-        println!("{:?}", hex(buf))
+        let new = BucketMetadata::unmarshal(&buf).unwrap();
+
+        assert_eq!(bm.name, new.name);
     }
 }
diff --git a/ecstore/src/bucket/metadata_sys.rs b/ecstore/src/bucket/metadata_sys.rs
new file mode 100644
index 00000000..92d24a4e
--- /dev/null
+++ b/ecstore/src/bucket/metadata_sys.rs
@@ -0,0 +1,82 @@
+use std::{collections::HashMap, sync::Arc};
+
+use crate::error::{Error, Result};
+use crate::store::ECStore;
+use lazy_static::lazy_static;
+use time::OffsetDateTime;
+use tokio::sync::RwLock;
+
+use super::metadata::{load_bucket_metadata, BucketMetadata};
+use super::tags;
+
+lazy_static!
{ + pub static ref GLOBAL_BucketMetadataSys: Arc> = Arc::new(Some(BucketMetadataSys::new())); +} + +pub fn get_bucket_metadata_sys() -> Arc> { + GLOBAL_BucketMetadataSys.clone() +} + +#[derive(Debug, Default)] +pub struct BucketMetadataSys { + metadata_map: RwLock>, + api: Option>, + initialized: RwLock, +} + +impl BucketMetadataSys { + fn new() -> Self { + Self { ..Default::default() } + } + + pub fn init(&mut self, api: Arc, buckets: Vec) { + self.api = Some(api); + + unimplemented!() + } + + async fn reset(&mut self) { + let mut map = self.metadata_map.write().await; + map.clear(); + } + + pub async fn get_config(&self, bucket: String) -> Result<(BucketMetadata, bool)> { + if let Some(api) = self.api.as_ref() { + let has_bm = { + let map = self.metadata_map.read().await; + if let Some(bm) = map.get(&bucket) { + Some(bm.clone()) + } else { + None + } + }; + + if let Some(bm) = has_bm { + return Ok((bm, false)); + } else { + let bm = match load_bucket_metadata(&api, bucket.as_str()).await { + Ok(res) => res, + Err(err) => { + if *self.initialized.read().await { + return Err(Error::msg("errBucketMetadataNotInitialized")); + } else { + return Err(err); + } + } + }; + + let mut map = self.metadata_map.write().await; + + map.insert(bucket, bm.clone()); + + Ok((bm, true)) + } + } else { + Err(Error::msg("errBucketMetadataNotInitialized")) + } + } + + pub async fn get_tagging_config(&self, bucket: String) -> Result<(tags::Tags, Option)> { + unimplemented!() + } +} diff --git a/ecstore/src/bucket/mod.rs b/ecstore/src/bucket/mod.rs index 9667266f..48d3c5f5 100644 --- a/ecstore/src/bucket/mod.rs +++ b/ecstore/src/bucket/mod.rs @@ -1,7 +1,8 @@ mod encryption; mod event; mod lifecycle; -mod metadata; +pub mod metadata; +mod metadata_sys; mod objectlock; mod policy; mod quota; @@ -10,4 +11,4 @@ mod tags; mod target; mod versioning; -pub use metadata::BucketMetadata; +pub use metadata_sys::get_bucket_metadata_sys; diff --git a/ecstore/src/bucket/objectlock/mod.rs b/ecstore/src/bucket/objectlock/mod.rs index c8838398..74fa0f3b 100644 --- a/ecstore/src/bucket/objectlock/mod.rs +++ b/ecstore/src/bucket/objectlock/mod.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -#[derive(Debug, Deserialize, Serialize, Default, PartialEq, Eq, Hash)] +#[derive(Debug, Deserialize, Serialize, Default, PartialEq, Eq, Hash, Clone)] pub enum RetMode { #[default] Govenance, @@ -20,19 +20,19 @@ impl std::str::FromStr for RetMode { } } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct DefaultRetention { pub mode: RetMode, pub days: Option, pub years: Option, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Rule { pub default_retention: DefaultRetention, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Config { pub object_lock_enabled: String, pub rule: Option, diff --git a/ecstore/src/bucket/policy/action.rs b/ecstore/src/bucket/policy/action.rs index d448727c..34b6dc8a 100644 --- a/ecstore/src/bucket/policy/action.rs +++ b/ecstore/src/bucket/policy/action.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use std::collections::HashSet; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct ActionSet(HashSet); impl ActionSet {} diff --git a/ecstore/src/bucket/policy/bucket_policy.rs b/ecstore/src/bucket/policy/bucket_policy.rs index 
6e0345c5..b769ec66 100644 --- a/ecstore/src/bucket/policy/bucket_policy.rs +++ b/ecstore/src/bucket/policy/bucket_policy.rs @@ -9,7 +9,7 @@ use super::{ resource::ResourceSet, }; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct BucketPolicyArgs { account_name: String, groups: Vec, @@ -20,7 +20,7 @@ pub struct BucketPolicyArgs { object_name: String, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct BPStatement { sid: String, effect: Effect, @@ -32,7 +32,7 @@ pub struct BPStatement { conditions: Option, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct BucketPolicy { pub id: String, pub version: String, diff --git a/ecstore/src/bucket/policy/condition/function.rs b/ecstore/src/bucket/policy/condition/function.rs index 7863c5c7..61c9090f 100644 --- a/ecstore/src/bucket/policy/condition/function.rs +++ b/ecstore/src/bucket/policy/condition/function.rs @@ -41,14 +41,14 @@ pub trait FunctionApi { // } // } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] enum Function { #[default] Test, } // 定义Functions类型 -#[derive(Deserialize, Serialize, Default)] +#[derive(Deserialize, Serialize, Default, Clone)] pub struct Functions(Vec); impl Debug for Functions { diff --git a/ecstore/src/bucket/policy/resource.rs b/ecstore/src/bucket/policy/resource.rs index c02fd7dc..32ebc5a3 100644 --- a/ecstore/src/bucket/policy/resource.rs +++ b/ecstore/src/bucket/policy/resource.rs @@ -15,11 +15,11 @@ const RESOURCE_ARN_PREFIX: &str = "arn:aws:s3:::"; const RESOURCE_ARN_KMS_PREFIX: &str = "arn:rustfs:kms::::"; // 定义Resource结构体 -#[derive(Debug, Deserialize, Serialize, Default, PartialEq, Eq, Hash)] +#[derive(Debug, Deserialize, Serialize, Default, PartialEq, Eq, Hash, Clone)] pub struct Resource { pattern: String, r#type: ResourceARNType, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct ResourceSet(HashSet); diff --git a/ecstore/src/bucket/quota/mod.rs b/ecstore/src/bucket/quota/mod.rs index c68eb8cb..12c50dc9 100644 --- a/ecstore/src/bucket/quota/mod.rs +++ b/ecstore/src/bucket/quota/mod.rs @@ -7,7 +7,7 @@ pub enum QuotaType { } // 定义BucketQuota结构体 -#[derive(Serialize, Deserialize, Debug)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct BucketQuota { quota: Option, // 使用Option来表示可能不存在的字段 diff --git a/ecstore/src/bucket/replication/and.rs b/ecstore/src/bucket/replication/and.rs index 812dd29e..b9751f0f 100644 --- a/ecstore/src/bucket/replication/and.rs +++ b/ecstore/src/bucket/replication/and.rs @@ -2,7 +2,7 @@ use super::tag::Tag; use serde::{Deserialize, Serialize}; // 定义And结构体 -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct And { prefix: Option, tags: Option>, diff --git a/ecstore/src/bucket/replication/filter.rs b/ecstore/src/bucket/replication/filter.rs index a194ec34..0f06b3ef 100644 --- a/ecstore/src/bucket/replication/filter.rs +++ b/ecstore/src/bucket/replication/filter.rs @@ -3,7 +3,7 @@ use super::tag::Tag; use serde::{Deserialize, Serialize}; use std::collections::HashMap; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Filter { prefix: String, and: And, diff --git a/ecstore/src/bucket/replication/mod.rs 
b/ecstore/src/bucket/replication/mod.rs index 56abce48..a50bf7c6 100644 --- a/ecstore/src/bucket/replication/mod.rs +++ b/ecstore/src/bucket/replication/mod.rs @@ -6,7 +6,7 @@ mod tag; use rule::Rule; use serde::{Deserialize, Serialize}; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Config { rules: Vec, role_arn: String, diff --git a/ecstore/src/bucket/replication/rule.rs b/ecstore/src/bucket/replication/rule.rs index ca688bb9..9930dd60 100644 --- a/ecstore/src/bucket/replication/rule.rs +++ b/ecstore/src/bucket/replication/rule.rs @@ -1,29 +1,29 @@ use super::filter::Filter; use serde::{Deserialize, Serialize}; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub enum Status { #[default] Enabled, Disabled, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct DeleteMarkerReplication { pub status: Status, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct DeleteReplication { pub status: Status, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct ExistingObjectReplication { pub status: Status, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Destination { pub bucket: String, pub storage_class: String, @@ -31,18 +31,18 @@ pub struct Destination { } // 定义ReplicaModifications结构体 -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct ReplicaModifications { status: Status, } // 定义SourceSelectionCriteria结构体 -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct SourceSelectionCriteria { replica_modifications: ReplicaModifications, } -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Rule { pub id: String, pub status: Status, diff --git a/ecstore/src/bucket/replication/tag.rs b/ecstore/src/bucket/replication/tag.rs index 565aeeea..d0853c50 100644 --- a/ecstore/src/bucket/replication/tag.rs +++ b/ecstore/src/bucket/replication/tag.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -#[derive(Debug, Deserialize, Serialize, Default)] +#[derive(Debug, Deserialize, Serialize, Default,Clone)] pub struct Tag { pub key: Option, pub value: Option, diff --git a/ecstore/src/bucket/tags/mod.rs b/ecstore/src/bucket/tags/mod.rs index e615af24..62cfb984 100644 --- a/ecstore/src/bucket/tags/mod.rs +++ b/ecstore/src/bucket/tags/mod.rs @@ -1,8 +1,10 @@ +use crate::error::{Error, Result}; +use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; use std::collections::HashMap; // 定义tagSet结构体 -#[derive(Serialize, Deserialize, Debug)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct TagSet { #[serde(rename = "Tag")] tag_map: HashMap, @@ -10,10 +12,25 @@ pub struct TagSet { } // 定义tagging结构体 -#[derive(Serialize, Deserialize, Debug)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Tags { #[serde(rename = "Tagging")] xml_name: String, #[serde(rename = "TagSet")] tag_set: Option, } + +impl Tags { + pub fn marshal_msg(&self) -> Result> { + let mut buf = Vec::new(); + + self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?; + + Ok(buf) + } + + pub 
fn unmarshal(buf: &[u8]) -> Result<Tags> {
+        let t: Tags = rmp_serde::from_slice(buf)?;
+        Ok(t)
+    }
+}
diff --git a/ecstore/src/bucket/target/mod.rs b/ecstore/src/bucket/target/mod.rs
index cb4fcbde..e296423b 100644
--- a/ecstore/src/bucket/target/mod.rs
+++ b/ecstore/src/bucket/target/mod.rs
@@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize};
 use std::time::Duration;
 use time::OffsetDateTime;
 
-#[derive(Debug, Deserialize, Serialize, Default)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct Credentials {
     access_key: String,
     secret_key: String,
@@ -10,13 +10,13 @@ pub struct Credentials {
     expiration: Option<OffsetDateTime>,
 }
 
-#[derive(Debug, Deserialize, Serialize, Default)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub enum ServiceType {
     #[default]
     Replication,
 }
 
-#[derive(Debug, Deserialize, Serialize, Default)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct LatencyStat {
     curr: Duration, // current latency
     avg: Duration,  // average latency
     max: Duration,  // maximum latency
 }
 
 // Defines the BucketTarget struct
-#[derive(Debug, Deserialize, Serialize, Default)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct BucketTarget {
     source_bucket: String,
@@ -73,7 +73,7 @@ pub struct BucketTarget {
     edge: bool,
 }
 
-#[derive(Debug, Deserialize, Serialize, Default)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct BucketTargets {
     pub targets: Vec<BucketTarget>,
 }
diff --git a/ecstore/src/bucket/versioning/mod.rs b/ecstore/src/bucket/versioning/mod.rs
index 45d114c2..37fdcdfa 100644
--- a/ecstore/src/bucket/versioning/mod.rs
+++ b/ecstore/src/bucket/versioning/mod.rs
@@ -25,12 +25,12 @@ impl std::fmt::Display for State {
     }
 }
 
-#[derive(Debug, Deserialize, Serialize, Default)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct ExcludedPrefix {
     pub prefix: String,
 }
 
-#[derive(Debug, Deserialize, Serialize, Default)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)]
 pub struct Versioning {
     pub status: State,
     pub excluded_prefixes: Vec<ExcludedPrefix>,
diff --git a/ecstore/src/bucket_meta.rs b/ecstore/src/bucket_meta.rs
index 6ad9d9cc..4a559b75 100644
--- a/ecstore/src/bucket_meta.rs
+++ b/ecstore/src/bucket_meta.rs
@@ -14,12 +14,10 @@ pub const BUCKET_METADATA_VERSION: u16 = 1;
 
 #[derive(Debug, PartialEq, Deserialize, Serialize, Default)]
 pub struct BucketMetadata {
-    format: u16,
-    version: u16,
+    pub format: u16,
+    pub version: u16,
     pub name: String,
-    #[serde(skip_serializing_if = "Option::is_none", default)]
     pub tagging: Option<HashMap<String, String>>,
-    #[serde(skip_serializing_if = "Option::is_none", default)]
     pub created: Option<OffsetDateTime>,
 }
 
@@ -60,7 +58,8 @@ impl BucketMetadata {
         Ok(buf)
     }
 
-    pub fn unmarshal_from(buffer: &[u8]) -> Result<BucketMetadata> {
-        Ok(rmp_serde::from_slice(buffer)?)
+    pub fn unmarshal_from(buf: &[u8]) -> Result<BucketMetadata> {
+        let t: BucketMetadata = rmp_serde::from_slice(buf)?;
+        Ok(t)
+    }
 }
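Both unmarshal paths above pair with marshal_msg implementations that serialize through rmp-serde's `.with_struct_map()`, which encodes structs as msgpack maps (field name -> value) rather than positional arrays, so fields can be added or reordered without breaking old payloads. A minimal round-trip sketch under that assumption (`Meta` is a stand-in type, not part of the patch):

use rmp_serde::Serializer;
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize, Default)]
struct Meta {
    format: u16,
    version: u16,
    name: String,
}

fn main() {
    let m = Meta { format: 1, version: 1, name: "bucket-1".into() };

    // Serialize as a named-field map, mirroring marshal_msg above.
    let mut buf = Vec::new();
    m.serialize(&mut Serializer::new(&mut buf).with_struct_map()).unwrap();

    // Decode by field name, mirroring unmarshal_from above.
    let back: Meta = rmp_serde::from_slice(&buf).unwrap();
    assert_eq!(m, back);
}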
diff --git a/ecstore/src/config/common.rs b/ecstore/src/config/common.rs
new file mode 100644
index 00000000..e7132ed6
--- /dev/null
+++ b/ecstore/src/config/common.rs
@@ -0,0 +1,54 @@
+use crate::disk::RUSTFS_META_BUCKET;
+use crate::error::{Error, Result};
+use crate::store::ECStore;
+use crate::store_api::{HTTPRangeSpec, ObjectIO, ObjectInfo, ObjectOptions, PutObjReader};
+use http::HeaderMap;
+use s3s::dto::StreamingBlob;
+use s3s::Body;
+
+use super::error::ConfigError;
+
+pub async fn read_config(api: &ECStore, file: &str) -> Result<Vec<u8>> {
+    let (data, _obj) = read_config_with_metadata(api, file, &ObjectOptions::default()).await?;
+
+    Ok(data)
+}
+
+async fn read_config_with_metadata(api: &ECStore, file: &str, opts: &ObjectOptions) -> Result<(Vec<u8>, ObjectInfo)> {
+    let range = HTTPRangeSpec::nil();
+    let h = HeaderMap::new();
+    let mut rd = api.get_object_reader(RUSTFS_META_BUCKET, file, range, h, opts).await?;
+
+    let data = rd.read_all().await?;
+
+    if data.is_empty() {
+        return Err(Error::new(ConfigError::NotFound));
+    }
+
+    Ok((data, rd.object_info))
+}
+
+pub async fn save_config(api: &ECStore, file: &str, data: &[u8]) -> Result<()> {
+    save_config_with_opts(
+        api,
+        file,
+        data,
+        &ObjectOptions {
+            max_parity: true,
+            ..Default::default()
+        },
+    )
+    .await
+}
+
+async fn save_config_with_opts(api: &ECStore, file: &str, data: &[u8], opts: &ObjectOptions) -> Result<()> {
+    let _ = api
+        .put_object(
+            RUSTFS_META_BUCKET,
+            file,
+            PutObjReader::new(StreamingBlob::from(Body::from(data.to_vec())), data.len()),
+            opts,
+        )
+        .await?;
+    Ok(())
+}
diff --git a/ecstore/src/config/error.rs b/ecstore/src/config/error.rs
new file mode 100644
index 00000000..490b8c50
--- /dev/null
+++ b/ecstore/src/config/error.rs
@@ -0,0 +1,15 @@
+#[derive(Debug, thiserror::Error)]
+pub enum ConfigError {
+    #[error("config not found")]
+    NotFound,
+}
+
+impl ConfigError {
+    /// Returns `true` if the config error is [`NotFound`].
+ /// + /// [`NotFound`]: ConfigError::NotFound + #[must_use] + pub fn is_not_found(&self) -> bool { + matches!(self, Self::NotFound) + } +} diff --git a/ecstore/src/config/mod.rs b/ecstore/src/config/mod.rs new file mode 100644 index 00000000..3c22a0c5 --- /dev/null +++ b/ecstore/src/config/mod.rs @@ -0,0 +1,2 @@ +pub mod common; +pub mod error; diff --git a/ecstore/src/lib.rs b/ecstore/src/lib.rs index 059c3f41..d5881427 100644 --- a/ecstore/src/lib.rs +++ b/ecstore/src/lib.rs @@ -1,5 +1,6 @@ pub mod bucket_meta; mod chunk_stream; +mod config; pub mod disk; pub mod disks_layout; pub mod endpoints; diff --git a/ecstore/src/sets.rs b/ecstore/src/sets.rs index 783a5dd6..f43564b6 100644 --- a/ecstore/src/sets.rs +++ b/ecstore/src/sets.rs @@ -19,8 +19,8 @@ use crate::{ store::{GLOBAL_IsDistErasure, GLOBAL_LOCAL_DISK_SET_DRIVES}, store_api::{ BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec, - ListObjectsV2Info, MakeBucketOptions, MultipartUploadResult, ObjectInfo, ObjectOptions, ObjectToDelete, PartInfo, - PutObjReader, StorageAPI, + ListObjectsV2Info, MakeBucketOptions, MultipartUploadResult, ObjectIO, ObjectInfo, ObjectOptions, ObjectToDelete, + PartInfo, PutObjReader, StorageAPI, }, utils::hash, }; @@ -258,6 +258,25 @@ struct DelObj { obj: ObjectToDelete, } +#[async_trait::async_trait] +impl ObjectIO for Sets { + async fn get_object_reader( + &self, + bucket: &str, + object: &str, + range: HTTPRangeSpec, + h: HeaderMap, + opts: &ObjectOptions, + ) -> Result { + self.get_disks_by_key(object) + .get_object_reader(bucket, object, range, h, opts) + .await + } + async fn put_object(&self, bucket: &str, object: &str, data: PutObjReader, opts: &ObjectOptions) -> Result { + self.get_disks_by_key(object).put_object(bucket, object, data, opts).await + } +} + #[async_trait::async_trait] impl StorageAPI for Sets { async fn list_bucket(&self, _opts: &BucketOptions) -> Result> { @@ -383,22 +402,6 @@ impl StorageAPI for Sets { .await } - async fn get_object_reader( - &self, - bucket: &str, - object: &str, - range: HTTPRangeSpec, - h: HeaderMap, - opts: &ObjectOptions, - ) -> Result { - self.get_disks_by_key(object) - .get_object_reader(bucket, object, range, h, opts) - .await - } - async fn put_object(&self, bucket: &str, object: &str, data: PutObjReader, opts: &ObjectOptions) -> Result { - self.get_disks_by_key(object).put_object(bucket, object, data, opts).await - } - async fn put_object_part( &self, bucket: &str, diff --git a/ecstore/src/store.rs b/ecstore/src/store.rs index 8ad6a81f..f53a8202 100644 --- a/ecstore/src/store.rs +++ b/ecstore/src/store.rs @@ -1,7 +1,8 @@ #![allow(clippy::map_entry)] use crate::disk::endpoint::EndpointType; +use crate::store_api::ObjectIO; use crate::{ - bucket::BucketMetadata, + bucket::metadata::BucketMetadata, disk::{error::DiskError, new_disk, DiskOption, DiskStore, WalkDirOptions, BUCKET_META_PREFIX, RUSTFS_META_BUCKET}, endpoints::{EndpointServerPools, SetupType}, error::{Error, Result}, @@ -494,6 +495,38 @@ pub struct ListPathOptions { pub limit: i32, } +#[async_trait::async_trait] +impl ObjectIO for ECStore { + #[tracing::instrument(level = "debug", skip(self))] + async fn get_object_reader( + &self, + bucket: &str, + object: &str, + range: HTTPRangeSpec, + h: HeaderMap, + opts: &ObjectOptions, + ) -> Result { + let object = utils::path::encode_dir_object(object); + + if self.single_pool() { + return self.pools[0].get_object_reader(bucket, object.as_str(), range, h, opts).await; + } + + unimplemented!() + 
} + async fn put_object(&self, bucket: &str, object: &str, data: PutObjReader, opts: &ObjectOptions) -> Result { + // checkPutObjectArgs + + let object = utils::path::encode_dir_object(object); + + if self.single_pool() { + return self.pools[0].put_object(bucket, object.as_str(), data, opts).await; + } + + unimplemented!() + } +} + #[async_trait::async_trait] impl StorageAPI for ECStore { async fn list_bucket(&self, opts: &BucketOptions) -> Result> { @@ -740,34 +773,6 @@ impl StorageAPI for ECStore { unimplemented!() } - async fn get_object_reader( - &self, - bucket: &str, - object: &str, - range: HTTPRangeSpec, - h: HeaderMap, - opts: &ObjectOptions, - ) -> Result { - let object = utils::path::encode_dir_object(object); - - if self.single_pool() { - return self.pools[0].get_object_reader(bucket, object.as_str(), range, h, opts).await; - } - - unimplemented!() - } - async fn put_object(&self, bucket: &str, object: &str, data: PutObjReader, opts: &ObjectOptions) -> Result { - // checkPutObjectArgs - - let object = utils::path::encode_dir_object(object); - - if self.single_pool() { - return self.pools[0].put_object(bucket, object.as_str(), data, opts).await; - } - - unimplemented!() - } - async fn put_object_part( &self, bucket: &str, diff --git a/ecstore/src/store_api.rs b/ecstore/src/store_api.rs index 8673ffd3..d05bfea6 100644 --- a/ecstore/src/store_api.rs +++ b/ecstore/src/store_api.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use crate::error::{Error, Result}; +use futures::StreamExt; use http::HeaderMap; use rmp_serde::Serializer; use s3s::dto::StreamingBlob; @@ -281,12 +282,26 @@ pub struct GetObjectReader { pub object_info: ObjectInfo, } -// impl GetObjectReader { -// pub fn new(stream: StreamingBlob, object_info: ObjectInfo) -> Self { -// GetObjectReader { stream, object_info } -// } -// } +impl GetObjectReader { + // pub fn new(stream: StreamingBlob, object_info: ObjectInfo) -> Self { + // GetObjectReader { stream, object_info } + // } + pub async fn read_all(&mut self) -> Result> { + let mut data = Vec::new(); + while let Some(x) = self.stream.next().await { + let buf = match x { + Ok(res) => res, + Err(e) => return Err(Error::msg(e.to_string())), + }; + data.extend_from_slice(buf.as_ref()); + } + + Ok(data) + } +} + +#[derive(Debug)] pub struct HTTPRangeSpec { pub is_suffix_length: bool, pub start: i64, @@ -510,7 +525,20 @@ pub struct DeletedObject { } #[async_trait::async_trait] -pub trait StorageAPI { +pub trait ObjectIO: Send + Sync + 'static { + async fn get_object_reader( + &self, + bucket: &str, + object: &str, + range: HTTPRangeSpec, + h: HeaderMap, + opts: &ObjectOptions, + ) -> Result; + async fn put_object(&self, bucket: &str, object: &str, data: PutObjReader, opts: &ObjectOptions) -> Result; +} + +#[async_trait::async_trait] +pub trait StorageAPI: ObjectIO { async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()>; async fn delete_bucket(&self, bucket: &str, opts: &DeleteBucketOptions) -> Result<()>; async fn list_bucket(&self, opts: &BucketOptions) -> Result>; @@ -536,15 +564,15 @@ pub trait StorageAPI { async fn put_object_info(&self, bucket: &str, object: &str, info: ObjectInfo, opts: &ObjectOptions) -> Result<()>; - async fn get_object_reader( - &self, - bucket: &str, - object: &str, - range: HTTPRangeSpec, - h: HeaderMap, - opts: &ObjectOptions, - ) -> Result; - async fn put_object(&self, bucket: &str, object: &str, data: PutObjReader, opts: &ObjectOptions) -> Result; + // async fn get_object_reader( + // &self, + // bucket: &str, + 
// object: &str, + // range: HTTPRangeSpec, + // h: HeaderMap, + // opts: &ObjectOptions, + // ) -> Result; + // async fn put_object(&self, bucket: &str, object: &str, data: PutObjReader, opts: &ObjectOptions) -> Result; async fn put_object_part( &self, bucket: &str, diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 3df2b6d9..2ab8aedf 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -1,5 +1,6 @@ use bytes::BufMut; use bytes::Bytes; +use ecstore::bucket::get_bucket_metadata_sys; use ecstore::bucket_meta::BucketMetadata; use ecstore::disk::error::DiskError; use ecstore::disk::RUSTFS_META_BUCKET; @@ -10,6 +11,7 @@ use ecstore::store_api::DeleteBucketOptions; use ecstore::store_api::HTTPRangeSpec; use ecstore::store_api::MakeBucketOptions; use ecstore::store_api::MultipartUploadResult; +use ecstore::store_api::ObjectIO; use ecstore::store_api::ObjectOptions; use ecstore::store_api::ObjectToDelete; use ecstore::store_api::PutObjReader; @@ -769,45 +771,62 @@ impl S3 for FS { .as_ref() .ok_or_else(|| S3Error::with_message(S3ErrorCode::InternalError, "Not init"))?; - let meta_obj = try_!( - store - .get_object_reader( - RUSTFS_META_BUCKET, - BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), - HTTPRangeSpec::nil(), - Default::default(), - &ObjectOptions::default(), - ) - .await - ); + // let a = get_bucket_metadata_sys(); - let stream = meta_obj.stream; + Ok(S3Response::new(GetBucketTaggingOutput { ..Default::default() })) - let mut data = vec![]; - pin_mut!(stream); + // let meta_obj = try_!( + // store + // .get_object_reader( + // RUSTFS_META_BUCKET, + // BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), + // HTTPRangeSpec::nil(), + // Default::default(), + // &ObjectOptions::default(), + // ) + // .await + // ); - while let Some(x) = stream.next().await { - let x = try_!(x); - data.put_slice(&x[..]); - } + // let stream = meta_obj.stream; - let meta = try_!(BucketMetadata::unmarshal_from(&data[..])); - if meta.tagging.is_none() { - return Err({ - let mut err = S3Error::with_message(S3ErrorCode::Custom("NoSuchTagSet".into()), "The TagSet does not exist"); - err.set_status_code("404".try_into().unwrap()); - err - }); - } + // let mut data = vec![]; + // pin_mut!(stream); - Ok(S3Response::new(GetBucketTaggingOutput { - tag_set: meta - .tagging - .unwrap() - .into_iter() - .map(|(key, value)| Tag { key, value }) - .collect(), - })) + // while let Some(x) = stream.next().await { + // let x = try_!(x); + // data.put_slice(&x[..]); + // } + + // if data.is_empty() { + // return Err({ + // let mut err = S3Error::with_message(S3ErrorCode::Custom("NoSuchTagSet".into()), "The TagSet does not exist"); + // err.set_status_code("404".try_into().unwrap()); + // err + // }); + // } + + // warn!("bm: 1111"); + + // let meta = try_!(BucketMetadata::unmarshal_from(&data[..])); + + // warn!("bm 33333"); + // if meta.tagging.is_none() { + // return Err({ + // let mut err = S3Error::with_message(S3ErrorCode::Custom("NoSuchTagSet".into()), "The TagSet does not exist"); + // err.set_status_code("404".try_into().unwrap()); + // err + // }); + // } + + // warn!("bm:4444"); + // Ok(S3Response::new(GetBucketTaggingOutput { + // tag_set: meta + // .tagging + // .unwrap() + // .into_iter() + // .map(|(key, value)| Tag { key, value }) + // .collect(), + // })) } #[tracing::instrument(level = "debug", skip(self))] From 25bfd1bbc4e8936ed236b32fc81aa2d548192016 Mon Sep 17 00:00:00 2001 From: weisd Date: Tue, 1 Oct 2024 23:06:09 +0800 Subject: [PATCH 
02/20] init bucketmetadata_sys --- ecstore/src/bucket/error.rs | 5 + ecstore/src/bucket/metadata.rs | 76 +++++++++-- ecstore/src/bucket/metadata_sys.rs | 138 ++++++++++++++++++-- ecstore/src/bucket/mod.rs | 2 + ecstore/src/bucket/utils.rs | 5 + ecstore/src/config/error.rs | 10 ++ ecstore/src/endpoints.rs | 7 +- ecstore/src/global.rs | 62 +++++++++ ecstore/src/lib.rs | 4 + ecstore/src/sets.rs | 6 +- ecstore/src/store.rs | 198 ++++++++++++----------------- rustfs/src/main.rs | 4 +- rustfs/src/storage/ecfs.rs | 2 +- 13 files changed, 372 insertions(+), 147 deletions(-) create mode 100644 ecstore/src/bucket/error.rs create mode 100644 ecstore/src/bucket/utils.rs create mode 100644 ecstore/src/global.rs diff --git a/ecstore/src/bucket/error.rs b/ecstore/src/bucket/error.rs new file mode 100644 index 00000000..781f7f11 --- /dev/null +++ b/ecstore/src/bucket/error.rs @@ -0,0 +1,5 @@ +#[derive(Debug, thiserror::Error)] +pub enum BucketMetadataError { + #[error("tagging not found")] + TaggingNotFound, +} diff --git a/ecstore/src/bucket/metadata.rs b/ecstore/src/bucket/metadata.rs index 251ff304..6d53e1b4 100644 --- a/ecstore/src/bucket/metadata.rs +++ b/ecstore/src/bucket/metadata.rs @@ -13,18 +13,30 @@ use time::OffsetDateTime; use tracing::error; use crate::bucket::tags; +use crate::config; use crate::config::common::{read_config, save_config}; -use crate::config::error::ConfigError; use crate::error::{Error, Result}; use crate::disk::BUCKET_META_PREFIX; use crate::store::ECStore; -use crate::store_api::StorageAPI; + +type TypeConfigFile = &'static str; pub const BUCKET_METADATA_FILE: &str = ".metadata.bin"; pub const BUCKET_METADATA_FORMAT: u16 = 1; pub const BUCKET_METADATA_VERSION: u16 = 1; +pub const BUCKET_POLICY_CONFIG: &str = "policy.json"; +pub const BUCKET_NOTIFICATION_CONFIG: &str = "notification.xml"; +pub const BUCKET_LIFECYCLE_CONFIG: &str = "lifecycle.xml"; +pub const BUCKET_SSECONFIG: &str = "bucket-encryption.xml"; +pub const BUCKET_TAGGING_CONFIG: &str = "tagging.xml"; +pub const BUCKET_QUOTA_CONFIG_FILE: &str = "quota.json"; +pub const OBJECT_LOCK_CONFIG: &str = "object-lock.xml"; +pub const BUCKET_VERSIONING_CONFIG: &str = "versioning.xml"; +pub const BUCKET_REPLICATION_CONFIG: &str = "replication.xml"; +pub const BUCKET_TARGETS_FILE: &str = "bucket-targets.json"; + #[derive(Debug, Deserialize, Serialize, Clone)] #[serde(rename_all = "PascalCase", default)] pub struct BucketMetadata { @@ -170,7 +182,57 @@ impl BucketMetadata { } } - async fn save(&mut self, api: &ECStore) -> Result<()> { + pub fn update_config(&mut self, config_file: &str, data: Vec) -> Result { + let updated = OffsetDateTime::now_utc(); + + match config_file { + BUCKET_POLICY_CONFIG => { + self.policy_config_json = data; + self.policy_config_updated_at = updated; + } + BUCKET_NOTIFICATION_CONFIG => { + self.notification_config_xml = data; + self.notification_config_updated_at = updated; + } + BUCKET_LIFECYCLE_CONFIG => { + self.lifecycle_config_xml = data; + self.lifecycle_config_updated_at = updated; + } + BUCKET_SSECONFIG => { + self.encryption_config_xml = data; + self.encryption_config_updated_at = updated; + } + BUCKET_TAGGING_CONFIG => { + self.tagging_config_xml = data; + self.tagging_config_updated_at = updated; + } + BUCKET_QUOTA_CONFIG_FILE => { + self.quota_config_json = data; + self.quota_config_updated_at = updated; + } + OBJECT_LOCK_CONFIG => { + self.object_lock_config_xml = data; + self.object_lock_config_updated_at = updated; + } + BUCKET_VERSIONING_CONFIG => { + self.versioning_config_xml = 
data;
+                self.versioning_config_updated_at = updated;
+            }
+            BUCKET_REPLICATION_CONFIG => {
+                self.replication_config_xml = data;
+                self.replication_config_updated_at = updated;
+            }
+            BUCKET_TARGETS_FILE => {
+                self.bucket_targets_config_json = data;
+                self.bucket_targets_config_updated_at = updated;
+            }
+            _ => return Err(Error::msg(format!("config file not found : {}", config_file))),
+        }
+
+        Ok(updated)
+    }
+
+    pub async fn save(&mut self, api: &ECStore) -> Result<()> {
         self.parse_all_configs(api)?;
 
         let mut buf: Vec<u8> = vec![0; 4];
@@ -201,14 +263,12 @@ pub async fn load_bucket_metadata(api: &ECStore, bucket: &str) -> Result<BucketMetadata> {
-async fn load_bucket_metadata_parse(api: &ECStore, bucket: &str, parse: bool) -> Result<BucketMetadata> {
+pub async fn load_bucket_metadata_parse(api: &ECStore, bucket: &str, parse: bool) -> Result<BucketMetadata> {
     let mut bm = match read_bucket_metadata(api, bucket).await {
         Ok(res) => res,
         Err(err) => {
-            if let Some(e) = err.downcast_ref::<ConfigError>() {
-                if !ConfigError::is_not_found(&e) {
-                    return Err(err);
-                }
+            if !config::error::is_not_found(&err) {
+                return Err(err);
             }
 
             BucketMetadata::new(bucket)
diff --git a/ecstore/src/bucket/metadata_sys.rs b/ecstore/src/bucket/metadata_sys.rs
index 92d24a4e..a43eda5d 100644
--- a/ecstore/src/bucket/metadata_sys.rs
+++ b/ecstore/src/bucket/metadata_sys.rs
@@ -1,7 +1,15 @@
 use std::{collections::HashMap, sync::Arc};
 
+use crate::bucket::error::BucketMetadataError;
+use crate::bucket::metadata::load_bucket_metadata_parse;
+use crate::bucket::utils::is_meta_bucketname;
+use crate::config;
+use crate::config::error::ConfigError;
+use crate::disk::error::DiskError;
 use crate::error::{Error, Result};
+use crate::global::{is_dist_erasure, is_erasure, new_object_layer_fn};
 use crate::store::ECStore;
+use futures::future::join_all;
 use lazy_static::lazy_static;
 use time::OffsetDateTime;
 use tokio::sync::RwLock;
@@ -10,41 +18,132 @@ use super::metadata::{load_bucket_metadata, BucketMetadata};
 use super::tags;
 
 lazy_static!
{ - pub static ref GLOBAL_BucketMetadataSys: Arc> = Arc::new(Some(BucketMetadataSys::new())); + static ref GLOBAL_BucketMetadataSys: Arc = Arc::new(BucketMetadataSys::new()); } -pub fn get_bucket_metadata_sys() -> Arc> { +pub fn get_bucket_metadata_sys() -> Arc { GLOBAL_BucketMetadataSys.clone() } #[derive(Debug, Default)] pub struct BucketMetadataSys { metadata_map: RwLock>, - api: Option>, + api: Option, initialized: RwLock, } impl BucketMetadataSys { fn new() -> Self { - Self { ..Default::default() } + Self::default() } - pub fn init(&mut self, api: Arc, buckets: Vec) { - self.api = Some(api); + pub async fn init(&mut self, api: Option, buckets: Vec<&str>) -> Result<()> { + if api.is_none() { + return Err(Error::msg("errServerNotInitialized")); + } + self.api = api; + let _ = self.init_internal(buckets).await; + + Ok(()) + } + async fn init_internal(&self, buckets: Vec<&str>) -> Result<()> { + if self.api.is_none() { + return Err(Error::msg("errServerNotInitialized")); + } + let mut futures = Vec::new(); + let mut errs = Vec::new(); + let mut ress = Vec::new(); + + for &bucket in buckets.iter() { + futures.push(load_bucket_metadata(self.api.as_ref().unwrap(), bucket)); + } + + let results = join_all(futures).await; + + for res in results { + match res { + Ok(entrys) => { + ress.push(Some(entrys)); + errs.push(None); + } + Err(e) => { + ress.push(None); + errs.push(Some(e)); + } + } + } unimplemented!() } + async fn concurrent_load(&self, buckets: Vec<&str>) -> Result> { + unimplemented!() + } + + pub async fn get(&self, bucket: &str) -> Result { + if is_meta_bucketname(bucket) { + return Err(Error::new(ConfigError::NotFound)); + } + + let map = self.metadata_map.read().await; + if let Some(bm) = map.get(bucket) { + Ok(bm.clone()) + } else { + Err(Error::new(ConfigError::NotFound)) + } + } + + pub async fn set(&self, bucket: &str, bm: BucketMetadata) { + if !is_meta_bucketname(bucket) { + let mut map = self.metadata_map.write().await; + map.insert(bucket.to_string(), bm); + } + } + async fn reset(&mut self) { let mut map = self.metadata_map.write().await; map.clear(); } - pub async fn get_config(&self, bucket: String) -> Result<(BucketMetadata, bool)> { + pub async fn update(&mut self, bucket: &str, config_file: &str, data: Vec) -> Result<(OffsetDateTime)> { + self.update_and_parse(bucket, config_file, data, true).await + } + + async fn update_and_parse(&mut self, bucket: &str, config_file: &str, data: Vec, parse: bool) -> Result { + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(Error::msg("errServerNotInitialized")), + }; + + if is_meta_bucketname(&bucket) { + return Err(Error::msg("errInvalidArgument")); + } + + let mut bm = match load_bucket_metadata_parse(store, &bucket, parse).await { + Ok(res) => res, + Err(err) => { + if !is_erasure().await && !is_dist_erasure().await && DiskError::VolumeNotFound.is(&err) { + BucketMetadata::new(&bucket) + } else { + return Err(err); + } + } + }; + + let updated = bm.update_config(config_file, data)?; + + bm.save(store).await?; + + Ok(updated) + } + + pub async fn get_config(&self, bucket: &str) -> Result<(BucketMetadata, bool)> { if let Some(api) = self.api.as_ref() { let has_bm = { let map = self.metadata_map.read().await; - if let Some(bm) = map.get(&bucket) { + if let Some(bm) = map.get(&bucket.to_string()) { Some(bm.clone()) } else { None @@ -54,7 +153,7 @@ impl BucketMetadataSys { if let Some(bm) = has_bm { return Ok((bm, false)); } else { - let bm 
= match load_bucket_metadata(&api, bucket.as_str()).await { + let bm = match load_bucket_metadata(&api, bucket).await { Ok(res) => res, Err(err) => { if *self.initialized.read().await { @@ -67,7 +166,7 @@ impl BucketMetadataSys { let mut map = self.metadata_map.write().await; - map.insert(bucket, bm.clone()); + map.insert(bucket.to_string(), bm.clone()); Ok((bm, true)) } @@ -76,7 +175,22 @@ impl BucketMetadataSys { } } - pub async fn get_tagging_config(&self, bucket: String) -> Result<(tags::Tags, Option)> { - unimplemented!() + pub async fn get_tagging_config(&self, bucket: &str) -> Result<(tags::Tags, OffsetDateTime)> { + let bm = match self.get_config(bucket).await { + Ok((res, _)) => res, + Err(err) => { + if config::error::is_not_found(&err) { + return Err(Error::new(BucketMetadataError::TaggingNotFound)); + } else { + return Err(err); + } + } + }; + + if let Some(config) = bm.tagging_config { + Ok((config, bm.tagging_config_updated_at)) + } else { + Err(Error::new(BucketMetadataError::TaggingNotFound)) + } } } diff --git a/ecstore/src/bucket/mod.rs b/ecstore/src/bucket/mod.rs index 48d3c5f5..b2df60cc 100644 --- a/ecstore/src/bucket/mod.rs +++ b/ecstore/src/bucket/mod.rs @@ -1,4 +1,5 @@ mod encryption; +mod error; mod event; mod lifecycle; pub mod metadata; @@ -10,5 +11,6 @@ mod replication; mod tags; mod target; mod versioning; +pub mod utils; pub use metadata_sys::get_bucket_metadata_sys; diff --git a/ecstore/src/bucket/utils.rs b/ecstore/src/bucket/utils.rs new file mode 100644 index 00000000..a8cb97b6 --- /dev/null +++ b/ecstore/src/bucket/utils.rs @@ -0,0 +1,5 @@ +use crate::disk::RUSTFS_META_BUCKET; + +pub fn is_meta_bucketname(name: &str) -> bool { + name.starts_with(RUSTFS_META_BUCKET) +} diff --git a/ecstore/src/config/error.rs b/ecstore/src/config/error.rs index 490b8c50..1628f31d 100644 --- a/ecstore/src/config/error.rs +++ b/ecstore/src/config/error.rs @@ -1,3 +1,5 @@ +use crate::error::Error; + #[derive(Debug, thiserror::Error)] pub enum ConfigError { #[error("config not found")] @@ -13,3 +15,11 @@ impl ConfigError { matches!(self, Self::NotFound) } } + +pub fn is_not_found(err: &Error) -> bool { + if let Some(e) = err.downcast_ref::() { + ConfigError::is_not_found(&e) + } else { + false + } +} diff --git a/ecstore/src/endpoints.rs b/ecstore/src/endpoints.rs index 9af5b963..ad533140 100644 --- a/ecstore/src/endpoints.rs +++ b/ecstore/src/endpoints.rs @@ -10,7 +10,7 @@ use std::{ }; /// enum for setup type. -#[derive(PartialEq, Eq, Debug)] +#[derive(PartialEq, Eq, Debug, Clone)] pub enum SetupType { /// starts with unknown setup type. Unknown, @@ -111,6 +111,7 @@ impl Endpoints { } } +#[derive(Debug)] /// a temporary type to holds the list of endpoints struct PoolEndpointList { inner: Vec, @@ -439,6 +440,10 @@ impl EndpointServerPools { Ok((ret, pool_eps.setup_type)) } + pub fn es_count(&self) -> usize { + self.0.iter().map(|v| v.set_count).count() + } + /// add pool endpoints pub fn add(&mut self, eps: PoolEndpoints) -> Result<()> { let mut exits = HashSet::new(); diff --git a/ecstore/src/global.rs b/ecstore/src/global.rs new file mode 100644 index 00000000..c7ac7b00 --- /dev/null +++ b/ecstore/src/global.rs @@ -0,0 +1,62 @@ +use crate::error::Result; +use lazy_static::lazy_static; +use std::{collections::HashMap, sync::Arc}; +use tokio::{fs, sync::RwLock}; + +use crate::{ + disk::{new_disk, DiskOption, DiskStore}, + endpoints::{EndpointServerPools, SetupType}, + store::ECStore, +}; + +lazy_static! 
{ + pub static ref GLOBAL_OBJECT_API: Arc>> = Arc::new(RwLock::new(None)); + pub static ref GLOBAL_LOCAL_DISK: Arc>>> = Arc::new(RwLock::new(Vec::new())); +} + +pub fn new_object_layer_fn() -> Arc>> { + GLOBAL_OBJECT_API.clone() +} + +pub async fn set_object_layer(o: ECStore) { + let mut global_object_api = GLOBAL_OBJECT_API.write().await; + *global_object_api = Some(o); +} + +lazy_static! { + static ref GLOBAL_IsErasure: RwLock = RwLock::new(false); + static ref GLOBAL_IsDistErasure: RwLock = RwLock::new(false); + static ref GLOBAL_IsErasureSD: RwLock = RwLock::new(false); +} + +pub async fn is_dist_erasure() -> bool { + let lock = GLOBAL_IsDistErasure.read().await; + *lock == true +} + +pub async fn is_erasure() -> bool { + let lock = GLOBAL_IsErasure.read().await; + *lock == true +} + +pub async fn update_erasure_type(setup_type: SetupType) { + let mut is_erasure = GLOBAL_IsErasure.write().await; + *is_erasure = setup_type == SetupType::Erasure; + + let mut is_dist_erasure = GLOBAL_IsDistErasure.write().await; + *is_dist_erasure = setup_type == SetupType::DistErasure; + + if *is_dist_erasure { + *is_erasure = true + } + + let mut is_erasure_sd = GLOBAL_IsErasureSD.write().await; + *is_erasure_sd = setup_type == SetupType::ErasureSD; +} + +type TypeLocalDiskSetDrives = Vec>>>; + +lazy_static! { + pub static ref GLOBAL_LOCAL_DISK_MAP: Arc>>> = Arc::new(RwLock::new(HashMap::new())); + pub static ref GLOBAL_LOCAL_DISK_SET_DRIVES: Arc> = Arc::new(RwLock::new(Vec::new())); +} diff --git a/ecstore/src/lib.rs b/ecstore/src/lib.rs index d5881427..4210db4c 100644 --- a/ecstore/src/lib.rs +++ b/ecstore/src/lib.rs @@ -7,6 +7,7 @@ pub mod endpoints; pub mod erasure; pub mod error; mod file_meta; +mod global; pub mod peer; mod quorum; pub mod set_disk; @@ -18,3 +19,6 @@ mod store_init; mod utils; pub mod bucket; + +pub use global::new_object_layer_fn; +pub use global::update_erasure_type; diff --git a/ecstore/src/sets.rs b/ecstore/src/sets.rs index f43564b6..e3236fe0 100644 --- a/ecstore/src/sets.rs +++ b/ecstore/src/sets.rs @@ -15,8 +15,8 @@ use crate::{ }, endpoints::PoolEndpoints, error::{Error, Result}, + global::{is_dist_erasure, GLOBAL_LOCAL_DISK_SET_DRIVES}, set_disk::SetDisks, - store::{GLOBAL_IsDistErasure, GLOBAL_LOCAL_DISK_SET_DRIVES}, store_api::{ BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec, ListObjectsV2Info, MakeBucketOptions, MultipartUploadResult, ObjectIO, ObjectInfo, ObjectOptions, ObjectToDelete, @@ -94,7 +94,7 @@ impl Sets { continue; } - if disk.as_ref().unwrap().is_local() && *GLOBAL_IsDistErasure.read().await { + if disk.as_ref().unwrap().is_local() && is_dist_erasure().await { let local_disk = { let local_set_drives = GLOBAL_LOCAL_DISK_SET_DRIVES.read().await; local_set_drives[pool_idx][i][j].clone() @@ -124,7 +124,7 @@ impl Sets { let set_disks = SetDisks { lockers: lockers[i].clone(), locker_owner: GLOBAL_Local_Node_Name.read().await.to_string(), - ns_mutex: Arc::new(RwLock::new(NsLockMap::new(*GLOBAL_IsDistErasure.read().await))), + ns_mutex: Arc::new(RwLock::new(NsLockMap::new(is_dist_erasure().await))), disks: RwLock::new(set_drive), set_drive_count, default_parity_count: partiy_count, diff --git a/ecstore/src/store.rs b/ecstore/src/store.rs index f53a8202..0e563ada 100644 --- a/ecstore/src/store.rs +++ b/ecstore/src/store.rs @@ -1,5 +1,6 @@ #![allow(clippy::map_entry)] use crate::disk::endpoint::EndpointType; +use crate::global::{is_dist_erasure, set_object_layer, GLOBAL_LOCAL_DISK_MAP, 
GLOBAL_LOCAL_DISK_SET_DRIVES}; use crate::store_api::ObjectIO; use crate::{ bucket::metadata::BucketMetadata, @@ -27,129 +28,12 @@ use std::{ time::Duration, }; use time::OffsetDateTime; +use tokio::fs; use tokio::sync::Semaphore; -use tokio::{fs, sync::RwLock}; + use tracing::{debug, info}; use uuid::Uuid; -use lazy_static::lazy_static; - -lazy_static! { - pub static ref GLOBAL_IsErasure: RwLock = RwLock::new(false); - pub static ref GLOBAL_IsDistErasure: RwLock = RwLock::new(false); - pub static ref GLOBAL_IsErasureSD: RwLock = RwLock::new(false); -} - -pub async fn update_erasure_type(setup_type: SetupType) { - let mut is_erasure = GLOBAL_IsErasure.write().await; - *is_erasure = setup_type == SetupType::Erasure; - - let mut is_dist_erasure = GLOBAL_IsDistErasure.write().await; - *is_dist_erasure = setup_type == SetupType::DistErasure; - - if *is_dist_erasure { - *is_erasure = true - } - - let mut is_erasure_sd = GLOBAL_IsErasureSD.write().await; - *is_erasure_sd = setup_type == SetupType::ErasureSD; -} - -type TypeLocalDiskSetDrives = Vec>>>; - -lazy_static! { - pub static ref GLOBAL_LOCAL_DISK_MAP: Arc>>> = Arc::new(RwLock::new(HashMap::new())); - pub static ref GLOBAL_LOCAL_DISK_SET_DRIVES: Arc> = Arc::new(RwLock::new(Vec::new())); -} - -pub async fn find_local_disk(disk_path: &String) -> Option { - let disk_path = match fs::canonicalize(disk_path).await { - Ok(disk_path) => disk_path, - Err(_) => return None, - }; - - let disk_map = GLOBAL_LOCAL_DISK_MAP.read().await; - - let path = disk_path.to_string_lossy().to_string(); - if disk_map.contains_key(&path) { - let a = disk_map[&path].as_ref().cloned(); - - return a; - } - None -} - -pub async fn all_local_disk_path() -> Vec { - let disk_map = GLOBAL_LOCAL_DISK_MAP.read().await; - disk_map.keys().cloned().collect() -} - -pub async fn all_local_disk() -> Vec { - let disk_map = GLOBAL_LOCAL_DISK_MAP.read().await; - disk_map - .values() - .filter(|v| v.is_some()) - .map(|v| v.as_ref().unwrap().clone()) - .collect() -} - -// init_local_disks 初始化本地磁盘,server启动前必须初始化成功 -pub async fn init_local_disks(endpoint_pools: EndpointServerPools) -> Result<()> { - let opt = &DiskOption { - cleanup: true, - health_check: true, - }; - - let mut global_set_drives = GLOBAL_LOCAL_DISK_SET_DRIVES.write().await; - for pool_eps in endpoint_pools.as_ref().iter() { - let mut set_count_drives = Vec::with_capacity(pool_eps.set_count); - for _ in 0..pool_eps.set_count { - set_count_drives.push(vec![None; pool_eps.drives_per_set]); - } - - global_set_drives.push(set_count_drives); - } - - let mut global_local_disk_map = GLOBAL_LOCAL_DISK_MAP.write().await; - - for pool_eps in endpoint_pools.as_ref().iter() { - let mut set_drives = HashMap::new(); - for ep in pool_eps.endpoints.as_ref().iter() { - if !ep.is_local { - continue; - } - - let disk = new_disk(ep, opt).await?; - - let path = disk.path().to_string_lossy().to_string(); - - global_local_disk_map.insert(path, Some(disk.clone())); - - set_drives.insert(ep.disk_idx, Some(disk.clone())); - - if ep.pool_idx.is_some() && ep.set_idx.is_some() && ep.disk_idx.is_some() { - global_set_drives[ep.pool_idx.unwrap()][ep.set_idx.unwrap()][ep.disk_idx.unwrap()] = Some(disk.clone()); - } - } - } - - Ok(()) -} - -lazy_static! 
{ - pub static ref GLOBAL_OBJECT_API: Arc>> = Arc::new(RwLock::new(None)); - pub static ref GLOBAL_LOCAL_DISK: Arc>>> = Arc::new(RwLock::new(Vec::new())); -} - -pub fn new_object_layer_fn() -> Arc>> { - GLOBAL_OBJECT_API.clone() -} - -async fn set_object_layer(o: ECStore) { - let mut global_object_api = GLOBAL_OBJECT_API.write().await; - *global_object_api = Some(o); -} - #[derive(Debug)] pub struct ECStore { pub id: uuid::Uuid, @@ -244,7 +128,7 @@ impl ECStore { } // 替换本地磁盘 - if !*GLOBAL_IsDistErasure.read().await { + if !is_dist_erasure().await { let mut global_local_disk_map = GLOBAL_LOCAL_DISK_MAP.write().await; for disk in local_disks { let path = disk.path().to_string_lossy().to_string(); @@ -403,6 +287,80 @@ impl ECStore { } } +pub async fn find_local_disk(disk_path: &String) -> Option { + let disk_path = match fs::canonicalize(disk_path).await { + Ok(disk_path) => disk_path, + Err(_) => return None, + }; + + let disk_map = GLOBAL_LOCAL_DISK_MAP.read().await; + + let path = disk_path.to_string_lossy().to_string(); + if disk_map.contains_key(&path) { + let a = disk_map[&path].as_ref().cloned(); + + return a; + } + None +} + +pub async fn all_local_disk_path() -> Vec { + let disk_map = GLOBAL_LOCAL_DISK_MAP.read().await; + disk_map.keys().cloned().collect() +} + +pub async fn all_local_disk() -> Vec { + let disk_map = GLOBAL_LOCAL_DISK_MAP.read().await; + disk_map + .values() + .filter(|v| v.is_some()) + .map(|v| v.as_ref().unwrap().clone()) + .collect() +} + +// init_local_disks 初始化本地磁盘,server启动前必须初始化成功 +pub async fn init_local_disks(endpoint_pools: EndpointServerPools) -> Result<()> { + let opt = &DiskOption { + cleanup: true, + health_check: true, + }; + + let mut global_set_drives = GLOBAL_LOCAL_DISK_SET_DRIVES.write().await; + for pool_eps in endpoint_pools.as_ref().iter() { + let mut set_count_drives = Vec::with_capacity(pool_eps.set_count); + for _ in 0..pool_eps.set_count { + set_count_drives.push(vec![None; pool_eps.drives_per_set]); + } + + global_set_drives.push(set_count_drives); + } + + let mut global_local_disk_map = GLOBAL_LOCAL_DISK_MAP.write().await; + + for pool_eps in endpoint_pools.as_ref().iter() { + let mut set_drives = HashMap::new(); + for ep in pool_eps.endpoints.as_ref().iter() { + if !ep.is_local { + continue; + } + + let disk = new_disk(ep, opt).await?; + + let path = disk.path().to_string_lossy().to_string(); + + global_local_disk_map.insert(path, Some(disk.clone())); + + set_drives.insert(ep.disk_idx, Some(disk.clone())); + + if ep.pool_idx.is_some() && ep.set_idx.is_some() && ep.disk_idx.is_some() { + global_set_drives[ep.pool_idx.unwrap()][ep.set_idx.unwrap()][ep.disk_idx.unwrap()] = Some(disk.clone()); + } + } + } + + Ok(()) +} + async fn internal_get_pool_info_existing_with_opts( pools: &[Arc], bucket: &str, diff --git a/rustfs/src/main.rs b/rustfs/src/main.rs index 0c32c263..802832a8 100644 --- a/rustfs/src/main.rs +++ b/rustfs/src/main.rs @@ -7,7 +7,8 @@ use clap::Parser; use common::error::{Error, Result}; use ecstore::{ endpoints::EndpointServerPools, - store::{init_local_disks, update_erasure_type, ECStore}, + store::{init_local_disks, ECStore}, + update_erasure_type, }; use grpc::make_server; use hyper_util::{ @@ -91,7 +92,6 @@ async fn run(opt: config::Opt) -> Result<()> { .map_err(|err| Error::from_string(err.to_string()))?; update_erasure_type(setup_type).await; - // 初始化本地磁盘 init_local_disks(endpoint_pools.clone()) .await diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 2ab8aedf..2c9a0325 100644 --- 
a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -4,7 +4,7 @@ use ecstore::bucket::get_bucket_metadata_sys; use ecstore::bucket_meta::BucketMetadata; use ecstore::disk::error::DiskError; use ecstore::disk::RUSTFS_META_BUCKET; -use ecstore::store::new_object_layer_fn; +use ecstore::new_object_layer_fn; use ecstore::store_api::BucketOptions; use ecstore::store_api::CompletePart; use ecstore::store_api::DeleteBucketOptions; From 2f046b00f70476e2eee019c622a6418970d5aa72 Mon Sep 17 00:00:00 2001 From: weisd Date: Fri, 4 Oct 2024 23:21:58 +0800 Subject: [PATCH 03/20] init bucketmetadata_sys --- ecstore/src/bucket/metadata.rs | 23 ++++++-- ecstore/src/bucket/metadata_sys.rs | 87 +++++++++++++++++++++--------- ecstore/src/bucket/mod.rs | 4 +- ecstore/src/config/common.rs | 2 +- ecstore/src/endpoints.rs | 5 +- ecstore/src/global.rs | 29 +++++----- ecstore/src/lib.rs | 1 + ecstore/src/peer.rs | 2 +- ecstore/src/store.rs | 43 +++++++-------- ecstore/src/store_api.rs | 6 ++- rustfs/src/main.rs | 20 ++++++- rustfs/src/storage/ecfs.rs | 6 +-- 12 files changed, 153 insertions(+), 75 deletions(-) diff --git a/ecstore/src/bucket/metadata.rs b/ecstore/src/bucket/metadata.rs index 6d53e1b4..8f349a98 100644 --- a/ecstore/src/bucket/metadata.rs +++ b/ecstore/src/bucket/metadata.rs @@ -2,6 +2,7 @@ use super::{ encryption::BucketSSEConfig, event, lifecycle::lifecycle::Lifecycle, objectlock, policy::bucket_policy::BucketPolicy, quota::BucketQuota, replication, tags::Tags, target::BucketTargets, versioning::Versioning, }; + use byteorder::{BigEndian, ByteOrder, LittleEndian}; use rmp_serde::Serializer as rmpSerializer; use serde::Serializer; @@ -10,7 +11,7 @@ use std::collections::HashMap; use std::fmt::Display; use std::str::FromStr; use time::OffsetDateTime; -use tracing::error; +use tracing::{error, warn}; use crate::bucket::tags; use crate::config; @@ -172,7 +173,19 @@ impl BucketMetadata { return Err(Error::msg("read_bucket_metadata: data invalid")); } - // TODO: check version + let format = LittleEndian::read_u16(&buf[0..2]); + let version = LittleEndian::read_u16(&buf[2..4]); + + match format { + BUCKET_METADATA_FORMAT => {} + _ => return Err(Error::msg("read_bucket_metadata: format invalid")), + } + + match version { + BUCKET_METADATA_VERSION => {} + _ => return Err(Error::msg("read_bucket_metadata: version invalid")), + } + Ok(()) } @@ -252,6 +265,7 @@ impl BucketMetadata { fn parse_all_configs(&mut self, _api: &ECStore) -> Result<()> { if !self.tagging_config_xml.is_empty() { + warn!("self.tagging_config_xml {:?}", &self.tagging_config_xml); self.tagging_config = Some(tags::Tags::unmarshal(&self.tagging_config_xml)?); } @@ -267,6 +281,7 @@ pub async fn load_bucket_metadata_parse(api: &ECStore, bucket: &str, parse: bool let mut bm = match read_bucket_metadata(api, bucket).await { Ok(res) => res, Err(err) => { + warn!("load_bucket_metadata_parse err {:?}", &err); if !config::error::is_not_found(&err) { return Err(err); } @@ -299,7 +314,9 @@ async fn read_bucket_metadata(api: &ECStore, bucket: &str) -> Result(deserializer: D) -> core::result::Result diff --git a/ecstore/src/bucket/metadata_sys.rs b/ecstore/src/bucket/metadata_sys.rs index a43eda5d..05a60f8f 100644 --- a/ecstore/src/bucket/metadata_sys.rs +++ b/ecstore/src/bucket/metadata_sys.rs @@ -1,3 +1,4 @@ +use std::collections::HashSet; use std::{collections::HashMap, sync::Arc}; use crate::bucket::error::BucketMetadataError; @@ -7,21 +8,26 @@ use crate::config; use crate::config::error::ConfigError; use crate::disk::error::DiskError; 
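// NOTE (editorial sketch): the metadata.rs hunk above validates a 4-byte header
// ahead of the msgpack payload -- two little-endian u16 values, format then
// version. A minimal, self-contained illustration of that convention follows;
// the constant values (1, 1) and the helper names are assumptions for the
// sketch, not taken from this patch.
use byteorder::{ByteOrder, LittleEndian};

const BUCKET_METADATA_FORMAT: u16 = 1; // assumed value, for illustration only
const BUCKET_METADATA_VERSION: u16 = 1; // assumed value, for illustration only

// Prefix `payload` with the [format, version] header.
fn encode_with_header(payload: &[u8]) -> Vec<u8> {
    let mut buf = vec![0u8; 4 + payload.len()];
    LittleEndian::write_u16(&mut buf[0..2], BUCKET_METADATA_FORMAT);
    LittleEndian::write_u16(&mut buf[2..4], BUCKET_METADATA_VERSION);
    buf[4..].copy_from_slice(payload);
    buf
}

// Mirror of the checks added in read_bucket_metadata: reject short buffers,
// then verify format and version before touching the payload.
fn check_header(buf: &[u8]) -> Result<&[u8], &'static str> {
    if buf.len() < 4 {
        return Err("read_bucket_metadata: data invalid");
    }
    if LittleEndian::read_u16(&buf[0..2]) != BUCKET_METADATA_FORMAT {
        return Err("read_bucket_metadata: format invalid");
    }
    if LittleEndian::read_u16(&buf[2..4]) != BUCKET_METADATA_VERSION {
        return Err("read_bucket_metadata: version invalid");
    }
    Ok(&buf[4..])
}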
 use crate::error::{Error, Result};
-use crate::global::{is_dist_erasure, is_erasure, new_object_layer_fn};
+use crate::global::{is_dist_erasure, is_erasure, new_object_layer_fn, GLOBAL_Endpoints};
 use crate::store::ECStore;
 use futures::future::join_all;
 use lazy_static::lazy_static;
 use time::OffsetDateTime;
 use tokio::sync::RwLock;
+use tracing::{error, info, warn};
 
 use super::metadata::{load_bucket_metadata, BucketMetadata};
 use super::tags;
 
 lazy_static! {
-    static ref GLOBAL_BucketMetadataSys: Arc<BucketMetadataSys> = Arc::new(BucketMetadataSys::new());
+    static ref GLOBAL_BucketMetadataSys: Arc<RwLock<BucketMetadataSys>> = Arc::new(RwLock::new(BucketMetadataSys::new()));
 }
 
-pub fn get_bucket_metadata_sys() -> Arc<BucketMetadataSys> {
+pub async fn init_bucket_metadata_sys(api: ECStore, buckets: Vec<String>) {
+    let mut sys = GLOBAL_BucketMetadataSys.write().await;
+    sys.init(api, buckets).await
+}
+pub async fn get_bucket_metadata_sys() -> Arc<RwLock<BucketMetadataSys>> {
     GLOBAL_BucketMetadataSys.clone()
 }
@@ -37,46 +43,77 @@ impl BucketMetadataSys {
         Self::default()
     }
 
-    pub async fn init(&mut self, api: Option<ECStore>, buckets: Vec<&str>) -> Result<()> {
-        if api.is_none() {
-            return Err(Error::msg("errServerNotInitialized"));
-        }
-        self.api = api;
+    pub async fn init(&mut self, api: ECStore, buckets: Vec<String>) {
+        // if api.is_none() {
+        //     return Err(Error::msg("errServerNotInitialized"));
+        // }
+        self.api = Some(api);
         let _ = self.init_internal(buckets).await;
+    }
 
+    async fn init_internal(&self, buckets: Vec<String>) -> Result<()> {
+        let count = {
+            let endpoints = GLOBAL_Endpoints.read().await;
+            endpoints.es_count() * 10
+        };
+
+        let mut failed_buckets: HashSet<String> = HashSet::new();
+        let mut buckets = buckets.as_slice();
+
+        loop {
+            if buckets.len() < count {
+                self.concurrent_load(buckets, &mut failed_buckets).await;
+                break;
+            }
+
+            self.concurrent_load(&buckets[..count], &mut failed_buckets).await;
+
+            buckets = &buckets[count..]
+        }
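// NOTE (editorial sketch): the loop above is a hand-rolled batching window --
// at most es_count() * 10 metadata loads are in flight at once. The same idea
// expressed with slice::chunks, shown here only to make the batching explicit;
// `load_one` is a hypothetical stand-in for load_bucket_metadata:
async fn load_in_batches(buckets: &[String], batch_size: usize) -> Vec<String> {
    use futures::future::join_all;

    let mut failed = Vec::new();
    for window in buckets.chunks(batch_size.max(1)) {
        // One join_all per window bounds concurrency to the window size.
        let results = join_all(window.iter().map(|b| load_one(b))).await;
        for (bucket, res) in window.iter().zip(results) {
            if res.is_err() {
                failed.push(bucket.clone()); // retried later, like failed_buckets
            }
        }
    }
    failed
}

async fn load_one(_bucket: &str) -> Result<(), ()> {
    Ok(()) // placeholder for the real metadata load
}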
+
+        let mut initialized = self.initialized.write().await;
+        *initialized = true;
+
+        if is_dist_erasure().await {
+            // TODO: refresh_buckets_metadata_loop
+        }
 
         Ok(())
     }
 
-    async fn init_internal(&self, buckets: Vec<&str>) -> Result<()> {
-        if self.api.is_none() {
-            return Err(Error::msg("errServerNotInitialized"));
-        }
-        let mut futures = Vec::new();
-        let mut errs = Vec::new();
-        let mut ress = Vec::new();
 
-        for &bucket in buckets.iter() {
-            futures.push(load_bucket_metadata(self.api.as_ref().unwrap(), bucket));
+    async fn concurrent_load(&self, buckets: &[String], failed_buckets: &mut HashSet<String>) {
+        let mut futures = Vec::new();
+
+        for bucket in buckets.iter() {
+            futures.push(load_bucket_metadata(self.api.as_ref().unwrap(), bucket.as_str()));
         }
 
         let results = join_all(futures).await;
 
+        let mut idx = 0;
+
+        let mut mp = self.metadata_map.write().await;
+
         for res in results {
             match res {
-                Ok(entrys) => {
-                    ress.push(Some(entrys));
-                    errs.push(None);
+                Ok(res) => {
+                    if let Some(bucket) = buckets.get(idx) {
+                        mp.insert(bucket.clone(), res);
+                    }
                 }
                 Err(e) => {
-                    ress.push(None);
-                    errs.push(Some(e));
+                    error!("Unable to load bucket metadata, will be retried: {:?}", e);
+                    if let Some(bucket) = buckets.get(idx) {
+                        failed_buckets.insert(bucket.clone());
+                    }
                 }
             }
+
+            idx += 1;
         }
-        unimplemented!()
     }
 
-    async fn concurrent_load(&self, buckets: Vec<&str>) -> Result<Vec<BucketMetadata>> {
+    async fn refresh_buckets_metadata_loop(&self, failed_buckets: &HashSet<String>) -> Result<()> {
         unimplemented!()
     }
 
@@ -105,7 +142,7 @@ impl BucketMetadataSys {
         map.clear();
     }
 
-    pub async fn update(&mut self, bucket: &str, config_file: &str, data: Vec<u8>) -> Result<(OffsetDateTime)> {
+    pub async fn update(&mut self, bucket: &str, config_file: &str, data: Vec<u8>) -> Result<OffsetDateTime> {
         self.update_and_parse(bucket, config_file, data, true).await
     }
 
diff --git a/ecstore/src/bucket/mod.rs b/ecstore/src/bucket/mod.rs
index b2df60cc..9be46494 100644
--- a/ecstore/src/bucket/mod.rs
+++ b/ecstore/src/bucket/mod.rs
@@ -10,7 +10,7 @@ mod quota;
 mod replication;
 mod tags;
 mod target;
-mod versioning;
 pub mod utils;
+mod versioning;
 
-pub use metadata_sys::get_bucket_metadata_sys;
+pub use metadata_sys::{get_bucket_metadata_sys, init_bucket_metadata_sys};
diff --git a/ecstore/src/config/common.rs b/ecstore/src/config/common.rs
index e7132ed6..6dedfbed 100644
--- a/ecstore/src/config/common.rs
+++ b/ecstore/src/config/common.rs
@@ -47,7 +47,7 @@ async fn save_config_with_opts(api: &ECStore, file: &str, data: &[u8], opts: &Ob
         RUSTFS_META_BUCKET,
         file,
         PutObjReader::new(StreamingBlob::from(Body::from(data.to_vec())), data.len()),
-        &ObjectOptions::default(),
+        opts,
     )
     .await?;
     Ok(())
diff --git a/ecstore/src/endpoints.rs b/ecstore/src/endpoints.rs
index ad533140..14547bbf 100644
--- a/ecstore/src/endpoints.rs
+++ b/ecstore/src/endpoints.rs
@@ -388,7 +388,7 @@ pub struct PoolEndpoints {
 
 /// list of list of endpoints
 #[derive(Debug, Clone)]
-pub struct EndpointServerPools(Vec<PoolEndpoints>);
+pub struct EndpointServerPools(pub Vec<PoolEndpoints>);
 
 impl From<Vec<PoolEndpoints>> for EndpointServerPools {
     fn from(v: Vec<PoolEndpoints>) -> Self {
@@ -409,6 +409,9 @@ impl AsMut<Vec<PoolEndpoints>> for EndpointServerPools {
 }
 
 impl EndpointServerPools {
+    pub fn reset(&mut self, eps: Vec<PoolEndpoints>) {
+        self.0 = eps;
+    }
     pub fn from_volumes(server_addr: &str, endpoints: Vec<String>) -> Result<(EndpointServerPools, SetupType)> {
         let layouts = DisksLayout::try_from(endpoints.as_slice())?;
diff --git a/ecstore/src/global.rs b/ecstore/src/global.rs
index c7ac7b00..1b24924c 100644
--- a/ecstore/src/global.rs
+++ b/ecstore/src/global.rs
@@ -1,17 +1,27 @@
-use crate::error::Result;
 use
lazy_static::lazy_static; use std::{collections::HashMap, sync::Arc}; -use tokio::{fs, sync::RwLock}; +use tokio::sync::RwLock; use crate::{ - disk::{new_disk, DiskOption, DiskStore}, - endpoints::{EndpointServerPools, SetupType}, + disk::DiskStore, + endpoints::{EndpointServerPools, PoolEndpoints, SetupType}, store::ECStore, }; lazy_static! { pub static ref GLOBAL_OBJECT_API: Arc>> = Arc::new(RwLock::new(None)); pub static ref GLOBAL_LOCAL_DISK: Arc>>> = Arc::new(RwLock::new(Vec::new())); + static ref GLOBAL_IsErasure: RwLock = RwLock::new(false); + static ref GLOBAL_IsDistErasure: RwLock = RwLock::new(false); + static ref GLOBAL_IsErasureSD: RwLock = RwLock::new(false); + pub static ref GLOBAL_LOCAL_DISK_MAP: Arc>>> = Arc::new(RwLock::new(HashMap::new())); + pub static ref GLOBAL_LOCAL_DISK_SET_DRIVES: Arc> = Arc::new(RwLock::new(Vec::new())); + pub static ref GLOBAL_Endpoints: RwLock = RwLock::new(EndpointServerPools(Vec::new())); +} + +pub async fn set_global_endpoints(eps: Vec) { + let mut endpoints = GLOBAL_Endpoints.write().await; + endpoints.reset(eps); } pub fn new_object_layer_fn() -> Arc>> { @@ -23,12 +33,6 @@ pub async fn set_object_layer(o: ECStore) { *global_object_api = Some(o); } -lazy_static! { - static ref GLOBAL_IsErasure: RwLock = RwLock::new(false); - static ref GLOBAL_IsDistErasure: RwLock = RwLock::new(false); - static ref GLOBAL_IsErasureSD: RwLock = RwLock::new(false); -} - pub async fn is_dist_erasure() -> bool { let lock = GLOBAL_IsDistErasure.read().await; *lock == true @@ -55,8 +59,3 @@ pub async fn update_erasure_type(setup_type: SetupType) { } type TypeLocalDiskSetDrives = Vec>>>; - -lazy_static! { - pub static ref GLOBAL_LOCAL_DISK_MAP: Arc>>> = Arc::new(RwLock::new(HashMap::new())); - pub static ref GLOBAL_LOCAL_DISK_SET_DRIVES: Arc> = Arc::new(RwLock::new(Vec::new())); -} diff --git a/ecstore/src/lib.rs b/ecstore/src/lib.rs index 4210db4c..bd8bced9 100644 --- a/ecstore/src/lib.rs +++ b/ecstore/src/lib.rs @@ -21,4 +21,5 @@ mod utils; pub mod bucket; pub use global::new_object_layer_fn; +pub use global::set_global_endpoints; pub use global::update_erasure_type; diff --git a/ecstore/src/peer.rs b/ecstore/src/peer.rs index 27256de8..a1b6deb0 100644 --- a/ecstore/src/peer.rs +++ b/ecstore/src/peer.rs @@ -26,7 +26,7 @@ pub trait PeerS3Client: Debug + Sync + Send + 'static { fn get_pools(&self) -> Option>; } -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct S3PeerSys { pub clients: Vec, pub pools_count: usize, diff --git a/ecstore/src/store.rs b/ecstore/src/store.rs index 0e563ada..772d87e9 100644 --- a/ecstore/src/store.rs +++ b/ecstore/src/store.rs @@ -34,7 +34,7 @@ use tokio::sync::Semaphore; use tracing::{debug, info}; use uuid::Uuid; -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ECStore { pub id: uuid::Uuid, // pub disks: Vec, @@ -46,7 +46,7 @@ pub struct ECStore { impl ECStore { #[allow(clippy::new_ret_no_self)] - pub async fn new(_address: String, endpoint_pools: EndpointServerPools) -> Result<()> { + pub async fn new(_address: String, endpoint_pools: EndpointServerPools) -> Result { // let layouts = DisksLayout::try_from(endpoints.as_slice())?; let mut deployment_id = None; @@ -145,9 +145,9 @@ impl ECStore { peer_sys, }; - set_object_layer(ec).await; + set_object_layer(ec.clone()).await; - Ok(()) + Ok(ec) } pub fn init_local_disks() {} @@ -507,28 +507,29 @@ impl StorageAPI for ECStore { // TODO: delete created bucket when error self.peer_sys.make_bucket(bucket, opts).await?; - let meta = BucketMetadata::new(bucket); - let data = 
meta.marshal_msg()?; - let file_path = meta.save_file_path(); + let mut meta = BucketMetadata::new(bucket); + meta.save(self).await?; + // let data = meta.marshal_msg()?; + // let file_path = meta.save_file_path(); - // TODO: wrap hash reader + // // TODO: wrap hash reader - let content_len = data.len(); + // let content_len = data.len(); - let body = Body::from(data); + // let body = Body::from(data); - let reader = PutObjReader::new(StreamingBlob::from(body), content_len); + // let reader = PutObjReader::new(StreamingBlob::from(body), content_len); - self.put_object( - RUSTFS_META_BUCKET, - &file_path, - reader, - &ObjectOptions { - max_parity: true, - ..Default::default() - }, - ) - .await?; + // self.put_object( + // RUSTFS_META_BUCKET, + // &file_path, + // reader, + // &ObjectOptions { + // max_parity: true, + // ..Default::default() + // }, + // ) + // .await?; // TODO: toObjectErr diff --git a/ecstore/src/store_api.rs b/ecstore/src/store_api.rs index d05bfea6..8289ec22 100644 --- a/ecstore/src/store_api.rs +++ b/ecstore/src/store_api.rs @@ -414,7 +414,11 @@ pub struct ObjectOptions { // } #[derive(Debug, Default, Serialize, Deserialize)] -pub struct BucketOptions {} +pub struct BucketOptions { + pub deleted: bool, // true only when site replication is enabled + pub cached: bool, // true only when we are requesting a cached response instead of hitting the disk for example ListBuckets() call. + pub no_metadata: bool, +} #[derive(Debug, Clone, Serialize, Deserialize)] pub struct BucketInfo { diff --git a/rustfs/src/main.rs b/rustfs/src/main.rs index 802832a8..c23e04a6 100644 --- a/rustfs/src/main.rs +++ b/rustfs/src/main.rs @@ -6,8 +6,11 @@ mod storage; use clap::Parser; use common::error::{Error, Result}; use ecstore::{ + bucket::init_bucket_metadata_sys, endpoints::EndpointServerPools, + set_global_endpoints, store::{init_local_disks, ECStore}, + store_api::{BucketOptions, StorageAPI}, update_erasure_type, }; use grpc::make_server; @@ -90,8 +93,9 @@ async fn run(opt: config::Opt) -> Result<()> { // 用于rpc let (endpoint_pools, setup_type) = EndpointServerPools::from_volumes(opt.address.clone().as_str(), opt.volumes.clone()) .map_err(|err| Error::from_string(err.to_string()))?; - + set_global_endpoints(endpoint_pools.as_ref().clone()).await; update_erasure_type(setup_type).await; + // 初始化本地磁盘 init_local_disks(endpoint_pools.clone()) .await @@ -179,11 +183,23 @@ async fn run(opt: config::Opt) -> Result<()> { }); // init store - ECStore::new(opt.address.clone(), endpoint_pools.clone()) + let store = ECStore::new(opt.address.clone(), endpoint_pools.clone()) .await .map_err(|err| Error::from_string(err.to_string()))?; info!(" init store success!"); + let buckets_list = store + .list_bucket(&BucketOptions { + no_metadata: true, + ..Default::default() + }) + .await + .map_err(|err| Error::from_string(err.to_string()))?; + + let buckets = buckets_list.iter().map(|v| v.name.clone()).collect(); + + init_bucket_metadata_sys(store.clone(), buckets).await; + tokio::select! 
{ _ = tokio::signal::ctrl_c() => { diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 2c9a0325..b701404b 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -250,7 +250,7 @@ impl S3 for FS { None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), }; - if let Err(e) = store.get_bucket_info(&input.bucket, &BucketOptions {}).await { + if let Err(e) = store.get_bucket_info(&input.bucket, &BucketOptions::default()).await { if DiskError::VolumeNotFound.is(&e) { return Err(s3_error!(NoSuchBucket)); } else { @@ -324,7 +324,7 @@ impl S3 for FS { None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), }; - if let Err(e) = store.get_bucket_info(&input.bucket, &BucketOptions {}).await { + if let Err(e) = store.get_bucket_info(&input.bucket, &BucketOptions::default()).await { if DiskError::VolumeNotFound.is(&e) { return Err(s3_error!(NoSuchBucket)); } else { @@ -375,7 +375,7 @@ impl S3 for FS { None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), }; - let bucket_infos = try_!(store.list_bucket(&BucketOptions {}).await); + let bucket_infos = try_!(store.list_bucket(&BucketOptions::default()).await); let buckets: Vec = bucket_infos .iter() From c29199515c0ba432a8931834a1f4eb7f81cdf155 Mon Sep 17 00:00:00 2001 From: weisd Date: Fri, 4 Oct 2024 23:32:07 +0800 Subject: [PATCH 04/20] test bucketmetadata_sys --- ecstore/src/bucket/metadata_sys.rs | 5 +++++ ecstore/src/bucket/mod.rs | 2 +- ecstore/src/store.rs | 25 +++---------------------- rustfs/src/storage/ecfs.rs | 1 - 4 files changed, 9 insertions(+), 24 deletions(-) diff --git a/ecstore/src/bucket/metadata_sys.rs b/ecstore/src/bucket/metadata_sys.rs index 05a60f8f..a73c08c6 100644 --- a/ecstore/src/bucket/metadata_sys.rs +++ b/ecstore/src/bucket/metadata_sys.rs @@ -31,6 +31,11 @@ pub async fn get_bucket_metadata_sys() -> Arc> { GLOBAL_BucketMetadataSys.clone() } +pub async fn bucket_metadata_sys_set(bucket: &str, bm: BucketMetadata) { + let sys = GLOBAL_BucketMetadataSys.write().await; + sys.set(bucket, bm).await +} + #[derive(Debug, Default)] pub struct BucketMetadataSys { metadata_map: RwLock>, diff --git a/ecstore/src/bucket/mod.rs b/ecstore/src/bucket/mod.rs index 9be46494..b1f0a6d8 100644 --- a/ecstore/src/bucket/mod.rs +++ b/ecstore/src/bucket/mod.rs @@ -13,4 +13,4 @@ mod target; pub mod utils; mod versioning; -pub use metadata_sys::{get_bucket_metadata_sys, init_bucket_metadata_sys}; +pub use metadata_sys::{bucket_metadata_sys_set, get_bucket_metadata_sys, init_bucket_metadata_sys}; diff --git a/ecstore/src/store.rs b/ecstore/src/store.rs index 772d87e9..321740e3 100644 --- a/ecstore/src/store.rs +++ b/ecstore/src/store.rs @@ -1,11 +1,12 @@ #![allow(clippy::map_entry)] +use crate::bucket::bucket_metadata_sys_set; use crate::disk::endpoint::EndpointType; use crate::global::{is_dist_erasure, set_object_layer, GLOBAL_LOCAL_DISK_MAP, GLOBAL_LOCAL_DISK_SET_DRIVES}; use crate::store_api::ObjectIO; use crate::{ bucket::metadata::BucketMetadata, disk::{error::DiskError, new_disk, DiskOption, DiskStore, WalkDirOptions, BUCKET_META_PREFIX, RUSTFS_META_BUCKET}, - endpoints::{EndpointServerPools, SetupType}, + endpoints::EndpointServerPools, error::{Error, Result}, peer::S3PeerSys, sets::Sets, @@ -21,7 +22,6 @@ use backon::{ExponentialBuilder, Retryable}; use common::globals::{GLOBAL_Local_Node_Name, GLOBAL_Rustfs_Host, GLOBAL_Rustfs_Port}; use futures::future::join_all; use http::HeaderMap; -use 
s3s::{dto::StreamingBlob, Body}; use std::{ collections::{HashMap, HashSet}, sync::Arc, @@ -509,27 +509,8 @@ impl StorageAPI for ECStore { let mut meta = BucketMetadata::new(bucket); meta.save(self).await?; - // let data = meta.marshal_msg()?; - // let file_path = meta.save_file_path(); - // // TODO: wrap hash reader - - // let content_len = data.len(); - - // let body = Body::from(data); - - // let reader = PutObjReader::new(StreamingBlob::from(body), content_len); - - // self.put_object( - // RUSTFS_META_BUCKET, - // &file_path, - // reader, - // &ObjectOptions { - // max_parity: true, - // ..Default::default() - // }, - // ) - // .await?; + bucket_metadata_sys_set(bucket, meta).await; // TODO: toObjectErr diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index b701404b..dd1855f3 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -1,6 +1,5 @@ use bytes::BufMut; use bytes::Bytes; -use ecstore::bucket::get_bucket_metadata_sys; use ecstore::bucket_meta::BucketMetadata; use ecstore::disk::error::DiskError; use ecstore::disk::RUSTFS_META_BUCKET; From 1092b2696a6f154af7286df3c6acf0539074ce10 Mon Sep 17 00:00:00 2001 From: weisd Date: Sat, 5 Oct 2024 01:41:19 +0800 Subject: [PATCH 05/20] update bucket tagging op use bucketmetadata_sys --- ecstore/src/bucket/metadata.rs | 1 - ecstore/src/bucket/metadata_sys.rs | 30 +++- ecstore/src/bucket/mod.rs | 2 +- ecstore/src/bucket/tags/mod.rs | 17 +-- ecstore/src/config/common.rs | 1 + ecstore/src/store.rs | 2 +- rustfs/src/storage/ecfs.rs | 218 +++++++++++++++++------------ 7 files changed, 168 insertions(+), 103 deletions(-) diff --git a/ecstore/src/bucket/metadata.rs b/ecstore/src/bucket/metadata.rs index 8f349a98..5f6dd388 100644 --- a/ecstore/src/bucket/metadata.rs +++ b/ecstore/src/bucket/metadata.rs @@ -265,7 +265,6 @@ impl BucketMetadata { fn parse_all_configs(&mut self, _api: &ECStore) -> Result<()> { if !self.tagging_config_xml.is_empty() { - warn!("self.tagging_config_xml {:?}", &self.tagging_config_xml); self.tagging_config = Some(tags::Tags::unmarshal(&self.tagging_config_xml)?); } diff --git a/ecstore/src/bucket/metadata_sys.rs b/ecstore/src/bucket/metadata_sys.rs index a73c08c6..2e0711ce 100644 --- a/ecstore/src/bucket/metadata_sys.rs +++ b/ecstore/src/bucket/metadata_sys.rs @@ -31,7 +31,7 @@ pub async fn get_bucket_metadata_sys() -> Arc> { GLOBAL_BucketMetadataSys.clone() } -pub async fn bucket_metadata_sys_set(bucket: &str, bm: BucketMetadata) { +pub async fn bucket_metadata_sys_set(bucket: String, bm: BucketMetadata) { let sys = GLOBAL_BucketMetadataSys.write().await; sys.set(bucket, bm).await } @@ -135,10 +135,10 @@ impl BucketMetadataSys { } } - pub async fn set(&self, bucket: &str, bm: BucketMetadata) { - if !is_meta_bucketname(bucket) { + pub async fn set(&self, bucket: String, bm: BucketMetadata) { + if !is_meta_bucketname(&bucket) { let mut map = self.metadata_map.write().await; - map.insert(bucket.to_string(), bm); + map.insert(bucket, bm); } } @@ -176,11 +176,30 @@ impl BucketMetadataSys { let updated = bm.update_config(config_file, data)?; - bm.save(store).await?; + self.save(&mut bm).await?; Ok(updated) } + async fn save(&self, bm: &mut BucketMetadata) -> Result<()> { + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(Error::msg("errServerNotInitialized")), + }; + + if is_meta_bucketname(&bm.name) { + return Err(Error::msg("errInvalidArgument")); + } + + bm.save(store).await?; + + 
self.set(bm.name.clone(), bm.clone()).await; + + Ok(()) + } + pub async fn get_config(&self, bucket: &str) -> Result<(BucketMetadata, bool)> { if let Some(api) = self.api.as_ref() { let has_bm = { @@ -221,6 +240,7 @@ impl BucketMetadataSys { let bm = match self.get_config(bucket).await { Ok((res, _)) => res, Err(err) => { + warn!("get_tagging_config err {:?}", &err); if config::error::is_not_found(&err) { return Err(Error::new(BucketMetadataError::TaggingNotFound)); } else { diff --git a/ecstore/src/bucket/mod.rs b/ecstore/src/bucket/mod.rs index b1f0a6d8..a905fba1 100644 --- a/ecstore/src/bucket/mod.rs +++ b/ecstore/src/bucket/mod.rs @@ -8,7 +8,7 @@ mod objectlock; mod policy; mod quota; mod replication; -mod tags; +pub mod tags; mod target; pub mod utils; mod versioning; diff --git a/ecstore/src/bucket/tags/mod.rs b/ecstore/src/bucket/tags/mod.rs index 62cfb984..34c0ca79 100644 --- a/ecstore/src/bucket/tags/mod.rs +++ b/ecstore/src/bucket/tags/mod.rs @@ -1,4 +1,4 @@ -use crate::error::{Error, Result}; +use crate::error::Result; use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; use std::collections::HashMap; @@ -6,21 +6,22 @@ use std::collections::HashMap; // 定义tagSet结构体 #[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct TagSet { - #[serde(rename = "Tag")] - tag_map: HashMap, - is_object: bool, + pub tag_map: HashMap, + pub is_object: bool, } // 定义tagging结构体 #[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Tags { - #[serde(rename = "Tagging")] - xml_name: String, - #[serde(rename = "TagSet")] - tag_set: Option, + pub tag_set: TagSet, } impl Tags { + pub fn new(tag_map: HashMap, is_object: bool) -> Self { + Self { + tag_set: TagSet { tag_map, is_object }, + } + } pub fn marshal_msg(&self) -> Result> { let mut buf = Vec::new(); diff --git a/ecstore/src/config/common.rs b/ecstore/src/config/common.rs index 6dedfbed..62e53252 100644 --- a/ecstore/src/config/common.rs +++ b/ecstore/src/config/common.rs @@ -5,6 +5,7 @@ use crate::store_api::{HTTPRangeSpec, ObjectIO, ObjectInfo, ObjectOptions, PutOb use http::HeaderMap; use s3s::dto::StreamingBlob; use s3s::Body; +use tracing::warn; use super::error::ConfigError; diff --git a/ecstore/src/store.rs b/ecstore/src/store.rs index 321740e3..41bcba04 100644 --- a/ecstore/src/store.rs +++ b/ecstore/src/store.rs @@ -510,7 +510,7 @@ impl StorageAPI for ECStore { let mut meta = BucketMetadata::new(bucket); meta.save(self).await?; - bucket_metadata_sys_set(bucket, meta).await; + bucket_metadata_sys_set(bucket.to_string(), meta).await; // TODO: toObjectErr diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index dd1855f3..53607001 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -1,5 +1,8 @@ use bytes::BufMut; use bytes::Bytes; +use ecstore::bucket::get_bucket_metadata_sys; +use ecstore::bucket::metadata::BUCKET_TAGGING_CONFIG; +use ecstore::bucket::tags::Tags; use ecstore::bucket_meta::BucketMetadata; use ecstore::disk::error::DiskError; use ecstore::disk::RUSTFS_META_BUCKET; @@ -18,6 +21,7 @@ use ecstore::store_api::StorageAPI; use futures::pin_mut; use futures::{Stream, StreamExt}; use http::HeaderMap; +use log::warn; use s3s::dto::*; use s3s::s3_error; use s3s::Body; @@ -26,6 +30,7 @@ use s3s::S3ErrorCode; use s3s::S3Result; use s3s::S3; use s3s::{S3Request, S3Response}; +use std::collections::HashMap; use std::fmt::Debug; use std::str::FromStr; use transform_stream::AsyncTryStream; @@ -702,53 +707,67 @@ impl S3 for FS { })) .await?; - let 
layer = new_object_layer_fn(); - let lock = layer.read().await; - let store = lock - .as_ref() - .ok_or_else(|| S3Error::with_message(S3ErrorCode::InternalError, "Not init"))?; + let bucket_meta_sys_lock = get_bucket_metadata_sys().await; + let mut bucket_meta_sys = bucket_meta_sys_lock.write().await; - let meta_obj = try_!( - store - .get_object_reader( - RUSTFS_META_BUCKET, - BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), - HTTPRangeSpec::nil(), - Default::default(), - &ObjectOptions::default(), - ) - .await - ); - - let stream = meta_obj.stream; - - let mut data = vec![]; - pin_mut!(stream); - - while let Some(x) = stream.next().await { - let x = try_!(x); - data.put_slice(&x[..]); + let mut tag_map = HashMap::new(); + for tag in tagging.tag_set.iter() { + tag_map.insert(tag.key.clone(), tag.value.clone()); } - let mut meta = try_!(BucketMetadata::unmarshal_from(&data[..])); - if tagging.tag_set.is_empty() { - meta.tagging = None; - } else { - meta.tagging = Some(tagging.tag_set.into_iter().map(|x| (x.key, x.value)).collect()) - } + let tags = Tags::new(tag_map, false); - let data = try_!(meta.marshal_msg()); - let len = data.len(); - try_!( - store - .put_object( - RUSTFS_META_BUCKET, - BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), - PutObjReader::new(StreamingBlob::from(Body::from(data)), len), - &ObjectOptions::default(), - ) - .await - ); + let data = try_!(tags.marshal_msg()); + + let _updated = try_!(bucket_meta_sys.update(&bucket, BUCKET_TAGGING_CONFIG, data).await); + + // let layer = new_object_layer_fn(); + // let lock = layer.read().await; + // let store = lock + // .as_ref() + // .ok_or_else(|| S3Error::with_message(S3ErrorCode::InternalError, "Not init"))?; + + // let meta_obj = try_!( + // store + // .get_object_reader( + // RUSTFS_META_BUCKET, + // BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), + // HTTPRangeSpec::nil(), + // Default::default(), + // &ObjectOptions::default(), + // ) + // .await + // ); + + // let stream = meta_obj.stream; + + // let mut data = vec![]; + // pin_mut!(stream); + + // while let Some(x) = stream.next().await { + // let x = try_!(x); + // data.put_slice(&x[..]); + // } + + // let mut meta = try_!(BucketMetadata::unmarshal_from(&data[..])); + // if tagging.tag_set.is_empty() { + // meta.tagging = None; + // } else { + // meta.tagging = Some(tagging.tag_set.into_iter().map(|x| (x.key, x.value)).collect()) + // } + + // let data = try_!(meta.marshal_msg()); + // let len = data.len(); + // try_!( + // store + // .put_object( + // RUSTFS_META_BUCKET, + // BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), + // PutObjReader::new(StreamingBlob::from(Body::from(data)), len), + // &ObjectOptions::default(), + // ) + // .await + // ); Ok(S3Response::new(Default::default())) } @@ -764,15 +783,29 @@ impl S3 for FS { })) .await?; - let layer = new_object_layer_fn(); - let lock = layer.read().await; - let store = lock - .as_ref() - .ok_or_else(|| S3Error::with_message(S3ErrorCode::InternalError, "Not init"))?; + // let layer = new_object_layer_fn(); + // let lock = layer.read().await; + // let store = lock + // .as_ref() + // .ok_or_else(|| S3Error::with_message(S3ErrorCode::InternalError, "Not init"))?; - // let a = get_bucket_metadata_sys(); + let bucket_meta_sys_lock = get_bucket_metadata_sys().await; + let bucket_meta_sys = bucket_meta_sys_lock.read().await; + let tag_set: Vec = match bucket_meta_sys.get_tagging_config(&bucket).await { + Ok((tags, _)) => tags + .tag_set + .tag_map + 
.into_iter() + .map(|(key, value)| Tag { key, value }) + .collect(), + Err(err) => { + warn!("get_tagging_config err {:?}", &err); + // TODO: check not found + Vec::new() + } + }; - Ok(S3Response::new(GetBucketTaggingOutput { ..Default::default() })) + Ok(S3Response::new(GetBucketTaggingOutput { tag_set })) // let meta_obj = try_!( // store @@ -842,48 +875,59 @@ impl S3 for FS { })) .await?; - let layer = new_object_layer_fn(); - let lock = layer.read().await; - let store = lock - .as_ref() - .ok_or_else(|| S3Error::with_message(S3ErrorCode::InternalError, "Not init"))?; + let bucket_meta_sys_lock = get_bucket_metadata_sys().await; + let mut bucket_meta_sys = bucket_meta_sys_lock.write().await; - let meta_obj = try_!( - store - .get_object_reader( - RUSTFS_META_BUCKET, - BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), - HTTPRangeSpec::nil(), - Default::default(), - &ObjectOptions::default(), - ) - .await - ); + let tag_map = HashMap::new(); - let stream = meta_obj.stream; + let tags = Tags::new(tag_map, false); - let mut data = vec![]; - pin_mut!(stream); + let data = try_!(tags.marshal_msg()); - while let Some(x) = stream.next().await { - let x = try_!(x); - data.put_slice(&x[..]); - } + let _updated = try_!(bucket_meta_sys.update(&bucket, BUCKET_TAGGING_CONFIG, data).await); - let mut meta = try_!(BucketMetadata::unmarshal_from(&data[..])); - meta.tagging = None; - let data = try_!(meta.marshal_msg()); - let len = data.len(); - try_!( - store - .put_object( - RUSTFS_META_BUCKET, - BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), - PutObjReader::new(StreamingBlob::from(Body::from(data)), len), - &ObjectOptions::default(), - ) - .await - ); + // let layer = new_object_layer_fn(); + // let lock = layer.read().await; + // let store = lock + // .as_ref() + // .ok_or_else(|| S3Error::with_message(S3ErrorCode::InternalError, "Not init"))?; + + // let meta_obj = try_!( + // store + // .get_object_reader( + // RUSTFS_META_BUCKET, + // BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), + // HTTPRangeSpec::nil(), + // Default::default(), + // &ObjectOptions::default(), + // ) + // .await + // ); + + // let stream = meta_obj.stream; + + // let mut data = vec![]; + // pin_mut!(stream); + + // while let Some(x) = stream.next().await { + // let x = try_!(x); + // data.put_slice(&x[..]); + // } + + // let mut meta = try_!(BucketMetadata::unmarshal_from(&data[..])); + // meta.tagging = None; + // let data = try_!(meta.marshal_msg()); + // let len = data.len(); + // try_!( + // store + // .put_object( + // RUSTFS_META_BUCKET, + // BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), + // PutObjReader::new(StreamingBlob::from(Body::from(data)), len), + // &ObjectOptions::default(), + // ) + // .await + // ); Ok(S3Response::new(DeleteBucketTaggingOutput {})) } From cc7b27325e8794322b8f2f2b0554630a0047e50a Mon Sep 17 00:00:00 2001 From: weisd Date: Sat, 5 Oct 2024 09:55:09 +0800 Subject: [PATCH 06/20] base bucketmetadata_sys done --- rustfs/src/storage/ecfs.rs | 150 ------------------------------------- 1 file changed, 150 deletions(-) diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 53607001..2cbb3dcb 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -721,54 +721,6 @@ impl S3 for FS { let _updated = try_!(bucket_meta_sys.update(&bucket, BUCKET_TAGGING_CONFIG, data).await); - // let layer = new_object_layer_fn(); - // let lock = layer.read().await; - // let store = lock - // .as_ref() - // 
.ok_or_else(|| S3Error::with_message(S3ErrorCode::InternalError, "Not init"))?; - - // let meta_obj = try_!( - // store - // .get_object_reader( - // RUSTFS_META_BUCKET, - // BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), - // HTTPRangeSpec::nil(), - // Default::default(), - // &ObjectOptions::default(), - // ) - // .await - // ); - - // let stream = meta_obj.stream; - - // let mut data = vec![]; - // pin_mut!(stream); - - // while let Some(x) = stream.next().await { - // let x = try_!(x); - // data.put_slice(&x[..]); - // } - - // let mut meta = try_!(BucketMetadata::unmarshal_from(&data[..])); - // if tagging.tag_set.is_empty() { - // meta.tagging = None; - // } else { - // meta.tagging = Some(tagging.tag_set.into_iter().map(|x| (x.key, x.value)).collect()) - // } - - // let data = try_!(meta.marshal_msg()); - // let len = data.len(); - // try_!( - // store - // .put_object( - // RUSTFS_META_BUCKET, - // BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), - // PutObjReader::new(StreamingBlob::from(Body::from(data)), len), - // &ObjectOptions::default(), - // ) - // .await - // ); - Ok(S3Response::new(Default::default())) } @@ -783,12 +735,6 @@ impl S3 for FS { })) .await?; - // let layer = new_object_layer_fn(); - // let lock = layer.read().await; - // let store = lock - // .as_ref() - // .ok_or_else(|| S3Error::with_message(S3ErrorCode::InternalError, "Not init"))?; - let bucket_meta_sys_lock = get_bucket_metadata_sys().await; let bucket_meta_sys = bucket_meta_sys_lock.read().await; let tag_set: Vec = match bucket_meta_sys.get_tagging_config(&bucket).await { @@ -806,59 +752,6 @@ impl S3 for FS { }; Ok(S3Response::new(GetBucketTaggingOutput { tag_set })) - - // let meta_obj = try_!( - // store - // .get_object_reader( - // RUSTFS_META_BUCKET, - // BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), - // HTTPRangeSpec::nil(), - // Default::default(), - // &ObjectOptions::default(), - // ) - // .await - // ); - - // let stream = meta_obj.stream; - - // let mut data = vec![]; - // pin_mut!(stream); - - // while let Some(x) = stream.next().await { - // let x = try_!(x); - // data.put_slice(&x[..]); - // } - - // if data.is_empty() { - // return Err({ - // let mut err = S3Error::with_message(S3ErrorCode::Custom("NoSuchTagSet".into()), "The TagSet does not exist"); - // err.set_status_code("404".try_into().unwrap()); - // err - // }); - // } - - // warn!("bm: 1111"); - - // let meta = try_!(BucketMetadata::unmarshal_from(&data[..])); - - // warn!("bm 33333"); - // if meta.tagging.is_none() { - // return Err({ - // let mut err = S3Error::with_message(S3ErrorCode::Custom("NoSuchTagSet".into()), "The TagSet does not exist"); - // err.set_status_code("404".try_into().unwrap()); - // err - // }); - // } - - // warn!("bm:4444"); - // Ok(S3Response::new(GetBucketTaggingOutput { - // tag_set: meta - // .tagging - // .unwrap() - // .into_iter() - // .map(|(key, value)| Tag { key, value }) - // .collect(), - // })) } #[tracing::instrument(level = "debug", skip(self))] @@ -886,49 +779,6 @@ impl S3 for FS { let _updated = try_!(bucket_meta_sys.update(&bucket, BUCKET_TAGGING_CONFIG, data).await); - // let layer = new_object_layer_fn(); - // let lock = layer.read().await; - // let store = lock - // .as_ref() - // .ok_or_else(|| S3Error::with_message(S3ErrorCode::InternalError, "Not init"))?; - - // let meta_obj = try_!( - // store - // .get_object_reader( - // RUSTFS_META_BUCKET, - // BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), - // 
HTTPRangeSpec::nil(), - // Default::default(), - // &ObjectOptions::default(), - // ) - // .await - // ); - - // let stream = meta_obj.stream; - - // let mut data = vec![]; - // pin_mut!(stream); - - // while let Some(x) = stream.next().await { - // let x = try_!(x); - // data.put_slice(&x[..]); - // } - - // let mut meta = try_!(BucketMetadata::unmarshal_from(&data[..])); - // meta.tagging = None; - // let data = try_!(meta.marshal_msg()); - // let len = data.len(); - // try_!( - // store - // .put_object( - // RUSTFS_META_BUCKET, - // BucketMetadata::new(bucket.as_str()).save_file_path().as_str(), - // PutObjReader::new(StreamingBlob::from(Body::from(data)), len), - // &ObjectOptions::default(), - // ) - // .await - // ); - Ok(S3Response::new(DeleteBucketTaggingOutput {})) } From 9cf7dc128e4c7657f08ceb6776536c8c5ee358fb Mon Sep 17 00:00:00 2001 From: junxiang Mu <1948535941@qq.com> Date: Tue, 8 Oct 2024 14:11:28 +0800 Subject: [PATCH 07/20] use asm features Signed-off-by: junxiang Mu <1948535941@qq.com> --- Cargo.lock | 12 ++ .../src/generated/proto_gen/node_service.rs | 173 ++++++++---------- ecstore/Cargo.toml | 4 +- 3 files changed, 91 insertions(+), 98 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6f5db919..380a3a65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1502,6 +1502,8 @@ version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7263373d500d4d4f505d43a2a662d475a894aa94503a1ee28e9188b5f3960d4f" dependencies = [ + "cc", + "libc", "libm", "lru", "parking_lot", @@ -1838,6 +1840,16 @@ dependencies = [ "cfg-if", "cpufeatures", "digest", + "sha2-asm", +] + +[[package]] +name = "sha2-asm" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" +dependencies = [ + "cc", ] [[package]] diff --git a/common/protos/src/generated/proto_gen/node_service.rs b/common/protos/src/generated/proto_gen/node_service.rs index 054d023d..1a4f32e6 100644 --- a/common/protos/src/generated/proto_gen/node_service.rs +++ b/common/protos/src/generated/proto_gen/node_service.rs @@ -580,7 +580,13 @@ pub struct GenerallyLockResponse { } /// Generated client implementations. 
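// NOTE (editorial sketch): the regenerated stubs below replace
// tonic::Status::new(tonic::Code::Unknown, ..) with the equivalent
// tonic::Status::unknown(..) constructor and spell out std::marker::Send
// bounds; behavior is unchanged. A hypothetical call through such a generated
// client follows -- the endpoint address and the `PingRequest` type name are
// assumptions based on the surrounding diff, not a verified API of this crate:
async fn ping_once() -> Result<(), Box<dyn std::error::Error>> {
    use tonic::transport::Channel;

    // Placeholder address; real deployments would use the node's endpoint.
    let channel = Channel::from_static("http://127.0.0.1:50051").connect().await?;
    let mut client = node_service_client::NodeServiceClient::new(channel);
    let resp = client.ping(PingRequest::default()).await?; // PingRequest assumed
    println!("ping response: {:?}", resp.into_inner());
    Ok(())
}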
pub mod node_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; #[derive(Debug, Clone)] @@ -602,8 +608,8 @@ pub mod node_service_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -628,7 +634,7 @@ pub mod node_service_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { NodeServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -672,8 +678,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -697,8 +702,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -722,8 +726,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -747,8 +750,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -772,8 +774,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -797,8 +798,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -822,8 +822,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -844,8 +843,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -869,8 +867,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -894,8 +891,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -916,8 +912,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -941,8 +936,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -967,8 +961,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", 
e.into()), ) })?; @@ -992,8 +985,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1017,8 +1009,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1042,8 +1033,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1067,8 +1057,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1092,8 +1081,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1117,8 +1105,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1142,8 +1129,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1167,8 +1153,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1192,8 +1177,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1217,8 +1201,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1242,8 +1225,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1264,8 +1246,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1289,8 +1270,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1314,8 +1294,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1339,8 +1318,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1364,8 +1342,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1389,8 +1366,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was 
not ready: {}", e.into()), ) })?; @@ -1414,8 +1390,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1439,8 +1414,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1464,8 +1438,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1489,8 +1462,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1514,8 +1486,7 @@ pub mod node_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -1532,11 +1503,17 @@ pub mod node_service_client { } /// Generated server implementations. pub mod node_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with NodeServiceServer. #[async_trait] - pub trait NodeService: Send + Sync + 'static { + pub trait NodeService: std::marker::Send + std::marker::Sync + 'static { /// -------------------------------meta service-------------------------- async fn ping( &self, @@ -1607,7 +1584,7 @@ pub mod node_service_server { type WriteStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, > - + Send + + std::marker::Send + 'static; async fn write_stream( &self, @@ -1620,7 +1597,7 @@ pub mod node_service_server { type ReadAtStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, > - + Send + + std::marker::Send + 'static; /// rpc Append(AppendRequest) returns (AppendResponse) {}; async fn read_at( @@ -1774,14 +1751,14 @@ pub mod node_service_server { >; } #[derive(Debug)] - pub struct NodeServiceServer { + pub struct NodeServiceServer { inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - impl NodeServiceServer { + impl NodeServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } @@ -1835,8 +1812,8 @@ pub mod node_service_server { impl tonic::codegen::Service> for NodeServiceServer where T: NodeService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -3428,23 +3405,25 @@ pub mod node_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", tonic::Code::Unimplemented as i32) - .header( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ) - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as 
i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl<T: NodeService> Clone for NodeServiceServer<T> { + impl<T> Clone for NodeServiceServer<T> { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -3456,7 +3435,9 @@ pub mod node_service_server { } } } - impl<T: NodeService> tonic::server::NamedService for NodeServiceServer<T> { - const NAME: &'static str = "node_service.NodeService"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "node_service.NodeService"; + impl<T> tonic::server::NamedService for NodeServiceServer<T> { + const NAME: &'static str = SERVICE_NAME; } }
diff --git a/ecstore/Cargo.toml b/ecstore/Cargo.toml index d3123277..7228d49d 100644 --- a/ecstore/Cargo.toml +++ b/ecstore/Cargo.toml @@ -23,7 +23,7 @@ tracing-error.workspace = true http.workspace = true url.workspace = true uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] } -reed-solomon-erasure = "6.0.0" +reed-solomon-erasure = { version = "6.0.0", features = [ "simd-accel" ] } transform-stream = "0.3.0" lazy_static.workspace = true lock.workspace = true @@ -37,7 +37,7 @@ s3s = "0.10.0" crc32fast = "1.4.2" siphasher = "1.0.1" base64-simd = "0.8.0" -sha2 = "0.10.8" +sha2 = { version = "0.10.8", features = ["asm"] } hex-simd = "0.8.0" path-clean = "1.0.1" tokio = { workspace = true, features = ["io-util", "sync"] }
From 38db5a1d4913003f308cc0c9b84a5c1bd950c202 Mon Sep 17 00:00:00 2001 From: loverustfs <155562731+loverustfs@users.noreply.github.com> Date: Tue, 8 Oct 2024 15:47:07 +0800 Subject: [PATCH 08/20] Update e2e.yml --- .github/workflows/e2e.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 74971f64..e9fe3937 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -10,7 +10,7 @@ env: jobs: build: - timeout-minutes: 10 + timeout-minutes: 20 runs-on: ubuntu-latest strategy: matrix:
From 47afe69cf0ad6290d063a81d7a77e37e929d835c Mon Sep 17 00:00:00 2001 From: loverustfs <155562731+loverustfs@users.noreply.github.com> Date: Tue, 8 Oct 2024 16:10:11 +0800 Subject: [PATCH 09/20] Update e2e.yml --- .github/workflows/e2e.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index e9fe3937..4c39fc63 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -10,7 +10,7 @@ env: jobs: build: - timeout-minutes: 20 + timeout-minutes: 30 runs-on: ubuntu-latest strategy: matrix:
From ae2a901725e32a5d955f95841aa680f852ab9212 Mon Sep 17 00:00:00 2001 From: mirschao <119988085+mirschao@users.noreply.github.com> Date: Tue, 8 Oct 2024 22:22:09 +0800 Subject: [PATCH 10/20] Update e2e.yml: fix server not listening on port 9000 --- .github/workflows/e2e.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 4c39fc63..8f129acd 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -70,7 +70,7 @@ jobs: - name: run fs working-directory: . 
run: | - make e2e-server > /dev/null & + nohup make e2e-server >/dev/null & make probe-e2e - name: e2e test From 540878d2a950390e6ddc540191e1bb64f0ec3966 Mon Sep 17 00:00:00 2001 From: mirschao <119988085+mirschao@users.noreply.github.com> Date: Tue, 8 Oct 2024 22:41:35 +0800 Subject: [PATCH 11/20] Update e2e.yml --- .github/workflows/e2e.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 8f129acd..156fda4c 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -67,10 +67,15 @@ jobs: - name: checkout uses: actions/checkout@v4 - - name: run fs + - name: run fs start working-directory: . run: | nohup make e2e-server >/dev/null & + sleep 180 + + - name: run fs test + working-directory: . + run: | make probe-e2e - name: e2e test From 9aaab22f1afc19fc14b28278531aef49bf30812a Mon Sep 17 00:00:00 2001 From: mirschao <119988085+mirschao@users.noreply.github.com> Date: Tue, 8 Oct 2024 22:47:32 +0800 Subject: [PATCH 12/20] Update e2e.yml --- .github/workflows/e2e.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 156fda4c..f614fc0e 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -70,8 +70,7 @@ jobs: - name: run fs start working-directory: . run: | - nohup make e2e-server >/dev/null & - sleep 180 + make e2e-server - name: run fs test working-directory: . From b95d0238e2dc232ae0da8a8b99363a5a47334522 Mon Sep 17 00:00:00 2001 From: mirschao <119988085+mirschao@users.noreply.github.com> Date: Tue, 8 Oct 2024 23:03:58 +0800 Subject: [PATCH 13/20] Update e2e.yml --- .github/workflows/e2e.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index f614fc0e..deecd5f5 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -70,6 +70,7 @@ jobs: - name: run fs start working-directory: . 
run: | + ls -l make e2e-server - name: run fs test From f62d81314c01f8e5263f58aa57bb6778f6413480 Mon Sep 17 00:00:00 2001 From: mirschao <119988085+mirschao@users.noreply.github.com> Date: Tue, 8 Oct 2024 23:10:12 +0800 Subject: [PATCH 14/20] Update run.sh --- scripts/run.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/run.sh b/scripts/run.sh index 1a7b2073..6da15bad 100755 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -24,4 +24,5 @@ fi # --domain-name 127.0.0.1:9010 \ # "$DATA_DIR_ARG" -cargo run "$DATA_DIR_ARG" \ No newline at end of file +# cargo run "$DATA_DIR_ARG" +cargo build ./target/volume/test From 4c22dd2c82026aca4b2d61772bb512a26018c3c6 Mon Sep 17 00:00:00 2001 From: mirschao <119988085+mirschao@users.noreply.github.com> Date: Tue, 8 Oct 2024 23:11:20 +0800 Subject: [PATCH 15/20] Update run.sh --- scripts/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run.sh b/scripts/run.sh index 6da15bad..49e6d032 100755 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -25,4 +25,4 @@ fi # "$DATA_DIR_ARG" # cargo run "$DATA_DIR_ARG" -cargo build ./target/volume/test +cargo run ./target/volume/test From 7cc8a5a405d147305024b9bf801c47d98c3888fc Mon Sep 17 00:00:00 2001 From: mirschao <119988085+mirschao@users.noreply.github.com> Date: Tue, 8 Oct 2024 23:17:11 +0800 Subject: [PATCH 16/20] Update e2e.yml --- .github/workflows/e2e.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index deecd5f5..6d2b7fc1 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -70,8 +70,7 @@ jobs: - name: run fs start working-directory: . run: | - ls -l - make e2e-server + nohup make e2e-server & - name: run fs test working-directory: . 
From c9423038dab3223c45701cf06c92f6b45a3d781b Mon Sep 17 00:00:00 2001 From: weisd Date: Wed, 9 Oct 2024 15:06:50 +0800 Subject: [PATCH 17/20] add bucket sys marshal func --- ecstore/src/bucket/encryption/mod.rs | 23 ++- ecstore/src/bucket/error.rs | 14 ++ ecstore/src/bucket/event/mod.rs | 40 ++-- ecstore/src/bucket/lifecycle/lifecycle.rs | 19 +- ecstore/src/bucket/metadata.rs | 65 +++++++ ecstore/src/bucket/metadata_sys.rs | 202 ++++++++++++++++++++- ecstore/src/bucket/objectlock/mod.rs | 17 ++ ecstore/src/bucket/policy/bucket_policy.rs | 17 ++ ecstore/src/bucket/quota/mod.rs | 17 ++ ecstore/src/bucket/replication/mod.rs | 19 +- ecstore/src/bucket/target/mod.rs | 27 ++- ecstore/src/bucket/versioning/mod.rs | 21 ++- rustfs/src/storage/ecfs.rs | 4 - 13 files changed, 456 insertions(+), 29 deletions(-)
diff --git a/ecstore/src/bucket/encryption/mod.rs b/ecstore/src/bucket/encryption/mod.rs index 20e7d315..1aa3c32f 100644 --- a/ecstore/src/bucket/encryption/mod.rs +++ b/ecstore/src/bucket/encryption/mod.rs @@ -1,3 +1,5 @@ +use crate::error::Result; +use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; // Algorithm enum type @@ -21,22 +23,37 @@ impl std::str::FromStr for Algorithm { } // EncryptionAction struct -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct EncryptionAction { algorithm: Option<Algorithm>, master_key_id: Option<String>, } // Rule struct -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Rule { default_encryption_action: EncryptionAction, } // BucketSSEConfig struct -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct BucketSSEConfig { xml_ns: String, xml_name: String, rules: Vec<Rule>, } + +impl BucketSSEConfig { + pub fn marshal_msg(&self) -> Result<Vec<u8>> { + let mut buf = Vec::new(); + + self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?; + + Ok(buf) + } + + pub fn unmarshal(buf: &[u8]) -> Result<BucketSSEConfig> { + let t: BucketSSEConfig = rmp_serde::from_slice(buf)?; + Ok(t) + } +}
diff --git a/ecstore/src/bucket/error.rs b/ecstore/src/bucket/error.rs index 781f7f11..a18a295a 100644 --- a/ecstore/src/bucket/error.rs +++ b/ecstore/src/bucket/error.rs @@ -2,4 +2,18 @@ pub enum BucketMetadataError { #[error("tagging not found")] TaggingNotFound, + #[error("bucket policy not found")] + BucketPolicyNotFound, + #[error("bucket object lock not found")] + BucketObjectLockConfigNotFound, + #[error("bucket lifecycle not found")] + BucketLifecycleNotFound, + #[error("bucket SSE config not found")] + BucketSSEConfigNotFound, + #[error("bucket quota config not found")] + BucketQuotaConfigNotFound, + #[error("bucket replication config not found")] + BucketReplicationConfigNotFound, + #[error("bucket remote target not found")] + BucketRemoteTargetNotFound, }
diff --git a/ecstore/src/bucket/event/mod.rs b/ecstore/src/bucket/event/mod.rs index d1cb49fb..8f8a96bb 100644 --- a/ecstore/src/bucket/event/mod.rs +++ b/ecstore/src/bucket/event/mod.rs @@ -1,11 +1,12 @@ mod name; +use crate::error::Result; +use name::Name; +use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; -use name::Name; - // Common struct -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] struct Common { pub id: String, pub filter: S3Key, } // Queue struct -#[derive(Debug, Deserialize, Serialize, Default,Clone)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone)] struct Queue { pub common: Common, pub arn: ARN, } // ARN struct -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct ARN { pub target_id: TargetID, pub region: String, } // TargetID struct -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct TargetID { pub id: String, pub name: String, } // FilterRule struct -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct FilterRule { pub name: String, pub value: String, } // FilterRuleList struct -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct FilterRuleList { pub rules: Vec<FilterRule>, } // S3Key struct -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct S3Key { pub rule_list: FilterRuleList, } -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Lambda { arn: String, } // Topic struct -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Topic { arn: String, } -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Config { queue_list: Vec<Queue>, lambda_list: Vec<Lambda>, topic_list: Vec<Topic>, } + +impl Config { + pub fn marshal_msg(&self) -> Result<Vec<u8>> { + let mut buf = Vec::new(); + + self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?; + + Ok(buf) + } + + pub fn unmarshal(buf: &[u8]) -> Result<Config> { + let t: Config = rmp_serde::from_slice(buf)?; + Ok(t) + } +}
diff --git a/ecstore/src/bucket/lifecycle/lifecycle.rs b/ecstore/src/bucket/lifecycle/lifecycle.rs index e4f2fa6f..109cad29 100644 --- a/ecstore/src/bucket/lifecycle/lifecycle.rs +++ b/ecstore/src/bucket/lifecycle/lifecycle.rs @@ -1,9 +1,26 @@ use super::rule::Rule; +use crate::error::Result; +use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; use time::OffsetDateTime; -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Lifecycle { pub rules: Vec<Rule>, pub expiry_updated_at: Option<OffsetDateTime>, } + +impl Lifecycle { + pub fn marshal_msg(&self) -> Result<Vec<u8>> { + let mut buf = Vec::new(); + + self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?; + + Ok(buf) + } + + pub fn unmarshal(buf: &[u8]) -> Result<Lifecycle> { + let t: Lifecycle = rmp_serde::from_slice(buf)?; + Ok(t) + } +}
diff --git a/ecstore/src/bucket/metadata.rs b/ecstore/src/bucket/metadata.rs index 5f6dd388..8bb3a73c 100644 --- a/ecstore/src/bucket/metadata.rs +++ b/ecstore/src/bucket/metadata.rs @@ -190,9 +190,44 @@ impl BucketMetadata { } fn default_timestamps(&mut self) { + if self.policy_config_updated_at == OffsetDateTime::UNIX_EPOCH { + self.policy_config_updated_at = self.created + } + if self.encryption_config_updated_at == OffsetDateTime::UNIX_EPOCH { + self.encryption_config_updated_at = self.created + } + if self.tagging_config_updated_at == OffsetDateTime::UNIX_EPOCH { self.tagging_config_updated_at = self.created } + if self.object_lock_config_updated_at == OffsetDateTime::UNIX_EPOCH { + self.object_lock_config_updated_at = self.created + } + if self.quota_config_updated_at == OffsetDateTime::UNIX_EPOCH {
+ self.quota_config_updated_at = self.created + } + + if self.replication_config_updated_at == OffsetDateTime::UNIX_EPOCH { + self.replication_config_updated_at = self.created + } + + if self.versioning_config_updated_at == OffsetDateTime::UNIX_EPOCH { + self.versioning_config_updated_at = self.created + } + + if self.lifecycle_config_updated_at == OffsetDateTime::UNIX_EPOCH { + self.lifecycle_config_updated_at = self.created + } + if self.notification_config_updated_at == OffsetDateTime::UNIX_EPOCH { + self.notification_config_updated_at = self.created + } + + if self.bucket_targets_config_updated_at == OffsetDateTime::UNIX_EPOCH { + self.bucket_targets_config_updated_at = self.created + } + if self.bucket_targets_config_meta_updated_at == OffsetDateTime::UNIX_EPOCH { + self.bucket_targets_config_meta_updated_at = self.created + } } pub fn update_config(&mut self, config_file: &str, data: Vec<u8>) -> Result { @@ -264,9 +299,39 @@ impl BucketMetadata { } fn parse_all_configs(&mut self, _api: &ECStore) -> Result<()> { + if !self.policy_config_json.is_empty() { + self.policy_config = Some(BucketPolicy::unmarshal(&self.policy_config_json)?); + } + if !self.notification_config_xml.is_empty() { + self.notification_config = Some(event::Config::unmarshal(&self.notification_config_xml)?); + } + if !self.lifecycle_config_xml.is_empty() { + self.lifecycle_config = Some(Lifecycle::unmarshal(&self.lifecycle_config_xml)?); + } + + if !self.object_lock_config_xml.is_empty() { + self.object_lock_config = Some(objectlock::Config::unmarshal(&self.object_lock_config_xml)?); + } + if !self.versioning_config_xml.is_empty() { + self.versioning_config = Some(Versioning::unmarshal(&self.versioning_config_xml)?); + } + if !self.encryption_config_xml.is_empty() { + self.sse_config = Some(BucketSSEConfig::unmarshal(&self.encryption_config_xml)?); + } if !self.tagging_config_xml.is_empty() { self.tagging_config = Some(tags::Tags::unmarshal(&self.tagging_config_xml)?); } + if !self.quota_config_json.is_empty() { + self.quota_config = Some(BucketQuota::unmarshal(&self.quota_config_json)?); + } + if !self.replication_config_xml.is_empty() { + self.replication_config = Some(replication::Config::unmarshal(&self.replication_config_xml)?); + } + if !self.bucket_targets_config_json.is_empty() { + self.bucket_target_config = Some(BucketTargets::unmarshal(&self.bucket_targets_config_json)?); + } else { + self.bucket_target_config = Some(BucketTargets::default()) + } Ok(()) }
diff --git a/ecstore/src/bucket/metadata_sys.rs b/ecstore/src/bucket/metadata_sys.rs index 2e0711ce..f295809d 100644 --- a/ecstore/src/bucket/metadata_sys.rs +++ b/ecstore/src/bucket/metadata_sys.rs @@ -16,8 +16,13 @@ use time::OffsetDateTime; use tokio::sync::RwLock; use tracing::{error, info, warn}; +use super::encryption::BucketSSEConfig; +use super::lifecycle::lifecycle::Lifecycle; use super::metadata::{load_bucket_metadata, BucketMetadata}; -use super::tags; +use super::policy::bucket_policy::BucketPolicy; +use super::quota::BucketQuota; +use super::target::BucketTargets; +use super::{event, objectlock, policy, replication, tags, versioning}; lazy_static! 
{ static ref GLOBAL_BucketMetadataSys: Arc<RwLock<BucketMetadataSys>> = Arc::new(RwLock::new(BucketMetadataSys::new())); @@ -236,6 +241,42 @@ impl BucketMetadataSys { } } + pub async fn get_versioning_config(&self, bucket: &str) -> Result<(versioning::Versioning, OffsetDateTime)> { + let bm = match self.get_config(bucket).await { + Ok((res, _)) => res, + Err(err) => { + warn!("get_versioning_config err {:?}", &err); + if config::error::is_not_found(&err) { + return Ok((versioning::Versioning::default(), OffsetDateTime::UNIX_EPOCH)); + } else { + return Err(err); + } + } + }; + + Ok((bm.versioning_config.unwrap_or_default(), bm.versioning_config_updated_at)) + } + + pub async fn get_bucket_policy(&self, bucket: &str) -> Result<(BucketPolicy, OffsetDateTime)> { + let bm = match self.get_config(bucket).await { + Ok((res, _)) => res, + Err(err) => { + warn!("get_bucket_policy err {:?}", &err); + if config::error::is_not_found(&err) { + return Err(Error::new(BucketMetadataError::BucketPolicyNotFound)); + } else { + return Err(err); + } + } + }; + + if let Some(config) = bm.policy_config { + Ok((config, bm.policy_config_updated_at)) + } else { + Err(Error::new(BucketMetadataError::BucketPolicyNotFound)) + } + } + pub async fn get_tagging_config(&self, bucket: &str) -> Result<(tags::Tags, OffsetDateTime)> { let bm = match self.get_config(bucket).await { Ok((res, _)) => res, @@ -255,4 +296,163 @@ impl BucketMetadataSys { Err(Error::new(BucketMetadataError::TaggingNotFound)) } } + + pub async fn get_object_lock_config(&self, bucket: &str) -> Result<(objectlock::Config, OffsetDateTime)> { + let bm = match self.get_config(bucket).await { + Ok((res, _)) => res, + Err(err) => { + warn!("get_object_lock_config err {:?}", &err); + if config::error::is_not_found(&err) { + return Err(Error::new(BucketMetadataError::BucketObjectLockConfigNotFound)); + } else { + return Err(err); + } + } + }; + + if let Some(config) = bm.object_lock_config { + Ok((config, bm.object_lock_config_updated_at)) + } else { + Err(Error::new(BucketMetadataError::BucketObjectLockConfigNotFound)) + } + } + + pub async fn get_lifecycle_config(&self, bucket: &str) -> Result<(Lifecycle, OffsetDateTime)> { + let bm = match self.get_config(bucket).await { + Ok((res, _)) => res, + Err(err) => { + warn!("get_lifecycle_config err {:?}", &err); + if config::error::is_not_found(&err) { + return Err(Error::new(BucketMetadataError::BucketLifecycleNotFound)); + } else { + return Err(err); + } + } + }; + + if let Some(config) = bm.lifecycle_config { + if config.rules.is_empty() { + Err(Error::new(BucketMetadataError::BucketLifecycleNotFound)) + } else { + Ok((config, bm.lifecycle_config_updated_at)) + } + } else { + Err(Error::new(BucketMetadataError::BucketLifecycleNotFound)) + } + } + + pub async fn get_notification_config(&self, bucket: &str) -> Result<Option<event::Config>> { + let bm = match self.get_config(bucket).await { + Ok((bm, _)) => bm.notification_config, + Err(err) => { + warn!("get_notification_config err {:?}", &err); + if config::error::is_not_found(&err) { + None + } else { + return Err(err); + } + } + }; + + Ok(bm) + } + + pub async fn get_sse_config(&self, bucket: &str) -> Result<(BucketSSEConfig, OffsetDateTime)> { + let bm = match self.get_config(bucket).await { + Ok((res, _)) => res, + Err(err) => { + warn!("get_sse_config err {:?}", &err); + if config::error::is_not_found(&err) { + return Err(Error::new(BucketMetadataError::BucketSSEConfigNotFound)); + } else { + return Err(err); + } + } + }; + + if let Some(config) = bm.sse_config { + Ok((config, bm.encryption_config_updated_at))
+ } else { + Err(Error::new(BucketMetadataError::BucketSSEConfigNotFound)) + } + } + + pub async fn created_at(&self, bucket: &str) -> Result<OffsetDateTime> { + let bm = match self.get_config(bucket).await { + Ok((bm, _)) => bm.created, + Err(err) => { + return Err(err); + } + }; + + Ok(bm) + } + + pub async fn get_quota_config(&self, bucket: &str) -> Result<(BucketQuota, OffsetDateTime)> { + let bm = match self.get_config(bucket).await { + Ok((res, _)) => res, + Err(err) => { + warn!("get_quota_config err {:?}", &err); + if config::error::is_not_found(&err) { + return Err(Error::new(BucketMetadataError::BucketQuotaConfigNotFound)); + } else { + return Err(err); + } + } + }; + + if let Some(config) = bm.quota_config { + Ok((config, bm.quota_config_updated_at)) + } else { + Err(Error::new(BucketMetadataError::BucketQuotaConfigNotFound)) + } + } + + pub async fn get_replication_config(&self, bucket: &str) -> Result<(replication::Config, OffsetDateTime)> { + let (bm, reload) = match self.get_config(bucket).await { + Ok(res) => res, + Err(err) => { + warn!("get_replication_config err {:?}", &err); + if config::error::is_not_found(&err) { + return Err(Error::new(BucketMetadataError::BucketReplicationConfigNotFound)); + } else { + return Err(err); + } + } + }; + + if let Some(config) = bm.replication_config { + if reload { + // TODO: globalBucketTargetSys + } + + Ok((config, bm.replication_config_updated_at)) + } else { + Err(Error::new(BucketMetadataError::BucketReplicationConfigNotFound)) + } + } + + pub async fn get_bucket_targets_config(&self, bucket: &str) -> Result<BucketTargets> { + let (bm, reload) = match self.get_config(bucket).await { + Ok(res) => res, + Err(err) => { + warn!("get_bucket_targets_config err {:?}", &err); + if config::error::is_not_found(&err) { + return Err(Error::new(BucketMetadataError::BucketRemoteTargetNotFound)); + } else { + return Err(err); + } + } + }; + + if let Some(config) = bm.bucket_target_config { + if reload { + // TODO: globalBucketTargetSys + } + + Ok(config) + } else { + Err(Error::new(BucketMetadataError::BucketRemoteTargetNotFound)) + } + } }
diff --git a/ecstore/src/bucket/objectlock/mod.rs b/ecstore/src/bucket/objectlock/mod.rs index 74fa0f3b..dcd27332 100644 --- a/ecstore/src/bucket/objectlock/mod.rs +++ b/ecstore/src/bucket/objectlock/mod.rs @@ -1,3 +1,5 @@ +use crate::error::Result; +use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; #[derive(Debug, Deserialize, Serialize, Default, PartialEq, Eq, Hash, Clone)] @@ -37,3 +39,18 @@ pub struct Config { pub object_lock_enabled: String, pub rule: Option, } + +impl Config { + pub fn marshal_msg(&self) -> Result<Vec<u8>> { + let mut buf = Vec::new(); + + self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?; + + Ok(buf) + } + + pub fn unmarshal(buf: &[u8]) -> Result<Config> { + let t: Config = rmp_serde::from_slice(buf)?; + Ok(t) + } +}
diff --git a/ecstore/src/bucket/policy/bucket_policy.rs b/ecstore/src/bucket/policy/bucket_policy.rs index b769ec66..512bf8a8 100644 --- a/ecstore/src/bucket/policy/bucket_policy.rs +++ b/ecstore/src/bucket/policy/bucket_policy.rs @@ -1,3 +1,5 @@ +use crate::error::Result; +use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; use std::collections::HashMap; @@ -38,3 +40,18 @@ pub struct BucketPolicy { pub version: String, pub statements: Vec, } + +impl BucketPolicy { + pub fn marshal_msg(&self) -> Result<Vec<u8>> { + let mut buf = Vec::new(); + + self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?;
+ + Ok(buf) + } + + pub fn unmarshal(buf: &[u8]) -> Result<BucketPolicy> { + let t: BucketPolicy = rmp_serde::from_slice(buf)?; + Ok(t) + } +}
diff --git a/ecstore/src/bucket/quota/mod.rs b/ecstore/src/bucket/quota/mod.rs index 12c50dc9..a7753d85 100644 --- a/ecstore/src/bucket/quota/mod.rs +++ b/ecstore/src/bucket/quota/mod.rs @@ -1,3 +1,5 @@ +use crate::error::Result; +use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; // QuotaType enum type @@ -19,3 +21,18 @@ pub struct BucketQuota { quota_type: Option<QuotaType>, } + +impl BucketQuota { + pub fn marshal_msg(&self) -> Result<Vec<u8>> { + let mut buf = Vec::new(); + + self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?; + + Ok(buf) + } + + pub fn unmarshal(buf: &[u8]) -> Result<BucketQuota> { + let t: BucketQuota = rmp_serde::from_slice(buf)?; + Ok(t) + } +}
diff --git a/ecstore/src/bucket/replication/mod.rs b/ecstore/src/bucket/replication/mod.rs index a50bf7c6..a54e8c5d 100644 --- a/ecstore/src/bucket/replication/mod.rs +++ b/ecstore/src/bucket/replication/mod.rs @@ -3,11 +3,28 @@ mod filter; mod rule; mod tag; +use crate::error::Result; +use rmp_serde::Serializer as rmpSerializer; use rule::Rule; use serde::{Deserialize, Serialize}; -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Config { rules: Vec<Rule>, role_arn: String, } + +impl Config { + pub fn marshal_msg(&self) -> Result<Vec<u8>> { + let mut buf = Vec::new(); + + self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?; + + Ok(buf) + } + + pub fn unmarshal(buf: &[u8]) -> Result<Config> { + let t: Config = rmp_serde::from_slice(buf)?; + Ok(t) + } +}
diff --git a/ecstore/src/bucket/target/mod.rs b/ecstore/src/bucket/target/mod.rs index e296423b..f830c522 100644 --- a/ecstore/src/bucket/target/mod.rs +++ b/ecstore/src/bucket/target/mod.rs @@ -1,8 +1,10 @@ +use crate::error::Result; +use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; use std::time::Duration; use time::OffsetDateTime; -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Credentials { access_key: String, secret_key: String, expiration: Option<OffsetDateTime>, } -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub enum ServiceType { #[default] Replication, } -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct LatencyStat { curr: Duration, // current latency avg: Duration, // average latency } // BucketTarget struct -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct BucketTarget { source_bucket: String, @@ -73,7 +75,22 @@ pub struct BucketTarget { edge: bool, } -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct BucketTargets { pub targets: Vec<BucketTarget>, } + +impl BucketTargets { + pub fn marshal_msg(&self) -> Result<Vec<u8>> { + let mut buf = Vec::new(); + + self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?; + + Ok(buf) + } + + pub fn unmarshal(buf: &[u8]) -> Result<BucketTargets> { + let t: BucketTargets = rmp_serde::from_slice(buf)?; + Ok(t) + } +}
diff --git a/ecstore/src/bucket/versioning/mod.rs b/ecstore/src/bucket/versioning/mod.rs index 37fdcdfa..aa2f473c 100644 --- a/ecstore/src/bucket/versioning/mod.rs +++ b/ecstore/src/bucket/versioning/mod.rs
@@ -1,3 +1,5 @@ +use crate::error::Result; +use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Deserialize, Serialize)] @@ -25,14 +27,29 @@ impl std::fmt::Display for State { } } -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct ExcludedPrefix { pub prefix: String, } -#[derive(Debug, Deserialize, Serialize, Default,Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Versioning { pub status: State, pub excluded_prefixes: Vec<ExcludedPrefix>, pub exclude_folders: bool, } + +impl Versioning { + pub fn marshal_msg(&self) -> Result<Vec<u8>> { + let mut buf = Vec::new(); + + self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?; + + Ok(buf) + } + + pub fn unmarshal(buf: &[u8]) -> Result<Versioning> { + let t: Versioning = rmp_serde::from_slice(buf)?; + Ok(t) + } +}
diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 2cbb3dcb..eaa66fc2 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -1,11 +1,8 @@ -use bytes::BufMut; use bytes::Bytes; use ecstore::bucket::get_bucket_metadata_sys; use ecstore::bucket::metadata::BUCKET_TAGGING_CONFIG; use ecstore::bucket::tags::Tags; -use ecstore::bucket_meta::BucketMetadata; use ecstore::disk::error::DiskError; -use ecstore::disk::RUSTFS_META_BUCKET; use ecstore::new_object_layer_fn; use ecstore::store_api::BucketOptions; use ecstore::store_api::CompletePart; @@ -24,7 +21,6 @@ use http::HeaderMap; use log::warn; use s3s::dto::*; use s3s::s3_error; -use s3s::Body; use s3s::S3Error; use s3s::S3ErrorCode; use s3s::S3Result;
From 000866556e6fd52eda2221a893fc90598436732c Mon Sep 17 00:00:00 2001 From: weisd Date: Wed, 9 Oct 2024 17:38:56 +0800 Subject: [PATCH 18/20] init BucketVersioning api --- ecstore/src/bucket/metadata_sys.rs | 4 ++-- ecstore/src/config/common.rs | 1 - rustfs/src/storage/ecfs.rs | 16 ++++++++++++++++ 3 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/ecstore/src/bucket/metadata_sys.rs b/ecstore/src/bucket/metadata_sys.rs index f295809d..87b89e82 100644 --- a/ecstore/src/bucket/metadata_sys.rs +++ b/ecstore/src/bucket/metadata_sys.rs @@ -14,7 +14,7 @@ use futures::future::join_all; use lazy_static::lazy_static; use time::OffsetDateTime; use tokio::sync::RwLock; -use tracing::{error, info, warn}; +use tracing::{error, warn}; use super::encryption::BucketSSEConfig; use super::lifecycle::lifecycle::Lifecycle; @@ -22,7 +22,7 @@ use super::metadata::{load_bucket_metadata, BucketMetadata}; use super::policy::bucket_policy::BucketPolicy; use super::quota::BucketQuota; use super::target::BucketTargets; -use super::{event, objectlock, policy, replication, tags, versioning}; +use super::{event, objectlock, replication, tags, versioning}; lazy_static! 
{ static ref GLOBAL_BucketMetadataSys: Arc<RwLock<BucketMetadataSys>> = Arc::new(RwLock::new(BucketMetadataSys::new()));
diff --git a/ecstore/src/config/common.rs b/ecstore/src/config/common.rs index 62e53252..6dedfbed 100644 --- a/ecstore/src/config/common.rs +++ b/ecstore/src/config/common.rs @@ -5,7 +5,6 @@ use crate::store_api::{HTTPRangeSpec, ObjectIO, ObjectInfo, ObjectOptions, PutOb use http::HeaderMap; use s3s::dto::StreamingBlob; use s3s::Body; -use tracing::warn; use super::error::ConfigError;
diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index eaa66fc2..2b994017 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -850,6 +850,22 @@ impl S3 for FS { Ok(S3Response::new(DeleteObjectTaggingOutput { version_id: None })) } + + #[tracing::instrument(level = "debug", skip(self))] + async fn get_bucket_versioning( + &self, + _req: S3Request<GetBucketVersioningInput>, + ) -> S3Result<S3Response<GetBucketVersioningOutput>> { + Err(s3_error!(NotImplemented, "GetBucketVersioning is not implemented yet")) + } + + #[tracing::instrument(level = "debug", skip(self))] + async fn put_bucket_versioning( + &self, + _req: S3Request<PutBucketVersioningInput>, + ) -> S3Result<S3Response<PutBucketVersioningOutput>> { + Err(s3_error!(NotImplemented, "PutBucketVersioning is not implemented yet")) + } } #[allow(dead_code)]
From 1bbd5168c9d7f2ef57544d4a3de182cc660b473f Mon Sep 17 00:00:00 2001 From: weisd Date: Wed, 9 Oct 2024 20:45:34 +0800 Subject: [PATCH 19/20] add versioning_sys --- ecstore/src/bucket/mod.rs | 3 +- ecstore/src/bucket/versioning/mod.rs | 95 ++++++++++++++++++++++++++-- ecstore/src/bucket/versioning_sys.rs | 61 ++++++++++++++++++ ecstore/src/utils/mod.rs | 1 + ecstore/src/utils/wildcard.rs | 70 ++++++++++++++++++++ rustfs/src/storage/ecfs.rs | 39 +++++++++++- 6 files changed, 259 insertions(+), 10 deletions(-) create mode 100644 ecstore/src/bucket/versioning_sys.rs create mode 100644 ecstore/src/utils/wildcard.rs
diff --git a/ecstore/src/bucket/mod.rs b/ecstore/src/bucket/mod.rs index a905fba1..59e35a47 100644 --- a/ecstore/src/bucket/mod.rs +++ b/ecstore/src/bucket/mod.rs @@ -11,6 +11,7 @@ mod replication; pub mod tags; mod target; pub mod utils; -mod versioning; +pub mod versioning; +pub mod versioning_sys; pub use metadata_sys::{bucket_metadata_sys_set, get_bucket_metadata_sys, init_bucket_metadata_sys};
diff --git a/ecstore/src/bucket/versioning/mod.rs b/ecstore/src/bucket/versioning/mod.rs index aa2f473c..0cf37eb8 100644 --- a/ecstore/src/bucket/versioning/mod.rs +++ b/ecstore/src/bucket/versioning/mod.rs @@ -1,14 +1,20 @@ -use crate::error::Result; +use crate::error::{Error, Result}; use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; +#[derive(Debug, thiserror::Error)] +pub enum VersioningErr { + #[error("too many excluded prefixes")] + TooManyExcludedPrefixes, + #[error("excluded prefixes extension supported only when versioning is enabled")] + ExcludedPrefixNotSupported, +} + #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Deserialize, Serialize)] pub enum State { #[default] - Enabled, Suspended, - // If a Disabled state is ever needed, it can be added here - // Disabled, + Enabled, } // Display impl for printing @@ -20,8 +26,6 @@ impl std::fmt::Display for State { match *self { State::Enabled => "Enabled", State::Suspended => "Suspended", - // If a Disabled state is ever needed, it can be added here - // State::Disabled => "Disabled", } ) } @@ -52,4 +56,83 @@ impl Versioning { let t: Versioning = rmp_serde::from_slice(buf)?; Ok(t) } + + pub fn validate(&self) -> Result<()> { + match self.status { + State::Suspended => { + if self.excluded_prefixes.len() > 0 { + return Err(Error::new(VersioningErr::ExcludedPrefixNotSupported));
+ } + } + State::Enabled => { + if self.excluded_prefixes.len() > 10 { + return Err(Error::new(VersioningErr::TooManyExcludedPrefixes)); + } + } + _ => return Err(Error::msg(format!("unsupported versioning status {}", self.status))), + } + + Ok(()) + } + + pub fn enabled(&self) -> bool { + self.status == State::Enabled + } + + pub fn versioned(&self, prefix: &str) -> bool { + self.prefix_enabled(prefix) || self.prefix_suspended(prefix) + } + + pub fn prefix_enabled(&self, prefix: &str) -> bool { + if self.status != State::Enabled { + return false; + } + + if prefix.is_empty() { + return true; + } + if self.exclude_folders && prefix.ends_with("/") { + return false; + } + + for sprefix in self.excluded_prefixes.iter() { + let full_prefix = format!("{}*", sprefix.prefix); + if utils::wildcard::match_simple(full_prefix, prefix) { + return false; + } + } + return true; + } + + pub fn suspended(&self) -> bool { + self.status == State::Suspended + } + + pub fn prefix_suspended(&self, prefix: &str) -> bool { + if self.status == State::Suspended { + return true; + } + + if self.status == State::Enabled { + if prefix.is_empty() { + return false; + } + + if self.exclude_folders && prefix.starts_with("/") { + return true; + } + + for sprefix in self.excluded_prefixes.iter() { + let full_prefix = format!("{}*", sprefix.prefix); + if utils::wildcard::match_simple(full_prefix, prefix) { + return true; + } + } + } + return false; + } + + pub fn prefixes_excluded(&self) -> bool { + self.excluded_prefixes.len() > 0 || self.exclude_folders + } }
diff --git a/ecstore/src/bucket/versioning_sys.rs b/ecstore/src/bucket/versioning_sys.rs new file mode 100644 index 00000000..636bbd8a --- /dev/null +++ b/ecstore/src/bucket/versioning_sys.rs @@ -0,0 +1,61 @@ +use super::get_bucket_metadata_sys; +use super::versioning::Versioning; +use crate::disk::RUSTFS_META_BUCKET; +use tracing::warn; + +pub struct BucketVersioningSys {} + +impl BucketVersioningSys { + pub async fn enabled(bucket: &str) -> bool { + match Self::get(bucket).await { + Ok(res) => res.enabled(), + Err(err) => { + warn!("{:?}", err); + false + } + } + } + + pub async fn prefix_enabled(bucket: &str, prefix: &str) -> bool { + match Self::get(bucket).await { + Ok(res) => res.prefix_enabled(prefix), + Err(err) => { + warn!("{:?}", err); + false + } + } + } + + pub async fn suspended(bucket: &str) -> bool { + match Self::get(bucket).await { + Ok(res) => res.suspended(), + Err(err) => { + warn!("{:?}", err); + false + } + } + } + + pub async fn prefix_suspended(bucket: &str, prefix: &str) -> bool { + match Self::get(bucket).await { + Ok(res) => res.prefix_suspended(prefix), + Err(err) => { + warn!("{:?}", err); + false + } + } + } + + pub async fn get(bucket: &str) -> Result<Versioning> { + if bucket == RUSTFS_META_BUCKET || bucket.starts_with(RUSTFS_META_BUCKET) { + return Ok(Versioning::default()); + } + + let bucket_meta_sys_lock = get_bucket_metadata_sys().await; + let mut bucket_meta_sys = bucket_meta_sys_lock.write().await; + + let (cfg, _) = bucket_meta_sys.get_versioning_config(bucket).await?; + + Ok(cfg) + } }
diff --git a/ecstore/src/utils/mod.rs b/ecstore/src/utils/mod.rs index 90e120d0..969921fb 100644 --- a/ecstore/src/utils/mod.rs +++ b/ecstore/src/utils/mod.rs @@ -4,3 +4,4 @@ pub mod fs; pub mod hash; pub mod net; pub mod path; +mod wildcard;
diff --git a/ecstore/src/utils/wildcard.rs b/ecstore/src/utils/wildcard.rs new file mode 100644 index 00000000..58d81637 --- /dev/null +++ b/ecstore/src/utils/wildcard.rs
@@ -0,0 +1,70 @@ +pub fn match_simple(pattern: &str, name: &str) -> bool { + if pattern.is_empty() { + return name == pattern; + } + if pattern == "*" { + return true; + } + // Do an extended wildcard '*' and '?' match. + deep_match_rune(name, pattern, true) +} + +pub fn match_pattern(pattern: &str, name: &str) -> bool { + if pattern.is_empty() { + return name == pattern; + } + if pattern == "*" { + return true; + } + // Do an extended wildcard '*' and '?' match. + deep_match_rune(name, pattern, false) +} + +fn deep_match_rune(str_: &str, pattern: &str, simple: bool) -> bool { + let (mut str_, mut pattern) = (str_.as_bytes(), pattern.as_bytes()); + while !pattern.is_empty() { + match pattern[0] as char { + '*' => { + return if pattern.len() == 1 { + true + } else if deep_match_rune(&str_[..], &pattern[1..], simple) + || (!str_.is_empty() && deep_match_rune(&str_[1..], pattern, simple)) + { + true + } else { + false + }; + } + '?' => { + if str_.is_empty() { + return simple; + } + } + _ => { + if str_.is_empty() || str_[0] != pattern[0] { + return false; + } + } + } + str_ = &str_[1..]; + pattern = &pattern[1..]; + } + str_.is_empty() && pattern.is_empty() +} + +pub fn match_as_pattern_prefix(pattern: &str, text: &str) -> bool { + let mut i = 0; + while i < text.len() && i < pattern.len() { + match pattern.as_bytes()[i] as char { + '*' => return true, + '?' => i += 1, + _ => { + if pattern.as_bytes()[i] != text.as_bytes()[i] { + return false; + } + } + } + i += 1; + } + text.len() <= pattern.len() }
diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 2b994017..25e07d00 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -2,6 +2,8 @@ use bytes::Bytes; use ecstore::bucket::get_bucket_metadata_sys; use ecstore::bucket::metadata::BUCKET_TAGGING_CONFIG; use ecstore::bucket::tags::Tags; +use ecstore::bucket::versioning::State as VersioningState; +use ecstore::bucket::versioning_sys::BucketVersioningSys; use ecstore::disk::error::DiskError; use ecstore::new_object_layer_fn; @@ -854,16 +856,47 @@ impl S3 for FS { #[tracing::instrument(level = "debug", skip(self))] async fn get_bucket_versioning( &self, - _req: S3Request<GetBucketVersioningInput>, + req: S3Request<GetBucketVersioningInput>, ) -> S3Result<S3Response<GetBucketVersioningOutput>> { - Err(s3_error!(NotImplemented, "GetBucketVersioning is not implemented yet")) + let GetBucketVersioningInput { bucket, .. } = req; + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + + if let Err(e) = store.get_bucket_info(&input.bucket, &BucketOptions::default()).await { + if DiskError::VolumeNotFound.is(&e) { + return Err(s3_error!(NoSuchBucket)); + } else { + return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("{}", e))); + } + } + + let cfg = try_!(BucketVersioningSys::get(&bucket).await); + + let status = match cfg.status { + VersioningState::Enabled => Some(BucketVersioningStatus::ENABLED), + VersioningState::Suspended => Some(BucketVersioningStatus::SUSPENDED), + }; + + Ok(S3Response::new(GetBucketVersioningOutput { + mfa_delete: None, + status, + })) } #[tracing::instrument(level = "debug", skip(self))] async fn put_bucket_versioning( &self, - _req: S3Request<PutBucketVersioningInput>, + req: S3Request<PutBucketVersioningInput>, ) -> S3Result<S3Response<PutBucketVersioningOutput>> { + let PutBucketVersioningInput { bucket, .. } = req;
+ + // check site replication enable + // check bucket object lock enable + // check replication suspended Err(s3_error!(NotImplemented, "PutBucketVersioning is not implemented yet")) } }
From 22463c9eb18e9fccbbba33685bca25e851fd287d Mon Sep 17 00:00:00 2001 From: weisd Date: Thu, 10 Oct 2024 09:40:30 +0800 Subject: [PATCH 20/20] get/put BucketVersioning done --- ecstore/src/bucket/versioning/mod.rs | 10 +++--- ecstore/src/bucket/versioning_sys.rs | 3 +- ecstore/src/utils/mod.rs | 2 +- ecstore/src/utils/wildcard.rs | 8 ++--- rustfs/src/storage/ecfs.rs | 46 ++++++++++++++++++++++++---- 5 files changed, 53 insertions(+), 16 deletions(-)
diff --git a/ecstore/src/bucket/versioning/mod.rs b/ecstore/src/bucket/versioning/mod.rs index 0cf37eb8..04019b21 100644 --- a/ecstore/src/bucket/versioning/mod.rs +++ b/ecstore/src/bucket/versioning/mod.rs @@ -1,4 +1,7 @@ -use crate::error::{Error, Result}; +use crate::{ + error::{Error, Result}, + utils, +}; use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; @@ -69,7 +72,6 @@ impl Versioning { return Err(Error::new(VersioningErr::TooManyExcludedPrefixes)); } } - _ => return Err(Error::msg(format!("unsupported versioning status {}", self.status))), } Ok(()) @@ -97,7 +99,7 @@ impl Versioning { for sprefix in self.excluded_prefixes.iter() { let full_prefix = format!("{}*", sprefix.prefix); - if utils::wildcard::match_simple(full_prefix, prefix) { + if utils::wildcard::match_simple(&full_prefix, prefix) { return false; } } @@ -124,7 +126,7 @@ impl Versioning { for sprefix in self.excluded_prefixes.iter() { let full_prefix = format!("{}*", sprefix.prefix); - if utils::wildcard::match_simple(full_prefix, prefix) { + if utils::wildcard::match_simple(&full_prefix, prefix) { return true; } }
diff --git a/ecstore/src/bucket/versioning_sys.rs b/ecstore/src/bucket/versioning_sys.rs index 636bbd8a..52a78fc0 100644 --- a/ecstore/src/bucket/versioning_sys.rs +++ b/ecstore/src/bucket/versioning_sys.rs @@ -1,6 +1,7 @@ use super::get_bucket_metadata_sys; use super::versioning::Versioning; use crate::disk::RUSTFS_META_BUCKET; +use crate::error::Result; use tracing::warn; pub struct BucketVersioningSys {} @@ -52,7 +53,7 @@ impl BucketVersioningSys { } let bucket_meta_sys_lock = get_bucket_metadata_sys().await; - let mut bucket_meta_sys = bucket_meta_sys_lock.write().await; + let bucket_meta_sys = bucket_meta_sys_lock.write().await;
diff --git a/ecstore/src/utils/mod.rs b/ecstore/src/utils/mod.rs index 969921fb..b5ca93bd 100644 --- a/ecstore/src/utils/mod.rs +++ b/ecstore/src/utils/mod.rs @@ -4,4 +4,4 @@ pub mod fs; pub mod hash; pub mod net; pub mod path; -mod wildcard; +pub mod wildcard;
diff --git a/ecstore/src/utils/wildcard.rs b/ecstore/src/utils/wildcard.rs index 58d81637..8652a88b 100644 --- a/ecstore/src/utils/wildcard.rs +++ b/ecstore/src/utils/wildcard.rs @@ -6,7 +6,7 @@ pub fn match_simple(pattern: &str, name: &str) -> bool { return true; } // Do an extended wildcard '*' and '?' match. - deep_match_rune(name, pattern, true) + deep_match_rune(name.as_bytes(), pattern.as_bytes(), true) } pub fn match_pattern(pattern: &str, name: &str) -> bool { @@ -17,11 +17,11 @@ pub fn match_pattern(pattern: &str, name: &str) -> bool { return true; } // Do an extended wildcard '*' and '?' match.
- deep_match_rune(name, pattern, false) + deep_match_rune(name.as_bytes(), pattern.as_bytes(), false) } -fn deep_match_rune(str_: &str, pattern: &str, simple: bool) -> bool { - let (mut str_, mut pattern) = (str_.as_bytes(), pattern.as_bytes()); +fn deep_match_rune(str_: &[u8], pattern: &[u8], simple: bool) -> bool { + let (mut str_, mut pattern) = (str_, pattern); while !pattern.is_empty() { match pattern[0] as char { '*' => {
diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 25e07d00..7ea11fbb 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -1,8 +1,10 @@ use bytes::Bytes; use ecstore::bucket::get_bucket_metadata_sys; use ecstore::bucket::metadata::BUCKET_TAGGING_CONFIG; +use ecstore::bucket::metadata::BUCKET_VERSIONING_CONFIG; use ecstore::bucket::tags::Tags; use ecstore::bucket::versioning::State as VersioningState; +use ecstore::bucket::versioning::Versioning; use ecstore::bucket::versioning_sys::BucketVersioningSys; use ecstore::disk::error::DiskError; use ecstore::new_object_layer_fn; @@ -19,6 +21,7 @@ use ecstore::store_api::PutObjReader; use ecstore::store_api::StorageAPI; use futures::pin_mut; use futures::{Stream, StreamExt}; +use http::status; use http::HeaderMap; use log::warn; use s3s::dto::*; @@ -858,7 +861,7 @@ impl S3 for FS { &self, req: S3Request<GetBucketVersioningInput>, ) -> S3Result<S3Response<GetBucketVersioningOutput>> { - let GetBucketVersioningInput { bucket, .. } = req; + let GetBucketVersioningInput { bucket, .. } = req.input; let layer = new_object_layer_fn(); let lock = layer.read().await; let store = match lock.as_ref() { @@ -866,7 +869,7 @@ impl S3 for FS { None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), }; - if let Err(e) = store.get_bucket_info(&input.bucket, &BucketOptions::default()).await { + if let Err(e) = store.get_bucket_info(&bucket, &BucketOptions::default()).await { if DiskError::VolumeNotFound.is(&e) { return Err(s3_error!(NoSuchBucket)); } else { @@ -877,8 +880,8 @@ impl S3 for FS { let cfg = try_!(BucketVersioningSys::get(&bucket).await); let status = match cfg.status { - VersioningState::Enabled => Some(BucketVersioningStatus::ENABLED), - VersioningState::Suspended => Some(BucketVersioningStatus::SUSPENDED), + VersioningState::Enabled => Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED)), + VersioningState::Suspended => Some(BucketVersioningStatus::from_static(BucketVersioningStatus::SUSPENDED)), }; Ok(S3Response::new(GetBucketVersioningOutput { @@ -892,12 +895,43 @@ impl S3 for FS { &self, req: S3Request<PutBucketVersioningInput>, ) -> S3Result<S3Response<PutBucketVersioningOutput>> { - let PutBucketVersioningInput { bucket, .. } = req; + let PutBucketVersioningInput { + bucket, + versioning_configuration, + .. 
+ } = req.input; + + // TODO: check other sys // check site replication enable // check bucket object lock enable // check replication suspended + + let mut cfg = match BucketVersioningSys::get(&bucket).await { + Ok(res) => res, + Err(err) => { + warn!("BucketVersioningSys::get err {:?}", err); + Versioning::default() + } + }; + + if let Some(verstatus) = versioning_configuration.status { + cfg.status = match verstatus.as_str() { + BucketVersioningStatus::ENABLED => VersioningState::Enabled, + BucketVersioningStatus::SUSPENDED => VersioningState::Suspended, + _ => return Err(S3Error::with_message(S3ErrorCode::InternalError, "invalid versioning status")), + } + } + + let data = try_!(cfg.marshal_msg()); + + let bucket_meta_sys_lock = get_bucket_metadata_sys().await; + let mut bucket_meta_sys = bucket_meta_sys_lock.write().await; + + try_!(bucket_meta_sys.update(&bucket, BUCKET_VERSIONING_CONFIG, data).await); + + // TODO: globalSiteReplicationSys.BucketMetaHook + + Ok(S3Response::new(PutBucketVersioningOutput {}))
    }
}
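---

Reviewer note, not part of the patch series above: a minimal sketch of how the pieces introduced in PATCH 17-20 compose, assuming the ecstore layout exactly as committed (Versioning, State, and ExcludedPrefix from ecstore/src/bucket/versioning/mod.rs, the rmp_serde-backed marshal_msg/unmarshal pair, and the wildcard prefix matching from ecstore/src/utils/wildcard.rs). The prefix names below are made up for illustration.

    use ecstore::bucket::versioning::{ExcludedPrefix, State, Versioning};

    fn main() {
        // Hypothetical config: enable versioning but exclude a scratch prefix,
        // mirroring what put_bucket_versioning persists via BucketMetadataSys::update.
        let cfg = Versioning {
            status: State::Enabled,
            excluded_prefixes: vec![ExcludedPrefix {
                prefix: "tmp/".to_string(),
            }],
            exclude_folders: false,
        };
        cfg.validate().expect("up to 10 excluded prefixes are allowed");

        // Round-trip through the MessagePack encoding stored in bucket metadata.
        let buf = cfg.marshal_msg().expect("serialize");
        let decoded = Versioning::unmarshal(&buf).expect("deserialize");

        // prefix_enabled turns "tmp/" into the wildcard pattern "tmp/*",
        // so objects under tmp/ are treated as unversioned.
        assert!(decoded.prefix_enabled("data/object"));
        assert!(!decoded.prefix_enabled("tmp/scratch"));
    }

Serializing with with_struct_map keeps field names in the MessagePack payload rather than positional tuples, so new optional fields can be added to these config structs without breaking metadata written by older builds.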