diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 4db6750a..e11da3cf 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -28,7 +28,7 @@ jobs:
       - name: Download and Extract Static Assets
         run: |
-          url="https://dl.rustfs.com/console/rustfs-console-latest.zip"
+          url="https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip"
           mkdir -p static
           curl -L -o static_assets.zip "$url"
           unzip -o static_assets.zip -d ./rustfs/static
diff --git a/Cargo.lock b/Cargo.lock
index 213aeb0f..18226104 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1924,6 +1924,7 @@ checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813"
 name = "e2e_test"
 version = "0.0.1"
 dependencies = [
+ "common",
  "ecstore",
  "flatbuffers",
  "futures",
@@ -1971,6 +1972,7 @@ dependencies = [
  "path-absolutize",
  "path-clean",
  "pin-project-lite",
+ "policy",
  "protos",
  "rand 0.8.5",
  "reed-solomon-erasure",
@@ -3003,6 +3005,7 @@ dependencies = [
  "arc-swap",
  "async-trait",
  "base64-simd",
+ "common",
  "crypto",
  "ecstore",
  "futures",
@@ -3012,6 +3015,7 @@ dependencies = [
  "lazy_static",
  "log",
  "madmin",
+ "policy",
  "rand 0.8.5",
  "regex",
  "serde",
@@ -4815,6 +4819,34 @@ dependencies = [
  "miniz_oxide",
 ]
 
+[[package]]
+name = "policy"
+version = "0.0.1"
+dependencies = [
+ "arc-swap",
+ "async-trait",
+ "base64-simd",
+ "common",
+ "crypto",
+ "futures",
+ "ipnetwork",
+ "itertools",
+ "jsonwebtoken",
+ "lazy_static",
+ "log",
+ "madmin",
+ "rand 0.8.5",
+ "regex",
+ "serde",
+ "serde_json",
+ "strum",
+ "test-case",
+ "thiserror 2.0.12",
+ "time",
+ "tokio",
+ "tracing",
+]
+
 [[package]]
 name = "polling"
 version = "3.7.4"
@@ -5663,6 +5695,7 @@ dependencies = [
  "netif",
  "once_cell",
  "pin-project-lite",
+ "policy",
  "prost",
  "prost-build",
  "prost-types",
diff --git a/Cargo.toml b/Cargo.toml
index 4977bc9d..40dd5159 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -44,6 +44,7 @@ flatbuffers = "24.12.23"
 futures = "0.3.31"
 futures-util = "0.3.31"
 common = { path = "./common/common" }
+policy = { path = "./policy" }
 hex = "0.4.3"
 hyper = "1.6.0"
 hyper-util = { version = "0.1.10", features = [
diff --git a/TODO.md b/TODO.md
index 3519bd92..9c8e4653 100644
--- a/TODO.md
+++ b/TODO.md
@@ -63,5 +63,6 @@
 ## Performance optimization
 - [ ] bitrot impl AsyncRead/AsyncWrite
 - [ ] erasure: concurrent reads and writes
-- [ ] improve deletion logic: process concurrently, move to a recycle bin first, empty the recycle bin when space runs low
+- [x] improve deletion logic: process concurrently, move to a recycle bin first,
+- [ ] empty the recycle bin when space runs low
 - [ ] list_object: stream results through a reader
\ No newline at end of file
diff --git a/common/common/src/error.rs b/common/common/src/error.rs
index 24d2936b..2c889053 100644
--- a/common/common/src/error.rs
+++ b/common/common/src/error.rs
@@ -61,6 +61,11 @@ impl Error {
     pub fn downcast_mut<T: 'static>(&mut self) -> Option<&mut T> {
         self.inner.downcast_mut()
     }
+
+    pub fn to_io_err(&self) -> Option<std::io::Error> {
+        self.downcast_ref::<std::io::Error>()
+            .map(|e| std::io::Error::new(e.kind(), e.to_string()))
+    }
 }
 
 impl From<std::io::Error> for Error {
diff --git a/e2e_test/Cargo.toml b/e2e_test/Cargo.toml
index 0a8d98d5..a519f647 100644
--- a/e2e_test/Cargo.toml
+++ b/e2e_test/Cargo.toml
@@ -6,6 +6,7 @@ license.workspace = true
 repository.workspace = true
 rust-version.workspace = true
 
+
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [lints]
@@ -25,4 +26,5 @@ tonic = { version = "0.12.3", features = ["gzip"] }
 tokio = { workspace = true }
 tower.workspace = true
 url.workspace = true
-madmin.workspace =true
\ No newline at end of file
+madmin.workspace =true
+common.workspace = true
\ No newline at end of file
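Note on the new `Error::to_io_err` helper added above: it yields `Some` only when the wrapped error actually is a `std::io::Error`. A minimal caller-side sketch follows; the fallback branch and the reliance on `Error: Display` are assumptions for illustration, not part of this diff:

```rust
// Hypothetical adapter for APIs that require a std::io::Error.
fn into_io_error(err: common::error::Error) -> std::io::Error {
    err.to_io_err().unwrap_or_else(|| {
        // Assumed fallback: wrap non-io errors as ErrorKind::Other.
        std::io::Error::new(std::io::ErrorKind::Other, err.to_string())
    })
}
```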
diff --git a/e2e_test/src/reliant/node_interact_test.rs b/e2e_test/src/reliant/node_interact_test.rs
index 5bd66ce3..742b925d 100644
--- a/e2e_test/src/reliant/node_interact_test.rs
+++ b/e2e_test/src/reliant/node_interact_test.rs
@@ -14,10 +14,7 @@ use protos::{
 use rmp_serde::{Deserializer, Serializer};
 use serde::{Deserialize, Serialize};
 use std::{error::Error, io::Cursor};
-use tokio::io::AsyncWrite;
 use tokio::spawn;
-use tokio::sync::mpsc;
-use tonic::codegen::tokio_stream::wrappers::ReceiverStream;
 use tonic::codegen::tokio_stream::StreamExt;
 use tonic::Request;
 
@@ -125,7 +122,7 @@ async fn walk_dir() -> Result<(), Box<dyn Error>> {
             println!("{}", resp.error_info.unwrap_or("".to_string()));
         }
         let entry = serde_json::from_str::<MetaCacheEntry>(&resp.meta_cache_entry)
-            .map_err(|e| ecstore::error::Error::from_string(format!("Unexpected response: {:?}", response)))
+            .map_err(|_e| common::error::Error::from_string(format!("Unexpected response: {:?}", response)))
             .unwrap();
         out.write_obj(&entry).await.unwrap();
     }
diff --git a/ecstore/Cargo.toml b/ecstore/Cargo.toml
index f7357e91..4f822c03 100644
--- a/ecstore/Cargo.toml
+++ b/ecstore/Cargo.toml
@@ -16,6 +16,7 @@ backon.workspace = true
 blake2 = "0.10.6"
 bytes.workspace = true
 common.workspace = true
+policy.workspace = true
 chrono.workspace = true
 glob = "0.3.2"
 thiserror.workspace = true
diff --git a/ecstore/src/bitrot.rs b/ecstore/src/bitrot.rs
index 849e54b9..75740155 100644
--- a/ecstore/src/bitrot.rs
+++ b/ecstore/src/bitrot.rs
@@ -1,12 +1,12 @@
 use crate::{
     disk::{error::DiskError, Disk, DiskAPI},
     erasure::{ReadAt, Writer},
-    error::{Error, Result},
     io::{FileReader, FileWriter},
     store_api::BitrotAlgorithm,
 };
 use blake2::Blake2b512;
 use blake2::Digest as _;
+use common::error::{Error, Result};
 use highway::{HighwayHash, HighwayHasher, Key};
 use lazy_static::lazy_static;
 use sha2::{digest::core_api::BlockSizeUser, Digest, Sha256};
@@ -731,14 +731,10 @@ pub fn new_bitrot_filereader(
 mod test {
     use std::collections::HashMap;
 
+    use crate::{disk::error::DiskError, store_api::BitrotAlgorithm};
+    use common::error::{Error, Result};
     use hex_simd::decode_to_vec;
-    use crate::{
-        disk::error::DiskError,
-        error::{Error, Result},
-        store_api::BitrotAlgorithm,
-    };
-
     // use super::{bitrot_writer_sum, new_bitrot_reader};
 
     #[test]
diff --git a/ecstore/src/bucket/error.rs b/ecstore/src/bucket/error.rs
index a6aa8232..9d76e7a4 100644
--- a/ecstore/src/bucket/error.rs
+++ b/ecstore/src/bucket/error.rs
@@ -1,4 +1,4 @@
-use crate::error::Error;
+use common::error::Error;
 
 #[derive(Debug, thiserror::Error, PartialEq, Eq)]
 pub enum BucketMetadataError {
diff --git a/ecstore/src/bucket/metadata.rs b/ecstore/src/bucket/metadata.rs
index fc97224e..135bbb78 100644
--- a/ecstore/src/bucket/metadata.rs
+++ b/ecstore/src/bucket/metadata.rs
@@ -1,9 +1,9 @@
-use super::policy::bucket_policy::BucketPolicy;
 use super::{quota::BucketQuota, target::BucketTargets};
 
 use super::object_lock::ObjectLockApi;
 use super::versioning::VersioningApi;
 use byteorder::{BigEndian, ByteOrder, LittleEndian};
+use policy::policy::BucketPolicy;
 use rmp_serde::Serializer as rmpSerializer;
 use s3s::dto::{
     BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ReplicationConfiguration,
@@ -16,9 +16,9 @@ use std::sync::Arc;
 use time::OffsetDateTime;
 use tracing::error;
 
-use crate::config::common::{read_config, save_config};
-use crate::error::{Error, Result};
+use crate::config::com::{read_config, save_config};
 use crate::{config, new_object_layer_fn};
+use common::error::{Error, Result};
 
 use crate::disk::BUCKET_META_PREFIX;
 use crate::store::ECStore;
diff --git a/ecstore/src/bucket/metadata_sys.rs b/ecstore/src/bucket/metadata_sys.rs
index ef455bd9..b3a7be99 100644
--- a/ecstore/src/bucket/metadata_sys.rs
+++ b/ecstore/src/bucket/metadata_sys.rs
@@ -8,11 +8,12 @@ use crate::bucket::utils::is_meta_bucketname;
 use crate::config;
 use crate::config::error::ConfigError;
 use crate::disk::error::DiskError;
-use crate::error::{Error, Result};
 use crate::global::{is_dist_erasure, is_erasure, new_object_layer_fn, GLOBAL_Endpoints};
 use crate::store::ECStore;
 use crate::utils::xml::deserialize;
+use common::error::{Error, Result};
 use futures::future::join_all;
+use policy::policy::BucketPolicy;
 use s3s::dto::{
     BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ReplicationConfiguration,
     ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
@@ -22,7 +23,6 @@
 use tokio::sync::RwLock;
 use tracing::{error, warn};
 
 use super::metadata::{load_bucket_metadata, BucketMetadata};
-use super::policy::bucket_policy::BucketPolicy;
 use super::quota::BucketQuota;
 use super::target::BucketTargets;
@@ -42,94 +42,99 @@ pub async fn init_bucket_metadata_sys(api: Arc<ECStore>, buckets: Vec<String>) {
 }
 
 // panic if not init
-pub(super) fn get_bucket_metadata_sys() -> Arc<RwLock<BucketMetadataSys>> {
-    GLOBAL_BucketMetadataSys.get().unwrap().clone()
+pub(super) fn get_bucket_metadata_sys() -> Result<Arc<RwLock<BucketMetadataSys>>> {
+    if let Some(sys) = GLOBAL_BucketMetadataSys.get() {
+        Ok(sys.clone())
+    } else {
+        Err(Error::msg("GLOBAL_BucketMetadataSys not init"))
+    }
 }
 
-pub async fn set_bucket_metadata(bucket: String, bm: BucketMetadata) {
-    let sys = get_bucket_metadata_sys();
+pub async fn set_bucket_metadata(bucket: String, bm: BucketMetadata) -> Result<()> {
+    let sys = get_bucket_metadata_sys()?;
     let lock = sys.write().await;
     lock.set(bucket, Arc::new(bm)).await;
+    Ok(())
 }
 
 pub(crate) async fn get(bucket: &str) -> Result<Arc<BucketMetadata>> {
-    let sys = get_bucket_metadata_sys();
+    let sys = get_bucket_metadata_sys()?;
     let lock = sys.read().await;
     lock.get(bucket).await
 }
 
 pub async fn update(bucket: &str, config_file: &str, data: Vec<u8>) -> Result<OffsetDateTime> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let mut bucket_meta_sys = bucket_meta_sys_lock.write().await;
     bucket_meta_sys.update(bucket, config_file, data).await
 }
 
 pub async fn delete(bucket: &str, config_file: &str) -> Result<OffsetDateTime> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let mut bucket_meta_sys = bucket_meta_sys_lock.write().await;
     bucket_meta_sys.delete(bucket, config_file).await
 }
 
 pub async fn get_tagging_config(bucket: &str) -> Result<(Tagging, OffsetDateTime)> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;
     bucket_meta_sys.get_tagging_config(bucket).await
 }
 
 pub async fn get_lifecycle_config(bucket: &str) -> Result<(BucketLifecycleConfiguration, OffsetDateTime)> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;
     bucket_meta_sys.get_lifecycle_config(bucket).await
 }
 
 pub async fn get_sse_config(bucket: &str) -> Result<(ServerSideEncryptionConfiguration, OffsetDateTime)> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;
     bucket_meta_sys.get_sse_config(bucket).await
 }
 
 pub async fn get_object_lock_config(bucket: &str) -> Result<(ObjectLockConfiguration, OffsetDateTime)> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;
     bucket_meta_sys.get_object_lock_config(bucket).await
 }
 
 pub async fn get_replication_config(bucket: &str) -> Result<(ReplicationConfiguration, OffsetDateTime)> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;
     bucket_meta_sys.get_replication_config(bucket).await
 }
 
 pub async fn get_notification_config(bucket: &str) -> Result<Option<NotificationConfiguration>> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;
     bucket_meta_sys.get_notification_config(bucket).await
 }
 
 pub async fn get_versioning_config(bucket: &str) -> Result<(VersioningConfiguration, OffsetDateTime)> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;
     bucket_meta_sys.get_versioning_config(bucket).await
 }
 
 pub async fn get_config_from_disk(bucket: &str) -> Result<BucketMetadata> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;
     bucket_meta_sys.get_config_from_disk(bucket).await
 }
 
 pub async fn created_at(bucket: &str) -> Result<OffsetDateTime> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;
     bucket_meta_sys.created_at(bucket).await
diff --git a/ecstore/src/bucket/mod.rs b/ecstore/src/bucket/mod.rs
index fb9f0230..33e5b87e 100644
--- a/ecstore/src/bucket/mod.rs
+++ b/ecstore/src/bucket/mod.rs
@@ -2,7 +2,6 @@ pub mod error;
 pub mod metadata;
 pub mod metadata_sys;
 pub mod object_lock;
-pub mod policy;
 pub mod policy_sys;
 mod quota;
 pub mod tagging;
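The metadata_sys change above replaces the panicking `get_bucket_metadata_sys()` accessor with a fallible one, so every wrapper now propagates an initialization error instead of calling `unwrap()`. A sketch of the resulting call-site shape; `get_quota_config` is a hypothetical getter used only to illustrate the pattern:

```rust
// Hypothetical wrapper following the pattern introduced in this diff.
pub async fn get_quota_config(bucket: &str) -> Result<BucketQuota> {
    // Err(...) if GLOBAL_BucketMetadataSys was never initialized.
    let sys = get_bucket_metadata_sys()?;
    // tokio::sync::RwLock: shared read access for config lookups.
    let guard = sys.read().await;
    guard.get_quota_config(bucket).await
}
```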
#[serde(rename = "s3:ForceDeleteBucket")] - ForceDeleteBucket, - #[serde(rename = "s3:DeleteBucketPolicy")] - DeleteBucketPolicy, - #[serde(rename = "s3:DeleteBucketCors")] - DeleteBucketCors, - #[serde(rename = "s3:DeleteObject")] - DeleteObject, - #[serde(rename = "s3:GetBucketLocation")] - GetBucketLocation, - #[serde(rename = "s3:GetBucketNotification")] - GetBucketNotification, - #[serde(rename = "s3:GetBucketPolicy")] - GetBucketPolicy, - #[serde(rename = "s3:GetBucketCors")] - GetBucketCors, - #[serde(rename = "s3:GetObject")] - GetObject, - #[serde(rename = "s3:GetObjectAttributes")] - GetObjectAttributes, - #[serde(rename = "s3:HeadBucket")] - HeadBucket, - #[serde(rename = "s3:ListAllMyBuckets")] - ListAllMyBuckets, - #[serde(rename = "s3:ListBucket")] - ListBucket, - #[serde(rename = "s3:GetBucketPolicyStatus")] - GetBucketPolicyStatus, - #[serde(rename = "s3:ListBucketVersions")] - ListBucketVersions, - #[serde(rename = "s3:ListBucketMultipartUploads")] - ListBucketMultipartUploads, - #[serde(rename = "s3:ListenNotification")] - ListenNotification, - #[serde(rename = "s3:ListenBucketNotification")] - ListenBucketNotification, - #[serde(rename = "s3:ListMultipartUploadParts")] - ListMultipartUploadParts, - #[serde(rename = "s3:PutLifecycleConfiguration")] - PutLifecycleConfiguration, - #[serde(rename = "s3:GetLifecycleConfiguration")] - GetLifecycleConfiguration, - #[serde(rename = "s3:PutBucketNotification")] - PutBucketNotification, - #[serde(rename = "s3:PutBucketPolicy")] - PutBucketPolicy, - #[serde(rename = "s3:PutBucketCors")] - PutBucketCors, - #[serde(rename = "s3:PutObject")] - PutObject, - #[serde(rename = "s3:DeleteObjectVersion")] - DeleteObjectVersion, - #[serde(rename = "s3:DeleteObjectVersionTagging")] - DeleteObjectVersionTagging, - #[serde(rename = "s3:GetObjectVersion")] - GetObjectVersion, - #[serde(rename = "s3:GetObjectVersionAttributes")] - GetObjectVersionAttributes, - #[serde(rename = "s3:GetObjectVersionTagging")] - GetObjectVersionTagging, - #[serde(rename = "s3:PutObjectVersionTagging")] - PutObjectVersionTagging, - #[serde(rename = "s3:BypassGovernanceRetention")] - BypassGovernanceRetention, - #[serde(rename = "s3:PutObjectRetention")] - PutObjectRetention, - #[serde(rename = "s3:GetObjectRetention")] - GetObjectRetention, - #[serde(rename = "s3:GetObjectLegalHold")] - GetObjectLegalHold, - #[serde(rename = "s3:PutObjectLegalHold")] - PutObjectLegalHold, - #[serde(rename = "s3:GetBucketObjectLockConfiguration")] - GetBucketObjectLockConfiguration, - #[serde(rename = "s3:PutBucketObjectLockConfiguration")] - PutBucketObjectLockConfiguration, - #[serde(rename = "s3:GetBucketTagging")] - GetBucketTagging, - #[serde(rename = "s3:PutBucketTagging")] - PutBucketTagging, - #[serde(rename = "s3:GetObjectTagging")] - GetObjectTagging, - #[serde(rename = "s3:PutObjectTagging")] - PutObjectTagging, - #[serde(rename = "s3:DeleteObjectTagging")] - DeleteObjectTagging, - #[serde(rename = "s3:PutBucketEncryption")] - PutBucketEncryption, - #[serde(rename = "s3:GetBucketEncryption")] - GetBucketEncryption, - #[serde(rename = "s3:PutBucketVersioning")] - PutBucketVersioning, - #[serde(rename = "s3:GetBucketVersioning")] - GetBucketVersioning, - #[serde(rename = "s3:PutReplicationConfiguration")] - PutReplicationConfiguration, - #[serde(rename = "s3:GetReplicationConfiguration")] - GetReplicationConfiguration, - #[serde(rename = "s3:ReplicateObject")] - ReplicateObject, - #[serde(rename = "s3:ReplicateDelete")] - ReplicateDelete, - #[serde(rename = 
"s3:ReplicateTags")] - ReplicateTags, - #[serde(rename = "s3:GetObjectVersionForReplication")] - GetObjectVersionForReplication, - #[serde(rename = "s3:RestoreObject")] - RestoreObject, - #[serde(rename = "s3:ResetBucketReplicationState")] - ResetBucketReplicationState, - #[serde(rename = "s3:PutObjectFanOut")] - PutObjectFanOut, - #[default] - #[serde(rename = "s3:*")] - AllActions, -} - -lazy_static! { - #[derive(Debug)] - static ref SUPPORT_OBJCET_ACTIONS: HashSet = { - let mut h = HashSet::new(); - h.insert(Action::AllActions); - h.insert(Action::AbortMultipartUpload); - h.insert(Action::DeleteObject); - h.insert(Action::GetObject); - h.insert(Action::ListMultipartUploadParts); - h.insert(Action::PutObject); - h.insert(Action::BypassGovernanceRetention); - h.insert(Action::PutObjectRetention); - h.insert(Action::GetObjectRetention); - h.insert(Action::PutObjectLegalHold); - h.insert(Action::GetObjectLegalHold); - h.insert(Action::GetObjectTagging); - h.insert(Action::PutObjectTagging); - h.insert(Action::DeleteObjectTagging); - h.insert(Action::GetObjectVersion); - h.insert(Action::GetObjectVersionTagging); - h.insert(Action::DeleteObjectVersion); - h.insert(Action::DeleteObjectVersionTagging); - h.insert(Action::PutObjectVersionTagging); - h.insert(Action::ReplicateObject); - h.insert(Action::ReplicateDelete); - h.insert(Action::ReplicateTags); - h.insert(Action::GetObjectVersionForReplication); - h.insert(Action::RestoreObject); - h.insert(Action::ResetBucketReplicationState); - h.insert(Action::PutObjectFanOut); - h.insert(Action::GetObjectAttributes); - h.insert(Action::GetObjectVersionAttributes); - h - }; -} - -impl Action { - pub fn is_object_action(&self) -> bool { - for act in SUPPORT_OBJCET_ACTIONS.iter() { - if self.is_match(act) { - return true; - } - } - false - } - pub fn is_match(&self, a: &Action) -> bool { - utils::wildcard::match_pattern(self.clone().as_str(), a.clone().as_str()) - } - - fn as_str(&self) -> &'static str { - match self { - Action::AbortMultipartUpload => "s3:AbortMultipartUpload", - Action::CreateBucket => "s3:CreateBucket", - Action::DeleteBucket => "s3:DeleteBucket", - Action::ForceDeleteBucket => "s3:ForceDeleteBucket", - Action::DeleteBucketPolicy => "s3:DeleteBucketPolicy", - Action::DeleteBucketCors => "s3:DeleteBucketCors", - Action::DeleteObject => "s3:DeleteObject", - Action::GetBucketLocation => "s3:GetBucketLocation", - Action::GetBucketNotification => "s3:GetBucketNotification", - Action::GetBucketPolicy => "s3:GetBucketPolicy", - Action::GetBucketCors => "s3:GetBucketCors", - Action::GetObject => "s3:GetObject", - Action::GetObjectAttributes => "s3:GetObjectAttributes", - Action::HeadBucket => "s3:HeadBucket", - Action::ListAllMyBuckets => "s3:ListAllMyBuckets", - Action::ListBucket => "s3:ListBucket", - Action::GetBucketPolicyStatus => "s3:GetBucketPolicyStatus", - Action::ListBucketVersions => "s3:ListBucketVersions", - Action::ListBucketMultipartUploads => "s3:ListBucketMultipartUploads", - Action::ListenNotification => "s3:ListenNotification", - Action::ListenBucketNotification => "s3:ListenBucketNotification", - Action::ListMultipartUploadParts => "s3:ListMultipartUploadParts", - Action::PutLifecycleConfiguration => "s3:PutLifecycleConfiguration", - Action::GetLifecycleConfiguration => "s3:GetLifecycleConfiguration", - Action::PutBucketNotification => "s3:PutBucketNotification", - Action::PutBucketPolicy => "s3:PutBucketPolicy", - Action::PutBucketCors => "s3:PutBucketCors", - Action::PutObject => "s3:PutObject", - 
Action::DeleteObjectVersion => "s3:DeleteObjectVersion", - Action::DeleteObjectVersionTagging => "s3:DeleteObjectVersionTagging", - Action::GetObjectVersion => "s3:GetObjectVersion", - Action::GetObjectVersionAttributes => "s3:GetObjectVersionAttributes", - Action::GetObjectVersionTagging => "s3:GetObjectVersionTagging", - Action::PutObjectVersionTagging => "s3:PutObjectVersionTagging", - Action::BypassGovernanceRetention => "s3:BypassGovernanceRetention", - Action::PutObjectRetention => "s3:PutObjectRetention", - Action::GetObjectRetention => "s3:GetObjectRetention", - Action::GetObjectLegalHold => "s3:GetObjectLegalHold", - Action::PutObjectLegalHold => "s3:PutObjectLegalHold", - Action::GetBucketObjectLockConfiguration => "s3:GetBucketObjectLockConfiguration", - Action::PutBucketObjectLockConfiguration => "s3:PutBucketObjectLockConfiguration", - Action::GetBucketTagging => "s3:GetBucketTagging", - Action::PutBucketTagging => "s3:PutBucketTagging", - Action::GetObjectTagging => "s3:GetObjectTagging", - Action::PutObjectTagging => "s3:PutObjectTagging", - Action::DeleteObjectTagging => "s3:DeleteObjectTagging", - Action::PutBucketEncryption => "s3:PutEncryptionConfiguration", - Action::GetBucketEncryption => "s3:GetEncryptionConfiguration", - Action::PutBucketVersioning => "s3:PutBucketVersioning", - Action::GetBucketVersioning => "s3:GetBucketVersioning", - Action::PutReplicationConfiguration => "s3:GetReplicationConfiguration", - Action::GetReplicationConfiguration => "s3:PutReplicationConfiguration", - Action::ReplicateObject => "s3:ReplicateObject", - Action::ReplicateDelete => "s3:ReplicateDelete", - Action::ReplicateTags => "s3:ReplicateTags", - Action::GetObjectVersionForReplication => "s3:GetObjectVersionForReplication", - Action::RestoreObject => "s3:RestoreObject", - Action::ResetBucketReplicationState => "s3:ResetBucketReplicationState", - Action::PutObjectFanOut => "s3:PutObjectFanOut", - Action::AllActions => "s3:*", - } - } - - // pub fn from_str(s: &str) -> Option { - // match s { - // "s3:AbortMultipartUpload" => Some(Action::AbortMultipartUpload), - // "s3:CreateBucket" => Some(Action::CreateBucket), - // "s3:DeleteBucket" => Some(Action::DeleteBucket), - // "s3:ForceDeleteBucket" => Some(Action::ForceDeleteBucket), - // "s3:DeleteBucketPolicy" => Some(Action::DeleteBucketPolicy), - // "s3:DeleteBucketCors" => Some(Action::DeleteBucketCors), - // "s3:DeleteObject" => Some(Action::DeleteObject), - // "s3:GetBucketLocation" => Some(Action::GetBucketLocation), - // "s3:GetBucketNotification" => Some(Action::GetBucketNotification), - // "s3:GetBucketPolicy" => Some(Action::GetBucketPolicy), - // "s3:GetBucketCors" => Some(Action::GetBucketCors), - // "s3:GetObject" => Some(Action::GetObject), - // "s3:GetObjectAttributes" => Some(Action::GetObjectAttributes), - // "s3:HeadBucket" => Some(Action::HeadBucket), - // "s3:ListAllMyBuckets" => Some(Action::ListAllMyBuckets), - // "s3:ListBucket" => Some(Action::ListBucket), - // "s3:GetBucketPolicyStatus" => Some(Action::GetBucketPolicyStatus), - // "s3:ListBucketVersions" => Some(Action::ListBucketVersions), - // "s3:ListBucketMultipartUploads" => Some(Action::ListBucketMultipartUploads), - // "s3:ListenNotification" => Some(Action::ListenNotification), - // "s3:ListenBucketNotification" => Some(Action::ListenBucketNotification), - // "s3:ListMultipartUploadParts" => Some(Action::ListMultipartUploadParts), - // "s3:PutLifecycleConfiguration" => Some(Action::PutLifecycleConfiguration), - // "s3:GetLifecycleConfiguration" => 
Some(Action::GetLifecycleConfiguration), - // "s3:PutBucketNotification" => Some(Action::PutBucketNotification), - // "s3:PutBucketPolicy" => Some(Action::PutBucketPolicy), - // "s3:PutBucketCors" => Some(Action::PutBucketCors), - // "s3:PutObject" => Some(Action::PutObject), - // "s3:DeleteObjectVersion" => Some(Action::DeleteObjectVersion), - // "s3:DeleteObjectVersionTagging" => Some(Action::DeleteObjectVersionTagging), - // "s3:GetObjectVersion" => Some(Action::GetObjectVersion), - // "s3:GetObjectVersionAttributes" => Some(Action::GetObjectVersionAttributes), - // "s3:GetObjectVersionTagging" => Some(Action::GetObjectVersionTagging), - // "s3:PutObjectVersionTagging" => Some(Action::PutObjectVersionTagging), - // "s3:BypassGovernanceRetention" => Some(Action::BypassGovernanceRetention), - // "s3:PutObjectRetention" => Some(Action::PutObjectRetention), - // "s3:GetObjectRetention" => Some(Action::GetObjectRetention), - // "s3:GetObjectLegalHold" => Some(Action::GetObjectLegalHold), - // "s3:PutObjectLegalHold" => Some(Action::PutObjectLegalHold), - // "s3:GetBucketObjectLockConfiguration" => Some(Action::GetBucketObjectLockConfiguration), - // "s3:PutBucketObjectLockConfiguration" => Some(Action::PutBucketObjectLockConfiguration), - // "s3:GetBucketTagging" => Some(Action::GetBucketTagging), - // "s3:PutBucketTagging" => Some(Action::PutBucketTagging), - // "s3:GetObjectTagging" => Some(Action::GetObjectTagging), - // "s3:PutObjectTagging" => Some(Action::PutObjectTagging), - // "s3:DeleteObjectTagging" => Some(Action::DeleteObjectTagging), - // "s3:PutEncryptionConfiguration" => Some(Action::PutBucketEncryption), - // "s3:GetEncryptionConfiguration" => Some(Action::GetBucketEncryption), - // "s3:PutBucketVersioning" => Some(Action::PutBucketVersioning), - // "s3:GetBucketVersioning" => Some(Action::GetBucketVersioning), - // "s3:PutReplicationConfiguration" => Some(Action::PutReplicationConfiguration), - // "s3:GetReplicationConfiguration" => Some(Action::GetReplicationConfiguration), - // "s3:ReplicateObject" => Some(Action::ReplicateObject), - // "s3:ReplicateDelete" => Some(Action::ReplicateDelete), - // "s3:ReplicateTags" => Some(Action::ReplicateTags), - // "s3:GetObjectVersionForReplication" => Some(Action::GetObjectVersionForReplication), - // "s3:RestoreObject" => Some(Action::RestoreObject), - // "s3:ResetBucketReplicationState" => Some(Action::ResetBucketReplicationState), - // "s3:PutObjectFanOut" => Some(Action::PutObjectFanOut), - // "s3:*" => Some(Action::AllActions), - // _ => None, - // } - // } -} - -impl FromStr for Action { - type Err = (); - - fn from_str(s: &str) -> Result { - match s { - "s3:AbortMultipartUpload" => Ok(Action::AbortMultipartUpload), - "s3:CreateBucket" => Ok(Action::CreateBucket), - "s3:DeleteBucket" => Ok(Action::DeleteBucket), - "s3:ForceDeleteBucket" => Ok(Action::ForceDeleteBucket), - "s3:DeleteBucketPolicy" => Ok(Action::DeleteBucketPolicy), - "s3:DeleteBucketCors" => Ok(Action::DeleteBucketCors), - "s3:DeleteObject" => Ok(Action::DeleteObject), - "s3:GetBucketLocation" => Ok(Action::GetBucketLocation), - "s3:GetBucketNotification" => Ok(Action::GetBucketNotification), - "s3:GetBucketPolicy" => Ok(Action::GetBucketPolicy), - "s3:GetBucketCors" => Ok(Action::GetBucketCors), - "s3:GetObject" => Ok(Action::GetObject), - "s3:GetObjectAttributes" => Ok(Action::GetObjectAttributes), - "s3:HeadBucket" => Ok(Action::HeadBucket), - "s3:ListAllMyBuckets" => Ok(Action::ListAllMyBuckets), - "s3:ListBucket" => Ok(Action::ListBucket), - 
"s3:GetBucketPolicyStatus" => Ok(Action::GetBucketPolicyStatus), - "s3:ListBucketVersions" => Ok(Action::ListBucketVersions), - "s3:ListBucketMultipartUploads" => Ok(Action::ListBucketMultipartUploads), - "s3:ListenNotification" => Ok(Action::ListenNotification), - "s3:ListenBucketNotification" => Ok(Action::ListenBucketNotification), - "s3:ListMultipartUploadParts" => Ok(Action::ListMultipartUploadParts), - "s3:PutLifecycleConfiguration" => Ok(Action::PutLifecycleConfiguration), - "s3:GetLifecycleConfiguration" => Ok(Action::GetLifecycleConfiguration), - "s3:PutBucketNotification" => Ok(Action::PutBucketNotification), - "s3:PutBucketPolicy" => Ok(Action::PutBucketPolicy), - "s3:PutBucketCors" => Ok(Action::PutBucketCors), - "s3:PutObject" => Ok(Action::PutObject), - "s3:DeleteObjectVersion" => Ok(Action::DeleteObjectVersion), - "s3:DeleteObjectVersionTagging" => Ok(Action::DeleteObjectVersionTagging), - "s3:GetObjectVersion" => Ok(Action::GetObjectVersion), - "s3:GetObjectVersionAttributes" => Ok(Action::GetObjectVersionAttributes), - "s3:GetObjectVersionTagging" => Ok(Action::GetObjectVersionTagging), - "s3:PutObjectVersionTagging" => Ok(Action::PutObjectVersionTagging), - "s3:BypassGovernanceRetention" => Ok(Action::BypassGovernanceRetention), - "s3:PutObjectRetention" => Ok(Action::PutObjectRetention), - "s3:GetObjectRetention" => Ok(Action::GetObjectRetention), - "s3:GetObjectLegalHold" => Ok(Action::GetObjectLegalHold), - "s3:PutObjectLegalHold" => Ok(Action::PutObjectLegalHold), - "s3:GetBucketObjectLockConfiguration" => Ok(Action::GetBucketObjectLockConfiguration), - "s3:PutBucketObjectLockConfiguration" => Ok(Action::PutBucketObjectLockConfiguration), - "s3:GetBucketTagging" => Ok(Action::GetBucketTagging), - "s3:PutBucketTagging" => Ok(Action::PutBucketTagging), - "s3:GetObjectTagging" => Ok(Action::GetObjectTagging), - "s3:PutObjectTagging" => Ok(Action::PutObjectTagging), - "s3:DeleteObjectTagging" => Ok(Action::DeleteObjectTagging), - "s3:PutEncryptionConfiguration" => Ok(Action::PutBucketEncryption), - "s3:GetEncryptionConfiguration" => Ok(Action::GetBucketEncryption), - "s3:PutBucketVersioning" => Ok(Action::PutBucketVersioning), - "s3:GetBucketVersioning" => Ok(Action::GetBucketVersioning), - "s3:PutReplicationConfiguration" => Ok(Action::PutReplicationConfiguration), - "s3:GetReplicationConfiguration" => Ok(Action::GetReplicationConfiguration), - "s3:ReplicateObject" => Ok(Action::ReplicateObject), - "s3:ReplicateDelete" => Ok(Action::ReplicateDelete), - "s3:ReplicateTags" => Ok(Action::ReplicateTags), - "s3:GetObjectVersionForReplication" => Ok(Action::GetObjectVersionForReplication), - "s3:RestoreObject" => Ok(Action::RestoreObject), - "s3:ResetBucketReplicationState" => Ok(Action::ResetBucketReplicationState), - "s3:PutObjectFanOut" => Ok(Action::PutObjectFanOut), - "s3:*" => Ok(Action::AllActions), - _ => Err(()), - } - } -} - -pub struct ActionConditionKeyMap(HashMap); - -impl ActionConditionKeyMap { - pub fn lookup(&self, action: &Action) -> KeySet { - let common_keys: Vec = COMMOM_KEYS.iter().map(|v| v.to_key()).collect(); - - let mut merged_keys = KeySet::from_keys(&common_keys); - - for (act, key) in self.0.iter() { - if action.is_match(act) { - merged_keys.merge(key); - } - } - - merged_keys - } -} - -lazy_static! 
{ - pub static ref IAMActionConditionKeyMap: ActionConditionKeyMap = create_action_condition_key_map(); -} - -fn create_action_condition_key_map() -> ActionConditionKeyMap { - let common_keys: Vec = COMMOM_KEYS.iter().map(|v| v.to_key()).collect(); - let all_support_keys: Vec = ALL_SUPPORT_KEYS.iter().map(|v| v.to_key()).collect(); - - let mut map = HashMap::new(); - - map.insert(Action::AllActions, KeySet::from_keys(&all_support_keys)); - map.insert(Action::AbortMultipartUpload, KeySet::from_keys(&common_keys)); - map.insert(Action::CreateBucket, KeySet::from_keys(&common_keys)); - - let mut delete_obj_keys = common_keys.clone(); - delete_obj_keys.push(KeyName::S3VersionID.to_key()); - map.insert(Action::DeleteObject, KeySet::from_keys(&delete_obj_keys)); - - map.insert(Action::GetBucketLocation, KeySet::from_keys(&common_keys)); - map.insert(Action::GetBucketPolicyStatus, KeySet::from_keys(&common_keys)); - - let mut get_obj_keys = common_keys.clone(); - get_obj_keys.extend(vec![ - KeyName::S3XAmzServerSideEncryption.to_key(), - KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm.to_key(), - KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID.to_key(), - KeyName::S3VersionID.to_key(), - KeyName::ExistingObjectTag.to_key(), - ]); - map.insert(Action::DeleteObject, KeySet::from_keys(&get_obj_keys)); - - map.insert(Action::HeadBucket, KeySet::from_keys(&common_keys)); - - let mut get_obj_attr_keys = common_keys.clone(); - get_obj_attr_keys.push(KeyName::ExistingObjectTag.to_key()); - map.insert(Action::DeleteObject, KeySet::from_keys(&get_obj_attr_keys)); - - let mut get_obj_ver_attr_keys = common_keys.clone(); - get_obj_ver_attr_keys.extend(vec![KeyName::S3VersionID.to_key(), KeyName::ExistingObjectTag.to_key()]); - map.insert(Action::DeleteObject, KeySet::from_keys(&get_obj_ver_attr_keys)); - - map.insert(Action::ListAllMyBuckets, KeySet::from_keys(&common_keys)); - - let mut list_bucket_keys = common_keys.clone(); - list_bucket_keys.extend(vec![ - KeyName::S3Prefix.to_key(), - KeyName::S3Delimiter.to_key(), - KeyName::S3MaxKeys.to_key(), - ]); - map.insert(Action::ListBucket, KeySet::from_keys(&list_bucket_keys)); - map.insert(Action::ListBucketVersions, KeySet::from_keys(&list_bucket_keys)); - - map.insert(Action::ListBucketMultipartUploads, KeySet::from_keys(&common_keys)); - map.insert(Action::ListenNotification, KeySet::from_keys(&common_keys)); - map.insert(Action::ListenBucketNotification, KeySet::from_keys(&common_keys)); - map.insert(Action::ListMultipartUploadParts, KeySet::from_keys(&common_keys)); - - let mut put_obj_keys = common_keys.clone(); - put_obj_keys.extend(vec![ - KeyName::S3XAmzCopySource.to_key(), - KeyName::S3XAmzServerSideEncryption.to_key(), - KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm.to_key(), - KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID.to_key(), - KeyName::S3XAmzMetadataDirective.to_key(), - KeyName::S3XAmzStorageClass.to_key(), - KeyName::S3VersionID.to_key(), - KeyName::S3ObjectLockRetainUntilDate.to_key(), - KeyName::S3ObjectLockMode.to_key(), - KeyName::S3ObjectLockLegalHold.to_key(), - KeyName::RequestObjectTagKeys.to_key(), - KeyName::RequestObjectTag.to_key(), - ]); - map.insert(Action::PutObject, KeySet::from_keys(&put_obj_keys)); - - let mut put_obj_retention_keys = common_keys.clone(); - put_obj_retention_keys.extend(vec![ - KeyName::S3XAmzServerSideEncryption.to_key(), - KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm.to_key(), - KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID.to_key(), - 
KeyName::S3ObjectLockRemainingRetentionDays.to_key(), - KeyName::S3ObjectLockRetainUntilDate.to_key(), - KeyName::S3ObjectLockMode.to_key(), - KeyName::S3VersionID.to_key(), - ]); - map.insert(Action::PutObjectRetention, KeySet::from_keys(&put_obj_retention_keys)); - - let mut get_obj_retention_keys = common_keys.clone(); - get_obj_retention_keys.extend(vec![ - KeyName::S3XAmzServerSideEncryption.to_key(), - KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm.to_key(), - KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID.to_key(), - KeyName::S3VersionID.to_key(), - ]); - map.insert(Action::GetObjectRetention, KeySet::from_keys(&get_obj_retention_keys)); - - let mut put_obj_hold_keys = common_keys.clone(); - put_obj_hold_keys.extend(vec![ - KeyName::S3XAmzServerSideEncryption.to_key(), - KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm.to_key(), - KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID.to_key(), - KeyName::S3ObjectLockLegalHold.to_key(), - KeyName::S3VersionID.to_key(), - ]); - map.insert(Action::PutObjectLegalHold, KeySet::from_keys(&put_obj_hold_keys)); - - map.insert(Action::GetObjectLegalHold, KeySet::from_keys(&common_keys)); - - let mut bypass_governance_retention_keys = common_keys.clone(); - bypass_governance_retention_keys.extend(vec![ - KeyName::S3VersionID.to_key(), - KeyName::S3ObjectLockRemainingRetentionDays.to_key(), - KeyName::S3ObjectLockRetainUntilDate.to_key(), - KeyName::S3ObjectLockMode.to_key(), - KeyName::S3ObjectLockLegalHold.to_key(), - KeyName::RequestObjectTagKeys.to_key(), - KeyName::RequestObjectTag.to_key(), - ]); - map.insert(Action::BypassGovernanceRetention, KeySet::from_keys(&bypass_governance_retention_keys)); - - map.insert(Action::GetBucketObjectLockConfiguration, KeySet::from_keys(&common_keys)); - map.insert(Action::PutBucketObjectLockConfiguration, KeySet::from_keys(&common_keys)); - map.insert(Action::GetBucketTagging, KeySet::from_keys(&common_keys)); - - let mut put_bucket_tagging_keys = common_keys.clone(); - put_bucket_tagging_keys.extend(vec![KeyName::RequestObjectTagKeys.to_key(), KeyName::RequestObjectTag.to_key()]); - map.insert(Action::PutBucketTagging, KeySet::from_keys(&put_bucket_tagging_keys)); - - let mut put_object_tagging_keys = common_keys.clone(); - put_object_tagging_keys.extend(vec![ - KeyName::S3VersionID.to_key(), - KeyName::ExistingObjectTag.to_key(), - KeyName::RequestObjectTagKeys.to_key(), - KeyName::RequestObjectTag.to_key(), - ]); - map.insert(Action::PutObjectTagging, KeySet::from_keys(&put_object_tagging_keys)); - - let mut get_object_tagging_keys = common_keys.clone(); - get_object_tagging_keys.extend(vec![KeyName::S3VersionID.to_key(), KeyName::ExistingObjectTag.to_key()]); - map.insert(Action::GetObjectTagging, KeySet::from_keys(&get_object_tagging_keys)); - map.insert(Action::DeleteObjectTagging, KeySet::from_keys(&get_object_tagging_keys)); - - map.insert(Action::PutObjectVersionTagging, KeySet::from_keys(&put_object_tagging_keys)); - map.insert(Action::GetObjectVersionTagging, KeySet::from_keys(&get_object_tagging_keys)); - map.insert(Action::GetObjectVersion, KeySet::from_keys(&get_object_tagging_keys)); - - let mut delete_object_version_keys = common_keys.clone(); - delete_object_version_keys.extend(vec![KeyName::S3VersionID.to_key()]); - - map.insert(Action::DeleteObjectVersion, KeySet::from_keys(&delete_object_version_keys)); - map.insert(Action::DeleteObjectVersionTagging, KeySet::from_keys(&get_object_tagging_keys)); - - map.insert(Action::GetReplicationConfiguration, 
KeySet::from_keys(&common_keys)); - map.insert(Action::PutReplicationConfiguration, KeySet::from_keys(&common_keys)); - - map.insert(Action::ReplicateObject, KeySet::from_keys(&get_object_tagging_keys)); - map.insert(Action::ReplicateDelete, KeySet::from_keys(&get_object_tagging_keys)); - map.insert(Action::ReplicateTags, KeySet::from_keys(&get_object_tagging_keys)); - map.insert(Action::GetObjectVersionForReplication, KeySet::from_keys(&get_object_tagging_keys)); - - map.insert(Action::RestoreObject, KeySet::from_keys(&common_keys)); - map.insert(Action::ResetBucketReplicationState, KeySet::from_keys(&common_keys)); - map.insert(Action::PutObjectFanOut, KeySet::from_keys(&common_keys)); - - ActionConditionKeyMap(map) -} diff --git a/ecstore/src/bucket/policy/bucket_policy.rs b/ecstore/src/bucket/policy/bucket_policy.rs deleted file mode 100644 index 2d4d183a..00000000 --- a/ecstore/src/bucket/policy/bucket_policy.rs +++ /dev/null @@ -1,259 +0,0 @@ -use crate::error::{Error, Result}; -// use rmp_serde::Serializer as rmpSerializer; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -use super::{ - action::{Action, ActionSet, IAMActionConditionKeyMap}, - condition::function::Functions, - effect::Effect, - principal::Principal, - resource::ResourceSet, -}; - -const DEFAULT_VERSION: &str = "2012-10-17"; - -#[derive(Debug, Deserialize, Serialize, Default, Clone)] -pub struct BucketPolicyArgs { - pub account_name: String, - pub groups: Vec, - pub action: Action, - pub bucket_name: String, - pub condition_values: HashMap>, - pub is_owner: bool, - pub object_name: String, -} - -#[derive(Debug, Deserialize, Serialize, Default, Clone, PartialEq, Eq)] -#[serde(rename_all = "PascalCase", default)] -pub struct BPStatement { - #[serde(rename = "Sid")] - pub sid: String, - #[serde(rename = "Effect")] - pub effect: Effect, - #[serde(rename = "Principal")] - pub principal: Principal, - #[serde(rename = "Action")] - pub actions: ActionSet, - #[serde(rename = "NotAction", skip_serializing_if = "ActionSet::is_empty")] - pub not_actions: ActionSet, - #[serde(rename = "Resource", skip_serializing_if = "ResourceSet::is_empty")] - pub resources: ResourceSet, - #[serde(rename = "Condition", skip_serializing_if = "Functions::is_empty")] - pub conditions: Functions, -} - -impl BPStatement { - // pub fn equals(&self, other: &BPStatement) -> bool { - // if self.effect != other.effect { - // return false; - // } - - // if !self.principal.equals(other.principal) { - // return false; - // } - - // if !self.actions.equals(other.actions) { - // return false; - // } - // if !self.not_actions.equals(other.not_actions) { - // return false; - // } - // if !self.resources.equals(other.resources) { - // return false; - // } - // if !self.conditions.equals(other.conditions) { - // return false; - // } - - // true - // } - pub fn validate(&self, bucket: &str) -> Result<()> { - self.is_valid()?; - self.resources.validate_bucket(bucket) - } - pub fn is_valid(&self) -> Result<()> { - if !self.effect.is_valid() { - return Err(Error::msg(format!("invalid Effect {:?}", self.effect))); - } - - if !self.principal.is_valid() { - return Err(Error::msg(format!("invalid Principal {:?}", self.principal))); - } - - if self.actions.is_empty() && self.not_actions.is_empty() { - return Err(Error::msg("Action must not be empty")); - } - - if self.resources.as_ref().is_empty() { - return Err(Error::msg("Resource must not be empty")); - } - - for act in self.actions.as_ref() { - if act.is_object_action() { - if 
diff --git a/ecstore/src/bucket/policy/bucket_policy.rs b/ecstore/src/bucket/policy/bucket_policy.rs
deleted file mode 100644
index 2d4d183a..00000000
--- a/ecstore/src/bucket/policy/bucket_policy.rs
+++ /dev/null
@@ -1,259 +0,0 @@
-use crate::error::{Error, Result};
-// use rmp_serde::Serializer as rmpSerializer;
-use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
-
-use super::{
-    action::{Action, ActionSet, IAMActionConditionKeyMap},
-    condition::function::Functions,
-    effect::Effect,
-    principal::Principal,
-    resource::ResourceSet,
-};
-
-const DEFAULT_VERSION: &str = "2012-10-17";
-
-#[derive(Debug, Deserialize, Serialize, Default, Clone)]
-pub struct BucketPolicyArgs {
-    pub account_name: String,
-    pub groups: Vec<String>,
-    pub action: Action,
-    pub bucket_name: String,
-    pub condition_values: HashMap<String, Vec<String>>,
-    pub is_owner: bool,
-    pub object_name: String,
-}
-
-#[derive(Debug, Deserialize, Serialize, Default, Clone, PartialEq, Eq)]
-#[serde(rename_all = "PascalCase", default)]
-pub struct BPStatement {
-    #[serde(rename = "Sid")]
-    pub sid: String,
-    #[serde(rename = "Effect")]
-    pub effect: Effect,
-    #[serde(rename = "Principal")]
-    pub principal: Principal,
-    #[serde(rename = "Action")]
-    pub actions: ActionSet,
-    #[serde(rename = "NotAction", skip_serializing_if = "ActionSet::is_empty")]
-    pub not_actions: ActionSet,
-    #[serde(rename = "Resource", skip_serializing_if = "ResourceSet::is_empty")]
-    pub resources: ResourceSet,
-    #[serde(rename = "Condition", skip_serializing_if = "Functions::is_empty")]
-    pub conditions: Functions,
-}
-
-impl BPStatement {
-    // pub fn equals(&self, other: &BPStatement) -> bool {
-    //     if self.effect != other.effect {
-    //         return false;
-    //     }
-
-    //     if !self.principal.equals(other.principal) {
-    //         return false;
-    //     }
-
-    //     if !self.actions.equals(other.actions) {
-    //         return false;
-    //     }
-    //     if !self.not_actions.equals(other.not_actions) {
-    //         return false;
-    //     }
-    //     if !self.resources.equals(other.resources) {
-    //         return false;
-    //     }
-    //     if !self.conditions.equals(other.conditions) {
-    //         return false;
-    //     }
-
-    //     true
-    // }
-
-    pub fn validate(&self, bucket: &str) -> Result<()> {
-        self.is_valid()?;
-        self.resources.validate_bucket(bucket)
-    }
-
-    pub fn is_valid(&self) -> Result<()> {
-        if !self.effect.is_valid() {
-            return Err(Error::msg(format!("invalid Effect {:?}", self.effect)));
-        }
-
-        if !self.principal.is_valid() {
-            return Err(Error::msg(format!("invalid Principal {:?}", self.principal)));
-        }
-
-        if self.actions.is_empty() && self.not_actions.is_empty() {
-            return Err(Error::msg("Action must not be empty"));
-        }
-
-        if self.resources.as_ref().is_empty() {
-            return Err(Error::msg("Resource must not be empty"));
-        }
-
-        for act in self.actions.as_ref() {
-            if act.is_object_action() {
-                if !self.resources.object_resource_exists() {
-                    return Err(Error::msg(format!(
-                        "unsupported object Resource found {:?} for action {:?}",
-                        self.resources, act
-                    )));
-                }
-            } else if !self.resources.bucket_resource_exists() {
-                return Err(Error::msg(format!(
-                    "unsupported bucket Resource found {:?} for action {:?}",
-                    self.resources, act
-                )));
-            }
-
-            let key_diff = self.conditions.keys().difference(&IAMActionConditionKeyMap.lookup(act));
-            if !key_diff.is_empty() {
-                return Err(Error::msg(format!(
-                    "unsupported condition keys '{:?}' used for action '{:?}'",
-                    key_diff, act
-                )));
-            }
-        }
-        Ok(())
-    }
-
-    fn is_allowed(&self, args: &BucketPolicyArgs) -> bool {
-        let check = || -> bool {
-            if !self.principal.is_match(&args.account_name) {
-                return false;
-            }
-
-            if (!self.actions.is_match(&args.action) && !self.actions.is_empty()) || self.not_actions.is_match(&args.action) {
-                return false;
-            }
-
-            let mut resource = args.bucket_name.clone();
-            if !args.object_name.is_empty() {
-                if !args.object_name.starts_with('/') {
-                    resource.push('/');
-                }
-
-                resource.push_str(&args.object_name);
-            }
-
-            if !self.resources.is_match(&resource, &args.condition_values) {
-                return false;
-            }
-
-            self.conditions.evaluate(&args.condition_values)
-        };
-
-        self.effect.is_allowed(check())
-    }
-}
-
-#[derive(Debug, Deserialize, Serialize, Default, Clone)]
-// #[serde(rename_all = "PascalCase", default)]
-pub struct BucketPolicy {
-    #[serde(rename = "ID", default)]
-    pub id: String,
-    #[serde(rename = "Version")]
-    pub version: String,
-    #[serde(rename = "Statement")]
-    pub statements: Vec<BPStatement>,
-}
-
-impl BucketPolicy {
-    pub fn is_allowed(&self, args: &BucketPolicyArgs) -> bool {
-        for statement in self.statements.iter() {
-            if statement.effect == Effect::Deny && !statement.is_allowed(args) {
-                return false;
-            }
-        }
-
-        if args.is_owner {
-            return true;
-        }
-
-        for statement in self.statements.iter() {
-            if statement.effect == Effect::Allow && statement.is_allowed(args) {
-                return true;
-            }
-        }
-
-        false
-    }
-
-    pub fn validate(&self, bucket: &str) -> Result<()> {
-        self.is_valid()?;
-        for statement in self.statements.iter() {
-            statement.validate(bucket)?;
-        }
-        Ok(())
-    }
-
-    pub fn is_valid(&self) -> Result<()> {
-        if self.version.as_str() != DEFAULT_VERSION && self.version.is_empty() {
-            return Err(Error::msg(format!("invalid version {}", self.version)));
-        }
-
-        for statement in self.statements.iter() {
-            statement.is_valid()?;
-        }
-        Ok(())
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.statements.is_empty()
-    }
-
-    pub fn marshal_msg(&self) -> Result<String> {
-        let buf = serde_json::to_string(self)?;
-
-        Ok(buf)
-
-        // let mut buf = Vec::new();
-        // self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?;
-
-        // Ok(buf)
-    }
-
-    pub fn unmarshal(buf: &[u8]) -> Result<BucketPolicy> {
-        let mut p = serde_json::from_slice::<BucketPolicy>(buf)?;
-        p.drop_duplicate_statements();
-        Ok(p)
-
-        // let t: BucketPolicy = rmp_serde::from_slice(buf)?;
-        // Ok(t)
-    }
-
-    fn drop_duplicate_statements(&mut self) {
-        let mut dups = HashMap::new();
-
-        for v in self.statements.iter() {
-            if let Ok(data) = serde_json::to_string(self) {
-                dups.insert(data, v);
-            }
-        }
-
-        let mut news = Vec::new();
-
-        for (_, v) in dups {
-            news.push(v.clone());
-        }
-
-        self.statements = news;
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_bucket_policy() {
-        let json = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Action\":[\"s3:GetBucketLocation\",\"s3:ListBucket\",\"s3:ListBucketMultipartUploads\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"*\"]},\"Resource\":[\"arn:aws:s3:::dada\"],\"Sid\":\"\"},{\"Action\":[\"s3:AbortMultipartUpload\",\"s3:DeleteObject\",\"s3:GetObject\",\"s3:ListMultipartUploadParts\",\"s3:PutObject\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"*\"]},\"Resource\":[\"arn:aws:s3:::dada/*\"],\"Sid\":\"sdf\"}]}";
-
-        let a = BucketPolicy::unmarshal(json.to_string().as_bytes()).unwrap();
-
-        println!("{:?}", a);
-
-        let j = a.marshal_msg();
-
-        println!("{:?}", j);
-
-        println!("{:?}", json);
-    }
-}
diff --git a/ecstore/src/bucket/policy/condition/function.rs b/ecstore/src/bucket/policy/condition/function.rs
deleted file mode 100644
index 929b2d4e..00000000
--- a/ecstore/src/bucket/policy/condition/function.rs
+++ /dev/null
@@ -1,304 +0,0 @@
-use super::{
-    key::{Key, KeySet},
-    keyname::KeyName,
-    name::Name,
-};
-use serde::{
-    de::{MapAccess, Visitor},
-    ser::SerializeMap,
-    Deserialize, Serialize,
-};
-use std::{
-    collections::{HashMap, HashSet},
-    fmt::{self, Debug, Display},
-    marker::PhantomData,
-};
-
-// Define the ValueSet type
-pub type ValueSet = HashSet<String>;
-
-// Define the Function trait
-pub trait FunctionApi: 'static + Send + Sync {
-    // evaluate method
-    fn evaluate(&self, values: &HashMap<String, Vec<String>>) -> bool;
-
-    // key method
-    fn key(&self) -> Key;
-
-    // name method
-    fn name(&self) -> Name;
-
-    // String method
-    fn to_string(&self) -> String;
-
-    // to_map method
-    fn to_map(&self) -> HashMap<Key, ValueSet>;
-
-    fn clone_box(&self) -> Box<dyn FunctionApi>;
-}
-
-// #[derive(Debug, Deserialize, Serialize, Clone)]
-// pub enum Function {
-//     Test(TestFunction),
-// }
-
-// impl FunctionApi for Function {
-//     // evaluate method
-//     fn evaluate(&self, values: &HashMap<String, Vec<String>>) -> bool {
-//         match self {
-//             Function::Test(f) => f.evaluate(values),
-//         }
-//     }
-
-//     // key method
-//     fn key(&self) -> Key {
-//         match self {
-//             Function::Test(f) => f.key(),
-//         }
-//     }
-
-//     // name method
-//     fn name(&self) -> Name {
-//         match self {
-//             Function::Test(f) => f.name(),
-//         }
-//     }
-
-//     // String method
-//     fn to_string(&self) -> String {
-//         match self {
-//             Function::Test(f) => f.to_string(),
-//         }
-//     }
-
-//     // to_map method
-//     fn to_map(&self) -> HashMap<Key, ValueSet> {
-//         match self {
-//             Function::Test(f) => f.to_map(),
-//         }
-//     }
-
-//     fn clone_box(&self) -> Box<dyn FunctionApi> {
-//         match self {
-//             Function::Test(f) => f.clone_box(),
-//         }
-//     }
-// }
-
-// Define the Functions type
-#[derive(Default)]
-pub struct Functions(Vec<Box<dyn FunctionApi>>);
-
-impl Functions {
-    pub fn evaluate(&self, values: &HashMap<String, Vec<String>>) -> bool {
-        for f in self.0.iter() {
-            if f.evaluate(values) {
-                return true;
-            }
-        }
-
-        false
-    }
-
-    pub fn keys(&self) -> KeySet {
-        let mut set = KeySet::new();
-        for f in self.0.iter() {
-            set.add(f.key())
-        }
-        set
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.0.is_empty()
-    }
-}
-
-impl Debug for Functions {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let funs: Vec<String> = self.0.iter().map(|v| v.to_string()).collect();
-        f.debug_list().entries(funs.iter()).finish()
-    }
-}
-
-impl Display for Functions {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let funs: Vec<String> = self.0.iter().map(|v| v.to_string()).collect();
-        write!(f, "{:?}", funs)
-    }
-}
-
-impl Clone for Functions {
-    fn clone(&self) -> Self {
-        let mut list = Vec::new();
-        for v in self.0.iter() {
-            list.push(v.clone_box())
-        }
-
-        Functions(list)
-    }
-}
-
-impl PartialEq for Functions {
-    fn eq(&self, other: &Self) -> bool {
-        if self.0.len() != other.0.len() {
-            return false;
-        }
-
-        for v in self.0.iter() {
-            let s = v.to_string();
-            let mut found = false;
-            for o in other.0.iter() {
-                if s == o.to_string() {
-                    found = true;
-                    break;
-                }
-            }
-
-            if !found {
-                return false;
-            }
-        }
-
-        true
-    }
-}
-
-impl Eq for Functions {}
-
-type FunctionsMap = HashMap<String, HashMap<String, ValueSet>>;
-
-impl Serialize for Functions {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        let mut nm: FunctionsMap = HashMap::new();
-        for f in self.0.iter() {
-            let fname = f.name().to_string();
-
-            if !nm.contains_key(&fname) {
-                nm.insert(fname.clone(), HashMap::new());
-            }
-
-            for (k, v) in f.to_map() {
-                if let Some(hm) = nm.get_mut(&fname) {
-                    hm.insert(k.to_string(), v);
-                }
-            }
-        }
-
-        let mut map = serializer.serialize_map(Some(nm.len()))?;
-        for (k, v) in nm.iter() {
-            map.serialize_entry(k, v)?;
-        }
-
-        map.end()
-    }
-}
-
-struct MyMapVisitor {
-    marker: PhantomData<fn() -> FunctionsMap>,
-}
-
-impl MyMapVisitor {
-    fn new() -> Self {
-        MyMapVisitor { marker: PhantomData }
-    }
-}
-
-// This is the trait that Deserializers are going to be driving. There
-// is one method for each type of data that our type knows how to
-// deserialize from. There are many other methods that are not
-// implemented here, for example deserializing from integers or strings.
-// By default those methods will return an error, which makes sense
-// because we cannot deserialize a MyMap from an integer or string.
-impl<'de> Visitor<'de> for MyMapVisitor {
-    // The type that our Visitor is going to produce.
-    type Value = FunctionsMap;
-
-    // Format a message stating what data this Visitor expects to receive.
-    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-        formatter.write_str("a very special map")
-    }
-
-    // Deserialize MyMap from an abstract "map" provided by the
-    // Deserializer. The MapAccess input is a callback provided by
-    // the Deserializer to let us see each entry in the map.
-    fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
-    where
-        M: MapAccess<'de>,
-    {
-        let mut map = FunctionsMap::with_capacity(access.size_hint().unwrap_or(0));
-
-        // While there are entries remaining in the input, add them
-        // into our map.
-        while let Some((key, value)) = access.next_entry()? {
-            map.insert(key, value);
-        }
-
-        Ok(map)
-    }
-}
-
-// This is the trait that informs Serde how to deserialize MyMap.
-impl<'de> Deserialize<'de> for Functions {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        // Instantiate our Visitor and ask the Deserializer to drive
-        // it over the input data, resulting in an instance of MyMap.
-        let map = deserializer.deserialize_map(MyMapVisitor::new())?;
-
-        for (key, vals) in map.iter() {
-            println!("functions key {}, vals {:?}", key, vals);
-        }
-        // TODO: FIXME: create functions from name
-
-        Ok(Functions(Vec::new()))
-    }
-}
-
-// impl<'de> Deserialize<'de> for Functions {
-//     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-//     where
-//         D: serde::Deserializer<'de>,
-//     {
-//         todo!()
-//     }
-// }
-
-#[derive(Debug, Deserialize, Serialize, Default, Clone)]
-pub struct TestFunction {}
-
-impl FunctionApi for TestFunction {
-    // evaluate method
-    fn evaluate(&self, _values: &HashMap<String, Vec<String>>) -> bool {
-        true
-    }
-
-    // key method
-    fn key(&self) -> Key {
-        Key {
-            name: KeyName::JWTPrefUsername,
-            variable: "".to_string(),
-        }
-    }
-
-    // name method
-    fn name(&self) -> Name {
-        Name::StringEquals
-    }
-
-    // String method
-    fn to_string(&self) -> String {
-        Name::StringEquals.to_string()
-    }
-
-    // to_map method
-    fn to_map(&self) -> HashMap<Key, ValueSet> {
-        HashMap::new()
-    }
-
-    fn clone_box(&self) -> Box<dyn FunctionApi> {
-        Box::new(self.clone())
-    }
-}
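The `Functions` `Deserialize` impl in the removed file parses the `Condition` map but, per its own `TODO: FIXME`, discards the entries and returns an empty list, so conditions are lost on a JSON round trip. A small illustration against the code shown above (assuming `ValueSet` deserializes from a JSON string array):

```rust
// The Condition JSON shape is map-of-name -> map-of-key -> values.
fn functions_deserialize_gap() {
    let json = r#"{"StringEquals":{"s3:prefix":["photos/"]}}"#;
    let funcs: Functions = serde_json::from_str(json).unwrap();
    // The visitor collects the entries but builds no functions yet.
    assert!(funcs.is_empty());
}
```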
- pub fn is(&self, name: &KeyName) -> bool { - self.name == *name - } - - // VarName - returns variable key name, such as "${aws:username}" - pub fn var_name(&self) -> String { - self.name.var_name() - } - - // Name - returns key name which is stripped value of prefixes "aws:" and "s3:" - pub fn name(&self) -> String { - if !self.variable.is_empty() { - format!("{}{}", self.name.name(), self.variable) - } else { - self.name.name().to_string() - } - } -} - -impl FromStr for Key { - type Err = Error; - - fn from_str(s: &str) -> Result { - let (name, variable) = if let Some(pos) = s.find('/') { - (&s[..pos], &s[pos + 1..]) - } else { - (s, "") - }; - - let keyname = KeyName::from_str(name)?; - - let key = Key { - name: keyname, - variable: variable.to_string(), - }; - - if key.is_valid() { - Ok(key) - } else { - Err(Error::msg(format!("invalid condition key '{}'", s))) - } - } -} - -impl Serialize for Key { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_str(self.to_string().as_str()) - } -} - -impl<'de> Deserialize<'de> for Key { - fn deserialize(deserializer: D) -> Result - where - D: serde::de::Deserializer<'de>, - { - let s: String = Deserialize::deserialize(deserializer)?; - Key::from_str(s.as_str()).map_err(serde::de::Error::custom) - } -} - -impl fmt::Display for Key { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if !self.variable.is_empty() { - write!(f, "{}/{}", self.name.as_str(), self.variable) - } else { - write!(f, "{}", self.name) - } - } -} - -#[derive(Debug, Default)] -pub struct KeySet(HashSet); - -impl KeySet { - pub fn new() -> Self { - KeySet(HashSet::new()) - } - // Add - add a key to key set - pub fn add(&mut self, key: Key) { - self.0.insert(key); - } - - // Merge merges two key sets, duplicates are overwritten - pub fn merge(&mut self, other: &KeySet) { - for key in &other.0 { - self.add(key.clone()); - } - } - - // Match matches the input key name with current keySet - pub fn match_key(&self, key: &Key) -> bool { - self.0.contains(key) - } - - // Difference - returns a key set contains difference of two keys - pub fn difference(&self, other: &KeySet) -> KeySet { - let mut result = KeySet::default(); - for key in &self.0 { - if !other.match_key(key) { - result.add(key.clone()); - } - } - result - } - - // IsEmpty - returns whether key set is empty or not - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - // ToSlice - returns slice of keys - fn to_slice(&self) -> Vec { - self.0.iter().cloned().collect() - } - - // NewKeySet - returns new KeySet contains given keys - pub fn from_keys(keys: &Vec) -> KeySet { - let mut set = KeySet::default(); - for key in keys { - set.add(key.clone()); - } - set - } -} - -impl fmt::Display for KeySet { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self.to_slice()) - } -} diff --git a/ecstore/src/bucket/policy/condition/keyname.rs b/ecstore/src/bucket/policy/condition/keyname.rs deleted file mode 100644 index 1fc8d660..00000000 --- a/ecstore/src/bucket/policy/condition/keyname.rs +++ /dev/null @@ -1,475 +0,0 @@ -use core::fmt; -use lazy_static::lazy_static; -use serde::{Deserialize, Serialize}; -use std::str::FromStr; - -use crate::error::Error; - -use super::key::Key; - -// 定义KeyName枚举类型 -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub enum KeyName { - // S3XAmzCopySource - key representing x-amz-copy-source HTTP header applicable to PutObject API only. 
- S3XAmzCopySource, // KeyName = "s3:x-amz-copy-source" - - // S3XAmzServerSideEncryption - key representing x-amz-server-side-encryption HTTP header applicable - // to PutObject API only. - S3XAmzServerSideEncryption, // KeyName = "s3:x-amz-server-side-encryption" - - // S3XAmzServerSideEncryptionCustomerAlgorithm - key representing - // x-amz-server-side-encryption-customer-algorithm HTTP header applicable to PutObject API only. - S3XAmzServerSideEncryptionCustomerAlgorithm, // KeyName = "s3:x-amz-server-side-encryption-customer-algorithm" - - // S3XAmzMetadataDirective - key representing x-amz-metadata-directive HTTP header applicable to - // PutObject API only. - S3XAmzMetadataDirective, // KeyName = "s3:x-amz-metadata-directive" - - // S3XAmzContentSha256 - set a static content-sha256 for all calls for a given action. - S3XAmzContentSha256, // KeyName = "s3:x-amz-content-sha256" - - // S3XAmzStorageClass - key representing x-amz-storage-class HTTP header applicable to PutObject API - // only. - S3XAmzStorageClass, // KeyName = "s3:x-amz-storage-class" - - // S3XAmzServerSideEncryptionAwsKmsKeyID - key representing x-amz-server-side-encryption-aws-kms-key-id - // HTTP header for S3 API calls - S3XAmzServerSideEncryptionAwsKmsKeyID, // KeyName = "s3:x-amz-server-side-encryption-aws-kms-key-id" - - // S3LocationConstraint - key representing LocationConstraint XML tag of CreateBucket API only. - S3LocationConstraint, // KeyName = "s3:LocationConstraint" - - // S3Prefix - key representing prefix query parameter of ListBucket API only. - S3Prefix, // KeyName = "s3:prefix" - - // S3Delimiter - key representing delimiter query parameter of ListBucket API only. - S3Delimiter, // KeyName = "s3:delimiter" - - // S3VersionID - Enables you to limit the permission for the - // s3:PutObjectVersionTagging action to a specific object version. - S3VersionID, // KeyName = "s3:versionid" - - // S3MaxKeys - key representing max-keys query parameter of ListBucket API only. - S3MaxKeys, // KeyName = "s3:max-keys" - - // S3ObjectLockRemainingRetentionDays - key representing object-lock-remaining-retention-days - // Enables enforcement of an object relative to the remaining retention days, you can set - // minimum and maximum allowable retention periods for a bucket using a bucket policy. - // This key are specific for s3:PutObjectRetention API. - S3ObjectLockRemainingRetentionDays, // KeyName = "s3:object-lock-remaining-retention-days" - - // S3ObjectLockMode - key representing object-lock-mode - // Enables enforcement of the specified object retention mode - S3ObjectLockMode, // KeyName = "s3:object-lock-mode" - - // S3ObjectLockRetainUntilDate - key representing object-lock-retain-util-date - // Enables enforcement of a specific retain-until-date - S3ObjectLockRetainUntilDate, // KeyName = "s3:object-lock-retain-until-date" - - // S3ObjectLockLegalHold - key representing object-local-legal-hold - // Enables enforcement of the specified object legal hold status - S3ObjectLockLegalHold, // KeyName = "s3:object-lock-legal-hold" - - // AWSReferer - key representing Referer header of any API. - AWSReferer, // KeyName = "aws:Referer" - - // AWSSourceIP - key representing client's IP address (not intermittent proxies) of any API. - AWSSourceIP, // KeyName = "aws:SourceIp" - - // AWSUserAgent - key representing UserAgent header for any API. - AWSUserAgent, // KeyName = "aws:UserAgent" - - // AWSSecureTransport - key representing if the clients request is authenticated or not. 
- AWSSecureTransport, // KeyName = "aws:SecureTransport" - - // AWSCurrentTime - key representing the current time. - AWSCurrentTime, // KeyName = "aws:CurrentTime" - - // AWSEpochTime - key representing the current epoch time. - AWSEpochTime, // KeyName = "aws:EpochTime" - - // AWSPrincipalType - user principal type currently supported values are "User" and "Anonymous". - AWSPrincipalType, // KeyName = "aws:principaltype" - - // AWSUserID - user unique ID, in RustFS this value is same as your user Access Key. - AWSUserID, // KeyName = "aws:userid" - - // AWSUsername - user friendly name, in RustFS this value is same as your user Access Key. - AWSUsername, // KeyName = "aws:username" - - // AWSGroups - groups for any authenticating Access Key. - AWSGroups, // KeyName = "ss" - - // S3SignatureVersion - identifies the version of AWS Signature that you want to support for authenticated requests. - S3SignatureVersion, // KeyName = "s3:signatureversion" - - // S3SignatureAge - identifies the maximum age of presgiend URL allowed - S3SignatureAge, // KeyName = "s3:signatureAge" - - // S3AuthType - optionally use this condition key to restrict incoming requests to use a specific authentication method. - S3AuthType, // KeyName = "s3:authType" - - // Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/tagging-and-policies.html - ExistingObjectTag, // KeyName = "s3:ExistingObjectTag" - RequestObjectTagKeys, // KeyName = "s3:RequestObjectTagKeys" - RequestObjectTag, // KeyName = "s3:RequestObjectTag" - - // JWTSub - JWT subject claim substitution. - JWTSub, //KeyName = "jwt:sub" - - // JWTIss issuer claim substitution. - JWTIss, //KeyName = "jwt:iss" - - // JWTAud audience claim substitution. - JWTAud, //KeyName = "jwt:aud" - - // JWTJti JWT unique identifier claim substitution. - JWTJti, //KeyName = "jwt:jti" - - JWTUpn, //KeyName = "jwt:upn" - JWTName, //KeyName = "jwt:name" - JWTGroups, //KeyName = "jwt:groups" - JWTGivenName, //KeyName = "jwt:given_name" - JWTFamilyName, //KeyName = "jwt:family_name" - JWTMiddleName, //KeyName = "jwt:middle_name" - JWTNickName, //KeyName = "jwt:nickname" - JWTPrefUsername, //KeyName = "jwt:preferred_username" - JWTProfile, //KeyName = "jwt:profile" - JWTPicture, //KeyName = "jwt:picture" - JWTWebsite, //KeyName = "jwt:website" - JWTEmail, //KeyName = "jwt:email" - JWTGender, //KeyName = "jwt:gender" - JWTBirthdate, //KeyName = "jwt:birthdate" - JWTPhoneNumber, //KeyName = "jwt:phone_number" - JWTAddress, //KeyName = "jwt:address" - JWTScope, //KeyName = "jwt:scope" - JWTClientID, //KeyName = "jwt:client_id" - - // LDAPUser - LDAP username, this value is equal to your authenticating LDAP user DN. - LDAPUser, // KeyName = "ldap:user" - - // LDAPUsername - LDAP username, is the authenticated simple user. - LDAPUsername, // KeyName = "ldap:username" - - // LDAPGroups - LDAP groups, this value is equal LDAP Group DNs for the authenticating user. - LDAPGroups, // KeyName = "ldap:groups" - - // STSDurationSeconds - Duration seconds condition for STS policy - STSDurationSeconds, // KeyName = "sts:DurationSeconds" - // SVCDurationSeconds - Duration seconds condition for Admin policy - SVCDurationSeconds, // KeyName = "svc:DurationSeconds" - - Undefined, -} - -lazy_static! 
{ - pub static ref JWTKEYS: Vec = { - vec![ - KeyName::JWTSub, - KeyName::JWTIss, - KeyName::JWTAud, - KeyName::JWTJti, - KeyName::JWTName, - KeyName::JWTUpn, - KeyName::JWTGroups, - KeyName::JWTGivenName, - KeyName::JWTFamilyName, - KeyName::JWTMiddleName, - KeyName::JWTNickName, - KeyName::JWTPrefUsername, - KeyName::JWTProfile, - KeyName::JWTPicture, - KeyName::JWTWebsite, - KeyName::JWTEmail, - KeyName::JWTGender, - KeyName::JWTBirthdate, - KeyName::JWTPhoneNumber, - KeyName::JWTAddress, - KeyName::JWTScope, - KeyName::JWTClientID, - ] - }; - pub static ref ALL_SUPPORT_KEYS: Vec = { - vec![ - KeyName::S3SignatureVersion, - KeyName::S3AuthType, - KeyName::S3SignatureAge, - KeyName::S3XAmzCopySource, - KeyName::S3XAmzServerSideEncryption, - KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm, - KeyName::S3XAmzMetadataDirective, - KeyName::S3XAmzStorageClass, - KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID, - KeyName::S3XAmzContentSha256, - KeyName::S3LocationConstraint, - KeyName::S3Prefix, - KeyName::S3Delimiter, - KeyName::S3MaxKeys, - KeyName::S3VersionID, - KeyName::S3ObjectLockRemainingRetentionDays, - KeyName::S3ObjectLockMode, - KeyName::S3ObjectLockLegalHold, - KeyName::S3ObjectLockRetainUntilDate, - KeyName::AWSReferer, - KeyName::AWSSourceIP, - KeyName::AWSUserAgent, - KeyName::AWSSecureTransport, - KeyName::AWSCurrentTime, - KeyName::AWSEpochTime, - KeyName::AWSPrincipalType, - KeyName::AWSUserID, - KeyName::AWSUsername, - KeyName::AWSGroups, - KeyName::LDAPUser, - KeyName::LDAPUsername, - KeyName::LDAPGroups, - KeyName::RequestObjectTag, - KeyName::ExistingObjectTag, - KeyName::RequestObjectTagKeys, - KeyName::JWTSub, - KeyName::JWTIss, - KeyName::JWTAud, - KeyName::JWTJti, - KeyName::JWTName, - KeyName::JWTUpn, - KeyName::JWTGroups, - KeyName::JWTGivenName, - KeyName::JWTFamilyName, - KeyName::JWTMiddleName, - KeyName::JWTNickName, - KeyName::JWTPrefUsername, - KeyName::JWTProfile, - KeyName::JWTPicture, - KeyName::JWTWebsite, - KeyName::JWTEmail, - KeyName::JWTGender, - KeyName::JWTBirthdate, - KeyName::JWTPhoneNumber, - KeyName::JWTAddress, - KeyName::JWTScope, - KeyName::JWTClientID, - KeyName::STSDurationSeconds, - KeyName::SVCDurationSeconds, - ] - }; - pub static ref COMMOM_KEYS: Vec = { - let mut keys = vec![ - KeyName::S3SignatureVersion, - KeyName::S3AuthType, - KeyName::S3SignatureAge, - KeyName::S3XAmzContentSha256, - KeyName::S3LocationConstraint, - KeyName::AWSReferer, - KeyName::AWSSourceIP, - KeyName::AWSUserAgent, - KeyName::AWSSecureTransport, - KeyName::AWSCurrentTime, - KeyName::AWSEpochTime, - KeyName::AWSPrincipalType, - KeyName::AWSUserID, - KeyName::AWSUsername, - KeyName::AWSGroups, - KeyName::LDAPUser, - KeyName::LDAPUsername, - KeyName::LDAPGroups, - ]; - - keys.extend(JWTKEYS.iter().cloned()); - - keys - }; - pub static ref ALL_SUPPORT_ADMIN_KEYS: Vec = { - let mut keys = vec![ - KeyName::AWSReferer, - KeyName::AWSSourceIP, - KeyName::AWSUserAgent, - KeyName::AWSSecureTransport, - KeyName::AWSCurrentTime, - KeyName::AWSEpochTime, - KeyName::AWSPrincipalType, - KeyName::AWSUserID, - KeyName::AWSUsername, - KeyName::AWSGroups, - KeyName::LDAPUser, - KeyName::LDAPUsername, - KeyName::LDAPGroups, - KeyName::SVCDurationSeconds, - ]; - - keys.extend(JWTKEYS.iter().cloned()); - - keys - }; - pub static ref ALL_SUPPORT_STS_KEYS: Vec = vec![KeyName::STSDurationSeconds]; -} - -// 实现KeyName枚举的方法 -impl KeyName { - pub fn name(&self) -> &str { - let name = self.as_str(); - if name.starts_with("aws:") { - name.trim_start_matches("aws:") - } else if 
name.starts_with("jwt:") { - name.trim_start_matches("jwt:") - } else if name.starts_with("ldap:") { - name.trim_start_matches("ldap:") - } else if name.starts_with("sts:") { - name.trim_start_matches("sts:") - } else if name.starts_with("svc:") { - name.trim_start_matches("svc:") - } else { - name.trim_start_matches("s3:") - } - } - - // Name方法,返回键名的名称 - pub fn as_str(&self) -> &str { - match self { - KeyName::S3XAmzCopySource => "s3:x-amz-copy-source", - KeyName::S3XAmzServerSideEncryption => "s3:x-amz-server-side-encryption", - KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm => "s3:x-amz-server-side-encryption-customer-algorithm", - KeyName::S3XAmzMetadataDirective => "s3:x-amz-metadata-directive", - KeyName::S3XAmzContentSha256 => "s3:x-amz-content-sha256", - KeyName::S3XAmzStorageClass => "s3:x-amz-storage-class", - KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID => "s3:x-amz-server-side-encryption-aws-kms-key-id", - KeyName::S3LocationConstraint => "s3:LocationConstraint", - KeyName::S3Prefix => "s3:prefix", - KeyName::S3Delimiter => "s3:delimiter", - KeyName::S3VersionID => "s3:versionid", - KeyName::S3MaxKeys => "s3:max-keys", - KeyName::S3ObjectLockRemainingRetentionDays => "s3:object-lock-remaining-retention-days", - KeyName::S3ObjectLockMode => "s3:object-lock-mode", - KeyName::S3ObjectLockRetainUntilDate => "s3:object-lock-retain-until-date", - KeyName::S3ObjectLockLegalHold => "s3:object-lock-legal-hold", - KeyName::AWSReferer => "aws:Referer", - KeyName::AWSSourceIP => "aws:SourceIp", - KeyName::AWSUserAgent => "aws:UserAgent", - KeyName::AWSSecureTransport => "aws:SecureTransport", - KeyName::AWSCurrentTime => "aws:CurrentTime", - KeyName::AWSEpochTime => "aws:EpochTime", - KeyName::AWSPrincipalType => "aws:principaltype", - KeyName::AWSUserID => "aws:userid", - KeyName::AWSUsername => "aws:username", - KeyName::AWSGroups => "ss", - KeyName::S3SignatureVersion => "s3:signatureversion", - KeyName::S3SignatureAge => "s3:signatureAge", - KeyName::S3AuthType => "s3:authType", - KeyName::ExistingObjectTag => "s3:ExistingObjectTag", - KeyName::RequestObjectTagKeys => "s3:RequestObjectTagKeys", - KeyName::RequestObjectTag => "s3:RequestObjectTag", - KeyName::JWTSub => "jwt:sub", - KeyName::JWTIss => "jwt:iss", - KeyName::JWTAud => "jwt:aud", - KeyName::JWTJti => "jwt:jti", - KeyName::JWTUpn => "jwt:upn", - KeyName::JWTName => "jwt:name", - KeyName::JWTGroups => "jwt:groups", - KeyName::JWTGivenName => "jwt:given_name", - KeyName::JWTFamilyName => "jwt:family_name", - KeyName::JWTMiddleName => "jwt:middle_name", - KeyName::JWTNickName => "jwt:nickname", - KeyName::JWTPrefUsername => "jwt:preferred_username", - KeyName::JWTProfile => "jwt:profile", - KeyName::JWTPicture => "jwt:picture", - KeyName::JWTWebsite => "jwt:website", - KeyName::JWTEmail => "jwt:email", - KeyName::JWTGender => "jwt:gender", - KeyName::JWTBirthdate => "jwt:birthdate", - KeyName::JWTPhoneNumber => "jwt:phone_number", - KeyName::JWTAddress => "jwt:address", - KeyName::JWTScope => "jwt:scope", - KeyName::JWTClientID => "jwt:client_id", - KeyName::LDAPUser => "ldap:user", - KeyName::LDAPUsername => "ldap:username", - KeyName::LDAPGroups => "ldap:groups", - KeyName::STSDurationSeconds => "sts:DurationSeconds", - KeyName::SVCDurationSeconds => "svc:DurationSeconds", - KeyName::Undefined => "", - } - } - - // VarName方法,返回变量键名,例如 "${aws:username}" - pub fn var_name(&self) -> String { - format!("${{{}}}", self.name()) - } - - // ToKey方法,从名称创建键 - pub fn to_key(&self) -> Key { - Key::new(self.clone(), 
"".to_string()) - } -} - -impl fmt::Display for KeyName { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.as_str()) - } -} - -impl FromStr for KeyName { - type Err = Error; - - fn from_str(s: &str) -> Result { - match s { - "s3:x-amz-copy-source" => Ok(KeyName::S3XAmzCopySource), - "s3:x-amz-server-side-encryption" => Ok(KeyName::S3XAmzServerSideEncryption), - "s3:x-amz-server-side-encryption-customer-algorithm" => Ok(KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm), - "s3:x-amz-metadata-directive" => Ok(KeyName::S3XAmzMetadataDirective), - "s3:x-amz-content-sha256" => Ok(KeyName::S3XAmzContentSha256), - "s3:x-amz-storage-class" => Ok(KeyName::S3XAmzStorageClass), - "s3:x-amz-server-side-encryption-aws-kms-key-id" => Ok(KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID), - "s3:LocationConstraint" => Ok(KeyName::S3LocationConstraint), - "s3:prefix" => Ok(KeyName::S3Prefix), - "s3:delimiter" => Ok(KeyName::S3Delimiter), - "s3:versionid" => Ok(KeyName::S3VersionID), - "s3:max-keys" => Ok(KeyName::S3MaxKeys), - "s3:object-lock-remaining-retention-days" => Ok(KeyName::S3ObjectLockRemainingRetentionDays), - "s3:object-lock-mode" => Ok(KeyName::S3ObjectLockMode), - "s3:object-lock-retain-until-date" => Ok(KeyName::S3ObjectLockRetainUntilDate), - "s3:object-lock-legal-hold" => Ok(KeyName::S3ObjectLockLegalHold), - "aws:Referer" => Ok(KeyName::AWSReferer), - "aws:SourceIp" => Ok(KeyName::AWSSourceIP), - "aws:UserAgent" => Ok(KeyName::AWSUserAgent), - "aws:SecureTransport" => Ok(KeyName::AWSSecureTransport), - "aws:CurrentTime" => Ok(KeyName::AWSCurrentTime), - "aws:EpochTime" => Ok(KeyName::AWSEpochTime), - "aws:principaltype" => Ok(KeyName::AWSPrincipalType), - "aws:userid" => Ok(KeyName::AWSUserID), - "aws:username" => Ok(KeyName::AWSUsername), - "aws:groups" => Ok(KeyName::AWSGroups), - "s3:signatureversion" => Ok(KeyName::S3SignatureVersion), - "s3:signatureAge" => Ok(KeyName::S3SignatureAge), - "s3:authType" => Ok(KeyName::S3AuthType), - "s3:ExistingObjectTag" => Ok(KeyName::ExistingObjectTag), - "s3:RequestObjectTagKeys" => Ok(KeyName::RequestObjectTagKeys), - "s3:RequestObjectTag" => Ok(KeyName::RequestObjectTag), - "jwt:sub" => Ok(KeyName::JWTSub), - "jwt:iss" => Ok(KeyName::JWTIss), - "jwt:aud" => Ok(KeyName::JWTAud), - "jwt:jti" => Ok(KeyName::JWTJti), - "jwt:upn" => Ok(KeyName::JWTUpn), - "jwt:name" => Ok(KeyName::JWTName), - "jwt:groups" => Ok(KeyName::JWTGroups), - "jwt:given_name" => Ok(KeyName::JWTGivenName), - "jwt:family_name" => Ok(KeyName::JWTFamilyName), - "jwt:middle_name" => Ok(KeyName::JWTMiddleName), - "jwt:nickname" => Ok(KeyName::JWTNickName), - "jwt:preferred_username" => Ok(KeyName::JWTPrefUsername), - "jwt:profile" => Ok(KeyName::JWTProfile), - "jwt:picture" => Ok(KeyName::JWTPicture), - "jwt:website" => Ok(KeyName::JWTWebsite), - "jwt:email" => Ok(KeyName::JWTEmail), - "jwt:gender" => Ok(KeyName::JWTGender), - "jwt:birthdate" => Ok(KeyName::JWTBirthdate), - "jwt:phone_number" => Ok(KeyName::JWTPhoneNumber), - "jwt:address" => Ok(KeyName::JWTAddress), - "jwt:scope" => Ok(KeyName::JWTScope), - "jwt:client_id" => Ok(KeyName::JWTClientID), - "ldap:user" => Ok(KeyName::LDAPUser), - "ldap:username" => Ok(KeyName::LDAPUsername), - "ldap:groups" => Ok(KeyName::LDAPGroups), - "sts:DurationSeconds" => Ok(KeyName::STSDurationSeconds), - "svc:DurationSeconds" => Ok(KeyName::SVCDurationSeconds), - _ => Err(Error::msg(format!("keyname not found: {}", s))), - } - } -} diff --git a/ecstore/src/bucket/policy/condition/mod.rs 
b/ecstore/src/bucket/policy/condition/mod.rs
deleted file mode 100644
index 46e6fa03..00000000
--- a/ecstore/src/bucket/policy/condition/mod.rs
+++ /dev/null
@@ -1,4 +0,0 @@
-pub mod function;
-pub mod key;
-pub mod keyname;
-pub mod name;
diff --git a/ecstore/src/bucket/policy/condition/name.rs b/ecstore/src/bucket/policy/condition/name.rs
deleted file mode 100644
index be8541db..00000000
--- a/ecstore/src/bucket/policy/condition/name.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-// Name enum definition
-#[derive(Debug, Clone, PartialEq, Eq, Hash)]
-pub enum Name {
- StringEquals,
- StringNotEquals,
- StringEqualsIgnoreCase,
- StringNotEqualsIgnoreCase,
- StringLike,
- StringNotLike,
- BinaryEquals,
- IpAddress,
- NotIpAddress,
- Null,
- Bool,
- NumericEquals,
- NumericNotEquals,
- NumericLessThan,
- NumericLessThanEquals,
- NumericGreaterThan,
- NumericGreaterThanIfExists,
- NumericGreaterThanEquals,
- DateEquals,
- DateNotEquals,
- DateLessThan,
- DateLessThanEquals,
- DateGreaterThan,
- DateGreaterThanEquals,
- ForAllValues,
- ForAnyValue,
-}
-
-impl Name {
- pub fn as_str(&self) -> &'static str {
- match self {
- Name::StringEquals => "StringEquals",
- Name::StringNotEquals => "StringNotEquals",
- Name::StringEqualsIgnoreCase => "StringEqualsIgnoreCase",
- Name::StringNotEqualsIgnoreCase => "StringNotEqualsIgnoreCase",
- Name::StringLike => "StringLike",
- Name::StringNotLike => "StringNotLike",
- Name::BinaryEquals => "BinaryEquals",
- Name::IpAddress => "IpAddress",
- Name::NotIpAddress => "NotIpAddress",
- Name::Null => "Null",
- Name::Bool => "Bool",
- Name::NumericEquals => "NumericEquals",
- Name::NumericNotEquals => "NumericNotEquals",
- Name::NumericLessThan => "NumericLessThan",
- Name::NumericLessThanEquals => "NumericLessThanEquals",
- Name::NumericGreaterThan => "NumericGreaterThan",
- Name::NumericGreaterThanIfExists => "NumericGreaterThanIfExists",
- Name::NumericGreaterThanEquals => "NumericGreaterThanEquals",
- Name::DateEquals => "DateEquals",
- Name::DateNotEquals => "DateNotEquals",
- Name::DateLessThan => "DateLessThan",
- Name::DateLessThanEquals => "DateLessThanEquals",
- Name::DateGreaterThan => "DateGreaterThan",
- Name::DateGreaterThanEquals => "DateGreaterThanEquals",
- Name::ForAllValues => "ForAllValues",
- Name::ForAnyValue => "ForAnyValue",
- }
- }
-}
-
-// impl ToString for Name {
-// fn to_string(&self) -> String {
-// self.as_str().to_string()
-// }
-// }
-
-impl std::fmt::Display for Name {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
- write!(f, "{}", self.as_str())
- }
-}
diff --git a/ecstore/src/bucket/policy/effect.rs b/ecstore/src/bucket/policy/effect.rs
deleted file mode 100644
index 6ec86ef6..00000000
--- a/ecstore/src/bucket/policy/effect.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-use serde::{Deserialize, Serialize};
-use std::str::FromStr;
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Default)]
-#[serde(rename_all = "PascalCase")]
-pub enum Effect {
- #[default]
- Allow,
- Deny,
-}
-
-impl Effect {
- pub fn is_allowed(self, b: bool) -> bool {
- if self == Effect::Allow {
- b
- } else {
- !b
- }
- }
-
- pub fn is_valid(self) -> bool {
- match self {
- Effect::Allow => true,
- Effect::Deny => true,
- }
- }
-}
-
-// FromStr implementation for parsing an Effect from a string
-impl FromStr for Effect {
- type Err = ();
-
- fn from_str(s: &str) -> Result<Self, Self::Err> {
- match s {
- "Allow" => Ok(Effect::Allow),
- "Deny" => Ok(Effect::Deny),
- _ => Err(()),
- }
- }
-}
diff --git a/ecstore/src/bucket/policy/mod.rs b/ecstore/src/bucket/policy/mod.rs
deleted file mode 100644
index a0d54685..00000000
--- a/ecstore/src/bucket/policy/mod.rs
+++ /dev/null
@@ -1,103 +0,0 @@
-// use std::collections::HashMap;
-
-// use action::Action;
-// use s3s_policy::model::{Effect, Policy, Principal, PrincipalRule, Statement};
-// use serde::{Deserialize, Serialize};
-// use tower::ready_cache::cache::Equivalent;
-
-// use crate::utils::wildcard;
-
-pub mod action;
-pub mod bucket_policy;
-pub mod condition;
-pub mod effect;
-pub mod principal;
-pub mod resource;
-
-// #[derive(Debug, Deserialize, Serialize, Default, Clone)]
-// pub struct BucketPolicyArgs {
-// pub account_name: String,
-// pub groups: Vec<String>,
-// pub action: Action,
-// pub bucket_name: String,
-// pub condition_values: HashMap<String, Vec<String>>,
-// pub is_owner: bool,
-// pub object_name: String,
-// }
-
-// pub trait AllowApi {
-// fn is_allowed(&self, args: &BucketPolicyArgs) -> bool;
-// }
-
-// pub trait MatchApi {
-// fn is_match(&self, found: &str) -> bool;
-// }
-
-// impl AllowApi for Policy {
-// fn is_allowed(&self, args: &BucketPolicyArgs) -> bool {
-// for statement in self.statement.as_slice().iter() {
-// if statement.effect == Effect::Deny {
-// if !statement.is_allowed(args) {
-// return false;
-// }
-// }
-// }
-// false
-// }
-// }
-
-// impl AllowApi for Statement {
-// fn is_allowed(&self, args: &BucketPolicyArgs) -> bool {
-// let check = || -> bool {
-// if let Some(principal) = &self.principal {
-// if !principal.is_match(&args.account_name) {
-// return false;
-// }
-// }
-
-// false
-// };
-
-// self.effect.is_allowed(check())
-// }
-// }
-
-// impl MatchApi for PrincipalRule {
-// fn is_match(&self, found: &str) -> bool {
-// match self {
-// PrincipalRule::Principal(principal) => match principal {
-// Principal::Wildcard => return true,
-// Principal::Map(index_map) => {
-// if let Some(keys) = index_map.get("AWS") {
-// for key in keys.as_slice() {
-// if wildcard::match_simple(key, found) {
-// return true;
-// }
-// }
-// }
-// return false;
-// }
-// },
-// PrincipalRule::NotPrincipal(principal) => match principal {
-// Principal::Wildcard => return true,
-// Principal::Map(index_map) => todo!(),
-// },
-// }
-
-// false
-// }
-// }
-
-// trait EffectApi {
-// fn is_allowed(&self, b: bool) -> bool;
-// }
-
-// impl EffectApi for Effect {
-// fn is_allowed(&self, b: bool) -> bool {
-// if self == &Effect::Allow {
-// b
-// } else {
-// !b
-// }
-// }
-// }
diff --git a/ecstore/src/bucket/policy/resource.rs b/ecstore/src/bucket/policy/resource.rs
deleted file mode 100644
index b9c520f5..00000000
--- a/ecstore/src/bucket/policy/resource.rs
+++ /dev/null
@@ -1,263 +0,0 @@
-use crate::error::{Error, Result};
-use crate::{
- bucket::policy::condition::keyname::COMMOM_KEYS,
- utils::{self, wildcard},
-};
-use core::fmt;
-use serde::{Deserialize, Serialize};
-use std::{
- collections::{HashMap, HashSet},
- str::FromStr,
-};
-
-// ResourceARNType enum definition
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize, Default)]
-pub enum ResourceARNType {
- #[default]
- UnknownARN,
- ResourceARNS3,
- ResourceARNKMS,
-}
-
-impl fmt::Display for ResourceARNType {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- match self {
- ResourceARNType::UnknownARN => write!(f, ""),
- ResourceARNType::ResourceARNS3 => write!(f, "{}", RESOURCE_ARN_PREFIX),
- ResourceARNType::ResourceARNKMS => write!(f, "{}", RESOURCE_ARN_KMS_PREFIX),
- }
- }
-}
-
-// Resource ARN prefixes
-const RESOURCE_ARN_PREFIX: &str = "arn:aws:s3:::";
-const RESOURCE_ARN_KMS_PREFIX: &str = "arn:rustfs:kms::::";
-
-// Resource struct definition
-#[derive(Debug, Default, PartialEq, Eq, Hash, Clone)] -pub struct Resource { - pattern: String, - rtype: ResourceARNType, -} - -impl Resource { - pub fn new(pattern: &str) -> Self { - Self { - pattern: pattern.to_owned(), - rtype: ResourceARNType::ResourceARNS3, - } - } - pub fn validate_bucket(&self, bucket: &str) -> Result<()> { - self.validate()?; - if !wildcard::match_pattern(&self.pattern, bucket) - && !wildcard::match_as_pattern_prefix(&self.pattern, format!("{}/", bucket).as_str()) - { - return Err(Error::msg("bucket name does not match")); - } - Ok(()) - } - pub fn validate(&self) -> Result<()> { - if !self.is_valid() { - Err(Error::msg("invalid resource")) - } else { - Ok(()) - } - } - pub fn is_valid(&self) -> bool { - if self.rtype == ResourceARNType::UnknownARN { - return false; - } - if self.is_s3() && self.pattern.starts_with('/') { - return false; - } - if self.is_kms() && self.pattern.as_bytes().iter().any(|&v| v == b'/' || v == b'\\' || v == b'.') { - return false; - } - - !self.pattern.is_empty() - } - pub fn is_s3(&self) -> bool { - self.rtype == ResourceARNType::ResourceARNS3 - } - pub fn is_kms(&self) -> bool { - self.rtype == ResourceARNType::ResourceARNKMS - } - pub fn is_bucket_pattern(&self) -> bool { - !self.pattern.contains('/') || self.pattern.eq("*") - } - pub fn is_object_pattern(&self) -> bool { - self.pattern.contains('/') || self.pattern.contains('*') - } - pub fn is_match(&self, res: &str, condition_values: &HashMap>) -> bool { - let mut pattern = res.to_string(); - if !condition_values.is_empty() { - for key in COMMOM_KEYS.iter() { - if let Some(vals) = condition_values.get(key.name()) { - if let Some(v0) = vals.first() { - pattern = pattern.replace(key.name(), v0); - } - } - } - } - - let cp = utils::path::clean(res); - - if cp != "." 
&& cp == pattern { - return true; - } - - wildcard::match_pattern(&pattern, res) - } -} - -impl fmt::Display for Resource { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}{}", self.rtype, self.pattern) - } -} - -impl FromStr for Resource { - type Err = serde_json::Error; - - fn from_str(s: &str) -> Result { - if s.starts_with(RESOURCE_ARN_PREFIX) { - let pattern = { - if let Some(val) = s.strip_prefix(RESOURCE_ARN_PREFIX) { - val.to_string() - } else { - s.to_string() - } - }; - Ok(Self { - rtype: ResourceARNType::ResourceARNS3, - pattern, - }) - } else if s.starts_with(RESOURCE_ARN_KMS_PREFIX) { - let pattern = { - if let Some(val) = s.strip_prefix(RESOURCE_ARN_KMS_PREFIX) { - val.to_string() - } else { - s.to_string() - } - }; - Ok(Self { - rtype: ResourceARNType::ResourceARNS3, - pattern, - }) - } else { - Ok(Self { - rtype: ResourceARNType::UnknownARN, - pattern: "".to_string(), - }) - } - } -} - -impl Serialize for Resource { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_str(self.to_string().as_str()) - } -} - -impl<'de> Deserialize<'de> for Resource { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - struct Visitor; - - #[allow(clippy::needless_lifetimes)] - impl<'de> serde::de::Visitor<'de> for Visitor { - type Value = Resource; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - formatter.write_str("string resource") - } - - fn visit_str(self, value: &str) -> Result - where - E: serde::de::Error, - { - match Resource::from_str(value) { - Ok(res) => Ok(res), - Err(_) => Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(value), &self)), - } - } - } - - deserializer.deserialize_any(Visitor) - } -} - -#[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq, Eq)] -#[serde(transparent)] -pub struct ResourceSet(pub HashSet); - -impl ResourceSet { - pub fn validate_bucket(&self, bucket: &str) -> Result<()> { - for res in self.0.iter() { - res.validate_bucket(bucket)?; - } - Ok(()) - } - - pub fn is_match(&self, res: &str, condition_values: &HashMap>) -> bool { - for item in self.0.iter() { - if item.is_match(res, condition_values) { - return true; - } - } - - false - } - pub fn object_resource_exists(&self) -> bool { - for res in self.0.iter() { - if res.is_object_pattern() { - return true; - } - } - false - } - pub fn bucket_resource_exists(&self) -> bool { - for res in self.0.iter() { - if res.is_bucket_pattern() { - return true; - } - } - false - } - - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - -impl AsRef> for ResourceSet { - fn as_ref(&self) -> &HashSet { - &self.0 - } -} - -// impl Serialize for ResourceSet { -// fn serialize(&self, serializer: S) -> Result -// where -// S: serde::Serializer, -// { -// let ress: Vec = self.0.iter().cloned().collect(); -// serializer.collect_seq(ress) -// } -// } - -// impl<'de> Deserialize<'de> for ResourceSet { -// fn deserialize(deserializer: D) -> Result -// where -// D: Deserializer<'de>, -// { -// let vec: Vec = Deserialize::deserialize(deserializer)?; -// let ha: HashSet = vec.into_iter().collect(); -// Ok(ResourceSet(ha)) -// } -// } diff --git a/ecstore/src/bucket/policy_sys.rs b/ecstore/src/bucket/policy_sys.rs index b72cb333..2a505a8b 100644 --- a/ecstore/src/bucket/policy_sys.rs +++ b/ecstore/src/bucket/policy_sys.rs @@ -1,16 +1,13 @@ -use super::{ - error::BucketMetadataError, - metadata_sys::get_bucket_metadata_sys, - 
policy::bucket_policy::{BucketPolicy, BucketPolicyArgs}, -}; -use crate::error::Result; +use super::{error::BucketMetadataError, metadata_sys::get_bucket_metadata_sys}; +use common::error::Result; +use policy::policy::{BucketPolicy, BucketPolicyArgs}; use tracing::warn; pub struct PolicySys {} impl PolicySys { - pub async fn is_allowed(args: &BucketPolicyArgs) -> bool { - match Self::get(&args.bucket_name).await { + pub async fn is_allowed(args: &BucketPolicyArgs<'_>) -> bool { + match Self::get(args.bucket).await { Ok(cfg) => return cfg.is_allowed(args), Err(err) => { if !BucketMetadataError::BucketPolicyNotFound.is(&err) { @@ -22,7 +19,7 @@ impl PolicySys { args.is_owner } pub async fn get(bucket: &str) -> Result { - let bucket_meta_sys_lock = get_bucket_metadata_sys(); + let bucket_meta_sys_lock = get_bucket_metadata_sys()?; let bucket_meta_sys = bucket_meta_sys_lock.write().await; let (cfg, _) = bucket_meta_sys.get_bucket_policy(bucket).await?; diff --git a/ecstore/src/bucket/quota/mod.rs b/ecstore/src/bucket/quota/mod.rs index a7753d85..c3f38e84 100644 --- a/ecstore/src/bucket/quota/mod.rs +++ b/ecstore/src/bucket/quota/mod.rs @@ -1,4 +1,4 @@ -use crate::error::Result; +use common::error::Result; use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; diff --git a/ecstore/src/bucket/target/mod.rs b/ecstore/src/bucket/target/mod.rs index f830c522..cb8797f2 100644 --- a/ecstore/src/bucket/target/mod.rs +++ b/ecstore/src/bucket/target/mod.rs @@ -1,4 +1,4 @@ -use crate::error::Result; +use common::error::Result; use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; use std::time::Duration; diff --git a/ecstore/src/bucket/utils.rs b/ecstore/src/bucket/utils.rs index 613f1b3b..28ed670b 100644 --- a/ecstore/src/bucket/utils.rs +++ b/ecstore/src/bucket/utils.rs @@ -1,4 +1,5 @@ -use crate::{disk::RUSTFS_META_BUCKET, error::Error}; +use crate::disk::RUSTFS_META_BUCKET; +use common::error::{Error, Result}; pub fn is_meta_bucketname(name: &str) -> bool { name.starts_with(RUSTFS_META_BUCKET) diff --git a/ecstore/src/bucket/versioning_sys.rs b/ecstore/src/bucket/versioning_sys.rs index c893b6fd..46549859 100644 --- a/ecstore/src/bucket/versioning_sys.rs +++ b/ecstore/src/bucket/versioning_sys.rs @@ -1,6 +1,6 @@ use super::{metadata_sys::get_bucket_metadata_sys, versioning::VersioningApi}; use crate::disk::RUSTFS_META_BUCKET; -use crate::error::Result; +use common::error::Result; use s3s::dto::VersioningConfiguration; use tracing::warn; @@ -61,7 +61,7 @@ impl BucketVersioningSys { return Ok(VersioningConfiguration::default()); } - let bucket_meta_sys_lock = get_bucket_metadata_sys(); + let bucket_meta_sys_lock = get_bucket_metadata_sys()?; let bucket_meta_sys = bucket_meta_sys_lock.write().await; let (cfg, _) = bucket_meta_sys.get_versioning_config(bucket).await?; diff --git a/ecstore/src/cache_value/cache.rs b/ecstore/src/cache_value/cache.rs index 96833dd9..9de88d1a 100644 --- a/ecstore/src/cache_value/cache.rs +++ b/ecstore/src/cache_value/cache.rs @@ -14,7 +14,7 @@ use std::{ use tokio::{spawn, sync::Mutex}; -use crate::error::Result; +use common::error::Result; pub type UpdateFn = Box Pin> + Send>> + Send + Sync + 'static>; diff --git a/ecstore/src/cache_value/metacache_set.rs b/ecstore/src/cache_value/metacache_set.rs index 401561b6..a0a6613c 100644 --- a/ecstore/src/cache_value/metacache_set.rs +++ b/ecstore/src/cache_value/metacache_set.rs @@ -1,11 +1,9 @@ +use crate::disk::{DiskAPI, DiskStore, MetaCacheEntries, MetaCacheEntry, 
WalkDirOptions}; use crate::{ disk::error::{is_err_eof, is_err_file_not_found, is_err_volume_not_found, DiskError}, metacache::writer::MetacacheReader, }; -use crate::{ - disk::{DiskAPI, DiskStore, MetaCacheEntries, MetaCacheEntry, WalkDirOptions}, - error::{Error, Result}, -}; +use common::error::{Error, Result}; use futures::future::join_all; use std::{future::Future, pin::Pin, sync::Arc}; use tokio::{spawn, sync::broadcast::Receiver as B_Receiver}; @@ -140,7 +138,11 @@ pub async fn list_path_raw(mut rx: B_Receiver, opts: ListPathRawOptions) - } let revjob = spawn(async move { - let mut errs: Vec> = vec![None; readers.len()]; + let mut errs: Vec> = Vec::with_capacity(readers.len()); + for _ in 0..readers.len() { + errs.push(None); + } + loop { let mut current = MetaCacheEntry::default(); diff --git a/ecstore/src/config/common.rs b/ecstore/src/config/com.rs similarity index 99% rename from ecstore/src/config/common.rs rename to ecstore/src/config/com.rs index 837f577a..d086f942 100644 --- a/ecstore/src/config/common.rs +++ b/ecstore/src/config/com.rs @@ -1,10 +1,10 @@ use super::error::{is_err_config_not_found, ConfigError}; use super::{storageclass, Config, GLOBAL_StorageClass, KVS}; use crate::disk::RUSTFS_META_BUCKET; -use crate::error::{Error, Result}; use crate::store_api::{ObjectInfo, ObjectOptions, PutObjReader, StorageAPI}; use crate::store_err::is_err_object_not_found; use crate::utils::path::SLASH_SEPARATOR; +use common::error::{Error, Result}; use http::HeaderMap; use lazy_static::lazy_static; use std::collections::HashSet; diff --git a/ecstore/src/config/error.rs b/ecstore/src/config/error.rs index f734adb9..bc25d4ba 100644 --- a/ecstore/src/config/error.rs +++ b/ecstore/src/config/error.rs @@ -1,4 +1,5 @@ -use crate::{disk, error::Error, store_err::is_err_object_not_found}; +use crate::{disk, store_err::is_err_object_not_found}; +use common::error::Error; #[derive(Debug, PartialEq, thiserror::Error)] pub enum ConfigError { diff --git a/ecstore/src/config/heal.rs b/ecstore/src/config/heal.rs index 91399716..6fcaf73f 100644 --- a/ecstore/src/config/heal.rs +++ b/ecstore/src/config/heal.rs @@ -1,9 +1,7 @@ use std::time::Duration; -use crate::{ - error::{Error, Result}, - utils::bool_flag::parse_bool, -}; +use crate::utils::bool_flag::parse_bool; +use common::error::{Error, Result}; #[derive(Debug, Default)] pub struct Config { diff --git a/ecstore/src/config/mod.rs b/ecstore/src/config/mod.rs index c4bae997..ffe00477 100644 --- a/ecstore/src/config/mod.rs +++ b/ecstore/src/config/mod.rs @@ -1,12 +1,12 @@ -pub mod common; +pub mod com; pub mod error; #[allow(dead_code)] pub mod heal; pub mod storageclass; -use crate::error::Result; use crate::store::ECStore; -use common::{lookup_configs, read_config_without_migrate, STORAGE_CLASS_SUB_SYS}; +use com::{lookup_configs, read_config_without_migrate, STORAGE_CLASS_SUB_SYS}; +use common::error::Result; use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use std::collections::HashMap; diff --git a/ecstore/src/config/storageclass.rs b/ecstore/src/config/storageclass.rs index 46bc0737..3859fcb5 100644 --- a/ecstore/src/config/storageclass.rs +++ b/ecstore/src/config/storageclass.rs @@ -1,9 +1,7 @@ use std::env; -use crate::{ - config::KV, - error::{Error, Result}, -}; +use crate::config::KV; +use common::error::{Error, Result}; use super::KVS; use lazy_static::lazy_static; diff --git a/ecstore/src/disk/endpoint.rs b/ecstore/src/disk/endpoint.rs index 6e1ac442..d901969c 100644 --- a/ecstore/src/disk/endpoint.rs +++ 
b/ecstore/src/disk/endpoint.rs @@ -1,5 +1,5 @@ -use crate::error::{Error, Result}; use crate::utils::net; +use common::error::{Error, Result}; use path_absolutize::Absolutize; use std::{fmt::Display, path::Path}; use url::{ParseError, Url}; diff --git a/ecstore/src/disk/error.rs b/ecstore/src/disk/error.rs index a98c9d58..a0773b5e 100644 --- a/ecstore/src/disk/error.rs +++ b/ecstore/src/disk/error.rs @@ -2,11 +2,9 @@ use std::io::{self, ErrorKind}; use tracing::error; +use crate::quorum::CheckErrorFn; use crate::utils::ERROR_TYPE_MASK; -use crate::{ - error::{Error, Result}, - quorum::CheckErrorFn, -}; +use common::error::{Error, Result}; // DiskError == StorageErr #[derive(Debug, thiserror::Error)] @@ -565,3 +563,13 @@ pub fn is_err_os_not_exist(err: &Error) -> bool { false } } + +pub fn is_err_os_disk_full(err: &Error) -> bool { + if let Some(os_err) = err.downcast_ref::() { + is_sys_err_no_space(os_err) + } else if let Some(e) = err.downcast_ref::() { + e == &DiskError::DiskFull + } else { + false + } +} diff --git a/ecstore/src/disk/format.rs b/ecstore/src/disk/format.rs index 602ea629..0a3be1b2 100644 --- a/ecstore/src/disk/format.rs +++ b/ecstore/src/disk/format.rs @@ -1,5 +1,5 @@ use super::{error::DiskError, DiskInfo}; -use crate::error::{Error, Result}; +use common::error::{Error, Result}; use serde::{Deserialize, Serialize}; use serde_json::Error as JsonError; use uuid::Uuid; diff --git a/ecstore/src/disk/local.rs b/ecstore/src/disk/local.rs index 69b1717c..76b5f3ac 100644 --- a/ecstore/src/disk/local.rs +++ b/ecstore/src/disk/local.rs @@ -1,6 +1,6 @@ use super::error::{ - is_err_file_not_found, is_err_file_version_not_found, is_sys_err_io, is_sys_err_not_empty, is_sys_err_too_many_files, - os_is_not_exist, os_is_permission, + is_err_file_not_found, is_err_file_version_not_found, is_err_os_disk_full, is_sys_err_io, is_sys_err_not_empty, + is_sys_err_too_many_files, os_is_not_exist, os_is_permission, }; use super::os::{is_root_disk, rename_all}; use super::{endpoint::Endpoint, error::DiskError, format::FormatV3}; @@ -18,7 +18,6 @@ use crate::disk::error::{ }; use crate::disk::os::{check_path_length, is_empty_dir}; use crate::disk::STORAGE_FORMAT_FILE; -use crate::error::{Error, Result}; use crate::file_meta::{get_file_info, read_xl_meta_no_data, FileInfoOpts}; use crate::global::{GLOBAL_IsErasureSD, GLOBAL_RootDiskThreshold}; use crate::heal::data_scanner::{has_active_rules, scan_data_folder, ScannerItem, ShouldSleepFn, SizeSummary}; @@ -35,11 +34,11 @@ use crate::set_disk::{ CHECK_PART_VOLUME_NOT_FOUND, }; use crate::store_api::{BitrotAlgorithm, StorageAPI}; -use crate::utils::fs::{access, lstat, O_APPEND, O_CREATE, O_RDONLY, O_WRONLY}; +use crate::utils::fs::{access, lstat, remove, remove_all, rename, O_APPEND, O_CREATE, O_RDONLY, O_WRONLY}; use crate::utils::os::get_info; use crate::utils::path::{ - self, clean, decode_dir_object, has_suffix, path_join, path_join_buf, GLOBAL_DIR_SUFFIX, GLOBAL_DIR_SUFFIX_WITH_SLASH, - SLASH_SEPARATOR, + self, clean, decode_dir_object, encode_dir_object, has_suffix, path_join, path_join_buf, GLOBAL_DIR_SUFFIX, + GLOBAL_DIR_SUFFIX_WITH_SLASH, SLASH_SEPARATOR, }; use crate::{ file_meta::FileMeta, @@ -47,6 +46,7 @@ use crate::{ utils, }; use common::defer; +use common::error::{Error, Result}; use path_absolutize::Absolutize; use std::collections::{HashMap, HashSet}; use std::fmt::Debug; @@ -308,44 +308,46 @@ impl LocalDisk { // }) // } - pub async fn move_to_trash(&self, delete_path: &PathBuf, _recursive: bool, _immediate_purge: bool) -> Result<()> 
{
+ pub async fn move_to_trash(&self, delete_path: &PathBuf, recursive: bool, immediate_purge: bool) -> Result<()> {
 let trash_path = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
 if let Some(parent) = trash_path.parent() {
 if !parent.exists() {
 fs::create_dir_all(parent).await?;
 }
 }
- // debug!("move_to_trash from:{:?} to {:?}", &delete_path, &trash_path);
- // TODO: empty the trash
- if let Err(err) = fs::rename(&delete_path, &trash_path).await {
- match err.kind() {
- ErrorKind::NotFound => (),
- _ => {
- warn!("delete_file rename {:?} err {:?}", &delete_path, &err);
- return Err(Error::from(err));
- }
- }
+
+ let err = if recursive {
+ rename_all(delete_path, trash_path, self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?)
+ .await
+ .err()
+ } else {
+ rename(&delete_path, &trash_path).await.map_err(Error::new).err()
+ };
+
+ if immediate_purge || delete_path.to_string_lossy().ends_with(path::SLASH_SEPARATOR) {
+ warn!("move_to_trash immediate_purge {:?}", &delete_path.to_string_lossy());
+ let trash_path2 = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
+ let _ = rename_all(
+ encode_dir_object(delete_path.to_string_lossy().as_ref()),
+ trash_path2,
+ self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?,
+ )
+ .await;
 }
- // TODO: optimize. FIXME: just empty the trash for now; add proper checks when there is time
-
- if let Err(err) = {
- if trash_path.is_dir() {
- fs::remove_dir_all(&trash_path).await
- } else {
- fs::remove_file(&trash_path).await
- }
- } {
- match err.kind() {
- ErrorKind::NotFound => (),
- _ => {
- warn!("delete_file remove trash {:?} err {:?}", &trash_path, &err);
- return Err(Error::from(err));
+ if let Some(err) = err {
+ if is_err_os_disk_full(&err) {
+ if recursive {
+ remove_all(delete_path).await?;
+ } else {
+ remove(delete_path).await?;
 }
 }
+
+ return Ok(());
 }
- // TODO: immediate
+ // TODO: asynchronously check disk space and empty the trash
 Ok(())
 }
@@ -1971,7 +1973,7 @@ impl DiskAPI for LocalDisk {
 created: modtime,
 })
 }
- async fn delete_paths(&self, volume: &str, paths: &[&str]) -> Result<()> {
+ async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
 let volume_dir = self.get_bucket_path(volume)?;
 if !skip_access_checks(volume) {
 utils::fs::access(&volume_dir)
diff --git a/ecstore/src/disk/mod.rs b/ecstore/src/disk/mod.rs
index 8d737e92..fd0fb89e 100644
--- a/ecstore/src/disk/mod.rs
+++ b/ecstore/src/disk/mod.rs
@@ -16,7 +16,6 @@ pub const STORAGE_FORMAT_FILE_BACKUP: &str = "xl.meta.bkp";
 use crate::{
 bucket::{metadata_sys::get_versioning_config, versioning::VersioningApi},
- error::{Error, Result},
 file_meta::{merge_file_meta_versions, FileMeta, FileMetaShallowVersion, VersionType},
 heal::{
 data_scanner::ShouldSleepFn,
@@ -27,6 +26,7 @@ use crate::{
 store_api::{FileInfo, ObjectInfo, RawFileInfo},
 utils::path::SLASH_SEPARATOR,
 };
+use common::error::{Error, Result};
 use endpoint::Endpoint;
 use error::DiskError;
 use local::LocalDisk;
@@ -250,7 +250,7 @@ impl DiskAPI for Disk {
 }
 }
- async fn delete_paths(&self, volume: &str, paths: &[&str]) -> Result<()> {
+ async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
 match self {
 Disk::Local(local_disk) => local_disk.delete_paths(volume, paths).await,
 Disk::Remote(remote_disk) => remote_disk.delete_paths(volume, paths).await,
@@ -412,7 +412,7 @@ pub trait DiskAPI: Debug + Send + Sync + 'static {
 versions: Vec,
 opts: DeleteOptions,
 ) -> Result>>;
- async fn delete_paths(&self, volume: &str, paths: &[&str]) -> Result<()>;
+ async fn
delete_paths(&self, volume: &str, paths: &[String]) -> Result<()>; async fn write_metadata(&self, org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()>; async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()>; async fn read_version( diff --git a/ecstore/src/disk/os.rs b/ecstore/src/disk/os.rs index 175052cb..ae88611a 100644 --- a/ecstore/src/disk/os.rs +++ b/ecstore/src/disk/os.rs @@ -3,14 +3,13 @@ use std::{ path::{Component, Path}, }; -use tokio::fs; -use tracing::info; - use crate::{ disk::error::{is_sys_err_not_dir, is_sys_err_path_not_found, os_is_not_exist}, - error::{Error, Result}, utils::{self, os::same_disk}, }; +use common::error::{Error, Result}; +use tokio::fs; +use tracing::info; use super::error::{os_err_to_file_err, os_is_exist, DiskError}; @@ -137,20 +136,12 @@ pub async fn reliable_rename( base_dir: impl AsRef, ) -> io::Result<()> { if let Some(parent) = dst_file_path.as_ref().parent() { - reliable_mkdir_all(parent, base_dir.as_ref()).await?; - } - // need remove dst path - if let Err(err) = utils::fs::remove_all(dst_file_path.as_ref()).await { - if err.kind() != io::ErrorKind::NotFound { - info!( - "reliable_rename rm dst failed. src_file_path: {:?}, dst_file_path: {:?}, base_dir: {:?}, err: {:?}", - src_file_path.as_ref(), - dst_file_path.as_ref(), - base_dir.as_ref(), - err - ); + if !file_exists(parent).await { + info!("reliable_rename reliable_mkdir_all parent: {:?}", parent); + reliable_mkdir_all(parent, base_dir.as_ref()).await?; } } + let mut i = 0; loop { if let Err(e) = utils::fs::rename(src_file_path.as_ref(), dst_file_path.as_ref()).await { @@ -158,13 +149,13 @@ pub async fn reliable_rename( i += 1; continue; } - info!( - "reliable_rename failed. src_file_path: {:?}, dst_file_path: {:?}, base_dir: {:?}, err: {:?}", - src_file_path.as_ref(), - dst_file_path.as_ref(), - base_dir.as_ref(), - e - ); + // info!( + // "reliable_rename failed. 
src_file_path: {:?}, dst_file_path: {:?}, base_dir: {:?}, err: {:?}", + // src_file_path.as_ref(), + // dst_file_path.as_ref(), + // base_dir.as_ref(), + // e + // ); return Err(e); } @@ -229,3 +220,7 @@ pub async fn os_mkdir_all(dir_path: impl AsRef, base_dir: impl AsRef Ok(()) } + +pub async fn file_exists(path: impl AsRef) -> bool { + fs::metadata(path.as_ref()).await.map(|_| true).unwrap_or(false) +} diff --git a/ecstore/src/disk/remote.rs b/ecstore/src/disk/remote.rs index 43f02832..bac27b0d 100644 --- a/ecstore/src/disk/remote.rs +++ b/ecstore/src/disk/remote.rs @@ -28,7 +28,6 @@ use super::{ }; use crate::{ disk::error::DiskError, - error::{Error, Result}, heal::{ data_scanner::ShouldSleepFn, data_usage_cache::{DataUsageCache, DataUsageEntry}, @@ -41,6 +40,7 @@ use crate::{ io::{FileReader, FileWriter, HttpFileReader, HttpFileWriter}, utils::proto_err_to_err, }; +use common::error::{Error, Result}; use protos::proto_gen::node_service::RenamePartRequst; #[derive(Debug)] @@ -565,9 +565,9 @@ impl DiskAPI for RemoteDisk { Ok(volume_info) } - async fn delete_paths(&self, volume: &str, paths: &[&str]) -> Result<()> { + async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> { info!("delete_paths"); - let paths = paths.iter().map(|s| s.to_string()).collect::>(); + let paths = paths.to_owned(); let mut client = node_service_time_out_client(&self.addr) .await .map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?; diff --git a/ecstore/src/disks_layout.rs b/ecstore/src/disks_layout.rs index 62d78a91..75eab78e 100644 --- a/ecstore/src/disks_layout.rs +++ b/ecstore/src/disks_layout.rs @@ -1,5 +1,5 @@ -use crate::error::{Error, Result}; use crate::utils::ellipses::*; +use common::error::{Error, Result}; use serde::Deserialize; use std::collections::HashSet; use std::env; diff --git a/ecstore/src/endpoints.rs b/ecstore/src/endpoints.rs index 3bca9480..ea44edf4 100644 --- a/ecstore/src/endpoints.rs +++ b/ecstore/src/endpoints.rs @@ -3,10 +3,10 @@ use tracing::{instrument, warn}; use crate::{ disk::endpoint::{Endpoint, EndpointType}, disks_layout::DisksLayout, - error::{Error, Result}, global::global_rustfs_port, utils::net::{self, XHost}, }; +use common::error::{Error, Result}; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, net::IpAddr, diff --git a/ecstore/src/erasure.rs b/ecstore/src/erasure.rs index 83d1d9ae..d4f4f1af 100644 --- a/ecstore/src/erasure.rs +++ b/ecstore/src/erasure.rs @@ -1,6 +1,7 @@ use crate::bitrot::{BitrotReader, BitrotWriter}; -use crate::error::{Error, Result}; +use crate::error::clone_err; use crate::quorum::{object_op_ignored_errs, reduce_write_quorum_errs}; +use common::error::{Error, Result}; use futures::future::join_all; use reed_solomon_erasure::galois_8::ReedSolomon; use std::any::Any; @@ -487,7 +488,7 @@ impl Erasure { } } if !errs.is_empty() { - return Err(errs[0].clone()); + return Err(clone_err(&errs[0])); } Ok(()) diff --git a/ecstore/src/error.rs b/ecstore/src/error.rs index 3d32b495..f3aea337 100644 --- a/ecstore/src/error.rs +++ b/ecstore/src/error.rs @@ -1,106 +1,122 @@ use crate::disk::error::{clone_disk_err, DiskError}; +use common::error::Error; use std::io; -use tracing_error::{SpanTrace, SpanTraceStatus}; +// use tracing_error::{SpanTrace, SpanTraceStatus}; -pub type StdError = Box; +// pub type StdError = Box; -pub type Result = std::result::Result; +// pub type Result = std::result::Result; -#[derive(Debug)] -pub struct Error { - inner: Box, - span_trace: SpanTrace, -} +// #[derive(Debug)] +// 
pub struct Error { +// inner: Box, +// span_trace: SpanTrace, +// } -impl Error { - /// Create a new error from a `std::error::Error`. - #[must_use] - #[track_caller] - pub fn new(source: T) -> Self { - Self::from_std_error(source.into()) - } +// impl Error { +// /// Create a new error from a `std::error::Error`. +// #[must_use] +// #[track_caller] +// pub fn new(source: T) -> Self { +// Self::from_std_error(source.into()) +// } - /// Create a new error from a `std::error::Error`. - #[must_use] - #[track_caller] - pub fn from_std_error(inner: StdError) -> Self { - Self { - inner, - span_trace: SpanTrace::capture(), - } - } +// /// Create a new error from a `std::error::Error`. +// #[must_use] +// #[track_caller] +// pub fn from_std_error(inner: StdError) -> Self { +// Self { +// inner, +// span_trace: SpanTrace::capture(), +// } +// } - /// Create a new error from a string. - #[must_use] - #[track_caller] - pub fn from_string(s: impl Into) -> Self { - Self::msg(s) - } +// /// Create a new error from a string. +// #[must_use] +// #[track_caller] +// pub fn from_string(s: impl Into) -> Self { +// Self::msg(s) +// } - /// Create a new error from a string. - #[must_use] - #[track_caller] - pub fn msg(s: impl Into) -> Self { - Self::from_std_error(s.into().into()) - } +// /// Create a new error from a string. +// #[must_use] +// #[track_caller] +// pub fn msg(s: impl Into) -> Self { +// Self::from_std_error(s.into().into()) +// } - /// Returns `true` if the inner type is the same as `T`. - #[inline] - pub fn is(&self) -> bool { - self.inner.is::() - } +// /// Returns `true` if the inner type is the same as `T`. +// #[inline] +// pub fn is(&self) -> bool { +// self.inner.is::() +// } - /// Returns some reference to the inner value if it is of type `T`, or - /// `None` if it isn't. - #[inline] - pub fn downcast_ref(&self) -> Option<&T> { - self.inner.downcast_ref() - } +// /// Returns some reference to the inner value if it is of type `T`, or +// /// `None` if it isn't. +// #[inline] +// pub fn downcast_ref(&self) -> Option<&T> { +// self.inner.downcast_ref() +// } - /// Returns some mutable reference to the inner value if it is of type `T`, or - /// `None` if it isn't. - #[inline] - pub fn downcast_mut(&mut self) -> Option<&mut T> { - self.inner.downcast_mut() - } +// /// Returns some mutable reference to the inner value if it is of type `T`, or +// /// `None` if it isn't. 
+// #[inline]
+// pub fn downcast_mut(&mut self) -> Option<&mut T> {
+// self.inner.downcast_mut()
+// }
- pub fn to_io_err(&self) -> Option<io::Error> {
- self.downcast_ref::<io::Error>()
- .map(|e| io::Error::new(e.kind(), e.to_string()))
- }
-}
+// pub fn to_io_err(&self) -> Option<io::Error> {
+// self.downcast_ref::<io::Error>()
+// .map(|e| io::Error::new(e.kind(), e.to_string()))
+// }
+// }
-impl From for Error {
- fn from(e: T) -> Self {
- Self::new(e)
- }
-}
+// impl From for Error {
+// fn from(e: T) -> Self {
+// Self::new(e)
+// }
+// }
-impl std::fmt::Display for Error {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "{}", self.inner)?;
+// impl std::fmt::Display for Error {
+// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+// write!(f, "{}", self.inner)?;
- if self.span_trace.status() != SpanTraceStatus::EMPTY {
- write!(f, "\nspan_trace:\n{}", self.span_trace)?;
- }
+// if self.span_trace.status() != SpanTraceStatus::EMPTY {
+// write!(f, "\nspan_trace:\n{}", self.span_trace)?;
+// }
- Ok(())
- }
-}
+// Ok(())
+// }
+// }
-impl Clone for Error {
- fn clone(&self) -> Self {
- if let Some(e) = self.downcast_ref::<DiskError>() {
- clone_disk_err(e)
- } else if let Some(e) = self.downcast_ref::<io::Error>() {
- if let Some(code) = e.raw_os_error() {
- Error::new(io::Error::from_raw_os_error(code))
- } else {
- Error::new(io::Error::new(e.kind(), e.to_string()))
- }
+// impl Clone for Error {
+// fn clone(&self) -> Self {
+// if let Some(e) = self.downcast_ref::<DiskError>() {
+// clone_disk_err(e)
+// } else if let Some(e) = self.downcast_ref::<io::Error>() {
+// if let Some(code) = e.raw_os_error() {
+// Error::new(io::Error::from_raw_os_error(code))
+// } else {
+// Error::new(io::Error::new(e.kind(), e.to_string()))
+// }
+// } else {
+// // TODO: optimize handling of other error types
+// Error::msg(self.to_string())
+// }
+// }
+// }
+
+pub fn clone_err(e: &Error) -> Error {
+ if let Some(e) = e.downcast_ref::<DiskError>() {
+ clone_disk_err(e)
+ } else if let Some(e) = e.downcast_ref::<io::Error>() {
+ if let Some(code) = e.raw_os_error() {
+ Error::new(io::Error::from_raw_os_error(code))
 } else {
- // TODO: optimize handling of other error types
- Error::msg(self.to_string())
+ Error::new(io::Error::new(e.kind(), e.to_string()))
 }
+ } else {
+ // TODO: optimize handling of other error types
+ Error::msg(e.to_string())
 }
}
diff --git a/ecstore/src/file_meta.rs b/ecstore/src/file_meta.rs
index bb33f8e6..c9558578 100644
--- a/ecstore/src/file_meta.rs
+++ b/ecstore/src/file_meta.rs
@@ -1,4 +1,13 @@
+use crate::disk::FileInfoVersions;
+use crate::file_meta_inline::InlineData;
+use crate::store_api::RawFileInfo;
+use crate::store_err::StorageError;
+use crate::{
+ disk::error::DiskError,
+ store_api::{ErasureInfo, FileInfo, ObjectPartInfo, ERASURE_ALGORITHM},
+};
 use byteorder::ByteOrder;
+use common::error::{Error, Result};
 use rmp::Marker;
 use serde::{Deserialize, Serialize};
 use std::cmp::Ordering;
@@ -11,16 +20,6 @@ use tracing::{error, warn};
 use uuid::Uuid;
 use xxhash_rust::xxh64;
-use crate::disk::FileInfoVersions;
-use crate::file_meta_inline::InlineData;
-use crate::store_api::RawFileInfo;
-use crate::store_err::StorageError;
-use crate::{
- disk::error::DiskError,
- error::{Error, Result},
- store_api::{ErasureInfo, FileInfo, ObjectPartInfo, ERASURE_ALGORITHM},
-};
-
 // XL header specifies the format
 pub static XL_FILE_HEADER: [u8; 4] = [b'X', b'L', b'2', b' '];
 // pub static XL_FILE_VERSION_CURRENT: [u8; 4] = [0; 4];
diff --git a/ecstore/src/file_meta_inline.rs b/ecstore/src/file_meta_inline.rs
index 005e18a9..083d1b4f 100644
--- a/ecstore/src/file_meta_inline.rs
+++ b/ecstore/src/file_meta_inline.rs
@@ -1,8
+1,7 @@ +use common::error::{Error, Result}; use serde::{Deserialize, Serialize}; -use uuid::Uuid; - -use crate::error::{Error, Result}; use std::io::{Cursor, Read}; +use uuid::Uuid; #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)] pub struct InlineData(Vec); diff --git a/ecstore/src/heal/background_heal_ops.rs b/ecstore/src/heal/background_heal_ops.rs index e096cf3e..e0a05338 100644 --- a/ecstore/src/heal/background_heal_ops.rs +++ b/ecstore/src/heal/background_heal_ops.rs @@ -23,7 +23,6 @@ use crate::heal::heal_ops::{HealSource, BG_HEALING_UUID}; use crate::{ config::RUSTFS_CONFIG_PREFIX, disk::{endpoint::Endpoint, error::DiskError, DiskAPI, DiskInfoOptions, BUCKET_META_PREFIX, RUSTFS_META_BUCKET}, - error::{Error, Result}, global::{GLOBAL_BackgroundHealRoutine, GLOBAL_BackgroundHealState, GLOBAL_LOCAL_DISK_MAP}, heal::{ data_usage::{DATA_USAGE_CACHE_NAME, DATA_USAGE_ROOT}, @@ -36,6 +35,7 @@ use crate::{ store_api::{BucketInfo, BucketOptions, StorageAPI}, utils::path::{path_join, SLASH_SEPARATOR}, }; +use common::error::{Error, Result}; pub static DEFAULT_MONITOR_NEW_DISK_INTERVAL: Duration = Duration::from_secs(10); diff --git a/ecstore/src/heal/data_scanner.rs b/ecstore/src/heal/data_scanner.rs index 66d796ce..52ded12a 100644 --- a/ecstore/src/heal/data_scanner.rs +++ b/ecstore/src/heal/data_scanner.rs @@ -12,22 +12,6 @@ use std::{ time::{Duration, SystemTime}, }; -use chrono::{DateTime, Utc}; -use lazy_static::lazy_static; -use rand::Rng; -use rmp_serde::{Deserializer, Serializer}; -use s3s::dto::{ReplicationConfiguration, ReplicationRuleStatus}; -use serde::{Deserialize, Serialize}; -use tokio::{ - sync::{ - broadcast, - mpsc::{self, Sender}, - RwLock, - }, - time::sleep, -}; -use tracing::{error, info}; - use super::{ data_scanner_metric::{globalScannerMetrics, ScannerMetric, ScannerMetrics}, data_usage::{store_data_usage_in_backend, DATA_USAGE_BLOOM_NAME_PATH}, @@ -38,11 +22,10 @@ use crate::heal::data_usage::DATA_USAGE_ROOT; use crate::{ cache_value::metacache_set::{list_path_raw, ListPathRawOptions}, config::{ - common::{read_config, save_config}, + com::{read_config, save_config}, heal::Config, }, disk::{error::DiskError, DiskInfoOptions, DiskStore, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams}, - error::{Error, Result}, global::{GLOBAL_BackgroundHealState, GLOBAL_IsErasure, GLOBAL_IsErasureSD}, heal::{ data_usage::BACKGROUND_HEAL_INFO_PATH, @@ -61,6 +44,22 @@ use crate::{ disk::DiskAPI, store_api::{FileInfo, ObjectInfo}, }; +use chrono::{DateTime, Utc}; +use common::error::{Error, Result}; +use lazy_static::lazy_static; +use rand::Rng; +use rmp_serde::{Deserializer, Serializer}; +use s3s::dto::{ReplicationConfiguration, ReplicationRuleStatus}; +use serde::{Deserialize, Serialize}; +use tokio::{ + sync::{ + broadcast, + mpsc::{self, Sender}, + RwLock, + }, + time::sleep, +}; +use tracing::{error, info}; const DATA_SCANNER_SLEEP_PER_FOLDER: Duration = Duration::from_millis(1); // Time to wait between folders. const DATA_USAGE_UPDATE_DIR_CYCLES: u32 = 16; // Visit all folders every n cycles. 
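// Editor's note: a minimal, hypothetical sketch (not part of the patch) of the
// error-cloning convention these hunks converge on. The shared
// `common::error::Error` wraps a boxed error and is not `Clone`, so call sites
// that previously wrote `err.clone()` now rebuild a best-effort copy through
// ecstore's `clone_err`, which downcasts to the known error types. Only
// `clone_err` and `Error` come from the diff; `first_error` is an illustrative
// helper invented here.
//
//     use common::error::Error;
//     use crate::error::clone_err;
//
//     /// Return a copy of the first recorded error, if any, without
//     /// requiring `Error: Clone`.
//     fn first_error(errs: &[Option<Error>]) -> Option<Error> {
//         errs.iter().flatten().next().map(clone_err)
//     }
//
// This mirrors the call sites above, e.g. `Err(clone_err(&errs[0]))` in
// erasure.rs and `errs[i].as_ref().map(clone_err)` in peer.rs.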
diff --git a/ecstore/src/heal/data_usage.rs b/ecstore/src/heal/data_usage.rs index ef569d6a..a5de85a3 100644 --- a/ecstore/src/heal/data_usage.rs +++ b/ecstore/src/heal/data_usage.rs @@ -1,22 +1,21 @@ -use lazy_static::lazy_static; -use serde::{Deserialize, Serialize}; -use std::{collections::HashMap, sync::Arc, time::SystemTime}; -use tokio::sync::mpsc::Receiver; -use tracing::{error, warn}; - use crate::{ bucket::metadata_sys::get_replication_config, config::{ - common::{read_config, save_config}, + com::{read_config, save_config}, error::is_err_config_not_found, }, disk::{BUCKET_META_PREFIX, RUSTFS_META_BUCKET}, - error::Result, new_object_layer_fn, store::ECStore, store_err::to_object_err, utils::path::SLASH_SEPARATOR, }; +use common::error::Result; +use lazy_static::lazy_static; +use serde::{Deserialize, Serialize}; +use std::{collections::HashMap, sync::Arc, time::SystemTime}; +use tokio::sync::mpsc::Receiver; +use tracing::{error, warn}; pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR; const DATA_USAGE_OBJ_NAME: &str = ".usage.json"; diff --git a/ecstore/src/heal/data_usage_cache.rs b/ecstore/src/heal/data_usage_cache.rs index 6b336790..7bc33df8 100644 --- a/ecstore/src/heal/data_usage_cache.rs +++ b/ecstore/src/heal/data_usage_cache.rs @@ -1,11 +1,11 @@ -use crate::config::common::save_config; +use crate::config::com::save_config; use crate::disk::error::DiskError; use crate::disk::{BUCKET_META_PREFIX, RUSTFS_META_BUCKET}; -use crate::error::{Error, Result}; use crate::new_object_layer_fn; use crate::set_disk::SetDisks; use crate::store_api::{BucketInfo, ObjectIO, ObjectOptions}; use bytesize::ByteSize; +use common::error::{Error, Result}; use http::HeaderMap; use path_clean::PathClean; use rand::Rng; diff --git a/ecstore/src/heal/heal_commands.rs b/ecstore/src/heal/heal_commands.rs index 3ff5610e..6edd0e73 100644 --- a/ecstore/src/heal/heal_commands.rs +++ b/ecstore/src/heal/heal_commands.rs @@ -4,22 +4,21 @@ use std::{ time::SystemTime, }; -use chrono::{DateTime, Utc}; -use lazy_static::lazy_static; -use serde::{Deserialize, Serialize}; -use time::OffsetDateTime; -use tokio::sync::RwLock; - use crate::{ config::storageclass::{RRS, STANDARD}, disk::{DeleteOptions, DiskAPI, DiskStore, BUCKET_META_PREFIX, RUSTFS_META_BUCKET}, - error::{Error, Result}, global::GLOBAL_BackgroundHealState, heal::heal_ops::HEALING_TRACKER_FILENAME, new_object_layer_fn, store_api::{BucketInfo, StorageAPI}, utils::fs::read_file, }; +use chrono::{DateTime, Utc}; +use common::error::{Error, Result}; +use lazy_static::lazy_static; +use serde::{Deserialize, Serialize}; +use time::OffsetDateTime; +use tokio::sync::RwLock; use super::{background_heal_ops::get_local_disks_to_heal, heal_ops::BG_HEALING_UUID}; diff --git a/ecstore/src/heal/heal_ops.rs b/ecstore/src/heal/heal_ops.rs index b7b694b4..d5c4544f 100644 --- a/ecstore/src/heal/heal_ops.rs +++ b/ecstore/src/heal/heal_ops.rs @@ -6,7 +6,7 @@ use super::{ }; use crate::store_api::StorageAPI; use crate::{ - config::common::CONFIG_PREFIX, + config::com::CONFIG_PREFIX, disk::RUSTFS_META_BUCKET, global::GLOBAL_BackgroundHealRoutine, heal::{error::ERR_HEAL_STOP_SIGNALLED, heal_commands::DRIVE_STATE_OK}, @@ -14,7 +14,6 @@ use crate::{ use crate::{ disk::{endpoint::Endpoint, MetaCacheEntry}, endpoints::Endpoints, - error::{Error, Result}, global::GLOBAL_IsDistErasure, heal::heal_commands::{HealStartSuccess, HEAL_UNKNOWN_SCAN}, new_object_layer_fn, @@ -25,6 +24,7 @@ use crate::{ utils::path::path_join, }; use chrono::Utc; +use common::error::{Error, Result}; use 
futures::join; use lazy_static::lazy_static; use madmin::heal_commands::{HealDriveInfo, HealItemType, HealResultItem}; diff --git a/ecstore/src/metacache/writer.rs b/ecstore/src/metacache/writer.rs index bd1b576b..e615fe3d 100644 --- a/ecstore/src/metacache/writer.rs +++ b/ecstore/src/metacache/writer.rs @@ -1,6 +1,6 @@ use crate::disk::MetaCacheEntry; -use crate::error::Error; -use crate::error::Result; +use crate::error::clone_err; +use common::error::{Error, Result}; use rmp::Marker; use std::str::from_utf8; use tokio::io::AsyncRead; @@ -246,7 +246,7 @@ impl MetacacheReader { self.check_init().await?; if let Some(err) = &self.err { - return Err(err.clone()); + return Err(clone_err(err)); } let mut n = size; @@ -285,7 +285,7 @@ impl MetacacheReader { self.check_init().await?; if let Some(err) = &self.err { - return Err(err.clone()); + return Err(clone_err(err)); } match rmp::decode::read_bool(&mut self.read_more(1).await?) { diff --git a/ecstore/src/notification_sys.rs b/ecstore/src/notification_sys.rs index 37b23489..017e346c 100644 --- a/ecstore/src/notification_sys.rs +++ b/ecstore/src/notification_sys.rs @@ -1,8 +1,8 @@ use crate::endpoints::EndpointServerPools; -use crate::error::{Error, Result}; use crate::global::get_global_endpoints; use crate::peer_rest_client::PeerRestClient; use crate::StorageAPI; +use common::error::{Error, Result}; use futures::future::join_all; use lazy_static::lazy_static; use madmin::{ItemState, ServerProperties}; diff --git a/ecstore/src/peer.rs b/ecstore/src/peer.rs index 13df3a7e..5328cbc9 100644 --- a/ecstore/src/peer.rs +++ b/ecstore/src/peer.rs @@ -1,18 +1,6 @@ -use async_trait::async_trait; -use futures::future::join_all; -use madmin::heal_commands::{HealDriveInfo, HealResultItem}; -use protos::node_service_time_out_client; -use protos::proto_gen::node_service::{ - DeleteBucketRequest, GetBucketInfoRequest, HealBucketRequest, ListBucketRequest, MakeBucketRequest, -}; -use regex::Regex; -use std::{collections::HashMap, fmt::Debug, sync::Arc}; -use tokio::sync::RwLock; -use tonic::Request; -use tracing::info; - use crate::disk::error::is_all_buckets_not_found; use crate::disk::{DiskAPI, DiskStore}; +use crate::error::clone_err; use crate::global::GLOBAL_LOCAL_DISK_MAP; use crate::heal::heal_commands::{ HealOpts, DRIVE_STATE_CORRUPT, DRIVE_STATE_MISSING, DRIVE_STATE_OFFLINE, DRIVE_STATE_OK, HEAL_ITEM_BUCKET, @@ -25,9 +13,21 @@ use crate::utils::wildcard::is_rustfs_meta_bucket_name; use crate::{ disk::{self, error::DiskError, VolumeInfo}, endpoints::{EndpointServerPools, Node}, - error::{Error, Result}, store_api::{BucketInfo, BucketOptions, DeleteBucketOptions, MakeBucketOptions}, }; +use async_trait::async_trait; +use common::error::{Error, Result}; +use futures::future::join_all; +use madmin::heal_commands::{HealDriveInfo, HealResultItem}; +use protos::node_service_time_out_client; +use protos::proto_gen::node_service::{ + DeleteBucketRequest, GetBucketInfoRequest, HealBucketRequest, ListBucketRequest, MakeBucketRequest, +}; +use regex::Regex; +use std::{collections::HashMap, fmt::Debug, sync::Arc}; +use tokio::sync::RwLock; +use tonic::Request; +use tracing::info; type Client = Arc>; @@ -95,7 +95,7 @@ impl S3PeerSys { for (i, client) in self.clients.iter().enumerate() { if let Some(v) = client.get_pools() { if v.contains(&pool_idx) { - per_pool_errs.push(errs[i].clone()); + per_pool_errs.push(errs[i].as_ref().map(clone_err)); } } } @@ -130,7 +130,7 @@ impl S3PeerSys { for (i, client) in self.clients.iter().enumerate() { if let Some(v) = 
client.get_pools() { if v.contains(&pool_idx) { - per_pool_errs.push(errs[i].clone()); + per_pool_errs.push(errs[i].as_ref().map(clone_err)); } } } @@ -781,7 +781,7 @@ pub async fn heal_bucket_local(bucket: &str, opts: &HealOpts) -> Result>(); futures.push(async move { if bs_clone.read().await[idx] == DRIVE_STATE_MISSING { info!("bucket not find, will recreate"); @@ -795,7 +795,7 @@ pub async fn heal_bucket_local(bucket: &str, opts: &HealOpts) -> Result bool; @@ -106,7 +107,7 @@ fn reduce_errs(errs: &[Option], ignored_errs: &[Box]) - if let Some(&c) = error_counts.get(&max_err) { if let Some(&err_idx) = error_map.get(&max_err) { - let err = errs[err_idx].clone(); + let err = errs[err_idx].as_ref().map(clone_err); return (c, err); } diff --git a/ecstore/src/set_disk.rs b/ecstore/src/set_disk.rs index 157881b9..754681fe 100644 --- a/ecstore/src/set_disk.rs +++ b/ecstore/src/set_disk.rs @@ -1,12 +1,3 @@ -use std::{ - collections::{HashMap, HashSet}, - io::{Cursor, Write}, - mem::replace, - path::Path, - sync::Arc, - time::Duration, -}; - use crate::{ bitrot::{bitrot_verify, close_bitrot_writers, new_bitrot_filereader, new_bitrot_filewriter, BitrotFileWriter}, cache_value::metacache_set::{list_path_raw, ListPathRawOptions}, @@ -20,7 +11,7 @@ use crate::{ UpdateMetadataOpts, RUSTFS_META_BUCKET, RUSTFS_META_MULTIPART_BUCKET, RUSTFS_META_TMP_BUCKET, }, erasure::Erasure, - error::{Error, Result}, + error::clone_err, file_meta::{merge_file_meta_versions, FileMeta, FileMetaShallowVersion}, global::{ get_global_deployment_id, is_dist_erasure, GLOBAL_BackgroundHealState, GLOBAL_LOCAL_DISK_MAP, @@ -64,6 +55,7 @@ use crate::{ }; use bytesize::ByteSize; use chrono::Utc; +use common::error::{Error, Result}; use futures::future::join_all; use glob::Pattern; use http::HeaderMap; @@ -82,6 +74,14 @@ use rand::{ use sha2::{Digest, Sha256}; use std::hash::Hash; use std::time::SystemTime; +use std::{ + collections::{HashMap, HashSet}, + io::{Cursor, Write}, + mem::replace, + path::Path, + sync::Arc, + time::Duration, +}; use time::OffsetDateTime; use tokio::{ io::{empty, AsyncWrite}, @@ -320,7 +320,7 @@ impl SetDisks { } Err(e) => { // ress.push(None); - errs.push(Some(e.clone())); + errs.push(Some(clone_err(e))); } } } @@ -453,7 +453,7 @@ impl SetDisks { Ok(()) } - async fn cleanup_multipart_path(disks: &[Option], paths: &[&str]) { + async fn cleanup_multipart_path(disks: &[Option], paths: &[String]) { let mut futures = Vec::with_capacity(disks.len()); let mut errs = Vec::with_capacity(disks.len()); @@ -479,6 +479,10 @@ impl SetDisks { } } } + + if errs.iter().any(|e| e.is_some()) { + warn!("cleanup_multipart_path errs {:?}", &errs); + } } async fn rename_part( disks: &[Option], @@ -518,7 +522,7 @@ impl SetDisks { if let Some(err) = reduce_write_quorum_errs(&errs, object_op_ignored_errs().as_ref(), write_quorum) { warn!("rename_part errs {:?}", &errs); - Self::cleanup_multipart_path(disks, vec![dst_object, format!("{}.meta", dst_object).as_str()].as_slice()).await; + Self::cleanup_multipart_path(disks, &[dst_object.to_owned(), format!("{}.meta", dst_object)]).await; return Err(err); } @@ -1238,7 +1242,7 @@ impl SetDisks { Err(err) => { for item in errs.iter_mut() { if item.is_none() { - *item = Some(err.clone()) + *item = Some(clone_err(&err)); } } @@ -1490,92 +1494,92 @@ impl SetDisks { // (ress, errs) // } - async fn remove_object_part( - &self, - bucket: &str, - object: &str, - upload_id: &str, - data_dir: &str, - part_num: usize, - ) -> Result<()> { - let upload_id_path = Self::get_upload_id_dir(bucket, 
object, upload_id); - let disks = self.disks.read().await; + // async fn remove_object_part( + // &self, + // bucket: &str, + // object: &str, + // upload_id: &str, + // data_dir: &str, + // part_num: usize, + // ) -> Result<()> { + // let upload_id_path = Self::get_upload_id_dir(bucket, object, upload_id); + // let disks = self.disks.read().await; - let disks = disks.clone(); + // let disks = disks.clone(); - let file_path = format!("{}/{}/part.{}", upload_id_path, data_dir, part_num); + // let file_path = format!("{}/{}/part.{}", upload_id_path, data_dir, part_num); - let mut futures = Vec::with_capacity(disks.len()); - let mut errors = Vec::with_capacity(disks.len()); + // let mut futures = Vec::with_capacity(disks.len()); + // let mut errors = Vec::with_capacity(disks.len()); - for disk in disks.iter() { - let file_path = file_path.clone(); - let meta_file_path = format!("{}.meta", file_path); + // for disk in disks.iter() { + // let file_path = file_path.clone(); + // let meta_file_path = format!("{}.meta", file_path); - futures.push(async move { - if let Some(disk) = disk { - disk.delete(RUSTFS_META_MULTIPART_BUCKET, &file_path, DeleteOptions::default()) - .await?; - disk.delete(RUSTFS_META_MULTIPART_BUCKET, &meta_file_path, DeleteOptions::default()) - .await - } else { - Err(Error::new(DiskError::DiskNotFound)) - } - }); - } + // futures.push(async move { + // if let Some(disk) = disk { + // disk.delete(RUSTFS_META_MULTIPART_BUCKET, &file_path, DeleteOptions::default()) + // .await?; + // disk.delete(RUSTFS_META_MULTIPART_BUCKET, &meta_file_path, DeleteOptions::default()) + // .await + // } else { + // Err(Error::new(DiskError::DiskNotFound)) + // } + // }); + // } - let results = join_all(futures).await; - for result in results { - match result { - Ok(_) => { - errors.push(None); - } - Err(e) => { - errors.push(Some(e)); - } - } - } + // let results = join_all(futures).await; + // for result in results { + // match result { + // Ok(_) => { + // errors.push(None); + // } + // Err(e) => { + // errors.push(Some(e)); + // } + // } + // } - Ok(()) - } - async fn remove_part_meta(&self, bucket: &str, object: &str, upload_id: &str, data_dir: &str, part_num: usize) -> Result<()> { - let upload_id_path = Self::get_upload_id_dir(bucket, object, upload_id); - let disks = self.disks.read().await; + // Ok(()) + // } + // async fn remove_part_meta(&self, bucket: &str, object: &str, upload_id: &str, data_dir: &str, part_num: usize) -> Result<()> { + // let upload_id_path = Self::get_upload_id_dir(bucket, object, upload_id); + // let disks = self.disks.read().await; - let disks = disks.clone(); - // let disks = Self::shuffle_disks(&disks, &fi.erasure.distribution); + // let disks = disks.clone(); + // // let disks = Self::shuffle_disks(&disks, &fi.erasure.distribution); - let file_path = format!("{}/{}/part.{}.meta", upload_id_path, data_dir, part_num); + // let file_path = format!("{}/{}/part.{}.meta", upload_id_path, data_dir, part_num); - let mut futures = Vec::with_capacity(disks.len()); - let mut errors = Vec::with_capacity(disks.len()); + // let mut futures = Vec::with_capacity(disks.len()); + // let mut errors = Vec::with_capacity(disks.len()); - for disk in disks.iter() { - let file_path = file_path.clone(); - futures.push(async move { - if let Some(disk) = disk { - disk.delete(RUSTFS_META_MULTIPART_BUCKET, &file_path, DeleteOptions::default()) - .await - } else { - Err(Error::new(DiskError::DiskNotFound)) - } - }); - } + // for disk in disks.iter() { + // let file_path = 
file_path.clone(); + // futures.push(async move { + // if let Some(disk) = disk { + // disk.delete(RUSTFS_META_MULTIPART_BUCKET, &file_path, DeleteOptions::default()) + // .await + // } else { + // Err(Error::new(DiskError::DiskNotFound)) + // } + // }); + // } - let results = join_all(futures).await; - for result in results { - match result { - Ok(_) => { - errors.push(None); - } - Err(e) => { - errors.push(Some(e)); - } - } - } + // let results = join_all(futures).await; + // for result in results { + // match result { + // Ok(_) => { + // errors.push(None); + // } + // Err(e) => { + // errors.push(Some(e)); + // } + // } + // } - Ok(()) - } + // Ok(()) + // } // #[tracing::instrument(skip(self))] pub async fn delete_all(&self, bucket: &str, prefix: &str) -> Result<()> { @@ -2288,7 +2292,7 @@ impl SetDisks { "file({} : {}) part corrupt too much, can not to fix, disks_to_heal_count: {}, parity_blocks: {}", bucket, object, disks_to_heal_count, lastest_meta.erasure.parity_blocks ); - let mut t_errs = vec![None; errs.len()]; + // Allow for dangling deletes, on versions that have DataDir missing etc. // this would end up restoring the correct readable versions. match self @@ -2311,13 +2315,22 @@ impl SetDisks { } else { Error::new(DiskError::FileNotFound) }; + let mut t_errs = Vec::with_capacity(errs.len()); + for _ in 0..errs.len() { + t_errs.push(None); + } return Ok(( self.default_heal_result(m, &t_errs, bucket, object, version_id).await, Some(derr), )); } Err(err) => { - t_errs = vec![Some(err.clone()); errs.len()]; + // t_errs = vec![Some(err.clone()); errs.len()]; + let mut t_errs = Vec::with_capacity(errs.len()); + for _ in 0..errs.len() { + t_errs.push(Some(clone_err(&err))); + } + return Ok(( self.default_heal_result(FileInfo::default(), &t_errs, bucket, object, version_id) .await, @@ -3517,7 +3530,7 @@ impl SetDisks { } if let Some(err) = ret_err.as_ref() { - return Err(err.clone()); + return Err(clone_err(err)); } if !tracker.read().await.queue_buckets.is_empty() { return Err(Error::from_string(format!( @@ -4434,7 +4447,7 @@ impl StorageAPI for SetDisks { } }; - let mut upload_ids = Vec::new(); + let mut upload_ids: Vec = Vec::new(); for disk in disks.iter().flatten() { if !disk.is_online().await { @@ -4847,29 +4860,49 @@ impl StorageAPI for SetDisks { } } + let mut parts = Vec::with_capacity(curr_fi.parts.len()); // TODO: optimize cleanupMultipartPath for p in curr_fi.parts.iter() { - let _ = self - .remove_part_meta( - bucket, - object, - upload_id, - curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(), - p.number, - ) - .await; + parts.push(path_join_buf(&[ + &upload_id_path, + curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(), + format!("part.{}.meta", p.number).as_str(), + ])); if !fi.parts.iter().any(|v| v.number == p.number) { - let _ = self - .remove_object_part( - bucket, - object, - upload_id, - curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(), - p.number, - ) - .await; + parts.push(path_join_buf(&[ + &upload_id_path, + curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(), + format!("part.{}", p.number).as_str(), + ])); } + + // let _ = self + // .remove_part_meta( + // bucket, + // object, + // upload_id, + // curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(), + // p.number, + // ) + // .await; + + // if !fi.parts.iter().any(|v| v.number == p.number) { + // let _ = self + // .remove_object_part( + // bucket, + // object, + // upload_id, + // curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(), + // p.number, + // )
// .await; + // } + } + + { + let disks = self.get_disks_internal().await; + Self::cleanup_multipart_path(&disks, &parts).await; } let (online_disks, versions, op_old_dir) = Self::rename_data( @@ -5203,7 +5236,10 @@ async fn disks_with_all_parts( let erasure_distribution_reliable = inconsistent <= parts_metadata.len() / 2; - let mut meta_errs = vec![None; errs.len()]; + let mut meta_errs = Vec::with_capacity(errs.len()); + for _ in 0..errs.len() { + meta_errs.push(None); + } for (index, disk) in online_disks.iter().enumerate() { let disk = if let Some(disk) = disk { @@ -5213,8 +5249,8 @@ async fn disks_with_all_parts( continue; }; - if let Some(err) = errs[index].clone() { - meta_errs[index] = Some(err); + if let Some(err) = &errs[index] { + meta_errs[index] = Some(clone_err(err)); continue; } if !disk.is_online().await { @@ -5370,7 +5406,7 @@ pub fn should_heal_object_on_disk( match err { Some(err) => match err.downcast_ref::() { Some(DiskError::FileNotFound) | Some(DiskError::FileVersionNotFound) | Some(DiskError::FileCorrupt) => { - return (true, Some(err.clone())); + return (true, Some(clone_err(err))); } _ => {} }, @@ -5393,7 +5429,7 @@ pub fn should_heal_object_on_disk( } } } - (false, err.clone()) + (false, err.as_ref().map(clone_err)) } async fn get_disks_info(disks: &[Option], eps: &[Endpoint]) -> Vec { diff --git a/ecstore/src/sets.rs b/ecstore/src/sets.rs index 149c0e62..0a2e716f 100644 --- a/ecstore/src/sets.rs +++ b/ecstore/src/sets.rs @@ -1,14 +1,6 @@ #![allow(clippy::map_entry)] use std::{collections::HashMap, sync::Arc}; -use common::globals::GLOBAL_Local_Node_Name; -use futures::future::join_all; -use http::HeaderMap; -use lock::{namespace_lock::NsLockMap, new_lock_api, LockApi}; -use madmin::heal_commands::{HealDriveInfo, HealResultItem}; -use tokio::sync::RwLock; -use uuid::Uuid; - use crate::{ disk::{ error::{is_unformatted_disk, DiskError}, @@ -16,7 +8,6 @@ use crate::{ new_disk, DiskAPI, DiskInfo, DiskOption, DiskStore, }, endpoints::{Endpoints, PoolEndpoints}, - error::{Error, Result}, global::{is_dist_erasure, GLOBAL_LOCAL_DISK_SET_DRIVES}, heal::heal_commands::{ HealOpts, DRIVE_STATE_CORRUPT, DRIVE_STATE_MISSING, DRIVE_STATE_OFFLINE, DRIVE_STATE_OK, HEAL_ITEM_METADATA, @@ -31,6 +22,14 @@ use crate::{ store_init::{check_format_erasure_values, get_format_erasure_in_quorum, load_format_erasure_all, save_format_file}, utils::{hash, path::path_join_buf}, }; +use common::error::{Error, Result}; +use common::globals::GLOBAL_Local_Node_Name; +use futures::future::join_all; +use http::HeaderMap; +use lock::{namespace_lock::NsLockMap, new_lock_api, LockApi}; +use madmin::heal_commands::{HealDriveInfo, HealResultItem}; +use tokio::sync::RwLock; +use uuid::Uuid; use crate::heal::heal_ops::HealSequence; use tokio::time::Duration; @@ -767,30 +766,50 @@ async fn init_storage_disks_with_errors( opts: &DiskOption, ) -> (Vec>, Vec>) { // Bootstrap disks. 
- let disks = Arc::new(RwLock::new(vec![None; endpoints.as_ref().len()])); - let errs = Arc::new(RwLock::new(vec![None; endpoints.as_ref().len()])); + // let disks = Arc::new(RwLock::new(vec![None; endpoints.as_ref().len()])); + // let errs = Arc::new(RwLock::new(vec![None; endpoints.as_ref().len()])); let mut futures = Vec::with_capacity(endpoints.as_ref().len()); - for (index, endpoint) in endpoints.as_ref().iter().enumerate() { - let ep = endpoint.clone(); - let opt = opts.clone(); - let disks_clone = disks.clone(); - let errs_clone = errs.clone(); - futures.push(tokio::spawn(async move { - match new_disk(&ep, &opt).await { - Ok(disk) => { - disks_clone.write().await[index] = Some(disk); - errs_clone.write().await[index] = None; - } - Err(err) => { - disks_clone.write().await[index] = None; - errs_clone.write().await[index] = Some(err); - } - } - })); + for endpoint in endpoints.as_ref().iter() { + futures.push(new_disk(endpoint, opts)); + + // let ep = endpoint.clone(); + // let opt = opts.clone(); + // let disks_clone = disks.clone(); + // let errs_clone = errs.clone(); + // futures.push(tokio::spawn(async move { + // match new_disk(&ep, &opt).await { + // Ok(disk) => { + // disks_clone.write().await[index] = Some(disk); + // errs_clone.write().await[index] = None; + // } + // Err(err) => { + // disks_clone.write().await[index] = None; + // errs_clone.write().await[index] = Some(err); + // } + // } + // })); } - let _ = join_all(futures).await; - let disks = disks.read().await.clone(); - let errs = errs.read().await.clone(); + // let _ = join_all(futures).await; + // let disks = disks.read().await.clone(); + // let errs = errs.read().await.clone(); + + let mut disks = Vec::with_capacity(endpoints.as_ref().len()); + let mut errs = Vec::with_capacity(endpoints.as_ref().len()); + + let results = join_all(futures).await; + for result in results { + match result { + Ok(disk) => { + disks.push(Some(disk)); + errs.push(None); + } + Err(err) => { + disks.push(None); + errs.push(Some(err)); + } + } + } + (disks, errs) } diff --git a/ecstore/src/store.rs b/ecstore/src/store.rs index 8a95edef..84c6da01 100644 --- a/ecstore/src/store.rs +++ b/ecstore/src/store.rs @@ -6,6 +6,7 @@ use crate::config::GLOBAL_StorageClass; use crate::config::{self, storageclass, GLOBAL_ConfigSys}; use crate::disk::endpoint::{Endpoint, EndpointType}; use crate::disk::{DiskAPI, DiskInfo, DiskInfoOptions, MetaCacheEntry}; +use crate::error::clone_err; use crate::global::{ is_dist_erasure, is_erasure_sd, set_global_deployment_id, set_object_layer, DISK_ASSUME_UNKNOWN_SIZE, DISK_FILL_FRACTION, DISK_MIN_INODES, DISK_RESERVE_FRACTION, GLOBAL_LOCAL_DISK_MAP, GLOBAL_LOCAL_DISK_SET_DRIVES, @@ -30,7 +31,6 @@ use crate::{ bucket::metadata::BucketMetadata, disk::{error::DiskError, new_disk, DiskOption, DiskStore, BUCKET_META_PREFIX, RUSTFS_META_BUCKET}, endpoints::EndpointServerPools, - error::{Error, Result}, peer::S3PeerSys, sets::Sets, store_api::{ @@ -40,6 +40,7 @@ use crate::{ }, store_init, utils, }; +use common::error::{Error, Result}; use common::globals::{GLOBAL_Local_Node_Name, GLOBAL_Rustfs_Host, GLOBAL_Rustfs_Port}; use futures::future::join_all; use glob::Pattern; @@ -664,7 +665,7 @@ impl ECStore { has_def_pool = true; if !is_err_object_not_found(err) && !is_err_version_not_found(err) { - return Err(err.clone()); + return Err(clone_err(err)); } if pinfo.object_info.delete_marker && !pinfo.object_info.name.is_empty() { @@ -802,7 +803,7 @@ impl ECStore { } let _ = task.await; if let Some(err) = 
first_err.read().await.as_ref() { - return Err(err.clone()); + return Err(clone_err(err)); } Ok(()) } @@ -932,8 +933,8 @@ impl ECStore { } } - if derrs[0].is_some() { - return Err(derrs[0].as_ref().unwrap().clone()); + if let Some(e) = &derrs[0] { + return Err(clone_err(e)); } Ok(objs[0].as_ref().unwrap().clone()) @@ -1056,13 +1057,23 @@ struct PoolErr { err: Option, } -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default)] pub struct PoolObjInfo { pub index: usize, pub object_info: ObjectInfo, pub err: Option, } +impl Clone for PoolObjInfo { + fn clone(&self) -> Self { + Self { + index: self.index, + object_info: self.object_info.clone(), + err: self.err.as_ref().map(clone_err), + } + } +} + // #[derive(Debug, Default, Clone)] // pub struct ListPathOptions { // pub id: String, @@ -1312,7 +1323,7 @@ impl StorageAPI for ECStore { meta.save().await.map_err(|e| to_object_err(e, vec![bucket]))?; - set_bucket_metadata(bucket.to_string(), meta).await; + set_bucket_metadata(bucket.to_string(), meta).await?; Ok(()) } @@ -2037,45 +2048,59 @@ impl StorageAPI for ECStore { ) -> Result<(HealResultItem, Option)> { info!("ECStore heal_object"); let object = utils::path::encode_dir_object(object); - let errs = Arc::new(RwLock::new(vec![None; self.pools.len()])); - let results = Arc::new(RwLock::new(vec![HealResultItem::default(); self.pools.len()])); - let mut futures = Vec::with_capacity(self.pools.len()); - for (idx, pool) in self.pools.iter().enumerate() { - //TODO: IsSuspended - let object = object.clone(); - let results = results.clone(); - let errs = errs.clone(); - futures.push(async move { - match pool.heal_object(bucket, &object, version_id, opts).await { - Ok((mut result, err)) => { - result.object = utils::path::decode_dir_object(&result.object); - results.write().await.insert(idx, result); - errs.write().await[idx] = err; - } - Err(err) => { - errs.write().await[idx] = Some(err); - } - } - }); - } - let _ = join_all(futures).await; - // Return the first nil error - for (index, err) in errs.read().await.iter().enumerate() { + let mut futures = Vec::with_capacity(self.pools.len()); + for pool in self.pools.iter() { + //TODO: IsSuspended + futures.push(pool.heal_object(bucket, &object, version_id, opts)); + // futures.push(async move { + // match pool.heal_object(bucket, &object, version_id, opts).await { + // Ok((mut result, err)) => { + // result.object = utils::path::decode_dir_object(&result.object); + // results.write().await.insert(idx, result); + // errs.write().await[idx] = err; + // } + // Err(err) => { + // errs.write().await[idx] = Some(err); + // } + // } + // }); + } + let results = join_all(futures).await; + + let mut errs = Vec::with_capacity(self.pools.len()); + let mut ress = Vec::with_capacity(self.pools.len()); + + for res in results.into_iter() { + match res { + Ok((result, err)) => { + let mut result = result; + result.object = utils::path::decode_dir_object(&result.object); + ress.push(result); + errs.push(err); + } + Err(err) => { + errs.push(Some(err)); + ress.push(HealResultItem::default()); + } + } + } + + for (idx, err) in errs.iter().enumerate() { if err.is_none() { - return Ok((results.write().await.remove(index), None)); + return Ok((ress.remove(idx), None)); } } // No pool returned a nil error, return the first non 'not found' error - for (index, err) in errs.read().await.iter().enumerate() { + for (index, err) in errs.iter().enumerate() { match err { Some(err) => match err.downcast_ref::() { Some(DiskError::FileNotFound) | Some(DiskError::FileVersionNotFound) 
=> {} - _ => return Ok((results.write().await.remove(index), Some(err.clone()))), + _ => return Ok((ress.remove(index), Some(clone_err(err)))), }, None => { - return Ok((results.write().await.remove(index), None)); + return Ok((ress.remove(index), None)); } } } @@ -2227,7 +2252,7 @@ impl StorageAPI for ECStore { } if !errs.is_empty() { - return Err(errs[0].clone()); + return Err(clone_err(&errs[0])); } Ok(()) diff --git a/ecstore/src/store_api.rs b/ecstore/src/store_api.rs index 69299616..476fc2d6 100644 --- a/ecstore/src/store_api.rs +++ b/ecstore/src/store_api.rs @@ -1,13 +1,8 @@ use crate::heal::heal_ops::HealSequence; use crate::io::FileReader; use crate::store_utils::clean_metadata; -use crate::{ - disk::DiskStore, - error::{Error, Result}, - heal::heal_commands::HealOpts, - utils::path::decode_dir_object, - xhttp, -}; +use crate::{disk::DiskStore, heal::heal_commands::HealOpts, utils::path::decode_dir_object, xhttp}; +use common::error::{Error, Result}; use http::{HeaderMap, HeaderValue}; use madmin::heal_commands::HealResultItem; use rmp_serde::Serializer; diff --git a/ecstore/src/store_err.rs b/ecstore/src/store_err.rs index 74498c17..b6b3532e 100644 --- a/ecstore/src/store_err.rs +++ b/ecstore/src/store_err.rs @@ -1,8 +1,8 @@ use crate::{ disk::error::{is_err_file_not_found, DiskError}, - error::Error, utils::path::decode_dir_object, }; +use common::error::Error; #[derive(Debug, thiserror::Error, PartialEq, Eq)] pub enum StorageError { diff --git a/ecstore/src/store_init.rs b/ecstore/src/store_init.rs index 00f93d11..2e44db00 100644 --- a/ecstore/src/store_init.rs +++ b/ecstore/src/store_init.rs @@ -7,9 +7,9 @@ use crate::{ new_disk, DiskInfoOptions, DiskOption, DiskStore, FORMAT_CONFIG_FILE, RUSTFS_META_BUCKET, }, endpoints::Endpoints, - error::{Error, Result}, heal::heal_commands::init_healing_tracker, }; +use common::error::{Error, Result}; use futures::future::join_all; use std::{ collections::{hash_map::Entry, HashMap}, diff --git a/ecstore/src/store_list_objects.rs b/ecstore/src/store_list_objects.rs index 58b8a740..4ebd42e4 100644 --- a/ecstore/src/store_list_objects.rs +++ b/ecstore/src/store_list_objects.rs @@ -6,7 +6,7 @@ use crate::disk::{ DiskInfo, DiskStore, MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams, }; -use crate::error::{Error, Result}; +use crate::error::clone_err; use crate::file_meta::merge_file_meta_versions; use crate::peer::is_reserved_or_invalid_bucket; use crate::set_disk::SetDisks; @@ -16,6 +16,7 @@ use crate::store_err::{is_err_bucket_not_found, to_object_err, StorageError}; use crate::utils::path::{self, base_dir_from_prefix, SLASH_SEPARATOR}; use crate::StorageAPI; use crate::{store::ECStore, store_api::ListObjectsV2Info}; +use common::error::{Error, Result}; use futures::future::join_all; use rand::seq::SliceRandom; use rand::thread_rng; @@ -508,7 +509,7 @@ impl ECStore { // cancel channel let (cancel_tx, cancel_rx) = broadcast::channel(1); - let (err_tx, mut err_rx) = broadcast::channel::(1); + let (err_tx, mut err_rx) = broadcast::channel::>(1); let (sender, recv) = mpsc::channel(o.limit as usize); @@ -521,7 +522,7 @@ impl ECStore { opts.stop_disk_at_limit = true; if let Err(err) = store.list_merged(cancel_rx1, opts, sender).await { error!("list_merged err {:?}", err); - let _ = err_tx1.send(err); + let _ = err_tx1.send(Arc::new(err)); } }); @@ -533,7 +534,7 @@ impl ECStore { let job2 = tokio::spawn(async move { if let Err(err) = gather_results(cancel_rx2, opts, recv, 
result_tx).await { error!("gather_results err {:?}", err); - let _ = err_tx2.send(err); + let _ = err_tx2.send(Arc::new(err)); } }); @@ -545,7 +546,7 @@ impl ECStore { match res{ Ok(o) => { error!("list_path err_rx.recv() ok {:?}", &o); - MetaCacheEntriesSortedResult{ entries: None, err: Some(o) } + MetaCacheEntriesSortedResult{ entries: None, err: Some(clone_err(o.as_ref())) } }, Err(err) => { error!("list_path err_rx.recv() err {:?}", &err); @@ -659,7 +660,7 @@ impl ECStore { continue; } - return Err(err.clone()); + return Err(clone_err(err)); } else { all_at_eof = false; continue; diff --git a/ecstore/src/utils/bool_flag.rs b/ecstore/src/utils/bool_flag.rs index 45f4a4b1..1a042af2 100644 --- a/ecstore/src/utils/bool_flag.rs +++ b/ecstore/src/utils/bool_flag.rs @@ -1,4 +1,4 @@ -use crate::error::{Error, Result}; +use common::error::{Error, Result}; pub fn parse_bool(str: &str) -> Result { match str { diff --git a/ecstore/src/utils/ellipses.rs b/ecstore/src/utils/ellipses.rs index 07c33c1c..f052236a 100644 --- a/ecstore/src/utils/ellipses.rs +++ b/ecstore/src/utils/ellipses.rs @@ -1,4 +1,4 @@ -use crate::error::{Error, Result}; +use common::error::{Error, Result}; use lazy_static::*; use regex::Regex; diff --git a/ecstore/src/utils/mod.rs b/ecstore/src/utils/mod.rs index b45790ab..d5aa9c2f 100644 --- a/ecstore/src/utils/mod.rs +++ b/ecstore/src/utils/mod.rs @@ -1,10 +1,10 @@ use crate::bucket::error::BucketMetadataError; use crate::config::error::ConfigError; use crate::disk::error::DiskError; -use crate::error::Error; use crate::quorum::QuorumError; use crate::store_err::StorageError; use crate::store_init::ErasureError; +use common::error::Error; use protos::proto_gen::node_service::Error as Proto_Error; pub mod bool_flag; diff --git a/ecstore/src/utils/net.rs b/ecstore/src/utils/net.rs index c2de72e1..e892dce3 100644 --- a/ecstore/src/utils/net.rs +++ b/ecstore/src/utils/net.rs @@ -1,4 +1,4 @@ -use crate::error::{Error, Result}; +use common::error::{Error, Result}; use lazy_static::lazy_static; use std::{ collections::HashSet, diff --git a/ecstore/src/utils/os/linux.rs b/ecstore/src/utils/os/linux.rs index 27b73f86..ca64f818 100644 --- a/ecstore/src/utils/os/linux.rs +++ b/ecstore/src/utils/os/linux.rs @@ -4,10 +4,8 @@ use std::fs::File; use std::io::{self, BufRead, Error, ErrorKind}; use std::path::Path; -use crate::{ - disk::Info, - error::{Error as e_Error, Result}, -}; +use crate::disk::Info; +use common::error::{Error as e_Error, Result}; use super::IOStats; diff --git a/ecstore/src/utils/os/unix.rs b/ecstore/src/utils/os/unix.rs index bee85715..d710a770 100644 --- a/ecstore/src/utils/os/unix.rs +++ b/ecstore/src/utils/os/unix.rs @@ -1,5 +1,6 @@ use super::IOStats; -use crate::{disk::Info, error::Result}; +use crate::disk::Info; +use common::error::Result; use nix::sys::{stat::stat, statfs::statfs}; use std::io::{Error, ErrorKind}; use std::path::Path; diff --git a/iam/Cargo.toml b/iam/Cargo.toml index 6c66d8f6..5afc93c5 100644 --- a/iam/Cargo.toml +++ b/iam/Cargo.toml @@ -15,6 +15,7 @@ log.workspace = true time = { workspace = true, features = ["serde-human-readable"] } serde = { workspace = true, features = ["derive", "rc"] } ecstore = { path = "../ecstore" } +policy.workspace = true serde_json.workspace = true async-trait.workspace = true thiserror.workspace = true @@ -31,6 +32,7 @@ tracing.workspace = true madmin.workspace = true lazy_static.workspace = true regex = "1.11.1" +common.workspace = true [dev-dependencies] test-case.workspace = true diff --git a/iam/src/cache.rs 
b/iam/src/cache.rs index b4394445..f1692116 100644 --- a/iam/src/cache.rs +++ b/iam/src/cache.rs @@ -7,14 +7,13 @@ use std::{ use arc_swap::{ArcSwap, AsRaw, Guard}; use log::warn; +use policy::{ + auth::UserIdentity, + policy::{Args, PolicyDoc}, +}; use time::OffsetDateTime; -use crate::{ - auth::UserIdentity, - policy::PolicyDoc, - store::{GroupInfo, MappedPolicy}, - sys::Args, -}; +use crate::store::{GroupInfo, MappedPolicy}; pub struct Cache { pub policy_docs: ArcSwap>, diff --git a/iam/src/error.rs b/iam/src/error.rs index 4c4ff0a6..41b4f45d 100644 --- a/iam/src/error.rs +++ b/iam/src/error.rs @@ -1,12 +1,14 @@ -use crate::policy; +use ecstore::disk::error::clone_disk_err; +use ecstore::disk::error::DiskError; +use policy::policy::Error as PolicyError; #[derive(thiserror::Error, Debug)] pub enum Error { #[error(transparent)] - PolicyError(#[from] policy::Error), + PolicyError(#[from] PolicyError), #[error("ecstore error: {0}")] - EcstoreError(ecstore::error::Error), + EcstoreError(common::error::Error), #[error("{0}")] StringError(String), @@ -96,7 +98,7 @@ pub enum Error { // matches!(e, Error::NoSuchUser(_)) // } -pub fn is_err_no_such_policy(err: &ecstore::error::Error) -> bool { +pub fn is_err_no_such_policy(err: &common::error::Error) -> bool { if let Some(e) = err.downcast_ref::<Error>() { matches!(e, Error::NoSuchPolicy) } else { @@ -104,7 +106,7 @@ pub fn is_err_no_such_policy(err: &ecstore::error::Error) -> bool { } } -pub fn is_err_no_such_user(err: &ecstore::error::Error) -> bool { +pub fn is_err_no_such_user(err: &common::error::Error) -> bool { if let Some(e) = err.downcast_ref::<Error>() { matches!(e, Error::NoSuchUser(_)) } else { @@ -112,7 +114,7 @@ pub fn is_err_no_such_user(err: &ecstore::error::Error) -> bool { } } -pub fn is_err_no_such_account(err: &ecstore::error::Error) -> bool { +pub fn is_err_no_such_account(err: &common::error::Error) -> bool { if let Some(e) = err.downcast_ref::<Error>() { matches!(e, Error::NoSuchAccount(_)) } else { @@ -120,7 +122,7 @@ pub fn is_err_no_such_account(err: &ecstore::error::Error) -> bool { } } -pub fn is_err_no_such_temp_account(err: &ecstore::error::Error) -> bool { +pub fn is_err_no_such_temp_account(err: &common::error::Error) -> bool { if let Some(e) = err.downcast_ref::<Error>() { matches!(e, Error::NoSuchTempAccount(_)) } else { @@ -128,7 +130,7 @@ pub fn is_err_no_such_temp_account(err: &ecstore::error::Error) -> bool { } } -pub fn is_err_no_such_group(err: &ecstore::error::Error) -> bool { +pub fn is_err_no_such_group(err: &common::error::Error) -> bool { if let Some(e) = err.downcast_ref::<Error>() { matches!(e, Error::NoSuchGroup(_)) } else { @@ -136,10 +138,25 @@ pub fn is_err_no_such_group(err: &ecstore::error::Error) -> bool { } } -pub fn is_err_no_such_service_account(err: &ecstore::error::Error) -> bool { +pub fn is_err_no_such_service_account(err: &common::error::Error) -> bool { if let Some(e) = err.downcast_ref::<Error>() { matches!(e, Error::NoSuchServiceAccount(_)) } else { false } } + +pub fn clone_err(e: &common::error::Error) -> common::error::Error { + if let Some(e) = e.downcast_ref::<DiskError>() { + clone_disk_err(e) + } else if let Some(e) = e.downcast_ref::<std::io::Error>() { + if let Some(code) = e.raw_os_error() { + common::error::Error::new(std::io::Error::from_raw_os_error(code)) + } else { + common::error::Error::new(std::io::Error::new(e.kind(), e.to_string())) + } + } else { + //TODO: optimize handling of other error types + common::error::Error::msg(e.to_string()) + } +} diff --git a/iam/src/handler.rs b/iam/src/handler.rs deleted file mode 100644 index 1166c687..00000000 ---
a/iam/src/handler.rs +++ /dev/null @@ -1,154 +0,0 @@ -// use std::{borrow::Cow, collections::HashMap}; - -// use log::{info, warn}; - -// use crate::{ -// arn::ARN, -// auth::UserIdentity, -// cache::CacheInner, -// policy::{utils::get_values_from_claims, Args, Policy}, -// store::Store, -// Error, -// }; - -// pub(crate) struct Handler<'m, T> { -// cache: CacheInner, -// api: &'m T, -// roles: &'m HashMap>, -// } - -// impl<'m, T> Handler<'m, T> { -// pub fn new(cache: CacheInner, api: &'m T, roles: &'m HashMap>) -> Self { -// Self { cache, api, roles } -// } -// } - -// impl<'m, T> Handler<'m, T> -// where -// T: Store, -// { -// #[inline] -// fn get_user<'a>(&self, user_name: &'a str) -> Option<&UserIdentity> { -// self.cache -// .users -// .get(user_name) -// .or_else(|| self.cache.sts_accounts.get(user_name)) -// } - -// async fn get_policy(&self, name: &str, _groups: &[String]) -> crate::Result> { -// if name.is_empty() { -// return Err(Error::InvalidArgument); -// } - -// todo!() -// // self.api.policy_db_get(name, groups) -// } - -// /// If the user is a temporary user, returns Ok(Some(parent_name)) -// /// If not a temporary user, returns Ok(None) -// fn is_temp_user<'a>(&self, user_name: &'a str) -> crate::Result> { -// let user = self -// .get_user(user_name) -// .ok_or_else(|| Error::NoSuchUser(user_name.to_owned()))?; - -// if user.credentials.is_temp() { -// Ok(Some(&user.credentials.parent_user)) -// } else { -// Ok(None) -// } -// } - -// /// If the user is a temporary user, returns Ok(Some(parent_name)) -// /// If not a temporary user, returns Ok(None) -// fn is_service_account<'a>(&self, user_name: &'a str) -> crate::Result> { -// let user = self -// .get_user(user_name) -// .ok_or_else(|| Error::NoSuchUser(user_name.to_owned()))?; - -// if user.credentials.is_service_account() { -// Ok(Some(&user.credentials.parent_user)) -// } else { -// Ok(None) -// } -// } - -// // todo -// pub fn is_allowed_sts(&self, args: &Args, parent: &str) -> bool { -// warn!("unimplement is_allowed_sts"); -// false -// } - -// // todo -// pub async fn is_allowed_service_account<'a>(&self, args: &Args<'a>, parent: &str) -> bool { -// let Some(p) = args.claims.get(parent) else { -// return false; -// }; - -// if let Some(parent_in_chaim) = p.as_str() { -// if parent_in_chaim != parent { -// return false; -// } -// } else { -// return false; -// } - -// let is_owner_derived = parent == "rustfsadmin"; // todo: use the global variable -// let role_arn = args.get_role_arn(); -// let mut svc_policies = None; - -// if is_owner_derived { -// } else if let Some(x) = role_arn { -// let Ok(arn) = x.parse::() else { -// info!("error parsing role ARN {x}"); -// return false; -// }; - -// svc_policies = self.roles.get(&arn).map(|x| Cow::from(x)); -// } else { -// let Ok(mut p) = self.get_policy(parent, &args.groups[..]).await else { return false }; -// if p.is_empty() { -// // todo iamPolicyClaimNameOpenID -// let (p1, _) = get_values_from_claims(&args.claims, ""); -// p = p1; -// } -// svc_policies = Some(Cow::Owned(p)); -// } - -// if is_owner_derived && svc_policies.as_ref().map(|x| x.as_ref().len()).unwrap_or_default() == 0 { -// return false; -// } - -// false -// } - -// pub async fn get_combined_policy(&self, _policies: &[String]) -> Policy { -// todo!() -// } - -// pub async fn is_allowed<'a>(&self, args: Args<'a>) -> bool { -// if args.is_owner { -// return true; -// } - -// match self.is_temp_user(&args.account) { -// Ok(Some(parent)) => return self.is_allowed_sts(&args, parent), -// Err(_) => return false, -// _ => {} -// } - -// match self.is_service_account(&args.account) { -// Ok(Some(parent)) => return
self.is_allowed_service_account(&args, parent).await, -// Err(_) => return false, -// _ => {} -// } - -// let Ok(policies) = self.get_policy(&args.account, &args.groups).await else { return false }; - -// if policies.is_empty() { -// return false; -// } - -// let policy = self.get_combined_policy(&policies[..]).await; -// policy.is_allowed(&args) -// } -// } diff --git a/iam/src/lib.rs b/iam/src/lib.rs index 364627a2..5d3ca626 100644 --- a/iam/src/lib.rs +++ b/iam/src/lib.rs @@ -1,23 +1,16 @@ -use auth::Credentials; -use ecstore::error::{Error, Result}; +use common::error::{Error, Result}; use ecstore::store::ECStore; use error::Error as IamError; use manager::IamCache; +use policy::auth::Credentials; use std::sync::{Arc, OnceLock}; use store::object::ObjectStore; use sys::IamSys; use tracing::{debug, instrument}; pub mod cache; -mod format; -mod handler; - -pub mod arn; -pub mod auth; pub mod error; pub mod manager; -pub mod policy; -pub mod service_type; pub mod store; pub mod utils; diff --git a/iam/src/manager.rs b/iam/src/manager.rs index d257b49e..f4fc076c 100644 --- a/iam/src/manager.rs +++ b/iam/src/manager.rs @@ -1,24 +1,26 @@ use crate::{ - arn::ARN, - auth::{self, get_claims_from_token_with_secret, is_secret_key_valid, jwt_sign, Credentials, UserIdentity}, cache::{Cache, CacheEntity}, error::{is_err_no_such_group, is_err_no_such_policy, is_err_no_such_user, Error as IamError}, - format::Format, get_global_action_cred, - policy::{Policy, PolicyDoc, DEFAULT_POLICIES}, store::{object::IAM_CONFIG_PREFIX, GroupInfo, MappedPolicy, Store, UserType}, sys::{ - iam_policy_claim_name_sa, UpdateServiceAccountOpts, EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, - MAX_SVCSESSION_POLICY_SIZE, SESSION_POLICY_NAME, SESSION_POLICY_NAME_EXTRACTED, STATUS_DISABLED, STATUS_ENABLED, + UpdateServiceAccountOpts, MAX_SVCSESSION_POLICY_SIZE, SESSION_POLICY_NAME, SESSION_POLICY_NAME_EXTRACTED, + STATUS_DISABLED, STATUS_ENABLED, }, }; +use common::error::{Error, Result}; +use ecstore::config::error::is_err_config_not_found; use ecstore::utils::{crypto::base64_encode, path::path_join_buf}; -use ecstore::{ - config::error::is_err_config_not_found, - error::{Error, Result}, -}; use log::{debug, warn}; use madmin::{AccountStatus, AddOrUpdateUserReq, GroupDesc}; +use policy::{ + arn::ARN, + auth::{self, get_claims_from_token_with_secret, is_secret_key_valid, jwt_sign, Credentials, UserIdentity}, + format::Format, + policy::{ + default::DEFAULT_POLICIES, iam_policy_claim_name_sa, Policy, PolicyDoc, EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, + }, +}; use serde::{Deserialize, Serialize}; use serde_json::Value; use std::{ @@ -488,7 +490,6 @@ where if !is_secret_key_valid(&secret) { return Err(IamError::InvalidSecretKeyLength.into()); } - cr.secret_key = secret; } diff --git a/iam/src/store.rs b/iam/src/store.rs index 946a8802..633ff250 100644 --- a/iam/src/store.rs +++ b/iam/src/store.rs @@ -1,7 +1,8 @@ pub mod object; -use crate::{auth::UserIdentity, cache::Cache, policy::PolicyDoc}; -use ecstore::error::Result; +use crate::cache::Cache; +use common::error::Result; +use policy::{auth::UserIdentity, policy::PolicyDoc}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use time::OffsetDateTime; diff --git a/iam/src/store/object.rs b/iam/src/store/object.rs index 40c9e97a..20ab0128 100644 --- a/iam/src/store/object.rs +++ b/iam/src/store/object.rs @@ -1,19 +1,17 @@ use super::{GroupInfo, MappedPolicy, Store, UserType}; use crate::{ - auth::UserIdentity, 
cache::{Cache, CacheEntity}, error::{is_err_no_such_policy, is_err_no_such_user}, get_global_action_cred, manager::{extract_jwt_claims, get_default_policyes}, - policy::PolicyDoc, }; +use common::error::{Error, Result}; use ecstore::{ config::{ - common::{delete_config, read_config, read_config_with_metadata, save_config}, + com::{delete_config, read_config, read_config_with_metadata, save_config}, error::is_err_config_not_found, RUSTFS_CONFIG_PREFIX, }, - error::{Error, Result}, store::ECStore, store_api::{ObjectInfo, ObjectOptions}, store_list_objects::{ObjectInfoOrErr, WalkOptions}, @@ -21,6 +19,7 @@ use ecstore::{ }; use futures::future::join_all; use lazy_static::lazy_static; +use policy::{auth::UserIdentity, policy::PolicyDoc}; use serde::{de::DeserializeOwned, Serialize}; use std::{collections::HashMap, sync::Arc}; use tokio::sync::broadcast::{self, Receiver as B_Receiver}; diff --git a/iam/src/sys.rs b/iam/src/sys.rs index c22a9d56..ee2a26b6 100644 --- a/iam/src/sys.rs +++ b/iam/src/sys.rs @@ -1,16 +1,3 @@ -use std::collections::HashMap; -use std::collections::HashSet; -use std::sync::Arc; - -use crate::arn::ARN; -use crate::auth::contains_reserved_chars; -use crate::auth::create_new_credentials_with_metadata; -use crate::auth::generate_credentials; -use crate::auth::is_access_key_valid; -use crate::auth::is_secret_key_valid; -use crate::auth::Credentials; -use crate::auth::UserIdentity; -use crate::auth::ACCOUNT_ON; use crate::error::is_err_no_such_account; use crate::error::is_err_no_such_temp_account; use crate::error::Error as IamError; @@ -18,19 +5,33 @@ use crate::get_global_action_cred; use crate::manager::extract_jwt_claims; use crate::manager::get_default_policyes; use crate::manager::IamCache; -use crate::policy::action::Action; -use crate::policy::Policy; -use crate::policy::PolicyDoc; use crate::store::MappedPolicy; use crate::store::Store; use crate::store::UserType; -use ecstore::error::{Error, Result}; +use common::error::{Error, Result}; use ecstore::utils::crypto::base64_decode; use ecstore::utils::crypto::base64_encode; use madmin::AddOrUpdateUserReq; use madmin::GroupDesc; +use policy::arn::ARN; +use policy::auth::contains_reserved_chars; +use policy::auth::create_new_credentials_with_metadata; +use policy::auth::generate_credentials; +use policy::auth::is_access_key_valid; +use policy::auth::is_secret_key_valid; +use policy::auth::Credentials; +use policy::auth::UserIdentity; +use policy::auth::ACCOUNT_ON; +use policy::policy::iam_policy_claim_name_sa; +use policy::policy::Args; +use policy::policy::Policy; +use policy::policy::PolicyDoc; +use policy::policy::EMBEDDED_POLICY_TYPE; +use policy::policy::INHERITED_POLICY_TYPE; use serde_json::json; use serde_json::Value; +use std::collections::HashMap; +use std::sync::Arc; use time::OffsetDateTime; pub const MAX_SVCSESSION_POLICY_SIZE: usize = 4096; @@ -42,9 +43,6 @@ pub const POLICYNAME: &str = "policy"; pub const SESSION_POLICY_NAME: &str = "sessionPolicy"; pub const SESSION_POLICY_NAME_EXTRACTED: &str = "sessionPolicy-extracted"; -pub const EMBEDDED_POLICY_TYPE: &str = "embedded-policy"; -pub const INHERITED_POLICY_TYPE: &str = "inherited-policy"; - pub struct IamSys { store: Arc>, roles_map: HashMap, @@ -697,73 +695,3 @@ pub struct UpdateServiceAccountOpts { pub expiration: Option, pub status: Option, } - -pub fn iam_policy_claim_name_sa() -> String { - "sa-policy".to_string() -} - -/// DEFAULT_VERSION is the default version. 
-/// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html -pub const DEFAULT_VERSION: &str = "2012-10-17"; - -/// check the data is Validator -pub trait Validator { - type Error; - fn is_valid(&self) -> Result<()> { - Ok(()) - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Args<'a> { - pub account: &'a str, - pub groups: &'a Option>, - pub action: Action, - pub bucket: &'a str, - pub conditions: &'a HashMap>, - pub is_owner: bool, - pub object: &'a str, - pub claims: &'a HashMap, - pub deny_only: bool, -} - -impl Args<'_> { - pub fn get_role_arn(&self) -> Option<&str> { - self.claims.get("roleArn").and_then(|x| x.as_str()) - } - pub fn get_policies(&self, policy_claim_name: &str) -> (HashSet, bool) { - get_policies_from_claims(self.claims, policy_claim_name) - } -} - -fn get_values_from_claims(claims: &HashMap, claim_name: &str) -> (HashSet, bool) { - let mut s = HashSet::new(); - if let Some(pname) = claims.get(claim_name) { - if let Some(pnames) = pname.as_array() { - for pname in pnames { - if let Some(pname_str) = pname.as_str() { - for pname in pname_str.split(',') { - let pname = pname.trim(); - if !pname.is_empty() { - s.insert(pname.to_string()); - } - } - } - } - return (s, true); - } else if let Some(pname_str) = pname.as_str() { - for pname in pname_str.split(',') { - let pname = pname.trim(); - if !pname.is_empty() { - s.insert(pname.to_string()); - } - } - return (s, true); - } - } - (s, false) -} - -fn get_policies_from_claims(claims: &HashMap, policy_claim_name: &str) -> (HashSet, bool) { - get_values_from_claims(claims, policy_claim_name) -} diff --git a/iam/src/utils.rs b/iam/src/utils.rs index 2d301bb9..80447d8a 100644 --- a/iam/src/utils.rs +++ b/iam/src/utils.rs @@ -1,4 +1,4 @@ -use ecstore::error::{Error, Result}; +use common::error::{Error, Result}; use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey, Header}; use rand::{Rng, RngCore}; use serde::{de::DeserializeOwned, Serialize}; diff --git a/policy/Cargo.toml b/policy/Cargo.toml new file mode 100644 index 00000000..da2c4636 --- /dev/null +++ b/policy/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "policy" +edition.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[lints] +workspace = true + +[dependencies] +tokio.workspace = true +log.workspace = true +time = { workspace = true, features = ["serde-human-readable"] } +serde = { workspace = true, features = ["derive", "rc"] } +serde_json.workspace = true +async-trait.workspace = true +thiserror.workspace = true +strum = { version = "0.27.1", features = ["derive"] } +arc-swap = "1.7.1" +crypto = { path = "../crypto" } +ipnetwork = { version = "0.21.1", features = ["serde"] } +itertools = "0.14.0" +futures.workspace = true +rand.workspace = true +base64-simd = "0.8.0" +jsonwebtoken = "9.3.0" +tracing.workspace = true +madmin.workspace = true +lazy_static.workspace = true +regex = "1.11.1" +common.workspace = true + +[dev-dependencies] +test-case.workspace = true diff --git a/iam/src/arn.rs b/policy/src/arn.rs similarity index 98% rename from iam/src/arn.rs rename to policy/src/arn.rs index c633fadc..2337208a 100644 --- a/iam/src/arn.rs +++ b/policy/src/arn.rs @@ -1,4 +1,4 @@ -use ecstore::error::{Error, Result}; +use common::error::{Error, Result}; use regex::Regex; const ARN_PREFIX_ARN: &str = "arn"; diff --git a/iam/src/auth.rs b/policy/src/auth.rs similarity index 100% rename from iam/src/auth.rs rename to policy/src/auth.rs diff --git 
a/iam/src/auth/credentials.rs b/policy/src/auth/credentials.rs similarity index 99% rename from iam/src/auth/credentials.rs rename to policy/src/auth/credentials.rs index 905c1771..9ce0c73e 100644 --- a/iam/src/auth/credentials.rs +++ b/policy/src/auth/credentials.rs @@ -1,9 +1,8 @@ use crate::error::Error as IamError; -use crate::policy::Policy; -use crate::sys::{iam_policy_claim_name_sa, Validator, INHERITED_POLICY_TYPE}; +use crate::policy::{iam_policy_claim_name_sa, Policy, Validator, INHERITED_POLICY_TYPE}; use crate::utils; use crate::utils::extract_claims; -use ecstore::error::{Error, Result}; +use common::error::{Error, Result}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; diff --git a/policy/src/error.rs b/policy/src/error.rs new file mode 100644 index 00000000..90c1f2c5 --- /dev/null +++ b/policy/src/error.rs @@ -0,0 +1,145 @@ +use crate::policy; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error(transparent)] + PolicyError(#[from] policy::Error), + + #[error("ecstore error: {0}")] + EcstoreError(common::error::Error), + + #[error("{0}")] + StringError(String), + + #[error("crypto: {0}")] + CryptoError(#[from] crypto::Error), + + #[error("user '{0}' does not exist")] + NoSuchUser(String), + + #[error("account '{0}' does not exist")] + NoSuchAccount(String), + + #[error("service account '{0}' does not exist")] + NoSuchServiceAccount(String), + + #[error("temp account '{0}' does not exist")] + NoSuchTempAccount(String), + + #[error("group '{0}' does not exist")] + NoSuchGroup(String), + + #[error("policy does not exist")] + NoSuchPolicy, + + #[error("policy in use")] + PolicyInUse, + + #[error("group not empty")] + GroupNotEmpty, + + #[error("invalid arguments specified")] + InvalidArgument, + + #[error("not initialized")] + IamSysNotInitialized, + + #[error("invalid service type: {0}")] + InvalidServiceType(String), + + #[error("malformed credential")] + ErrCredMalformed, + + #[error("CredNotInitialized")] + CredNotInitialized, + + #[error("invalid access key length")] + InvalidAccessKeyLength, + + #[error("invalid secret key length")] + InvalidSecretKeyLength, + + #[error("access key contains reserved characters =,")] + ContainsReservedChars, + + #[error("group name contains reserved characters =,")] + GroupNameContainsReservedChars, + + #[error("jwt err {0}")] + JWTError(jsonwebtoken::errors::Error), + + #[error("no access key")] + NoAccessKey, + + #[error("invalid token")] + InvalidToken, + + #[error("invalid access_key")] + InvalidAccessKey, + #[error("action not allowed")] + IAMActionNotAllowed, + + #[error("invalid expiration")] + InvalidExpiration, + + #[error("no secret key with access key")] + NoSecretKeyWithAccessKey, + + #[error("no access key with secret key")] + NoAccessKeyWithSecretKey, + + #[error("policy too large")] + PolicyTooLarge, +} + +// pub fn is_err_no_such_user(e: &Error) -> bool { +// matches!(e, Error::NoSuchUser(_)) +// } + +pub fn is_err_no_such_policy(err: &common::error::Error) -> bool { + if let Some(e) = err.downcast_ref::<Error>() { + matches!(e, Error::NoSuchPolicy) + } else { + false + } +} + +pub fn is_err_no_such_user(err: &common::error::Error) -> bool { + if let Some(e) = err.downcast_ref::<Error>() { + matches!(e, Error::NoSuchUser(_)) + } else { + false + } +} + +pub fn is_err_no_such_account(err: &common::error::Error) -> bool { + if let Some(e) = err.downcast_ref::<Error>() { + matches!(e, Error::NoSuchAccount(_)) + } else { + false + } +} + +pub fn is_err_no_such_temp_account(err:
&common::error::Error) -> bool { + if let Some(e) = err.downcast_ref::<Error>() { + matches!(e, Error::NoSuchTempAccount(_)) + } else { + false + } +} + +pub fn is_err_no_such_group(err: &common::error::Error) -> bool { + if let Some(e) = err.downcast_ref::<Error>() { + matches!(e, Error::NoSuchGroup(_)) + } else { + false + } +} + +pub fn is_err_no_such_service_account(err: &common::error::Error) -> bool { + if let Some(e) = err.downcast_ref::<Error>() { + matches!(e, Error::NoSuchServiceAccount(_)) + } else { + false + } +} diff --git a/iam/src/format.rs b/policy/src/format.rs similarity index 100% rename from iam/src/format.rs rename to policy/src/format.rs diff --git a/policy/src/lib.rs b/policy/src/lib.rs new file mode 100644 index 00000000..270aaa31 --- /dev/null +++ b/policy/src/lib.rs @@ -0,0 +1,7 @@ +pub mod arn; +pub mod auth; +pub mod error; +pub mod format; +pub mod policy; +pub mod service_type; +pub mod utils; diff --git a/iam/src/policy.rs b/policy/src/policy.rs similarity index 84% rename from iam/src/policy.rs rename to policy/src/policy.rs index e25dca90..3685fca7 100644 --- a/iam/src/policy.rs +++ b/policy/src/policy.rs @@ -5,6 +5,7 @@ mod function; mod id; #[allow(clippy::module_inception)] mod policy; +mod principal; pub mod resource; pub mod statement; pub(crate) mod utils; @@ -15,11 +16,14 @@ pub use doc::PolicyDoc; pub use effect::Effect; pub use function::Functions; pub use id::ID; -pub use policy::{default::DEFAULT_POLICIES, Policy}; +pub use policy::*; +pub use principal::Principal; pub use resource::ResourceSet; - pub use statement::Statement; +pub const EMBEDDED_POLICY_TYPE: &str = "embedded-policy"; +pub const INHERITED_POLICY_TYPE: &str = "inherited-policy"; + #[derive(thiserror::Error, Debug)] #[cfg_attr(test, derive(Eq, PartialEq))] pub enum Error { diff --git a/iam/src/policy/action.rs b/policy/src/policy/action.rs similarity index 97% rename from iam/src/policy/action.rs rename to policy/src/policy/action.rs index 03204aae..e152e89b 100644 --- a/iam/src/policy/action.rs +++ b/policy/src/policy/action.rs @@ -1,11 +1,9 @@ -use ecstore::error::{Error, Result}; +use common::error::{Error, Result}; use serde::{Deserialize, Serialize}; use std::{collections::HashSet, ops::Deref}; use strum::{EnumString, IntoStaticStr}; -use crate::sys::Validator; - -use super::{utils::wildcard, Error as IamError}; +use super::{utils::wildcard, Error as IamError, Validator}; #[derive(Serialize, Deserialize, Clone, Default, Debug)] pub struct ActionSet(pub HashSet<Action>); @@ -49,7 +47,7 @@ impl PartialEq for ActionSet { } } -#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, Debug)] +#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, Debug, Copy)] #[serde(try_from = "&str", untagged)] pub enum Action { S3Action(S3Action), @@ -107,7 +105,7 @@ impl TryFrom<&str> for Action { } } -#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug)] +#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug, Copy)] #[cfg_attr(test, derive(Default))] #[serde(try_from = "&str", into = "&str")] pub enum S3Action { @@ -234,7 +232,7 @@ pub enum S3Action { PutObjectFanOutAction, } -#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug)] +#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug, Copy)] #[serde(try_from = "&str", into = "&str")] pub enum AdminAction { #[strum(serialize = "admin:*")] @@ -265,11 +263,11 @@
CreateServiceAccountAdminAction, } -#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug)] +#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug, Copy)] #[serde(try_from = "&str", into = "&str")] pub enum StsAction {} -#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug)] +#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug, Copy)] #[serde(try_from = "&str", into = "&str")] pub enum KmsAction { #[strum(serialize = "kms:*")] diff --git a/iam/src/policy/doc.rs b/policy/src/policy/doc.rs similarity index 100% rename from iam/src/policy/doc.rs rename to policy/src/policy/doc.rs diff --git a/iam/src/policy/effect.rs b/policy/src/policy/effect.rs similarity index 90% rename from iam/src/policy/effect.rs rename to policy/src/policy/effect.rs index 67cc38c3..04e6c8a2 100644 --- a/iam/src/policy/effect.rs +++ b/policy/src/policy/effect.rs @@ -1,8 +1,8 @@ -use ecstore::error::{Error, Result}; +use common::error::{Error, Result}; use serde::{Deserialize, Serialize}; use strum::{EnumString, IntoStaticStr}; -use crate::sys::Validator; +use super::Validator; #[derive(Serialize, Clone, Deserialize, EnumString, IntoStaticStr, Default, Debug, PartialEq)] #[serde(try_from = "&str", into = "&str")] diff --git a/iam/src/policy/function.rs b/policy/src/policy/function.rs similarity index 100% rename from iam/src/policy/function.rs rename to policy/src/policy/function.rs diff --git a/iam/src/policy/function/addr.rs b/policy/src/policy/function/addr.rs similarity index 100% rename from iam/src/policy/function/addr.rs rename to policy/src/policy/function/addr.rs diff --git a/iam/src/policy/function/binary.rs b/policy/src/policy/function/binary.rs similarity index 100% rename from iam/src/policy/function/binary.rs rename to policy/src/policy/function/binary.rs diff --git a/iam/src/policy/function/bool_null.rs b/policy/src/policy/function/bool_null.rs similarity index 100% rename from iam/src/policy/function/bool_null.rs rename to policy/src/policy/function/bool_null.rs diff --git a/iam/src/policy/function/condition.rs b/policy/src/policy/function/condition.rs similarity index 100% rename from iam/src/policy/function/condition.rs rename to policy/src/policy/function/condition.rs diff --git a/iam/src/policy/function/date.rs b/policy/src/policy/function/date.rs similarity index 100% rename from iam/src/policy/function/date.rs rename to policy/src/policy/function/date.rs diff --git a/iam/src/policy/function/func.rs b/policy/src/policy/function/func.rs similarity index 100% rename from iam/src/policy/function/func.rs rename to policy/src/policy/function/func.rs diff --git a/iam/src/policy/function/key.rs b/policy/src/policy/function/key.rs similarity index 97% rename from iam/src/policy/function/key.rs rename to policy/src/policy/function/key.rs index 73e0ae18..f4cde509 100644 --- a/iam/src/policy/function/key.rs +++ b/policy/src/policy/function/key.rs @@ -1,6 +1,6 @@ use super::key_name::KeyName; -use crate::{policy::Error as PolicyError, sys::Validator}; -use ecstore::error::Error; +use crate::policy::{Error as PolicyError, Validator}; +use common::error::Error; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] diff --git a/iam/src/policy/function/key_name.rs b/policy/src/policy/function/key_name.rs similarity index 100% rename from iam/src/policy/function/key_name.rs rename to 
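All four action enums gain `Copy` in these hunks. A small sketch of why that matters for the reworked call sites later in the diff, where a single `Action` is passed by value into several checks:

```rust
use policy::policy::action::{Action, S3Action};

// Action is Copy now, so passing it by value does not move it out of `action`.
fn is_s3(a: Action) -> bool {
    matches!(a, Action::S3Action(_))
}

fn main() {
    let action = Action::S3Action(S3Action::PutObjectAction);
    assert!(is_s3(action));
    assert!(is_s3(action)); // still usable: the first call copied, not moved
}
```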
policy/src/policy/function/key_name.rs
diff --git a/iam/src/policy/function/number.rs b/policy/src/policy/function/number.rs
similarity index 100%
rename from iam/src/policy/function/number.rs
rename to policy/src/policy/function/number.rs
diff --git a/iam/src/policy/function/string.rs b/policy/src/policy/function/string.rs
similarity index 100%
rename from iam/src/policy/function/string.rs
rename to policy/src/policy/function/string.rs
diff --git a/iam/src/policy/id.rs b/policy/src/policy/id.rs
similarity index 89%
rename from iam/src/policy/id.rs
rename to policy/src/policy/id.rs
index d31f0dfd..2f314ab4 100644
--- a/iam/src/policy/id.rs
+++ b/policy/src/policy/id.rs
@@ -1,8 +1,8 @@
-use ecstore::error::{Error, Result};
+use common::error::{Error, Result};
 use serde::{Deserialize, Serialize};
 use std::ops::Deref;
 
-use crate::sys::Validator;
+use super::Validator;
 
 #[derive(Serialize, Deserialize, Clone, Default, Debug)]
 pub struct ID(pub String);
diff --git a/iam/src/policy/policy.rs b/policy/src/policy/policy.rs
similarity index 76%
rename from iam/src/policy/policy.rs
rename to policy/src/policy/policy.rs
index bec59983..f1de3790 100644
--- a/iam/src/policy/policy.rs
+++ b/policy/src/policy/policy.rs
@@ -1,8 +1,42 @@
-use super::{Effect, Error as IamError, Statement, ID};
-use crate::sys::{Args, Validator, DEFAULT_VERSION};
-use ecstore::error::{Error, Result};
+use super::{action::Action, statement::BPStatement, Effect, Error as IamError, Statement, ID};
+use common::error::{Error, Result};
 use serde::{Deserialize, Serialize};
-use std::collections::HashSet;
+use serde_json::Value;
+use std::collections::{HashMap, HashSet};
+
+/// DEFAULT_VERSION is the default version.
+/// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html
+pub const DEFAULT_VERSION: &str = "2012-10-17";
+
+/// Checks that the data is valid.
+pub trait Validator {
+    type Error;
+    fn is_valid(&self) -> Result<()> {
+        Ok(())
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct Args<'a> {
+    pub account: &'a str,
+    pub groups: &'a Option<Vec<String>>,
+    pub action: Action,
+    pub bucket: &'a str,
+    pub conditions: &'a HashMap<String, Vec<String>>,
+    pub is_owner: bool,
+    pub object: &'a str,
+    pub claims: &'a HashMap<String, Value>,
+    pub deny_only: bool,
+}
+
+impl Args<'_> {
+    pub fn get_role_arn(&self) -> Option<&str> {
+        self.claims.get("roleArn").and_then(|x| x.as_str())
+    }
+    pub fn get_policies(&self, policy_claim_name: &str) -> (HashSet<String>, bool) {
+        get_policies_from_claims(self.claims, policy_claim_name)
+    }
+}
 
 #[derive(Serialize, Deserialize, Clone, Default, Debug)]
 pub struct Policy {
@@ -119,16 +153,108 @@ impl Validator for Policy {
     }
 }
 
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct BucketPolicyArgs<'a> {
+    pub account: &'a str,
+    pub groups: &'a Option<Vec<String>>,
+    pub action: Action,
+    pub bucket: &'a str,
+    pub conditions: &'a HashMap<String, Vec<String>>,
+    pub is_owner: bool,
+    pub object: &'a str,
+}
+
+#[derive(Serialize, Deserialize, Clone, Default, Debug)]
+pub struct BucketPolicy {
+    #[serde(default, rename = "ID")]
+    pub id: ID,
+    #[serde(rename = "Version")]
+    pub version: String,
+    #[serde(rename = "Statement")]
+    pub statements: Vec<BPStatement>,
+}
+
+impl BucketPolicy {
+    pub fn is_allowed(&self, args: &BucketPolicyArgs) -> bool {
+        for statement in self.statements.iter().filter(|s| matches!(s.effect, Effect::Deny)) {
+            if !statement.is_allowed(args) {
+                return false;
+            }
+        }
+
+        if args.is_owner {
+            return true;
+        }
+
+        for statement in self.statements.iter().filter(|s| matches!(s.effect, Effect::Allow)) {
+            if
statement.is_allowed(args) {
+                return true;
+            }
+        }
+
+        false
+    }
+}
+
+impl Validator for BucketPolicy {
+    type Error = Error;
+
+    fn is_valid(&self) -> Result<()> {
+        if !self.version.is_empty() && !self.version.eq(DEFAULT_VERSION) {
+            return Err(IamError::InvalidVersion(self.version.clone()).into());
+        }
+
+        for statement in self.statements.iter() {
+            statement.is_valid()?;
+        }
+
+        Ok(())
+    }
+}
+
+fn get_values_from_claims(claims: &HashMap<String, Value>, claim_name: &str) -> (HashSet<String>, bool) {
+    let mut s = HashSet::new();
+    if let Some(pname) = claims.get(claim_name) {
+        if let Some(pnames) = pname.as_array() {
+            for pname in pnames {
+                if let Some(pname_str) = pname.as_str() {
+                    for pname in pname_str.split(',') {
+                        let pname = pname.trim();
+                        if !pname.is_empty() {
+                            s.insert(pname.to_string());
+                        }
+                    }
+                }
+            }
+            return (s, true);
+        } else if let Some(pname_str) = pname.as_str() {
+            for pname in pname_str.split(',') {
+                let pname = pname.trim();
+                if !pname.is_empty() {
+                    s.insert(pname.to_string());
+                }
+            }
+            return (s, true);
+        }
+    }
+    (s, false)
+}
+
+fn get_policies_from_claims(claims: &HashMap<String, Value>, policy_claim_name: &str) -> (HashSet<String>, bool) {
+    get_values_from_claims(claims, policy_claim_name)
+}
+
+pub fn iam_policy_claim_name_sa() -> String {
+    "sa-policy".to_string()
+}
+
 pub mod default {
     use std::{collections::HashSet, sync::LazyLock};
 
-    use crate::{
-        policy::{
-            action::{Action, AdminAction, KmsAction, S3Action},
-            resource::Resource,
-            ActionSet, Effect, Functions, ResourceSet, Statement,
-        },
-        sys::DEFAULT_VERSION,
+    use crate::policy::{
+        action::{Action, AdminAction, KmsAction, S3Action},
+        resource::Resource,
+        ActionSet, Effect, Functions, ResourceSet, Statement, DEFAULT_VERSION,
     };
 
     use super::Policy;
@@ -323,7 +449,7 @@ pub mod default {
 #[cfg(test)]
 mod test {
     use super::*;
-    use ecstore::error::Result;
+    use common::error::Result;
 
     #[tokio::test]
     async fn test_parse_policy() -> Result<()> {
diff --git a/ecstore/src/bucket/policy/principal.rs b/policy/src/policy/principal.rs
similarity index 56%
rename from ecstore/src/bucket/policy/principal.rs
rename to policy/src/policy/principal.rs
index cff14c7b..bf8087c3 100644
--- a/ecstore/src/bucket/policy/principal.rs
+++ b/policy/src/policy/principal.rs
@@ -1,8 +1,8 @@
+use super::{utils::wildcard, Validator};
+use common::error::{Error, Result};
 use serde::{Deserialize, Serialize};
 use std::collections::HashSet;
 
-use crate::utils;
-
 #[derive(Debug, Clone, Deserialize, Serialize, Default, PartialEq, Eq)]
 #[serde(rename_all = "PascalCase", default)]
 pub struct Principal {
@@ -11,15 +11,22 @@ pub struct Principal {
 }
 
 impl Principal {
-    pub fn is_valid(&self) -> bool {
-        !self.aws.is_empty()
-    }
     pub fn is_match(&self, principal: &str) -> bool {
         for pattern in self.aws.iter() {
-            if utils::wildcard::match_simple(pattern, principal) {
+            if wildcard::is_simple_match(pattern, principal) {
                 return true;
             }
         }
         false
     }
 }
+
+impl Validator for Principal {
+    type Error = Error;
+    fn is_valid(&self) -> Result<()> {
+        if self.aws.is_empty() {
+            return Err(Error::msg("Principal is empty"));
+        }
+        Ok(())
+    }
+}
diff --git a/iam/src/policy/resource.rs b/policy/src/policy/resource.rs
similarity index 98%
rename from iam/src/policy/resource.rs
rename to policy/src/policy/resource.rs
index 9c33e8d5..9592590a 100644
--- a/iam/src/policy/resource.rs
+++ b/policy/src/policy/resource.rs
@@ -1,4 +1,4 @@
-use ecstore::error::{Error, Result};
+use common::error::{Error, Result};
 use serde::{Deserialize, Serialize};
 use std::{
     collections::{HashMap, HashSet},
@@ -6,12 +6,10 @@
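The claim parsing above accepts either a JSON array or a comma-separated string and normalizes both into a set of trimmed policy names. Since `get_policies_from_claims` is private, the public path is `Args::get_policies`; a sketch (claim values invented):

```rust
use policy::policy::{
    action::{Action, S3Action},
    iam_policy_claim_name_sa, Args,
};
use serde_json::{json, Value};
use std::collections::{HashMap, HashSet};

fn main() {
    let mut claims: HashMap<String, Value> = HashMap::new();
    // A comma-separated string; a JSON array of names would parse the same way.
    claims.insert(iam_policy_claim_name_sa(), json!("readwrite, diagnostics"));

    let conditions: HashMap<String, Vec<String>> = HashMap::new();
    let args = Args {
        account: "svc-account",
        groups: &None,
        action: Action::S3Action(S3Action::GetObjectAction),
        bucket: "",
        conditions: &conditions,
        is_owner: false,
        object: "",
        claims: &claims,
        deny_only: false,
    };

    let (policies, found) = args.get_policies(&iam_policy_claim_name_sa());
    assert!(found);
    assert_eq!(
        policies,
        HashSet::from(["readwrite".to_string(), "diagnostics".to_string()])
    );
}
```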
use std::{ ops::Deref, }; -use crate::sys::Validator; - use super::{ function::key_name::KeyName, utils::{path, wildcard}, - Error as IamError, + Error as IamError, Validator, }; #[derive(Serialize, Deserialize, Clone, Default, Debug)] diff --git a/iam/src/policy/statement.rs b/policy/src/policy/statement.rs similarity index 54% rename from iam/src/policy/statement.rs rename to policy/src/policy/statement.rs index 142d19fd..9b1db671 100644 --- a/iam/src/policy/statement.rs +++ b/policy/src/policy/statement.rs @@ -1,7 +1,8 @@ -use crate::sys::{Args, Validator}; - -use super::{action::Action, ActionSet, Effect, Error as IamError, Functions, ResourceSet, ID}; -use ecstore::error::{Error, Result}; +use super::{ + action::Action, ActionSet, Args, BucketPolicyArgs, Effect, Error as IamError, Functions, Principal, ResourceSet, Validator, + ID, +}; +use common::error::{Error, Result}; use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Clone, Default, Debug)] @@ -117,3 +118,86 @@ impl PartialEq for Statement { && self.conditions == other.conditions } } + +#[derive(Debug, Deserialize, Serialize, Default, Clone)] +#[serde(rename_all = "PascalCase", default)] +pub struct BPStatement { + #[serde(rename = "Sid", default)] + pub sid: ID, + #[serde(rename = "Effect")] + pub effect: Effect, + #[serde(rename = "Principal")] + pub principal: Principal, + #[serde(rename = "Action")] + pub actions: ActionSet, + #[serde(rename = "NotAction", default)] + pub not_actions: ActionSet, + #[serde(rename = "Resource", default)] + pub resources: ResourceSet, + #[serde(rename = "NotResource", default)] + pub not_resources: ResourceSet, + #[serde(rename = "Condition", default)] + pub conditions: Functions, +} + +impl BPStatement { + pub fn is_allowed(&self, args: &BucketPolicyArgs) -> bool { + let check = 'c: { + if !self.principal.is_match(args.account) { + break 'c false; + } + + if (!self.actions.is_match(&args.action) && !self.actions.is_empty()) || self.not_actions.is_match(&args.action) { + break 'c false; + } + + let mut resource = String::from(args.bucket); + if !args.object.is_empty() { + if !args.object.starts_with('/') { + resource.push('/'); + } + + resource.push_str(args.object); + } else { + resource.push('/'); + } + + if !self.resources.is_empty() && !self.resources.is_match(&resource, args.conditions) { + break 'c false; + } + + if !self.not_resources.is_empty() && self.not_resources.is_match(&resource, args.conditions) { + break 'c false; + } + + self.conditions.evaluate(args.conditions) + }; + + self.effect.is_allowed(check) + } +} + +impl Validator for BPStatement { + type Error = Error; + fn is_valid(&self) -> Result<()> { + self.effect.is_valid()?; + // check sid + self.sid.is_valid()?; + + self.principal.is_valid()?; + + if self.actions.is_empty() && self.not_actions.is_empty() { + return Err(IamError::NonAction.into()); + } + + if self.resources.is_empty() { + return Err(IamError::NonResource.into()); + } + + self.actions.is_valid()?; + self.not_actions.is_valid()?; + self.resources.is_valid()?; + + Ok(()) + } +} diff --git a/iam/src/policy/utils.rs b/policy/src/policy/utils.rs similarity index 100% rename from iam/src/policy/utils.rs rename to policy/src/policy/utils.rs diff --git a/iam/src/policy/utils/path.rs b/policy/src/policy/utils/path.rs similarity index 100% rename from iam/src/policy/utils/path.rs rename to policy/src/policy/utils/path.rs diff --git a/iam/src/policy/utils/wildcard.rs b/policy/src/policy/utils/wildcard.rs similarity index 100% rename from 
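With `BPStatement::is_allowed` now defined, an end-to-end sketch of bucket-policy evaluation: deny statements are checked first, owners short-circuit to allow, and otherwise an allow statement must match. Field shapes (`Principal::aws`, the `Default` fills) are assumed from the definitions above:

```rust
use policy::policy::{
    action::{Action, S3Action},
    statement::BPStatement,
    ActionSet, BucketPolicy, BucketPolicyArgs, Effect, Principal, ResourceSet,
};
use std::collections::HashMap;

fn main() {
    // A public-read policy, built in the style of the tests in policy/tests.
    let policy = BucketPolicy {
        version: "2012-10-17".to_owned(),
        statements: vec![BPStatement {
            effect: Effect::Allow,
            principal: Principal {
                aws: std::iter::once("*".to_owned()).collect(), // assumed field shape
                ..Default::default()
            },
            actions: ActionSet(std::iter::once(Action::S3Action(S3Action::GetObjectAction)).collect()),
            resources: ResourceSet(std::iter::once("arn:aws:s3:::public-bucket/*".try_into().unwrap()).collect()),
            ..Default::default()
        }],
        ..Default::default()
    };

    let conditions = HashMap::new();
    // Evaluation order per BucketPolicy::is_allowed: Deny first, then owner,
    // then Allow.
    let allowed = policy.is_allowed(&BucketPolicyArgs {
        account: "", // anonymous caller
        groups: &None,
        action: Action::S3Action(S3Action::GetObjectAction),
        bucket: "public-bucket",
        conditions: &conditions,
        is_owner: false,
        object: "index.html",
    });
    println!("anonymous GetObject allowed: {allowed}");
}
```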
iam/src/policy/utils/wildcard.rs
rename to policy/src/policy/utils/wildcard.rs
diff --git a/iam/src/service_type.rs b/policy/src/service_type.rs
similarity index 100%
rename from iam/src/service_type.rs
rename to policy/src/service_type.rs
diff --git a/policy/src/utils.rs b/policy/src/utils.rs
new file mode 100644
index 00000000..c868a89a
--- /dev/null
+++ b/policy/src/utils.rs
@@ -0,0 +1,110 @@
+use common::error::{Error, Result};
+use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey, Header};
+use rand::{Rng, RngCore};
+use serde::{de::DeserializeOwned, Serialize};
+
+pub fn gen_access_key(length: usize) -> Result<String> {
+    const ALPHA_NUMERIC_TABLE: [char; 36] = [
+        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
+        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
+    ];
+
+    if length < 3 {
+        return Err(Error::msg("access key length is too short"));
+    }
+
+    let mut result = String::with_capacity(length);
+    let mut rng = rand::thread_rng();
+
+    for _ in 0..length {
+        result.push(ALPHA_NUMERIC_TABLE[rng.gen_range(0..ALPHA_NUMERIC_TABLE.len())]);
+    }
+
+    Ok(result)
+}
+
+pub fn gen_secret_key(length: usize) -> Result<String> {
+    use base64_simd::URL_SAFE_NO_PAD;
+
+    if length < 8 {
+        return Err(Error::msg("secret key length is too short"));
+    }
+    let mut rng = rand::thread_rng();
+
+    let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)];
+    rng.fill_bytes(&mut key);
+
+    let encoded = URL_SAFE_NO_PAD.encode_to_string(&key);
+    let key_str = encoded.replace("/", "+");
+
+    Ok(key_str)
+}
+
+pub fn generate_jwt<T: Serialize>(claims: &T, secret: &str) -> Result<String, jsonwebtoken::errors::Error> {
+    let header = Header::new(Algorithm::HS512);
+    jsonwebtoken::encode(&header, &claims, &EncodingKey::from_secret(secret.as_bytes()))
+}
+
+pub fn extract_claims<T: DeserializeOwned>(
+    token: &str,
+    secret: &str,
+) -> Result<jsonwebtoken::TokenData<T>, jsonwebtoken::errors::Error> {
+    jsonwebtoken::decode::<T>(
+        token,
+        &DecodingKey::from_secret(secret.as_bytes()),
+        &jsonwebtoken::Validation::new(Algorithm::HS512),
+    )
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{gen_access_key, gen_secret_key, generate_jwt};
+    use serde::{Deserialize, Serialize};
+
+    #[test]
+    fn test_gen_access_key() {
+        let a = gen_access_key(10).unwrap();
+        let b = gen_access_key(10).unwrap();
+
+        assert_eq!(a.len(), 10);
+        assert_eq!(b.len(), 10);
+        assert_ne!(a, b);
+    }
+
+    #[test]
+    fn test_gen_secret_key() {
+        let a = gen_secret_key(10).unwrap();
+        let b = gen_secret_key(10).unwrap();
+        assert_ne!(a, b);
+    }
+
+    #[derive(Debug, Serialize, Deserialize, PartialEq)]
+    struct Claims {
+        sub: String,
+        company: String,
+    }
+
+    #[test]
+    fn test_generate_jwt() {
+        let claims = Claims {
+            sub: "user1".to_string(),
+            company: "example".to_string(),
+        };
+        let secret = "my_secret";
+        let token = generate_jwt(&claims, secret).unwrap();
+
+        assert!(!token.is_empty());
+    }
+
+    // #[test]
+    // fn test_extract_claims() {
+    //     let claims = Claims {
+    //         sub: "user1".to_string(),
+    //         company: "example".to_string(),
+    //     };
+    //     let secret = "my_secret";
+    //     let token = generate_jwt(&claims, secret).unwrap();
+    //     let decoded_claims = extract_claims::<Claims>(&token, secret).unwrap();
+    //     assert_eq!(decoded_claims.claims, claims);
+    // }
+}
diff --git a/iam/tests/policy_is_allowed.rs b/policy/tests/policy_is_allowed.rs
similarity index 98%
rename from iam/tests/policy_is_allowed.rs
rename to policy/tests/policy_is_allowed.rs
index 45ed9607..b72be6dc 100644
--- a/iam/tests/policy_is_allowed.rs
+++ b/policy/tests/policy_is_allowed.rs
@@ -1,11 +1,8 @@
-use
iam::policy::action::Action; -use iam::policy::action::ActionSet; -use iam::policy::action::S3Action::*; -use iam::policy::resource::ResourceSet; -use iam::policy::Effect::*; -use iam::policy::{Policy, Statement}; -use iam::sys::Args; -use iam::sys::DEFAULT_VERSION; +use policy::policy::action::Action; +use policy::policy::action::S3Action::*; +use policy::policy::ActionSet; +use policy::policy::Effect::*; +use policy::policy::*; use serde_json::Value; use std::collections::HashMap; use test_case::test_case; @@ -24,10 +21,10 @@ struct ArgsBuilder { } #[test_case( - Policy{ - version: DEFAULT_VERSION.into(), + policy::policy::Policy{ + version: policy::policy::DEFAULT_VERSION.into(), statements: vec![ - Statement{ + policy::policy::Statement{ effect: Allow, actions: ActionSet(vec![Action::S3Action(PutObjectAction), Action::S3Action(GetBucketLocationAction)].into_iter().collect()), resources: ResourceSet(vec!["arn:aws:s3:::*".try_into().unwrap()].into_iter().collect()), @@ -46,7 +43,7 @@ struct ArgsBuilder { )] #[test_case( Policy{ - version: iam::sys::DEFAULT_VERSION.into(), + version: DEFAULT_VERSION.into(), statements: vec![ Statement{ effect: Allow, @@ -581,7 +578,7 @@ struct ArgsBuilder { )] #[test_case( Policy{ - version: iam::sys::DEFAULT_VERSION.into(), + version: DEFAULT_VERSION.into(), statements: vec![ Statement{ effect: Deny, diff --git a/rustfs/Cargo.toml b/rustfs/Cargo.toml index 640baea7..6bc9881f 100644 --- a/rustfs/Cargo.toml +++ b/rustfs/Cargo.toml @@ -22,6 +22,7 @@ bytes.workspace = true clap.workspace = true common.workspace = true ecstore.workspace = true +policy.workspace =true flatbuffers.workspace = true futures.workspace = true futures-util.workspace = true diff --git a/rustfs/src/admin/handlers.rs b/rustfs/src/admin/handlers.rs index 88a551f7..3a53de3c 100644 --- a/rustfs/src/admin/handlers.rs +++ b/rustfs/src/admin/handlers.rs @@ -1,12 +1,12 @@ use super::router::Operation; use crate::storage::error::to_s3_error; +use ::policy::policy::action::{Action, S3Action}; +use ::policy::policy::resource::Resource; +use ::policy::policy::statement::BPStatement; +use ::policy::policy::{ActionSet, BucketPolicy, Effect, ResourceSet}; use bytes::Bytes; +use common::error::Error as ec_Error; use ecstore::admin_server_info::get_server_info; -use ecstore::bucket::policy::action::{Action, ActionSet}; -use ecstore::bucket::policy::bucket_policy::{BPStatement, BucketPolicy}; -use ecstore::bucket::policy::effect::Effect; -use ecstore::bucket::policy::resource::{Resource, ResourceSet}; -use ecstore::error::Error as ec_Error; use ecstore::global::GLOBAL_ALlHealState; use ecstore::heal::data_usage::load_data_usage_from_backend; use ecstore::heal::heal_commands::HealOpts; @@ -74,16 +74,16 @@ impl Operation for AccountInfoHandler { // test policy let mut s3_all_act = HashSet::with_capacity(1); - s3_all_act.insert(Action::AllActions); + s3_all_act.insert(Action::S3Action(S3Action::AllActions)); let mut all_res = HashSet::with_capacity(1); - all_res.insert(Resource::new("*")); + all_res.insert(Resource::S3("*".to_string())); let bucket_policy = BucketPolicy { - id: "".to_owned(), + id: "".into(), version: "2012-10-17".to_owned(), statements: vec![BPStatement { - sid: "".to_owned(), + sid: "".into(), effect: Effect::Allow, actions: ActionSet(s3_all_act.clone()), resources: ResourceSet(all_res), diff --git a/rustfs/src/admin/handlers/policy.rs b/rustfs/src/admin/handlers/policy.rs index 40b8c98d..4d650a59 100644 --- a/rustfs/src/admin/handlers/policy.rs +++ b/rustfs/src/admin/handlers/policy.rs 
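Backing up to the new `policy/src/utils.rs`: the commented-out round-trip test there would likely fail as written, because `extract_claims` uses `jsonwebtoken::Validation::new(Algorithm::HS512)`, whose defaults require an `exp` claim. A sketch that round-trips with an expiry (claim values invented):

```rust
use policy::utils::{extract_claims, generate_jwt};
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct Claims {
    sub: String,
    company: String,
    exp: u64, // jsonwebtoken's default Validation checks expiry
}

fn main() {
    let claims = Claims {
        sub: "user1".to_owned(),
        company: "example".to_owned(),
        exp: 4_102_444_800, // far-future (2100-01-01) so validation passes
    };
    let secret = "my_secret";

    let token = generate_jwt(&claims, secret).unwrap();
    let decoded = extract_claims::<Claims>(&token, secret).unwrap();
    assert_eq!(decoded.claims, claims);
}
```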
@@ -2,8 +2,9 @@ use std::collections::HashMap; use crate::admin::{router::Operation, utils::has_space_be}; use http::{HeaderMap, StatusCode}; -use iam::{error::is_err_no_such_user, get_global_action_cred, policy::Policy, store::MappedPolicy}; +use iam::{error::is_err_no_such_user, get_global_action_cred, store::MappedPolicy}; use matchit::Params; +use policy::policy::Policy; use s3s::{header::CONTENT_TYPE, s3_error, Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result}; use serde::Deserialize; use serde_urlencoded::from_bytes; diff --git a/rustfs/src/admin/handlers/service_account.rs b/rustfs/src/admin/handlers/service_account.rs index 021e048e..7d26e9ba 100644 --- a/rustfs/src/admin/handlers/service_account.rs +++ b/rustfs/src/admin/handlers/service_account.rs @@ -5,7 +5,6 @@ use hyper::StatusCode; use iam::{ error::is_err_no_such_service_account, get_global_action_cred, - policy::Policy, sys::{NewServiceAccountOpts, UpdateServiceAccountOpts}, }; use madmin::{ @@ -13,6 +12,7 @@ use madmin::{ ServiceAccountInfo, UpdateServiceAccountReq, }; use matchit::Params; +use policy::policy::Policy; use s3s::S3ErrorCode::InvalidRequest; use s3s::{header::CONTENT_TYPE, s3_error, Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result}; use serde::Deserialize; diff --git a/rustfs/src/admin/handlers/sts.rs b/rustfs/src/admin/handlers/sts.rs index cdd54286..14efd222 100644 --- a/rustfs/src/admin/handlers/sts.rs +++ b/rustfs/src/admin/handlers/sts.rs @@ -6,8 +6,9 @@ use crate::{ }; use ecstore::utils::{crypto::base64_encode, xml}; use http::StatusCode; -use iam::{auth::get_new_credentials_with_metadata, manager::get_token_signing_key, policy::Policy, sys::SESSION_POLICY_NAME}; +use iam::{manager::get_token_signing_key, sys::SESSION_POLICY_NAME}; use matchit::Params; +use policy::{auth::get_new_credentials_with_metadata, policy::Policy}; use s3s::{ dto::{AssumeRoleOutput, Credentials, Timestamp}, s3_error, Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, diff --git a/rustfs/src/auth.rs b/rustfs/src/auth.rs index c2e215a6..42d4a464 100644 --- a/rustfs/src/auth.rs +++ b/rustfs/src/auth.rs @@ -1,11 +1,11 @@ use std::collections::HashMap; use http::HeaderMap; -use iam::auth; -use iam::auth::get_claims_from_token_with_secret; use iam::error::Error as IamError; use iam::get_global_action_cred; use iam::sys::SESSION_POLICY_NAME; +use policy::auth; +use policy::auth::get_claims_from_token_with_secret; use s3s::auth::S3Auth; use s3s::auth::SecretKey; use s3s::auth::SimpleAuth; diff --git a/rustfs/src/console.rs b/rustfs/src/console.rs index 102a3262..8ccf0fb8 100644 --- a/rustfs/src/console.rs +++ b/rustfs/src/console.rs @@ -11,7 +11,7 @@ use mime_guess::from_path; use rust_embed::RustEmbed; use serde::Serialize; use shadow_rs::shadow; -use std::net::{Ipv4Addr, ToSocketAddrs}; +use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; use std::sync::OnceLock; use tracing::info; @@ -159,7 +159,15 @@ async fn config_handler(Host(host): Host) -> impl IntoResponse { let is_addr = host_with_port .to_socket_addrs() - .map(|addrs| addrs.into_iter().find(|v| v.is_ipv4())) + .map(|addrs| { + addrs.into_iter().find(|v| { + if let SocketAddr::V4(ipv4) = v { + !ipv4.ip().is_private() && !ipv4.ip().is_loopback() && !ipv4.ip().is_unspecified() + } else { + false + } + }) + }) .unwrap_or_default(); let mut cfg = CONSOLE_CONFIG.get().unwrap().clone(); diff --git a/rustfs/src/grpc.rs b/rustfs/src/grpc.rs index 1aeb64b2..c1787915 100644 --- a/rustfs/src/grpc.rs +++ b/rustfs/src/grpc.rs @@ -1,12 +1,12 @@ use 
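The `config_handler` change above stops treating private, loopback, and unspecified IPv4 addresses as externally reachable. The predicate, isolated as a standalone sketch:

```rust
use std::net::{SocketAddr, ToSocketAddrs};

// Resolve host:port and keep the first IPv4 address that looks globally
// routable, mirroring the filter added to config_handler.
fn first_public_v4(host_with_port: &str) -> Option<SocketAddr> {
    host_with_port.to_socket_addrs().ok()?.find(|addr| match addr {
        SocketAddr::V4(v4) => {
            let ip = v4.ip();
            !ip.is_private() && !ip.is_loopback() && !ip.is_unspecified()
        }
        _ => false,
    })
}

fn main() {
    assert_eq!(first_public_v4("127.0.0.1:9000"), None); // loopback filtered
    assert_eq!(first_public_v4("10.0.0.8:9000"), None);  // private filtered
    println!("{:?}", first_public_v4("example.com:443")); // depends on DNS
}
```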
std::{collections::HashMap, io::Cursor, pin::Pin}; +use common::error::Error as EcsError; use ecstore::{ admin_server_info::get_local_server_property, bucket::{metadata::load_bucket_metadata, metadata_sys}, disk::{ DeleteOptions, DiskAPI, DiskInfoOptions, DiskStore, FileInfoVersions, ReadMultipleReq, ReadOptions, UpdateMetadataOpts, }, - error::Error as EcsError, heal::{ data_usage_cache::DataUsageCache, heal_commands::{get_local_background_heal_status, HealOpts}, @@ -1073,8 +1073,7 @@ impl Node for NodeService { async fn delete_paths(&self, request: Request) -> Result, Status> { let request = request.into_inner(); if let Some(disk) = self.find_disk(&request.disk).await { - let paths = request.paths.iter().map(|s| s.as_str()).collect::>(); - match disk.delete_paths(&request.volume, &paths).await { + match disk.delete_paths(&request.volume, &request.paths).await { Ok(_) => Ok(tonic::Response::new(DeletePathsResponse { success: true, error: None, @@ -2079,7 +2078,12 @@ impl Node for NodeService { match load_bucket_metadata(store, &bucket).await { Ok(meta) => { - metadata_sys::set_bucket_metadata(bucket, meta).await; + if let Err(err) = metadata_sys::set_bucket_metadata(bucket, meta).await { + return Ok(tonic::Response::new(LoadBucketMetadataResponse { + success: false, + error_info: Some(err.to_string()), + })); + }; Ok(tonic::Response::new(LoadBucketMetadataResponse { success: true, error_info: None, diff --git a/rustfs/src/storage/access.rs b/rustfs/src/storage/access.rs index 06844c64..0e1562ca 100644 --- a/rustfs/src/storage/access.rs +++ b/rustfs/src/storage/access.rs @@ -1,56 +1,128 @@ use super::ecfs::FS; use crate::auth::{check_key_valid, get_condition_values}; -use iam::auth; +use ecstore::bucket::policy_sys::PolicySys; use iam::error::Error as IamError; -use iam::policy::action::{Action, S3Action}; -use iam::sys::Args; +use policy::auth; +use policy::policy::action::{Action, S3Action}; +use policy::policy::{Args, BucketPolicyArgs}; use s3s::access::{S3Access, S3AccessContext}; use s3s::{dto::*, s3_error, S3Error, S3ErrorCode, S3Request, S3Result}; use std::collections::HashMap; -use tracing::info; #[allow(dead_code)] #[derive(Default, Clone)] pub(crate) struct ReqInfo { - pub cred: auth::Credentials, + pub cred: Option, pub is_owner: bool, pub bucket: Option, pub object: Option, pub version_id: Option, } -pub async fn authorize_request(req: &mut S3Request, actions: Vec) -> S3Result<()> { - let Ok(iam_store) = iam::get() else { - return Err(S3Error::with_message( - S3ErrorCode::InternalError, - format!("check_key_valid {:?}", IamError::IamSysNotInitialized), - )); - }; - +pub async fn authorize_request(req: &mut S3Request, action: Action) -> S3Result<()> { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); - let default_claims = HashMap::new(); - let claims = req_info.cred.claims.as_ref().unwrap_or(&default_claims); - let conditions = get_condition_values(&req.headers, &req_info.cred); - - for action in actions { - let args = &Args { - account: &req_info.cred.access_key, - groups: &req_info.cred.groups, - action, - bucket: req_info.bucket.as_deref().unwrap_or(""), - conditions: &conditions, - is_owner: req_info.is_owner, - object: req_info.object.as_deref().unwrap_or(""), - claims, - deny_only: false, + if let Some(cred) = &req_info.cred { + let Ok(iam_store) = iam::get() else { + return Err(S3Error::with_message( + S3ErrorCode::InternalError, + format!("authorize_request {:?}", IamError::IamSysNotInitialized), + )); }; - if !iam_store.is_allowed(args).await { - 
return Err(s3_error!(AccessDenied, "Access Denied")); + + let default_claims = HashMap::new(); + let claims = cred.claims.as_ref().unwrap_or(&default_claims); + let conditions = get_condition_values(&req.headers, cred); + + if action != Action::S3Action(S3Action::DeleteObjectAction) + && req_info.version_id.is_some() + && iam_store + .is_allowed(&Args { + account: &cred.access_key, + groups: &cred.groups, + action: Action::S3Action(S3Action::DeleteObjectVersionAction), + bucket: req_info.bucket.as_deref().unwrap_or(""), + conditions: &conditions, + is_owner: req_info.is_owner, + object: req_info.object.as_deref().unwrap_or(""), + claims, + deny_only: false, + }) + .await + { + return Ok(()); + } + + if iam_store + .is_allowed(&Args { + account: &cred.access_key, + groups: &cred.groups, + action, + bucket: req_info.bucket.as_deref().unwrap_or(""), + conditions: &conditions, + is_owner: req_info.is_owner, + object: req_info.object.as_deref().unwrap_or(""), + claims, + deny_only: false, + }) + .await + { + return Ok(()); + } + + if action == Action::S3Action(S3Action::ListBucketVersionsAction) + && iam_store + .is_allowed(&Args { + account: &cred.access_key, + groups: &cred.groups, + action: Action::S3Action(S3Action::ListBucketAction), + bucket: req_info.bucket.as_deref().unwrap_or(""), + conditions: &conditions, + is_owner: req_info.is_owner, + object: req_info.object.as_deref().unwrap_or(""), + claims, + deny_only: false, + }) + .await + { + return Ok(()); + } + } else { + let conditions = get_condition_values(&req.headers, &auth::Credentials::default()); + + if action != Action::S3Action(S3Action::ListAllMyBucketsAction) { + if PolicySys::is_allowed(&BucketPolicyArgs { + bucket: req_info.bucket.as_deref().unwrap_or(""), + action, + is_owner: false, + account: "", + groups: &None, + conditions: &conditions, + object: req_info.object.as_deref().unwrap_or(""), + }) + .await + { + return Ok(()); + } + + if action == Action::S3Action(S3Action::ListBucketVersionsAction) + && PolicySys::is_allowed(&BucketPolicyArgs { + bucket: req_info.bucket.as_deref().unwrap_or(""), + action: Action::S3Action(S3Action::ListBucketAction), + is_owner: false, + account: "", + groups: &None, + conditions: &conditions, + object: "", + }) + .await + { + return Ok(()); + } } } - Ok(()) + Err(s3_error!(AccessDenied, "Access Denied")) } #[async_trait::async_trait] @@ -71,23 +143,24 @@ impl S3Access for FS { // /// + [`cx.extensions_mut()`](S3AccessContext::extensions_mut) async fn check(&self, cx: &mut S3AccessContext<'_>) -> S3Result<()> { // 上层验证了 ak/sk - info!( - "s3 check uri: {:?}, method: {:?} path: {:?}, s3_op: {:?}, cred: {:?}, headers:{:?}", - cx.uri(), - cx.method(), - cx.s3_path(), - cx.s3_op().name(), - cx.credentials(), - cx.headers(), - // cx.extensions_mut(), - ); + // info!( + // "s3 check uri: {:?}, method: {:?} path: {:?}, s3_op: {:?}, cred: {:?}, headers:{:?}", + // cx.uri(), + // cx.method(), + // cx.s3_path(), + // cx.s3_op().name(), + // cx.credentials(), + // cx.headers(), + // // cx.extensions_mut(), + // ); - let Some(input_cred) = cx.credentials() else { - return Err(s3_error!(UnauthorizedAccess, "Signature is required")); + let (cred, is_owner) = if let Some(input_cred) = cx.credentials() { + let (cred, is_owner) = check_key_valid(cx.headers(), &input_cred.access_key).await?; + (Some(cred), is_owner) + } else { + (None, false) }; - let (cred, is_owner) = check_key_valid(cx.headers(), &input_cred.access_key).await?; - let req_info = ReqInfo { cred, is_owner, @@ -109,17 +182,11 @@ impl S3Access 
for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::CreateBucketAction)]).await?; + authorize_request(req, Action::S3Action(S3Action::CreateBucketAction)).await?; if req.input.object_lock_enabled_for_bucket.is_some_and(|v| v) { - authorize_request( - req, - vec![ - Action::S3Action(S3Action::PutBucketObjectLockConfigurationAction), - Action::S3Action(S3Action::PutBucketVersioningAction), - ], - ) - .await?; + authorize_request(req, Action::S3Action(S3Action::PutBucketObjectLockConfigurationAction)).await?; + authorize_request(req, Action::S3Action(S3Action::PutBucketVersioningAction)).await?; } Ok(()) @@ -157,7 +224,7 @@ impl S3Access for FS { req_info.object = Some(src_key); req_info.version_id = version_id; - authorize_request(req, vec![Action::S3Action(S3Action::GetObjectAction)]).await?; + authorize_request(req, Action::S3Action(S3Action::GetObjectAction)).await?; } let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); @@ -166,7 +233,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::PutObjectAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutObjectAction)).await } /// Checks whether the CreateMultipartUpload request has accesses to the resources. @@ -183,10 +250,10 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::DeleteBucketAction)]).await?; + authorize_request(req, Action::S3Action(S3Action::DeleteBucketAction)).await?; if req.input.force_delete.is_some_and(|v| v) { - authorize_request(req, vec![Action::S3Action(S3Action::ForceDeleteBucketAction)]).await?; + authorize_request(req, Action::S3Action(S3Action::ForceDeleteBucketAction)).await?; } Ok(()) } @@ -208,7 +275,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketCorsAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketCorsAction)).await } /// Checks whether the DeleteBucketEncryption request has accesses to the resources. @@ -218,7 +285,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketEncryptionAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketEncryptionAction)).await } /// Checks whether the DeleteBucketIntelligentTieringConfiguration request has accesses to the resources. @@ -248,7 +315,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketLifecycleAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketLifecycleAction)).await } /// Checks whether the DeleteBucketMetricsConfiguration request has accesses to the resources. 
@@ -275,7 +342,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::DeleteBucketPolicyAction)]).await + authorize_request(req, Action::S3Action(S3Action::DeleteBucketPolicyAction)).await } /// Checks whether the DeleteBucketReplication request has accesses to the resources. @@ -285,7 +352,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutReplicationConfigurationAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutReplicationConfigurationAction)).await } /// Checks whether the DeleteBucketTagging request has accesses to the resources. @@ -295,7 +362,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketTaggingAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketTaggingAction)).await } /// Checks whether the DeleteBucketWebsite request has accesses to the resources. @@ -314,7 +381,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::DeleteObjectAction)]).await + authorize_request(req, Action::S3Action(S3Action::DeleteObjectAction)).await } /// Checks whether the DeleteObjectTagging request has accesses to the resources. @@ -326,7 +393,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::DeleteObjectTaggingAction)]).await + authorize_request(req, Action::S3Action(S3Action::DeleteObjectTaggingAction)).await } /// Checks whether the DeleteObjects request has accesses to the resources. @@ -360,7 +427,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetBucketPolicyAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetBucketPolicyAction)).await } /// Checks whether the GetBucketAnalyticsConfiguration request has accesses to the resources. @@ -380,7 +447,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetBucketCorsAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetBucketCorsAction)).await } /// Checks whether the GetBucketEncryption request has accesses to the resources. @@ -390,7 +457,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetBucketEncryptionAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetBucketEncryptionAction)).await } /// Checks whether the GetBucketIntelligentTieringConfiguration request has accesses to the resources. 
@@ -423,7 +490,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetBucketLifecycleAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetBucketLifecycleAction)).await } /// Checks whether the GetBucketLocation request has accesses to the resources. @@ -433,7 +500,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetBucketLocationAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetBucketLocationAction)).await } /// Checks whether the GetBucketLogging request has accesses to the resources. @@ -460,7 +527,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetBucketNotificationAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetBucketNotificationAction)).await } /// Checks whether the GetBucketOwnershipControls request has accesses to the resources. @@ -477,7 +544,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetBucketPolicyAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetBucketPolicyAction)).await } /// Checks whether the GetBucketPolicyStatus request has accesses to the resources. @@ -487,7 +554,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetBucketPolicyStatusAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetBucketPolicyStatusAction)).await } /// Checks whether the GetBucketReplication request has accesses to the resources. @@ -497,7 +564,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetReplicationConfigurationAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetReplicationConfigurationAction)).await } /// Checks whether the GetBucketRequestPayment request has accesses to the resources. @@ -514,7 +581,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetBucketTaggingAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetBucketTaggingAction)).await } /// Checks whether the GetBucketVersioning request has accesses to the resources. @@ -524,7 +591,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetBucketVersioningAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetBucketVersioningAction)).await } /// Checks whether the GetBucketWebsite request has accesses to the resources. 
@@ -543,7 +610,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::GetObjectAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetObjectAction)).await } /// Checks whether the GetObjectAcl request has accesses to the resources. @@ -555,7 +622,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::GetBucketPolicyAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetBucketPolicyAction)).await } /// Checks whether the GetObjectAttributes request has accesses to the resources. @@ -567,16 +634,15 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - let mut actions = Vec::new(); if req.input.version_id.is_some() { - actions.push(Action::S3Action(S3Action::GetObjectVersionAttributesAction)); - actions.push(Action::S3Action(S3Action::GetObjectVersionAction)); + authorize_request(req, Action::S3Action(S3Action::GetObjectVersionAttributesAction)).await?; + authorize_request(req, Action::S3Action(S3Action::GetObjectVersionAction)).await?; } else { - actions.push(Action::S3Action(S3Action::GetObjectAttributesAction)); - actions.push(Action::S3Action(S3Action::GetObjectAction)); + authorize_request(req, Action::S3Action(S3Action::GetObjectAttributesAction)).await?; + authorize_request(req, Action::S3Action(S3Action::GetObjectAction)).await?; } - authorize_request(req, actions).await + Ok(()) } /// Checks whether the GetObjectLegalHold request has accesses to the resources. @@ -588,7 +654,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::GetObjectLegalHoldAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetObjectLegalHoldAction)).await } /// Checks whether the GetObjectLockConfiguration request has accesses to the resources. @@ -598,7 +664,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetBucketObjectLockConfigurationAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetBucketObjectLockConfigurationAction)).await } /// Checks whether the GetObjectRetention request has accesses to the resources. @@ -610,7 +676,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::GetObjectRetentionAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetObjectRetentionAction)).await } /// Checks whether the GetObjectTagging request has accesses to the resources. @@ -622,7 +688,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::GetObjectTaggingAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetObjectTaggingAction)).await } /// Checks whether the GetObjectTorrent request has accesses to the resources. 
@@ -646,7 +712,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::ListBucketAction)]).await + authorize_request(req, Action::S3Action(S3Action::ListBucketAction)).await } /// Checks whether the HeadObject request has accesses to the resources. @@ -658,7 +724,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::GetObjectAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetObjectAction)).await } /// Checks whether the ListBucketAnalyticsConfigurations request has accesses to the resources. @@ -716,7 +782,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::ListBucketMultipartUploadsAction)]).await + authorize_request(req, Action::S3Action(S3Action::ListBucketMultipartUploadsAction)).await } /// Checks whether the ListObjectVersions request has accesses to the resources. @@ -733,7 +799,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::ListBucketAction)]).await + authorize_request(req, Action::S3Action(S3Action::ListBucketAction)).await } /// Checks whether the ListObjectsV2 request has accesses to the resources. @@ -743,7 +809,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::ListBucketAction)]).await + authorize_request(req, Action::S3Action(S3Action::ListBucketAction)).await } /// Checks whether the ListParts request has accesses to the resources. @@ -770,7 +836,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketPolicyAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketPolicyAction)).await } /// Checks whether the PutBucketAnalyticsConfiguration request has accesses to the resources. @@ -790,7 +856,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketCorsAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketCorsAction)).await } /// Checks whether the PutBucketEncryption request has accesses to the resources. @@ -800,7 +866,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketEncryptionAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketEncryptionAction)).await } /// Checks whether the PutBucketIntelligentTieringConfiguration request has accesses to the resources. 
@@ -833,7 +899,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketLifecycleAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketLifecycleAction)).await } /// Checks whether the PutBucketLogging request has accesses to the resources. @@ -860,7 +926,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketNotificationAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketNotificationAction)).await } /// Checks whether the PutBucketOwnershipControls request has accesses to the resources. @@ -877,7 +943,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketPolicyAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketPolicyAction)).await } /// Checks whether the PutBucketReplication request has accesses to the resources. @@ -887,7 +953,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutReplicationConfigurationAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutReplicationConfigurationAction)).await } /// Checks whether the PutBucketRequestPayment request has accesses to the resources. @@ -904,7 +970,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketTaggingAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketTaggingAction)).await } /// Checks whether the PutBucketVersioning request has accesses to the resources. @@ -914,7 +980,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketVersioningAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketVersioningAction)).await } /// Checks whether the PutBucketWebsite request has accesses to the resources. @@ -933,7 +999,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::PutObjectAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutObjectAction)).await } /// Checks whether the PutObjectAcl request has accesses to the resources. @@ -945,7 +1011,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketPolicyAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketPolicyAction)).await } /// Checks whether the PutObjectLegalHold request has accesses to the resources. 
@@ -957,7 +1023,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::PutObjectLegalHoldAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutObjectLegalHoldAction)).await } /// Checks whether the PutObjectLockConfiguration request has accesses to the resources. @@ -967,7 +1033,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutBucketObjectLockConfigurationAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutBucketObjectLockConfigurationAction)).await } /// Checks whether the PutObjectRetention request has accesses to the resources. @@ -979,7 +1045,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::PutObjectRetentionAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutObjectRetentionAction)).await } /// Checks whether the PutObjectTagging request has accesses to the resources. @@ -991,7 +1057,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::PutObjectTaggingAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutObjectTaggingAction)).await } /// Checks whether the PutPublicAccessBlock request has accesses to the resources. @@ -1010,7 +1076,7 @@ impl S3Access for FS { req_info.object = Some(req.input.key.clone()); req_info.version_id = req.input.version_id.clone(); - authorize_request(req, vec![Action::S3Action(S3Action::RestoreObjectAction)]).await + authorize_request(req, Action::S3Action(S3Action::RestoreObjectAction)).await } /// Checks whether the SelectObjectContent request has accesses to the resources. @@ -1021,7 +1087,7 @@ impl S3Access for FS { req_info.bucket = Some(req.input.bucket.clone()); req_info.object = Some(req.input.key.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::GetObjectAction)]).await + authorize_request(req, Action::S3Action(S3Action::GetObjectAction)).await } /// Checks whether the UploadPart request has accesses to the resources. @@ -1032,7 +1098,7 @@ impl S3Access for FS { req_info.bucket = Some(req.input.bucket.clone()); req_info.object = Some(req.input.key.clone()); - authorize_request(req, vec![Action::S3Action(S3Action::PutObjectAction)]).await + authorize_request(req, Action::S3Action(S3Action::PutObjectAction)).await } /// Checks whether the UploadPartCopy request has accesses to the resources. 
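That concludes the access.rs rewrite: every handler now requests exactly the permissions it needs, one `Action` at a time, and `authorize_request` encodes two fallbacks for authenticated callers. A condensed, illustrative helper (not part of the diff; ordering simplified):

```rust
use policy::policy::action::{Action, S3Action};

// The two fallbacks from authorize_request: a versioned request may be satisfied
// by a DeleteObjectVersion grant, and ListBucketVersions by a plain ListBucket grant.
fn fallback(action: Action, has_version_id: bool) -> Option<Action> {
    match action {
        Action::S3Action(S3Action::ListBucketVersionsAction) => {
            Some(Action::S3Action(S3Action::ListBucketAction))
        }
        Action::S3Action(S3Action::DeleteObjectAction) => None, // checked directly
        _ if has_version_id => Some(Action::S3Action(S3Action::DeleteObjectVersionAction)),
        _ => None,
    }
}

fn main() {
    let list_versions = Action::S3Action(S3Action::ListBucketVersionsAction);
    assert_eq!(
        fallback(list_versions, false),
        Some(Action::S3Action(S3Action::ListBucketAction))
    );
}
```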
diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 7dfcb46c..92f32769 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -2,7 +2,12 @@ use super::access::authorize_request; use super::options::del_opts; use super::options::extract_metadata; use super::options::put_opts; +use crate::auth::get_condition_values; use crate::storage::access::ReqInfo; +use crate::storage::error::to_s3_error; +use crate::storage::options::copy_dst_opts; +use crate::storage::options::copy_src_opts; +use crate::storage::options::{extract_metadata_from_mime, get_opts}; use bytes::Bytes; use common::error::Result; use ecstore::bucket::error::BucketMetadataError; @@ -15,7 +20,6 @@ use ecstore::bucket::metadata::BUCKET_TAGGING_CONFIG; use ecstore::bucket::metadata::BUCKET_VERSIONING_CONFIG; use ecstore::bucket::metadata::OBJECT_LOCK_CONFIG; use ecstore::bucket::metadata_sys; -use ecstore::bucket::policy::bucket_policy::BucketPolicy; use ecstore::bucket::policy_sys::PolicySys; use ecstore::bucket::tagging::decode_tags; use ecstore::bucket::tagging::encode_tags; @@ -39,9 +43,8 @@ use ecstore::xhttp; use futures::pin_mut; use futures::{Stream, StreamExt}; use http::HeaderMap; -use iam::policy::action::Action; -use iam::policy::action::S3Action; use lazy_static::lazy_static; +use policy::policy::BucketPolicy; use s3s::dto::*; use s3s::s3_error; use s3s::S3Error; @@ -53,18 +56,10 @@ use std::fmt::Debug; use std::str::FromStr; use tokio_util::io::ReaderStream; use tokio_util::io::StreamReader; -use tracing::debug; -use tracing::error; -use tracing::info; -use tracing::warn; +use tracing::{debug, error, info, warn}; use transform_stream::AsyncTryStream; use uuid::Uuid; -use crate::storage::error::to_s3_error; -use crate::storage::options::copy_dst_opts; -use crate::storage::options::copy_src_opts; -use crate::storage::options::{extract_metadata_from_mime, get_opts}; - macro_rules! try_ { ($result:expr) => { match $result { diff --git a/rustfs/src/storage/error.rs b/rustfs/src/storage/error.rs index 459a1b29..c902390e 100644 --- a/rustfs/src/storage/error.rs +++ b/rustfs/src/storage/error.rs @@ -1,6 +1,6 @@ -use ecstore::{disk::error::is_err_file_not_found, error::Error, store_err::StorageError}; +use common::error::Error; +use ecstore::{disk::error::is_err_file_not_found, store_err::StorageError}; use s3s::{s3_error, S3Error, S3ErrorCode}; - pub fn to_s3_error(err: Error) -> S3Error { if let Some(storage_err) = err.downcast_ref::() { return match storage_err { diff --git a/rustfs/src/storage/options.rs b/rustfs/src/storage/options.rs index 847d6779..18a13974 100644 --- a/rustfs/src/storage/options.rs +++ b/rustfs/src/storage/options.rs @@ -1,5 +1,5 @@ +use common::error::{Error, Result}; use ecstore::bucket::versioning_sys::BucketVersioningSys; -use ecstore::error::{Error, Result}; use ecstore::store_api::ObjectOptions; use ecstore::store_err::StorageError; use ecstore::utils::path::is_dir_object;
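Finally, `to_s3_error` now takes the type-erased `common::error::Error` and probes it for typed variants. An illustrative sketch of that dispatch; the `BucketNotFound` variant name is assumed for the example, not taken from the diff:

```rust
use common::error::Error;
use ecstore::store_err::StorageError;
use s3s::{s3_error, S3Error};

// Downcast the erased error to the typed StorageError and translate it;
// the real to_s3_error matches every variant (BucketNotFound is hypothetical).
fn classify(err: &Error) -> S3Error {
    if let Some(storage_err) = err.downcast_ref::<StorageError>() {
        if matches!(storage_err, StorageError::BucketNotFound(_)) {
            return s3_error!(NoSuchBucket, "bucket not found");
        }
    }
    s3_error!(InternalError, "unhandled backend error")
}

fn main() {
    let err = Error::from_string("boom".to_string());
    let _ = classify(&err);
}
```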