Merge branch 'main' of github.com:rustfs/s3-rustfs into feature/observability

# Conflicts:
#	rustfs/src/storage/ecfs.rs
.github/workflows/build.yml (vendored, 2 changes)

@@ -28,7 +28,7 @@ jobs:
       - name: Download and Extract Static Assets
         run: |
-          url="https://dl.rustfs.com/console/rustfs-console-latest.zip"
+          url="https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip"
           mkdir -p static
           curl -L -o static_assets.zip "$url"
           unzip -o static_assets.zip -d ./rustfs/static
Cargo.lock (generated, 33 changes)

@@ -1924,6 +1924,7 @@ checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813"
 name = "e2e_test"
 version = "0.0.1"
 dependencies = [
  "common",
  "ecstore",
  "flatbuffers",
  "futures",

@@ -1971,6 +1972,7 @@ dependencies = [
  "path-absolutize",
  "path-clean",
  "pin-project-lite",
+ "policy",
  "protos",
  "rand 0.8.5",
  "reed-solomon-erasure",

@@ -3003,6 +3005,7 @@ dependencies = [
  "arc-swap",
  "async-trait",
  "base64-simd",
  "common",
  "crypto",
  "ecstore",
  "futures",

@@ -3012,6 +3015,7 @@ dependencies = [
  "lazy_static",
  "log",
  "madmin",
+ "policy",
  "rand 0.8.5",
  "regex",
  "serde",

@@ -4815,6 +4819,34 @@ dependencies = [
  "miniz_oxide",
 ]

+[[package]]
+name = "policy"
+version = "0.0.1"
+dependencies = [
+ "arc-swap",
+ "async-trait",
+ "base64-simd",
+ "common",
+ "crypto",
+ "futures",
+ "ipnetwork",
+ "itertools",
+ "jsonwebtoken",
+ "lazy_static",
+ "log",
+ "madmin",
+ "rand 0.8.5",
+ "regex",
+ "serde",
+ "serde_json",
+ "strum",
+ "test-case",
+ "thiserror 2.0.12",
+ "time",
+ "tokio",
+ "tracing",
+]
+
 [[package]]
 name = "polling"
 version = "3.7.4"
Cargo.toml

@@ -44,6 +44,7 @@ flatbuffers = "24.12.23"
 futures = "0.3.31"
 futures-util = "0.3.31"
 common = { path = "./common/common" }
+policy = {path = "./policy"}
 hex = "0.4.3"
 hyper = "1.6.0"
 hyper-util = { version = "0.1.10", features = [
TODO.md (3 changes)

@@ -63,5 +63,6 @@
 ## Performance optimization
 - [ ] bitrot: impl AsyncRead/AsyncWrite
 - [ ] erasure: concurrent reads and writes
-- [ ] Refine delete logic: process deletes concurrently, move objects to a recycle bin first, and empty the recycle bin when space runs low
+- [x] Refine delete logic: process deletes concurrently, move objects to a recycle bin first
+- [ ] Empty the recycle bin when space runs low
 - [ ] list_object: stream results through a reader
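The refined delete flow in the TODO (process deletes concurrently, park objects in a recycle bin, purge only when space runs low) is easiest to picture with a small sketch. This is not the RustFS implementation; the trash-directory layout and the low_space probe are invented for illustration:

use std::fs;
use std::path::{Path, PathBuf};

// Move the object into a trash directory instead of unlinking it.
// rename() is atomic on the same filesystem, so the visible "delete" is cheap.
fn delete_to_trash(object: &Path, trash_dir: &Path) -> std::io::Result<PathBuf> {
    fs::create_dir_all(trash_dir)?;
    let name = object
        .file_name()
        .ok_or_else(|| std::io::Error::new(std::io::ErrorKind::InvalidInput, "no file name"))?;
    let target = trash_dir.join(name);
    fs::rename(object, &target)?;
    Ok(target)
}

// Reclaim space lazily: only empty the trash when the caller reports low space.
fn purge_trash_if_low_on_space(trash_dir: &Path, low_space: bool) -> std::io::Result<()> {
    if low_space {
        fs::remove_dir_all(trash_dir)?;
        fs::create_dir_all(trash_dir)?;
    }
    Ok(())
}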
@@ -61,6 +61,11 @@ impl Error {
     pub fn downcast_mut<T: std::error::Error + 'static>(&mut self) -> Option<&mut T> {
         self.inner.downcast_mut()
     }
+
+    pub fn to_io_err(&self) -> Option<std::io::Error> {
+        self.downcast_ref::<std::io::Error>()
+            .map(|e| std::io::Error::new(e.kind(), e.to_string()))
+    }
 }

 impl<T: std::error::Error + Send + Sync + 'static> From<T> for Error {
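The new to_io_err helper rebuilds an owned std::io::Error from a wrapped one, preserving the ErrorKind and flattening the message to a string. A minimal sketch of the intended call pattern, assuming the common::error::Error wrapper shown in this hunk:

use common::error::Error;

// The blanket From<T: std::error::Error + Send + Sync + 'static> impl wraps the io::Error.
fn read_config(path: &str) -> Result<Vec<u8>, Error> {
    std::fs::read(path).map_err(Error::from)
}

fn main() {
    if let Err(err) = read_config("/nonexistent/rustfs.conf") {
        // Recover an owned io::Error when a caller needs the ErrorKind back.
        if let Some(io_err) = err.to_io_err() {
            assert_eq!(io_err.kind(), std::io::ErrorKind::NotFound);
        }
    }
}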
@@ -6,6 +6,7 @@ license.workspace = true
 repository.workspace = true
 rust-version.workspace = true

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [lints]

@@ -25,4 +26,5 @@ tonic = { version = "0.12.3", features = ["gzip"] }
 tokio = { workspace = true }
 tower.workspace = true
 url.workspace = true
 madmin.workspace =true
+common.workspace = true
@@ -14,10 +14,7 @@ use protos::{
 use rmp_serde::{Deserializer, Serializer};
 use serde::{Deserialize, Serialize};
 use std::{error::Error, io::Cursor};
 use tokio::io::AsyncWrite;
 use tokio::spawn;
 use tokio::sync::mpsc;
 use tonic::codegen::tokio_stream::wrappers::ReceiverStream;
 use tonic::codegen::tokio_stream::StreamExt;
 use tonic::Request;

@@ -125,7 +122,7 @@ async fn walk_dir() -> Result<(), Box<dyn Error>> {
         println!("{}", resp.error_info.unwrap_or("".to_string()));
     }
     let entry = serde_json::from_str::<MetaCacheEntry>(&resp.meta_cache_entry)
-        .map_err(|e| ecstore::error::Error::from_string(format!("Unexpected response: {:?}", response)))
+        .map_err(|_e| common::error::Error::from_string(format!("Unexpected response: {:?}", response)))
         .unwrap();
     out.write_obj(&entry).await.unwrap();
 }
@@ -16,6 +16,7 @@ backon.workspace = true
 blake2 = "0.10.6"
 bytes.workspace = true
 common.workspace = true
+policy.workspace = true
 chrono.workspace = true
 glob = "0.3.2"
 thiserror.workspace = true
@@ -1,12 +1,12 @@
 use crate::{
     disk::{error::DiskError, Disk, DiskAPI},
     erasure::{ReadAt, Writer},
-    error::{Error, Result},
     io::{FileReader, FileWriter},
     store_api::BitrotAlgorithm,
 };
 use blake2::Blake2b512;
 use blake2::Digest as _;
+use common::error::{Error, Result};
 use highway::{HighwayHash, HighwayHasher, Key};
 use lazy_static::lazy_static;
 use sha2::{digest::core_api::BlockSizeUser, Digest, Sha256};

@@ -731,14 +731,10 @@ pub fn new_bitrot_filereader(
 mod test {
     use std::collections::HashMap;

+    use crate::{disk::error::DiskError, store_api::BitrotAlgorithm};
+    use common::error::{Error, Result};
     use hex_simd::decode_to_vec;

-    use crate::{
-        disk::error::DiskError,
-        error::{Error, Result},
-        store_api::BitrotAlgorithm,
-    };
-
     // use super::{bitrot_writer_sum, new_bitrot_reader};

     #[test]
@@ -1,4 +1,4 @@
-use crate::error::Error;
+use common::error::Error;

 #[derive(Debug, thiserror::Error, PartialEq, Eq)]
 pub enum BucketMetadataError {
@@ -1,9 +1,9 @@
-use super::policy::bucket_policy::BucketPolicy;
 use super::{quota::BucketQuota, target::BucketTargets};

 use super::object_lock::ObjectLockApi;
 use super::versioning::VersioningApi;
 use byteorder::{BigEndian, ByteOrder, LittleEndian};
+use policy::policy::BucketPolicy;
 use rmp_serde::Serializer as rmpSerializer;
 use s3s::dto::{
     BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ReplicationConfiguration,

@@ -16,9 +16,9 @@ use std::sync::Arc;
 use time::OffsetDateTime;
 use tracing::error;

-use crate::config::common::{read_config, save_config};
-use crate::error::{Error, Result};
+use crate::config::com::{read_config, save_config};
 use crate::{config, new_object_layer_fn};
+use common::error::{Error, Result};

 use crate::disk::BUCKET_META_PREFIX;
 use crate::store::ECStore;
@@ -8,11 +8,12 @@ use crate::bucket::utils::is_meta_bucketname;
 use crate::config;
 use crate::config::error::ConfigError;
 use crate::disk::error::DiskError;
-use crate::error::{Error, Result};
 use crate::global::{is_dist_erasure, is_erasure, new_object_layer_fn, GLOBAL_Endpoints};
 use crate::store::ECStore;
 use crate::utils::xml::deserialize;
+use common::error::{Error, Result};
 use futures::future::join_all;
+use policy::policy::BucketPolicy;
 use s3s::dto::{
     BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ReplicationConfiguration,
     ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,

@@ -22,7 +23,6 @@ use tokio::sync::RwLock;
 use tracing::{error, warn};

 use super::metadata::{load_bucket_metadata, BucketMetadata};
-use super::policy::bucket_policy::BucketPolicy;
 use super::quota::BucketQuota;
 use super::target::BucketTargets;
@@ -42,94 +42,99 @@ pub async fn init_bucket_metadata_sys(api: Arc<ECStore>, buckets: Vec<String>) {
 }

 // panic if not init
-pub(super) fn get_bucket_metadata_sys() -> Arc<RwLock<BucketMetadataSys>> {
-    GLOBAL_BucketMetadataSys.get().unwrap().clone()
-}
+pub(super) fn get_bucket_metadata_sys() -> Result<Arc<RwLock<BucketMetadataSys>>> {
+    if let Some(sys) = GLOBAL_BucketMetadataSys.get() {
+        Ok(sys.clone())
+    } else {
+        Err(Error::msg("GLOBAL_BucketMetadataSys not init"))
+    }
+}

-pub async fn set_bucket_metadata(bucket: String, bm: BucketMetadata) {
-    let sys = get_bucket_metadata_sys();
+pub async fn set_bucket_metadata(bucket: String, bm: BucketMetadata) -> Result<()> {
+    let sys = get_bucket_metadata_sys()?;
     let lock = sys.write().await;
     lock.set(bucket, Arc::new(bm)).await;
+    Ok(())
 }

 pub(crate) async fn get(bucket: &str) -> Result<Arc<BucketMetadata>> {
-    let sys = get_bucket_metadata_sys();
+    let sys = get_bucket_metadata_sys()?;
     let lock = sys.read().await;
     lock.get(bucket).await
 }

 pub async fn update(bucket: &str, config_file: &str, data: Vec<u8>) -> Result<OffsetDateTime> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let mut bucket_meta_sys = bucket_meta_sys_lock.write().await;

     bucket_meta_sys.update(bucket, config_file, data).await
 }

 pub async fn delete(bucket: &str, config_file: &str) -> Result<OffsetDateTime> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let mut bucket_meta_sys = bucket_meta_sys_lock.write().await;

     bucket_meta_sys.delete(bucket, config_file).await
 }

 pub async fn get_tagging_config(bucket: &str) -> Result<(Tagging, OffsetDateTime)> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;

     bucket_meta_sys.get_tagging_config(bucket).await
 }

 pub async fn get_lifecycle_config(bucket: &str) -> Result<(BucketLifecycleConfiguration, OffsetDateTime)> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;

     bucket_meta_sys.get_lifecycle_config(bucket).await
 }

 pub async fn get_sse_config(bucket: &str) -> Result<(ServerSideEncryptionConfiguration, OffsetDateTime)> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;

     bucket_meta_sys.get_sse_config(bucket).await
 }

 pub async fn get_object_lock_config(bucket: &str) -> Result<(ObjectLockConfiguration, OffsetDateTime)> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;

     bucket_meta_sys.get_object_lock_config(bucket).await
 }

 pub async fn get_replication_config(bucket: &str) -> Result<(ReplicationConfiguration, OffsetDateTime)> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;

     bucket_meta_sys.get_replication_config(bucket).await
 }

 pub async fn get_notification_config(bucket: &str) -> Result<Option<NotificationConfiguration>> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;

     bucket_meta_sys.get_notification_config(bucket).await
 }

 pub async fn get_versioning_config(bucket: &str) -> Result<(VersioningConfiguration, OffsetDateTime)> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;

     bucket_meta_sys.get_versioning_config(bucket).await
 }

 pub async fn get_config_from_disk(bucket: &str) -> Result<BucketMetadata> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;

     bucket_meta_sys.get_config_from_disk(bucket).await
 }

 pub async fn created_at(bucket: &str) -> Result<OffsetDateTime> {
-    let bucket_meta_sys_lock = get_bucket_metadata_sys();
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;

     bucket_meta_sys.created_at(bucket).await
 }
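The pattern behind this hunk: get_bucket_metadata_sys used to unwrap() the global cell and panic if the subsystem was not yet initialized; now it returns a Result and every caller propagates the failure with ?. A self-contained sketch of that get-or-error pattern with standard-library stand-ins (String replaces BucketMetadataSys for brevity):

use std::sync::{Arc, OnceLock, RwLock};

// Stand-in for GLOBAL_BucketMetadataSys: a global initialized once at startup.
static GLOBAL_SYS: OnceLock<Arc<RwLock<String>>> = OnceLock::new();

#[derive(Debug)]
struct NotInitialized;

// Return an error instead of panicking when the global is still unset.
fn get_sys() -> Result<Arc<RwLock<String>>, NotInitialized> {
    GLOBAL_SYS.get().cloned().ok_or(NotInitialized)
}

fn main() {
    assert!(get_sys().is_err()); // before init: an Err, not a panic
    GLOBAL_SYS.set(Arc::new(RwLock::new("metadata".into()))).unwrap();
    assert!(get_sys().is_ok()); // after init: the shared handle
}

Note that set_bucket_metadata also grew a Result<()> return type, so call sites that previously ignored it now have to handle or propagate the error.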
@@ -2,7 +2,6 @@ pub mod error;
 pub mod metadata;
 pub mod metadata_sys;
 pub mod object_lock;
-pub mod policy;
 pub mod policy_sys;
 mod quota;
 pub mod tagging;
@@ -1,605 +0,0 @@
use crate::{bucket::policy::condition::keyname::ALL_SUPPORT_KEYS, utils};
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::{
    collections::{HashMap, HashSet},
    str::FromStr,
    vec,
};

use super::condition::{
    key::{Key, KeySet},
    keyname::{KeyName, COMMOM_KEYS},
};

#[derive(Debug, Deserialize, Serialize, Default, Clone, PartialEq, Eq)]
pub struct ActionSet(pub HashSet<Action>);

impl ActionSet {
    pub fn is_match(&self, act: &Action) -> bool {
        for item in self.0.iter() {
            if item.is_match(act) {
                return true;
            }

            if item == &Action::GetObjectVersion && act == &Action::GetObjectVersion {
                return true;
            }
        }

        false
    }

    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}

impl AsRef<HashSet<Action>> for ActionSet {
    fn as_ref(&self) -> &HashSet<Action> {
        &self.0
    }
}

// TODO: use plain strings
// The Action enum
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Default, Hash)]
pub enum Action {
    #[serde(rename = "s3:AbortMultipartUpload")]
    AbortMultipartUpload,
    #[serde(rename = "s3:CreateBucket")]
    CreateBucket,
    #[serde(rename = "s3:DeleteBucket")]
    DeleteBucket,
    #[serde(rename = "s3:ForceDeleteBucket")]
    ForceDeleteBucket,
    #[serde(rename = "s3:DeleteBucketPolicy")]
    DeleteBucketPolicy,
    #[serde(rename = "s3:DeleteBucketCors")]
    DeleteBucketCors,
    #[serde(rename = "s3:DeleteObject")]
    DeleteObject,
    #[serde(rename = "s3:GetBucketLocation")]
    GetBucketLocation,
    #[serde(rename = "s3:GetBucketNotification")]
    GetBucketNotification,
    #[serde(rename = "s3:GetBucketPolicy")]
    GetBucketPolicy,
    #[serde(rename = "s3:GetBucketCors")]
    GetBucketCors,
    #[serde(rename = "s3:GetObject")]
    GetObject,
    #[serde(rename = "s3:GetObjectAttributes")]
    GetObjectAttributes,
    #[serde(rename = "s3:HeadBucket")]
    HeadBucket,
    #[serde(rename = "s3:ListAllMyBuckets")]
    ListAllMyBuckets,
    #[serde(rename = "s3:ListBucket")]
    ListBucket,
    #[serde(rename = "s3:GetBucketPolicyStatus")]
    GetBucketPolicyStatus,
    #[serde(rename = "s3:ListBucketVersions")]
    ListBucketVersions,
    #[serde(rename = "s3:ListBucketMultipartUploads")]
    ListBucketMultipartUploads,
    #[serde(rename = "s3:ListenNotification")]
    ListenNotification,
    #[serde(rename = "s3:ListenBucketNotification")]
    ListenBucketNotification,
    #[serde(rename = "s3:ListMultipartUploadParts")]
    ListMultipartUploadParts,
    #[serde(rename = "s3:PutLifecycleConfiguration")]
    PutLifecycleConfiguration,
    #[serde(rename = "s3:GetLifecycleConfiguration")]
    GetLifecycleConfiguration,
    #[serde(rename = "s3:PutBucketNotification")]
    PutBucketNotification,
    #[serde(rename = "s3:PutBucketPolicy")]
    PutBucketPolicy,
    #[serde(rename = "s3:PutBucketCors")]
    PutBucketCors,
    #[serde(rename = "s3:PutObject")]
    PutObject,
    #[serde(rename = "s3:DeleteObjectVersion")]
    DeleteObjectVersion,
    #[serde(rename = "s3:DeleteObjectVersionTagging")]
    DeleteObjectVersionTagging,
    #[serde(rename = "s3:GetObjectVersion")]
    GetObjectVersion,
    #[serde(rename = "s3:GetObjectVersionAttributes")]
    GetObjectVersionAttributes,
    #[serde(rename = "s3:GetObjectVersionTagging")]
    GetObjectVersionTagging,
    #[serde(rename = "s3:PutObjectVersionTagging")]
    PutObjectVersionTagging,
    #[serde(rename = "s3:BypassGovernanceRetention")]
    BypassGovernanceRetention,
    #[serde(rename = "s3:PutObjectRetention")]
    PutObjectRetention,
    #[serde(rename = "s3:GetObjectRetention")]
    GetObjectRetention,
    #[serde(rename = "s3:GetObjectLegalHold")]
    GetObjectLegalHold,
    #[serde(rename = "s3:PutObjectLegalHold")]
    PutObjectLegalHold,
    #[serde(rename = "s3:GetBucketObjectLockConfiguration")]
    GetBucketObjectLockConfiguration,
    #[serde(rename = "s3:PutBucketObjectLockConfiguration")]
    PutBucketObjectLockConfiguration,
    #[serde(rename = "s3:GetBucketTagging")]
    GetBucketTagging,
    #[serde(rename = "s3:PutBucketTagging")]
    PutBucketTagging,
    #[serde(rename = "s3:GetObjectTagging")]
    GetObjectTagging,
    #[serde(rename = "s3:PutObjectTagging")]
    PutObjectTagging,
    #[serde(rename = "s3:DeleteObjectTagging")]
    DeleteObjectTagging,
    #[serde(rename = "s3:PutBucketEncryption")]
    PutBucketEncryption,
    #[serde(rename = "s3:GetBucketEncryption")]
    GetBucketEncryption,
    #[serde(rename = "s3:PutBucketVersioning")]
    PutBucketVersioning,
    #[serde(rename = "s3:GetBucketVersioning")]
    GetBucketVersioning,
    #[serde(rename = "s3:PutReplicationConfiguration")]
    PutReplicationConfiguration,
    #[serde(rename = "s3:GetReplicationConfiguration")]
    GetReplicationConfiguration,
    #[serde(rename = "s3:ReplicateObject")]
    ReplicateObject,
    #[serde(rename = "s3:ReplicateDelete")]
    ReplicateDelete,
    #[serde(rename = "s3:ReplicateTags")]
    ReplicateTags,
    #[serde(rename = "s3:GetObjectVersionForReplication")]
    GetObjectVersionForReplication,
    #[serde(rename = "s3:RestoreObject")]
    RestoreObject,
    #[serde(rename = "s3:ResetBucketReplicationState")]
    ResetBucketReplicationState,
    #[serde(rename = "s3:PutObjectFanOut")]
    PutObjectFanOut,
    #[default]
    #[serde(rename = "s3:*")]
    AllActions,
}

lazy_static! {
    #[derive(Debug)]
    static ref SUPPORT_OBJCET_ACTIONS: HashSet<Action> = {
        let mut h = HashSet::new();
        h.insert(Action::AllActions);
        h.insert(Action::AbortMultipartUpload);
        h.insert(Action::DeleteObject);
        h.insert(Action::GetObject);
        h.insert(Action::ListMultipartUploadParts);
        h.insert(Action::PutObject);
        h.insert(Action::BypassGovernanceRetention);
        h.insert(Action::PutObjectRetention);
        h.insert(Action::GetObjectRetention);
        h.insert(Action::PutObjectLegalHold);
        h.insert(Action::GetObjectLegalHold);
        h.insert(Action::GetObjectTagging);
        h.insert(Action::PutObjectTagging);
        h.insert(Action::DeleteObjectTagging);
        h.insert(Action::GetObjectVersion);
        h.insert(Action::GetObjectVersionTagging);
        h.insert(Action::DeleteObjectVersion);
        h.insert(Action::DeleteObjectVersionTagging);
        h.insert(Action::PutObjectVersionTagging);
        h.insert(Action::ReplicateObject);
        h.insert(Action::ReplicateDelete);
        h.insert(Action::ReplicateTags);
        h.insert(Action::GetObjectVersionForReplication);
        h.insert(Action::RestoreObject);
        h.insert(Action::ResetBucketReplicationState);
        h.insert(Action::PutObjectFanOut);
        h.insert(Action::GetObjectAttributes);
        h.insert(Action::GetObjectVersionAttributes);
        h
    };
}

impl Action {
    pub fn is_object_action(&self) -> bool {
        for act in SUPPORT_OBJCET_ACTIONS.iter() {
            if self.is_match(act) {
                return true;
            }
        }
        false
    }
    pub fn is_match(&self, a: &Action) -> bool {
        utils::wildcard::match_pattern(self.clone().as_str(), a.clone().as_str())
    }

    fn as_str(&self) -> &'static str {
        match self {
            Action::AbortMultipartUpload => "s3:AbortMultipartUpload",
            Action::CreateBucket => "s3:CreateBucket",
            Action::DeleteBucket => "s3:DeleteBucket",
            Action::ForceDeleteBucket => "s3:ForceDeleteBucket",
            Action::DeleteBucketPolicy => "s3:DeleteBucketPolicy",
            Action::DeleteBucketCors => "s3:DeleteBucketCors",
            Action::DeleteObject => "s3:DeleteObject",
            Action::GetBucketLocation => "s3:GetBucketLocation",
            Action::GetBucketNotification => "s3:GetBucketNotification",
            Action::GetBucketPolicy => "s3:GetBucketPolicy",
            Action::GetBucketCors => "s3:GetBucketCors",
            Action::GetObject => "s3:GetObject",
            Action::GetObjectAttributes => "s3:GetObjectAttributes",
            Action::HeadBucket => "s3:HeadBucket",
            Action::ListAllMyBuckets => "s3:ListAllMyBuckets",
            Action::ListBucket => "s3:ListBucket",
            Action::GetBucketPolicyStatus => "s3:GetBucketPolicyStatus",
            Action::ListBucketVersions => "s3:ListBucketVersions",
            Action::ListBucketMultipartUploads => "s3:ListBucketMultipartUploads",
            Action::ListenNotification => "s3:ListenNotification",
            Action::ListenBucketNotification => "s3:ListenBucketNotification",
            Action::ListMultipartUploadParts => "s3:ListMultipartUploadParts",
            Action::PutLifecycleConfiguration => "s3:PutLifecycleConfiguration",
            Action::GetLifecycleConfiguration => "s3:GetLifecycleConfiguration",
            Action::PutBucketNotification => "s3:PutBucketNotification",
            Action::PutBucketPolicy => "s3:PutBucketPolicy",
            Action::PutBucketCors => "s3:PutBucketCors",
            Action::PutObject => "s3:PutObject",
            Action::DeleteObjectVersion => "s3:DeleteObjectVersion",
            Action::DeleteObjectVersionTagging => "s3:DeleteObjectVersionTagging",
            Action::GetObjectVersion => "s3:GetObjectVersion",
            Action::GetObjectVersionAttributes => "s3:GetObjectVersionAttributes",
            Action::GetObjectVersionTagging => "s3:GetObjectVersionTagging",
            Action::PutObjectVersionTagging => "s3:PutObjectVersionTagging",
            Action::BypassGovernanceRetention => "s3:BypassGovernanceRetention",
            Action::PutObjectRetention => "s3:PutObjectRetention",
            Action::GetObjectRetention => "s3:GetObjectRetention",
            Action::GetObjectLegalHold => "s3:GetObjectLegalHold",
            Action::PutObjectLegalHold => "s3:PutObjectLegalHold",
            Action::GetBucketObjectLockConfiguration => "s3:GetBucketObjectLockConfiguration",
            Action::PutBucketObjectLockConfiguration => "s3:PutBucketObjectLockConfiguration",
            Action::GetBucketTagging => "s3:GetBucketTagging",
            Action::PutBucketTagging => "s3:PutBucketTagging",
            Action::GetObjectTagging => "s3:GetObjectTagging",
            Action::PutObjectTagging => "s3:PutObjectTagging",
            Action::DeleteObjectTagging => "s3:DeleteObjectTagging",
            Action::PutBucketEncryption => "s3:PutEncryptionConfiguration",
            Action::GetBucketEncryption => "s3:GetEncryptionConfiguration",
            Action::PutBucketVersioning => "s3:PutBucketVersioning",
            Action::GetBucketVersioning => "s3:GetBucketVersioning",
            Action::PutReplicationConfiguration => "s3:GetReplicationConfiguration",
            Action::GetReplicationConfiguration => "s3:PutReplicationConfiguration",
            Action::ReplicateObject => "s3:ReplicateObject",
            Action::ReplicateDelete => "s3:ReplicateDelete",
            Action::ReplicateTags => "s3:ReplicateTags",
            Action::GetObjectVersionForReplication => "s3:GetObjectVersionForReplication",
            Action::RestoreObject => "s3:RestoreObject",
            Action::ResetBucketReplicationState => "s3:ResetBucketReplicationState",
            Action::PutObjectFanOut => "s3:PutObjectFanOut",
            Action::AllActions => "s3:*",
        }
    }

    // pub fn from_str(s: &str) -> Option<Self> {
    //     match s {
    //         "s3:AbortMultipartUpload" => Some(Action::AbortMultipartUpload),
    //         "s3:CreateBucket" => Some(Action::CreateBucket),
    //         "s3:DeleteBucket" => Some(Action::DeleteBucket),
    //         "s3:ForceDeleteBucket" => Some(Action::ForceDeleteBucket),
    //         "s3:DeleteBucketPolicy" => Some(Action::DeleteBucketPolicy),
    //         "s3:DeleteBucketCors" => Some(Action::DeleteBucketCors),
    //         "s3:DeleteObject" => Some(Action::DeleteObject),
    //         "s3:GetBucketLocation" => Some(Action::GetBucketLocation),
    //         "s3:GetBucketNotification" => Some(Action::GetBucketNotification),
    //         "s3:GetBucketPolicy" => Some(Action::GetBucketPolicy),
    //         "s3:GetBucketCors" => Some(Action::GetBucketCors),
    //         "s3:GetObject" => Some(Action::GetObject),
    //         "s3:GetObjectAttributes" => Some(Action::GetObjectAttributes),
    //         "s3:HeadBucket" => Some(Action::HeadBucket),
    //         "s3:ListAllMyBuckets" => Some(Action::ListAllMyBuckets),
    //         "s3:ListBucket" => Some(Action::ListBucket),
    //         "s3:GetBucketPolicyStatus" => Some(Action::GetBucketPolicyStatus),
    //         "s3:ListBucketVersions" => Some(Action::ListBucketVersions),
    //         "s3:ListBucketMultipartUploads" => Some(Action::ListBucketMultipartUploads),
    //         "s3:ListenNotification" => Some(Action::ListenNotification),
    //         "s3:ListenBucketNotification" => Some(Action::ListenBucketNotification),
    //         "s3:ListMultipartUploadParts" => Some(Action::ListMultipartUploadParts),
    //         "s3:PutLifecycleConfiguration" => Some(Action::PutLifecycleConfiguration),
    //         "s3:GetLifecycleConfiguration" => Some(Action::GetLifecycleConfiguration),
    //         "s3:PutBucketNotification" => Some(Action::PutBucketNotification),
    //         "s3:PutBucketPolicy" => Some(Action::PutBucketPolicy),
    //         "s3:PutBucketCors" => Some(Action::PutBucketCors),
    //         "s3:PutObject" => Some(Action::PutObject),
    //         "s3:DeleteObjectVersion" => Some(Action::DeleteObjectVersion),
    //         "s3:DeleteObjectVersionTagging" => Some(Action::DeleteObjectVersionTagging),
    //         "s3:GetObjectVersion" => Some(Action::GetObjectVersion),
    //         "s3:GetObjectVersionAttributes" => Some(Action::GetObjectVersionAttributes),
    //         "s3:GetObjectVersionTagging" => Some(Action::GetObjectVersionTagging),
    //         "s3:PutObjectVersionTagging" => Some(Action::PutObjectVersionTagging),
    //         "s3:BypassGovernanceRetention" => Some(Action::BypassGovernanceRetention),
    //         "s3:PutObjectRetention" => Some(Action::PutObjectRetention),
    //         "s3:GetObjectRetention" => Some(Action::GetObjectRetention),
    //         "s3:GetObjectLegalHold" => Some(Action::GetObjectLegalHold),
    //         "s3:PutObjectLegalHold" => Some(Action::PutObjectLegalHold),
    //         "s3:GetBucketObjectLockConfiguration" => Some(Action::GetBucketObjectLockConfiguration),
    //         "s3:PutBucketObjectLockConfiguration" => Some(Action::PutBucketObjectLockConfiguration),
    //         "s3:GetBucketTagging" => Some(Action::GetBucketTagging),
    //         "s3:PutBucketTagging" => Some(Action::PutBucketTagging),
    //         "s3:GetObjectTagging" => Some(Action::GetObjectTagging),
    //         "s3:PutObjectTagging" => Some(Action::PutObjectTagging),
    //         "s3:DeleteObjectTagging" => Some(Action::DeleteObjectTagging),
    //         "s3:PutEncryptionConfiguration" => Some(Action::PutBucketEncryption),
    //         "s3:GetEncryptionConfiguration" => Some(Action::GetBucketEncryption),
    //         "s3:PutBucketVersioning" => Some(Action::PutBucketVersioning),
    //         "s3:GetBucketVersioning" => Some(Action::GetBucketVersioning),
    //         "s3:PutReplicationConfiguration" => Some(Action::PutReplicationConfiguration),
    //         "s3:GetReplicationConfiguration" => Some(Action::GetReplicationConfiguration),
    //         "s3:ReplicateObject" => Some(Action::ReplicateObject),
    //         "s3:ReplicateDelete" => Some(Action::ReplicateDelete),
    //         "s3:ReplicateTags" => Some(Action::ReplicateTags),
    //         "s3:GetObjectVersionForReplication" => Some(Action::GetObjectVersionForReplication),
    //         "s3:RestoreObject" => Some(Action::RestoreObject),
    //         "s3:ResetBucketReplicationState" => Some(Action::ResetBucketReplicationState),
    //         "s3:PutObjectFanOut" => Some(Action::PutObjectFanOut),
    //         "s3:*" => Some(Action::AllActions),
    //         _ => None,
    //     }
    // }
}

impl FromStr for Action {
    type Err = ();

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "s3:AbortMultipartUpload" => Ok(Action::AbortMultipartUpload),
            "s3:CreateBucket" => Ok(Action::CreateBucket),
            "s3:DeleteBucket" => Ok(Action::DeleteBucket),
            "s3:ForceDeleteBucket" => Ok(Action::ForceDeleteBucket),
            "s3:DeleteBucketPolicy" => Ok(Action::DeleteBucketPolicy),
            "s3:DeleteBucketCors" => Ok(Action::DeleteBucketCors),
            "s3:DeleteObject" => Ok(Action::DeleteObject),
            "s3:GetBucketLocation" => Ok(Action::GetBucketLocation),
            "s3:GetBucketNotification" => Ok(Action::GetBucketNotification),
            "s3:GetBucketPolicy" => Ok(Action::GetBucketPolicy),
            "s3:GetBucketCors" => Ok(Action::GetBucketCors),
            "s3:GetObject" => Ok(Action::GetObject),
            "s3:GetObjectAttributes" => Ok(Action::GetObjectAttributes),
            "s3:HeadBucket" => Ok(Action::HeadBucket),
            "s3:ListAllMyBuckets" => Ok(Action::ListAllMyBuckets),
            "s3:ListBucket" => Ok(Action::ListBucket),
            "s3:GetBucketPolicyStatus" => Ok(Action::GetBucketPolicyStatus),
            "s3:ListBucketVersions" => Ok(Action::ListBucketVersions),
            "s3:ListBucketMultipartUploads" => Ok(Action::ListBucketMultipartUploads),
            "s3:ListenNotification" => Ok(Action::ListenNotification),
            "s3:ListenBucketNotification" => Ok(Action::ListenBucketNotification),
            "s3:ListMultipartUploadParts" => Ok(Action::ListMultipartUploadParts),
            "s3:PutLifecycleConfiguration" => Ok(Action::PutLifecycleConfiguration),
            "s3:GetLifecycleConfiguration" => Ok(Action::GetLifecycleConfiguration),
            "s3:PutBucketNotification" => Ok(Action::PutBucketNotification),
            "s3:PutBucketPolicy" => Ok(Action::PutBucketPolicy),
            "s3:PutBucketCors" => Ok(Action::PutBucketCors),
            "s3:PutObject" => Ok(Action::PutObject),
            "s3:DeleteObjectVersion" => Ok(Action::DeleteObjectVersion),
            "s3:DeleteObjectVersionTagging" => Ok(Action::DeleteObjectVersionTagging),
            "s3:GetObjectVersion" => Ok(Action::GetObjectVersion),
            "s3:GetObjectVersionAttributes" => Ok(Action::GetObjectVersionAttributes),
            "s3:GetObjectVersionTagging" => Ok(Action::GetObjectVersionTagging),
            "s3:PutObjectVersionTagging" => Ok(Action::PutObjectVersionTagging),
            "s3:BypassGovernanceRetention" => Ok(Action::BypassGovernanceRetention),
            "s3:PutObjectRetention" => Ok(Action::PutObjectRetention),
            "s3:GetObjectRetention" => Ok(Action::GetObjectRetention),
            "s3:GetObjectLegalHold" => Ok(Action::GetObjectLegalHold),
            "s3:PutObjectLegalHold" => Ok(Action::PutObjectLegalHold),
            "s3:GetBucketObjectLockConfiguration" => Ok(Action::GetBucketObjectLockConfiguration),
            "s3:PutBucketObjectLockConfiguration" => Ok(Action::PutBucketObjectLockConfiguration),
            "s3:GetBucketTagging" => Ok(Action::GetBucketTagging),
            "s3:PutBucketTagging" => Ok(Action::PutBucketTagging),
            "s3:GetObjectTagging" => Ok(Action::GetObjectTagging),
            "s3:PutObjectTagging" => Ok(Action::PutObjectTagging),
            "s3:DeleteObjectTagging" => Ok(Action::DeleteObjectTagging),
            "s3:PutEncryptionConfiguration" => Ok(Action::PutBucketEncryption),
            "s3:GetEncryptionConfiguration" => Ok(Action::GetBucketEncryption),
            "s3:PutBucketVersioning" => Ok(Action::PutBucketVersioning),
            "s3:GetBucketVersioning" => Ok(Action::GetBucketVersioning),
            "s3:PutReplicationConfiguration" => Ok(Action::PutReplicationConfiguration),
            "s3:GetReplicationConfiguration" => Ok(Action::GetReplicationConfiguration),
            "s3:ReplicateObject" => Ok(Action::ReplicateObject),
            "s3:ReplicateDelete" => Ok(Action::ReplicateDelete),
            "s3:ReplicateTags" => Ok(Action::ReplicateTags),
            "s3:GetObjectVersionForReplication" => Ok(Action::GetObjectVersionForReplication),
            "s3:RestoreObject" => Ok(Action::RestoreObject),
            "s3:ResetBucketReplicationState" => Ok(Action::ResetBucketReplicationState),
            "s3:PutObjectFanOut" => Ok(Action::PutObjectFanOut),
            "s3:*" => Ok(Action::AllActions),
            _ => Err(()),
        }
    }
}

pub struct ActionConditionKeyMap(HashMap<Action, KeySet>);

impl ActionConditionKeyMap {
    pub fn lookup(&self, action: &Action) -> KeySet {
        let common_keys: Vec<Key> = COMMOM_KEYS.iter().map(|v| v.to_key()).collect();

        let mut merged_keys = KeySet::from_keys(&common_keys);

        for (act, key) in self.0.iter() {
            if action.is_match(act) {
                merged_keys.merge(key);
            }
        }

        merged_keys
    }
}

lazy_static! {
    pub static ref IAMActionConditionKeyMap: ActionConditionKeyMap = create_action_condition_key_map();
}

fn create_action_condition_key_map() -> ActionConditionKeyMap {
    let common_keys: Vec<Key> = COMMOM_KEYS.iter().map(|v| v.to_key()).collect();
    let all_support_keys: Vec<Key> = ALL_SUPPORT_KEYS.iter().map(|v| v.to_key()).collect();

    let mut map = HashMap::new();

    map.insert(Action::AllActions, KeySet::from_keys(&all_support_keys));
    map.insert(Action::AbortMultipartUpload, KeySet::from_keys(&common_keys));
    map.insert(Action::CreateBucket, KeySet::from_keys(&common_keys));

    let mut delete_obj_keys = common_keys.clone();
    delete_obj_keys.push(KeyName::S3VersionID.to_key());
    map.insert(Action::DeleteObject, KeySet::from_keys(&delete_obj_keys));

    map.insert(Action::GetBucketLocation, KeySet::from_keys(&common_keys));
    map.insert(Action::GetBucketPolicyStatus, KeySet::from_keys(&common_keys));

    let mut get_obj_keys = common_keys.clone();
    get_obj_keys.extend(vec![
        KeyName::S3XAmzServerSideEncryption.to_key(),
        KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm.to_key(),
        KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID.to_key(),
        KeyName::S3VersionID.to_key(),
        KeyName::ExistingObjectTag.to_key(),
    ]);
    map.insert(Action::DeleteObject, KeySet::from_keys(&get_obj_keys));

    map.insert(Action::HeadBucket, KeySet::from_keys(&common_keys));

    let mut get_obj_attr_keys = common_keys.clone();
    get_obj_attr_keys.push(KeyName::ExistingObjectTag.to_key());
    map.insert(Action::DeleteObject, KeySet::from_keys(&get_obj_attr_keys));

    let mut get_obj_ver_attr_keys = common_keys.clone();
    get_obj_ver_attr_keys.extend(vec![KeyName::S3VersionID.to_key(), KeyName::ExistingObjectTag.to_key()]);
    map.insert(Action::DeleteObject, KeySet::from_keys(&get_obj_ver_attr_keys));

    map.insert(Action::ListAllMyBuckets, KeySet::from_keys(&common_keys));

    let mut list_bucket_keys = common_keys.clone();
    list_bucket_keys.extend(vec![
        KeyName::S3Prefix.to_key(),
        KeyName::S3Delimiter.to_key(),
        KeyName::S3MaxKeys.to_key(),
    ]);
    map.insert(Action::ListBucket, KeySet::from_keys(&list_bucket_keys));
    map.insert(Action::ListBucketVersions, KeySet::from_keys(&list_bucket_keys));

    map.insert(Action::ListBucketMultipartUploads, KeySet::from_keys(&common_keys));
    map.insert(Action::ListenNotification, KeySet::from_keys(&common_keys));
    map.insert(Action::ListenBucketNotification, KeySet::from_keys(&common_keys));
    map.insert(Action::ListMultipartUploadParts, KeySet::from_keys(&common_keys));

    let mut put_obj_keys = common_keys.clone();
    put_obj_keys.extend(vec![
        KeyName::S3XAmzCopySource.to_key(),
        KeyName::S3XAmzServerSideEncryption.to_key(),
        KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm.to_key(),
        KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID.to_key(),
        KeyName::S3XAmzMetadataDirective.to_key(),
        KeyName::S3XAmzStorageClass.to_key(),
        KeyName::S3VersionID.to_key(),
        KeyName::S3ObjectLockRetainUntilDate.to_key(),
        KeyName::S3ObjectLockMode.to_key(),
        KeyName::S3ObjectLockLegalHold.to_key(),
        KeyName::RequestObjectTagKeys.to_key(),
        KeyName::RequestObjectTag.to_key(),
    ]);
    map.insert(Action::PutObject, KeySet::from_keys(&put_obj_keys));

    let mut put_obj_retention_keys = common_keys.clone();
    put_obj_retention_keys.extend(vec![
        KeyName::S3XAmzServerSideEncryption.to_key(),
        KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm.to_key(),
        KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID.to_key(),
        KeyName::S3ObjectLockRemainingRetentionDays.to_key(),
        KeyName::S3ObjectLockRetainUntilDate.to_key(),
        KeyName::S3ObjectLockMode.to_key(),
        KeyName::S3VersionID.to_key(),
    ]);
    map.insert(Action::PutObjectRetention, KeySet::from_keys(&put_obj_retention_keys));

    let mut get_obj_retention_keys = common_keys.clone();
    get_obj_retention_keys.extend(vec![
        KeyName::S3XAmzServerSideEncryption.to_key(),
        KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm.to_key(),
        KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID.to_key(),
        KeyName::S3VersionID.to_key(),
    ]);
    map.insert(Action::GetObjectRetention, KeySet::from_keys(&get_obj_retention_keys));

    let mut put_obj_hold_keys = common_keys.clone();
    put_obj_hold_keys.extend(vec![
        KeyName::S3XAmzServerSideEncryption.to_key(),
        KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm.to_key(),
        KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID.to_key(),
        KeyName::S3ObjectLockLegalHold.to_key(),
        KeyName::S3VersionID.to_key(),
    ]);
    map.insert(Action::PutObjectLegalHold, KeySet::from_keys(&put_obj_hold_keys));

    map.insert(Action::GetObjectLegalHold, KeySet::from_keys(&common_keys));

    let mut bypass_governance_retention_keys = common_keys.clone();
    bypass_governance_retention_keys.extend(vec![
        KeyName::S3VersionID.to_key(),
        KeyName::S3ObjectLockRemainingRetentionDays.to_key(),
        KeyName::S3ObjectLockRetainUntilDate.to_key(),
        KeyName::S3ObjectLockMode.to_key(),
        KeyName::S3ObjectLockLegalHold.to_key(),
        KeyName::RequestObjectTagKeys.to_key(),
        KeyName::RequestObjectTag.to_key(),
    ]);
    map.insert(Action::BypassGovernanceRetention, KeySet::from_keys(&bypass_governance_retention_keys));

    map.insert(Action::GetBucketObjectLockConfiguration, KeySet::from_keys(&common_keys));
    map.insert(Action::PutBucketObjectLockConfiguration, KeySet::from_keys(&common_keys));
    map.insert(Action::GetBucketTagging, KeySet::from_keys(&common_keys));

    let mut put_bucket_tagging_keys = common_keys.clone();
    put_bucket_tagging_keys.extend(vec![KeyName::RequestObjectTagKeys.to_key(), KeyName::RequestObjectTag.to_key()]);
    map.insert(Action::PutBucketTagging, KeySet::from_keys(&put_bucket_tagging_keys));

    let mut put_object_tagging_keys = common_keys.clone();
    put_object_tagging_keys.extend(vec![
        KeyName::S3VersionID.to_key(),
        KeyName::ExistingObjectTag.to_key(),
        KeyName::RequestObjectTagKeys.to_key(),
        KeyName::RequestObjectTag.to_key(),
    ]);
    map.insert(Action::PutObjectTagging, KeySet::from_keys(&put_object_tagging_keys));

    let mut get_object_tagging_keys = common_keys.clone();
    get_object_tagging_keys.extend(vec![KeyName::S3VersionID.to_key(), KeyName::ExistingObjectTag.to_key()]);
    map.insert(Action::GetObjectTagging, KeySet::from_keys(&get_object_tagging_keys));
    map.insert(Action::DeleteObjectTagging, KeySet::from_keys(&get_object_tagging_keys));

    map.insert(Action::PutObjectVersionTagging, KeySet::from_keys(&put_object_tagging_keys));
    map.insert(Action::GetObjectVersionTagging, KeySet::from_keys(&get_object_tagging_keys));
    map.insert(Action::GetObjectVersion, KeySet::from_keys(&get_object_tagging_keys));

    let mut delete_object_version_keys = common_keys.clone();
    delete_object_version_keys.extend(vec![KeyName::S3VersionID.to_key()]);

    map.insert(Action::DeleteObjectVersion, KeySet::from_keys(&delete_object_version_keys));
    map.insert(Action::DeleteObjectVersionTagging, KeySet::from_keys(&get_object_tagging_keys));

    map.insert(Action::GetReplicationConfiguration, KeySet::from_keys(&common_keys));
    map.insert(Action::PutReplicationConfiguration, KeySet::from_keys(&common_keys));

    map.insert(Action::ReplicateObject, KeySet::from_keys(&get_object_tagging_keys));
    map.insert(Action::ReplicateDelete, KeySet::from_keys(&get_object_tagging_keys));
    map.insert(Action::ReplicateTags, KeySet::from_keys(&get_object_tagging_keys));
    map.insert(Action::GetObjectVersionForReplication, KeySet::from_keys(&get_object_tagging_keys));

    map.insert(Action::RestoreObject, KeySet::from_keys(&common_keys));
    map.insert(Action::ResetBucketReplicationState, KeySet::from_keys(&common_keys));
    map.insert(Action::PutObjectFanOut, KeySet::from_keys(&common_keys));

    ActionConditionKeyMap(map)
}
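Action::is_match above delegates to utils::wildcard::match_pattern, so an action pattern such as s3:* (AllActions) matches concrete actions by glob comparison of their serde rename strings. The matcher below is a simplified stand-in written for this note, not the RustFS utils::wildcard implementation:

// Glob matcher: '*' matches any run of characters, '?' exactly one.
fn match_pattern(pattern: &str, name: &str) -> bool {
    fn go(p: &[char], n: &[char]) -> bool {
        match (p.first(), n.first()) {
            (None, None) => true,
            (Some('*'), _) => go(&p[1..], n) || (!n.is_empty() && go(p, &n[1..])),
            (Some('?'), Some(_)) => go(&p[1..], &n[1..]),
            (Some(a), Some(b)) if a == b => go(&p[1..], &n[1..]),
            _ => false,
        }
    }
    let (p, n): (Vec<char>, Vec<char>) = (pattern.chars().collect(), name.chars().collect());
    go(&p, &n)
}

fn main() {
    assert!(match_pattern("s3:*", "s3:GetObject")); // AllActions matches everything
    assert!(match_pattern("s3:Get*", "s3:GetObjectTagging")); // hypothetical prefix pattern
    assert!(!match_pattern("s3:PutObject", "s3:GetObject"));
}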
@@ -1,259 +0,0 @@
use crate::error::{Error, Result};
// use rmp_serde::Serializer as rmpSerializer;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

use super::{
    action::{Action, ActionSet, IAMActionConditionKeyMap},
    condition::function::Functions,
    effect::Effect,
    principal::Principal,
    resource::ResourceSet,
};

const DEFAULT_VERSION: &str = "2012-10-17";

#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct BucketPolicyArgs {
    pub account_name: String,
    pub groups: Vec<String>,
    pub action: Action,
    pub bucket_name: String,
    pub condition_values: HashMap<String, Vec<String>>,
    pub is_owner: bool,
    pub object_name: String,
}

#[derive(Debug, Deserialize, Serialize, Default, Clone, PartialEq, Eq)]
#[serde(rename_all = "PascalCase", default)]
pub struct BPStatement {
    #[serde(rename = "Sid")]
    pub sid: String,
    #[serde(rename = "Effect")]
    pub effect: Effect,
    #[serde(rename = "Principal")]
    pub principal: Principal,
    #[serde(rename = "Action")]
    pub actions: ActionSet,
    #[serde(rename = "NotAction", skip_serializing_if = "ActionSet::is_empty")]
    pub not_actions: ActionSet,
    #[serde(rename = "Resource", skip_serializing_if = "ResourceSet::is_empty")]
    pub resources: ResourceSet,
    #[serde(rename = "Condition", skip_serializing_if = "Functions::is_empty")]
    pub conditions: Functions,
}

impl BPStatement {
    // pub fn equals(&self, other: &BPStatement) -> bool {
    //     if self.effect != other.effect {
    //         return false;
    //     }

    //     if !self.principal.equals(other.principal) {
    //         return false;
    //     }

    //     if !self.actions.equals(other.actions) {
    //         return false;
    //     }
    //     if !self.not_actions.equals(other.not_actions) {
    //         return false;
    //     }
    //     if !self.resources.equals(other.resources) {
    //         return false;
    //     }
    //     if !self.conditions.equals(other.conditions) {
    //         return false;
    //     }

    //     true
    // }
    pub fn validate(&self, bucket: &str) -> Result<()> {
        self.is_valid()?;
        self.resources.validate_bucket(bucket)
    }
    pub fn is_valid(&self) -> Result<()> {
        if !self.effect.is_valid() {
            return Err(Error::msg(format!("invalid Effect {:?}", self.effect)));
        }

        if !self.principal.is_valid() {
            return Err(Error::msg(format!("invalid Principal {:?}", self.principal)));
        }

        if self.actions.is_empty() && self.not_actions.is_empty() {
            return Err(Error::msg("Action must not be empty"));
        }

        if self.resources.as_ref().is_empty() {
            return Err(Error::msg("Resource must not be empty"));
        }

        for act in self.actions.as_ref() {
            if act.is_object_action() {
                if !self.resources.object_resource_exists() {
                    return Err(Error::msg(format!(
                        "unsupported object Resource found {:?} for action {:?}",
                        self.resources, act
                    )));
                }
            } else if !self.resources.bucket_resource_exists() {
                return Err(Error::msg(format!(
                    "unsupported bucket Resource found {:?} for action {:?}",
                    self.resources, act
                )));
            }

            let key_diff = self.conditions.keys().difference(&IAMActionConditionKeyMap.lookup(act));
            if !key_diff.is_empty() {
                return Err(Error::msg(format!(
                    "unsupported condition keys '{:?}' used for action '{:?}'",
                    key_diff, act
                )));
            }
        }
        Ok(())
    }
    fn is_allowed(&self, args: &BucketPolicyArgs) -> bool {
        let check = || -> bool {
            if !self.principal.is_match(&args.account_name) {
                return false;
            }

            if (!self.actions.is_match(&args.action) && !self.actions.is_empty()) || self.not_actions.is_match(&args.action) {
                return false;
            }

            let mut resource = args.bucket_name.clone();
            if !args.object_name.is_empty() {
                if !args.object_name.starts_with('/') {
                    resource.push('/');
                }

                resource.push_str(&args.object_name);
            }

            if !self.resources.is_match(&resource, &args.condition_values) {
                return false;
            }

            self.conditions.evaluate(&args.condition_values)
        };

        self.effect.is_allowed(check())
    }
}

#[derive(Debug, Deserialize, Serialize, Default, Clone)]
// #[serde(rename_all = "PascalCase", default)]
pub struct BucketPolicy {
    #[serde(rename = "ID", default)]
    pub id: String,
    #[serde(rename = "Version")]
    pub version: String,
    #[serde(rename = "Statement")]
    pub statements: Vec<BPStatement>,
}

impl BucketPolicy {
    pub fn is_allowed(&self, args: &BucketPolicyArgs) -> bool {
        for statement in self.statements.iter() {
            if statement.effect == Effect::Deny && !statement.is_allowed(args) {
                return false;
            }
        }

        if args.is_owner {
            return true;
        }

        for statement in self.statements.iter() {
            if statement.effect == Effect::Allow && statement.is_allowed(args) {
                return true;
            }
        }

        false
    }

    pub fn validate(&self, bucket: &str) -> Result<()> {
        self.is_valid()?;
        for statement in self.statements.iter() {
            statement.validate(bucket)?;
        }
        Ok(())
    }

    pub fn is_valid(&self) -> Result<()> {
        if self.version.as_str() != DEFAULT_VERSION && self.version.is_empty() {
            return Err(Error::msg(format!("invalid version {}", self.version)));
        }

        for statement in self.statements.iter() {
            statement.is_valid()?;
        }
        Ok(())
    }

    pub fn is_empty(&self) -> bool {
        self.statements.is_empty()
    }

    pub fn marshal_msg(&self) -> Result<String> {
        let buf = serde_json::to_string(self)?;

        Ok(buf)

        // let mut buf = Vec::new();
        // self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?;

        // Ok(buf)
    }

    pub fn unmarshal(buf: &[u8]) -> Result<Self> {
        let mut p = serde_json::from_slice::<BucketPolicy>(buf)?;
        p.drop_duplicate_statements();
        Ok(p)

        // let t: BucketPolicy = rmp_serde::from_slice(buf)?;
        // Ok(t)
    }

    fn drop_duplicate_statements(&mut self) {
        let mut dups = HashMap::new();

        for v in self.statements.iter() {
            if let Ok(data) = serde_json::to_string(self) {
                dups.insert(data, v);
            }
        }

        let mut news = Vec::new();

        for (_, v) in dups {
            news.push(v.clone());
        }

        self.statements = news;
    }
}
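One detail of the deleted file worth flagging: drop_duplicate_statements serializes self (the whole policy) inside the loop rather than the statement v, so every statement produces the same map key and the list collapses to a single entry in arbitrary HashMap order. A corrected sketch of the intended dedup-by-serialized-form, using a string stand-in for BPStatement:

use std::collections::HashSet;

// Keep the first occurrence of each distinct statement, preserving order.
fn drop_duplicate_statements(statements: &mut Vec<String>) {
    let mut seen = HashSet::new();
    // In the real code the key would be serde_json::to_string(stmt), not self.
    statements.retain(|stmt| seen.insert(stmt.clone()));
}

fn main() {
    let mut stmts = vec!["allow-get".to_string(), "allow-put".to_string(), "allow-get".to_string()];
    drop_duplicate_statements(&mut stmts);
    assert_eq!(stmts, vec!["allow-get".to_string(), "allow-put".to_string()]);
}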
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_bucket_policy() {
        let json = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Action\":[\"s3:GetBucketLocation\",\"s3:ListBucket\",\"s3:ListBucketMultipartUploads\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"*\"]},\"Resource\":[\"arn:aws:s3:::dada\"],\"Sid\":\"\"},{\"Action\":[\"s3:AbortMultipartUpload\",\"s3:DeleteObject\",\"s3:GetObject\",\"s3:ListMultipartUploadParts\",\"s3:PutObject\"],\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"*\"]},\"Resource\":[\"arn:aws:s3:::dada/*\"],\"Sid\":\"sdf\"}]}";

        let a = BucketPolicy::unmarshal(json.to_string().as_bytes()).unwrap();

        println!("{:?}", a);

        let j = a.marshal_msg();

        println!("{:?}", j);

        println!("{:?}", json);
    }
}
@@ -1,304 +0,0 @@
use super::{
    key::{Key, KeySet},
    keyname::KeyName,
    name::Name,
};
use serde::{
    de::{MapAccess, Visitor},
    ser::SerializeMap,
    Deserialize, Serialize,
};
use std::{
    collections::{HashMap, HashSet},
    fmt::{self, Debug, Display},
    marker::PhantomData,
};

// The ValueSet type
pub type ValueSet = HashSet<String>;

// The Function trait
pub trait FunctionApi: 'static + Send + Sync {
    // evaluate method
    fn evaluate(&self, values: &HashMap<String, Vec<String>>) -> bool;

    // key method
    fn key(&self) -> Key;

    // name method
    fn name(&self) -> Name;

    // String method
    fn to_string(&self) -> String;

    // to_map method
    fn to_map(&self) -> HashMap<Key, ValueSet>;

    fn clone_box(&self) -> Box<dyn FunctionApi>;
}

// #[derive(Debug, Deserialize, Serialize, Clone)]
// pub enum Function {
//     Test(TestFunction),
// }

// impl FunctionApi for Function {
//     // evaluate method
//     fn evaluate(&self, values: &HashMap<String, Vec<String>>) -> bool {
//         match self {
//             Function::Test(f) => f.evaluate(values),
//         }
//     }

//     // key method
//     fn key(&self) -> Key {
//         match self {
//             Function::Test(f) => f.key(),
//         }
//     }

//     // name method
//     fn name(&self) -> Name {
//         match self {
//             Function::Test(f) => f.name(),
//         }
//     }

//     // String method
//     fn to_string(&self) -> String {
//         match self {
//             Function::Test(f) => f.to_string(),
//         }
//     }

//     // to_map method
//     fn to_map(&self) -> HashMap<Key, ValueSet> {
//         match self {
//             Function::Test(f) => f.to_map(),
//         }
//     }

//     fn clone_box(&self) -> Box<dyn FunctionApi> {
//         match self {
//             Function::Test(f) => f.clone_box(),
//         }
//     }
// }

// The Functions type
#[derive(Default)]
pub struct Functions(Vec<Box<dyn FunctionApi>>);

impl Functions {
    pub fn evaluate(&self, values: &HashMap<String, Vec<String>>) -> bool {
        for f in self.0.iter() {
            if f.evaluate(values) {
                return true;
            }
        }

        false
    }
    pub fn keys(&self) -> KeySet {
        let mut set = KeySet::new();
        for f in self.0.iter() {
            set.add(f.key())
        }
        set
    }
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}

impl Debug for Functions {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let funs: Vec<String> = self.0.iter().map(|v| v.to_string()).collect();
        f.debug_list().entries(funs.iter()).finish()
    }
}

impl Display for Functions {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let funs: Vec<String> = self.0.iter().map(|v| v.to_string()).collect();
        write!(f, "{:?}", funs)
    }
}

impl Clone for Functions {
    fn clone(&self) -> Self {
        let mut list = Vec::new();
        for v in self.0.iter() {
            list.push(v.clone_box())
        }

        Functions(list)
    }
}

impl PartialEq for Functions {
    fn eq(&self, other: &Self) -> bool {
        if self.0.len() != other.0.len() {
            return false;
        }

        for v in self.0.iter() {
            let s = v.to_string();
            let mut found = false;
            for o in other.0.iter() {
                if s == o.to_string() {
                    found = true;
                    break;
                }
            }

            if !found {
                return false;
            }
        }

        true
    }
}

impl Eq for Functions {}

type FunctionsMap = HashMap<String, HashMap<String, ValueSet>>;

impl Serialize for Functions {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let mut nm: FunctionsMap = HashMap::new();
        for f in self.0.iter() {
            let fname = f.name().to_string();

            if !nm.contains_key(&fname) {
                nm.insert(fname.clone(), HashMap::new());
            }

            for (k, v) in f.to_map() {
                if let Some(hm) = nm.get_mut(&fname) {
                    hm.insert(k.to_string(), v);
                }
            }
        }

        let mut map = serializer.serialize_map(Some(nm.len()))?;
        for (k, v) in nm.iter() {
            map.serialize_entry(k, v)?;
        }

        map.end()
    }
}

struct MyMapVisitor {
    marker: PhantomData<fn() -> FunctionsMap>,
}

impl MyMapVisitor {
    fn new() -> Self {
        MyMapVisitor { marker: PhantomData }
    }
}

// This is the trait that Deserializers are going to be driving. There
// is one method for each type of data that our type knows how to
// deserialize from. There are many other methods that are not
// implemented here, for example deserializing from integers or strings.
// By default those methods will return an error, which makes sense
// because we cannot deserialize a MyMap from an integer or string.
impl<'de> Visitor<'de> for MyMapVisitor {
    // The type that our Visitor is going to produce.
    type Value = FunctionsMap;

    // Format a message stating what data this Visitor expects to receive.
    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("a very special map")
    }

    // Deserialize MyMap from an abstract "map" provided by the
    // Deserializer. The MapAccess input is a callback provided by
    // the Deserializer to let us see each entry in the map.
    fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
    where
        M: MapAccess<'de>,
    {
        let mut map = FunctionsMap::with_capacity(access.size_hint().unwrap_or(0));

        // While there are entries remaining in the input, add them
        // into our map.
        while let Some((key, value)) = access.next_entry()? {
            map.insert(key, value);
        }

        Ok(map)
    }
}

// This is the trait that informs Serde how to deserialize MyMap.
impl<'de> Deserialize<'de> for Functions {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Instantiate our Visitor and ask the Deserializer to drive
        // it over the input data, resulting in an instance of MyMap.
        let map = deserializer.deserialize_map(MyMapVisitor::new())?;

        for (key, vals) in map.iter() {
            println!("functions key {}, vals {:?}", key, vals);
        }
        // TODO: FIXME: create functions from name

        Ok(Functions(Vec::new()))
    }
}

// impl<'de> Deserialize<'de> for Functions {
//     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
//     where
//         D: serde::Deserializer<'de>,
//     {
//         todo!()
//     }
// }

#[derive(Debug, Deserialize, Serialize, Default, Clone)]
pub struct TestFunction {}

impl FunctionApi for TestFunction {
    // evaluate method
    fn evaluate(&self, _values: &HashMap<String, Vec<String>>) -> bool {
        true
    }

    // key method
    fn key(&self) -> Key {
        Key {
            name: KeyName::JWTPrefUsername,
            variable: "".to_string(),
        }
    }

    // name method
    fn name(&self) -> Name {
        Name::StringEquals
    }

    // String method
    fn to_string(&self) -> String {
        Name::StringEquals.to_string()
    }

    // to_map method
    fn to_map(&self) -> HashMap<Key, ValueSet> {
        HashMap::new()
    }

    fn clone_box(&self) -> Box<dyn FunctionApi> {
        Box::new(self.clone())
    }
}
|
||||
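As an illustrative sketch only (not part of the commit): inside this module, `Functions::evaluate` is an OR over its condition functions, so the always-true `TestFunction` makes it short-circuit immediately.

    use std::collections::HashMap;

    fn functions_demo() {
        let funcs = Functions(vec![Box::new(TestFunction {})]);
        let values: HashMap<String, Vec<String>> = HashMap::new();
        // TestFunction::evaluate always returns true, so the first
        // function already satisfies the OR-style loop.
        assert!(funcs.evaluate(&values));
        // keys() collects one Key per function; here jwt:preferred_username.
        assert!(!funcs.keys().is_empty());
    }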
@@ -1,155 +0,0 @@
use super::keyname::{KeyName, ALL_SUPPORT_KEYS};
use crate::error::Error;
use serde::{Deserialize, Serialize};
use std::{collections::HashSet, fmt, str::FromStr};

// Define the Key struct.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Key {
    pub name: KeyName,
    pub variable: String,
}

impl Key {
    pub fn new(name: KeyName, variable: String) -> Self {
        Key { name, variable }
    }
    // IsValid - checks if key is valid or not.
    fn is_valid(&self) -> bool {
        ALL_SUPPORT_KEYS.iter().any(|supported| self.name == *supported)
    }

    // Is - checks if this key has the same key name or not.
    pub fn is(&self, name: &KeyName) -> bool {
        self.name == *name
    }

    // VarName - returns variable key name, such as "${aws:username}"
    pub fn var_name(&self) -> String {
        self.name.var_name()
    }

    // Name - returns the key name, i.e. the value stripped of prefixes such as "aws:" and "s3:"
    pub fn name(&self) -> String {
        if !self.variable.is_empty() {
            format!("{}{}", self.name.name(), self.variable)
        } else {
            self.name.name().to_string()
        }
    }
}

impl FromStr for Key {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let (name, variable) = if let Some(pos) = s.find('/') {
            (&s[..pos], &s[pos + 1..])
        } else {
            (s, "")
        };

        let keyname = KeyName::from_str(name)?;

        let key = Key {
            name: keyname,
            variable: variable.to_string(),
        };

        if key.is_valid() {
            Ok(key)
        } else {
            Err(Error::msg(format!("invalid condition key '{}'", s)))
        }
    }
}

impl Serialize for Key {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_str(self.to_string().as_str())
    }
}

impl<'de> Deserialize<'de> for Key {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        let s: String = Deserialize::deserialize(deserializer)?;
        Key::from_str(s.as_str()).map_err(serde::de::Error::custom)
    }
}

impl fmt::Display for Key {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if !self.variable.is_empty() {
            write!(f, "{}/{}", self.name.as_str(), self.variable)
        } else {
            write!(f, "{}", self.name)
        }
    }
}

#[derive(Debug, Default)]
pub struct KeySet(HashSet<Key>);

impl KeySet {
    pub fn new() -> Self {
        KeySet(HashSet::new())
    }
    // Add - add a key to the key set
    pub fn add(&mut self, key: Key) {
        self.0.insert(key);
    }

    // Merge merges two key sets; duplicates are overwritten
    pub fn merge(&mut self, other: &KeySet) {
        for key in &other.0 {
            self.add(key.clone());
        }
    }

    // Match matches the input key name against the current key set
    pub fn match_key(&self, key: &Key) -> bool {
        self.0.contains(key)
    }

    // Difference - returns a key set containing the difference of the two key sets
    pub fn difference(&self, other: &KeySet) -> KeySet {
        let mut result = KeySet::default();
        for key in &self.0 {
            if !other.match_key(key) {
                result.add(key.clone());
            }
        }
        result
    }

    // IsEmpty - returns whether the key set is empty or not
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    // ToSlice - returns a slice of keys
    fn to_slice(&self) -> Vec<Key> {
        self.0.iter().cloned().collect()
    }

    // NewKeySet - returns a new KeySet containing the given keys
    pub fn from_keys(keys: &Vec<Key>) -> KeySet {
        let mut set = KeySet::default();
        for key in keys {
            set.add(key.clone());
        }
        set
    }
}

impl fmt::Display for KeySet {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self.to_slice())
    }
}
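A quick sketch of how the '/'-split in `Key::from_str` behaves (illustrative only, using the types defined above):

    use std::str::FromStr;

    fn key_parse_demo() {
        // "s3:ExistingObjectTag/environment" splits at '/' into the
        // KeyName part and the variable part.
        let key = Key::from_str("s3:ExistingObjectTag/environment").unwrap();
        assert!(key.is(&KeyName::ExistingObjectTag));
        // Display re-joins the two halves with '/'.
        assert_eq!(key.to_string(), "s3:ExistingObjectTag/environment");
        // Unknown key names are rejected by KeyName::from_str.
        assert!(Key::from_str("s3:no-such-key").is_err());
    }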
@@ -1,475 +0,0 @@
use core::fmt;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::str::FromStr;

use crate::error::Error;

use super::key::Key;

// Define the KeyName enum.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum KeyName {
    // S3XAmzCopySource - key representing x-amz-copy-source HTTP header applicable to PutObject API only.
    S3XAmzCopySource, // KeyName = "s3:x-amz-copy-source"

    // S3XAmzServerSideEncryption - key representing x-amz-server-side-encryption HTTP header applicable
    // to PutObject API only.
    S3XAmzServerSideEncryption, // KeyName = "s3:x-amz-server-side-encryption"

    // S3XAmzServerSideEncryptionCustomerAlgorithm - key representing
    // x-amz-server-side-encryption-customer-algorithm HTTP header applicable to PutObject API only.
    S3XAmzServerSideEncryptionCustomerAlgorithm, // KeyName = "s3:x-amz-server-side-encryption-customer-algorithm"

    // S3XAmzMetadataDirective - key representing x-amz-metadata-directive HTTP header applicable to
    // PutObject API only.
    S3XAmzMetadataDirective, // KeyName = "s3:x-amz-metadata-directive"

    // S3XAmzContentSha256 - set a static content-sha256 for all calls for a given action.
    S3XAmzContentSha256, // KeyName = "s3:x-amz-content-sha256"

    // S3XAmzStorageClass - key representing x-amz-storage-class HTTP header applicable to PutObject API
    // only.
    S3XAmzStorageClass, // KeyName = "s3:x-amz-storage-class"

    // S3XAmzServerSideEncryptionAwsKmsKeyID - key representing x-amz-server-side-encryption-aws-kms-key-id
    // HTTP header for S3 API calls
    S3XAmzServerSideEncryptionAwsKmsKeyID, // KeyName = "s3:x-amz-server-side-encryption-aws-kms-key-id"

    // S3LocationConstraint - key representing LocationConstraint XML tag of CreateBucket API only.
    S3LocationConstraint, // KeyName = "s3:LocationConstraint"

    // S3Prefix - key representing prefix query parameter of ListBucket API only.
    S3Prefix, // KeyName = "s3:prefix"

    // S3Delimiter - key representing delimiter query parameter of ListBucket API only.
    S3Delimiter, // KeyName = "s3:delimiter"

    // S3VersionID - Enables you to limit the permission for the
    // s3:PutObjectVersionTagging action to a specific object version.
    S3VersionID, // KeyName = "s3:versionid"

    // S3MaxKeys - key representing max-keys query parameter of ListBucket API only.
    S3MaxKeys, // KeyName = "s3:max-keys"

    // S3ObjectLockRemainingRetentionDays - key representing object-lock-remaining-retention-days.
    // Enables enforcement of an object relative to the remaining retention days; you can set
    // minimum and maximum allowable retention periods for a bucket using a bucket policy.
    // This key is specific to the s3:PutObjectRetention API.
    S3ObjectLockRemainingRetentionDays, // KeyName = "s3:object-lock-remaining-retention-days"

    // S3ObjectLockMode - key representing object-lock-mode.
    // Enables enforcement of the specified object retention mode
    S3ObjectLockMode, // KeyName = "s3:object-lock-mode"

    // S3ObjectLockRetainUntilDate - key representing object-lock-retain-until-date.
    // Enables enforcement of a specific retain-until-date
    S3ObjectLockRetainUntilDate, // KeyName = "s3:object-lock-retain-until-date"

    // S3ObjectLockLegalHold - key representing object-lock-legal-hold.
    // Enables enforcement of the specified object legal hold status
    S3ObjectLockLegalHold, // KeyName = "s3:object-lock-legal-hold"

    // AWSReferer - key representing Referer header of any API.
    AWSReferer, // KeyName = "aws:Referer"

    // AWSSourceIP - key representing the client's IP address (not intermediary proxies) of any API.
    AWSSourceIP, // KeyName = "aws:SourceIp"

    // AWSUserAgent - key representing UserAgent header for any API.
    AWSUserAgent, // KeyName = "aws:UserAgent"

    // AWSSecureTransport - key representing whether the client's request is authenticated or not.
    AWSSecureTransport, // KeyName = "aws:SecureTransport"

    // AWSCurrentTime - key representing the current time.
    AWSCurrentTime, // KeyName = "aws:CurrentTime"

    // AWSEpochTime - key representing the current epoch time.
    AWSEpochTime, // KeyName = "aws:EpochTime"

    // AWSPrincipalType - user principal type; currently supported values are "User" and "Anonymous".
    AWSPrincipalType, // KeyName = "aws:principaltype"

    // AWSUserID - user unique ID; in RustFS this value is the same as your user Access Key.
    AWSUserID, // KeyName = "aws:userid"

    // AWSUsername - user friendly name; in RustFS this value is the same as your user Access Key.
    AWSUsername, // KeyName = "aws:username"

    // AWSGroups - groups for any authenticating Access Key.
    AWSGroups, // KeyName = "aws:groups"

    // S3SignatureVersion - identifies the version of AWS Signature that you want to support for authenticated requests.
    S3SignatureVersion, // KeyName = "s3:signatureversion"

    // S3SignatureAge - identifies the maximum age of presigned URL allowed
    S3SignatureAge, // KeyName = "s3:signatureAge"

    // S3AuthType - optionally use this condition key to restrict incoming requests to use a specific authentication method.
    S3AuthType, // KeyName = "s3:authType"

    // Refer https://docs.aws.amazon.com/AmazonS3/latest/userguide/tagging-and-policies.html
    ExistingObjectTag,    // KeyName = "s3:ExistingObjectTag"
    RequestObjectTagKeys, // KeyName = "s3:RequestObjectTagKeys"
    RequestObjectTag,     // KeyName = "s3:RequestObjectTag"

    // JWTSub - JWT subject claim substitution.
    JWTSub, //KeyName = "jwt:sub"

    // JWTIss issuer claim substitution.
    JWTIss, //KeyName = "jwt:iss"

    // JWTAud audience claim substitution.
    JWTAud, //KeyName = "jwt:aud"

    // JWTJti JWT unique identifier claim substitution.
    JWTJti, //KeyName = "jwt:jti"

    JWTUpn,          //KeyName = "jwt:upn"
    JWTName,         //KeyName = "jwt:name"
    JWTGroups,       //KeyName = "jwt:groups"
    JWTGivenName,    //KeyName = "jwt:given_name"
    JWTFamilyName,   //KeyName = "jwt:family_name"
    JWTMiddleName,   //KeyName = "jwt:middle_name"
    JWTNickName,     //KeyName = "jwt:nickname"
    JWTPrefUsername, //KeyName = "jwt:preferred_username"
    JWTProfile,      //KeyName = "jwt:profile"
    JWTPicture,      //KeyName = "jwt:picture"
    JWTWebsite,      //KeyName = "jwt:website"
    JWTEmail,        //KeyName = "jwt:email"
    JWTGender,       //KeyName = "jwt:gender"
    JWTBirthdate,    //KeyName = "jwt:birthdate"
    JWTPhoneNumber,  //KeyName = "jwt:phone_number"
    JWTAddress,      //KeyName = "jwt:address"
    JWTScope,        //KeyName = "jwt:scope"
    JWTClientID,     //KeyName = "jwt:client_id"

    // LDAPUser - LDAP username; this value is equal to your authenticating LDAP user DN.
    LDAPUser, // KeyName = "ldap:user"

    // LDAPUsername - LDAP username, is the authenticated simple user.
    LDAPUsername, // KeyName = "ldap:username"

    // LDAPGroups - LDAP groups; this value is equal to the LDAP Group DNs for the authenticating user.
    LDAPGroups, // KeyName = "ldap:groups"

    // STSDurationSeconds - Duration seconds condition for STS policy
    STSDurationSeconds, // KeyName = "sts:DurationSeconds"
    // SVCDurationSeconds - Duration seconds condition for Admin policy
    SVCDurationSeconds, // KeyName = "svc:DurationSeconds"

    Undefined,
}

lazy_static! {
    pub static ref JWTKEYS: Vec<KeyName> = {
        vec![
            KeyName::JWTSub,
            KeyName::JWTIss,
            KeyName::JWTAud,
            KeyName::JWTJti,
            KeyName::JWTName,
            KeyName::JWTUpn,
            KeyName::JWTGroups,
            KeyName::JWTGivenName,
            KeyName::JWTFamilyName,
            KeyName::JWTMiddleName,
            KeyName::JWTNickName,
            KeyName::JWTPrefUsername,
            KeyName::JWTProfile,
            KeyName::JWTPicture,
            KeyName::JWTWebsite,
            KeyName::JWTEmail,
            KeyName::JWTGender,
            KeyName::JWTBirthdate,
            KeyName::JWTPhoneNumber,
            KeyName::JWTAddress,
            KeyName::JWTScope,
            KeyName::JWTClientID,
        ]
    };
    pub static ref ALL_SUPPORT_KEYS: Vec<KeyName> = {
        vec![
            KeyName::S3SignatureVersion,
            KeyName::S3AuthType,
            KeyName::S3SignatureAge,
            KeyName::S3XAmzCopySource,
            KeyName::S3XAmzServerSideEncryption,
            KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm,
            KeyName::S3XAmzMetadataDirective,
            KeyName::S3XAmzStorageClass,
            KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID,
            KeyName::S3XAmzContentSha256,
            KeyName::S3LocationConstraint,
            KeyName::S3Prefix,
            KeyName::S3Delimiter,
            KeyName::S3MaxKeys,
            KeyName::S3VersionID,
            KeyName::S3ObjectLockRemainingRetentionDays,
            KeyName::S3ObjectLockMode,
            KeyName::S3ObjectLockLegalHold,
            KeyName::S3ObjectLockRetainUntilDate,
            KeyName::AWSReferer,
            KeyName::AWSSourceIP,
            KeyName::AWSUserAgent,
            KeyName::AWSSecureTransport,
            KeyName::AWSCurrentTime,
            KeyName::AWSEpochTime,
            KeyName::AWSPrincipalType,
            KeyName::AWSUserID,
            KeyName::AWSUsername,
            KeyName::AWSGroups,
            KeyName::LDAPUser,
            KeyName::LDAPUsername,
            KeyName::LDAPGroups,
            KeyName::RequestObjectTag,
            KeyName::ExistingObjectTag,
            KeyName::RequestObjectTagKeys,
            KeyName::JWTSub,
            KeyName::JWTIss,
            KeyName::JWTAud,
            KeyName::JWTJti,
            KeyName::JWTName,
            KeyName::JWTUpn,
            KeyName::JWTGroups,
            KeyName::JWTGivenName,
            KeyName::JWTFamilyName,
            KeyName::JWTMiddleName,
            KeyName::JWTNickName,
            KeyName::JWTPrefUsername,
            KeyName::JWTProfile,
            KeyName::JWTPicture,
            KeyName::JWTWebsite,
            KeyName::JWTEmail,
            KeyName::JWTGender,
            KeyName::JWTBirthdate,
            KeyName::JWTPhoneNumber,
            KeyName::JWTAddress,
            KeyName::JWTScope,
            KeyName::JWTClientID,
            KeyName::STSDurationSeconds,
            KeyName::SVCDurationSeconds,
        ]
    };
    pub static ref COMMOM_KEYS: Vec<KeyName> = {
        let mut keys = vec![
            KeyName::S3SignatureVersion,
            KeyName::S3AuthType,
            KeyName::S3SignatureAge,
            KeyName::S3XAmzContentSha256,
            KeyName::S3LocationConstraint,
            KeyName::AWSReferer,
            KeyName::AWSSourceIP,
            KeyName::AWSUserAgent,
            KeyName::AWSSecureTransport,
            KeyName::AWSCurrentTime,
            KeyName::AWSEpochTime,
            KeyName::AWSPrincipalType,
            KeyName::AWSUserID,
            KeyName::AWSUsername,
            KeyName::AWSGroups,
            KeyName::LDAPUser,
            KeyName::LDAPUsername,
            KeyName::LDAPGroups,
        ];

        keys.extend(JWTKEYS.iter().cloned());

        keys
    };
    pub static ref ALL_SUPPORT_ADMIN_KEYS: Vec<KeyName> = {
        let mut keys = vec![
            KeyName::AWSReferer,
            KeyName::AWSSourceIP,
            KeyName::AWSUserAgent,
            KeyName::AWSSecureTransport,
            KeyName::AWSCurrentTime,
            KeyName::AWSEpochTime,
            KeyName::AWSPrincipalType,
            KeyName::AWSUserID,
            KeyName::AWSUsername,
            KeyName::AWSGroups,
            KeyName::LDAPUser,
            KeyName::LDAPUsername,
            KeyName::LDAPGroups,
            KeyName::SVCDurationSeconds,
        ];

        keys.extend(JWTKEYS.iter().cloned());

        keys
    };
    pub static ref ALL_SUPPORT_STS_KEYS: Vec<KeyName> = vec![KeyName::STSDurationSeconds];
}

// Methods on the KeyName enum.
impl KeyName {
    pub fn name(&self) -> &str {
        let name = self.as_str();
        if name.starts_with("aws:") {
            name.trim_start_matches("aws:")
        } else if name.starts_with("jwt:") {
            name.trim_start_matches("jwt:")
        } else if name.starts_with("ldap:") {
            name.trim_start_matches("ldap:")
        } else if name.starts_with("sts:") {
            name.trim_start_matches("sts:")
        } else if name.starts_with("svc:") {
            name.trim_start_matches("svc:")
        } else {
            name.trim_start_matches("s3:")
        }
    }

    // as_str returns the full key name string.
    pub fn as_str(&self) -> &str {
        match self {
            KeyName::S3XAmzCopySource => "s3:x-amz-copy-source",
            KeyName::S3XAmzServerSideEncryption => "s3:x-amz-server-side-encryption",
            KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm => "s3:x-amz-server-side-encryption-customer-algorithm",
            KeyName::S3XAmzMetadataDirective => "s3:x-amz-metadata-directive",
            KeyName::S3XAmzContentSha256 => "s3:x-amz-content-sha256",
            KeyName::S3XAmzStorageClass => "s3:x-amz-storage-class",
            KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID => "s3:x-amz-server-side-encryption-aws-kms-key-id",
            KeyName::S3LocationConstraint => "s3:LocationConstraint",
            KeyName::S3Prefix => "s3:prefix",
            KeyName::S3Delimiter => "s3:delimiter",
            KeyName::S3VersionID => "s3:versionid",
            KeyName::S3MaxKeys => "s3:max-keys",
            KeyName::S3ObjectLockRemainingRetentionDays => "s3:object-lock-remaining-retention-days",
            KeyName::S3ObjectLockMode => "s3:object-lock-mode",
            KeyName::S3ObjectLockRetainUntilDate => "s3:object-lock-retain-until-date",
            KeyName::S3ObjectLockLegalHold => "s3:object-lock-legal-hold",
            KeyName::AWSReferer => "aws:Referer",
            KeyName::AWSSourceIP => "aws:SourceIp",
            KeyName::AWSUserAgent => "aws:UserAgent",
            KeyName::AWSSecureTransport => "aws:SecureTransport",
            KeyName::AWSCurrentTime => "aws:CurrentTime",
            KeyName::AWSEpochTime => "aws:EpochTime",
            KeyName::AWSPrincipalType => "aws:principaltype",
            KeyName::AWSUserID => "aws:userid",
            KeyName::AWSUsername => "aws:username",
            KeyName::AWSGroups => "aws:groups",
            KeyName::S3SignatureVersion => "s3:signatureversion",
            KeyName::S3SignatureAge => "s3:signatureAge",
            KeyName::S3AuthType => "s3:authType",
            KeyName::ExistingObjectTag => "s3:ExistingObjectTag",
            KeyName::RequestObjectTagKeys => "s3:RequestObjectTagKeys",
            KeyName::RequestObjectTag => "s3:RequestObjectTag",
            KeyName::JWTSub => "jwt:sub",
            KeyName::JWTIss => "jwt:iss",
            KeyName::JWTAud => "jwt:aud",
            KeyName::JWTJti => "jwt:jti",
            KeyName::JWTUpn => "jwt:upn",
            KeyName::JWTName => "jwt:name",
            KeyName::JWTGroups => "jwt:groups",
            KeyName::JWTGivenName => "jwt:given_name",
            KeyName::JWTFamilyName => "jwt:family_name",
            KeyName::JWTMiddleName => "jwt:middle_name",
            KeyName::JWTNickName => "jwt:nickname",
            KeyName::JWTPrefUsername => "jwt:preferred_username",
            KeyName::JWTProfile => "jwt:profile",
            KeyName::JWTPicture => "jwt:picture",
            KeyName::JWTWebsite => "jwt:website",
            KeyName::JWTEmail => "jwt:email",
            KeyName::JWTGender => "jwt:gender",
            KeyName::JWTBirthdate => "jwt:birthdate",
            KeyName::JWTPhoneNumber => "jwt:phone_number",
            KeyName::JWTAddress => "jwt:address",
            KeyName::JWTScope => "jwt:scope",
            KeyName::JWTClientID => "jwt:client_id",
            KeyName::LDAPUser => "ldap:user",
            KeyName::LDAPUsername => "ldap:username",
            KeyName::LDAPGroups => "ldap:groups",
            KeyName::STSDurationSeconds => "sts:DurationSeconds",
            KeyName::SVCDurationSeconds => "svc:DurationSeconds",
            KeyName::Undefined => "",
        }
    }

    // var_name returns the variable key name used for substitution; note that it is
    // built from the prefix-stripped name(), e.g. "${username}".
    pub fn var_name(&self) -> String {
        format!("${{{}}}", self.name())
    }

    // to_key creates a Key from this key name.
    pub fn to_key(&self) -> Key {
        Key::new(self.clone(), "".to_string())
    }
}

impl fmt::Display for KeyName {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.as_str())
    }
}

impl FromStr for KeyName {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "s3:x-amz-copy-source" => Ok(KeyName::S3XAmzCopySource),
            "s3:x-amz-server-side-encryption" => Ok(KeyName::S3XAmzServerSideEncryption),
            "s3:x-amz-server-side-encryption-customer-algorithm" => Ok(KeyName::S3XAmzServerSideEncryptionCustomerAlgorithm),
            "s3:x-amz-metadata-directive" => Ok(KeyName::S3XAmzMetadataDirective),
            "s3:x-amz-content-sha256" => Ok(KeyName::S3XAmzContentSha256),
            "s3:x-amz-storage-class" => Ok(KeyName::S3XAmzStorageClass),
            "s3:x-amz-server-side-encryption-aws-kms-key-id" => Ok(KeyName::S3XAmzServerSideEncryptionAwsKmsKeyID),
            "s3:LocationConstraint" => Ok(KeyName::S3LocationConstraint),
            "s3:prefix" => Ok(KeyName::S3Prefix),
            "s3:delimiter" => Ok(KeyName::S3Delimiter),
            "s3:versionid" => Ok(KeyName::S3VersionID),
            "s3:max-keys" => Ok(KeyName::S3MaxKeys),
            "s3:object-lock-remaining-retention-days" => Ok(KeyName::S3ObjectLockRemainingRetentionDays),
            "s3:object-lock-mode" => Ok(KeyName::S3ObjectLockMode),
            "s3:object-lock-retain-until-date" => Ok(KeyName::S3ObjectLockRetainUntilDate),
            "s3:object-lock-legal-hold" => Ok(KeyName::S3ObjectLockLegalHold),
            "aws:Referer" => Ok(KeyName::AWSReferer),
            "aws:SourceIp" => Ok(KeyName::AWSSourceIP),
            "aws:UserAgent" => Ok(KeyName::AWSUserAgent),
            "aws:SecureTransport" => Ok(KeyName::AWSSecureTransport),
            "aws:CurrentTime" => Ok(KeyName::AWSCurrentTime),
            "aws:EpochTime" => Ok(KeyName::AWSEpochTime),
            "aws:principaltype" => Ok(KeyName::AWSPrincipalType),
            "aws:userid" => Ok(KeyName::AWSUserID),
            "aws:username" => Ok(KeyName::AWSUsername),
            "aws:groups" => Ok(KeyName::AWSGroups),
            "s3:signatureversion" => Ok(KeyName::S3SignatureVersion),
            "s3:signatureAge" => Ok(KeyName::S3SignatureAge),
            "s3:authType" => Ok(KeyName::S3AuthType),
            "s3:ExistingObjectTag" => Ok(KeyName::ExistingObjectTag),
            "s3:RequestObjectTagKeys" => Ok(KeyName::RequestObjectTagKeys),
            "s3:RequestObjectTag" => Ok(KeyName::RequestObjectTag),
            "jwt:sub" => Ok(KeyName::JWTSub),
            "jwt:iss" => Ok(KeyName::JWTIss),
            "jwt:aud" => Ok(KeyName::JWTAud),
            "jwt:jti" => Ok(KeyName::JWTJti),
            "jwt:upn" => Ok(KeyName::JWTUpn),
            "jwt:name" => Ok(KeyName::JWTName),
            "jwt:groups" => Ok(KeyName::JWTGroups),
            "jwt:given_name" => Ok(KeyName::JWTGivenName),
            "jwt:family_name" => Ok(KeyName::JWTFamilyName),
            "jwt:middle_name" => Ok(KeyName::JWTMiddleName),
            "jwt:nickname" => Ok(KeyName::JWTNickName),
            "jwt:preferred_username" => Ok(KeyName::JWTPrefUsername),
            "jwt:profile" => Ok(KeyName::JWTProfile),
            "jwt:picture" => Ok(KeyName::JWTPicture),
            "jwt:website" => Ok(KeyName::JWTWebsite),
            "jwt:email" => Ok(KeyName::JWTEmail),
            "jwt:gender" => Ok(KeyName::JWTGender),
            "jwt:birthdate" => Ok(KeyName::JWTBirthdate),
            "jwt:phone_number" => Ok(KeyName::JWTPhoneNumber),
            "jwt:address" => Ok(KeyName::JWTAddress),
            "jwt:scope" => Ok(KeyName::JWTScope),
            "jwt:client_id" => Ok(KeyName::JWTClientID),
            "ldap:user" => Ok(KeyName::LDAPUser),
            "ldap:username" => Ok(KeyName::LDAPUsername),
            "ldap:groups" => Ok(KeyName::LDAPGroups),
            "sts:DurationSeconds" => Ok(KeyName::STSDurationSeconds),
            "svc:DurationSeconds" => Ok(KeyName::SVCDurationSeconds),
            _ => Err(Error::msg(format!("keyname not found: {}", s))),
        }
    }
}
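An illustrative sketch of the round trip between as_str(), name(), and var_name() as defined above (not part of the commit):

    use std::str::FromStr;

    fn keyname_demo() {
        let k = KeyName::from_str("aws:username").unwrap();
        assert_eq!(k.as_str(), "aws:username");   // full key with provider prefix
        assert_eq!(k.name(), "username");          // prefix stripped by name()
        assert_eq!(k.var_name(), "${username}");   // substitution form built from name()
    }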
@@ -1,4 +0,0 @@
pub mod function;
pub mod key;
pub mod keyname;
pub mod name;
@@ -1,75 +0,0 @@
// Define the condition function Name enum.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Name {
    StringEquals,
    StringNotEquals,
    StringEqualsIgnoreCase,
    StringNotEqualsIgnoreCase,
    StringLike,
    StringNotLike,
    BinaryEquals,
    IpAddress,
    NotIpAddress,
    Null,
    Bool,
    NumericEquals,
    NumericNotEquals,
    NumericLessThan,
    NumericLessThanEquals,
    NumericGreaterThan,
    NumericGreaterThanIfExists,
    NumericGreaterThanEquals,
    DateEquals,
    DateNotEquals,
    DateLessThan,
    DateLessThanEquals,
    DateGreaterThan,
    DateGreaterThanEquals,
    ForAllValues,
    ForAnyValue,
}

impl Name {
    pub fn as_str(&self) -> &'static str {
        match self {
            Name::StringEquals => "StringEquals",
            Name::StringNotEquals => "StringNotEquals",
            Name::StringEqualsIgnoreCase => "StringEqualsIgnoreCase",
            Name::StringNotEqualsIgnoreCase => "StringNotEqualsIgnoreCase",
            Name::StringLike => "StringLike",
            Name::StringNotLike => "StringNotLike",
            Name::BinaryEquals => "BinaryEquals",
            Name::IpAddress => "IpAddress",
            Name::NotIpAddress => "NotIpAddress",
            Name::Null => "Null",
            Name::Bool => "Bool",
            Name::NumericEquals => "NumericEquals",
            Name::NumericNotEquals => "NumericNotEquals",
            Name::NumericLessThan => "NumericLessThan",
            Name::NumericLessThanEquals => "NumericLessThanEquals",
            Name::NumericGreaterThan => "NumericGreaterThan",
            Name::NumericGreaterThanIfExists => "NumericGreaterThanIfExists",
            Name::NumericGreaterThanEquals => "NumericGreaterThanEquals",
            Name::DateEquals => "DateEquals",
            Name::DateNotEquals => "DateNotEquals",
            Name::DateLessThan => "DateLessThan",
            Name::DateLessThanEquals => "DateLessThanEquals",
            Name::DateGreaterThan => "DateGreaterThan",
            Name::DateGreaterThanEquals => "DateGreaterThanEquals",
            Name::ForAllValues => "ForAllValues",
            Name::ForAnyValue => "ForAnyValue",
        }
    }
}

// impl ToString for Name {
//     fn to_string(&self) -> String {
//         self.as_str().to_string()
//     }
// }

impl std::fmt::Display for Name {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
@@ -1,40 +0,0 @@
use serde::{Deserialize, Serialize};
use std::str::FromStr;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Default)]
#[serde(rename_all = "PascalCase")]
pub enum Effect {
    #[default]
    Allow,
    Deny,
}

impl Effect {
    pub fn is_allowed(self, b: bool) -> bool {
        if self == Effect::Allow {
            b
        } else {
            !b
        }
    }

    pub fn is_valid(self) -> bool {
        match self {
            Effect::Allow => true,
            Effect::Deny => true,
        }
    }
}

// Implement parsing an Effect from a string.
impl FromStr for Effect {
    type Err = ();

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "Allow" => Ok(Effect::Allow),
            "Deny" => Ok(Effect::Deny),
            _ => Err(()),
        }
    }
}
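The Deny branch of `is_allowed` inverts the condition-match result: a statement whose conditions match under Deny forbids the request. The full truth table, as an illustrative check (not part of the commit):

    fn effect_demo() {
        assert!(Effect::Allow.is_allowed(true));   // Allow + match    => allowed
        assert!(!Effect::Allow.is_allowed(false)); // Allow + no match => not allowed
        assert!(!Effect::Deny.is_allowed(true));   // Deny + match     => denied
        assert!(Effect::Deny.is_allowed(false));   // Deny + no match  => not denied
    }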
@@ -1,103 +0,0 @@
// use std::collections::HashMap;

// use action::Action;
// use s3s_policy::model::{Effect, Policy, Principal, PrincipalRule, Statement};
// use serde::{Deserialize, Serialize};
// use tower::ready_cache::cache::Equivalent;

// use crate::utils::wildcard;

pub mod action;
pub mod bucket_policy;
pub mod condition;
pub mod effect;
pub mod principal;
pub mod resource;

// #[derive(Debug, Deserialize, Serialize, Default, Clone)]
// pub struct BucketPolicyArgs {
//     pub account_name: String,
//     pub groups: Vec<String>,
//     pub action: Action,
//     pub bucket_name: String,
//     pub condition_values: HashMap<String, Vec<String>>,
//     pub is_owner: bool,
//     pub object_name: String,
// }

// pub trait AllowApi {
//     fn is_allowed(&self, args: &BucketPolicyArgs) -> bool;
// }

// pub trait MatchApi {
//     fn is_match(&self, found: &str) -> bool;
// }

// impl AllowApi for Policy {
//     fn is_allowed(&self, args: &BucketPolicyArgs) -> bool {
//         for statement in self.statement.as_slice().iter() {
//             if statement.effect == Effect::Deny {
//                 if !statement.is_allowed(args) {
//                     return false;
//                 }
//             }
//         }
//         false
//     }
// }

// impl AllowApi for Statement {
//     fn is_allowed(&self, args: &BucketPolicyArgs) -> bool {
//         let check = || -> bool {
//             if let Some(principal) = &self.principal {
//                 if !principal.is_match(&args.account_name) {
//                     return false;
//                 }
//             }

//             false
//         };

//         self.effect.is_allowed(check())
//     }
// }

// impl MatchApi for PrincipalRule {
//     fn is_match(&self, found: &str) -> bool {
//         match self {
//             PrincipalRule::Principal(principal) => match principal {
//                 Principal::Wildcard => return true,
//                 Principal::Map(index_map) => {
//                     if let Some(keys) = index_map.get("AWS") {
//                         for key in keys.as_slice() {
//                             if wildcard::match_simple(key, found) {
//                                 return true;
//                             }
//                         }
//                     }
//                     return false;
//                 }
//             },
//             PrincipalRule::NotPrincipal(principal) => match principal {
//                 Principal::Wildcard => return true,
//                 Principal::Map(index_map) => todo!(),
//             },
//         }

//         false
//     }
// }

// trait EffectApi {
//     fn is_allowed(&self, b: bool) -> bool;
// }

// impl EffectApi for Effect {
//     fn is_allowed(&self, b: bool) -> bool {
//         if self == &Effect::Allow {
//             b
//         } else {
//             !b
//         }
//     }
// }
@@ -1,263 +0,0 @@
use crate::error::{Error, Result};
use crate::{
    bucket::policy::condition::keyname::COMMOM_KEYS,
    utils::{self, wildcard},
};
use core::fmt;
use serde::{Deserialize, Serialize};
use std::{
    collections::{HashMap, HashSet},
    str::FromStr,
};

// Define the ResourceARNType enum.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize, Default)]
pub enum ResourceARNType {
    #[default]
    UnknownARN,
    ResourceARNS3,
    ResourceARNKMS,
}

impl fmt::Display for ResourceARNType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ResourceARNType::UnknownARN => write!(f, ""),
            ResourceARNType::ResourceARNS3 => write!(f, "{}", RESOURCE_ARN_PREFIX),
            ResourceARNType::ResourceARNKMS => write!(f, "{}", RESOURCE_ARN_KMS_PREFIX),
        }
    }
}

// Resource ARN prefixes.
const RESOURCE_ARN_PREFIX: &str = "arn:aws:s3:::";
const RESOURCE_ARN_KMS_PREFIX: &str = "arn:rustfs:kms::::";

// Define the Resource struct.
#[derive(Debug, Default, PartialEq, Eq, Hash, Clone)]
pub struct Resource {
    pattern: String,
    rtype: ResourceARNType,
}

impl Resource {
    pub fn new(pattern: &str) -> Self {
        Self {
            pattern: pattern.to_owned(),
            rtype: ResourceARNType::ResourceARNS3,
        }
    }
    pub fn validate_bucket(&self, bucket: &str) -> Result<()> {
        self.validate()?;
        if !wildcard::match_pattern(&self.pattern, bucket)
            && !wildcard::match_as_pattern_prefix(&self.pattern, format!("{}/", bucket).as_str())
        {
            return Err(Error::msg("bucket name does not match"));
        }
        Ok(())
    }
    pub fn validate(&self) -> Result<()> {
        if !self.is_valid() {
            Err(Error::msg("invalid resource"))
        } else {
            Ok(())
        }
    }
    pub fn is_valid(&self) -> bool {
        if self.rtype == ResourceARNType::UnknownARN {
            return false;
        }
        if self.is_s3() && self.pattern.starts_with('/') {
            return false;
        }
        if self.is_kms() && self.pattern.as_bytes().iter().any(|&v| v == b'/' || v == b'\\' || v == b'.') {
            return false;
        }

        !self.pattern.is_empty()
    }
    pub fn is_s3(&self) -> bool {
        self.rtype == ResourceARNType::ResourceARNS3
    }
    pub fn is_kms(&self) -> bool {
        self.rtype == ResourceARNType::ResourceARNKMS
    }
    pub fn is_bucket_pattern(&self) -> bool {
        !self.pattern.contains('/') || self.pattern.eq("*")
    }
    pub fn is_object_pattern(&self) -> bool {
        self.pattern.contains('/') || self.pattern.contains('*')
    }
    pub fn is_match(&self, res: &str, condition_values: &HashMap<String, Vec<String>>) -> bool {
        // Start from the configured pattern and substitute condition
        // variables (e.g. "${username}") before matching against `res`.
        let mut pattern = self.pattern.clone();
        if !condition_values.is_empty() {
            for key in COMMOM_KEYS.iter() {
                if let Some(vals) = condition_values.get(key.name()) {
                    if let Some(v0) = vals.first() {
                        pattern = pattern.replace(key.var_name().as_str(), v0);
                    }
                }
            }
        }

        let cp = utils::path::clean(res);

        if cp != "." && cp == pattern {
            return true;
        }

        wildcard::match_pattern(&pattern, res)
    }
}

impl fmt::Display for Resource {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}{}", self.rtype, self.pattern)
    }
}

impl FromStr for Resource {
    type Err = serde_json::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.starts_with(RESOURCE_ARN_PREFIX) {
            let pattern = {
                if let Some(val) = s.strip_prefix(RESOURCE_ARN_PREFIX) {
                    val.to_string()
                } else {
                    s.to_string()
                }
            };
            Ok(Self {
                rtype: ResourceARNType::ResourceARNS3,
                pattern,
            })
        } else if s.starts_with(RESOURCE_ARN_KMS_PREFIX) {
            let pattern = {
                if let Some(val) = s.strip_prefix(RESOURCE_ARN_KMS_PREFIX) {
                    val.to_string()
                } else {
                    s.to_string()
                }
            };
            Ok(Self {
                rtype: ResourceARNType::ResourceARNKMS,
                pattern,
            })
        } else {
            Ok(Self {
                rtype: ResourceARNType::UnknownARN,
                pattern: "".to_string(),
            })
        }
    }
}

impl Serialize for Resource {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_str(self.to_string().as_str())
    }
}

impl<'de> Deserialize<'de> for Resource {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        struct Visitor;

        #[allow(clippy::needless_lifetimes)]
        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = Resource;

            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                formatter.write_str("string resource")
            }

            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                match Resource::from_str(value) {
                    Ok(res) => Ok(res),
                    Err(_) => Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(value), &self)),
                }
            }
        }

        deserializer.deserialize_any(Visitor)
    }
}

#[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq, Eq)]
#[serde(transparent)]
pub struct ResourceSet(pub HashSet<Resource>);

impl ResourceSet {
    pub fn validate_bucket(&self, bucket: &str) -> Result<()> {
        for res in self.0.iter() {
            res.validate_bucket(bucket)?;
        }
        Ok(())
    }

    pub fn is_match(&self, res: &str, condition_values: &HashMap<String, Vec<String>>) -> bool {
        for item in self.0.iter() {
            if item.is_match(res, condition_values) {
                return true;
            }
        }

        false
    }
    pub fn object_resource_exists(&self) -> bool {
        for res in self.0.iter() {
            if res.is_object_pattern() {
                return true;
            }
        }
        false
    }
    pub fn bucket_resource_exists(&self) -> bool {
        for res in self.0.iter() {
            if res.is_bucket_pattern() {
                return true;
            }
        }
        false
    }

    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}

impl AsRef<HashSet<Resource>> for ResourceSet {
    fn as_ref(&self) -> &HashSet<Resource> {
        &self.0
    }
}

// impl Serialize for ResourceSet {
//     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
//     where
//         S: serde::Serializer,
//     {
//         let ress: Vec<Resource> = self.0.iter().cloned().collect();
//         serializer.collect_seq(ress)
//     }
// }

// impl<'de> Deserialize<'de> for ResourceSet {
//     fn deserialize<D>(deserializer: D) -> Result<ResourceSet, D::Error>
//     where
//         D: Deserializer<'de>,
//     {
//         let vec: Vec<Resource> = Deserialize::deserialize(deserializer)?;
//         let ha: HashSet<Resource> = vec.into_iter().collect();
//         Ok(ResourceSet(ha))
//     }
// }
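A sketch of how a bucket policy resource is parsed and matched with the types above (illustrative only; it assumes `wildcard::match_pattern` implements the usual `*` glob semantics and that `match_as_pattern_prefix` does prefix matching):

    use std::collections::HashMap;
    use std::str::FromStr;

    fn resource_demo() {
        let res = Resource::from_str("arn:aws:s3:::mybucket/uploads/*").unwrap();
        assert!(res.is_object_pattern());
        assert!(res.validate_bucket("mybucket").is_ok());
        // With no condition values, the pattern is matched as-is.
        let no_conditions: HashMap<String, Vec<String>> = HashMap::new();
        assert!(res.is_match("mybucket/uploads/photo.png", &no_conditions));
    }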
@@ -1,16 +1,13 @@
-use super::{
-    error::BucketMetadataError,
-    metadata_sys::get_bucket_metadata_sys,
-    policy::bucket_policy::{BucketPolicy, BucketPolicyArgs},
-};
-use crate::error::Result;
+use super::{error::BucketMetadataError, metadata_sys::get_bucket_metadata_sys};
+use common::error::Result;
+use policy::policy::{BucketPolicy, BucketPolicyArgs};
 use tracing::warn;
 
 pub struct PolicySys {}
 
 impl PolicySys {
-    pub async fn is_allowed(args: &BucketPolicyArgs) -> bool {
-        match Self::get(&args.bucket_name).await {
+    pub async fn is_allowed(args: &BucketPolicyArgs<'_>) -> bool {
+        match Self::get(args.bucket).await {
             Ok(cfg) => return cfg.is_allowed(args),
             Err(err) => {
                 if !BucketMetadataError::BucketPolicyNotFound.is(&err) {
@@ -22,7 +19,7 @@ impl PolicySys {
         args.is_owner
     }
    pub async fn get(bucket: &str) -> Result<BucketPolicy> {
-        let bucket_meta_sys_lock = get_bucket_metadata_sys();
+        let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
         let bucket_meta_sys = bucket_meta_sys_lock.write().await;
 
         let (cfg, _) = bucket_meta_sys.get_bucket_policy(bucket).await?;
@@ -1,4 +1,4 @@
-use crate::error::Result;
+use common::error::Result;
 use rmp_serde::Serializer as rmpSerializer;
 use serde::{Deserialize, Serialize};
 
@@ -1,4 +1,4 @@
-use crate::error::Result;
+use common::error::Result;
 use rmp_serde::Serializer as rmpSerializer;
 use serde::{Deserialize, Serialize};
 use std::time::Duration;
 
@@ -1,4 +1,5 @@
-use crate::{disk::RUSTFS_META_BUCKET, error::Error};
+use crate::disk::RUSTFS_META_BUCKET;
+use common::error::{Error, Result};
 
 pub fn is_meta_bucketname(name: &str) -> bool {
     name.starts_with(RUSTFS_META_BUCKET)
 
@@ -1,6 +1,6 @@
 use super::{metadata_sys::get_bucket_metadata_sys, versioning::VersioningApi};
 use crate::disk::RUSTFS_META_BUCKET;
-use crate::error::Result;
+use common::error::Result;
 use s3s::dto::VersioningConfiguration;
 use tracing::warn;
 
@@ -61,7 +61,7 @@ impl BucketVersioningSys {
             return Ok(VersioningConfiguration::default());
         }
 
-        let bucket_meta_sys_lock = get_bucket_metadata_sys();
+        let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
         let bucket_meta_sys = bucket_meta_sys_lock.write().await;
 
         let (cfg, _) = bucket_meta_sys.get_versioning_config(bucket).await?;
 
@@ -14,7 +14,7 @@ use std::{
 
 use tokio::{spawn, sync::Mutex};
 
-use crate::error::Result;
+use common::error::Result;
 
 pub type UpdateFn<T> = Box<dyn Fn() -> Pin<Box<dyn Future<Output = Result<T>> + Send>> + Send + Sync + 'static>;
 
@@ -1,11 +1,9 @@
+use crate::disk::{DiskAPI, DiskStore, MetaCacheEntries, MetaCacheEntry, WalkDirOptions};
 use crate::{
     disk::error::{is_err_eof, is_err_file_not_found, is_err_volume_not_found, DiskError},
     metacache::writer::MetacacheReader,
 };
-use crate::{
-    disk::{DiskAPI, DiskStore, MetaCacheEntries, MetaCacheEntry, WalkDirOptions},
-    error::{Error, Result},
-};
+use common::error::{Error, Result};
 use futures::future::join_all;
 use std::{future::Future, pin::Pin, sync::Arc};
 use tokio::{spawn, sync::broadcast::Receiver as B_Receiver};
@@ -140,7 +138,11 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
     }
 
     let revjob = spawn(async move {
-        let mut errs: Vec<Option<Error>> = vec![None; readers.len()];
+        let mut errs: Vec<Option<Error>> = Vec::with_capacity(readers.len());
+        for _ in 0..readers.len() {
+            errs.push(None);
+        }
 
         loop {
            let mut current = MetaCacheEntry::default();
 
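A likely rationale for that hunk, as a sketch: `vec![None; n]` requires the element type to be `Clone`, and `Option<common::error::Error>` presumably is not, so the capacity-plus-push form sidesteps the bound. An equivalent alternative without the `Clone` requirement:

    // Hypothetical, for illustration: resize_with takes a closure,
    // so no Clone bound is needed on the element type.
    let n = 4;
    let mut errs: Vec<Option<String>> = Vec::with_capacity(n);
    errs.resize_with(n, || None);
    assert_eq!(errs.len(), n);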
@@ -1,10 +1,10 @@
 use super::error::{is_err_config_not_found, ConfigError};
 use super::{storageclass, Config, GLOBAL_StorageClass, KVS};
 use crate::disk::RUSTFS_META_BUCKET;
-use crate::error::{Error, Result};
 use crate::store_api::{ObjectInfo, ObjectOptions, PutObjReader, StorageAPI};
 use crate::store_err::is_err_object_not_found;
 use crate::utils::path::SLASH_SEPARATOR;
+use common::error::{Error, Result};
 use http::HeaderMap;
 use lazy_static::lazy_static;
 use std::collections::HashSet;
@@ -1,4 +1,5 @@
-use crate::{disk, error::Error, store_err::is_err_object_not_found};
+use crate::{disk, store_err::is_err_object_not_found};
+use common::error::Error;
 
 #[derive(Debug, PartialEq, thiserror::Error)]
 pub enum ConfigError {
 
@@ -1,9 +1,7 @@
 use std::time::Duration;
 
-use crate::{
-    error::{Error, Result},
-    utils::bool_flag::parse_bool,
-};
+use crate::utils::bool_flag::parse_bool;
+use common::error::{Error, Result};
 
 #[derive(Debug, Default)]
 pub struct Config {
 
@@ -1,12 +1,12 @@
-pub mod common;
+pub mod com;
 pub mod error;
 #[allow(dead_code)]
 pub mod heal;
 pub mod storageclass;
 
-use crate::error::Result;
 use crate::store::ECStore;
-use common::{lookup_configs, read_config_without_migrate, STORAGE_CLASS_SUB_SYS};
+use com::{lookup_configs, read_config_without_migrate, STORAGE_CLASS_SUB_SYS};
+use common::error::Result;
 use lazy_static::lazy_static;
 use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
 
@@ -1,9 +1,7 @@
 use std::env;
 
-use crate::{
-    config::KV,
-    error::{Error, Result},
-};
+use crate::config::KV;
+use common::error::{Error, Result};
 
 use super::KVS;
 use lazy_static::lazy_static;
 
@@ -1,5 +1,5 @@
-use crate::error::{Error, Result};
 use crate::utils::net;
+use common::error::{Error, Result};
 use path_absolutize::Absolutize;
 use std::{fmt::Display, path::Path};
 use url::{ParseError, Url};
 
@@ -2,11 +2,9 @@ use std::io::{self, ErrorKind};
 
 use tracing::error;
 
+use crate::quorum::CheckErrorFn;
 use crate::utils::ERROR_TYPE_MASK;
-use crate::{
-    error::{Error, Result},
-    quorum::CheckErrorFn,
-};
+use common::error::{Error, Result};
 
 // DiskError == StorageErr
 #[derive(Debug, thiserror::Error)]
@@ -565,3 +563,13 @@ pub fn is_err_os_not_exist(err: &Error) -> bool {
         false
     }
 }
+
+pub fn is_err_os_disk_full(err: &Error) -> bool {
+    if let Some(os_err) = err.downcast_ref::<io::Error>() {
+        is_sys_err_no_space(os_err)
+    } else if let Some(e) = err.downcast_ref::<DiskError>() {
+        e == &DiskError::DiskFull
+    } else {
+        false
+    }
+}
@@ -1,5 +1,5 @@
 use super::{error::DiskError, DiskInfo};
-use crate::error::{Error, Result};
+use common::error::{Error, Result};
 use serde::{Deserialize, Serialize};
 use serde_json::Error as JsonError;
 use uuid::Uuid;
 
@@ -1,6 +1,6 @@
 use super::error::{
-    is_err_file_not_found, is_err_file_version_not_found, is_sys_err_io, is_sys_err_not_empty, is_sys_err_too_many_files,
-    os_is_not_exist, os_is_permission,
+    is_err_file_not_found, is_err_file_version_not_found, is_err_os_disk_full, is_sys_err_io, is_sys_err_not_empty,
+    is_sys_err_too_many_files, os_is_not_exist, os_is_permission,
 };
 use super::os::{is_root_disk, rename_all};
 use super::{endpoint::Endpoint, error::DiskError, format::FormatV3};
@@ -18,7 +18,6 @@ use crate::disk::error::{
 };
 use crate::disk::os::{check_path_length, is_empty_dir};
 use crate::disk::STORAGE_FORMAT_FILE;
-use crate::error::{Error, Result};
 use crate::file_meta::{get_file_info, read_xl_meta_no_data, FileInfoOpts};
 use crate::global::{GLOBAL_IsErasureSD, GLOBAL_RootDiskThreshold};
 use crate::heal::data_scanner::{has_active_rules, scan_data_folder, ScannerItem, ShouldSleepFn, SizeSummary};
@@ -35,11 +34,11 @@ use crate::set_disk::{
     CHECK_PART_VOLUME_NOT_FOUND,
 };
 use crate::store_api::{BitrotAlgorithm, StorageAPI};
-use crate::utils::fs::{access, lstat, O_APPEND, O_CREATE, O_RDONLY, O_WRONLY};
+use crate::utils::fs::{access, lstat, remove, remove_all, rename, O_APPEND, O_CREATE, O_RDONLY, O_WRONLY};
 use crate::utils::os::get_info;
 use crate::utils::path::{
-    self, clean, decode_dir_object, has_suffix, path_join, path_join_buf, GLOBAL_DIR_SUFFIX, GLOBAL_DIR_SUFFIX_WITH_SLASH,
-    SLASH_SEPARATOR,
+    self, clean, decode_dir_object, encode_dir_object, has_suffix, path_join, path_join_buf, GLOBAL_DIR_SUFFIX,
+    GLOBAL_DIR_SUFFIX_WITH_SLASH, SLASH_SEPARATOR,
 };
 use crate::{
     file_meta::FileMeta,
@@ -47,6 +46,7 @@ use crate::{
     utils,
 };
 use common::defer;
+use common::error::{Error, Result};
 use path_absolutize::Absolutize;
 use std::collections::{HashMap, HashSet};
 use std::fmt::Debug;
@@ -308,44 +308,46 @@ impl LocalDisk {
     //     })
     // }
 
-    pub async fn move_to_trash(&self, delete_path: &PathBuf, _recursive: bool, _immediate_purge: bool) -> Result<()> {
+    pub async fn move_to_trash(&self, delete_path: &PathBuf, recursive: bool, immediate_purge: bool) -> Result<()> {
         let trash_path = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
         if let Some(parent) = trash_path.parent() {
             if !parent.exists() {
                 fs::create_dir_all(parent).await?;
             }
         }
-        // debug!("move_to_trash from:{:?} to {:?}", &delete_path, &trash_path);
-        // TODO: empty the trash
-        if let Err(err) = fs::rename(&delete_path, &trash_path).await {
-            match err.kind() {
-                ErrorKind::NotFound => (),
-                _ => {
-                    warn!("delete_file rename {:?} err {:?}", &delete_path, &err);
-                    return Err(Error::from(err));
-                }
-            }
-        }
+        let err = if recursive {
+            rename_all(delete_path, trash_path, self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?)
+                .await
+                .err()
+        } else {
+            rename(&delete_path, &trash_path).await.map_err(Error::new).err()
+        };
+
+        if immediate_purge || delete_path.to_string_lossy().ends_with(path::SLASH_SEPARATOR) {
+            warn!("move_to_trash immediate_purge {:?}", &delete_path.to_string_lossy());
+            let trash_path2 = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
+            let _ = rename_all(
+                encode_dir_object(delete_path.to_string_lossy().as_ref()),
+                trash_path2,
+                self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?,
+            )
+            .await;
+        }
 
-        // TODO: optimize. FIXME: just empty the trash for now; add the space-check logic later
-
-        if let Err(err) = {
-            if trash_path.is_dir() {
-                fs::remove_dir_all(&trash_path).await
-            } else {
-                fs::remove_file(&trash_path).await
-            }
-        } {
-            match err.kind() {
-                ErrorKind::NotFound => (),
-                _ => {
-                    warn!("delete_file remove trash {:?} err {:?}", &trash_path, &err);
-                    return Err(Error::from(err));
-                }
-            }
-        }
+        if let Some(err) = err {
+            if is_err_os_disk_full(&err) {
+                if recursive {
+                    remove_all(delete_path).await?;
+                } else {
+                    remove(delete_path).await?;
+                }
+            }
+
+            return Ok(());
+        }
 
+        // TODO: immediate
+        // TODO: notify asynchronously: check disk space and empty the trash
         Ok(())
     }
 
@@ -1971,7 +1973,7 @@ impl DiskAPI for LocalDisk {
             created: modtime,
         })
     }
-    async fn delete_paths(&self, volume: &str, paths: &[&str]) -> Result<()> {
+    async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
         let volume_dir = self.get_bucket_path(volume)?;
         if !skip_access_checks(volume) {
             utils::fs::access(&volume_dir)
 
@@ -16,7 +16,6 @@ pub const STORAGE_FORMAT_FILE_BACKUP: &str = "xl.meta.bkp";
 
 use crate::{
     bucket::{metadata_sys::get_versioning_config, versioning::VersioningApi},
-    error::{Error, Result},
     file_meta::{merge_file_meta_versions, FileMeta, FileMetaShallowVersion, VersionType},
     heal::{
         data_scanner::ShouldSleepFn,
@@ -27,6 +26,7 @@ use crate::{
     store_api::{FileInfo, ObjectInfo, RawFileInfo},
     utils::path::SLASH_SEPARATOR,
 };
+use common::error::{Error, Result};
 use endpoint::Endpoint;
 use error::DiskError;
 use local::LocalDisk;
@@ -250,7 +250,7 @@ impl DiskAPI for Disk {
         }
     }
 
-    async fn delete_paths(&self, volume: &str, paths: &[&str]) -> Result<()> {
+    async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
         match self {
             Disk::Local(local_disk) => local_disk.delete_paths(volume, paths).await,
             Disk::Remote(remote_disk) => remote_disk.delete_paths(volume, paths).await,
@@ -412,7 +412,7 @@ pub trait DiskAPI: Debug + Send + Sync + 'static {
         versions: Vec<FileInfoVersions>,
         opts: DeleteOptions,
     ) -> Result<Vec<Option<Error>>>;
-    async fn delete_paths(&self, volume: &str, paths: &[&str]) -> Result<()>;
+    async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()>;
     async fn write_metadata(&self, org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()>;
     async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()>;
     async fn read_version(
 
@@ -3,14 +3,13 @@ use std::{
     path::{Component, Path},
 };
 
-use tokio::fs;
-use tracing::info;
-
 use crate::{
     disk::error::{is_sys_err_not_dir, is_sys_err_path_not_found, os_is_not_exist},
-    error::{Error, Result},
     utils::{self, os::same_disk},
 };
+use common::error::{Error, Result};
+use tokio::fs;
+use tracing::info;
 
 use super::error::{os_err_to_file_err, os_is_exist, DiskError};
 
@@ -137,20 +136,12 @@ pub async fn reliable_rename(
|
||||
base_dir: impl AsRef<Path>,
|
||||
) -> io::Result<()> {
|
||||
if let Some(parent) = dst_file_path.as_ref().parent() {
|
||||
reliable_mkdir_all(parent, base_dir.as_ref()).await?;
|
||||
}
|
||||
// need remove dst path
|
||||
if let Err(err) = utils::fs::remove_all(dst_file_path.as_ref()).await {
|
||||
if err.kind() != io::ErrorKind::NotFound {
|
||||
info!(
|
||||
"reliable_rename rm dst failed. src_file_path: {:?}, dst_file_path: {:?}, base_dir: {:?}, err: {:?}",
|
||||
src_file_path.as_ref(),
|
||||
dst_file_path.as_ref(),
|
||||
base_dir.as_ref(),
|
||||
err
|
||||
);
|
||||
if !file_exists(parent).await {
|
||||
info!("reliable_rename reliable_mkdir_all parent: {:?}", parent);
|
||||
reliable_mkdir_all(parent, base_dir.as_ref()).await?;
|
||||
}
|
||||
}
|
||||
|
||||
let mut i = 0;
|
||||
loop {
|
||||
if let Err(e) = utils::fs::rename(src_file_path.as_ref(), dst_file_path.as_ref()).await {
|
||||
@@ -158,13 +149,13 @@ pub async fn reliable_rename(
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
info!(
|
||||
"reliable_rename failed. src_file_path: {:?}, dst_file_path: {:?}, base_dir: {:?}, err: {:?}",
|
||||
src_file_path.as_ref(),
|
||||
dst_file_path.as_ref(),
|
||||
base_dir.as_ref(),
|
||||
e
|
||||
);
|
||||
// info!(
|
||||
// "reliable_rename failed. src_file_path: {:?}, dst_file_path: {:?}, base_dir: {:?}, err: {:?}",
|
||||
// src_file_path.as_ref(),
|
||||
// dst_file_path.as_ref(),
|
||||
// base_dir.as_ref(),
|
||||
// e
|
||||
// );
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
@@ -229,3 +220,7 @@ pub async fn os_mkdir_all(dir_path: impl AsRef<Path>, base_dir: impl AsRef<Path>
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn file_exists(path: impl AsRef<Path>) -> bool {
|
||||
fs::metadata(path.as_ref()).await.map(|_| true).unwrap_or(false)
|
||||
}
|
||||
|
||||
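
A note on the `file_exists` helper added above: `tokio::fs::metadata` succeeds for both files and directories, so the parent check is really an "anything exists at this path" test, which lets `reliable_rename` skip the mkdir instead of unconditionally removing and recreating the destination. A self-contained sketch of the same behavior (standalone demo paths, not the crate's):

    use std::path::Path;
    use tokio::fs;

    // Mirrors the helper above: any successful metadata call counts as existing.
    async fn file_exists(path: impl AsRef<Path>) -> bool {
        fs::metadata(path.as_ref()).await.is_ok()
    }

    #[tokio::main]
    async fn main() {
        // Only create the parent when it is actually missing.
        if !file_exists("/tmp/rustfs-demo").await {
            fs::create_dir_all("/tmp/rustfs-demo").await.expect("mkdir");
        }
    }
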
@@ -28,7 +28,6 @@ use super::{
 };
 use crate::{
     disk::error::DiskError,
-    error::{Error, Result},
     heal::{
         data_scanner::ShouldSleepFn,
         data_usage_cache::{DataUsageCache, DataUsageEntry},
@@ -41,6 +40,7 @@ use crate::{
     io::{FileReader, FileWriter, HttpFileReader, HttpFileWriter},
     utils::proto_err_to_err,
 };
+use common::error::{Error, Result};
 use protos::proto_gen::node_service::RenamePartRequst;

 #[derive(Debug)]
@@ -565,9 +565,9 @@ impl DiskAPI for RemoteDisk {
         Ok(volume_info)
     }

-    async fn delete_paths(&self, volume: &str, paths: &[&str]) -> Result<()> {
+    async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
         info!("delete_paths");
-        let paths = paths.iter().map(|s| s.to_string()).collect::<Vec<String>>();
+        let paths = paths.to_owned();
         let mut client = node_service_time_out_client(&self.addr)
             .await
             .map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
@@ -1,5 +1,5 @@
-use crate::error::{Error, Result};
 use crate::utils::ellipses::*;
+use common::error::{Error, Result};
 use serde::Deserialize;
 use std::collections::HashSet;
 use std::env;

@@ -3,10 +3,10 @@ use tracing::{instrument, warn};
 use crate::{
     disk::endpoint::{Endpoint, EndpointType},
     disks_layout::DisksLayout,
-    error::{Error, Result},
     global::global_rustfs_port,
     utils::net::{self, XHost},
 };
+use common::error::{Error, Result};
 use std::{
     collections::{hash_map::Entry, HashMap, HashSet},
     net::IpAddr,

@@ -1,6 +1,7 @@
 use crate::bitrot::{BitrotReader, BitrotWriter};
-use crate::error::{Error, Result};
+use crate::error::clone_err;
 use crate::quorum::{object_op_ignored_errs, reduce_write_quorum_errs};
+use common::error::{Error, Result};
 use futures::future::join_all;
 use reed_solomon_erasure::galois_8::ReedSolomon;
 use std::any::Any;
@@ -487,7 +488,7 @@ impl Erasure {
         }
     }
     if !errs.is_empty() {
-        return Err(errs[0].clone());
+        return Err(clone_err(&errs[0]));
     }

     Ok(())
@@ -1,106 +1,122 @@
 use crate::disk::error::{clone_disk_err, DiskError};
+use common::error::Error;
 use std::io;
-use tracing_error::{SpanTrace, SpanTraceStatus};
+// use tracing_error::{SpanTrace, SpanTraceStatus};

-pub type StdError = Box<dyn std::error::Error + Send + Sync + 'static>;
+// pub type StdError = Box<dyn std::error::Error + Send + Sync + 'static>;

-pub type Result<T = (), E = Error> = std::result::Result<T, E>;
+// pub type Result<T = (), E = Error> = std::result::Result<T, E>;

-#[derive(Debug)]
-pub struct Error {
-    inner: Box<dyn std::error::Error + Send + Sync + 'static>,
-    span_trace: SpanTrace,
-}
+// #[derive(Debug)]
+// pub struct Error {
+//     inner: Box<dyn std::error::Error + Send + Sync + 'static>,
+//     span_trace: SpanTrace,
+// }

-impl Error {
-    /// Create a new error from a `std::error::Error`.
-    #[must_use]
-    #[track_caller]
-    pub fn new<T: std::error::Error + Send + Sync + 'static>(source: T) -> Self {
-        Self::from_std_error(source.into())
-    }
+// impl Error {
+//     /// Create a new error from a `std::error::Error`.
+//     #[must_use]
+//     #[track_caller]
+//     pub fn new<T: std::error::Error + Send + Sync + 'static>(source: T) -> Self {
+//         Self::from_std_error(source.into())
+//     }

-    /// Create a new error from a `std::error::Error`.
-    #[must_use]
-    #[track_caller]
-    pub fn from_std_error(inner: StdError) -> Self {
-        Self {
-            inner,
-            span_trace: SpanTrace::capture(),
-        }
-    }
+//     /// Create a new error from a `std::error::Error`.
+//     #[must_use]
+//     #[track_caller]
+//     pub fn from_std_error(inner: StdError) -> Self {
+//         Self {
+//             inner,
+//             span_trace: SpanTrace::capture(),
+//         }
+//     }

-    /// Create a new error from a string.
-    #[must_use]
-    #[track_caller]
-    pub fn from_string(s: impl Into<String>) -> Self {
-        Self::msg(s)
-    }
+//     /// Create a new error from a string.
+//     #[must_use]
+//     #[track_caller]
+//     pub fn from_string(s: impl Into<String>) -> Self {
+//         Self::msg(s)
+//     }

-    /// Create a new error from a string.
-    #[must_use]
-    #[track_caller]
-    pub fn msg(s: impl Into<String>) -> Self {
-        Self::from_std_error(s.into().into())
-    }
+//     /// Create a new error from a string.
+//     #[must_use]
+//     #[track_caller]
+//     pub fn msg(s: impl Into<String>) -> Self {
+//         Self::from_std_error(s.into().into())
+//     }

-    /// Returns `true` if the inner type is the same as `T`.
-    #[inline]
-    pub fn is<T: std::error::Error + 'static>(&self) -> bool {
-        self.inner.is::<T>()
-    }
+//     /// Returns `true` if the inner type is the same as `T`.
+//     #[inline]
+//     pub fn is<T: std::error::Error + 'static>(&self) -> bool {
+//         self.inner.is::<T>()
+//     }

-    /// Returns some reference to the inner value if it is of type `T`, or
-    /// `None` if it isn't.
-    #[inline]
-    pub fn downcast_ref<T: std::error::Error + 'static>(&self) -> Option<&T> {
-        self.inner.downcast_ref()
-    }
+//     /// Returns some reference to the inner value if it is of type `T`, or
+//     /// `None` if it isn't.
+//     #[inline]
+//     pub fn downcast_ref<T: std::error::Error + 'static>(&self) -> Option<&T> {
+//         self.inner.downcast_ref()
+//     }

-    /// Returns some mutable reference to the inner value if it is of type `T`, or
-    /// `None` if it isn't.
-    #[inline]
-    pub fn downcast_mut<T: std::error::Error + 'static>(&mut self) -> Option<&mut T> {
-        self.inner.downcast_mut()
-    }
+//     /// Returns some mutable reference to the inner value if it is of type `T`, or
+//     /// `None` if it isn't.
+//     #[inline]
+//     pub fn downcast_mut<T: std::error::Error + 'static>(&mut self) -> Option<&mut T> {
+//         self.inner.downcast_mut()
+//     }

-    pub fn to_io_err(&self) -> Option<io::Error> {
-        self.downcast_ref::<io::Error>()
-            .map(|e| io::Error::new(e.kind(), e.to_string()))
-    }
-}
+//     pub fn to_io_err(&self) -> Option<io::Error> {
+//         self.downcast_ref::<io::Error>()
+//             .map(|e| io::Error::new(e.kind(), e.to_string()))
+//     }
+// }

-impl<T: std::error::Error + Send + Sync + 'static> From<T> for Error {
-    fn from(e: T) -> Self {
-        Self::new(e)
-    }
-}
+// impl<T: std::error::Error + Send + Sync + 'static> From<T> for Error {
+//     fn from(e: T) -> Self {
+//         Self::new(e)
+//     }
+// }

-impl std::fmt::Display for Error {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", self.inner)?;
+// impl std::fmt::Display for Error {
+//     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+//         write!(f, "{}", self.inner)?;

-        if self.span_trace.status() != SpanTraceStatus::EMPTY {
-            write!(f, "\nspan_trace:\n{}", self.span_trace)?;
-        }
+//         if self.span_trace.status() != SpanTraceStatus::EMPTY {
+//             write!(f, "\nspan_trace:\n{}", self.span_trace)?;
+//         }

-        Ok(())
-    }
-}
+//         Ok(())
+//     }
+// }

-impl Clone for Error {
-    fn clone(&self) -> Self {
-        if let Some(e) = self.downcast_ref::<DiskError>() {
-            clone_disk_err(e)
-        } else if let Some(e) = self.downcast_ref::<io::Error>() {
-            if let Some(code) = e.raw_os_error() {
-                Error::new(io::Error::from_raw_os_error(code))
-            } else {
-                Error::new(io::Error::new(e.kind(), e.to_string()))
-            }
-        } else {
-            // TODO: handle other error types
-            Error::msg(self.to_string())
-        }
-    }
-}
+// impl Clone for Error {
+//     fn clone(&self) -> Self {
+//         if let Some(e) = self.downcast_ref::<DiskError>() {
+//             clone_disk_err(e)
+//         } else if let Some(e) = self.downcast_ref::<io::Error>() {
+//             if let Some(code) = e.raw_os_error() {
+//                 Error::new(io::Error::from_raw_os_error(code))
+//             } else {
+//                 Error::new(io::Error::new(e.kind(), e.to_string()))
+//             }
+//         } else {
+//             // TODO: handle other error types
+//             Error::msg(self.to_string())
+//         }
+//     }
+// }
+
+pub fn clone_err(e: &Error) -> Error {
+    if let Some(e) = e.downcast_ref::<DiskError>() {
+        clone_disk_err(e)
+    } else if let Some(e) = e.downcast_ref::<io::Error>() {
+        if let Some(code) = e.raw_os_error() {
+            Error::new(io::Error::from_raw_os_error(code))
+        } else {
+            Error::new(io::Error::new(e.kind(), e.to_string()))
+        }
+    } else {
+        // TODO: handle other error types
+        Error::msg(e.to_string())
+    }
+}
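
With the crate-local `Error` wrapper retired in favor of `common::error::Error`, the type no longer implements `Clone`: it boxes a `dyn std::error::Error`, which cannot be duplicated generically. The `clone_err` free function above rebuilds an equivalent error instead. A hedged, standalone sketch of the same idea against plain `std::io::Error`:

    use std::io;

    // Rebuild an equivalent io::Error rather than requiring Clone on the source.
    fn clone_io_err(e: &io::Error) -> io::Error {
        match e.raw_os_error() {
            // OS errors round-trip losslessly through their code.
            Some(code) => io::Error::from_raw_os_error(code),
            // Otherwise keep the kind and flatten the message to a string.
            None => io::Error::new(e.kind(), e.to_string()),
        }
    }

    fn main() {
        let original = io::Error::new(io::ErrorKind::NotFound, "xl.meta missing");
        let copy = clone_io_err(&original);
        assert_eq!(original.kind(), copy.kind());
    }
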
@@ -1,4 +1,13 @@
+use crate::disk::FileInfoVersions;
+use crate::file_meta_inline::InlineData;
+use crate::store_api::RawFileInfo;
+use crate::store_err::StorageError;
+use crate::{
+    disk::error::DiskError,
+    store_api::{ErasureInfo, FileInfo, ObjectPartInfo, ERASURE_ALGORITHM},
+};
 use byteorder::ByteOrder;
+use common::error::{Error, Result};
 use rmp::Marker;
 use serde::{Deserialize, Serialize};
 use std::cmp::Ordering;
@@ -11,16 +20,6 @@ use tracing::{error, warn};
 use uuid::Uuid;
 use xxhash_rust::xxh64;

-use crate::disk::FileInfoVersions;
-use crate::file_meta_inline::InlineData;
-use crate::store_api::RawFileInfo;
-use crate::store_err::StorageError;
-use crate::{
-    disk::error::DiskError,
-    error::{Error, Result},
-    store_api::{ErasureInfo, FileInfo, ObjectPartInfo, ERASURE_ALGORITHM},
-};
-
 // XL header specifies the format
 pub static XL_FILE_HEADER: [u8; 4] = [b'X', b'L', b'2', b' '];
 // pub static XL_FILE_VERSION_CURRENT: [u8; 4] = [0; 4];
@@ -1,8 +1,7 @@
+use common::error::{Error, Result};
 use serde::{Deserialize, Serialize};
-use uuid::Uuid;
-
-use crate::error::{Error, Result};
 use std::io::{Cursor, Read};
+use uuid::Uuid;

 #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
 pub struct InlineData(Vec<u8>);
@@ -23,7 +23,6 @@ use crate::heal::heal_ops::{HealSource, BG_HEALING_UUID};
 use crate::{
     config::RUSTFS_CONFIG_PREFIX,
     disk::{endpoint::Endpoint, error::DiskError, DiskAPI, DiskInfoOptions, BUCKET_META_PREFIX, RUSTFS_META_BUCKET},
-    error::{Error, Result},
     global::{GLOBAL_BackgroundHealRoutine, GLOBAL_BackgroundHealState, GLOBAL_LOCAL_DISK_MAP},
     heal::{
         data_usage::{DATA_USAGE_CACHE_NAME, DATA_USAGE_ROOT},
@@ -36,6 +35,7 @@ use crate::{
     store_api::{BucketInfo, BucketOptions, StorageAPI},
     utils::path::{path_join, SLASH_SEPARATOR},
 };
+use common::error::{Error, Result};

 pub static DEFAULT_MONITOR_NEW_DISK_INTERVAL: Duration = Duration::from_secs(10);
@@ -12,22 +12,6 @@ use std::{
     time::{Duration, SystemTime},
 };

-use chrono::{DateTime, Utc};
-use lazy_static::lazy_static;
-use rand::Rng;
-use rmp_serde::{Deserializer, Serializer};
-use s3s::dto::{ReplicationConfiguration, ReplicationRuleStatus};
-use serde::{Deserialize, Serialize};
-use tokio::{
-    sync::{
-        broadcast,
-        mpsc::{self, Sender},
-        RwLock,
-    },
-    time::sleep,
-};
-use tracing::{error, info};
-
 use super::{
     data_scanner_metric::{globalScannerMetrics, ScannerMetric, ScannerMetrics},
     data_usage::{store_data_usage_in_backend, DATA_USAGE_BLOOM_NAME_PATH},
@@ -38,11 +22,10 @@ use crate::heal::data_usage::DATA_USAGE_ROOT;
 use crate::{
     cache_value::metacache_set::{list_path_raw, ListPathRawOptions},
     config::{
-        common::{read_config, save_config},
+        com::{read_config, save_config},
         heal::Config,
     },
     disk::{error::DiskError, DiskInfoOptions, DiskStore, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams},
-    error::{Error, Result},
     global::{GLOBAL_BackgroundHealState, GLOBAL_IsErasure, GLOBAL_IsErasureSD},
     heal::{
         data_usage::BACKGROUND_HEAL_INFO_PATH,
@@ -61,6 +44,22 @@ use crate::{
     disk::DiskAPI,
     store_api::{FileInfo, ObjectInfo},
 };
+use chrono::{DateTime, Utc};
+use common::error::{Error, Result};
+use lazy_static::lazy_static;
+use rand::Rng;
+use rmp_serde::{Deserializer, Serializer};
+use s3s::dto::{ReplicationConfiguration, ReplicationRuleStatus};
+use serde::{Deserialize, Serialize};
+use tokio::{
+    sync::{
+        broadcast,
+        mpsc::{self, Sender},
+        RwLock,
+    },
+    time::sleep,
+};
+use tracing::{error, info};

 const DATA_SCANNER_SLEEP_PER_FOLDER: Duration = Duration::from_millis(1); // Time to wait between folders.
 const DATA_USAGE_UPDATE_DIR_CYCLES: u32 = 16; // Visit all folders every n cycles.
@@ -1,22 +1,21 @@
-use lazy_static::lazy_static;
-use serde::{Deserialize, Serialize};
-use std::{collections::HashMap, sync::Arc, time::SystemTime};
-use tokio::sync::mpsc::Receiver;
-use tracing::{error, warn};
-
 use crate::{
     bucket::metadata_sys::get_replication_config,
     config::{
-        common::{read_config, save_config},
+        com::{read_config, save_config},
         error::is_err_config_not_found,
     },
     disk::{BUCKET_META_PREFIX, RUSTFS_META_BUCKET},
-    error::Result,
     new_object_layer_fn,
     store::ECStore,
     store_err::to_object_err,
     utils::path::SLASH_SEPARATOR,
 };
+use common::error::Result;
+use lazy_static::lazy_static;
+use serde::{Deserialize, Serialize};
+use std::{collections::HashMap, sync::Arc, time::SystemTime};
+use tokio::sync::mpsc::Receiver;
+use tracing::{error, warn};

 pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR;
 const DATA_USAGE_OBJ_NAME: &str = ".usage.json";
@@ -1,11 +1,11 @@
-use crate::config::common::save_config;
+use crate::config::com::save_config;
 use crate::disk::error::DiskError;
 use crate::disk::{BUCKET_META_PREFIX, RUSTFS_META_BUCKET};
-use crate::error::{Error, Result};
 use crate::new_object_layer_fn;
 use crate::set_disk::SetDisks;
 use crate::store_api::{BucketInfo, ObjectIO, ObjectOptions};
 use bytesize::ByteSize;
+use common::error::{Error, Result};
 use http::HeaderMap;
 use path_clean::PathClean;
 use rand::Rng;
@@ -4,22 +4,21 @@ use std::{
     time::SystemTime,
 };

-use chrono::{DateTime, Utc};
-use lazy_static::lazy_static;
-use serde::{Deserialize, Serialize};
-use time::OffsetDateTime;
-use tokio::sync::RwLock;
-
 use crate::{
     config::storageclass::{RRS, STANDARD},
     disk::{DeleteOptions, DiskAPI, DiskStore, BUCKET_META_PREFIX, RUSTFS_META_BUCKET},
-    error::{Error, Result},
     global::GLOBAL_BackgroundHealState,
     heal::heal_ops::HEALING_TRACKER_FILENAME,
     new_object_layer_fn,
     store_api::{BucketInfo, StorageAPI},
     utils::fs::read_file,
 };
+use chrono::{DateTime, Utc};
+use common::error::{Error, Result};
+use lazy_static::lazy_static;
+use serde::{Deserialize, Serialize};
+use time::OffsetDateTime;
+use tokio::sync::RwLock;

 use super::{background_heal_ops::get_local_disks_to_heal, heal_ops::BG_HEALING_UUID};
@@ -6,7 +6,7 @@ use super::{
 };
 use crate::store_api::StorageAPI;
 use crate::{
-    config::common::CONFIG_PREFIX,
+    config::com::CONFIG_PREFIX,
     disk::RUSTFS_META_BUCKET,
     global::GLOBAL_BackgroundHealRoutine,
     heal::{error::ERR_HEAL_STOP_SIGNALLED, heal_commands::DRIVE_STATE_OK},
@@ -14,7 +14,6 @@ use crate::{
 use crate::{
     disk::{endpoint::Endpoint, MetaCacheEntry},
     endpoints::Endpoints,
-    error::{Error, Result},
     global::GLOBAL_IsDistErasure,
     heal::heal_commands::{HealStartSuccess, HEAL_UNKNOWN_SCAN},
     new_object_layer_fn,
@@ -25,6 +24,7 @@ use crate::{
     utils::path::path_join,
 };
 use chrono::Utc;
+use common::error::{Error, Result};
 use futures::join;
 use lazy_static::lazy_static;
 use madmin::heal_commands::{HealDriveInfo, HealItemType, HealResultItem};
@@ -1,6 +1,6 @@
 use crate::disk::MetaCacheEntry;
-use crate::error::Error;
-use crate::error::Result;
+use crate::error::clone_err;
+use common::error::{Error, Result};
 use rmp::Marker;
 use std::str::from_utf8;
 use tokio::io::AsyncRead;
@@ -246,7 +246,7 @@ impl<R: AsyncRead + Unpin> MetacacheReader<R> {
         self.check_init().await?;

         if let Some(err) = &self.err {
-            return Err(err.clone());
+            return Err(clone_err(err));
         }

         let mut n = size;
@@ -285,7 +285,7 @@ impl<R: AsyncRead + Unpin> MetacacheReader<R> {
         self.check_init().await?;

         if let Some(err) = &self.err {
-            return Err(err.clone());
+            return Err(clone_err(err));
         }

         match rmp::decode::read_bool(&mut self.read_more(1).await?) {
@@ -1,8 +1,8 @@
 use crate::endpoints::EndpointServerPools;
-use crate::error::{Error, Result};
 use crate::global::get_global_endpoints;
 use crate::peer_rest_client::PeerRestClient;
 use crate::StorageAPI;
+use common::error::{Error, Result};
 use futures::future::join_all;
 use lazy_static::lazy_static;
 use madmin::{ItemState, ServerProperties};
@@ -1,18 +1,6 @@
-use async_trait::async_trait;
-use futures::future::join_all;
-use madmin::heal_commands::{HealDriveInfo, HealResultItem};
-use protos::node_service_time_out_client;
-use protos::proto_gen::node_service::{
-    DeleteBucketRequest, GetBucketInfoRequest, HealBucketRequest, ListBucketRequest, MakeBucketRequest,
-};
-use regex::Regex;
-use std::{collections::HashMap, fmt::Debug, sync::Arc};
-use tokio::sync::RwLock;
-use tonic::Request;
-use tracing::info;
-
 use crate::disk::error::is_all_buckets_not_found;
 use crate::disk::{DiskAPI, DiskStore};
+use crate::error::clone_err;
 use crate::global::GLOBAL_LOCAL_DISK_MAP;
 use crate::heal::heal_commands::{
     HealOpts, DRIVE_STATE_CORRUPT, DRIVE_STATE_MISSING, DRIVE_STATE_OFFLINE, DRIVE_STATE_OK, HEAL_ITEM_BUCKET,
@@ -25,9 +13,21 @@ use crate::utils::wildcard::is_rustfs_meta_bucket_name;
 use crate::{
     disk::{self, error::DiskError, VolumeInfo},
     endpoints::{EndpointServerPools, Node},
-    error::{Error, Result},
     store_api::{BucketInfo, BucketOptions, DeleteBucketOptions, MakeBucketOptions},
 };
+use async_trait::async_trait;
+use common::error::{Error, Result};
+use futures::future::join_all;
+use madmin::heal_commands::{HealDriveInfo, HealResultItem};
+use protos::node_service_time_out_client;
+use protos::proto_gen::node_service::{
+    DeleteBucketRequest, GetBucketInfoRequest, HealBucketRequest, ListBucketRequest, MakeBucketRequest,
+};
+use regex::Regex;
+use std::{collections::HashMap, fmt::Debug, sync::Arc};
+use tokio::sync::RwLock;
+use tonic::Request;
+use tracing::info;

 type Client = Arc<Box<dyn PeerS3Client>>;

@@ -95,7 +95,7 @@ impl S3PeerSys {
         for (i, client) in self.clients.iter().enumerate() {
             if let Some(v) = client.get_pools() {
                 if v.contains(&pool_idx) {
-                    per_pool_errs.push(errs[i].clone());
+                    per_pool_errs.push(errs[i].as_ref().map(clone_err));
                 }
             }
         }
@@ -130,7 +130,7 @@ impl S3PeerSys {
         for (i, client) in self.clients.iter().enumerate() {
             if let Some(v) = client.get_pools() {
                 if v.contains(&pool_idx) {
-                    per_pool_errs.push(errs[i].clone());
+                    per_pool_errs.push(errs[i].as_ref().map(clone_err));
                 }
             }
         }
@@ -781,7 +781,7 @@ pub async fn heal_bucket_local(bucket: &str, opts: &HealOpts) -> Result<HealResu
         let bucket = bucket.to_string();
         let bs_clone = before_state.clone();
         let as_clone = after_state.clone();
-        let errs_clone = errs.clone();
+        let errs_clone = errs.iter().map(|e| e.as_ref().map(clone_err)).collect::<Vec<_>>();
         futures.push(async move {
             if bs_clone.read().await[idx] == DRIVE_STATE_MISSING {
                 info!("bucket not found, will recreate");
@@ -795,7 +795,7 @@ pub async fn heal_bucket_local(bucket: &str, opts: &HealOpts) -> Result<HealResu
                 }
             }
         }
-        errs_clone[idx].clone()
+        errs_clone[idx].as_ref().map(clone_err)
     });
 }
@@ -1,8 +1,7 @@
 use crate::bucket::versioning_sys::BucketVersioningSys;
-use crate::config::common::{read_config, save_config, CONFIG_PREFIX};
+use crate::config::com::{read_config, save_config, CONFIG_PREFIX};
 use crate::config::error::ConfigError;
 use crate::disk::{BUCKET_META_PREFIX, RUSTFS_META_BUCKET};
-use crate::error::{Error, Result};
 use crate::heal::heal_commands::HealOpts;
 use crate::new_object_layer_fn;
 use crate::notification_sys::get_global_notification_sys;
@@ -11,6 +10,7 @@ use crate::store_err::{is_err_bucket_exists, StorageError};
 use crate::utils::path::{path_join, SLASH_SEPARATOR};
 use crate::{sets::Sets, store::ECStore};
 use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
+use common::error::{Error, Result};
 use rmp_serde::{Deserializer, Serializer};
 use serde::{Deserialize, Serialize};
 use std::fmt::Display;
@@ -1,4 +1,5 @@
-use crate::{disk::error::DiskError, error::Error};
+use crate::{disk::error::DiskError, error::clone_err};
+use common::error::Error;
 use std::{collections::HashMap, fmt::Debug};
 // pub type CheckErrorFn = fn(e: &Error) -> bool;

@@ -106,7 +107,7 @@ fn reduce_errs(errs: &[Option<Error>], ignored_errs: &[Box<dyn CheckErrorFn>]) -

     if let Some(&c) = error_counts.get(&max_err) {
         if let Some(&err_idx) = error_map.get(&max_err) {
-            let err = errs[err_idx].clone();
+            let err = errs[err_idx].as_ref().map(clone_err);

             return (c, err);
         }
@@ -1,12 +1,3 @@
-use std::{
-    collections::{HashMap, HashSet},
-    io::{Cursor, Write},
-    mem::replace,
-    path::Path,
-    sync::Arc,
-    time::Duration,
-};
-
 use crate::{
     bitrot::{bitrot_verify, close_bitrot_writers, new_bitrot_filereader, new_bitrot_filewriter, BitrotFileWriter},
     cache_value::metacache_set::{list_path_raw, ListPathRawOptions},
@@ -20,7 +11,7 @@ use crate::{
         UpdateMetadataOpts, RUSTFS_META_BUCKET, RUSTFS_META_MULTIPART_BUCKET, RUSTFS_META_TMP_BUCKET,
     },
     erasure::Erasure,
-    error::{Error, Result},
+    error::clone_err,
     file_meta::{merge_file_meta_versions, FileMeta, FileMetaShallowVersion},
     global::{
         get_global_deployment_id, is_dist_erasure, GLOBAL_BackgroundHealState, GLOBAL_LOCAL_DISK_MAP,
@@ -64,6 +55,7 @@ use crate::{
 };
 use bytesize::ByteSize;
 use chrono::Utc;
+use common::error::{Error, Result};
 use futures::future::join_all;
 use glob::Pattern;
 use http::HeaderMap;
@@ -82,6 +74,14 @@ use rand::{
 use sha2::{Digest, Sha256};
 use std::hash::Hash;
 use std::time::SystemTime;
+use std::{
+    collections::{HashMap, HashSet},
+    io::{Cursor, Write},
+    mem::replace,
+    path::Path,
+    sync::Arc,
+    time::Duration,
+};
 use time::OffsetDateTime;
 use tokio::{
     io::{empty, AsyncWrite},
@@ -320,7 +320,7 @@ impl SetDisks {
             }
             Err(e) => {
                 // ress.push(None);
-                errs.push(Some(e.clone()));
+                errs.push(Some(clone_err(e)));
             }
         }
     }
@@ -453,7 +453,7 @@ impl SetDisks {
         Ok(())
     }

-    async fn cleanup_multipart_path(disks: &[Option<DiskStore>], paths: &[&str]) {
+    async fn cleanup_multipart_path(disks: &[Option<DiskStore>], paths: &[String]) {
         let mut futures = Vec::with_capacity(disks.len());

         let mut errs = Vec::with_capacity(disks.len());
@@ -479,6 +479,10 @@ impl SetDisks {
                 }
             }
         }
+
+        if errs.iter().any(|e| e.is_some()) {
+            warn!("cleanup_multipart_path errs {:?}", &errs);
+        }
     }
     async fn rename_part(
         disks: &[Option<DiskStore>],
@@ -518,7 +522,7 @@ impl SetDisks {

         if let Some(err) = reduce_write_quorum_errs(&errs, object_op_ignored_errs().as_ref(), write_quorum) {
             warn!("rename_part errs {:?}", &errs);
-            Self::cleanup_multipart_path(disks, vec![dst_object, format!("{}.meta", dst_object).as_str()].as_slice()).await;
+            Self::cleanup_multipart_path(disks, &[dst_object.to_owned(), format!("{}.meta", dst_object)]).await;
             return Err(err);
         }
@@ -1238,7 +1242,7 @@ impl SetDisks {
             Err(err) => {
                 for item in errs.iter_mut() {
                     if item.is_none() {
-                        *item = Some(err.clone())
+                        *item = Some(clone_err(&err));
                     }
                 }
@@ -1490,92 +1494,92 @@ impl SetDisks {
     // (ress, errs)
     // }

-    async fn remove_object_part(
-        &self,
-        bucket: &str,
-        object: &str,
-        upload_id: &str,
-        data_dir: &str,
-        part_num: usize,
-    ) -> Result<()> {
-        let upload_id_path = Self::get_upload_id_dir(bucket, object, upload_id);
-        let disks = self.disks.read().await;
+    // async fn remove_object_part(
+    //     &self,
+    //     bucket: &str,
+    //     object: &str,
+    //     upload_id: &str,
+    //     data_dir: &str,
+    //     part_num: usize,
+    // ) -> Result<()> {
+    //     let upload_id_path = Self::get_upload_id_dir(bucket, object, upload_id);
+    //     let disks = self.disks.read().await;

-        let disks = disks.clone();
+    //     let disks = disks.clone();

-        let file_path = format!("{}/{}/part.{}", upload_id_path, data_dir, part_num);
+    //     let file_path = format!("{}/{}/part.{}", upload_id_path, data_dir, part_num);

-        let mut futures = Vec::with_capacity(disks.len());
-        let mut errors = Vec::with_capacity(disks.len());
+    //     let mut futures = Vec::with_capacity(disks.len());
+    //     let mut errors = Vec::with_capacity(disks.len());

-        for disk in disks.iter() {
-            let file_path = file_path.clone();
-            let meta_file_path = format!("{}.meta", file_path);
+    //     for disk in disks.iter() {
+    //         let file_path = file_path.clone();
+    //         let meta_file_path = format!("{}.meta", file_path);

-            futures.push(async move {
-                if let Some(disk) = disk {
-                    disk.delete(RUSTFS_META_MULTIPART_BUCKET, &file_path, DeleteOptions::default())
-                        .await?;
-                    disk.delete(RUSTFS_META_MULTIPART_BUCKET, &meta_file_path, DeleteOptions::default())
-                        .await
-                } else {
-                    Err(Error::new(DiskError::DiskNotFound))
-                }
-            });
-        }
+    //         futures.push(async move {
+    //             if let Some(disk) = disk {
+    //                 disk.delete(RUSTFS_META_MULTIPART_BUCKET, &file_path, DeleteOptions::default())
+    //                     .await?;
+    //                 disk.delete(RUSTFS_META_MULTIPART_BUCKET, &meta_file_path, DeleteOptions::default())
+    //                     .await
+    //             } else {
+    //                 Err(Error::new(DiskError::DiskNotFound))
+    //             }
+    //         });
+    //     }

-        let results = join_all(futures).await;
-        for result in results {
-            match result {
-                Ok(_) => {
-                    errors.push(None);
-                }
-                Err(e) => {
-                    errors.push(Some(e));
-                }
-            }
-        }
+    //     let results = join_all(futures).await;
+    //     for result in results {
+    //         match result {
+    //             Ok(_) => {
+    //                 errors.push(None);
+    //             }
+    //             Err(e) => {
+    //                 errors.push(Some(e));
+    //             }
+    //         }
+    //     }

-        Ok(())
-    }
-    async fn remove_part_meta(&self, bucket: &str, object: &str, upload_id: &str, data_dir: &str, part_num: usize) -> Result<()> {
-        let upload_id_path = Self::get_upload_id_dir(bucket, object, upload_id);
-        let disks = self.disks.read().await;
+    //     Ok(())
+    // }
+    // async fn remove_part_meta(&self, bucket: &str, object: &str, upload_id: &str, data_dir: &str, part_num: usize) -> Result<()> {
+    //     let upload_id_path = Self::get_upload_id_dir(bucket, object, upload_id);
+    //     let disks = self.disks.read().await;

-        let disks = disks.clone();
-        // let disks = Self::shuffle_disks(&disks, &fi.erasure.distribution);
+    //     let disks = disks.clone();
+    //     // let disks = Self::shuffle_disks(&disks, &fi.erasure.distribution);

-        let file_path = format!("{}/{}/part.{}.meta", upload_id_path, data_dir, part_num);
+    //     let file_path = format!("{}/{}/part.{}.meta", upload_id_path, data_dir, part_num);

-        let mut futures = Vec::with_capacity(disks.len());
-        let mut errors = Vec::with_capacity(disks.len());
+    //     let mut futures = Vec::with_capacity(disks.len());
+    //     let mut errors = Vec::with_capacity(disks.len());

-        for disk in disks.iter() {
-            let file_path = file_path.clone();
-            futures.push(async move {
-                if let Some(disk) = disk {
-                    disk.delete(RUSTFS_META_MULTIPART_BUCKET, &file_path, DeleteOptions::default())
-                        .await
-                } else {
-                    Err(Error::new(DiskError::DiskNotFound))
-                }
-            });
-        }
+    //     for disk in disks.iter() {
+    //         let file_path = file_path.clone();
+    //         futures.push(async move {
+    //             if let Some(disk) = disk {
+    //                 disk.delete(RUSTFS_META_MULTIPART_BUCKET, &file_path, DeleteOptions::default())
+    //                     .await
+    //             } else {
+    //                 Err(Error::new(DiskError::DiskNotFound))
+    //             }
+    //         });
+    //     }

-        let results = join_all(futures).await;
-        for result in results {
-            match result {
-                Ok(_) => {
-                    errors.push(None);
-                }
-                Err(e) => {
-                    errors.push(Some(e));
-                }
-            }
-        }
+    //     let results = join_all(futures).await;
+    //     for result in results {
+    //         match result {
+    //             Ok(_) => {
+    //                 errors.push(None);
+    //             }
+    //             Err(e) => {
+    //                 errors.push(Some(e));
+    //             }
+    //         }
+    //     }

-        Ok(())
-    }
+    //     Ok(())
+    // }

     // #[tracing::instrument(skip(self))]
     pub async fn delete_all(&self, bucket: &str, prefix: &str) -> Result<()> {
@@ -2288,7 +2292,7 @@ impl SetDisks {
             "file({} : {}) part corrupt too much, can not to fix, disks_to_heal_count: {}, parity_blocks: {}",
             bucket, object, disks_to_heal_count, lastest_meta.erasure.parity_blocks
         );
-        let mut t_errs = vec![None; errs.len()];
+
         // Allow for dangling deletes, on versions that have DataDir missing etc.
         // this would end up restoring the correct readable versions.
         match self
@@ -2311,13 +2315,22 @@ impl SetDisks {
             } else {
                 Error::new(DiskError::FileNotFound)
             };
+            let mut t_errs = Vec::with_capacity(errs.len());
+            for _ in 0..errs.len() {
+                t_errs.push(None);
+            }
             return Ok((
                 self.default_heal_result(m, &t_errs, bucket, object, version_id).await,
                 Some(derr),
             ));
         }
         Err(err) => {
-            t_errs = vec![Some(err.clone()); errs.len()];
+            // t_errs = vec![Some(err.clone()); errs.len()];
+            let mut t_errs = Vec::with_capacity(errs.len());
+            for _ in 0..errs.len() {
+                t_errs.push(Some(clone_err(&err)));
+            }

             return Ok((
                 self.default_heal_result(FileInfo::default(), &t_errs, bucket, object, version_id)
                     .await,
@@ -3517,7 +3530,7 @@ impl SetDisks {
     }

     if let Some(err) = ret_err.as_ref() {
-        return Err(err.clone());
+        return Err(clone_err(err));
     }
     if !tracker.read().await.queue_buckets.is_empty() {
         return Err(Error::from_string(format!(
@@ -4434,7 +4447,7 @@ impl StorageAPI for SetDisks {
         }
     };

-    let mut upload_ids = Vec::new();
+    let mut upload_ids: Vec<String> = Vec::new();

     for disk in disks.iter().flatten() {
         if !disk.is_online().await {
@@ -4847,29 +4860,49 @@ impl StorageAPI for SetDisks {
         }
     }

+    let mut parts = Vec::with_capacity(curr_fi.parts.len());
+    // TODO: optimize cleanup_multipart_path
     for p in curr_fi.parts.iter() {
-        let _ = self
-            .remove_part_meta(
-                bucket,
-                object,
-                upload_id,
-                curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(),
-                p.number,
-            )
-            .await;
+        parts.push(path_join_buf(&[
+            &upload_id_path,
+            curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(),
+            format!("part.{}.meta", p.number).as_str(),
+        ]));

         if !fi.parts.iter().any(|v| v.number == p.number) {
-            let _ = self
-                .remove_object_part(
-                    bucket,
-                    object,
-                    upload_id,
-                    curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(),
-                    p.number,
-                )
-                .await;
+            parts.push(path_join_buf(&[
+                &upload_id_path,
+                curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(),
+                format!("part.{}", p.number).as_str(),
+            ]));
         }
+
+        // let _ = self
+        //     .remove_part_meta(
+        //         bucket,
+        //         object,
+        //         upload_id,
+        //         curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(),
+        //         p.number,
+        //     )
+        //     .await;

+        // if !fi.parts.iter().any(|v| v.number == p.number) {
+        //     let _ = self
+        //         .remove_object_part(
+        //             bucket,
+        //             object,
+        //             upload_id,
+        //             curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(),
+        //             p.number,
+        //         )
+        //         .await;
+        // }
     }

+    {
+        let disks = self.get_disks_internal().await;
+        Self::cleanup_multipart_path(&disks, &parts).await;
+    }

     let (online_disks, versions, op_old_dir) = Self::rename_data(
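
The rewrite above replaces the per-part `remove_part_meta`/`remove_object_part` round trips with one batched `cleanup_multipart_path` call: every stale path is collected first, then each disk handles the whole batch. A self-contained sketch of that batching shape (hypothetical names, printing instead of real deletes):

    use futures::future::join_all;

    // Stand-in for one disk handling the whole batch in a single call.
    async fn delete_paths(disk: usize, paths: &[String]) {
        for p in paths {
            println!("disk {disk}: delete {p}");
        }
    }

    #[tokio::main]
    async fn main() {
        // Collect every stale part path first...
        let paths: Vec<String> = (1..=3)
            .flat_map(|n| [format!("uploads/dd/part.{n}"), format!("uploads/dd/part.{n}.meta")])
            .collect();
        // ...then fan out once per disk instead of once per part.
        join_all((0..4).map(|d| delete_paths(d, &paths))).await;
    }
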
@@ -5203,7 +5236,10 @@ async fn disks_with_all_parts(

     let erasure_distribution_reliable = inconsistent <= parts_metadata.len() / 2;

-    let mut meta_errs = vec![None; errs.len()];
+    let mut meta_errs = Vec::with_capacity(errs.len());
+    for _ in 0..errs.len() {
+        meta_errs.push(None);
+    }

     for (index, disk) in online_disks.iter().enumerate() {
         let disk = if let Some(disk) = disk {
@@ -5213,8 +5249,8 @@ async fn disks_with_all_parts(
             continue;
         };

-        if let Some(err) = errs[index].clone() {
-            meta_errs[index] = Some(err);
+        if let Some(err) = &errs[index] {
+            meta_errs[index] = Some(clone_err(err));
             continue;
         }
         if !disk.is_online().await {
@@ -5370,7 +5406,7 @@ pub fn should_heal_object_on_disk(
     match err {
         Some(err) => match err.downcast_ref::<DiskError>() {
             Some(DiskError::FileNotFound) | Some(DiskError::FileVersionNotFound) | Some(DiskError::FileCorrupt) => {
-                return (true, Some(err.clone()));
+                return (true, Some(clone_err(err)));
             }
             _ => {}
         },
@@ -5393,7 +5429,7 @@ pub fn should_heal_object_on_disk(
             }
         }
     }
-    (false, err.clone())
+    (false, err.as_ref().map(clone_err))
 }

 async fn get_disks_info(disks: &[Option<DiskStore>], eps: &[Endpoint]) -> Vec<madmin::Disk> {
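
The push loops above (`t_errs`, `meta_errs`) look odd until you recall that `vec![elem; n]` requires `elem: Clone`, and `Option<Error>` stopped being cloneable with the switch to a boxed error type. A minimal sketch of the constraint and two Clone-free ways around it:

    // A type without Clone, standing in for common::error::Error.
    struct NotClone;

    fn main() {
        // let v = vec![Option::<NotClone>::None; 3]; // error: NotClone does not satisfy Clone

        // The diff's approach: reserve and push.
        let mut v: Vec<Option<NotClone>> = Vec::with_capacity(3);
        for _ in 0..3 {
            v.push(None);
        }

        // Equivalent iterator form, also Clone-free.
        let w: Vec<Option<NotClone>> = (0..3).map(|_| None).collect();
        assert_eq!(v.len(), w.len());
    }
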
@@ -1,14 +1,6 @@
 #![allow(clippy::map_entry)]
 use std::{collections::HashMap, sync::Arc};

-use common::globals::GLOBAL_Local_Node_Name;
-use futures::future::join_all;
-use http::HeaderMap;
-use lock::{namespace_lock::NsLockMap, new_lock_api, LockApi};
-use madmin::heal_commands::{HealDriveInfo, HealResultItem};
-use tokio::sync::RwLock;
-use uuid::Uuid;
-
 use crate::{
     disk::{
         error::{is_unformatted_disk, DiskError},
@@ -16,7 +8,6 @@ use crate::{
         new_disk, DiskAPI, DiskInfo, DiskOption, DiskStore,
     },
     endpoints::{Endpoints, PoolEndpoints},
-    error::{Error, Result},
     global::{is_dist_erasure, GLOBAL_LOCAL_DISK_SET_DRIVES},
     heal::heal_commands::{
         HealOpts, DRIVE_STATE_CORRUPT, DRIVE_STATE_MISSING, DRIVE_STATE_OFFLINE, DRIVE_STATE_OK, HEAL_ITEM_METADATA,
@@ -31,6 +22,14 @@ use crate::{
     store_init::{check_format_erasure_values, get_format_erasure_in_quorum, load_format_erasure_all, save_format_file},
     utils::{hash, path::path_join_buf},
 };
+use common::error::{Error, Result};
+use common::globals::GLOBAL_Local_Node_Name;
+use futures::future::join_all;
+use http::HeaderMap;
+use lock::{namespace_lock::NsLockMap, new_lock_api, LockApi};
+use madmin::heal_commands::{HealDriveInfo, HealResultItem};
+use tokio::sync::RwLock;
+use uuid::Uuid;

 use crate::heal::heal_ops::HealSequence;
 use tokio::time::Duration;
@@ -767,30 +766,50 @@ async fn init_storage_disks_with_errors(
     opts: &DiskOption,
 ) -> (Vec<Option<DiskStore>>, Vec<Option<Error>>) {
     // Bootstrap disks.
-    let disks = Arc::new(RwLock::new(vec![None; endpoints.as_ref().len()]));
-    let errs = Arc::new(RwLock::new(vec![None; endpoints.as_ref().len()]));
+    // let disks = Arc::new(RwLock::new(vec![None; endpoints.as_ref().len()]));
+    // let errs = Arc::new(RwLock::new(vec![None; endpoints.as_ref().len()]));
     let mut futures = Vec::with_capacity(endpoints.as_ref().len());
-    for (index, endpoint) in endpoints.as_ref().iter().enumerate() {
-        let ep = endpoint.clone();
-        let opt = opts.clone();
-        let disks_clone = disks.clone();
-        let errs_clone = errs.clone();
-        futures.push(tokio::spawn(async move {
-            match new_disk(&ep, &opt).await {
-                Ok(disk) => {
-                    disks_clone.write().await[index] = Some(disk);
-                    errs_clone.write().await[index] = None;
-                }
-                Err(err) => {
-                    disks_clone.write().await[index] = None;
-                    errs_clone.write().await[index] = Some(err);
-                }
-            }
-        }));
+    for endpoint in endpoints.as_ref().iter() {
+        futures.push(new_disk(endpoint, opts));
+
+        // let ep = endpoint.clone();
+        // let opt = opts.clone();
+        // let disks_clone = disks.clone();
+        // let errs_clone = errs.clone();
+        // futures.push(tokio::spawn(async move {
+        //     match new_disk(&ep, &opt).await {
+        //         Ok(disk) => {
+        //             disks_clone.write().await[index] = Some(disk);
+        //             errs_clone.write().await[index] = None;
+        //         }
+        //         Err(err) => {
+        //             disks_clone.write().await[index] = None;
+        //             errs_clone.write().await[index] = Some(err);
+        //         }
+        //     }
+        // }));
     }
-    let _ = join_all(futures).await;
-    let disks = disks.read().await.clone();
-    let errs = errs.read().await.clone();
+    // let _ = join_all(futures).await;
+    // let disks = disks.read().await.clone();
+    // let errs = errs.read().await.clone();
+
+    let mut disks = Vec::with_capacity(endpoints.as_ref().len());
+    let mut errs = Vec::with_capacity(endpoints.as_ref().len());
+
+    let results = join_all(futures).await;
+    for result in results {
+        match result {
+            Ok(disk) => {
+                disks.push(Some(disk));
+                errs.push(None);
+            }
+            Err(err) => {
+                disks.push(None);
+                errs.push(Some(err));
+            }
+        }
+    }

     (disks, errs)
 }
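
The `init_storage_disks_with_errors` rewrite above leans on the fact that `futures::future::join_all` yields results in the same order as the input futures, so the old `Arc<RwLock<Vec<_>>>`-plus-index bookkeeping becomes unnecessary. A small sketch of that guarantee (hypothetical `new_disk` stand-in):

    use futures::future::join_all;

    async fn new_disk(endpoint: usize) -> Result<usize, String> {
        if endpoint % 2 == 0 { Ok(endpoint) } else { Err(format!("disk {endpoint} offline")) }
    }

    #[tokio::main]
    async fn main() {
        let results = join_all((0..4).map(new_disk)).await;
        // results[i] corresponds to endpoint i: no shared locks or index math needed.
        for (i, r) in results.iter().enumerate() {
            println!("endpoint {i}: {r:?}");
        }
    }
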
@@ -6,6 +6,7 @@ use crate::config::GLOBAL_StorageClass;
 use crate::config::{self, storageclass, GLOBAL_ConfigSys};
 use crate::disk::endpoint::{Endpoint, EndpointType};
 use crate::disk::{DiskAPI, DiskInfo, DiskInfoOptions, MetaCacheEntry};
+use crate::error::clone_err;
 use crate::global::{
     is_dist_erasure, is_erasure_sd, set_global_deployment_id, set_object_layer, DISK_ASSUME_UNKNOWN_SIZE, DISK_FILL_FRACTION,
     DISK_MIN_INODES, DISK_RESERVE_FRACTION, GLOBAL_LOCAL_DISK_MAP, GLOBAL_LOCAL_DISK_SET_DRIVES,
@@ -30,7 +31,6 @@ use crate::{
     bucket::metadata::BucketMetadata,
     disk::{error::DiskError, new_disk, DiskOption, DiskStore, BUCKET_META_PREFIX, RUSTFS_META_BUCKET},
     endpoints::EndpointServerPools,
-    error::{Error, Result},
     peer::S3PeerSys,
     sets::Sets,
     store_api::{
@@ -40,6 +40,7 @@ use crate::{
     },
     store_init, utils,
 };
+use common::error::{Error, Result};
 use common::globals::{GLOBAL_Local_Node_Name, GLOBAL_Rustfs_Host, GLOBAL_Rustfs_Port};
 use futures::future::join_all;
 use glob::Pattern;
@@ -664,7 +665,7 @@ impl ECStore {
     has_def_pool = true;

     if !is_err_object_not_found(err) && !is_err_version_not_found(err) {
-        return Err(err.clone());
+        return Err(clone_err(err));
     }

     if pinfo.object_info.delete_marker && !pinfo.object_info.name.is_empty() {
@@ -802,7 +803,7 @@ impl ECStore {
     }
     let _ = task.await;
     if let Some(err) = first_err.read().await.as_ref() {
-        return Err(err.clone());
+        return Err(clone_err(err));
     }
     Ok(())
 }
@@ -932,8 +933,8 @@ impl ECStore {
         }
     }

-    if derrs[0].is_some() {
-        return Err(derrs[0].as_ref().unwrap().clone());
+    if let Some(e) = &derrs[0] {
+        return Err(clone_err(e));
     }

     Ok(objs[0].as_ref().unwrap().clone())
@@ -1056,13 +1057,23 @@ struct PoolErr {
     err: Option<Error>,
 }

-#[derive(Debug, Default, Clone)]
+#[derive(Debug, Default)]
 pub struct PoolObjInfo {
     pub index: usize,
     pub object_info: ObjectInfo,
     pub err: Option<Error>,
 }

+impl Clone for PoolObjInfo {
+    fn clone(&self) -> Self {
+        Self {
+            index: self.index,
+            object_info: self.object_info.clone(),
+            err: self.err.as_ref().map(clone_err),
+        }
+    }
+}
+
 // #[derive(Debug, Default, Clone)]
 // pub struct ListPathOptions {
 //     pub id: String,
@@ -1312,7 +1323,7 @@ impl StorageAPI for ECStore {

     meta.save().await.map_err(|e| to_object_err(e, vec![bucket]))?;

-    set_bucket_metadata(bucket.to_string(), meta).await;
+    set_bucket_metadata(bucket.to_string(), meta).await?;

     Ok(())
 }
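
`PoolObjInfo` above drops `derive(Clone)` because its `err: Option<Error>` field is no longer cloneable; the manual `impl Clone` routes just that one field through `clone_err`. The same pattern in miniature, with a hypothetical stand-in error type:

    struct Err0(String); // stand-in for a non-Clone error type

    fn clone_err0(e: &Err0) -> Err0 {
        Err0(e.0.clone())
    }

    struct Info {
        index: usize,
        err: Option<Err0>,
    }

    impl Clone for Info {
        fn clone(&self) -> Self {
            Self {
                index: self.index,
                // Only the non-Clone field needs special handling.
                err: self.err.as_ref().map(clone_err0),
            }
        }
    }

    fn main() {
        let a = Info { index: 1, err: Some(Err0("boom".into())) };
        let b = a.clone();
        assert_eq!(b.index, 1);
    }
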
@@ -2037,45 +2048,59 @@ impl StorageAPI for ECStore {
     ) -> Result<(HealResultItem, Option<Error>)> {
         info!("ECStore heal_object");
         let object = utils::path::encode_dir_object(object);
-        let errs = Arc::new(RwLock::new(vec![None; self.pools.len()]));
-        let results = Arc::new(RwLock::new(vec![HealResultItem::default(); self.pools.len()]));
-        let mut futures = Vec::with_capacity(self.pools.len());
-        for (idx, pool) in self.pools.iter().enumerate() {
-            //TODO: IsSuspended
-            let object = object.clone();
-            let results = results.clone();
-            let errs = errs.clone();
-            futures.push(async move {
-                match pool.heal_object(bucket, &object, version_id, opts).await {
-                    Ok((mut result, err)) => {
-                        result.object = utils::path::decode_dir_object(&result.object);
-                        results.write().await.insert(idx, result);
-                        errs.write().await[idx] = err;
-                    }
-                    Err(err) => {
-                        errs.write().await[idx] = Some(err);
-                    }
-                }
-            });
-        }
-        let _ = join_all(futures).await;
+        let mut futures = Vec::with_capacity(self.pools.len());
+        for pool in self.pools.iter() {
+            //TODO: IsSuspended
+            futures.push(pool.heal_object(bucket, &object, version_id, opts));
+            // futures.push(async move {
+            //     match pool.heal_object(bucket, &object, version_id, opts).await {
+            //         Ok((mut result, err)) => {
+            //             result.object = utils::path::decode_dir_object(&result.object);
+            //             results.write().await.insert(idx, result);
+            //             errs.write().await[idx] = err;
+            //         }
+            //         Err(err) => {
+            //             errs.write().await[idx] = Some(err);
+            //         }
+            //     }
+            // });
         }
+        let results = join_all(futures).await;
+
+        let mut errs = Vec::with_capacity(self.pools.len());
+        let mut ress = Vec::with_capacity(self.pools.len());
+
+        for res in results.into_iter() {
+            match res {
+                Ok((result, err)) => {
+                    let mut result = result;
+                    result.object = utils::path::decode_dir_object(&result.object);
+                    ress.push(result);
+                    errs.push(err);
+                }
+                Err(err) => {
+                    errs.push(Some(err));
+                    ress.push(HealResultItem::default());
+                }
+            }
+        }

         // Return the first nil error
-        for (index, err) in errs.read().await.iter().enumerate() {
+        for (idx, err) in errs.iter().enumerate() {
             if err.is_none() {
-                return Ok((results.write().await.remove(index), None));
+                return Ok((ress.remove(idx), None));
             }
         }

         // No pool returned a nil error, return the first non 'not found' error
-        for (index, err) in errs.read().await.iter().enumerate() {
+        for (index, err) in errs.iter().enumerate() {
             match err {
                 Some(err) => match err.downcast_ref::<DiskError>() {
                     Some(DiskError::FileNotFound) | Some(DiskError::FileVersionNotFound) => {}
-                    _ => return Ok((results.write().await.remove(index), Some(err.clone()))),
+                    _ => return Ok((ress.remove(index), Some(clone_err(err)))),
                 },
                 None => {
-                    return Ok((results.write().await.remove(index), None));
+                    return Ok((ress.remove(index), None));
                 }
             }
         }
@@ -2227,7 +2252,7 @@ impl StorageAPI for ECStore {
     }

     if !errs.is_empty() {
-        return Err(errs[0].clone());
+        return Err(clone_err(&errs[0]));
     }

     Ok(())
@@ -1,13 +1,8 @@
 use crate::heal::heal_ops::HealSequence;
 use crate::io::FileReader;
 use crate::store_utils::clean_metadata;
-use crate::{
-    disk::DiskStore,
-    error::{Error, Result},
-    heal::heal_commands::HealOpts,
-    utils::path::decode_dir_object,
-    xhttp,
-};
+use crate::{disk::DiskStore, heal::heal_commands::HealOpts, utils::path::decode_dir_object, xhttp};
+use common::error::{Error, Result};
 use http::{HeaderMap, HeaderValue};
 use madmin::heal_commands::HealResultItem;
 use rmp_serde::Serializer;
@@ -1,8 +1,8 @@
 use crate::{
     disk::error::{is_err_file_not_found, DiskError},
-    error::Error,
     utils::path::decode_dir_object,
 };
+use common::error::Error;

 #[derive(Debug, thiserror::Error, PartialEq, Eq)]
 pub enum StorageError {
@@ -7,9 +7,9 @@ use crate::{
         new_disk, DiskInfoOptions, DiskOption, DiskStore, FORMAT_CONFIG_FILE, RUSTFS_META_BUCKET,
     },
     endpoints::Endpoints,
-    error::{Error, Result},
     heal::heal_commands::init_healing_tracker,
 };
+use common::error::{Error, Result};
 use futures::future::join_all;
 use std::{
     collections::{hash_map::Entry, HashMap},
@@ -6,7 +6,7 @@ use crate::disk::{
     DiskInfo, DiskStore, MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry,
     MetadataResolutionParams,
 };
-use crate::error::{Error, Result};
+use crate::error::clone_err;
 use crate::file_meta::merge_file_meta_versions;
 use crate::peer::is_reserved_or_invalid_bucket;
 use crate::set_disk::SetDisks;
@@ -16,6 +16,7 @@ use crate::store_err::{is_err_bucket_not_found, to_object_err, StorageError};
 use crate::utils::path::{self, base_dir_from_prefix, SLASH_SEPARATOR};
 use crate::StorageAPI;
 use crate::{store::ECStore, store_api::ListObjectsV2Info};
+use common::error::{Error, Result};
 use futures::future::join_all;
 use rand::seq::SliceRandom;
 use rand::thread_rng;
@@ -508,7 +509,7 @@ impl ECStore {

     // cancel channel
     let (cancel_tx, cancel_rx) = broadcast::channel(1);
-    let (err_tx, mut err_rx) = broadcast::channel::<Error>(1);
+    let (err_tx, mut err_rx) = broadcast::channel::<Arc<Error>>(1);

     let (sender, recv) = mpsc::channel(o.limit as usize);

@@ -521,7 +522,7 @@ impl ECStore {
     opts.stop_disk_at_limit = true;
     if let Err(err) = store.list_merged(cancel_rx1, opts, sender).await {
         error!("list_merged err {:?}", err);
-        let _ = err_tx1.send(err);
+        let _ = err_tx1.send(Arc::new(err));
     }
 });

@@ -533,7 +534,7 @@ impl ECStore {
 let job2 = tokio::spawn(async move {
     if let Err(err) = gather_results(cancel_rx2, opts, recv, result_tx).await {
         error!("gather_results err {:?}", err);
-        let _ = err_tx2.send(err);
+        let _ = err_tx2.send(Arc::new(err));
     }
 });

@@ -545,7 +546,7 @@ impl ECStore {
 match res {
     Ok(o) => {
         error!("list_path err_rx.recv() ok {:?}", &o);
-        MetaCacheEntriesSortedResult{ entries: None, err: Some(o) }
+        MetaCacheEntriesSortedResult{ entries: None, err: Some(clone_err(o.as_ref())) }
     },
     Err(err) => {
         error!("list_path err_rx.recv() err {:?}", &err);
@@ -659,7 +660,7 @@ impl ECStore {
         continue;
     }

-    return Err(err.clone());
+    return Err(clone_err(err));
 } else {
     all_at_eof = false;
     continue;
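
`tokio::sync::broadcast` requires `T: Clone` because every receiver gets its own copy of each value; wrapping the non-Clone error in `Arc` satisfies that bound cheaply, which is exactly what the `broadcast::channel::<Arc<Error>>(1)` change above does. A self-contained sketch:

    use std::sync::Arc;
    use tokio::sync::broadcast;

    #[derive(Debug)]
    struct OpaqueError(String); // non-Clone, like common::error::Error

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = broadcast::channel::<Arc<OpaqueError>>(1);
        // Arc<T> is Clone regardless of T, so the channel bound is met.
        tx.send(Arc::new(OpaqueError("list_merged failed".into()))).unwrap();
        let got = rx.recv().await.unwrap();
        println!("received: {:?}", got);
    }
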
@@ -1,4 +1,4 @@
-use crate::error::{Error, Result};
+use common::error::{Error, Result};

 pub fn parse_bool(str: &str) -> Result<bool> {
     match str {

@@ -1,4 +1,4 @@
-use crate::error::{Error, Result};
+use common::error::{Error, Result};
 use lazy_static::*;
 use regex::Regex;
@@ -1,10 +1,10 @@
 use crate::bucket::error::BucketMetadataError;
 use crate::config::error::ConfigError;
 use crate::disk::error::DiskError;
-use crate::error::Error;
 use crate::quorum::QuorumError;
 use crate::store_err::StorageError;
 use crate::store_init::ErasureError;
+use common::error::Error;
 use protos::proto_gen::node_service::Error as Proto_Error;

 pub mod bool_flag;

@@ -1,4 +1,4 @@
-use crate::error::{Error, Result};
+use common::error::{Error, Result};
 use lazy_static::lazy_static;
 use std::{
     collections::HashSet,
@@ -4,10 +4,8 @@ use std::fs::File;
 use std::io::{self, BufRead, Error, ErrorKind};
 use std::path::Path;

-use crate::{
-    disk::Info,
-    error::{Error as e_Error, Result},
-};
+use crate::disk::Info;
+use common::error::{Error as e_Error, Result};

 use super::IOStats;

@@ -1,5 +1,6 @@
 use super::IOStats;
-use crate::{disk::Info, error::Result};
+use crate::disk::Info;
+use common::error::Result;
 use nix::sys::{stat::stat, statfs::statfs};
 use std::io::{Error, ErrorKind};
 use std::path::Path;
@@ -15,6 +15,7 @@ log.workspace = true
 time = { workspace = true, features = ["serde-human-readable"] }
 serde = { workspace = true, features = ["derive", "rc"] }
 ecstore = { path = "../ecstore" }
+policy.workspace = true
 serde_json.workspace = true
 async-trait.workspace = true
 thiserror.workspace = true
@@ -31,6 +32,7 @@ tracing.workspace = true
 madmin.workspace = true
 lazy_static.workspace = true
 regex = "1.11.1"
+common.workspace = true

 [dev-dependencies]
 test-case.workspace = true
@@ -7,14 +7,13 @@ use std::{

 use arc_swap::{ArcSwap, AsRaw, Guard};
 use log::warn;
+use policy::{
+    auth::UserIdentity,
+    policy::{Args, PolicyDoc},
+};
 use time::OffsetDateTime;

-use crate::{
-    auth::UserIdentity,
-    policy::PolicyDoc,
-    store::{GroupInfo, MappedPolicy},
-    sys::Args,
-};
+use crate::store::{GroupInfo, MappedPolicy};

 pub struct Cache {
     pub policy_docs: ArcSwap<CacheEntity<PolicyDoc>>,
@@ -1,12 +1,14 @@
|
||||
use crate::policy;
|
||||
use ecstore::disk::error::clone_disk_err;
|
||||
use ecstore::disk::error::DiskError;
|
||||
use policy::policy::Error as PolicyError;
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum Error {
|
||||
#[error(transparent)]
|
||||
PolicyError(#[from] policy::Error),
|
||||
PolicyError(#[from] PolicyError),
|
||||
|
||||
#[error("ecsotre error: {0}")]
|
||||
EcstoreError(ecstore::error::Error),
|
||||
EcstoreError(common::error::Error),
|
||||
|
||||
#[error("{0}")]
|
||||
StringError(String),
|
||||
@@ -96,7 +98,7 @@ pub enum Error {
|
||||
// matches!(e, Error::NoSuchUser(_))
|
||||
// }
|
||||
|
||||
pub fn is_err_no_such_policy(err: &ecstore::error::Error) -> bool {
|
||||
pub fn is_err_no_such_policy(err: &common::error::Error) -> bool {
|
||||
if let Some(e) = err.downcast_ref::<Error>() {
|
||||
matches!(e, Error::NoSuchPolicy)
|
||||
} else {
|
||||
@@ -104,7 +106,7 @@ pub fn is_err_no_such_policy(err: &ecstore::error::Error) -> bool {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_err_no_such_user(err: &ecstore::error::Error) -> bool {
|
||||
pub fn is_err_no_such_user(err: &common::error::Error) -> bool {
|
||||
if let Some(e) = err.downcast_ref::<Error>() {
|
||||
matches!(e, Error::NoSuchUser(_))
|
||||
} else {
|
||||
@@ -112,7 +114,7 @@ pub fn is_err_no_such_user(err: &ecstore::error::Error) -> bool {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_err_no_such_account(err: &ecstore::error::Error) -> bool {
|
||||
pub fn is_err_no_such_account(err: &common::error::Error) -> bool {
|
||||
if let Some(e) = err.downcast_ref::<Error>() {
|
||||
matches!(e, Error::NoSuchAccount(_))
|
||||
} else {
|
||||
@@ -120,7 +122,7 @@ pub fn is_err_no_such_account(err: &ecstore::error::Error) -> bool {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_err_no_such_temp_account(err: &ecstore::error::Error) -> bool {
|
||||
pub fn is_err_no_such_temp_account(err: &common::error::Error) -> bool {
|
||||
if let Some(e) = err.downcast_ref::<Error>() {
|
||||
matches!(e, Error::NoSuchTempAccount(_))
|
||||
} else {
|
||||
@@ -128,7 +130,7 @@ pub fn is_err_no_such_temp_account(err: &ecstore::error::Error) -> bool {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_err_no_such_group(err: &ecstore::error::Error) -> bool {
|
||||
pub fn is_err_no_such_group(err: &common::error::Error) -> bool {
|
||||
if let Some(e) = err.downcast_ref::<Error>() {
|
||||
matches!(e, Error::NoSuchGroup(_))
|
||||
} else {
|
||||
@@ -136,10 +138,25 @@ pub fn is_err_no_such_group(err: &ecstore::error::Error) -> bool {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_err_no_such_service_account(err: &ecstore::error::Error) -> bool {
|
||||
pub fn is_err_no_such_service_account(err: &common::error::Error) -> bool {
|
||||
if let Some(e) = err.downcast_ref::<Error>() {
|
||||
matches!(e, Error::NoSuchServiceAccount(_))
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub fn clone_err(e: &common::error::Error) -> common::error::Error {
|
||||
if let Some(e) = e.downcast_ref::<DiskError>() {
|
||||
clone_disk_err(e)
|
||||
} else if let Some(e) = e.downcast_ref::<std::io::Error>() {
|
||||
if let Some(code) = e.raw_os_error() {
|
||||
common::error::Error::new(std::io::Error::from_raw_os_error(code))
|
||||
} else {
|
||||
common::error::Error::new(std::io::Error::new(e.kind(), e.to_string()))
|
||||
}
|
||||
} else {
|
||||
//TODO: 优化其他类型
|
||||
common::error::Error::msg(e.to_string())
|
||||
}
|
||||
}
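
Because common::error::Error type-erases its source, these helpers recover the concrete IAM error via downcast_ref so call sites can branch without knowing the error's dynamic type. A sketch of the intended pattern; lookup_policy and its Policy return type are hypothetical stand-ins:

use common::error::Result;

fn load_policy_or_none(name: &str) -> Result<Option<Policy>> {
    match lookup_policy(name) {
        Ok(p) => Ok(Some(p)),
        // a missing policy is a soft miss here, not a hard failure
        Err(err) if is_err_no_such_policy(&err) => Ok(None),
        Err(err) => Err(err),
    }
}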

@@ -1,154 +0,0 @@
// use std::{borrow::Cow, collections::HashMap};

// use log::{info, warn};

// use crate::{
//     arn::ARN,
//     auth::UserIdentity,
//     cache::CacheInner,
//     policy::{utils::get_values_from_claims, Args, Policy},
//     store::Store,
//     Error,
// };

// pub(crate) struct Handler<'m, T> {
//     cache: CacheInner,
//     api: &'m T,
//     roles: &'m HashMap<ARN, Vec<String>>,
// }

// impl<'m, T> Handler<'m, T> {
//     pub fn new(cache: CacheInner, api: &'m T, roles: &'m HashMap<ARN, Vec<String>>) -> Self {
//         Self { cache, api, roles }
//     }
// }

// impl<'m, T> Handler<'m, T>
// where
//     T: Store,
// {
//     #[inline]
//     fn get_user<'a>(&self, user_name: &'a str) -> Option<&UserIdentity> {
//         self.cache
//             .users
//             .get(user_name)
//             .or_else(|| self.cache.sts_accounts.get(user_name))
//     }

//     async fn get_policy(&self, name: &str, _groups: &[String]) -> crate::Result<Vec<String>> {
//         if name.is_empty() {
//             return Err(Error::InvalidArgument);
//         }

//         todo!()
//         // self.api.policy_db_get(name, groups)
//     }

//     /// If this is a temp user, returns Ok(Some(parent_name));
//     /// otherwise returns Ok(None).
//     fn is_temp_user<'a>(&self, user_name: &'a str) -> crate::Result<Option<&str>> {
//         let user = self
//             .get_user(user_name)
//             .ok_or_else(|| Error::NoSuchUser(user_name.to_owned()))?;

//         if user.credentials.is_temp() {
//             Ok(Some(&user.credentials.parent_user))
//         } else {
//             Ok(None)
//         }
//     }

//     /// If this is a service account, returns Ok(Some(parent_name));
//     /// otherwise returns Ok(None).
//     fn is_service_account<'a>(&self, user_name: &'a str) -> crate::Result<Option<&str>> {
//         let user = self
//             .get_user(user_name)
//             .ok_or_else(|| Error::NoSuchUser(user_name.to_owned()))?;

//         if user.credentials.is_service_account() {
//             Ok(Some(&user.credentials.parent_user))
//         } else {
//             Ok(None)
//         }
//     }

//     // todo
//     pub fn is_allowed_sts(&self, args: &Args, parent: &str) -> bool {
//         warn!("unimplemented is_allowed_sts");
//         false
//     }

//     // todo
//     pub async fn is_allowed_service_account<'a>(&self, args: &Args<'a>, parent: &str) -> bool {
//         let Some(p) = args.claims.get(parent) else {
//             return false;
//         };

//         if let Some(parent_in_claim) = p.as_str() {
//             if parent_in_claim != parent {
//                 return false;
//             }
//         } else {
//             return false;
//         }

//         let is_owner_derived = parent == "rustfsadmin"; // TODO: use the global admin credential instead of a hard-coded name
//         let role_arn = args.get_role_arn();
//         let mut svc_policies = None;

//         if is_owner_derived {
//         } else if let Some(x) = role_arn {
//             let Ok(arn) = x.parse::<ARN>() else {
//                 info!("error parsing role ARN {x}");
//                 return false;
//             };

//             svc_policies = self.roles.get(&arn).map(|x| Cow::from(x));
//         } else {
//             let Ok(mut p) = self.get_policy(parent, &args.groups[..]).await else { return false };
//             if p.is_empty() {
//                 // todo iamPolicyClaimNameOpenID
//                 let (p1, _) = get_values_from_claims(&args.claims, "");
//                 p = p1;
//             }
//             svc_policies = Some(Cow::Owned(p));
//         }

//         if is_owner_derived && svc_policies.as_ref().map(|x| x.as_ref().len()).unwrap_or_default() == 0 {
//             return false;
//         }

//         false
//     }

//     pub async fn get_combined_policy(&self, _policies: &[String]) -> Policy {
//         todo!()
//     }

//     pub async fn is_allowed<'a>(&self, args: Args<'a>) -> bool {
//         if args.is_owner {
//             return true;
//         }

//         match self.is_temp_user(&args.account) {
//             Ok(Some(parent)) => return self.is_allowed_sts(&args, parent),
//             Err(_) => return false,
//             _ => {}
//         }

//         match self.is_service_account(&args.account) {
//             Ok(Some(parent)) => return self.is_allowed_service_account(&args, parent).await,
//             Err(_) => return false,
//             _ => {}
//         }

//         let Ok(policies) = self.get_policy(&args.account, &args.groups).await else { return false };

//         if policies.is_empty() {
//             return false;
//         }

//         let policy = self.get_combined_policy(&policies[..]).await;
//         policy.is_allowed(&args)
//     }
// }

@@ -1,23 +1,16 @@
use auth::Credentials;
use ecstore::error::{Error, Result};
use common::error::{Error, Result};
use ecstore::store::ECStore;
use error::Error as IamError;
use manager::IamCache;
use policy::auth::Credentials;
use std::sync::{Arc, OnceLock};
use store::object::ObjectStore;
use sys::IamSys;
use tracing::{debug, instrument};

pub mod cache;
mod format;
mod handler;

pub mod arn;
pub mod auth;
pub mod error;
pub mod manager;
pub mod policy;
pub mod service_type;
pub mod store;
pub mod utils;

@@ -1,24 +1,26 @@
use crate::{
    arn::ARN,
    auth::{self, get_claims_from_token_with_secret, is_secret_key_valid, jwt_sign, Credentials, UserIdentity},
    cache::{Cache, CacheEntity},
    error::{is_err_no_such_group, is_err_no_such_policy, is_err_no_such_user, Error as IamError},
    format::Format,
    get_global_action_cred,
    policy::{Policy, PolicyDoc, DEFAULT_POLICIES},
    store::{object::IAM_CONFIG_PREFIX, GroupInfo, MappedPolicy, Store, UserType},
    sys::{
        iam_policy_claim_name_sa, UpdateServiceAccountOpts, EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE,
        MAX_SVCSESSION_POLICY_SIZE, SESSION_POLICY_NAME, SESSION_POLICY_NAME_EXTRACTED, STATUS_DISABLED, STATUS_ENABLED,
        UpdateServiceAccountOpts, MAX_SVCSESSION_POLICY_SIZE, SESSION_POLICY_NAME, SESSION_POLICY_NAME_EXTRACTED,
        STATUS_DISABLED, STATUS_ENABLED,
    },
};
use common::error::{Error, Result};
use ecstore::config::error::is_err_config_not_found;
use ecstore::utils::{crypto::base64_encode, path::path_join_buf};
use ecstore::{
    config::error::is_err_config_not_found,
    error::{Error, Result},
};
use log::{debug, warn};
use madmin::{AccountStatus, AddOrUpdateUserReq, GroupDesc};
use policy::{
    arn::ARN,
    auth::{self, get_claims_from_token_with_secret, is_secret_key_valid, jwt_sign, Credentials, UserIdentity},
    format::Format,
    policy::{
        default::DEFAULT_POLICIES, iam_policy_claim_name_sa, Policy, PolicyDoc, EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE,
    },
};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::{
@@ -488,7 +490,6 @@ where
            if !is_secret_key_valid(&secret) {
                return Err(IamError::InvalidSecretKeyLength.into());
            }

            cr.secret_key = secret;
        }

@@ -1,7 +1,8 @@
pub mod object;

use crate::{auth::UserIdentity, cache::Cache, policy::PolicyDoc};
use ecstore::error::Result;
use crate::cache::Cache;
use common::error::Result;
use policy::{auth::UserIdentity, policy::PolicyDoc};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use time::OffsetDateTime;

@@ -1,19 +1,17 @@
use super::{GroupInfo, MappedPolicy, Store, UserType};
use crate::{
    auth::UserIdentity,
    cache::{Cache, CacheEntity},
    error::{is_err_no_such_policy, is_err_no_such_user},
    get_global_action_cred,
    manager::{extract_jwt_claims, get_default_policyes},
    policy::PolicyDoc,
};
use common::error::{Error, Result};
use ecstore::{
    config::{
        common::{delete_config, read_config, read_config_with_metadata, save_config},
        com::{delete_config, read_config, read_config_with_metadata, save_config},
        error::is_err_config_not_found,
        RUSTFS_CONFIG_PREFIX,
    },
    error::{Error, Result},
    store::ECStore,
    store_api::{ObjectInfo, ObjectOptions},
    store_list_objects::{ObjectInfoOrErr, WalkOptions},
@@ -21,6 +19,7 @@ use ecstore::{
};
use futures::future::join_all;
use lazy_static::lazy_static;
use policy::{auth::UserIdentity, policy::PolicyDoc};
use serde::{de::DeserializeOwned, Serialize};
use std::{collections::HashMap, sync::Arc};
use tokio::sync::broadcast::{self, Receiver as B_Receiver};

108
iam/src/sys.rs
@@ -1,16 +1,3 @@
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;

use crate::arn::ARN;
use crate::auth::contains_reserved_chars;
use crate::auth::create_new_credentials_with_metadata;
use crate::auth::generate_credentials;
use crate::auth::is_access_key_valid;
use crate::auth::is_secret_key_valid;
use crate::auth::Credentials;
use crate::auth::UserIdentity;
use crate::auth::ACCOUNT_ON;
use crate::error::is_err_no_such_account;
use crate::error::is_err_no_such_temp_account;
use crate::error::Error as IamError;
@@ -18,19 +5,33 @@ use crate::get_global_action_cred;
use crate::manager::extract_jwt_claims;
use crate::manager::get_default_policyes;
use crate::manager::IamCache;
use crate::policy::action::Action;
use crate::policy::Policy;
use crate::policy::PolicyDoc;
use crate::store::MappedPolicy;
use crate::store::Store;
use crate::store::UserType;
use ecstore::error::{Error, Result};
use common::error::{Error, Result};
use ecstore::utils::crypto::base64_decode;
use ecstore::utils::crypto::base64_encode;
use madmin::AddOrUpdateUserReq;
use madmin::GroupDesc;
use policy::arn::ARN;
use policy::auth::contains_reserved_chars;
use policy::auth::create_new_credentials_with_metadata;
use policy::auth::generate_credentials;
use policy::auth::is_access_key_valid;
use policy::auth::is_secret_key_valid;
use policy::auth::Credentials;
use policy::auth::UserIdentity;
use policy::auth::ACCOUNT_ON;
use policy::policy::iam_policy_claim_name_sa;
use policy::policy::Args;
use policy::policy::Policy;
use policy::policy::PolicyDoc;
use policy::policy::EMBEDDED_POLICY_TYPE;
use policy::policy::INHERITED_POLICY_TYPE;
use serde_json::json;
use serde_json::Value;
use std::collections::HashMap;
use std::sync::Arc;
use time::OffsetDateTime;

pub const MAX_SVCSESSION_POLICY_SIZE: usize = 4096;
@@ -42,9 +43,6 @@ pub const POLICYNAME: &str = "policy";
pub const SESSION_POLICY_NAME: &str = "sessionPolicy";
pub const SESSION_POLICY_NAME_EXTRACTED: &str = "sessionPolicy-extracted";

pub const EMBEDDED_POLICY_TYPE: &str = "embedded-policy";
pub const INHERITED_POLICY_TYPE: &str = "inherited-policy";

pub struct IamSys<T> {
    store: Arc<IamCache<T>>,
    roles_map: HashMap<ARN, String>,
@@ -697,73 +695,3 @@ pub struct UpdateServiceAccountOpts {
    pub expiration: Option<OffsetDateTime>,
    pub status: Option<String>,
}

pub fn iam_policy_claim_name_sa() -> String {
    "sa-policy".to_string()
}

/// DEFAULT_VERSION is the default version.
/// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html
pub const DEFAULT_VERSION: &str = "2012-10-17";

/// Check that the data is valid.
pub trait Validator {
    type Error;
    fn is_valid(&self) -> Result<()> {
        Ok(())
    }
}
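
This Validator trait (which this commit moves into the policy crate) is implemented like the following sketch; the AccessKeyCheck type and its rule are invented for illustration:

use common::error::{Error, Result};

struct AccessKeyCheck(String);

impl Validator for AccessKeyCheck {
    type Error = Error;

    fn is_valid(&self) -> Result<()> {
        // reject obviously malformed keys; the real checks live in policy::auth
        if self.0.is_empty() {
            return Err(Error::msg("access key must not be empty"));
        }
        Ok(())
    }
}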

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Args<'a> {
    pub account: &'a str,
    pub groups: &'a Option<Vec<String>>,
    pub action: Action,
    pub bucket: &'a str,
    pub conditions: &'a HashMap<String, Vec<String>>,
    pub is_owner: bool,
    pub object: &'a str,
    pub claims: &'a HashMap<String, Value>,
    pub deny_only: bool,
}

impl Args<'_> {
    pub fn get_role_arn(&self) -> Option<&str> {
        self.claims.get("roleArn").and_then(|x| x.as_str())
    }
    pub fn get_policies(&self, policy_claim_name: &str) -> (HashSet<String>, bool) {
        get_policies_from_claims(self.claims, policy_claim_name)
    }
}
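
How a caller might assemble Args for a single evaluation (Args also moves into the policy crate here). The values are illustrative, the exact import paths depend on the policy crate's re-exports, and the action is parsed through the TryFrom<&str> impl shown in this diff rather than by naming a variant:

use policy::policy::{Action, Args};
use serde_json::Value;
use std::collections::HashMap;

fn build_args<'a>(
    claims: &'a HashMap<String, Value>,
    conditions: &'a HashMap<String, Vec<String>>,
    groups: &'a Option<Vec<String>>,
) -> Args<'a> {
    // parse the action from its wire name instead of spelling out a variant
    let action: Action = "s3:GetObject".try_into().expect("known action");
    Args {
        account: "alice",
        groups,
        action,
        bucket: "photos",
        conditions,
        is_owner: false,
        object: "2024/cat.png",
        claims,
        deny_only: false,
    }
}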

fn get_values_from_claims(claims: &HashMap<String, Value>, claim_name: &str) -> (HashSet<String>, bool) {
    let mut s = HashSet::new();
    if let Some(pname) = claims.get(claim_name) {
        if let Some(pnames) = pname.as_array() {
            for pname in pnames {
                if let Some(pname_str) = pname.as_str() {
                    for pname in pname_str.split(',') {
                        let pname = pname.trim();
                        if !pname.is_empty() {
                            s.insert(pname.to_string());
                        }
                    }
                }
            }
            return (s, true);
        } else if let Some(pname_str) = pname.as_str() {
            for pname in pname_str.split(',') {
                let pname = pname.trim();
                if !pname.is_empty() {
                    s.insert(pname.to_string());
                }
            }
            return (s, true);
        }
    }
    (s, false)
}

fn get_policies_from_claims(claims: &HashMap<String, Value>, policy_claim_name: &str) -> (HashSet<String>, bool) {
    get_values_from_claims(claims, policy_claim_name)
}
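
The claim value may be either a JSON array of strings or a single comma-separated string; both normalize to the same trimmed set. A test-style sketch of the expected behavior, callable from within this module:

use serde_json::json;
use std::collections::HashMap;

fn demo() {
    let mut claims = HashMap::new();
    claims.insert("policy".to_string(), json!(" readwrite, diagnostics "));

    let (set, found) = get_values_from_claims(&claims, "policy");
    assert!(found);
    // whitespace around each name is trimmed before insertion
    assert!(set.contains("readwrite") && set.contains("diagnostics"));

    // a missing claim reports found == false and an empty set
    let (empty, found) = get_values_from_claims(&claims, "absent");
    assert!(!found && empty.is_empty());
}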

@@ -1,4 +1,4 @@
use ecstore::error::{Error, Result};
use common::error::{Error, Result};
use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey, Header};
use rand::{Rng, RngCore};
use serde::{de::DeserializeOwned, Serialize};

36
policy/Cargo.toml
Normal file
@@ -0,0 +1,36 @@
[package]
name = "policy"
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true

[lints]
workspace = true

[dependencies]
tokio.workspace = true
log.workspace = true
time = { workspace = true, features = ["serde-human-readable"] }
serde = { workspace = true, features = ["derive", "rc"] }
serde_json.workspace = true
async-trait.workspace = true
thiserror.workspace = true
strum = { version = "0.27.1", features = ["derive"] }
arc-swap = "1.7.1"
crypto = { path = "../crypto" }
ipnetwork = { version = "0.21.1", features = ["serde"] }
itertools = "0.14.0"
futures.workspace = true
rand.workspace = true
base64-simd = "0.8.0"
jsonwebtoken = "9.3.0"
tracing.workspace = true
madmin.workspace = true
lazy_static.workspace = true
regex = "1.11.1"
common.workspace = true

[dev-dependencies]
test-case.workspace = true

@@ -1,4 +1,4 @@
use ecstore::error::{Error, Result};
use common::error::{Error, Result};
use regex::Regex;

const ARN_PREFIX_ARN: &str = "arn";

@@ -1,9 +1,8 @@
use crate::error::Error as IamError;
use crate::policy::Policy;
use crate::sys::{iam_policy_claim_name_sa, Validator, INHERITED_POLICY_TYPE};
use crate::policy::{iam_policy_claim_name_sa, Policy, Validator, INHERITED_POLICY_TYPE};
use crate::utils;
use crate::utils::extract_claims;
use ecstore::error::{Error, Result};
use common::error::{Error, Result};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};

145
policy/src/error.rs
Normal file
@@ -0,0 +1,145 @@
use crate::policy;

#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error(transparent)]
    PolicyError(#[from] policy::Error),

    #[error("ecstore error: {0}")]
    EcstoreError(common::error::Error),

    #[error("{0}")]
    StringError(String),

    #[error("crypto: {0}")]
    CryptoError(#[from] crypto::Error),

    #[error("user '{0}' does not exist")]
    NoSuchUser(String),

    #[error("account '{0}' does not exist")]
    NoSuchAccount(String),

    #[error("service account '{0}' does not exist")]
    NoSuchServiceAccount(String),

    #[error("temp account '{0}' does not exist")]
    NoSuchTempAccount(String),

    #[error("group '{0}' does not exist")]
    NoSuchGroup(String),

    #[error("policy does not exist")]
    NoSuchPolicy,

    #[error("policy in use")]
    PolicyInUse,

    #[error("group not empty")]
    GroupNotEmpty,

    #[error("invalid arguments specified")]
    InvalidArgument,

    #[error("not initialized")]
    IamSysNotInitialized,

    #[error("invalid service type: {0}")]
    InvalidServiceType(String),

    #[error("malformed credential")]
    ErrCredMalformed,

    #[error("CredNotInitialized")]
    CredNotInitialized,

    #[error("invalid access key length")]
    InvalidAccessKeyLength,

    #[error("invalid secret key length")]
    InvalidSecretKeyLength,

    #[error("access key contains reserved characters =,")]
    ContainsReservedChars,

    #[error("group name contains reserved characters =,")]
    GroupNameContainsReservedChars,

    #[error("jwt err {0}")]
    JWTError(jsonwebtoken::errors::Error),

    #[error("no access key")]
    NoAccessKey,

    #[error("invalid token")]
    InvalidToken,

    #[error("invalid access_key")]
    InvalidAccessKey,

    #[error("action not allowed")]
    IAMActionNotAllowed,

    #[error("invalid expiration")]
    InvalidExpiration,

    #[error("no secret key with access key")]
    NoSecretKeyWithAccessKey,

    #[error("no access key with secret key")]
    NoAccessKeyWithSecretKey,

    #[error("policy too large")]
    PolicyTooLarge,
}
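
With the #[from] conversions above, the ? operator lifts a crypto::Error (or the inner policy error) into this enum automatically. A minimal sketch; decrypt_blob is a hypothetical helper returning Result<Vec<u8>, crypto::Error>:

fn unseal(data: &[u8]) -> Result<Vec<u8>, Error> {
    // the crypto::Error -> Error conversion is generated by #[from]
    let plain = decrypt_blob(data)?;
    Ok(plain)
}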

// pub fn is_err_no_such_user(e: &Error) -> bool {
//     matches!(e, Error::NoSuchUser(_))
// }

pub fn is_err_no_such_policy(err: &common::error::Error) -> bool {
    if let Some(e) = err.downcast_ref::<Error>() {
        matches!(e, Error::NoSuchPolicy)
    } else {
        false
    }
}

pub fn is_err_no_such_user(err: &common::error::Error) -> bool {
    if let Some(e) = err.downcast_ref::<Error>() {
        matches!(e, Error::NoSuchUser(_))
    } else {
        false
    }
}

pub fn is_err_no_such_account(err: &common::error::Error) -> bool {
    if let Some(e) = err.downcast_ref::<Error>() {
        matches!(e, Error::NoSuchAccount(_))
    } else {
        false
    }
}

pub fn is_err_no_such_temp_account(err: &common::error::Error) -> bool {
    if let Some(e) = err.downcast_ref::<Error>() {
        matches!(e, Error::NoSuchTempAccount(_))
    } else {
        false
    }
}

pub fn is_err_no_such_group(err: &common::error::Error) -> bool {
    if let Some(e) = err.downcast_ref::<Error>() {
        matches!(e, Error::NoSuchGroup(_))
    } else {
        false
    }
}

pub fn is_err_no_such_service_account(err: &common::error::Error) -> bool {
    if let Some(e) = err.downcast_ref::<Error>() {
        matches!(e, Error::NoSuchServiceAccount(_))
    } else {
        false
    }
}
7
policy/src/lib.rs
Normal file
@@ -0,0 +1,7 @@
pub mod arn;
pub mod auth;
pub mod error;
pub mod format;
pub mod policy;
pub mod service_type;
pub mod utils;
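
These seven modules are the new crate's whole public surface; the rest of this diff reaches it through paths like the following:

use policy::arn::ARN;
use policy::auth::Credentials;
use policy::policy::{Args, Policy, PolicyDoc};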

@@ -5,6 +5,7 @@ mod function;
mod id;
#[allow(clippy::module_inception)]
mod policy;
mod principal;
pub mod resource;
pub mod statement;
pub(crate) mod utils;
@@ -15,11 +16,14 @@ pub use doc::PolicyDoc;
pub use effect::Effect;
pub use function::Functions;
pub use id::ID;
pub use policy::{default::DEFAULT_POLICIES, Policy};
pub use policy::*;
pub use principal::Principal;
pub use resource::ResourceSet;

pub use statement::Statement;

pub const EMBEDDED_POLICY_TYPE: &str = "embedded-policy";
pub const INHERITED_POLICY_TYPE: &str = "inherited-policy";

#[derive(thiserror::Error, Debug)]
#[cfg_attr(test, derive(Eq, PartialEq))]
pub enum Error {

@@ -1,11 +1,9 @@
use ecstore::error::{Error, Result};
use common::error::{Error, Result};
use serde::{Deserialize, Serialize};
use std::{collections::HashSet, ops::Deref};
use strum::{EnumString, IntoStaticStr};

use crate::sys::Validator;

use super::{utils::wildcard, Error as IamError};
use super::{utils::wildcard, Error as IamError, Validator};

#[derive(Serialize, Deserialize, Clone, Default, Debug)]
pub struct ActionSet(pub HashSet<Action>);
@@ -49,7 +47,7 @@ impl PartialEq for ActionSet {
    }
}

#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, Debug)]
#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, Debug, Copy)]
#[serde(try_from = "&str", untagged)]
pub enum Action {
    S3Action(S3Action),
@@ -107,7 +105,7 @@ impl TryFrom<&str> for Action {
    }
}

#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug)]
#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug, Copy)]
#[cfg_attr(test, derive(Default))]
#[serde(try_from = "&str", into = "&str")]
pub enum S3Action {
@@ -234,7 +232,7 @@ pub enum S3Action {
    PutObjectFanOutAction,
}

#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug)]
#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug, Copy)]
#[serde(try_from = "&str", into = "&str")]
pub enum AdminAction {
    #[strum(serialize = "admin:*")]
@@ -265,11 +263,11 @@ pub enum AdminAction {
    CreateServiceAccountAdminAction,
}

#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug)]
#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug, Copy)]
#[serde(try_from = "&str", into = "&str")]
pub enum StsAction {}

#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug)]
#[derive(Serialize, Deserialize, Hash, PartialEq, Eq, Clone, EnumString, IntoStaticStr, Debug, Copy)]
#[serde(try_from = "&str", into = "&str")]
pub enum KmsAction {
    #[strum(serialize = "kms:*")]
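
Adding Copy to these enums is possible because every variant is fieldless or wraps another fieldless enum, and it lets actions be passed by value without clone() noise. A small illustration of what the derive buys:

fn log_action(a: Action) {
    println!("evaluating {a:?}");
}

fn demo(a: Action) {
    log_action(a); // moved by copy...
    log_action(a); // ...and still usable afterwards, since Action is now Copy
}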

@@ -1,8 +1,8 @@
use ecstore::error::{Error, Result};
use common::error::{Error, Result};
use serde::{Deserialize, Serialize};
use strum::{EnumString, IntoStaticStr};

use crate::sys::Validator;
use super::Validator;

#[derive(Serialize, Clone, Deserialize, EnumString, IntoStaticStr, Default, Debug, PartialEq)]
#[serde(try_from = "&str", into = "&str")]

Some files were not shown because too many files have changed in this diff.