From b4008605385ebce7fa059c6422d357da0dfdccc6 Mon Sep 17 00:00:00 2001 From: weisd Date: Tue, 12 Nov 2024 17:38:36 +0800 Subject: [PATCH] test accountinfo --- Cargo.lock | 1 + ecstore/src/bucket/policy/action.rs | 2 +- ecstore/src/bucket/policy/resource.rs | 8 +++- ecstore/src/store.rs | 23 +++++----- ecstore/src/store_api.rs | 23 +++++++--- router/Cargo.toml | 1 + router/src/handlers.rs | 66 +++++++++++++++++++++++++-- scripts/run.sh | 4 +- 8 files changed, 102 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 926af7b0..d565ff3b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2115,6 +2115,7 @@ dependencies = [ "s3s", "serde", "serde-xml-rs", + "serde_json", "serde_urlencoded", "time", "tracing", diff --git a/ecstore/src/bucket/policy/action.rs b/ecstore/src/bucket/policy/action.rs index 2b87bb1a..d444e52d 100644 --- a/ecstore/src/bucket/policy/action.rs +++ b/ecstore/src/bucket/policy/action.rs @@ -14,7 +14,7 @@ use super::condition::{ #[derive(Debug, Deserialize, Serialize, Default, Clone, PartialEq, Eq)] -pub struct ActionSet(HashSet<Action>); +pub struct ActionSet(pub HashSet<Action>); impl ActionSet { pub fn is_match(&self, act: &Action) -> bool { diff --git a/ecstore/src/bucket/policy/resource.rs b/ecstore/src/bucket/policy/resource.rs index cfa8e1cd..7e2c6472 100644 --- a/ecstore/src/bucket/policy/resource.rs +++ b/ecstore/src/bucket/policy/resource.rs @@ -41,6 +41,12 @@ pub struct Resource { } impl Resource { + pub fn new(pattern: &str) -> Self { + Self { + pattern: pattern.to_owned(), + rtype: ResourceARNType::ResourceARNS3, + } + } pub fn validate_bucket(&self, bucket: &str) -> Result<()> { self.validate()?; if !wildcard::match_pattern(&self.pattern, bucket) @@ -187,7 +193,7 @@ impl<'de> Deserialize<'de> for Resource { #[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq, Eq)] #[serde(transparent)] -pub struct ResourceSet(HashSet<Resource>); +pub struct ResourceSet(pub HashSet<Resource>); impl ResourceSet { pub fn validate_bucket(&self, bucket: &str) -> 
Result<()> { diff --git a/ecstore/src/store.rs b/ecstore/src/store.rs index 21ccceb3..1057c707 100644 --- a/ecstore/src/store.rs +++ b/ecstore/src/store.rs @@ -819,7 +819,7 @@ lazy_static! { #[async_trait::async_trait] impl StorageAPI for ECStore { async fn backend_info(&self) -> BackendInfo { - let (standard_scparities, rrscparities) = { + let (standard_sc_parity, rr_sc_parity) = { if let Some(sc) = GLOBAL_StorageClass.get() { let sc_parity = sc .get_parity_for_sc(storageclass::CLASS_STANDARD) @@ -833,17 +833,17 @@ impl StorageAPI for ECStore { } }; - let mut standard_scdata = Vec::new(); - let mut rrscdata = Vec::new(); + let mut standard_sc_data = Vec::new(); + let mut rr_sc_data = Vec::new(); let mut drives_per_set = Vec::new(); let mut total_sets = Vec::new(); for (idx, set_count) in self.set_drive_counts().iter().enumerate() { - if let Some(sc_parity) = standard_scparities { - standard_scdata.push(set_count - sc_parity); + if let Some(sc_parity) = standard_sc_parity { + standard_sc_data.push(set_count - sc_parity); } - if let Some(rr_sc_parity) = rrscparities { - rrscdata.push(set_count - rr_sc_parity); + if let Some(sc_parity) = rr_sc_parity { + rr_sc_data.push(set_count - sc_parity); } total_sets.push(self.pools[idx].set_count); drives_per_set.push(*set_count); @@ -853,12 +853,13 @@ impl StorageAPI for ECStore { backend_type: BackendByte::Erasure, online_disks: BackendDisks::new(), offline_disks: BackendDisks::new(), - standard_scdata, - standard_scparities, - rrscdata, - rrscparities, + standard_sc_data, + standard_sc_parity, + rr_sc_data, + rr_sc_parity, total_sets, drives_per_set, + ..Default::default() } } async fn storage_info(&self) -> StorageInfo { diff --git a/ecstore/src/store_api.rs b/ecstore/src/store_api.rs index 332de108..16cf36b3 100644 --- a/ecstore/src/store_api.rs +++ b/ecstore/src/store_api.rs @@ -780,7 +780,7 @@ pub struct DeletedObject { // pub replication_state: ReplicationState, } -#[derive(Debug, Default)] +#[derive(Debug, Default, 
Serialize)] pub enum BackendByte { #[default] Unknown, @@ -824,7 +824,7 @@ pub struct StorageInfo { pub backend: BackendInfo, } -#[derive(Debug, Default)] +#[derive(Debug, Default, Serialize)] pub struct BackendDisks(HashMap<String, usize>); impl BackendDisks { @@ -836,15 +836,24 @@ } } -#[derive(Debug, Default)] +#[derive(Debug, Default, Serialize)] +#[serde(rename_all = "PascalCase", default)] pub struct BackendInfo { pub backend_type: BackendByte, pub online_disks: BackendDisks, pub offline_disks: BackendDisks, - pub standard_scdata: Vec<usize>, - pub standard_scparities: Option<usize>, - pub rrscdata: Vec<usize>, - pub rrscparities: Option<usize>, + #[serde(rename = "StandardSCData")] + pub standard_sc_data: Vec<usize>, + #[serde(rename = "StandardSCParities")] + pub standard_sc_parities: Vec<usize>, + #[serde(rename = "StandardSCParity")] + pub standard_sc_parity: Option<usize>, + #[serde(rename = "RRSCData")] + pub rr_sc_data: Vec<usize>, + #[serde(rename = "RRSCParities")] + pub rr_sc_parities: Vec<usize>, + #[serde(rename = "RRSCParity")] + pub rr_sc_parity: Option<usize>, pub total_sets: Vec<usize>, pub drives_per_set: Vec<usize>, } diff --git a/router/Cargo.toml b/router/Cargo.toml index 27dd7e41..447cd0ec 100644 --- a/router/Cargo.toml +++ b/router/Cargo.toml @@ -21,3 +21,4 @@ quick-xml = "0.37.0" serde-xml-rs = "0.6.0" ecstore.workspace = true time.workspace = true +serde_json.workspace = true diff --git a/router/src/handlers.rs b/router/src/handlers.rs index 111cacf0..6206dcb8 100644 --- a/router/src/handlers.rs +++ b/router/src/handlers.rs @@ -1,16 +1,24 @@ +use std::collections::HashSet; + use crate::router::Operation; +use ecstore::bucket::policy::action::{Action, ActionSet}; +use ecstore::bucket::policy::bucket_policy::{BPStatement, BucketPolicy}; +use ecstore::bucket::policy::effect::Effect; +use ecstore::bucket::policy::resource::{Resource, ResourceSet}; +use ecstore::store_api::StorageAPI; use ecstore::utils::xml; +use ecstore::{new_object_layer_fn, store_api::BackendInfo}; use hyper::StatusCode; use matchit::Params; +use 
s3s::S3ErrorCode; use s3s::{ dto::{AssumeRoleOutput, Credentials, Timestamp}, - s3_error, Body, S3Request, S3Response, S3Result, + s3_error, Body, S3Error, S3Request, S3Response, S3Result, }; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use serde_urlencoded::from_bytes; use time::{Duration, OffsetDateTime}; use tracing::warn; - #[derive(Deserialize, Debug, Default)] #[serde(rename_all = "PascalCase", default)] pub struct AssumeRoleRequest { @@ -88,6 +96,14 @@ impl Operation for AssumeRoleHandle { } } +#[derive(Debug, Serialize, Default)] +#[serde(rename_all = "PascalCase", default)] +pub struct AccountInfo { + pub account_name: String, + pub server: BackendInfo, + pub policy: BucketPolicy, +} + pub struct AccountInfoHandler {} #[async_trait::async_trait] impl Operation for AccountInfoHandler { @@ -98,7 +114,49 @@ impl Operation for AccountInfoHandler { warn!("AccountInfoHandler cread {:?}", &cred); - return Err(s3_error!(NotImplemented)); + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())), + }; + + // test policy + + let mut s3_all_act = HashSet::with_capacity(1); + s3_all_act.insert(Action::AllActions); + + let mut all_res = HashSet::with_capacity(1); + all_res.insert(Resource::new("*")); + + let bucket_policy = BucketPolicy { + id: "".to_owned(), + version: "2012-10-17".to_owned(), + statements: vec![BPStatement { + sid: "".to_owned(), + effect: Effect::Allow, + actions: ActionSet(s3_all_act), + resources: ResourceSet(all_res), + ..Default::default() + }], + }; + + // let policy = bucket_policy + // .marshal_msg() + // .map_err(|_e| S3Error::with_message(S3ErrorCode::InternalError, "parse policy failed"))?; + + let backend_info = store.backend_info().await; + + let info = AccountInfo { + account_name: cred.access_key, + server: backend_info, + policy: bucket_policy, + }; + + let 
output = serde_json::to_string(&info) + .map_err(|_e| S3Error::with_message(S3ErrorCode::InternalError, "parse accountInfo failed"))?; + + Ok(S3Response::new((StatusCode::OK, Body::from(output)))) } } diff --git a/scripts/run.sh b/scripts/run.sh index c67370d1..e7bd04f0 100755 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -14,8 +14,8 @@ fi export RUSTFS_STORAGE_CLASS_INLINE_BLOCK="512 KB" -DATA_DIR_ARG="./target/volume/test{0...4}" -# DATA_DIR_ARG="./target/volume/test" +# DATA_DIR_ARG="./target/volume/test{0...4}" +DATA_DIR_ARG="./target/volume/test" if [ -n "$1" ]; then DATA_DIR_ARG="$1"