mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 01:30:33 +00:00)

Commit: todo
.github/dependabot.yml | 5 (vendored)
@@ -26,6 +26,9 @@ updates:
       day: "monday"
       timezone: "Asia/Shanghai"
       time: "08:00"
+    ignore:
+      - dependency-name: "object_store"
+        versions: [ "0.13.x" ]
     groups:
       s3s:
         update-types:
@@ -36,4 +39,4 @@ updates:
           - "s3s-*"
       dependencies:
         patterns:
-          - "*"
+          - "*"
Cargo.lock | 673 (generated)
File diff suppressed because it is too large.
Cargo.toml | 36
@@ -98,7 +98,7 @@ rustfs-zip = { path = "./crates/zip", version = "0.0.5" }

 # Async Runtime and Networking
 async-channel = "2.5.0"
-async-compression = { version = "0.4.19" }
+async-compression = { version = "0.4.37" }
 async-recursion = "1.1.1"
 async-trait = "0.1.89"
 axum = "0.8.8"
@@ -123,7 +123,7 @@ tokio-util = { version = "0.7.18", features = ["io", "compat"] }
 tonic = { version = "0.14.2", features = ["gzip"] }
 tonic-prost = { version = "0.14.2" }
 tonic-prost-build = { version = "0.14.2" }
-tower = { version = "0.5.2", features = ["timeout"] }
+tower = { version = "0.5.3", features = ["timeout"] }
 tower-http = { version = "0.6.8", features = ["cors"] }

 # Serialization and Data Formats
@@ -147,11 +147,11 @@ aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
 argon2 = { version = "0.6.0-rc.5" }
 blake3 = { version = "1.8.3", features = ["rayon", "mmap"] }
 chacha20poly1305 = { version = "0.11.0-rc.2" }
-crc-fast = "1.6.0"
+crc-fast = "1.9.0"
 hmac = { version = "0.13.0-rc.3" }
 jsonwebtoken = { version = "10.2.0", features = ["aws_lc_rs"] }
-pbkdf2 = "0.13.0-rc.6"
-rsa = { version = "0.10.0-rc.11" }
+pbkdf2 = "0.13.0-rc.7"
+rsa = { version = "0.10.0-rc.12" }
 rustls = { version = "0.23.36", default-features = false, features = ["aws-lc-rs", "logging", "tls12", "prefer-post-quantum", "std"] }
 rustls-pemfile = "2.2.0"
 rustls-pki-types = "1.13.2"
@@ -161,9 +161,9 @@ subtle = "2.6"
 zeroize = { version = "1.8.2", features = ["derive"] }

 # Time and Date
-chrono = { version = "0.4.42", features = ["serde"] }
+chrono = { version = "0.4.43", features = ["serde"] }
 humantime = "2.3.0"
-time = { version = "0.3.44", features = ["std", "parsing", "formatting", "macros", "serde"] }
+time = { version = "0.3.45", features = ["std", "parsing", "formatting", "macros", "serde"] }

 # Utilities and Tools
 anyhow = "1.0.100"
@@ -174,7 +174,7 @@ atomic_enum = "0.3.0"
 aws-config = { version = "1.8.12" }
 aws-credential-types = { version = "1.2.11" }
 aws-sdk-s3 = { version = "1.119.0", default-features = false, features = ["sigv4a", "default-https-client", "rt-tokio"] }
-aws-smithy-types = { version = "1.3.5" }
+aws-smithy-types = { version = "1.3.6" }
 base64 = "0.22.1"
 base64-simd = "0.8.0"
 brotli = "8.0.2"
@@ -184,16 +184,16 @@ const-str = { version = "1.0.0", features = ["std", "proc"] }
 convert_case = "0.10.0"
 criterion = { version = "0.8", features = ["html_reports"] }
 crossbeam-queue = "0.3.12"
-datafusion = "51.0.0"
+datafusion = "52.0.0"
 derive_builder = "0.20.2"
 dunce = "1.0.5"
 enumset = "1.1.10"
 faster-hex = "0.10.0"
-flate2 = "1.1.5"
+flate2 = "1.1.8"
 flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv", "json"] }
 glob = "0.3.3"
-google-cloud-storage = "1.5.0"
-google-cloud-auth = "1.3.0"
+google-cloud-storage = "1.6.0"
+google-cloud-auth = "1.4.0"
 hashbrown = { version = "0.16.1", features = ["serde", "rayon"] }
 heed = { version = "0.22.0" }
 hex-simd = "0.8.0"
@@ -202,7 +202,7 @@ ipnetwork = { version = "0.21.1", features = ["serde"] }
 lazy_static = "1.5.0"
 libc = "0.2.180"
 libsystemd = "0.7.2"
-local-ip-address = "0.6.8"
+local-ip-address = "0.6.9"
 lz4 = "1.28.1"
 matchit = "0.9.1"
 md-5 = "0.11.0-rc.3"
@@ -225,9 +225,9 @@ rayon = "1.11.0"
 reed-solomon-simd = { version = "3.1.0" }
 regex = { version = "1.12.2" }
 rumqttc = { version = "0.25.1" }
-rust-embed = { version = "8.9.0" }
+rust-embed = { version = "8.11.0" }
 rustc-hash = { version = "2.1.1" }
-s3s = { version = "0.13.0-alpha", features = ["minio"], git = "https://github.com/s3s-project/s3s.git", branch = "main" }
+s3s = { version = "0.13.0-alpha.2", features = ["minio"] }
 serial_test = "3.3.1"
 shadow-rs = { version = "1.5.0", default-features = false }
 siphasher = "1.0.1"
@@ -245,7 +245,7 @@ thiserror = "2.0.17"
 tracing = { version = "0.1.44" }
 tracing-appender = "0.2.4"
 tracing-error = "0.2.1"
-tracing-opentelemetry = "0.32.0"
+tracing-opentelemetry = "0.32.1"
 tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] }
 transform-stream = "0.3.1"
 url = "2.5.8"
@@ -256,7 +256,7 @@ walkdir = "2.5.0"
 wildmatch = { version = "2.6.1", features = ["serde"] }
 windows = { version = "0.62.2" }
 xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
-zip = "7.0.0"
+zip = "7.1.0"
 zstd = "0.13.3"

 # Observability and Metrics
@@ -272,7 +272,7 @@ opentelemetry-stdout = { version = "0.31.0" }
 libunftp = "0.21.0"
 russh = { version = "0.56.0", features = ["aws-lc-rs", "rsa"], default-features = false }
 russh-sftp = "2.1.1"
-ssh-key = { version = "0.7.0-rc.4", features = ["std", "rsa", "ed25519"] }
+ssh-key = { version = "0.7.0-rc.6", features = ["std", "rsa", "ed25519"] }
 suppaftp = { version = "7.1.0", features = ["tokio", "tokio-rustls", "rustls"] }
 rcgen = "0.14.6"
@@ -12,8 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use crate::disk::error::DiskError;
-use crate::disk::{self, DiskAPI as _, DiskStore};
+use crate::disk::{self, DiskAPI as _, DiskStore, error::DiskError};
 use crate::erasure_coding::{BitrotReader, BitrotWriterWrapper, CustomWriter};
 use rustfs_utils::HashAlgorithm;
 use std::io::Cursor;
@@ -13,6 +13,14 @@
 // limitations under the License.

 use crate::bucket::metadata::BucketMetadata;
+use crate::bucket::metadata_sys::get_bucket_targets_config;
+use crate::bucket::metadata_sys::get_replication_config;
+use crate::bucket::replication::ObjectOpts;
+use crate::bucket::replication::ReplicationConfigurationExt;
+use crate::bucket::target::ARN;
+use crate::bucket::target::BucketTargetType;
+use crate::bucket::target::{self, BucketTarget, BucketTargets, Credentials};
+use crate::bucket::versioning_sys::BucketVersioningSys;
 use aws_credential_types::Credentials as SdkCredentials;
 use aws_sdk_s3::config::Region as SdkRegion;
 use aws_sdk_s3::error::SdkError;
@@ -52,15 +60,6 @@ use tracing::warn;
 use url::Url;
 use uuid::Uuid;

-use crate::bucket::metadata_sys::get_bucket_targets_config;
-use crate::bucket::metadata_sys::get_replication_config;
-use crate::bucket::replication::ObjectOpts;
-use crate::bucket::replication::ReplicationConfigurationExt;
-use crate::bucket::target::ARN;
-use crate::bucket::target::BucketTargetType;
-use crate::bucket::target::{self, BucketTarget, BucketTargets, Credentials};
-use crate::bucket::versioning_sys::BucketVersioningSys;
-
 const DEFAULT_HEALTH_CHECK_DURATION: Duration = Duration::from_secs(5);
 const DEFAULT_HEALTH_CHECK_RELOAD_DURATION: Duration = Duration::from_secs(30 * 60);
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use super::lifecycle;
+use crate::bucket::lifecycle::lifecycle;

 #[derive(Debug, Clone, Default)]
 pub enum LcEventSrc {
@@ -18,6 +18,8 @@
 #![allow(unused_must_use)]
 #![allow(clippy::all)]

+use crate::bucket::lifecycle::rule::TransitionOps;
+use crate::store_api::ObjectInfo;
 use rustfs_filemeta::{ReplicationStatusType, VersionPurgeStatusType};
 use s3s::dto::{
     BucketLifecycleConfiguration, ExpirationStatus, LifecycleExpiration, LifecycleRule, NoncurrentVersionTransition,
@@ -33,19 +35,15 @@ use time::{self, Duration, OffsetDateTime};
 use tracing::info;
 use uuid::Uuid;

-use crate::bucket::lifecycle::rule::TransitionOps;
-use crate::store_api::ObjectInfo;
-
 pub const TRANSITION_COMPLETE: &str = "complete";
 pub const TRANSITION_PENDING: &str = "pending";

-const ERR_LIFECYCLE_TOO_MANY_RULES: &str = "Lifecycle configuration allows a maximum of 1000 rules";
 const ERR_LIFECYCLE_NO_RULE: &str = "Lifecycle configuration should have at least one rule";
 const ERR_LIFECYCLE_DUPLICATE_ID: &str = "Rule ID must be unique. Found same ID for more than one rule";
 const _ERR_XML_NOT_WELL_FORMED: &str =
     "The XML you provided was not well-formed or did not validate against our published schema";
 const ERR_LIFECYCLE_BUCKET_LOCKED: &str =
     "ExpiredObjectAllVersions element and DelMarkerExpiration action cannot be used on an retention bucket";
+const ERR_LIFECYCLE_TOO_MANY_RULES: &str = "Lifecycle configuration should have at most 1000 rules";

 pub use rustfs_common::metrics::IlmAction;
@@ -18,15 +18,13 @@
 #![allow(unused_must_use)]
 #![allow(clippy::all)]

+use rustfs_common::data_usage::TierStats;
 use sha2::Sha256;

 use std::collections::HashMap;
 use std::ops::Sub;
 use time::OffsetDateTime;
 use tracing::{error, warn};

-use rustfs_common::data_usage::TierStats;
-
 pub type DailyAllTierStats = HashMap<String, LastDayTierStats>;

 #[derive(Clone)]
@@ -18,16 +18,15 @@
 #![allow(unused_must_use)]
 #![allow(clippy::all)]

+use crate::bucket::lifecycle::bucket_lifecycle_ops::{ExpiryOp, GLOBAL_ExpiryState, TransitionedObject};
+use crate::bucket::lifecycle::lifecycle::{self, ObjectOpts};
+use crate::global::GLOBAL_TierConfigMgr;
 use sha2::{Digest, Sha256};
 use std::any::Any;
 use std::io::Write;
 use uuid::Uuid;
 use xxhash_rust::xxh64;

-use super::bucket_lifecycle_ops::{ExpiryOp, GLOBAL_ExpiryState, TransitionedObject};
-use super::lifecycle::{self, ObjectOpts};
-use crate::global::GLOBAL_TierConfigMgr;
-
 static XXHASH_SEED: u64 = 0;

 #[derive(Default)]
@@ -12,20 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use super::{quota::BucketQuota, target::BucketTargets};
-
 use super::object_lock::ObjectLockApi;
 use super::versioning::VersioningApi;
+use super::{quota::BucketQuota, target::BucketTargets};
 use crate::bucket::utils::deserialize;
 use crate::config::com::{read_config, save_config};
+use crate::disk::BUCKET_META_PREFIX;
 use crate::error::{Error, Result};
 use crate::new_object_layer_fn;
+use crate::store::ECStore;
 use byteorder::{BigEndian, ByteOrder, LittleEndian};
 use rmp_serde::Serializer as rmpSerializer;
 use rustfs_policy::policy::BucketPolicy;
 use s3s::dto::{
-    BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ReplicationConfiguration,
-    ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
+    BucketLifecycleConfiguration, CORSConfiguration, NotificationConfiguration, ObjectLockConfiguration,
+    ReplicationConfiguration, ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
 };
 use serde::Serializer;
 use serde::{Deserialize, Serialize};
@@ -34,9 +35,6 @@ use std::sync::Arc;
 use time::OffsetDateTime;
 use tracing::error;

-use crate::disk::BUCKET_META_PREFIX;
-use crate::store::ECStore;
-
 pub const BUCKET_METADATA_FILE: &str = ".metadata.bin";
 pub const BUCKET_METADATA_FORMAT: u16 = 1;
 pub const BUCKET_METADATA_VERSION: u16 = 1;
@@ -51,6 +49,7 @@ pub const OBJECT_LOCK_CONFIG: &str = "object-lock.xml";
 pub const BUCKET_VERSIONING_CONFIG: &str = "versioning.xml";
 pub const BUCKET_REPLICATION_CONFIG: &str = "replication.xml";
 pub const BUCKET_TARGETS_FILE: &str = "bucket-targets.json";
+pub const BUCKET_CORS_CONFIG: &str = "cors.xml";

 #[derive(Debug, Deserialize, Serialize, Clone)]
 #[serde(rename_all = "PascalCase", default)]
@@ -69,6 +68,7 @@ pub struct BucketMetadata {
     pub replication_config_xml: Vec<u8>,
     pub bucket_targets_config_json: Vec<u8>,
     pub bucket_targets_config_meta_json: Vec<u8>,
+    pub cors_config_xml: Vec<u8>,

     pub policy_config_updated_at: OffsetDateTime,
     pub object_lock_config_updated_at: OffsetDateTime,
@@ -81,6 +81,7 @@ pub struct BucketMetadata {
     pub notification_config_updated_at: OffsetDateTime,
     pub bucket_targets_config_updated_at: OffsetDateTime,
     pub bucket_targets_config_meta_updated_at: OffsetDateTime,
+    pub cors_config_updated_at: OffsetDateTime,

     #[serde(skip)]
     pub new_field_updated_at: OffsetDateTime,
@@ -107,6 +108,8 @@ pub struct BucketMetadata {
     pub bucket_target_config: Option<BucketTargets>,
     #[serde(skip)]
     pub bucket_target_config_meta: Option<HashMap<String, String>>,
+    #[serde(skip)]
+    pub cors_config: Option<CORSConfiguration>,
 }

 impl Default for BucketMetadata {
@@ -126,6 +129,7 @@ impl Default for BucketMetadata {
             replication_config_xml: Default::default(),
             bucket_targets_config_json: Default::default(),
             bucket_targets_config_meta_json: Default::default(),
+            cors_config_xml: Default::default(),
             policy_config_updated_at: OffsetDateTime::UNIX_EPOCH,
             object_lock_config_updated_at: OffsetDateTime::UNIX_EPOCH,
             encryption_config_updated_at: OffsetDateTime::UNIX_EPOCH,
@@ -137,6 +141,7 @@ impl Default for BucketMetadata {
             notification_config_updated_at: OffsetDateTime::UNIX_EPOCH,
             bucket_targets_config_updated_at: OffsetDateTime::UNIX_EPOCH,
             bucket_targets_config_meta_updated_at: OffsetDateTime::UNIX_EPOCH,
+            cors_config_updated_at: OffsetDateTime::UNIX_EPOCH,
             new_field_updated_at: OffsetDateTime::UNIX_EPOCH,
             policy_config: Default::default(),
             notification_config: Default::default(),
@@ -149,6 +154,7 @@ impl Default for BucketMetadata {
             replication_config: Default::default(),
             bucket_target_config: Default::default(),
             bucket_target_config_meta: Default::default(),
+            cors_config: Default::default(),
         }
     }
 }
@@ -297,6 +303,10 @@ impl BucketMetadata {
                 self.bucket_targets_config_json = data.clone();
                 self.bucket_targets_config_updated_at = updated;
             }
+            BUCKET_CORS_CONFIG => {
+                self.cors_config_xml = data;
+                self.cors_config_updated_at = updated;
+            }
             _ => return Err(Error::other(format!("config file not found : {config_file}"))),
         }
@@ -367,6 +377,9 @@ impl BucketMetadata {
         } else {
             self.bucket_target_config = Some(BucketTargets::default())
         }
+        if !self.cors_config_xml.is_empty() {
+            self.cors_config = Some(deserialize::<CORSConfiguration>(&self.cors_config_xml)?);
+        }

         Ok(())
     }
@@ -12,6 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use super::metadata::{BucketMetadata, load_bucket_metadata};
+use super::quota::BucketQuota;
+use super::target::BucketTargets;
 use crate::StorageAPI as _;
 use crate::bucket::bucket_target_sys::BucketTargetSys;
 use crate::bucket::metadata::{BUCKET_LIFECYCLE_CONFIG, load_bucket_metadata_parse};
@@ -20,12 +23,13 @@ use crate::error::{Error, Result, is_err_bucket_not_found};
 use crate::global::{GLOBAL_Endpoints, is_dist_erasure, is_erasure, new_object_layer_fn};
 use crate::store::ECStore;
 use futures::future::join_all;
+use lazy_static::lazy_static;
 use rustfs_common::heal_channel::HealOpts;
 use rustfs_policy::policy::BucketPolicy;
 use s3s::dto::ReplicationConfiguration;
 use s3s::dto::{
-    BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ServerSideEncryptionConfiguration, Tagging,
-    VersioningConfiguration,
+    BucketLifecycleConfiguration, CORSConfiguration, NotificationConfiguration, ObjectLockConfiguration,
+    ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
 };
 use std::collections::HashSet;
 use std::sync::OnceLock;
@@ -36,12 +40,6 @@ use tokio::sync::RwLock;
 use tokio::time::sleep;
 use tracing::error;

-use super::metadata::{BucketMetadata, load_bucket_metadata};
-use super::quota::BucketQuota;
-use super::target::BucketTargets;
-
-use lazy_static::lazy_static;
-
 lazy_static! {
     pub static ref GLOBAL_BucketMetadataSys: OnceLock<Arc<RwLock<BucketMetadataSys>>> = OnceLock::new();
 }
@@ -112,6 +110,13 @@ pub async fn get_bucket_targets_config(bucket: &str) -> Result<BucketTargets> {
     bucket_meta_sys.get_bucket_targets_config(bucket).await
 }

+pub async fn get_cors_config(bucket: &str) -> Result<(CORSConfiguration, OffsetDateTime)> {
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
+    let bucket_meta_sys = bucket_meta_sys_lock.read().await;
+
+    bucket_meta_sys.get_cors_config(bucket).await
+}
+
 pub async fn get_tagging_config(bucket: &str) -> Result<(Tagging, OffsetDateTime)> {
     let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
     let bucket_meta_sys = bucket_meta_sys_lock.read().await;
@@ -509,6 +514,16 @@ impl BucketMetadataSys {
         }
     }

+    pub async fn get_cors_config(&self, bucket: &str) -> Result<(CORSConfiguration, OffsetDateTime)> {
+        let (bm, _) = self.get_config(bucket).await?;
+
+        if let Some(config) = &bm.cors_config {
+            Ok((config.clone(), bm.cors_config_updated_at))
+        } else {
+            Err(Error::ConfigNotFound)
+        }
+    }
+
     pub async fn created_at(&self, bucket: &str) -> Result<OffsetDateTime> {
         let bm = match self.get_config(bucket).await {
             Ok((bm, _)) => bm.created,
@@ -12,11 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::collections::HashMap;
-use time::{OffsetDateTime, format_description};
-
 use s3s::dto::{Date, ObjectLockLegalHold, ObjectLockLegalHoldStatus, ObjectLockRetention, ObjectLockRetentionMode};
 use s3s::header::{X_AMZ_OBJECT_LOCK_LEGAL_HOLD, X_AMZ_OBJECT_LOCK_MODE, X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE};
+use std::collections::HashMap;
+use time::{OffsetDateTime, format_description};

 const _ERR_MALFORMED_BUCKET_OBJECT_CONFIG: &str = "invalid bucket object lock config";
 const _ERR_INVALID_RETENTION_DATE: &str = "date must be provided in ISO 8601 format";
@@ -12,16 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use crate::bucket::metadata_sys::get_object_lock_config;
+use crate::bucket::object_lock::objectlock;
+use crate::store_api::ObjectInfo;
 use s3s::dto::{DefaultRetention, ObjectLockLegalHoldStatus, ObjectLockRetentionMode};
 use std::sync::Arc;
 use time::OffsetDateTime;

-use s3s::dto::{DefaultRetention, ObjectLockLegalHoldStatus, ObjectLockRetentionMode};
-
-use crate::bucket::metadata_sys::get_object_lock_config;
-use crate::store_api::ObjectInfo;
-
-use super::objectlock;
-
 pub struct BucketObjectLockSys {}

 impl BucketObjectLockSys {
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use super::ReplicationRuleExt as _;
+use crate::bucket::replication::ReplicationRuleExt as _;
 use crate::bucket::tagging::decode_tags_to_map;
 use rustfs_filemeta::ReplicationType;
 use s3s::dto::DeleteMarkerReplicationStatus;
@@ -1,25 +1,33 @@
 // Copyright 2024 RustFS Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use crate::StorageAPI;
+use crate::bucket::bucket_target_sys::BucketTargetSys;
+use crate::bucket::metadata_sys;
+use crate::bucket::replication::ResyncOpts;
+use crate::bucket::replication::ResyncStatusType;
+use crate::bucket::replication::replicate_delete;
+use crate::bucket::replication::replicate_object;
+use crate::disk::BUCKET_META_PREFIX;
-use std::any::Any;
-use std::sync::Arc;
-use std::sync::atomic::AtomicI32;
-use std::sync::atomic::Ordering;
-
-use crate::bucket::bucket_target_sys::BucketTargetSys;
-use crate::bucket::metadata_sys;
 use crate::bucket::replication::replication_resyncer::{
     BucketReplicationResyncStatus, DeletedObjectReplicationInfo, ReplicationConfig, ReplicationResyncer,
     get_heal_replicate_object_info,
 };
 use crate::bucket::replication::replication_state::ReplicationStats;
 use crate::config::com::read_config;
-use crate::disk::BUCKET_META_PREFIX;
 use crate::error::Error as EcstoreError;
 use crate::store_api::ObjectInfo;

 use lazy_static::lazy_static;
 use rustfs_filemeta::MrfReplicateEntry;
 use rustfs_filemeta::ReplicateDecision;
@@ -34,6 +42,10 @@ use rustfs_filemeta::replication_statuses_map;
 use rustfs_filemeta::version_purge_statuses_map;
 use rustfs_filemeta::{REPLICATE_EXISTING, REPLICATE_HEAL, REPLICATE_HEAL_DELETE};
 use rustfs_utils::http::RESERVED_METADATA_PREFIX_LOWER;
+use std::any::Any;
+use std::sync::Arc;
+use std::sync::atomic::AtomicI32;
+use std::sync::atomic::Ordering;
 use time::OffsetDateTime;
 use time::format_description::well_known::Rfc3339;
 use tokio::sync::Mutex;
@@ -1,3 +1,17 @@
+// Copyright 2024 RustFS Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 use crate::bucket::bucket_target_sys::{
     AdvancedPutOptions, BucketTargetSys, PutObjectOptions, PutObjectPartOptions, RemoveObjectOptions, TargetClient,
 };
@@ -16,7 +30,6 @@ use crate::event_notification::{EventArgs, send_event};
 use crate::global::GLOBAL_LocalNodeName;
 use crate::store_api::{DeletedObject, ObjectInfo, ObjectOptions, ObjectToDelete, WalkOptions};
 use crate::{StorageAPI, new_object_layer_fn};
-
 use aws_sdk_s3::error::SdkError;
 use aws_sdk_s3::operation::head_object::HeadObjectOutput;
 use aws_sdk_s3::primitives::ByteStream;
@@ -24,7 +37,6 @@ use aws_sdk_s3::types::{CompletedPart, ObjectLockLegalHoldStatus};
 use byteorder::ByteOrder;
 use futures::future::join_all;
 use http::HeaderMap;
-
 use regex::Regex;
 use rustfs_filemeta::{
     MrfReplicateEntry, REPLICATE_EXISTING, REPLICATE_EXISTING_DELETE, REPLICATION_RESET, ReplicateDecision, ReplicateObjectInfo,
@@ -1,3 +1,17 @@
+// Copyright 2024 RustFS Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 use crate::error::Error;
 use rustfs_filemeta::{ReplicatedTargetInfo, ReplicationStatusType, ReplicationType};
 use serde::{Deserialize, Serialize};
@@ -12,11 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use crate::bucket::replication::ObjectOpts;
 use s3s::dto::ReplicaModificationsStatus;
 use s3s::dto::ReplicationRule;

-use super::ObjectOpts;
-
 pub trait ReplicationRuleExt {
     fn prefix(&self) -> &str;
     fn metadata_replicate(&self, obj: &ObjectOpts) -> bool;
@@ -12,9 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::collections::HashMap;
-
 use s3s::dto::Tag;
+use std::collections::HashMap;
 use url::form_urlencoded;

 pub fn decode_tags(tags: &str) -> Vec<Tag> {
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use super::BucketTargetType;
+use crate::bucket::target::BucketTargetType;
 use std::fmt::Display;
 use std::str::FromStr;
@@ -14,16 +14,15 @@

 use crate::disk::RUSTFS_META_BUCKET;
 use crate::error::{Error, Result, StorageError};
+use regex::Regex;
 use rustfs_utils::path::SLASH_SEPARATOR_STR;
 use s3s::xml;
+use tracing::instrument;

 pub fn is_meta_bucketname(name: &str) -> bool {
     name.starts_with(RUSTFS_META_BUCKET)
 }

-use regex::Regex;
-use tracing::instrument;
-
 lazy_static::lazy_static! {
     static ref VALID_BUCKET_NAME: Regex = Regex::new(r"^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$").unwrap();
     static ref VALID_BUCKET_NAME_STRICT: Regex = Regex::new(r"^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$").unwrap();
@@ -12,9 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use s3s::dto::{BucketVersioningStatus, VersioningConfiguration};
-
 use rustfs_utils::string::match_simple;
+use s3s::dto::{BucketVersioningStatus, VersioningConfiguration};

 pub trait VersioningApi {
     fn enabled(&self) -> bool;
@@ -12,9 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::sync::Arc;
-
 use lazy_static::lazy_static;
+use std::sync::Arc;
 use tokio_util::sync::CancellationToken;

 pub mod metacache_set;
@@ -12,8 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use rustfs_utils::string::has_pattern;
-use rustfs_utils::string::has_string_suffix_in_slice;
+use rustfs_utils::string::{has_pattern, has_string_suffix_in_slice};
 use std::env;
 use tracing::error;
@@ -12,33 +12,29 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::{
-    collections::{HashMap, hash_map::Entry},
-    sync::Arc,
-    time::{Duration, SystemTime},
-};
-use tokio::sync::RwLock;
-use tracing::debug;
-
 pub mod local_snapshot;

+use crate::{
+    bucket::metadata_sys::get_replication_config, config::com::read_config, disk::DiskAPI, error::Error, store::ECStore,
+    store_api::StorageAPI,
+};
 pub use local_snapshot::{
     DATA_USAGE_DIR, DATA_USAGE_STATE_DIR, LOCAL_USAGE_SNAPSHOT_VERSION, LocalUsageSnapshot, LocalUsageSnapshotMeta,
     data_usage_dir, data_usage_state_dir, ensure_data_usage_layout, read_snapshot as read_local_snapshot, snapshot_file_name,
     snapshot_object_path, snapshot_path, write_snapshot as write_local_snapshot,
 };

-use crate::{
-    bucket::metadata_sys::get_replication_config, config::com::read_config, disk::DiskAPI, store::ECStore, store_api::StorageAPI,
-};
 use rustfs_common::data_usage::{
     BucketTargetUsageInfo, BucketUsageInfo, DataUsageCache, DataUsageEntry, DataUsageInfo, DiskUsageStatus, SizeSummary,
 };
 use rustfs_utils::path::SLASH_SEPARATOR_STR;
-use std::sync::OnceLock;
+use std::{
+    collections::{HashMap, hash_map::Entry},
+    sync::{Arc, OnceLock},
+    time::{Duration, SystemTime},
+};
 use tokio::fs;
-use tracing::{error, info, warn};
-
-use crate::error::Error;
+use tokio::sync::RwLock;
+use tracing::{debug, error, info, warn};

 // Data usage storage constants
 pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR_STR;
@@ -112,8 +108,8 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa
         Ok(data) => data,
         Err(e) => {
             error!("Failed to read data usage info from backend: {}", e);
-            if e == crate::error::Error::ConfigNotFound {
-                warn!("Data usage config not found, building basic statistics");
+            if e == Error::ConfigNotFound {
+                info!("Data usage config not found, building basic statistics");
                 return build_basic_data_usage_info(store).await;
             }
             return Err(Error::other(e));
@@ -146,7 +142,7 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa
         .map(|(bucket, &size)| {
             (
                 bucket.clone(),
-                rustfs_common::data_usage::BucketUsageInfo {
+                BucketUsageInfo {
                     size,
                     ..Default::default()
                 },
@@ -263,7 +259,7 @@ pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskU

         // If a snapshot is corrupted or unreadable, skip it but keep processing others
         if let Err(err) = &snapshot_result {
-            warn!(
+            info!(
                 "Failed to read data usage snapshot for disk {} (pool {}, set {}, disk {}): {}",
                 disk_id, pool_idx, set_disks.set_index, disk_index, err
             );
@@ -272,7 +268,7 @@ pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskU
             if let Err(remove_err) = fs::remove_file(&snapshot_file).await
                 && remove_err.kind() != std::io::ErrorKind::NotFound
             {
-                warn!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
+                info!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
             }
         }
@@ -359,7 +355,7 @@ pub async fn compute_bucket_usage(store: Arc<ECStore>, bucket_name: &str) -> Res

         continuation = result.next_continuation_token.clone();
         if continuation.is_none() {
-            warn!(
+            info!(
                 "Bucket {} listing marked truncated but no continuation token returned; stopping early",
                 bucket_name
             );
@@ -567,7 +563,7 @@ pub fn cache_to_data_usage_info(cache: &DataUsageCache, path: &str, buckets: &[c
             None => continue,
         };
         let flat = cache.flatten(&e);
-        let mut bui = rustfs_common::data_usage::BucketUsageInfo {
+        let mut bui = BucketUsageInfo {
             size: flat.size as u64,
             versions_count: flat.versions as u64,
             objects_count: flat.objects as u64,
@@ -645,7 +641,7 @@ pub async fn load_data_usage_cache(store: &crate::set_disk::SetDisks, name: &str
             break;
         }
         Err(err) => match err {
-            crate::error::Error::FileNotFound | crate::error::Error::VolumeNotFound => {
+            Error::FileNotFound | Error::VolumeNotFound => {
                 match store
                     .get_object_reader(
                         RUSTFS_META_BUCKET,
@@ -666,7 +662,7 @@ pub async fn load_data_usage_cache(store: &crate::set_disk::SetDisks, name: &str
                     break;
                 }
                 Err(_) => match err {
-                    crate::error::Error::FileNotFound | crate::error::Error::VolumeNotFound => {
+                    Error::FileNotFound | Error::VolumeNotFound => {
                         break;
                     }
                     _ => {}
@@ -695,9 +691,9 @@ pub async fn save_data_usage_cache(cache: &DataUsageCache, name: &str) -> crate:
     use std::path::Path;

     let Some(store) = new_object_layer_fn() else {
-        return Err(crate::error::Error::other("errServerNotInitialized"));
+        return Err(Error::other("errServerNotInitialized"));
     };
-    let buf = cache.marshal_msg().map_err(crate::error::Error::other)?;
+    let buf = cache.marshal_msg().map_err(Error::other)?;
     let buf_clone = buf.clone();

     let store_clone = store.clone();
@@ -1,13 +1,25 @@
-use std::collections::HashMap;
-use std::path::{Path, PathBuf};
-use std::time::SystemTime;
-
-use serde::{Deserialize, Serialize};
-use tokio::fs;
+// Copyright 2024 RustFS Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::data_usage::BucketUsageInfo;
+use crate::disk::RUSTFS_META_BUCKET;
+use crate::error::{Error, Result};
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use std::path::{Path, PathBuf};
+use std::time::SystemTime;
+use tokio::fs;

 /// Directory used to store per-disk usage snapshots under the metadata bucket.
 pub const DATA_USAGE_DIR: &str = "datausage";
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use super::error::{Error, Result};
+use crate::disk::error::{Error, Result};
 use path_absolutize::Absolutize;
 use rustfs_utils::{is_local_host, is_socket_addr};
 use std::{fmt::Display, path::Path};
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// use crate::quorum::CheckErrorFn;
 use std::hash::{Hash, Hasher};
 use std::io::{self};
 use std::path::PathBuf;
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use super::error::DiskError;
+use crate::disk::error::DiskError;

 pub fn to_file_error(io_err: std::io::Error) -> std::io::Error {
     match io_err.kind() {
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use super::error::Error;
+use crate::disk::error::Error;

 pub static OBJECT_OP_IGNORED_ERRS: &[Error] = &[
     Error::DiskNotFound,
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use super::error::{Error, Result};
-use super::{DiskInfo, error::DiskError};
+use crate::disk::error::{Error, Result};
+use crate::disk::{DiskInfo, error::DiskError};
 use serde::{Deserialize, Serialize};
 use serde_json::Error as JsonError;
 use uuid::Uuid;
@@ -17,7 +17,6 @@ use std::{
     path::Path,
     sync::{Arc, OnceLock},
 };
-
 use tokio::{
     fs::{self, File},
     io,
@@ -831,7 +831,11 @@ impl LocalDisk {
         self.write_all_internal(&tmp_file_path, InternalBuf::Ref(buf), sync, &tmp_volume_dir)
             .await?;

-        rename_all(tmp_file_path, file_path, volume_dir).await
+        rename_all(tmp_file_path, &file_path, volume_dir).await?;
+
+        // Invalidate cache after successful write
+        get_global_file_cache().invalidate(&file_path).await;
+        Ok(())
     }

     // write_all_public for trail
@@ -12,19 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use crate::disk::error::DiskError;
+use crate::disk::error::Result;
+use crate::disk::error_conv::to_file_error;
+use rustfs_utils::path::SLASH_SEPARATOR_STR;
 use std::{
     io,
     path::{Component, Path},
 };
-
-use super::error::Result;
-use crate::disk::error_conv::to_file_error;
-use rustfs_utils::path::SLASH_SEPARATOR_STR;
 use tokio::fs;
 use tracing::warn;

-use super::error::DiskError;
-
 /// Check path length according to OS limits.
 pub fn check_path_length(path_name: &str) -> Result<()> {
     // Apple OS X path length is limited to 1016
@@ -12,19 +12,18 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use rustfs_utils::{XHost, check_local_server_addr, get_host_ip, is_local_host};
-use tracing::{error, info, instrument, warn};
-
 use crate::{
     disk::endpoint::{Endpoint, EndpointType},
     disks_layout::DisksLayout,
     global::global_rustfs_port,
 };
-use std::io::{Error, Result};
+use rustfs_utils::{XHost, check_local_server_addr, get_host_ip, is_local_host};
+use std::{
+    collections::{HashMap, HashSet, hash_map::Entry},
+    io::{Error, Result},
+    net::IpAddr,
+};
+use tracing::{error, info, instrument, warn};

 /// enum for setup type.
 #[derive(PartialEq, Eq, Debug, Clone)]
@@ -12,10 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use super::BitrotReader;
-use super::Erasure;
 use crate::disk::error::Error;
 use crate::disk::error_reduce::reduce_errs;
+use crate::erasure_coding::{BitrotReader, Erasure};
 use futures::stream::{FuturesUnordered, StreamExt};
 use pin_project_lite::pin_project;
 use std::io;
@@ -312,11 +311,12 @@ impl Erasure {

 #[cfg(test)]
 mod tests {
-    use rustfs_utils::HashAlgorithm;
-
-    use crate::{disk::error::DiskError, erasure_coding::BitrotWriter};
-
     use super::*;
+    use crate::{
+        disk::error::DiskError,
+        erasure_coding::{BitrotReader, BitrotWriter},
+    };
+    use rustfs_utils::HashAlgorithm;
     use std::io::Cursor;

     #[tokio::test]
@@ -12,11 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use super::BitrotWriterWrapper;
-use super::Erasure;
 use crate::disk::error::Error;
 use crate::disk::error_reduce::count_errs;
 use crate::disk::error_reduce::{OBJECT_OP_IGNORED_ERRS, reduce_write_quorum_errs};
+use crate::erasure_coding::BitrotWriterWrapper;
+use crate::erasure_coding::Erasure;
 use bytes::Bytes;
 use futures::StreamExt;
 use futures::stream::FuturesUnordered;
@@ -12,10 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use super::BitrotReader;
-use super::BitrotWriterWrapper;
-use super::decode::ParallelReader;
 use crate::disk::error::{Error, Result};
+use crate::erasure_coding::BitrotReader;
+use crate::erasure_coding::BitrotWriterWrapper;
+use crate::erasure_coding::decode::ParallelReader;
 use crate::erasure_coding::encode::MultiWriter;
 use bytes::Bytes;
 use tokio::io::AsyncRead;
@@ -12,12 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+mod bitrot;
 pub mod decode;
 pub mod encode;
 pub mod erasure;
 pub mod heal;

-mod bitrot;
 pub use bitrot::*;

 pub use erasure::{Erasure, ReedSolomonEncoder, calc_shard_size};
@@ -12,12 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use s3s::{S3Error, S3ErrorCode};
-
-use rustfs_utils::path::decode_dir_object;
-
 use crate::bucket::error::BucketMetadataError;
 use crate::disk::error::DiskError;
+use rustfs_utils::path::decode_dir_object;
+use s3s::{S3Error, S3ErrorCode};

 pub type Error = StorageError;
 pub type Result<T> = core::result::Result<T, Error>;
@@ -12,10 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use crate::event::targetid::TargetID;
 use std::sync::atomic::AtomicI64;

-use super::targetid::TargetID;
-
 #[derive(Default)]
 pub struct TargetList {
     pub current_send_calls: AtomicI64,
@@ -14,15 +14,14 @@
 // limitations under the License.
 #![allow(unused_variables)]

-use std::collections::HashMap;
-use std::sync::Arc;
-use tokio::sync::RwLock;
-
 use crate::bucket::metadata::BucketMetadata;
 use crate::event::name::EventName;
 use crate::event::targetlist::TargetList;
 use crate::store::ECStore;
 use crate::store_api::ObjectInfo;
+use std::collections::HashMap;
+use std::sync::Arc;
+use tokio::sync::RwLock;

 pub struct EventNotifier {
     target_list: TargetList,
@@ -12,12 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use crate::{
-    admin_server_info::get_local_server_property,
-    new_object_layer_fn,
-    store_api::StorageAPI,
-    // utils::os::get_drive_stats,
-};
+use crate::{admin_server_info::get_local_server_property, new_object_layer_fn, store_api::StorageAPI};
 use chrono::Utc;
 use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, GLOBAL_RUSTFS_ADDR, heal_channel::DriveState, metrics::global_metrics};
 use rustfs_madmin::metrics::{DiskIOStats, DiskMetric, RealtimeMetrics};
@@ -12,16 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::error::Error;
-
+use crate::rpc::{TONIC_RPC_PREFIX, gen_signature_headers};
 use http::Method;
 use rustfs_common::GLOBAL_CONN_MAP;
 use rustfs_protos::{create_new_channel, proto_gen::node_service::node_service_client::NodeServiceClient};
+use std::error::Error;
 use tonic::{service::interceptor::InterceptedService, transport::Channel};
 use tracing::debug;

-use crate::rpc::{TONIC_RPC_PREFIX, gen_signature_headers};
-
 /// 3. Subsequent calls will attempt fresh connections
 /// 4. If node is still down, connection will fail fast (3s timeout)
 pub async fn node_service_time_out_client(
@@ -27,7 +27,6 @@ use rustfs_madmin::{
     net::NetInfo,
 };
 use rustfs_protos::evict_failed_connection;
-use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient;
 use rustfs_protos::proto_gen::node_service::{
     DeleteBucketMetadataRequest, DeletePolicyRequest, DeleteServiceAccountRequest, DeleteUserRequest, GetCpusRequest,
     GetMemInfoRequest, GetMetricsRequest, GetNetInfoRequest, GetOsInfoRequest, GetPartitionsRequest, GetProcInfoRequest,
@@ -35,6 +34,7 @@ use rustfs_protos::proto_gen::node_service::{
     LoadPolicyMappingRequest, LoadPolicyRequest, LoadRebalanceMetaRequest, LoadServiceAccountRequest,
     LoadTransitionTierConfigRequest, LoadUserRequest, LocalStorageInfoRequest, Mss, ReloadPoolMetaRequest,
     ReloadSiteReplicationConfigRequest, ServerInfoRequest, SignalServiceRequest, StartProfilingRequest, StopRebalanceRequest,
+    node_service_client::NodeServiceClient,
 };
 use rustfs_utils::XHost;
 use serde::{Deserialize, Serialize as _};
@@ -12,6 +12,34 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use crate::disk::{
+    CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions, FileReader,
+    FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions,
+    disk_store::{
+        CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE, get_max_timeout_duration,
+    },
+    endpoint::Endpoint,
+};
+use crate::disk::{disk_store::DiskHealthTracker, error::DiskError, local::ScanGuard};
+use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
+use crate::{
+    disk::error::{Error, Result},
+    rpc::build_auth_headers,
+};
+use bytes::Bytes;
+use futures::lock::Mutex;
+use http::{HeaderMap, HeaderValue, Method, header::CONTENT_TYPE};
+use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
+use rustfs_protos::proto_gen::node_service::RenamePartRequest;
+use rustfs_protos::proto_gen::node_service::{
+    CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
+    DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest,
+    ReadMetadataRequest, ReadMultipleRequest, ReadPartsRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest,
+    RenameFileRequest, StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
+    node_service_client::NodeServiceClient,
+};
+use rustfs_rio::{HttpReader, HttpWriter};
+use rustfs_utils::string::parse_bool_with_default;
 use std::{
     path::PathBuf,
     sync::{
@@ -20,56 +48,17 @@ use std::{
     },
     time::Duration,
 };
-
-use bytes::Bytes;
-use futures::lock::Mutex;
-use http::{HeaderMap, HeaderValue, Method, header::CONTENT_TYPE};
-use rustfs_protos::proto_gen::node_service::{
-    CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
-    DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest,
-    ReadMetadataRequest, ReadMultipleRequest, ReadPartsRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest,
-    RenameFileRequest, StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
-    node_service_client::NodeServiceClient,
-};
-use rustfs_utils::string::parse_bool_with_default;
-use tokio::time;
-use tokio_util::sync::CancellationToken;
-use tracing::{debug, info, warn};
-
-use crate::disk::{disk_store::DiskHealthTracker, error::DiskError, local::ScanGuard};
-use crate::{
-    disk::error::{Error, Result},
-    rpc::build_auth_headers,
-};
-use crate::{
-    disk::{
-        CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions,
-        ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions,
-        disk_store::{
-            CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE,
-            get_max_timeout_duration,
-        },
-        endpoint::Endpoint,
-    },
-    rpc::client::gen_tonic_signature_interceptor,
-};
-use crate::{
-    disk::{FileReader, FileWriter},
-    rpc::client::{TonicInterceptor, node_service_time_out_client},
-};
-use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
-use rustfs_protos::proto_gen::node_service::RenamePartRequest;
-use rustfs_rio::{HttpReader, HttpWriter};
 use tokio::{io::AsyncWrite, net::TcpStream, time::timeout};
 use tokio_util::sync::CancellationToken;
 use tonic::{Request, service::interceptor::InterceptedService, transport::Channel};
 use tracing::{debug, info, warn};
 use uuid::Uuid;

 #[derive(Debug)]
 pub struct RemoteDisk {
     pub id: Mutex<Option<Uuid>>,
     pub addr: String,
     pub url: url::Url,
     pub root: PathBuf,
     endpoint: Endpoint,
     pub scanning: Arc<AtomicU32>,
     /// Whether health checking is enabled
@@ -82,8 +71,6 @@ pub struct RemoteDisk {

 impl RemoteDisk {
     pub async fn new(ep: &Endpoint, opt: &DiskOption) -> Result<Self> {
-        // let root = fs::canonicalize(ep.url.path()).await?;
-
         let root = PathBuf::from(ep.get_file_path());
         let addr = if let Some(port) = ep.url.port() {
             format!("{}://{}:{}", ep.url.scheme(), ep.url.host_str().unwrap(), port)
         } else {
@@ -97,8 +84,6 @@ impl RemoteDisk {
         let disk = Self {
             id: Mutex::new(None),
             addr: addr.clone(),
             url: ep.url.clone(),
             root,
             endpoint: ep.clone(),
             scanning: Arc::new(AtomicU32::new(0)),
             health_check: opt.health_check && env_health_check,
@@ -14,9 +14,10 @@

 use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
 use async_trait::async_trait;
-use rustfs_lock::types::{LockId, LockMetadata, LockPriority};
-use rustfs_lock::{LockClient, LockError, LockInfo, LockResponse, LockStats, LockStatus, Result};
-use rustfs_lock::{LockRequest, LockType};
+use rustfs_lock::{
+    LockClient, LockError, LockInfo, LockRequest, LockResponse, LockStats, LockStatus, LockType, Result,
+    types::{LockId, LockMetadata, LockPriority},
+};
 use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient;
 use rustfs_protos::proto_gen::node_service::{GenerallyLockRequest, PingRequest};
 use std::collections::HashMap;
@@ -13,8 +13,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::{collections::HashMap, sync::Arc};
-
 use crate::disk::error_reduce::count_errs;
 use crate::error::{Error, Result};
 use crate::store_api::{ListPartsInfo, ObjectInfoOrErr, WalkOptions};
@@ -44,18 +42,17 @@ use rustfs_common::{
     heal_channel::{DriveState, HealItemType},
 };
 use rustfs_filemeta::FileInfo;
-
 use rustfs_lock::FastLockGuard;
 use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
 use rustfs_utils::{crc_hash, path::path_join_buf, sip_hash};
+use std::{collections::HashMap, sync::Arc};
 use tokio::sync::RwLock;
+use tokio_util::sync::CancellationToken;
+use uuid::Uuid;

 use tokio::sync::broadcast::{Receiver, Sender};
 use tokio::time::Duration;
-use tokio_util::sync::CancellationToken;
 use tracing::warn;
 use tracing::{error, info};
-use uuid::Uuid;

 #[derive(Debug, Clone)]
 pub struct Sets {
@@ -1830,16 +1830,16 @@ impl StorageAPI for ECStore {
             if self.is_suspended(pool.pool_idx).await {
                 continue;
             }
-            match pool
+            return match pool
                 .list_object_parts(bucket, object, upload_id, part_number_marker, max_parts, opts)
                 .await
             {
-                Ok(res) => return Ok(res),
+                Ok(res) => Ok(res),
                 Err(err) => {
                     if is_err_invalid_upload_id(&err) {
                         continue;
                     }
-                    return Err(err);
+                    Err(err)
                 }
             };
         }
@@ -2209,7 +2209,7 @@
     async fn delete_object_version(&self, bucket: &str, object: &str, fi: &FileInfo, force_del_marker: bool) -> Result<()> {
         check_del_obj_args(bucket, object)?;

-        let object = rustfs_utils::path::encode_dir_object(object);
+        let object = encode_dir_object(object);

         if self.single_pool() {
             return self.pools[0]
@@ -2329,17 +2329,15 @@

         // No pool returned a nil error, return the first non 'not found' error
         for (index, err) in errs.iter().enumerate() {
-            match err {
+            return match err {
                 Some(err) => {
                     if is_err_object_not_found(err) || is_err_version_not_found(err) {
                         continue;
                     }
-                    return Ok((ress.remove(index), Some(err.clone())));
+                    Ok((ress.remove(index), Some(err.clone())))
                 }
-                None => {
-                    return Ok((ress.remove(index), None));
-                }
-            }
+                None => Ok((ress.remove(index), None)),
+            };
         }

         // At this stage, all errors are 'not found'
@@ -27,7 +27,6 @@ use crate::{
 };
 use futures::future::join_all;
 use std::collections::{HashMap, hash_map::Entry};
-
 use tracing::{info, warn};
 use uuid::Uuid;
@@ -410,13 +410,13 @@ impl ECStore {
            ..Default::default()
        };

        let mut list_result = match self.list_path(&opts).await {
            Ok(res) => res,
            Err(err) => MetaCacheEntriesSortedResult {
        let mut list_result = self
            .list_path(&opts)
            .await
            .unwrap_or_else(|err| MetaCacheEntriesSortedResult {
                err: Some(err.into()),
                ..Default::default()
            },
        };
            });

        if let Some(err) = list_result.err.clone()
            && err != rustfs_filemeta::Error::Unexpected
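A minimal sketch of the `unwrap_or_else` pattern this hunk adopts: fold the error into a default-constructed result struct instead of matching on both arms. `ListResult` and `list_path` here are illustrative stand-ins, not the crate's real types:

#[derive(Default)]
struct ListResult {
    entries: Vec<String>,
    err: Option<String>,
}

fn list_path(ok: bool) -> Result<ListResult, String> {
    if ok {
        Ok(ListResult { entries: vec!["a".into()], err: None })
    } else {
        Err("backend unavailable".into())
    }
}

fn main() {
    // On error, build a default result that carries the error inline.
    let result = list_path(false).unwrap_or_else(|err| ListResult {
        err: Some(err),
        ..Default::default()
    });
    assert!(result.entries.is_empty());
    assert_eq!(result.err.as_deref(), Some("backend unavailable"));
}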
@@ -988,7 +988,7 @@ async fn gather_results(
    }

    if let Some(marker) = &opts.marker
        && &entry.name < marker
        && &entry.name <= marker
    {
        continue;
    }
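The switch from `<` to `<=` means listing resumes strictly after the marker, so the marker key itself is no longer re-emitted on the next page, matching the usual S3 ListObjects contract. A small sketch of why that matters:

// With `<` the marker key would appear again on the next page; with `<=`
// (i.e. keep only keys strictly greater than the marker) it does not.
fn page_after<'a>(keys: &'a [&'a str], marker: Option<&str>) -> Vec<&'a str> {
    keys.iter()
        .copied()
        .filter(|k| match marker {
            Some(m) => *k > m, // equivalent to skipping while k <= m
            None => true,
        })
        .collect()
}

fn main() {
    let keys = ["a", "b", "c"];
    assert_eq!(page_after(&keys, Some("b")), vec!["c"]); // "b" is not repeated
}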
@@ -1476,7 +1476,6 @@ mod test {
    // use crate::error::Error;
    // use crate::metacache::writer::MetacacheReader;
    // use crate::set_disk::SetDisks;
    // use crate::store::ECStore;
    // use crate::store_list_objects::ListPathOptions;
    // use crate::store_list_objects::WalkOptions;
    // use crate::store_list_objects::WalkVersionsSortOrder;
@@ -13,6 +13,7 @@
// limitations under the License.

use crate::notification_system_subscriber::NotificationSystemSubscriberView;
use crate::notifier::TargetList;
use crate::{
    Event, error::NotificationError, notifier::EventNotifier, registry::TargetRegistry, rules::BucketNotificationConfig, stream,
};
@@ -191,6 +192,22 @@ impl NotificationSystem {
        self.notifier.target_list().read().await.keys()
    }

    /// Gets the complete Target list, including both active and inactive Targets.
    ///
    /// # Return
    /// An `Arc<RwLock<TargetList>>` containing all Targets.
    pub async fn get_all_targets(&self) -> Arc<RwLock<TargetList>> {
        self.notifier.target_list()
    }

    /// Gets all Target values, including both active and inactive Targets.
    ///
    /// # Return
    /// A Vec containing all Targets.
    pub async fn get_target_values(&self) -> Vec<Arc<dyn Target<Event> + Send + Sync>> {
        self.notifier.target_list().read().await.values()
    }

    /// Checks if there are active subscribers for the given bucket and event name.
    pub async fn has_subscriber(&self, bucket: &str, event: &EventName) -> bool {
        if !self.subscriber_view.has_subscriber(bucket, event) {
@@ -370,6 +370,11 @@ impl TargetList {
        self.targets.keys().cloned().collect()
    }

    /// Returns all targets in the list
    pub fn values(&self) -> Vec<Arc<dyn Target<Event> + Send + Sync>> {
        self.targets.values().cloned().collect()
    }

    /// Returns the number of targets
    pub fn len(&self) -> usize {
        self.targets.len()
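A minimal sketch of what the new `values()` accessor hands out: cloning an `Arc` only bumps a reference count, so callers get owned handles to the shared trait objects without copying the targets themselves. `Probe` stands in for the crate's `Target<Event>` trait:

use std::collections::HashMap;
use std::sync::Arc;

trait Probe: Send + Sync {
    fn name(&self) -> &str;
}

struct Webhook;
impl Probe for Webhook {
    fn name(&self) -> &str { "webhook" }
}

// Cheap snapshot: clones the Arc handles, not the targets behind them.
fn values(map: &HashMap<String, Arc<dyn Probe>>) -> Vec<Arc<dyn Probe>> {
    map.values().cloned().collect()
}

fn main() {
    let mut map: HashMap<String, Arc<dyn Probe>> = HashMap::new();
    map.insert("w1".into(), Arc::new(Webhook));
    let snapshot = values(&map);
    assert_eq!(snapshot[0].name(), "webhook");
}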
@@ -22,8 +22,8 @@ use strum::{EnumString, IntoStaticStr};

use super::{Error as IamError, Validator, utils::wildcard};

/// A set of policy actions that serializes as a single string when containing one item,
/// or as an array when containing multiple items (matching AWS S3 API format).
/// A set of policy actions that always serializes as an array of strings,
/// conforming to the S3 policy specification for consistency and compatibility.
#[derive(Clone, Default, Debug)]
pub struct ActionSet(pub HashSet<Action>);

@@ -34,15 +34,8 @@ impl Serialize for ActionSet {
    {
        use serde::ser::SerializeSeq;

        if self.0.len() == 1 {
            // Serialize single action as string (not array)
            if let Some(action) = self.0.iter().next() {
                let action_str: &str = action.into();
                return serializer.serialize_str(action_str);
            }
        }

        // Serialize multiple actions as array
        // Always serialize as array, even for single action, to match S3 specification
        // and ensure compatibility with AWS SDK clients that expect array format
        let mut seq = serializer.serialize_seq(Some(self.0.len()))?;
        for action in &self.0 {
            let action_str: &str = action.into();
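A minimal, self-contained sketch of the always-as-array serializer this hunk introduces, using plain strings instead of the crate's `Action` type (assumes the serde and serde_json crates):

use serde::ser::{Serialize, SerializeSeq, Serializer};
use std::collections::HashSet;

struct StringSet(HashSet<String>);

impl Serialize for StringSet {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // serialize_seq always emits a JSON array, even for a single element
        let mut seq = serializer.serialize_seq(Some(self.0.len()))?;
        for item in &self.0 {
            seq.serialize_element(item)?;
        }
        seq.end()
    }
}

fn main() {
    let mut set = HashSet::new();
    set.insert("s3:GetObject".to_string());
    let json = serde_json::to_string(&StringSet(set)).unwrap();
    assert_eq!(json, r#"["s3:GetObject"]"#); // array, not a bare string
}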
@@ -610,13 +603,17 @@ mod tests {

    #[test]
    fn test_actionset_serialize_single_element() {
        // Single element should serialize as string
        // Single element should serialize as array for S3 specification compliance
        let mut set = HashSet::new();
        set.insert(Action::S3Action(S3Action::GetObjectAction));
        let actionset = ActionSet(set);

        let json = serde_json::to_string(&actionset).expect("Should serialize");
        assert_eq!(json, "\"s3:GetObject\"");
        let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
        assert!(parsed.is_array(), "Should serialize as array");
        let arr = parsed.as_array().expect("Should be array");
        assert_eq!(arr.len(), 1);
        assert_eq!(arr[0].as_str().unwrap(), "s3:GetObject");
    }

    #[test]
@@ -636,12 +633,16 @@ mod tests {

    #[test]
    fn test_actionset_wildcard_serialization() {
        // Wildcard action should serialize correctly
        // Wildcard action should serialize as array for S3 specification compliance
        let mut set = HashSet::new();
        set.insert(Action::try_from("*").expect("Should parse wildcard"));
        let actionset = ActionSet(set);

        let json = serde_json::to_string(&actionset).expect("Should serialize");
        assert_eq!(json, "\"s3:*\"");
        let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
        assert!(parsed.is_array(), "Should serialize as array");
        let arr = parsed.as_array().expect("Should be array");
        assert_eq!(arr.len(), 1);
        assert_eq!(arr[0].as_str().unwrap(), "s3:*");
    }
}

@@ -1119,7 +1119,7 @@ mod test {
    }

    #[test]
    fn test_bucket_policy_serialize_single_action_as_string() {
    fn test_bucket_policy_serialize_single_action_as_array() {
        use crate::policy::action::{Action, ActionSet, S3Action};
        use crate::policy::resource::{Resource, ResourceSet};
        use crate::policy::{Effect, Principal};
@@ -1153,8 +1153,10 @@ mod test {
        let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
        let action = &parsed["Statement"][0]["Action"];

        // Single action should be serialized as string
        assert!(action.is_string(), "Single action should serialize as string");
        assert_eq!(action.as_str().unwrap(), "s3:ListBucket");
        // Single action should be serialized as array for S3 specification compliance
        assert!(action.is_array(), "Single action should serialize as array");
        let arr = action.as_array().expect("Should be array");
        assert_eq!(arr.len(), 1);
        assert_eq!(arr[0].as_str().unwrap(), "s3:ListBucket");
    }
}
@@ -16,6 +16,7 @@
mod linux;
#[cfg(all(unix, not(target_os = "linux")))]
mod unix;

#[cfg(target_os = "windows")]
mod windows;
@@ -13,56 +13,19 @@
// limitations under the License.

use super::{DiskInfo, IOStats};
use nix::sys::statfs::Statfs;
use nix::sys::{stat::stat, statfs::statfs};
use nix::sys::{stat::stat, statvfs::statvfs};
use std::io::Error;
use std::path::Path;

// FreeBSD and OpenBSD return a signed integer for blocks_available.
// Cast to an unsigned integer to use with DiskInfo.
#[cfg(any(target_os = "freebsd", target_os = "openbsd"))]
fn blocks_available(stat: &Statfs) -> u64 {
    match stat.blocks_available().try_into() {
        Ok(bavail) => bavail,
        Err(e) => {
            tracing::warn!("blocks_available returned a negative value: Using 0 as fallback. {}", e);
            0
        }
    }
}

// FreeBSD returns a signed integer for files_free. Cast to an unsigned integer
// to use with DiskInfo
#[cfg(target_os = "freebsd")]
fn files_free(stat: &Statfs) -> u64 {
    match stat.files_free().try_into() {
        Ok(files_free) => files_free,
        Err(e) => {
            tracing::warn!("files_free returned a negative value: Using 0 as fallback. {}", e);
            0
        }
    }
}

#[cfg(not(target_os = "freebsd"))]
fn files_free(stat: &Statfs) -> u64 {
    stat.files_free()
}

#[cfg(not(any(target_os = "freebsd", target_os = "openbsd")))]
fn blocks_available(stat: &Statfs) -> u64 {
    stat.blocks_available()
}

/// Returns total and free bytes available in a directory, e.g. `/`.
pub fn get_info(p: impl AsRef<Path>) -> std::io::Result<DiskInfo> {
    let path_display = p.as_ref().display();
    let stat = statfs(p.as_ref())?;
    let stat = statvfs(p.as_ref())?;

    let bsize = stat.block_size() as u64;
    let bfree = stat.blocks_free();
    let bavail = blocks_available(&stat);
    let blocks = stat.blocks();
    let bsize = stat.block_size();
    let bfree = stat.blocks_free() as u64;
    let bavail = stat.blocks_available() as u64;
    let blocks = stat.blocks() as u64;

    let reserved = match bfree.checked_sub(bavail) {
        Some(reserved) => reserved,
@@ -96,9 +59,9 @@ pub fn get_info(p: impl AsRef<Path>) -> std::io::Result<DiskInfo> {
        total,
        free,
        used,
        files: stat.files(),
        ffree: files_free(&stat),
        fstype: stat.filesystem_type_name().to_string(),
        files: stat.files() as u64,
        ffree: stat.files_free() as u64,
        // Statvfs does not provide a way to return the filesystem name.
        ..Default::default()
    })
}
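A minimal sketch of the statvfs arithmetic behind `get_info`: root-reserved blocks are `bfree - bavail`, total capacity excludes them, and free space is what an unprivileged caller can actually use. The field values are made up for illustration, and the total/used formulas reflect the common convention rather than lines shown in this hunk:

fn main() {
    let bsize: u64 = 4096; // block size reported by statvfs
    let blocks: u64 = 1_000_000; // total blocks on the filesystem
    let bfree: u64 = 300_000; // free blocks, including root-reserved ones
    let bavail: u64 = 250_000; // free blocks available to non-root users

    let reserved = bfree.checked_sub(bavail).unwrap_or(0);
    let total = (blocks - reserved) * bsize;
    let free = bavail * bsize;
    let used = total - free;

    assert_eq!(total, 950_000 * 4096);
    assert_eq!(free, 250_000 * 4096);
    assert_eq!(used, 700_000 * 4096);
}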
@@ -13,9 +13,11 @@
// limitations under the License.

use rustfs_config::VERSION;
use std::borrow::Cow;
use std::env;
use std::fmt;
#[cfg(not(any(target_os = "openbsd", target_os = "freebsd")))]
use std::sync::OnceLock;
#[cfg(not(target_os = "openbsd"))]
use sysinfo::System;

/// Business Type Enumeration
@@ -25,7 +27,7 @@ pub enum ServiceType {
    Core,
    Event,
    Logger,
    Custom(String),
    Custom(Cow<'static, str>),
}

impl ServiceType {
@@ -35,71 +37,65 @@ impl ServiceType {
            ServiceType::Core => "core",
            ServiceType::Event => "event",
            ServiceType::Logger => "logger",
            ServiceType::Custom(s) => s.as_str(),
            ServiceType::Custom(s) => s,
        }
    }
}
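A minimal sketch of why `Custom(Cow<'static, str>)` is preferable to `Custom(String)`: static names avoid an allocation entirely, while runtime-built names still work via `Cow::Owned`. `ServiceKind` is an illustrative stand-in:

use std::borrow::Cow;

#[derive(Debug, PartialEq)]
enum ServiceKind {
    Custom(Cow<'static, str>),
}

fn main() {
    // No allocation: borrows the 'static literal.
    let fixed = ServiceKind::Custom(Cow::Borrowed("monitor"));
    // Allocation only when the name is computed at runtime.
    let suffix = 7;
    let dynamic = ServiceKind::Custom(Cow::Owned(format!("worker-{suffix}")));
    assert_eq!(fixed, ServiceKind::Custom("monitor".into()));
    assert!(matches!(dynamic, ServiceKind::Custom(ref s) if s.as_ref() == "worker-7"));
}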
/// UserAgent structure to hold User-Agent information
/// including OS platform, architecture, version, and service type.
/// It provides methods to generate a formatted User-Agent string.
/// # Examples
/// ```
/// use rustfs_utils::{get_user_agent, ServiceType};
///
/// let ua = get_user_agent(ServiceType::Core);
/// println!("User-Agent: {}", ua);
/// ```
#[derive(Debug)]
struct UserAgent {
    os_platform: String,
    arch: String,
    version: String,
    os_platform: &'static str,
    arch: &'static str,
    version: &'static str,
    service: ServiceType,
}

static OS_PLATFORM: OnceLock<String> = OnceLock::new();

impl UserAgent {
    /// Create a new UserAgent instance and accept business type parameters
    ///
    /// # Arguments
    /// * `service` - The type of service for which the User-Agent is being created.
    /// # Returns
    /// A new instance of `UserAgent` with the current OS platform, architecture, version, and service type.
    fn new(service: ServiceType) -> Self {
        let os_platform = Self::get_os_platform();
        let arch = env::consts::ARCH.to_string();
        let version = VERSION.to_string();

        UserAgent {
            os_platform,
            arch,
            version,
            os_platform: Self::get_os_platform(),
            arch: env::consts::ARCH,
            version: VERSION,
            service,
        }
    }

    /// Obtain operating system platform information
    fn get_os_platform() -> String {
        if cfg!(target_os = "windows") {
            Self::get_windows_platform()
        } else if cfg!(target_os = "macos") {
            Self::get_macos_platform()
        } else if cfg!(target_os = "linux") {
            Self::get_linux_platform()
        } else {
            "Unknown".to_string()
        }
    /// Obtain operating system platform information using a thread-safe cache.
    ///
    /// The value is computed once on first use via `OnceLock` and then reused
    /// for all subsequent calls for the lifetime of the program.
    fn get_os_platform() -> &'static str {
        OS_PLATFORM.get_or_init(|| {
            if cfg!(target_os = "windows") {
                Self::get_windows_platform()
            } else if cfg!(target_os = "macos") {
                Self::get_macos_platform()
            } else if cfg!(target_os = "linux") {
                Self::get_linux_platform()
            } else if cfg!(target_os = "freebsd") {
                Self::get_freebsd_platform()
            } else if cfg!(target_os = "netbsd") {
                Self::get_netbsd_platform()
            } else {
                "Unknown".to_string()
            }
        })
    }
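A minimal sketch of the `OnceLock` caching used by `get_os_platform` above: the `String` is computed once, stored in a static, and handed out as `&'static str` thereafter, since the static owns it for the program's lifetime:

use std::sync::OnceLock;

static PLATFORM: OnceLock<String> = OnceLock::new();

fn platform() -> &'static str {
    PLATFORM.get_or_init(|| {
        println!("computing platform string (runs once)");
        format!("{}-{}", std::env::consts::OS, std::env::consts::ARCH)
    })
}

fn main() {
    let a = platform();
    let b = platform(); // served from the cache, the closure is not re-run
    assert!(std::ptr::eq(a.as_ptr(), b.as_ptr())); // same static memory
}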
    /// Get Windows platform information
    #[cfg(windows)]
    fn get_windows_platform() -> String {
        // Prefer sysinfo for the OS version
        let version = match System::os_version() {
            Some(version) => version,
            None => "Windows NT Unknown".to_string(),
        };
        format!("Windows NT {version}")
        let version = System::os_version().unwrap_or_else(|| "NT Unknown".to_string());
        if version.starts_with("Windows") {
            version
        } else {
            format!("Windows NT {version}")
        }
    }

    #[cfg(not(windows))]
@@ -110,16 +106,14 @@ impl UserAgent {
    /// Get macOS platform information
    #[cfg(target_os = "macos")]
    fn get_macos_platform() -> String {
        let binding = System::os_version().unwrap_or("14.5.0".to_string());
        let version = binding.split('.').collect::<Vec<&str>>();
        let major = version.first().unwrap_or(&"14").to_string();
        let minor = version.get(1).unwrap_or(&"5").to_string();
        let patch = version.get(2).unwrap_or(&"0").to_string();
        let version_str = System::os_version().unwrap_or_else(|| "14.0.0".to_string());
        let mut parts = version_str.split('.');
        let major = parts.next().unwrap_or("14");
        let minor = parts.next().unwrap_or("0");
        let patch = parts.next().unwrap_or("0");

        let arch = env::consts::ARCH;
        let cpu_info = if arch == "aarch64" { "Apple" } else { "Intel" };
        let cpu_info = if env::consts::ARCH == "aarch64" { "Apple" } else { "Intel" };

        // Convert to User-Agent format
        format!("Macintosh; {cpu_info} Mac OS X {major}_{minor}_{patch}")
    }
@@ -131,40 +125,47 @@ impl UserAgent {
    /// Get Linux platform information
    #[cfg(target_os = "linux")]
    fn get_linux_platform() -> String {
        format!("X11; {}", System::long_os_version().unwrap_or("Linux Unknown".to_string()))
        let os_name = System::long_os_version().unwrap_or_else(|| "Linux Unknown".to_string());
        format!("X11; {os_name}")
    }

    #[cfg(not(target_os = "linux"))]
    fn get_linux_platform() -> String {
        "N/A".to_string()
    }

    #[cfg(target_os = "freebsd")]
    fn get_freebsd_platform() -> String {
        format!("FreeBSD; {}", env::consts::ARCH)
    }

    #[cfg(not(target_os = "freebsd"))]
    fn get_freebsd_platform() -> String {
        "N/A".to_string()
    }

    #[cfg(target_os = "netbsd")]
    fn get_netbsd_platform() -> String {
        format!("NetBSD; {}", env::consts::ARCH)
    }

    #[cfg(not(target_os = "netbsd"))]
    fn get_netbsd_platform() -> String {
        "N/A".to_string()
    }
}
/// Implement Display trait to format User-Agent
impl fmt::Display for UserAgent {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if self.service == ServiceType::Basis {
            return write!(f, "Mozilla/5.0 ({}; {}) RustFS/{}", self.os_platform, self.arch, self.version);
        write!(f, "Mozilla/5.0 ({}; {}) RustFS/{}", self.os_platform, self.arch, self.version)?;
        if self.service != ServiceType::Basis {
            write!(f, " ({})", self.service.as_str())?;
        }
        write!(
            f,
            "Mozilla/5.0 ({}; {}) RustFS/{} ({})",
            self.os_platform,
            self.arch,
            self.version,
            self.service.as_str()
        )
        Ok(())
    }
}

/// Get the User-Agent string and accept business type parameters
///
/// # Arguments
/// * `service` - The type of service for which the User-Agent is being created.
///
/// # Returns
/// A formatted User-Agent string.
///
pub fn get_user_agent(service: ServiceType) -> String {
    UserAgent::new(service).to_string()
}
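A minimal sketch of the Display refactor above: write the common prefix once, then append the service suffix only when needed, instead of duplicating the whole format string in two branches. `Agent` is an illustrative stand-in:

use std::fmt;

struct Agent {
    version: &'static str,
    suffix: Option<&'static str>,
}

impl fmt::Display for Agent {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "RustFS/{}", self.version)?; // shared prefix, written once
        if let Some(s) = self.suffix {
            write!(f, " ({s})")?; // conditional suffix
        }
        Ok(())
    }
}

fn main() {
    assert_eq!(Agent { version: "1.0", suffix: None }.to_string(), "RustFS/1.0");
    assert_eq!(Agent { version: "1.0", suffix: Some("core") }.to_string(), "RustFS/1.0 (core)");
}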
@@ -173,58 +174,33 @@ pub fn get_user_agent(service: ServiceType) -> String {
mod tests {
    use super::*;
    use rustfs_config::VERSION;
    use tracing::debug;

    #[test]
    fn test_user_agent_format_basis() {
        let ua = get_user_agent(ServiceType::Basis);
        assert!(ua.starts_with("Mozilla/5.0"));
        assert!(ua.contains(&format!("RustFS/{VERSION}").to_string()));
        debug!("Basic User-Agent: {}", ua);
        assert!(ua.contains(&format!("RustFS/{VERSION}")));
        assert!(!ua.contains("(basis)"));
    }

    #[test]
    fn test_user_agent_format_core() {
        let ua = get_user_agent(ServiceType::Core);
        assert!(ua.starts_with("Mozilla/5.0"));
        assert!(ua.contains(&format!("RustFS/{VERSION} (core)").to_string()));
        debug!("Core User-Agent: {}", ua);
    }

    #[test]
    fn test_user_agent_format_event() {
        let ua = get_user_agent(ServiceType::Event);
        assert!(ua.starts_with("Mozilla/5.0"));
        assert!(ua.contains(&format!("RustFS/{VERSION} (event)").to_string()));
        debug!("Event User-Agent: {}", ua);
    }

    #[test]
    fn test_user_agent_format_logger() {
        let ua = get_user_agent(ServiceType::Logger);
        assert!(ua.starts_with("Mozilla/5.0"));
        assert!(ua.contains(&format!("RustFS/{VERSION} (logger)").to_string()));
        debug!("Logger User-Agent: {}", ua);
        assert!(ua.contains(&format!("RustFS/{VERSION} (core)")));
    }

    #[test]
    fn test_user_agent_format_custom() {
        let ua = get_user_agent(ServiceType::Custom("monitor".to_string()));
        assert!(ua.starts_with("Mozilla/5.0"));
        assert!(ua.contains(&format!("RustFS/{VERSION} (monitor)").to_string()));
        debug!("Monitor User-Agent: {}", ua);
        let ua = get_user_agent(ServiceType::Custom("monitor".into()));
        assert!(ua.contains(&format!("RustFS/{VERSION} (monitor)")));
    }

    #[test]
    fn test_all_service_type() {
        // Example: Generate User-Agents of Different Business Types
        let ua_core = get_user_agent(ServiceType::Core);
        let ua_event = get_user_agent(ServiceType::Event);
        let ua_logger = get_user_agent(ServiceType::Logger);
        let ua_custom = get_user_agent(ServiceType::Custom("monitor".to_string()));

        debug!("Core User-Agent: {}", ua_core);
        debug!("Event User-Agent: {}", ua_event);
        debug!("Logger User-Agent: {}", ua_logger);
        debug!("Custom User-Agent: {}", ua_custom);
    fn test_os_platform_caching() {
        let ua1 = UserAgent::new(ServiceType::Basis);
        let ua2 = UserAgent::new(ServiceType::Basis);
        assert_eq!(ua1.os_platform, ua2.os_platform);
        // Ensure they point to the same static memory
        assert!(std::ptr::eq(ua1.os_platform.as_ptr(), ua2.os_platform.as_ptr()));
    }
}
@@ -40,7 +40,7 @@ services:
      [
        "CMD",
        "sh", "-c",
        "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health"
        "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/rustfs/console/health"
      ]
      interval: 30s
      timeout: 10s

@@ -52,7 +52,7 @@ services:
      [
        "CMD",
        "sh", "-c",
        "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health"
        "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/rustfs/console/health"
      ]
      interval: 30s
      timeout: 10s
@@ -99,7 +99,7 @@ services:
      [
        "CMD",
        "sh", "-c",
        "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health"
        "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/rustfs/console/health"
      ]
      interval: 30s
      timeout: 10s

@@ -63,7 +63,7 @@
    test:
      [
        "CMD-SHELL",
        "curl -f http://localhost:9000/health && curl -f http://localhost:9001/health || exit 1"
        "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/health || exit 1"
      ]
    interval: 10s
    timeout: 5s

@@ -15,7 +15,7 @@ services:
      - RUSTFS_ADDRESS=0.0.0.0:9000
      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
      - RUSTFS_EXTERNAL_ADDRESS=:9000
      - RUSTFS_CORS_ALLOWED_ORIGINS=http://localhost:9001
      - RUSTFS_CORS_ALLOWED_ORIGINS=http://127.0.0.1:9001
      - RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS=*
      - RUSTFS_ACCESS_KEY=admin
      - RUSTFS_SECRET_KEY=password
@@ -25,7 +25,7 @@ services:
      - rustfs-network
    restart: unless-stopped
    healthcheck:
      test: [ "CMD", "sh", "-c", "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health" ]
      test: [ "CMD", "sh", "-c", "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/rustfs/console/health" ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -56,7 +56,7 @@ services:
      - rustfs-network
    restart: unless-stopped
    healthcheck:
      test: [ "CMD", "sh", "-c", "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health" ]
      test: [ "CMD", "sh", "-c", "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/rustfs/console/health" ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -92,7 +92,7 @@ services:
      - rustfs_secret_key
    restart: unless-stopped
    healthcheck:
      test: [ "CMD", "sh", "-c", "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health" ]
      test: [ "CMD", "sh", "-c", "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/rustfs/console/health" ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -127,7 +127,7 @@ services:
      - rustfs_enterprise_secret_key
    restart: unless-stopped
    healthcheck:
      test: [ "CMD", "sh", "-c", "curl -f http://localhost:9000/health && curl -k -f https://localhost:9001/rustfs/console/health" ]
      test: [ "CMD", "sh", "-c", "curl -f http://127.0.0.1:9000/health && curl -k -f https://127.0.0.1:9001/rustfs/console/health" ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -152,7 +152,7 @@ services:
      - rustfs-network
    restart: unless-stopped
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://localhost:9000/health" ]
      test: [ "CMD", "curl", "-f", "http://127.0.0.1:9000/health" ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -114,6 +114,8 @@ RustFS helm chart supports **standalone and distributed mode**. For standalone m
| gatewayApi.gatewayClass | string | `traefik` | Gateway class implementation. |
| gatewayApi.hostname | string | Hostname used to access RustFS via the Gateway API. |
| gatewayApi.secretName | string | Name of the TLS secret used to serve RustFS over HTTPS. |
| gatewayApi.existingGateway.name | string | `""` | The existing gateway name, instead of creating a new one. |
| gatewayApi.existingGateway.namespace | string | `""` | The namespace of the existing gateway, if not the local namespace. |

---
@@ -1,4 +1,4 @@
{{- if .Values.gatewayApi.enabled }}
{{- if and .Values.gatewayApi.enabled (empty .Values.gatewayApi.existingGateway.name) }}
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:

@@ -5,7 +5,14 @@ metadata:
  name: {{ include "rustfs.fullname" . }}-route
spec:
  parentRefs:
{{- if .Values.gatewayApi.existingGateway.name }}
    - name: {{ .Values.gatewayApi.existingGateway.name }}
{{- if .Values.gatewayApi.existingGateway.namespace }}
      namespace: {{ .Values.gatewayApi.existingGateway.namespace }}
{{- end }}
{{- else }}
    - name: {{ include "rustfs.fullname" . }}-gateway
{{- end }}
  hostnames:
    - {{ .Values.gatewayApi.hostname }}
  rules:
@@ -1,32 +1,39 @@
{{- if .Values.mode.standalone.enabled }}
{{- with .Values.storageclass }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    helm.sh/resource-policy: keep
  name: {{ include "rustfs.fullname" . }}-data
  name: {{ include "rustfs.fullname" $ }}-data
  labels:
    {{- toYaml .Values.commonLabels | nindent 4 }}
    {{- toYaml $.Values.commonLabels | nindent 4 }}
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: {{ .Values.storageclass.name }}
  accessModes: [ ReadWriteOnce ]
  {{- with .name }}
  storageClassName: {{ . }}
  {{- end }}
  resources:
    requests:
      storage: {{ .Values.storageclass.dataStorageSize }}
      storage: {{ .dataStorageSize }}

---

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    helm.sh/resource-policy: keep
  name: {{ include "rustfs.fullname" . }}-logs
  name: {{ include "rustfs.fullname" $ }}-logs
  labels:
    {{- toYaml .Values.commonLabels | nindent 4 }}
    {{- toYaml $.Values.commonLabels | nindent 4 }}
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: {{ .Values.storageclass.name }}
  accessModes: [ ReadWriteOnce ]
  {{- with .name }}
  storageClassName: {{ . }}
  {{- end }}
  resources:
    requests:
      storage: {{ .Values.storageclass.logStorageSize }}
      storage: {{ .logStorageSize }}
{{- end }}
{{- end }}
@@ -1,4 +1,4 @@
{{- if and (or .Values.gatewayApi.enabled .Values.ingress.tls.enabled) (not .Values.ingress.tls.certManager.enabled) }}
{{- if and (or .Values.gatewayApi.enabled .Values.ingress.tls.enabled) (not (or .Values.ingress.tls.certManager.enabled .Values.gatewayApi.existingGateway.name)) }}
apiVersion: v1
kind: Secret
metadata:

@@ -140,6 +140,9 @@ gatewayApi:
  gatewayClass: traefik
  hostname: example.rustfs.com
  secretName: secret-tls
  existingGateway:
    name: ""
    namespace: ""

resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
@@ -269,6 +269,7 @@ async fn version_handler() -> impl IntoResponse {
/// - 200 OK with JSON body containing the console configuration if initialized.
/// - 500 Internal Server Error if configuration is not initialized.
#[instrument(fields(uri))]
#[allow(dead_code)]
async fn config_handler(uri: Uri, headers: HeaderMap) -> impl IntoResponse {
    // Get the scheme from the headers or use the URI scheme
    let scheme = headers
@@ -482,7 +483,6 @@ fn setup_console_middleware_stack(
    let mut app = Router::new()
        .route(FAVICON_PATH, get(static_handler))
        .route(&format!("{CONSOLE_PREFIX}/license"), get(license_handler))
        .route(&format!("{CONSOLE_PREFIX}/config.json"), get(config_handler))
        .route(&format!("{CONSOLE_PREFIX}/version"), get(version_handler))
        .route(&format!("{CONSOLE_PREFIX}{HEALTH_PREFIX}"), get(health_check).head(health_check))
        .nest(CONSOLE_PREFIX, Router::new().fallback_service(get(static_handler)))
@@ -14,21 +14,24 @@

use crate::admin::router::Operation;
use crate::auth::{check_key_valid, get_session_token};
use futures::stream::{FuturesUnordered, StreamExt};
use http::{HeaderMap, StatusCode};
use matchit::Params;
use rustfs_config::notify::{NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS};
use rustfs_config::{ENABLE_KEY, EnableState, MAX_ADMIN_REQUEST_BODY_SIZE};
use rustfs_targets::check_mqtt_broker_available;
use s3s::header::CONTENT_LENGTH;
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use s3s::{Body, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::future::Future;
use std::io::{Error, ErrorKind};
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Arc;
use tokio::net::lookup_host;
use tokio::time::{Duration, sleep};
use tracing::{Span, debug, error, info, warn};
use tokio::sync::Semaphore;
use tokio::time::{Duration, sleep, timeout};
use tracing::{Span, info, warn};
use url::Url;
#[derive(Debug, Deserialize)]
@@ -54,12 +57,34 @@ struct NotificationEndpointsResponse {
    notification_endpoints: Vec<NotificationEndpoint>,
}

// --- Helper Functions ---

async fn check_permissions(req: &S3Request<Body>) -> S3Result<()> {
    let Some(input_cred) = &req.credentials else {
        return Err(s3_error!(InvalidRequest, "credentials not found"));
    };
    check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
    Ok(())
}

fn get_notification_system() -> S3Result<Arc<rustfs_notify::NotificationSystem>> {
    rustfs_notify::notification_system().ok_or_else(|| s3_error!(InternalError, "notification system not initialized"))
}

fn build_response(status: StatusCode, body: Body, request_id: Option<&http::HeaderValue>) -> S3Response<(StatusCode, Body)> {
    let mut header = HeaderMap::new();
    header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
    if let Some(v) = request_id {
        header.insert("x-request-id", v.clone());
    }
    S3Response::with_headers((status, body), header)
}
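A self-contained analog of the helper extraction above, with the s3s types simplified to plain structs: shared pre-flight steps become small helpers so each operation body stays focused. All names here are illustrative, not the crate's real signatures:

use std::collections::HashMap;

struct Request {
    credentials: Option<String>,
    headers: HashMap<String, String>,
}

fn check_permissions(req: &Request) -> Result<(), String> {
    req.credentials.as_ref().map(|_| ()).ok_or_else(|| "credentials not found".to_string())
}

fn build_response(status: u16, body: String, request_id: Option<&String>) -> (u16, String, HashMap<String, String>) {
    let mut headers = HashMap::new();
    headers.insert("content-type".into(), "application/json".into());
    if let Some(v) = request_id {
        headers.insert("x-request-id".into(), v.clone()); // propagate the request id
    }
    (status, body, headers)
}

fn handler(req: &Request) -> Result<(u16, String, HashMap<String, String>), String> {
    check_permissions(req)?; // auth in one line instead of an inline block
    Ok(build_response(200, "{}".into(), req.headers.get("x-request-id")))
}

fn main() {
    let req = Request { credentials: Some("ak".into()), headers: HashMap::new() };
    assert_eq!(handler(&req).unwrap().0, 200);
}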
async fn retry_with_backoff<F, Fut, T>(mut operation: F, max_attempts: usize, base_delay: Duration) -> Result<T, Error>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Result<T, Error>>,
{
    assert!(max_attempts > 0, "max_attempts must be greater than 0");
    let mut attempts = 0;
    let mut delay = base_delay;
    let mut last_err = None;
@@ -71,13 +96,6 @@ where
            last_err = Some(e);
            attempts += 1;
            if attempts < max_attempts {
                warn!(
                    "Retry attempt {}/{} failed: {}. Retrying in {:?}",
                    attempts,
                    max_attempts,
                    last_err.as_ref().unwrap(),
                    delay
                );
                sleep(delay).await;
                delay = delay.saturating_mul(2);
            }
@@ -87,130 +105,73 @@ where
    Err(last_err.unwrap_or_else(|| Error::other("retry_with_backoff: unknown error")))
}

async fn retry_metadata(path: &str) -> Result<(), Error> {
    retry_with_backoff(|| async { tokio::fs::metadata(path).await.map(|_| ()) }, 3, Duration::from_millis(100)).await
}

async fn validate_queue_dir(queue_dir: &str) -> S3Result<()> {
    if !queue_dir.is_empty() {
        if !Path::new(queue_dir).is_absolute() {
            return Err(s3_error!(InvalidArgument, "queue_dir must be absolute path"));
        }

        if let Err(e) = retry_metadata(queue_dir).await {
            return match e.kind() {
                ErrorKind::NotFound => Err(s3_error!(InvalidArgument, "queue_dir does not exist")),
                ErrorKind::PermissionDenied => Err(s3_error!(InvalidArgument, "queue_dir exists but permission denied")),
                _ => Err(s3_error!(InvalidArgument, "failed to access queue_dir: {}", e)),
            };
        }
    }

    Ok(())
}

fn validate_cert_key_pair(cert: &Option<String>, key: &Option<String>) -> S3Result<()> {
    if cert.is_some() != key.is_some() {
        return Err(s3_error!(InvalidArgument, "client_cert and client_key must be specified as a pair"));
        retry_with_backoff(
            || async { tokio::fs::metadata(queue_dir).await.map(|_| ()) },
            3,
            Duration::from_millis(100),
        )
        .await
        .map_err(|e| match e.kind() {
            ErrorKind::NotFound => s3_error!(InvalidArgument, "queue_dir does not exist"),
            ErrorKind::PermissionDenied => s3_error!(InvalidArgument, "queue_dir exists but permission denied"),
            _ => s3_error!(InvalidArgument, "failed to access queue_dir: {}", e),
        })?;
    }
    Ok(())
}
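A minimal, self-contained sketch of calling a `retry_with_backoff`-style helper of the same shape as the one above: the closure is re-invoked on failure with exponentially growing delays. The flaky counter is illustrative (assumes the tokio crate):

use std::future::Future;
use std::io::Error;
use tokio::time::{sleep, Duration};

async fn retry_with_backoff<F, Fut, T>(mut op: F, max_attempts: usize, base_delay: Duration) -> Result<T, Error>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Result<T, Error>>,
{
    let mut delay = base_delay;
    for attempt in 1..=max_attempts {
        match op().await {
            Ok(v) => return Ok(v),
            Err(e) if attempt == max_attempts => return Err(e),
            Err(_) => {
                sleep(delay).await; // 100ms, 200ms, 400ms, ...
                delay = delay.saturating_mul(2);
            }
        }
    }
    unreachable!("loop always returns when max_attempts >= 1")
}

#[tokio::main]
async fn main() -> Result<(), Error> {
    let mut calls = 0;
    retry_with_backoff(
        || {
            calls += 1;
            let ok = calls >= 3; // fail twice, then succeed
            async move { if ok { Ok(()) } else { Err(Error::other("transient")) } }
        },
        5,
        Duration::from_millis(100),
    )
    .await
}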
/// Set (create or update) a notification target
// --- Operations ---

pub struct NotificationTarget {}
#[async_trait::async_trait]
impl Operation for NotificationTarget {
    async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let span = Span::current();
        let _enter = span.enter();
        // 1. Parse query parameters
        let (target_type, target_name) = extract_target_params(&params)?;

        // 2. Permission verification
        let Some(input_cred) = &req.credentials else {
            return Err(s3_error!(InvalidRequest, "credentials not found"));
        };
        let (_cred, _owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        check_permissions(&req).await?;
        let ns = get_notification_system()?;

        // 3. Get notification system instance
        let Some(ns) = rustfs_notify::notification_system() else {
            return Err(s3_error!(InternalError, "notification system not initialized"));
        };

        // 4. Parse the request body as KVS (key-value store)
        let mut input = req.input;
        let body = input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await.map_err(|e| {
        let body_bytes = input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await.map_err(|e| {
            warn!("failed to read request body: {:?}", e);
            s3_error!(InvalidRequest, "failed to read request body")
        })?;

        // 1. Get the allowed key range
        let allowed_keys: std::collections::HashSet<&str> = match target_type {
        let notification_body: NotificationTargetBody = serde_json::from_slice(&body_bytes)
            .map_err(|e| s3_error!(InvalidArgument, "invalid json body for target config: {}", e))?;

        let allowed_keys: HashSet<&str> = match target_type {
            NOTIFY_WEBHOOK_SUB_SYS => rustfs_config::notify::NOTIFY_WEBHOOK_KEYS.iter().cloned().collect(),
            NOTIFY_MQTT_SUB_SYS => rustfs_config::notify::NOTIFY_MQTT_KEYS.iter().cloned().collect(),
            _ => unreachable!(),
        };

        let notification_body: NotificationTargetBody = serde_json::from_slice(&body)
            .map_err(|e| s3_error!(InvalidArgument, "invalid json body for target config: {}", e))?;
        let kv_map: HashMap<&str, &str> = notification_body
            .key_values
            .iter()
            .map(|kv| (kv.key.as_str(), kv.value.as_str()))
            .collect();

        // 2. Filter and verify keys, and splice target_name
        let mut kvs_vec = Vec::new();
        let mut endpoint_val = None;
        let mut queue_dir_val = None;
        let mut client_cert_val = None;
        let mut client_key_val = None;
        let mut qos_val = None;
        let mut topic_val = String::new();

        for kv in notification_body.key_values.iter() {
            if !allowed_keys.contains(kv.key.as_str()) {
                return Err(s3_error!(
                    InvalidArgument,
                    "key '{}' not allowed for target type '{}'",
                    kv.key,
                    target_type
                ));
        // Validate keys
        for key in kv_map.keys() {
            if !allowed_keys.contains(key) {
                return Err(s3_error!(InvalidArgument, "key '{}' not allowed for target type '{}'", key, target_type));
            }
            if kv.key == "endpoint" {
                endpoint_val = Some(kv.value.clone());
            }

            if target_type == NOTIFY_MQTT_SUB_SYS {
                if kv.key == rustfs_config::MQTT_BROKER {
                    endpoint_val = Some(kv.value.clone());
                }
                if kv.key == rustfs_config::MQTT_TOPIC {
                    topic_val = kv.value.clone();
                }
            }

            if kv.key == "queue_dir" {
                queue_dir_val = Some(kv.value.clone());
            }
            if kv.key == "client_cert" {
                client_cert_val = Some(kv.value.clone());
            }
            if kv.key == "client_key" {
                client_key_val = Some(kv.value.clone());
            }
            if kv.key == "qos" {
                qos_val = Some(kv.value.clone());
            }

            kvs_vec.push(rustfs_ecstore::config::KV {
                key: kv.key.clone(),
                value: kv.value.clone(),
                hidden_if_empty: false,
            });
        }

        // Type-specific validation
        if target_type == NOTIFY_WEBHOOK_SUB_SYS {
            let endpoint = endpoint_val
                .clone()
            let endpoint = kv_map
                .get("endpoint")
                .ok_or_else(|| s3_error!(InvalidArgument, "endpoint is required"))?;
            let url = Url::parse(&endpoint).map_err(|e| s3_error!(InvalidArgument, "invalid endpoint url: {}", e))?;
            let url = Url::parse(endpoint).map_err(|e| s3_error!(InvalidArgument, "invalid endpoint url: {}", e))?;
            let host = url
                .host_str()
                .ok_or_else(|| s3_error!(InvalidArgument, "endpoint missing host"))?;
@@ -218,207 +179,147 @@ impl Operation for NotificationTarget {
                .port_or_known_default()
                .ok_or_else(|| s3_error!(InvalidArgument, "endpoint missing port"))?;
            let addr = format!("{host}:{port}");
            // First, try to parse as SocketAddr (IP:port)
            if addr.parse::<SocketAddr>().is_err() {
                // If not an IP:port, try DNS resolution
                if lookup_host(&addr).await.is_err() {
                    return Err(s3_error!(InvalidArgument, "invalid or unresolvable endpoint address"));
                }
            if addr.parse::<SocketAddr>().is_err() && lookup_host(&addr).await.is_err() {
                return Err(s3_error!(InvalidArgument, "invalid or unresolvable endpoint address"));
            }
            if let Some(queue_dir) = queue_dir_val.clone() {
                validate_queue_dir(&queue_dir).await?;
            if let Some(queue_dir) = kv_map.get("queue_dir") {
                validate_queue_dir(queue_dir).await?;
            }
            validate_cert_key_pair(&client_cert_val, &client_key_val)?;
        }
            if kv_map.contains_key("client_cert") != kv_map.contains_key("client_key") {
                return Err(s3_error!(InvalidArgument, "client_cert and client_key must be specified as a pair"));
            }
        } else if target_type == NOTIFY_MQTT_SUB_SYS {
            let endpoint = kv_map
                .get(rustfs_config::MQTT_BROKER)
                .ok_or_else(|| s3_error!(InvalidArgument, "broker endpoint is required"))?;
            let topic = kv_map
                .get(rustfs_config::MQTT_TOPIC)
                .ok_or_else(|| s3_error!(InvalidArgument, "topic is required"))?;
            check_mqtt_broker_available(endpoint, topic)
                .await
                .map_err(|e| s3_error!(InvalidArgument, "MQTT Broker unavailable: {}", e))?;

        if target_type == NOTIFY_MQTT_SUB_SYS {
            let endpoint = endpoint_val.ok_or_else(|| s3_error!(InvalidArgument, "broker endpoint is required"))?;
            if topic_val.is_empty() {
                return Err(s3_error!(InvalidArgument, "topic is required"));
            }
            // Check MQTT Broker availability
            if let Err(e) = check_mqtt_broker_available(&endpoint, &topic_val).await {
                return Err(s3_error!(InvalidArgument, "MQTT Broker unavailable: {}", e));
            }

            if let Some(queue_dir) = queue_dir_val {
                validate_queue_dir(&queue_dir).await?;
                if let Some(qos) = qos_val {
            if let Some(queue_dir) = kv_map.get("queue_dir") {
                validate_queue_dir(queue_dir).await?;
                if let Some(qos) = kv_map.get("qos") {
                    match qos.parse::<u8>() {
                        Ok(qos_int) if qos_int == 1 || qos_int == 2 => {}
                        Ok(0) => {
                            return Err(s3_error!(InvalidArgument, "qos should be 1 or 2 if queue_dir is set"));
                        }
                        _ => {
                            return Err(s3_error!(InvalidArgument, "qos must be an integer 0, 1, or 2"));
                        }
                        Ok(1) | Ok(2) => {}
                        Ok(0) => return Err(s3_error!(InvalidArgument, "qos should be 1 or 2 if queue_dir is set")),
                        _ => return Err(s3_error!(InvalidArgument, "qos must be an integer 0, 1, or 2")),
                    }
                }
            }
        }

        // 3. Add ENABLE_KEY
        let mut kvs_vec: Vec<_> = notification_body
            .key_values
            .into_iter()
            .map(|kv| rustfs_ecstore::config::KV {
                key: kv.key,
                value: kv.value,
                hidden_if_empty: false,
            })
            .collect();

        kvs_vec.push(rustfs_ecstore::config::KV {
            key: ENABLE_KEY.to_string(),
            value: EnableState::On.to_string(),
            hidden_if_empty: false,
        });

        let kvs = rustfs_ecstore::config::KVS(kvs_vec);

        // 5. Call notification system to set target configuration
        info!("Setting target config for type '{}', name '{}'", target_type, target_name);
        ns.set_target_config(target_type, target_name, kvs).await.map_err(|e| {
            error!("failed to set target config: {}", e);
            S3Error::with_message(S3ErrorCode::InternalError, format!("failed to set target config: {e}"))
        })?;
        ns.set_target_config(target_type, target_name, rustfs_ecstore::config::KVS(kvs_vec))
            .await
            .map_err(|e| s3_error!(InternalError, "failed to set target config: {}", e))?;

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        if let Some(v) = req.headers.get("x-request-id") {
            header.insert("x-request-id", v.clone());
        }
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
        Ok(build_response(StatusCode::OK, Body::empty(), req.headers.get("x-request-id")))
    }
}
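A minimal sketch of the `kv_map` refactor above: build a borrowed `HashMap<&str, &str>` view over the owned key/value pairs once, then do all validation through cheap map lookups instead of tracking a mutable flag variable per key. `Kv` and `validate` are illustrative names:

use std::collections::{HashMap, HashSet};

struct Kv {
    key: String,
    value: String,
}

fn validate(pairs: &[Kv], allowed: &HashSet<&str>) -> Result<(), String> {
    // Borrowed view: no cloning of the owned strings.
    let kv_map: HashMap<&str, &str> = pairs.iter().map(|kv| (kv.key.as_str(), kv.value.as_str())).collect();

    for key in kv_map.keys() {
        if !allowed.contains(key) {
            return Err(format!("key '{key}' not allowed"));
        }
    }
    // Lookups replace the old endpoint_val/queue_dir_val tracking variables.
    let endpoint = kv_map.get("endpoint").ok_or("endpoint is required")?;
    println!("validated endpoint {endpoint}");
    Ok(())
}

fn main() {
    let pairs = vec![Kv { key: "endpoint".into(), value: "http://10.0.0.1:3020".into() }];
    let allowed: HashSet<&str> = ["endpoint", "queue_dir"].into_iter().collect();
    assert!(validate(&pairs, &allowed).is_ok());
}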
/// Get a list of all active notification targets
pub struct ListNotificationTargets {}
#[async_trait::async_trait]
impl Operation for ListNotificationTargets {
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let span = Span::current();
        let _enter = span.enter();
        debug!("ListNotificationTargets call start request params: {:?}", req.uri.query());
        check_permissions(&req).await?;
        let ns = get_notification_system()?;

        // 1. Permission verification
        let Some(input_cred) = &req.credentials else {
            return Err(s3_error!(InvalidRequest, "credentials not found"));
        };
        let (_cred, _owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        let targets = ns.get_target_values().await;
        let target_count = targets.len();

        // 2. Get notification system instance
        let Some(ns) = rustfs_notify::notification_system() else {
            return Err(s3_error!(InternalError, "notification system not initialized"));
        };
        let semaphore = Arc::new(Semaphore::new(10));
        let mut futures = FuturesUnordered::new();

        // 3. Get the list of active targets
        let active_targets = ns.get_active_targets().await;

        debug!("ListNotificationTargets call found {} active targets", active_targets.len());
        let mut notification_endpoints = Vec::new();
        for target_id in active_targets.iter() {
            notification_endpoints.push(NotificationEndpoint {
                account_id: target_id.id.clone(),
                service: target_id.name.to_string(),
                status: "online".to_string(),
        for target in targets {
            let sem = Arc::clone(&semaphore);
            futures.push(async move {
                let _permit = sem.acquire().await;
                let status = match timeout(Duration::from_secs(3), target.is_active()).await {
                    Ok(Ok(true)) => "online",
                    _ => "offline",
                };
                NotificationEndpoint {
                    account_id: target.id().to_string(),
                    service: target.name().to_string(),
                    status: status.to_string(),
                }
            });
        }

        let response = NotificationEndpointsResponse { notification_endpoints };

        // 4. Serialize and return the result
        let data = serde_json::to_vec(&response).map_err(|e| {
            error!("Failed to serialize notification targets response: {:?}", response);
            S3Error::with_message(S3ErrorCode::InternalError, format!("failed to serialize targets: {e}"))
        })?;
        debug!("ListNotificationTargets call end, response data length: {}", data.len(),);
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        if let Some(v) = req.headers.get("x-request-id") {
            header.insert("x-request-id", v.clone());
        let mut notification_endpoints = Vec::with_capacity(target_count);
        while let Some(endpoint) = futures.next().await {
            notification_endpoints.push(endpoint);
        }
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))

        let data = serde_json::to_vec(&NotificationEndpointsResponse { notification_endpoints })
            .map_err(|e| s3_error!(InternalError, "failed to serialize targets: {}", e))?;

        Ok(build_response(StatusCode::OK, Body::from(data), req.headers.get("x-request-id")))
    }
}
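A minimal sketch of the fan-out pattern the new listing code uses: probe many targets concurrently, but cap in-flight probes with a `Semaphore` and bound each one with a `timeout` so one hung target cannot stall the whole listing. The sleep-based probe is a stand-in for `target.is_active()` (assumes the futures and tokio crates):

use futures::stream::{FuturesUnordered, StreamExt};
use std::sync::Arc;
use tokio::sync::Semaphore;
use tokio::time::{sleep, timeout, Duration};

#[tokio::main]
async fn main() {
    let semaphore = Arc::new(Semaphore::new(10)); // at most 10 concurrent probes
    let mut futures = FuturesUnordered::new();

    for id in 0..25u32 {
        let sem = Arc::clone(&semaphore);
        futures.push(async move {
            let _permit = sem.acquire().await.expect("semaphore closed"); // held until the probe finishes
            let probe = sleep(Duration::from_millis(u64::from(id))); // stand-in for target.is_active()
            match timeout(Duration::from_millis(10), probe).await {
                Ok(()) => (id, "online"),
                Err(_) => (id, "offline"), // timed out
            }
        });
    }

    // Results arrive in completion order, not submission order.
    while let Some((id, status)) = futures.next().await {
        println!("target {id}: {status}");
    }
}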
/// Get a list of ARNs for all active notification targets
pub struct ListTargetsArns {}
#[async_trait::async_trait]
impl Operation for ListTargetsArns {
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let span = Span::current();
        let _enter = span.enter();
        debug!("ListTargetsArns call start request params: {:?}", req.uri.query());
        check_permissions(&req).await?;
        let ns = get_notification_system()?;

        // 1. Permission verification
        let Some(input_cred) = &req.credentials else {
            return Err(s3_error!(InvalidRequest, "credentials not found"));
        };
        let (_cred, _owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;

        // 2. Get notification system instance
        let Some(ns) = rustfs_notify::notification_system() else {
            return Err(s3_error!(InternalError, "notification system not initialized"));
        };

        // 3. Get the list of active targets
        let active_targets = ns.get_active_targets().await;
        let region = req
            .region
            .clone()
            .ok_or_else(|| s3_error!(InvalidRequest, "region not found"))?;

        debug!("ListTargetsArns call found {} active targets", active_targets.len());
        let data_target_arn_list: Vec<_> = active_targets.iter().map(|id| id.to_arn(&region).to_string()).collect();

        let region = match req.region.clone() {
            Some(region) => region,
            None => return Err(s3_error!(InvalidRequest, "region not found")),
        };
        let mut data_target_arn_list = Vec::new();

        for target_id in active_targets.iter() {
            data_target_arn_list.push(target_id.to_arn(&region).to_string());
        }

        // 4. Serialize and return the result
        let data = serde_json::to_vec(&data_target_arn_list)
            .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("failed to serialize targets: {e}")))?;
        debug!("ListTargetsArns call end, response data length: {}", data.len(),);
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        if let Some(v) = req.headers.get("x-request-id") {
            header.insert("x-request-id", v.clone());
        }
        Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
            .map_err(|e| s3_error!(InternalError, "failed to serialize targets: {}", e))?;

        Ok(build_response(StatusCode::OK, Body::from(data), req.headers.get("x-request-id")))
    }
}
/// Delete a specified notification target
pub struct RemoveNotificationTarget {}
#[async_trait::async_trait]
impl Operation for RemoveNotificationTarget {
    async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        let span = Span::current();
        let _enter = span.enter();
        // 1. Parse query parameters
        let (target_type, target_name) = extract_target_params(&params)?;

        // 2. Permission verification
        let Some(input_cred) = &req.credentials else {
            return Err(s3_error!(InvalidRequest, "credentials not found"));
        };
        let (_cred, _owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        check_permissions(&req).await?;
        let ns = get_notification_system()?;

        // 3. Get notification system instance
        let Some(ns) = rustfs_notify::notification_system() else {
            return Err(s3_error!(InternalError, "notification system not initialized"));
        };

        // 4. Call notification system to remove target configuration
        info!("Removing target config for type '{}', name '{}'", target_type, target_name);
        ns.remove_target_config(target_type, target_name).await.map_err(|e| {
            error!("failed to remove target config: {}", e);
            S3Error::with_message(S3ErrorCode::InternalError, format!("failed to remove target config: {e}"))
        })?;
        ns.remove_target_config(target_type, target_name)
            .await
            .map_err(|e| s3_error!(InternalError, "failed to remove target config: {}", e))?;

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        if let Some(v) = req.headers.get("x-request-id") {
            header.insert("x-request-id", v.clone());
        }
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
        Ok(build_response(StatusCode::OK, Body::empty(), req.headers.get("x-request-id")))
    }
}

@@ -433,7 +334,6 @@ fn extract_target_params<'a>(params: &'a Params<'_, '_>) -> S3Result<(&'a str, &
    if target_type != NOTIFY_WEBHOOK_SUB_SYS && target_type != NOTIFY_MQTT_SUB_SYS {
        return Err(s3_error!(InvalidArgument, "unsupported target type: '{}'", target_type));
    }

    let target_name = extract_param(params, "target_name")?;
    Ok((target_type, target_name))
}

@@ -84,6 +84,7 @@ where
{
    fn is_match(&self, method: &Method, uri: &Uri, headers: &HeaderMap, _: &mut Extensions) -> bool {
        let path = uri.path();

        // Profiling endpoints
        if method == Method::GET && (path == PROFILE_CPU_PATH || path == PROFILE_MEMORY_PATH) {
            return true;
@@ -150,6 +151,8 @@ where
    }

    async fn call(&self, req: S3Request<Body>) -> S3Result<S3Response<Body>> {
        // Console requests should be handled by the console router first (including OPTIONS)
        // Console has its own CORS layer configured
        if self.console_enabled && is_console_path(req.uri.path()) {
            if let Some(console_router) = &self.console_router {
                let mut console_router = console_router.clone();
@@ -164,11 +167,14 @@ where
        }

        let uri = format!("{}|{}", &req.method, req.uri.path());

        if let Ok(mat) = self.router.at(&uri) {
            let op: &T = mat.value;
            let mut resp = op.call(req, mat.params).await?;
            resp.status = Some(resp.output.0);
            return Ok(resp.map_output(|x| x.1));
            let response = resp.map_output(|x| x.1);

            return Ok(response);
        }

        Err(s3_error!(NotImplemented))
40 rustfs/src/server/cors.rs Normal file
@@ -0,0 +1,40 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! CORS (Cross-Origin Resource Sharing) header name constants.
//!
//! This module provides centralized constants for CORS-related HTTP header names.
//! The http crate doesn't provide pre-defined constants for CORS headers,
//! so we define them here for type safety and maintainability.

/// CORS response header names
pub mod response {
    pub const ACCESS_CONTROL_ALLOW_ORIGIN: &str = "access-control-allow-origin";
    pub const ACCESS_CONTROL_ALLOW_METHODS: &str = "access-control-allow-methods";
    pub const ACCESS_CONTROL_ALLOW_HEADERS: &str = "access-control-allow-headers";
    pub const ACCESS_CONTROL_EXPOSE_HEADERS: &str = "access-control-expose-headers";
    pub const ACCESS_CONTROL_ALLOW_CREDENTIALS: &str = "access-control-allow-credentials";
    pub const ACCESS_CONTROL_MAX_AGE: &str = "access-control-max-age";
}

/// CORS request header names
pub mod request {
    pub const ACCESS_CONTROL_REQUEST_METHOD: &str = "access-control-request-method";
    pub const ACCESS_CONTROL_REQUEST_HEADERS: &str = "access-control-request-headers";
}

/// Standard HTTP header names used in CORS processing
pub mod standard {
    pub use http::header::{ORIGIN, VARY};
}
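A minimal sketch of how string constants like these keep header handling consistent (module paths shortened for illustration; the real module lives at rustfs/src/server/cors.rs):

mod cors {
    pub mod response {
        pub const ACCESS_CONTROL_ALLOW_ORIGIN: &str = "access-control-allow-origin";
    }
    pub mod request {
        pub const ACCESS_CONTROL_REQUEST_METHOD: &str = "access-control-request-method";
    }
}

fn main() {
    // One definition site means a typo like "access-control-alow-origin" can't
    // silently diverge between the code that writes headers and the code that reads them.
    let mut headers: Vec<(String, String)> = Vec::new();
    headers.push((cors::response::ACCESS_CONTROL_ALLOW_ORIGIN.to_string(), "*".to_string()));
    assert!(headers.iter().any(|(k, _)| k == cors::response::ACCESS_CONTROL_ALLOW_ORIGIN));
    let _ = cors::request::ACCESS_CONTROL_REQUEST_METHOD;
}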
@@ -17,7 +17,11 @@ use super::compress::{CompressionConfig, CompressionPredicate};
|
||||
use crate::admin;
|
||||
use crate::auth::IAMAuth;
|
||||
use crate::config;
|
||||
use crate::server::{ReadinessGateLayer, RemoteAddr, ServiceState, ServiceStateManager, hybrid::hybrid, layer::RedirectLayer};
|
||||
use crate::server::{
|
||||
ReadinessGateLayer, RemoteAddr, ServiceState, ServiceStateManager,
|
||||
hybrid::hybrid,
|
||||
layer::{ConditionalCorsLayer, RedirectLayer},
|
||||
};
|
||||
use crate::storage;
|
||||
use crate::storage::tonic_service::make_server;
|
||||
use bytes::Bytes;
|
||||
@@ -30,9 +34,6 @@ use hyper_util::{
|
||||
};
|
||||
use metrics::{counter, histogram};
|
||||
use rustfs_common::GlobalReadiness;
|
||||
#[cfg(not(target_os = "openbsd"))]
|
||||
use rustfs_config::{MI_B, RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
|
||||
#[cfg(target_os = "openbsd")]
|
||||
use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
|
||||
use rustfs_ecstore::rpc::{TONIC_RPC_PREFIX, verify_rpc_signature};
|
||||
use rustfs_protos::proto_gen::node_service::node_service_server::NodeServiceServer;
|
||||
@@ -51,70 +52,10 @@ use tower::ServiceBuilder;
|
||||
use tower_http::add_extension::AddExtensionLayer;
|
||||
use tower_http::catch_panic::CatchPanicLayer;
|
||||
use tower_http::compression::CompressionLayer;
|
||||
use tower_http::cors::{AllowOrigin, Any, CorsLayer};
|
||||
use tower_http::request_id::{MakeRequestUuid, PropagateRequestIdLayer, SetRequestIdLayer};
|
||||
use tower_http::trace::TraceLayer;
|
||||
use tracing::{Span, debug, error, info, instrument, warn};

/// Parse CORS allowed origins from configuration
fn parse_cors_origins(origins: Option<&String>) -> CorsLayer {
    use http::Method;

    let cors_layer = CorsLayer::new()
        .allow_methods([
            Method::GET,
            Method::POST,
            Method::PUT,
            Method::DELETE,
            Method::HEAD,
            Method::OPTIONS,
        ])
        .allow_headers(Any);

    match origins {
        Some(origins_str) if origins_str == "*" => cors_layer.allow_origin(Any).expose_headers(Any),
        Some(origins_str) => {
            let origins: Vec<&str> = origins_str.split(',').map(|s| s.trim()).collect();
            if origins.is_empty() {
                warn!("Empty CORS origins provided, using permissive CORS");
                cors_layer.allow_origin(Any).expose_headers(Any)
            } else {
                // Parse origins with proper error handling
                let mut valid_origins = Vec::new();
                for origin in origins {
                    match origin.parse::<http::HeaderValue>() {
                        Ok(header_value) => {
                            valid_origins.push(header_value);
                        }
                        Err(e) => {
                            warn!("Invalid CORS origin '{}': {}", origin, e);
                        }
                    }
                }

                if valid_origins.is_empty() {
                    warn!("No valid CORS origins found, using permissive CORS");
                    cors_layer.allow_origin(Any).expose_headers(Any)
                } else {
                    info!("Endpoint CORS origins configured: {:?}", valid_origins);
                    cors_layer.allow_origin(AllowOrigin::list(valid_origins)).expose_headers(Any)
                }
            }
        }
        None => {
            debug!("No CORS origins configured for endpoint, using permissive CORS");
            cors_layer.allow_origin(Any).expose_headers(Any)
        }
    }
}

fn get_cors_allowed_origins() -> String {
    std::env::var(rustfs_config::ENV_CORS_ALLOWED_ORIGINS)
        .unwrap_or_else(|_| rustfs_config::DEFAULT_CORS_ALLOWED_ORIGINS.to_string())
        .parse::<String>()
        .unwrap_or(rustfs_config::DEFAULT_CONSOLE_CORS_ALLOWED_ORIGINS.to_string())
}

pub async fn start_http_server(
    opt: &config::Opt,
    worker_state_manager: ServiceStateManager,
@@ -276,14 +217,6 @@ pub async fn start_http_server(
    let (shutdown_tx, mut shutdown_rx) = tokio::sync::broadcast::channel(1);
    let shutdown_tx_clone = shutdown_tx.clone();

    // Capture CORS configuration for the server loop
    let cors_allowed_origins = get_cors_allowed_origins();
    let cors_allowed_origins = if cors_allowed_origins.is_empty() {
        None
    } else {
        Some(cors_allowed_origins)
    };

    // Create compression configuration from environment variables
    let compression_config = CompressionConfig::from_env();
    if compression_config.enabled {
@@ -297,8 +230,10 @@ pub async fn start_http_server(

    let is_console = opt.console_enable;
    tokio::spawn(async move {
        // Create CORS layer inside the server loop closure
        let cors_layer = parse_cors_origins(cors_allowed_origins.as_ref());
        // Note: CORS layer is removed from global middleware stack
        // - S3 API CORS is handled by bucket-level CORS configuration in apply_cors_headers()
        // - Console CORS is handled by its own cors_layer in setup_console_middleware_stack()
        // This ensures S3 API CORS behavior matches AWS S3 specification

        #[cfg(unix)]
        let (mut sigterm_inner, mut sigint_inner) = {
@@ -379,11 +314,14 @@ pub async fn start_http_server(

    // Enable TCP Keepalive to detect dead clients (e.g. power loss)
    // Idle: 10s, Interval: 5s, Retries: 3
    let mut ka = TcpKeepalive::new().with_time(Duration::from_secs(10));
    #[cfg(target_os = "openbsd")]
    let ka = TcpKeepalive::new().with_time(Duration::from_secs(10));

    #[cfg(not(target_os = "openbsd"))]
    {
        ka = ka.with_interval(Duration::from_secs(5)).with_retries(3);
    }
    let ka = TcpKeepalive::new()
        .with_time(Duration::from_secs(10))
        .with_interval(Duration::from_secs(5))
        .with_retries(3);

    if let Err(err) = socket_ref.set_tcp_keepalive(&ka) {
        warn!(?err, "Failed to set TCP_KEEPALIVE");
@@ -392,19 +330,19 @@ pub async fn start_http_server(
    if let Err(err) = socket_ref.set_tcp_nodelay(true) {
        warn!(?err, "Failed to set TCP_NODELAY");
    }
    #[cfg(not(any(target_os = "openbsd")))]
    if let Err(err) = socket_ref.set_recv_buffer_size(4 * MI_B) {

    #[cfg(not(target_os = "openbsd"))]
    if let Err(err) = socket_ref.set_recv_buffer_size(4 * rustfs_config::MI_B) {
        warn!(?err, "Failed to set recv_buffer_size");
    }
    #[cfg(not(any(target_os = "openbsd")))]
    if let Err(err) = socket_ref.set_send_buffer_size(4 * MI_B) {
    #[cfg(not(target_os = "openbsd"))]
    if let Err(err) = socket_ref.set_send_buffer_size(4 * rustfs_config::MI_B) {
        warn!(?err, "Failed to set send_buffer_size");
    }

    let connection_ctx = ConnectionContext {
        http_server: http_server.clone(),
        s3_service: s3_service.clone(),
        cors_layer: cors_layer.clone(),
        compression_config: compression_config.clone(),
        is_console,
        readiness: readiness.clone(),
@@ -520,7 +458,6 @@ async fn setup_tls_acceptor(tls_path: &str) -> Result<Option<TlsAcceptor>> {
struct ConnectionContext {
    http_server: Arc<ConnBuilder<TokioExecutor>>,
    s3_service: S3Service,
    cors_layer: CorsLayer,
    compression_config: CompressionConfig,
    is_console: bool,
    readiness: Arc<GlobalReadiness>,
@@ -545,7 +482,6 @@ fn process_connection(
    let ConnectionContext {
        http_server,
        s3_service,
        cors_layer,
        compression_config,
        is_console,
        readiness,
@@ -559,7 +495,7 @@ fn process_connection(
    let remote_addr = match socket.peer_addr() {
        Ok(addr) => Some(RemoteAddr(addr)),
        Err(e) => {
            tracing::warn!(
            warn!(
                error = %e,
                "Failed to obtain peer address; policy evaluation may fall back to a default source IP"
            );
@@ -628,10 +564,15 @@ fn process_connection(
            }),
        )
        .layer(PropagateRequestIdLayer::x_request_id())
        .layer(cors_layer)
        // Compress responses based on whitelist configuration
        // Only compresses when enabled and matches configured extensions/MIME types
        .layer(CompressionLayer::new().compress_when(CompressionPredicate::new(compression_config)))
        // Conditional CORS layer: only applies to S3 API requests (not Admin, not Console)
        // Admin has its own CORS handling in router.rs
        // Console has its own CORS layer in setup_console_middleware_stack()
        // S3 API uses this system default CORS (RUSTFS_CORS_ALLOWED_ORIGINS)
        // Bucket-level CORS takes precedence when configured (handled in router.rs for OPTIONS, and in ecfs.rs for actual requests)
        .layer(ConditionalCorsLayer::new())
        .option_layer(if is_console { Some(RedirectLayer) } else { None })
        .service(service);

@@ -752,17 +693,18 @@ fn get_listen_backlog() -> i32 {
}

// For macOS and BSD variants use the syscall way of getting the connection queue length.
#[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "netbsd", target_os = "openbsd"))]
// NetBSD has no somaxconn-like kernel state.
#[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "openbsd"))]
#[allow(unsafe_code)]
fn get_listen_backlog() -> i32 {
    const DEFAULT_BACKLOG: i32 = 1024;

    #[cfg(target_os = "openbsd")]
    let mut name = [libc::CTL_KERN, libc::KERN_SOMAXCONN];
    #[cfg(any(target_os = "netbsd", target_os = "macos", target_os = "freebsd"))]
    #[cfg(any(target_os = "macos", target_os = "freebsd"))]
    let mut name = [libc::CTL_KERN, libc::KERN_IPC, libc::KIPC_SOMAXCONN];
    let mut buf = [0; 1];
    let mut buf_len = std::mem::size_of_val(&buf);
    let mut buf_len = size_of_val(&buf);

    if unsafe {
        libc::sysctl(
@@ -781,14 +723,8 @@ fn get_listen_backlog() -> i32 {
    buf[0]
}

// Fallback for Windows and other operating systems
#[cfg(not(any(
    target_os = "linux",
    target_os = "macos",
    target_os = "freebsd",
    target_os = "netbsd",
    target_os = "openbsd"
)))]
// Fallback for Windows, NetBSD and other operating systems.
#[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "freebsd", target_os = "openbsd")))]
fn get_listen_backlog() -> i32 {
    const DEFAULT_BACKLOG: i32 = 1024;
    DEFAULT_BACKLOG

@@ -12,14 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::admin::console::is_console_path;
use crate::server::cors;
use crate::server::hybrid::HybridBody;
use http::{Request as HttpRequest, Response, StatusCode};
use crate::server::{ADMIN_PREFIX, RPC_PREFIX};
use crate::storage::ecfs;
use http::{HeaderMap, HeaderValue, Method, Request as HttpRequest, Response, StatusCode};
use hyper::body::Incoming;
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use tower::{Layer, Service};
use tracing::debug;
use tracing::{debug, info};

/// Redirect layer that redirects browser requests to the console
#[derive(Clone)]
@@ -89,3 +94,173 @@
        Box::pin(async move { inner.call(req).await.map_err(Into::into) })
    }
}

/// Conditional CORS layer that only applies to S3 API requests
/// (not Admin, not Console, not RPC)
#[derive(Clone)]
pub struct ConditionalCorsLayer {
    cors_origins: Option<String>,
}

impl ConditionalCorsLayer {
    pub fn new() -> Self {
        let cors_origins = std::env::var("RUSTFS_CORS_ALLOWED_ORIGINS").ok().filter(|s| !s.is_empty());
        Self { cors_origins }
    }

    /// Exact paths that should be excluded from being treated as S3 paths.
    const EXCLUDED_EXACT_PATHS: &'static [&'static str] = &["/health", "/profile/cpu", "/profile/memory"];

    fn is_s3_path(path: &str) -> bool {
        // Exclude Admin, Console, RPC, and configured special paths
        !path.starts_with(ADMIN_PREFIX)
            && !path.starts_with(RPC_PREFIX)
            && !is_console_path(path)
            && !Self::EXCLUDED_EXACT_PATHS.contains(&path)
    }

    fn apply_cors_headers(&self, request_headers: &HeaderMap, response_headers: &mut HeaderMap) {
        let origin = request_headers
            .get(cors::standard::ORIGIN)
            .and_then(|v| v.to_str().ok())
            .map(|s| s.to_string());

        let allowed_origin = match (origin, &self.cors_origins) {
            (Some(orig), Some(config)) if config == "*" => Some(orig),
            (Some(orig), Some(config)) => {
                let origins: Vec<&str> = config.split(',').map(|s| s.trim()).collect();
                if origins.contains(&orig.as_str()) { Some(orig) } else { None }
            }
            (Some(orig), None) => Some(orig), // Default: allow all if not configured
            _ => None,
        };

        // Track whether we're using a specific origin (not wildcard)
        let using_specific_origin = if let Some(origin) = &allowed_origin {
            if let Ok(header_value) = HeaderValue::from_str(origin) {
                response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_ORIGIN, header_value);
                true // Using specific origin, credentials allowed
            } else {
                false
            }
        } else {
            false
        };

        // Allow all methods by default (S3-compatible set)
        response_headers.insert(
            cors::response::ACCESS_CONTROL_ALLOW_METHODS,
            HeaderValue::from_static("GET, POST, PUT, DELETE, OPTIONS, HEAD"),
        );

        // Allow all headers by default
        response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_HEADERS, HeaderValue::from_static("*"));

        // Expose common headers
        response_headers.insert(
            cors::response::ACCESS_CONTROL_EXPOSE_HEADERS,
            HeaderValue::from_static("x-request-id, content-type, content-length, etag"),
        );

        // Only set credentials when using a specific origin (not wildcard)
        // CORS spec: credentials cannot be used with wildcard origins
        if using_specific_origin {
            response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_CREDENTIALS, HeaderValue::from_static("true"));
        }
    }
}
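
The origin matching above reduces to a small decision table: a "*" config echoes the caller's origin, a comma-separated list is matched exactly after trimming, and an unset RUSTFS_CORS_ALLOWED_ORIGINS allows every origin. A standalone sketch, not the commit's code, with a hypothetical function name:

/// Hypothetical standalone mirror of the origin check in apply_cors_headers above.
fn allowed_origin(origin: Option<&str>, config: Option<&str>) -> Option<String> {
    match (origin, config) {
        // Wildcard config: echo the caller's origin back.
        (Some(o), Some("*")) => Some(o.to_string()),
        // Comma-separated allow-list: exact, trimmed matches only.
        (Some(o), Some(list)) => list.split(',').map(str::trim).any(|a| a == o).then(|| o.to_string()),
        // No RUSTFS_CORS_ALLOWED_ORIGINS set: allow every origin.
        (Some(o), None) => Some(o.to_string()),
        _ => None,
    }
}

fn main() {
    let cfg = Some("https://a.com, https://b.com");
    assert_eq!(allowed_origin(Some("https://b.com"), cfg).as_deref(), Some("https://b.com"));
    assert_eq!(allowed_origin(Some("https://c.com"), cfg), None);
    assert!(allowed_origin(Some("https://c.com"), None).is_some());
}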

impl Default for ConditionalCorsLayer {
    fn default() -> Self {
        Self::new()
    }
}

impl<S> Layer<S> for ConditionalCorsLayer {
    type Service = ConditionalCorsService<S>;

    fn layer(&self, inner: S) -> Self::Service {
        ConditionalCorsService {
            inner,
            cors_origins: Arc::new(self.cors_origins.clone()),
        }
    }
}

/// Service implementation for conditional CORS
#[derive(Clone)]
pub struct ConditionalCorsService<S> {
    inner: S,
    cors_origins: Arc<Option<String>>,
}

impl<S, ResBody> Service<HttpRequest<Incoming>> for ConditionalCorsService<S>
where
    S: Service<HttpRequest<Incoming>, Response = Response<ResBody>> + Clone + Send + 'static,
    S::Future: Send + 'static,
    S::Error: Into<Box<dyn std::error::Error + Send + Sync>> + Send + 'static,
    ResBody: Default + Send + 'static,
{
    type Response = Response<ResBody>;
    type Error = Box<dyn std::error::Error + Send + Sync>;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx).map_err(Into::into)
    }

    fn call(&mut self, req: HttpRequest<Incoming>) -> Self::Future {
        let path = req.uri().path().to_string();
        let method = req.method().clone();
        let request_headers = req.headers().clone();
        let cors_origins = self.cors_origins.clone();
        // Handle OPTIONS preflight requests - return response directly without calling handler
        if method == Method::OPTIONS && request_headers.contains_key(cors::standard::ORIGIN) {
            info!("OPTIONS preflight request for path: {}", path);

            let path_trimmed = path.trim_start_matches('/');
            let bucket = path_trimmed.split('/').next().unwrap_or("").to_string(); // virtual host style?
            let method_clone = method.clone();
            let request_headers_clone = request_headers.clone();

            return Box::pin(async move {
                let mut response = Response::builder().status(StatusCode::OK).body(ResBody::default()).unwrap();

                if ConditionalCorsLayer::is_s3_path(&path)
                    && !bucket.is_empty()
                    && cors_origins.is_some()
                    && let Some(cors_headers) = ecfs::apply_cors_headers(&bucket, &method_clone, &request_headers_clone).await
                {
                    for (key, value) in cors_headers.iter() {
                        response.headers_mut().insert(key, value.clone());
                    }
                    return Ok(response);
                }

                let cors_layer = ConditionalCorsLayer {
                    cors_origins: (*cors_origins).clone(),
                };
                cors_layer.apply_cors_headers(&request_headers_clone, response.headers_mut());

                Ok(response)
            });
        }

        let mut inner = self.inner.clone();
        Box::pin(async move {
            let mut response = inner.call(req).await.map_err(Into::into)?;

            // Apply CORS headers only to S3 API requests (non-OPTIONS)
            if request_headers.contains_key(cors::standard::ORIGIN)
                && !response.headers().contains_key(cors::response::ACCESS_CONTROL_ALLOW_ORIGIN)
            {
                let cors_layer = ConditionalCorsLayer {
                    cors_origins: (*cors_origins).clone(),
                };
                cors_layer.apply_cors_headers(&request_headers, response.headers_mut());
            }

            Ok(response)
        })
    }
}
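
A composition sketch, not part of the commit: as a tower Layer, this slots into a ServiceBuilder stack like the one earlier in this diff. Layers wrap bottom-up, so the CORS service sees the request first and can answer an OPTIONS preflight without ever calling the inner service. The function and parameter names below are hypothetical.

use tower::ServiceBuilder;

// Hypothetical helper: wrap any inner service with the conditional CORS layer.
fn with_cors<S>(inner: S) -> ConditionalCorsService<S> {
    ServiceBuilder::new()
        .layer(ConditionalCorsLayer::new())
        .service(inner)
}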

@@ -15,6 +15,7 @@
mod audit;
mod cert;
mod compress;
pub mod cors;
mod event;
mod http;
mod hybrid;

@@ -342,7 +342,7 @@ impl S3Access for FS {
        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
        req_info.bucket = Some(req.input.bucket.clone());

        authorize_request(req, Action::S3Action(S3Action::PutBucketCorsAction)).await
        authorize_request(req, Action::S3Action(S3Action::DeleteBucketCorsAction)).await
    }

    /// Checks whether the DeleteBucketEncryption request has access to the resources.

@@ -18,6 +18,7 @@ use crate::config::workload_profiles::{
};
use crate::error::ApiError;
use crate::server::RemoteAddr;
use crate::server::cors;
use crate::storage::concurrency::{
    CachedGetObject, ConcurrencyManager, GetObjectGuard, get_concurrency_aware_buffer_size, get_concurrency_manager,
};
@@ -39,7 +40,7 @@ use datafusion::arrow::{
};
use futures::StreamExt;
use http::{HeaderMap, StatusCode};
use metrics::counter;
use metrics::{counter, histogram};
use rustfs_ecstore::bucket::quota::checker::QuotaChecker;
use rustfs_ecstore::{
    bucket::{
@@ -48,8 +49,8 @@ use rustfs_ecstore::{
        lifecycle::{self, Lifecycle, TransitionOptions},
    },
    metadata::{
        BUCKET_LIFECYCLE_CONFIG, BUCKET_NOTIFICATION_CONFIG, BUCKET_POLICY_CONFIG, BUCKET_REPLICATION_CONFIG,
        BUCKET_SSECONFIG, BUCKET_TAGGING_CONFIG, BUCKET_VERSIONING_CONFIG, OBJECT_LOCK_CONFIG,
        BUCKET_CORS_CONFIG, BUCKET_LIFECYCLE_CONFIG, BUCKET_NOTIFICATION_CONFIG, BUCKET_POLICY_CONFIG,
        BUCKET_REPLICATION_CONFIG, BUCKET_SSECONFIG, BUCKET_TAGGING_CONFIG, BUCKET_VERSIONING_CONFIG, OBJECT_LOCK_CONFIG,
    },
    metadata_sys,
    metadata_sys::get_replication_config,
@@ -116,10 +117,9 @@ use rustfs_utils::{
        AMZ_BUCKET_REPLICATION_STATUS, AMZ_CHECKSUM_MODE, AMZ_CHECKSUM_TYPE,
        headers::{
            AMZ_DECODED_CONTENT_LENGTH, AMZ_OBJECT_TAGGING, AMZ_RESTORE_EXPIRY_DAYS, AMZ_RESTORE_REQUEST_DATE,
            RESERVED_METADATA_PREFIX_LOWER,
            RESERVED_METADATA_PREFIX, RESERVED_METADATA_PREFIX_LOWER,
        },
    },
    obj::extract_user_defined_metadata,
    path::{is_dir_object, path_join_buf},
};
use rustfs_zip::CompressionFormat;
@@ -782,6 +782,25 @@ impl FS {
        let _ = helper.complete(&result);
        result
    }

    /// Auxiliary function: parse version ID
    ///
    /// # Arguments
    /// * `version_id` - An optional string representing the version ID to be parsed.
    ///
    /// # Returns
    /// * `S3Result<Option<Uuid>>` - A result containing an optional UUID if parsing is successful, or an S3 error if parsing fails.
    fn parse_version_id(&self, version_id: Option<String>) -> S3Result<Option<Uuid>> {
        if let Some(vid) = version_id {
            let uuid = Uuid::parse_str(&vid).map_err(|e| {
                error!("Invalid version ID: {}", e);
                s3_error!(InvalidArgument, "Invalid version ID")
            })?;
            Ok(Some(uuid))
        } else {
            Ok(None)
        }
    }
}
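
The helper's contract in isolation: a missing ID passes through as None, a well-formed UUID parses, anything else becomes an InvalidArgument error. A hypothetical free-function sketch of the same logic (not the commit's code), using Option::transpose to flatten the Option-of-Result:

use uuid::Uuid;

// Hypothetical mirror of parse_version_id's success and error paths.
fn parse_version_id(version_id: Option<String>) -> Result<Option<Uuid>, String> {
    version_id
        .map(|vid| Uuid::parse_str(&vid).map_err(|e| format!("Invalid version ID: {e}")))
        .transpose()
}

fn main() {
    assert_eq!(parse_version_id(None), Ok(None));
    assert!(parse_version_id(Some("not-a-uuid".into())).is_err());
    assert!(parse_version_id(Some("00000000-0000-0000-0000-000000000000".into())).is_ok());
}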

/// Helper function to get store and validate bucket exists
@@ -799,6 +818,205 @@ async fn get_validated_store(bucket: &str) -> S3Result<Arc<rustfs_ecstore::store
    Ok(store)
}

/// Quick check if CORS processing is needed (lightweight check for Origin header)
/// This avoids unnecessary function calls for non-CORS requests
#[inline]
fn needs_cors_processing(headers: &HeaderMap) -> bool {
    headers.contains_key(cors::standard::ORIGIN)
}

/// Apply CORS headers to response based on bucket CORS configuration and request origin
///
/// This function:
/// 1. Reads the Origin header from the request
/// 2. Retrieves the bucket's CORS configuration
/// 3. Matches the origin against CORS rules
/// 4. Validates AllowedHeaders if request headers are present
/// 5. Returns headers to add to the response if a match is found
///
/// Note: This function should only be called if `needs_cors_processing()` returns true
/// to avoid unnecessary overhead for non-CORS requests.
pub(crate) async fn apply_cors_headers(bucket: &str, method: &http::Method, headers: &HeaderMap) -> Option<HeaderMap> {
    use http::HeaderValue;

    // Get Origin header from request
    let origin = headers.get(cors::standard::ORIGIN)?.to_str().ok()?;

    // Get CORS configuration for the bucket
    let cors_config = match metadata_sys::get_cors_config(bucket).await {
        Ok((config, _)) => config,
        Err(_) => return None, // No CORS config, no headers to add
    };

    // Early return if no CORS rules configured
    if cors_config.cors_rules.is_empty() {
        return None;
    }

    // Check if method is supported and get its string representation
    const SUPPORTED_METHODS: &[&str] = &["GET", "PUT", "POST", "DELETE", "HEAD", "OPTIONS"];
    let method_str = method.as_str();
    if !SUPPORTED_METHODS.contains(&method_str) {
        return None;
    }

    // For OPTIONS (preflight) requests, check Access-Control-Request-Method
    let is_preflight = method == http::Method::OPTIONS;
    let requested_method = if is_preflight {
        headers
            .get(cors::request::ACCESS_CONTROL_REQUEST_METHOD)
            .and_then(|v| v.to_str().ok())
            .unwrap_or(method_str)
    } else {
        method_str
    };

    // Get requested headers from preflight request
    let requested_headers = if is_preflight {
        headers
            .get(cors::request::ACCESS_CONTROL_REQUEST_HEADERS)
            .and_then(|v| v.to_str().ok())
            .map(|h| h.split(',').map(|s| s.trim().to_lowercase()).collect::<Vec<_>>())
    } else {
        None
    };

    // Find matching CORS rule
    for rule in cors_config.cors_rules.iter() {
        // Check if origin matches
        let origin_matches = rule.allowed_origins.iter().any(|allowed_origin| {
            if allowed_origin == "*" {
                true
            } else {
                // Exact match or pattern match (support wildcards like https://*.example.com)
                allowed_origin == origin || matches_origin_pattern(allowed_origin, origin)
            }
        });

        if !origin_matches {
            continue;
        }

        // Check if method is allowed
        let method_allowed = rule
            .allowed_methods
            .iter()
            .any(|allowed_method| allowed_method.as_str() == requested_method);

        if !method_allowed {
            continue;
        }

        // Validate AllowedHeaders if present in the request
        if let Some(ref req_headers) = requested_headers {
            if let Some(ref allowed_headers) = rule.allowed_headers {
                // Check if all requested headers are allowed
                let all_headers_allowed = req_headers.iter().all(|req_header| {
                    allowed_headers.iter().any(|allowed_header| {
                        let allowed_lower = allowed_header.to_lowercase();
                        // "*" allows all headers, or exact match
                        allowed_lower == "*" || allowed_lower == *req_header
                    })
                });

                if !all_headers_allowed {
                    // If not all headers are allowed, skip this rule
                    continue;
                }
            } else if !req_headers.is_empty() {
                // If no AllowedHeaders specified but headers were requested, skip this rule
                // Unless the rule explicitly allows all headers
                continue;
            }
        }

        // Found matching rule, build response headers
        let mut response_headers = HeaderMap::new();

        // Access-Control-Allow-Origin
        // If origin is "*", use "*", otherwise echo back the origin
        let has_wildcard_origin = rule.allowed_origins.iter().any(|o| o == "*");
        if has_wildcard_origin {
            response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_ORIGIN, HeaderValue::from_static("*"));
        } else if let Ok(origin_value) = HeaderValue::from_str(origin) {
            response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_ORIGIN, origin_value);
        }

        // Vary: Origin (required for caching, except when using wildcard)
        if !has_wildcard_origin {
            response_headers.insert(cors::standard::VARY, HeaderValue::from_static("Origin"));
        }

        // Access-Control-Allow-Methods (required for preflight)
        if is_preflight || !rule.allowed_methods.is_empty() {
            let methods_str = rule.allowed_methods.iter().map(|m| m.as_str()).collect::<Vec<_>>().join(", ");
            if let Ok(methods_value) = HeaderValue::from_str(&methods_str) {
                response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_METHODS, methods_value);
            }
        }

        // Access-Control-Allow-Headers (required for preflight if headers were requested)
        if is_preflight && let Some(ref allowed_headers) = rule.allowed_headers {
            let headers_str = allowed_headers.iter().map(|h| h.as_str()).collect::<Vec<_>>().join(", ");
            if let Ok(headers_value) = HeaderValue::from_str(&headers_str) {
                response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_HEADERS, headers_value);
            }
        }

        // Access-Control-Expose-Headers (for actual requests)
        if !is_preflight && let Some(ref expose_headers) = rule.expose_headers {
            let expose_headers_str = expose_headers.iter().map(|h| h.as_str()).collect::<Vec<_>>().join(", ");
            if let Ok(expose_value) = HeaderValue::from_str(&expose_headers_str) {
                response_headers.insert(cors::response::ACCESS_CONTROL_EXPOSE_HEADERS, expose_value);
            }
        }

        // Access-Control-Max-Age (for preflight requests)
        if is_preflight
            && let Some(max_age) = rule.max_age_seconds
            && let Ok(max_age_value) = HeaderValue::from_str(&max_age.to_string())
        {
            response_headers.insert(cors::response::ACCESS_CONTROL_MAX_AGE, max_age_value);
        }

        return Some(response_headers);
    }

    None // No matching rule found
}
/// Check if an origin matches a pattern (supports wildcards like https://*.example.com)
fn matches_origin_pattern(pattern: &str, origin: &str) -> bool {
    // Simple wildcard matching: * matches any sequence
    if pattern.contains('*') {
        let pattern_parts: Vec<&str> = pattern.split('*').collect();
        if pattern_parts.len() == 2 {
            origin.starts_with(pattern_parts[0]) && origin.ends_with(pattern_parts[1])
        } else {
            false
        }
    } else {
        pattern == origin
    }
}
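
Because the function splits on '*' and accepts exactly two parts, a pattern is effectively a (prefix, suffix) pair. A hypothetical sketch spelling that out, including the known looseness the tests later in this diff also call out:

// Hypothetical decomposition of matches_origin_pattern's wildcard branch.
fn as_prefix_suffix(pattern: &str) -> Option<(&str, &str)> {
    let mut parts = pattern.splitn(3, '*');
    match (parts.next(), parts.next(), parts.next()) {
        // Exactly one '*': treat the pattern as a prefix/suffix pair.
        (Some(p), Some(s), None) => Some((p, s)),
        _ => None, // zero or multiple '*': not a usable wildcard pattern
    }
}

fn main() {
    let (p, s) = as_prefix_suffix("https://*.example.com").unwrap();
    assert_eq!((p, s), ("https://", ".example.com"));
    // "https://evil-example.com" does not match: it lacks the "." before the suffix.
    // But nested subdomains do pass the prefix/suffix check:
    assert!("https://a.b.example.com".starts_with(p) && "https://a.b.example.com".ends_with(s));
    assert!(as_prefix_suffix("https://*.*.com").is_none()); // multiple wildcards rejected
}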

/// Wrap S3Response with CORS headers if needed
/// This function performs a lightweight check first to avoid unnecessary CORS processing
/// for non-CORS requests (requests without Origin header)
async fn wrap_response_with_cors<T>(bucket: &str, method: &http::Method, headers: &HeaderMap, output: T) -> S3Response<T> {
    let mut response = S3Response::new(output);

    // Quick check: only process CORS if Origin header is present
    if needs_cors_processing(headers)
        && let Some(cors_headers) = apply_cors_headers(bucket, method, headers).await
    {
        for (key, value) in cors_headers.iter() {
            response.headers.insert(key, value.clone());
        }
    }

    response
}
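
The cheap gate matters here: wrap_response_with_cors awaits a bucket-metadata lookup only when the request actually carried an Origin header. A minimal sketch of that gate using the plain http crate, nothing from this repo:

use http::{HeaderMap, HeaderValue, header::ORIGIN};

fn main() {
    let mut headers = HeaderMap::new();
    // No Origin header: wrap_response_with_cors returns immediately,
    // without consulting bucket metadata at all.
    assert!(!headers.contains_key(ORIGIN));

    headers.insert(ORIGIN, HeaderValue::from_static("https://app.example.com"));
    // With an Origin header, the bucket CORS config is consulted.
    assert!(headers.contains_key(ORIGIN));
}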

#[async_trait::async_trait]
impl S3 for FS {
    #[instrument(
@@ -854,6 +1072,9 @@ impl S3 for FS {
            sse_customer_key_md5,
            metadata_directive,
            metadata,
            copy_source_if_match,
            copy_source_if_none_match,
            content_type,
            ..
        } = req.input.clone();
        let (src_bucket, src_key, version_id) = match copy_source {
@@ -869,6 +1090,19 @@ impl S3 for FS {
        validate_object_key(&src_key, "COPY (source)")?;
        validate_object_key(&key, "COPY (dest)")?;

        // AWS S3 allows self-copy when metadata directive is REPLACE (used to update metadata in-place).
        // Reject only when the directive is not REPLACE.
        if metadata_directive.as_ref().map(|d| d.as_str()) != Some(MetadataDirective::REPLACE)
            && src_bucket == bucket
            && src_key == key
        {
            error!("Rejected self-copy operation: bucket={}, key={}", bucket, key);
            return Err(s3_error!(
                InvalidRequest,
                "Cannot copy an object to itself. Source and destination must be different."
            ));
        }

        // warn!("copy_object {}/{}, to {}/{}", &src_bucket, &src_key, &bucket, &key);

        let mut src_opts = copy_src_opts(&src_bucket, &src_key, &req.headers).map_err(ApiError::from)?;
@@ -929,6 +1163,30 @@ impl S3 for FS {

        let mut src_info = gr.object_info.clone();

        // Validate copy source conditions
        if let Some(if_match) = copy_source_if_match {
            if let Some(ref etag) = src_info.etag {
                if let Some(strong_etag) = if_match.into_etag() {
                    if ETag::Strong(etag.clone()) != strong_etag {
                        return Err(s3_error!(PreconditionFailed));
                    }
                } else {
                    // Weak ETag or Any (*) in If-Match should fail per RFC 9110
                    return Err(s3_error!(PreconditionFailed));
                }
            } else {
                return Err(s3_error!(PreconditionFailed));
            }
        }

        if let Some(if_none_match) = copy_source_if_none_match
            && let Some(ref etag) = src_info.etag
            && let Some(strong_etag) = if_none_match.into_etag()
            && ETag::Strong(etag.clone()) == strong_etag
        {
            return Err(s3_error!(PreconditionFailed));
        }

        if cp_src_dst_same {
            src_info.metadata_only = true;
        }
@@ -969,12 +1227,35 @@ impl S3 for FS {
            src_info
                .user_defined
                .remove(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression"));
            src_info
                .user_defined
                .remove(&format!("{RESERVED_METADATA_PREFIX}compression"));
            src_info
                .user_defined
                .remove(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size"));
            src_info
                .user_defined
                .remove(&format!("{RESERVED_METADATA_PREFIX}actual-size"));
            src_info
                .user_defined
                .remove(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression-size"));
            src_info
                .user_defined
                .remove(&format!("{RESERVED_METADATA_PREFIX}compression-size"));
        }

        // Handle MetadataDirective REPLACE: replace user metadata while preserving system metadata.
        // System metadata (compression, encryption) is added after this block to ensure
        // it's not cleared by the REPLACE operation.
        if metadata_directive.as_ref().map(|d| d.as_str()) == Some(MetadataDirective::REPLACE) {
            src_info.user_defined.clear();
            if let Some(metadata) = metadata {
                src_info.user_defined.extend(metadata);
            }
            if let Some(ct) = content_type {
                src_info.content_type = Some(ct.clone());
                src_info.user_defined.insert("content-type".to_string(), ct);
            }
        }

        let mut reader = HashReader::new(reader, length, actual_size, None, None, false).map_err(ApiError::from)?;
@@ -1059,16 +1340,6 @@ impl S3 for FS {
                .insert("x-amz-server-side-encryption-customer-key-md5".to_string(), sse_md5.clone());
        }

        if metadata_directive.as_ref().map(|d| d.as_str()) == Some(MetadataDirective::REPLACE) {
            let src_user_defined = extract_user_defined_metadata(&src_info.user_defined);
            src_user_defined.keys().for_each(|k| {
                src_info.user_defined.remove(k);
            });
            if let Some(metadata) = metadata {
                src_info.user_defined.extend(metadata);
            }
        }

        // check quota for copy operation
        if let Some(metadata_sys) = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get() {
            let quota_checker = QuotaChecker::new(metadata_sys.clone());
@@ -2491,6 +2762,23 @@ impl S3 for FS {
            }
        }

        let versioned = BucketVersioningSys::prefix_enabled(&bucket, &key).await;

        // Get version_id from object info
        // If versioning is enabled and version_id exists in object info, return it
        // If version_id is Uuid::nil(), return "null" string (AWS S3 convention)
        let output_version_id = if versioned {
            info.version_id.map(|vid| {
                if vid == Uuid::nil() {
                    "null".to_string()
                } else {
                    vid.to_string()
                }
            })
        } else {
            None
        };

        let output = GetObjectOutput {
            body,
            content_length: Some(response_content_length),
@@ -2511,6 +2799,7 @@ impl S3 for FS {
            checksum_sha256,
            checksum_crc64nvme,
            checksum_type,
            version_id: output_version_id,
            ..Default::default()
        };

@@ -2535,7 +2824,8 @@ impl S3 for FS {
            cache_key, response_content_length, total_duration, optimal_buffer_size
        );

        let result = Ok(S3Response::new(output));
        let response = wrap_response_with_cors(&bucket, &req.method, &req.headers, output).await;
        let result = Ok(response);
        let _ = helper.complete(&result);
        result
    }
@@ -2610,7 +2900,18 @@ impl S3 for FS {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };

        let info = store.get_object_info(&bucket, &key, &opts).await.map_err(ApiError::from)?;
        // Explicitly handle get_object_info errors, distinguishing object absence from other errors
        let info = match store.get_object_info(&bucket, &key, &opts).await {
            Ok(info) => info,
            Err(err) => {
                // If the error indicates the object or its version was not found, return 404 (NoSuchKey)
                if is_err_object_not_found(&err) || is_err_version_not_found(&err) {
                    return Err(S3Error::new(S3ErrorCode::NoSuchKey));
                }
                // Other errors, such as insufficient permissions, still return the original error
                return Err(ApiError::from(err).into());
            }
        };

        if info.delete_marker {
            if opts.version_id.is_none() {
@@ -2689,7 +2990,7 @@ impl S3 for FS {
            .get("x-amz-server-side-encryption-customer-algorithm")
            .map(|v| SSECustomerAlgorithm::from(v.clone()));
        let sse_customer_key_md5 = metadata_map.get("x-amz-server-side-encryption-customer-key-md5").cloned();
        let ssekms_key_id = metadata_map.get("x-amz-server-side-encryption-aws-kms-key-id").cloned();
        let sse_kms_key_id = metadata_map.get("x-amz-server-side-encryption-aws-kms-key-id").cloned();
        // Prefer explicit storage_class from object info; fall back to persisted metadata header.
        let storage_class = info
            .storage_class
@@ -2754,7 +3055,7 @@ impl S3 for FS {
            server_side_encryption,
            sse_customer_algorithm,
            sse_customer_key_md5,
            ssekms_key_id,
            ssekms_key_id: sse_kms_key_id,
            checksum_crc32,
            checksum_crc32c,
            checksum_sha1,
@@ -2769,7 +3070,14 @@ impl S3 for FS {
        let version_id = req.input.version_id.clone().unwrap_or_default();
        helper = helper.object(event_info).version_id(version_id);

        let result = Ok(S3Response::new(output));
        // NOTE ON CORS:
        // Bucket-level CORS headers are intentionally applied only for object retrieval
        // operations (GET/HEAD) via `wrap_response_with_cors`. Other S3 operations that
        // interact with objects (PUT/POST/DELETE/LIST, etc.) rely on the system-level
        // CORS layer instead. In case both are applicable, this bucket-level CORS logic
        // takes precedence for these read operations.
        let response = wrap_response_with_cors(&bucket, &req.method, &req.headers, output).await;
        let result = Ok(response);
        let _ = helper.complete(&result);

        result
@@ -4639,8 +4947,85 @@ impl S3 for FS {
        Ok(S3Response::new(DeleteBucketTaggingOutput {}))
    }

    #[instrument(level = "debug", skip(self))]
    async fn get_bucket_cors(&self, req: S3Request<GetBucketCorsInput>) -> S3Result<S3Response<GetBucketCorsOutput>> {
        let bucket = req.input.bucket.clone();
        // check bucket exists.
        let _bucket = self
            .head_bucket(req.map_input(|input| HeadBucketInput {
                bucket: input.bucket,
                expected_bucket_owner: None,
            }))
            .await?;

        let cors_configuration = match metadata_sys::get_cors_config(&bucket).await {
            Ok((config, _)) => config,
            Err(err) => {
                if err == StorageError::ConfigNotFound {
                    return Err(S3Error::with_message(
                        S3ErrorCode::NoSuchCORSConfiguration,
                        "The CORS configuration does not exist".to_string(),
                    ));
                }
                warn!("get_cors_config err {:?}", &err);
                return Err(ApiError::from(err).into());
            }
        };

        Ok(S3Response::new(GetBucketCorsOutput {
            cors_rules: Some(cors_configuration.cors_rules),
        }))
    }

    #[instrument(level = "debug", skip(self))]
    async fn put_bucket_cors(&self, req: S3Request<PutBucketCorsInput>) -> S3Result<S3Response<PutBucketCorsOutput>> {
        let PutBucketCorsInput {
            bucket,
            cors_configuration,
            ..
        } = req.input;

        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };

        store
            .get_bucket_info(&bucket, &BucketOptions::default())
            .await
            .map_err(ApiError::from)?;

        let data = try_!(serialize(&cors_configuration));

        metadata_sys::update(&bucket, BUCKET_CORS_CONFIG, data)
            .await
            .map_err(ApiError::from)?;

        Ok(S3Response::new(PutBucketCorsOutput::default()))
    }

    #[instrument(level = "debug", skip(self))]
    async fn delete_bucket_cors(&self, req: S3Request<DeleteBucketCorsInput>) -> S3Result<S3Response<DeleteBucketCorsOutput>> {
        let DeleteBucketCorsInput { bucket, .. } = req.input;

        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };

        store
            .get_bucket_info(&bucket, &BucketOptions::default())
            .await
            .map_err(ApiError::from)?;

        metadata_sys::delete(&bucket, BUCKET_CORS_CONFIG)
            .await
            .map_err(ApiError::from)?;

        Ok(S3Response::new(DeleteBucketCorsOutput {}))
    }

    #[instrument(level = "debug", skip(self, req))]
    async fn put_object_tagging(&self, req: S3Request<PutObjectTaggingInput>) -> S3Result<S3Response<PutObjectTaggingOutput>> {
        let start_time = std::time::Instant::now();
        let mut helper = OperationHelper::new(&req, EventName::ObjectCreatedPutTagging, "s3:PutObjectTagging");
        let PutObjectTaggingInput {
            bucket,
@@ -4654,6 +5039,8 @@ impl S3 for FS {
            // Reference: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html
            // Reference: https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_PutObjectTagging.html
            // https://github.com/minio/mint/blob/master/run/core/aws-sdk-go-v2/main.go#L1647
            error!("Tag set exceeds maximum of 10 tags: {}", tagging.tag_set.len());
            return Err(s3_error!(InvalidTag, "Cannot have more than 10 tags per object"));
        }

        let Some(store) = new_object_layer_fn() else {
@@ -4662,71 +5049,118 @@ impl S3 for FS {

        let mut tag_keys = std::collections::HashSet::with_capacity(tagging.tag_set.len());
        for tag in &tagging.tag_set {
            let key = tag
                .key
                .as_ref()
                .filter(|k| !k.is_empty())
                .ok_or_else(|| s3_error!(InvalidTag, "Tag key cannot be empty"))?;
            let key = tag.key.as_ref().filter(|k| !k.is_empty()).ok_or_else(|| {
                error!("Empty tag key");
                s3_error!(InvalidTag, "Tag key cannot be empty")
            })?;

            if key.len() > 128 {
                error!("Tag key too long: {} bytes", key.len());
                return Err(s3_error!(InvalidTag, "Tag key is too long, maximum allowed length is 128 characters"));
            }

            let value = tag
                .value
                .as_ref()
                .ok_or_else(|| s3_error!(InvalidTag, "Tag value cannot be null"))?;
            let value = tag.value.as_ref().ok_or_else(|| {
                error!("Null tag value");
                s3_error!(InvalidTag, "Tag value cannot be null")
            })?;

            if value.is_empty() {
                error!("Empty tag value");
                return Err(s3_error!(InvalidTag, "Tag value cannot be empty"));
            }

            if value.len() > 256 {
                error!("Tag value too long: {} bytes", value.len());
                return Err(s3_error!(InvalidTag, "Tag value is too long, maximum allowed length is 256 characters"));
            }

            if !tag_keys.insert(key) {
                error!("Duplicate tag key: {}", key);
                return Err(s3_error!(InvalidTag, "Cannot provide multiple Tags with the same key"));
            }
        }

        let tags = encode_tags(tagging.tag_set);
        debug!("Encoded tags: {}", tags);

        // TODO: getOpts
        // TODO: Replicate
        // TODO: getOpts, Replicate
        // Support versioned objects
        let version_id = req.input.version_id.clone();
        let opts = ObjectOptions {
            version_id: self.parse_version_id(version_id)?.map(Into::into),
            ..Default::default()
        };

        store
            .put_object_tags(&bucket, &object, &tags, &ObjectOptions::default())
            .await
            .map_err(ApiError::from)?;
        store.put_object_tags(&bucket, &object, &tags, &opts).await.map_err(|e| {
            error!("Failed to put object tags: {}", e);
            counter!("rustfs.put_object_tagging.failure").increment(1);
            ApiError::from(e)
        })?;

        let version_id = req.input.version_id.clone().unwrap_or_default();
        helper = helper.version_id(version_id);
        // Invalidate cache for the tagged object
        let manager = get_concurrency_manager();
        let version_id = req.input.version_id.clone();
        let cache_key = ConcurrencyManager::make_cache_key(&bucket, &object, version_id.clone().as_deref());
        tokio::spawn(async move {
            manager
                .invalidate_cache_versioned(&bucket, &object, version_id.as_deref())
                .await;
            debug!("Cache invalidated for tagged object: {}", cache_key);
        });

        let result = Ok(S3Response::new(PutObjectTaggingOutput { version_id: None }));
        // Add metrics
        counter!("rustfs.put_object_tagging.success").increment(1);

        let version_id_resp = req.input.version_id.clone().unwrap_or_default();
        helper = helper.version_id(version_id_resp);

        let result = Ok(S3Response::new(PutObjectTaggingOutput {
            version_id: req.input.version_id.clone(),
        }));
        let _ = helper.complete(&result);
        let duration = start_time.elapsed();
        histogram!("rustfs.object_tagging.operation.duration.seconds", "operation" => "put").record(duration.as_secs_f64());
        result
    }
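
    // The validation loop above enforces AWS's object-tag limits; condensed here
    // into one hypothetical predicate for reference (at most 10 tags, key 1..=128
    // chars, value at most 256 chars, unique keys). Note that this commit also
    // rejects empty tag values, which is stricter than the key/length limits alone.
    //
    // use std::collections::HashSet;
    //
    // fn validate_tags(tags: &[(String, String)]) -> Result<(), &'static str> {
    //     if tags.len() > 10 {
    //         return Err("Cannot have more than 10 tags per object");
    //     }
    //     let mut seen = HashSet::new();
    //     for (key, value) in tags {
    //         if key.is_empty() || key.len() > 128 {
    //             return Err("Tag key must be 1..=128 characters");
    //         }
    //         if value.is_empty() || value.len() > 256 {
    //             return Err("Tag value must be 1..=256 characters (non-empty per this commit)");
    //         }
    //         if !seen.insert(key) {
    //             return Err("Cannot provide multiple Tags with the same key");
    //         }
    //     }
    //     Ok(())
    // }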

    #[instrument(level = "debug", skip(self))]
    async fn get_object_tagging(&self, req: S3Request<GetObjectTaggingInput>) -> S3Result<S3Response<GetObjectTaggingOutput>> {
        let start_time = std::time::Instant::now();
        let GetObjectTaggingInput { bucket, key: object, .. } = req.input;

        info!("Starting get_object_tagging for bucket: {}, object: {}", bucket, object);

        let Some(store) = new_object_layer_fn() else {
            error!("Store not initialized");
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };

        // TODO: version
        let tags = store
            .get_object_tags(&bucket, &object, &ObjectOptions::default())
            .await
            .map_err(ApiError::from)?;
        // Support versioned objects
        let version_id = req.input.version_id.clone();
        let opts = ObjectOptions {
            version_id: self.parse_version_id(version_id)?.map(Into::into),
            ..Default::default()
        };

        let tags = store.get_object_tags(&bucket, &object, &opts).await.map_err(|e| {
            if is_err_object_not_found(&e) {
                error!("Object not found: {}", e);
                return s3_error!(NoSuchKey);
            }
            error!("Failed to get object tags: {}", e);
            ApiError::from(e).into()
        })?;

        let tag_set = decode_tags(tags.as_str());
        debug!("Decoded tag set: {:?}", tag_set);

        // Add metrics
        counter!("rustfs.get_object_tagging.success").increment(1);
        let duration = start_time.elapsed();
        histogram!("rustfs.object_tagging.operation.duration.seconds", "operation" => "get").record(duration.as_secs_f64());
        Ok(S3Response::new(GetObjectTaggingOutput {
            tag_set,
            version_id: None,
            version_id: req.input.version_id.clone(),
        }))
    }

@@ -4735,25 +5169,56 @@ impl S3 for FS {
        &self,
        req: S3Request<DeleteObjectTaggingInput>,
    ) -> S3Result<S3Response<DeleteObjectTaggingOutput>> {
        let start_time = std::time::Instant::now();
        let mut helper = OperationHelper::new(&req, EventName::ObjectCreatedDeleteTagging, "s3:DeleteObjectTagging");
        let DeleteObjectTaggingInput { bucket, key: object, .. } = req.input.clone();
        let DeleteObjectTaggingInput {
            bucket,
            key: object,
            version_id,
            ..
        } = req.input.clone();

        let Some(store) = new_object_layer_fn() else {
            error!("Store not initialized");
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };

        // TODO: Replicate
        // TODO: version
        store
            .delete_object_tags(&bucket, &object, &ObjectOptions::default())
            .await
            .map_err(ApiError::from)?;
        // Support versioned objects
        let version_id_for_parse = version_id.clone();
        let opts = ObjectOptions {
            version_id: self.parse_version_id(version_id_for_parse)?.map(Into::into),
            ..Default::default()
        };

        let version_id = req.input.version_id.clone().unwrap_or_else(|| Uuid::new_v4().to_string());
        helper = helper.version_id(version_id);
        // TODO: Replicate (kept from the original; add replication logic here if needed)
        store.delete_object_tags(&bucket, &object, &opts).await.map_err(|e| {
            error!("Failed to delete object tags: {}", e);
            ApiError::from(e)
        })?;

        let result = Ok(S3Response::new(DeleteObjectTaggingOutput { version_id: None }));
        // Invalidate cache for the deleted tagged object
        let manager = get_concurrency_manager();
        let version_id_clone = version_id.clone();
        tokio::spawn(async move {
            manager
                .invalidate_cache_versioned(&bucket, &object, version_id_clone.as_deref())
                .await;
            debug!(
                "Cache invalidated for deleted tagged object: bucket={}, object={}, version_id={:?}",
                bucket, object, version_id_clone
            );
        });

        // Add metrics
        counter!("rustfs.delete_object_tagging.success").increment(1);

        let version_id_resp = version_id.clone().unwrap_or_default();
        helper = helper.version_id(version_id_resp);

        let result = Ok(S3Response::new(DeleteObjectTaggingOutput { version_id }));
        let _ = helper.complete(&result);
        let duration = start_time.elapsed();
        histogram!("rustfs.object_tagging.operation.duration.seconds", "operation" => "delete").record(duration.as_secs_f64());
        result
    }
|
||||
|
||||
@@ -6402,4 +6867,201 @@ mod tests {
|
||||
assert!(filtered_version_marker.is_some());
|
||||
assert_eq!(filtered_version_marker.unwrap(), "null");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_matches_origin_pattern_exact_match() {
|
||||
// Test exact match
|
||||
assert!(matches_origin_pattern("https://example.com", "https://example.com"));
|
||||
assert!(matches_origin_pattern("http://localhost:3000", "http://localhost:3000"));
|
||||
assert!(!matches_origin_pattern("https://example.com", "https://other.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_matches_origin_pattern_wildcard() {
|
||||
// Test wildcard pattern matching (S3 CORS supports * as subdomain wildcard)
|
||||
assert!(matches_origin_pattern("https://*.example.com", "https://app.example.com"));
|
||||
assert!(matches_origin_pattern("https://*.example.com", "https://api.example.com"));
|
||||
assert!(matches_origin_pattern("https://*.example.com", "https://subdomain.example.com"));
|
||||
|
||||
// Test wildcard at start (matches any domain)
|
||||
assert!(matches_origin_pattern("https://*", "https://example.com"));
|
||||
assert!(matches_origin_pattern("https://*", "https://any-domain.com"));
|
||||
|
||||
// Test wildcard at end (matches any protocol)
|
||||
assert!(matches_origin_pattern("*://example.com", "https://example.com"));
|
||||
assert!(matches_origin_pattern("*://example.com", "http://example.com"));
|
||||
|
||||
// Test invalid wildcard patterns (should not match)
|
||||
assert!(!matches_origin_pattern("https://*.*.com", "https://app.example.com")); // Multiple wildcards (invalid pattern)
|
||||
// Note: "https://*example.com" actually matches "https://app.example.com" with our current implementation
|
||||
// because it splits on * and checks starts_with/ends_with. This is a limitation but acceptable
|
||||
// for S3 CORS which typically uses patterns like "https://*.example.com"
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_matches_origin_pattern_no_wildcard() {
|
||||
// Test patterns without wildcards
|
||||
assert!(matches_origin_pattern("https://example.com", "https://example.com"));
|
||||
assert!(!matches_origin_pattern("https://example.com", "https://example.org"));
|
||||
assert!(!matches_origin_pattern("http://example.com", "https://example.com")); // Different protocol
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_matches_origin_pattern_edge_cases() {
|
||||
// Test edge cases
|
||||
assert!(!matches_origin_pattern("", "https://example.com")); // Empty pattern
|
||||
assert!(!matches_origin_pattern("https://example.com", "")); // Empty origin
|
||||
assert!(matches_origin_pattern("", "")); // Both empty
|
||||
assert!(!matches_origin_pattern("https://example.com", "http://example.com")); // Protocol mismatch
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cors_headers_validation() {
|
||||
use http::HeaderMap;
|
||||
|
||||
// Test case 1: Validate header name case-insensitivity
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert("access-control-request-headers", "Content-Type,X-Custom-Header".parse().unwrap());
|
||||
|
||||
let req_headers_str = headers
|
||||
.get("access-control-request-headers")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.unwrap();
|
||||
let req_headers: Vec<String> = req_headers_str.split(',').map(|s| s.trim().to_lowercase()).collect();
|
||||
|
||||
// Headers should be lowercased for comparison
|
||||
assert_eq!(req_headers, vec!["content-type", "x-custom-header"]);
|
||||
|
||||
// Test case 2: Wildcard matching
|
||||
let allowed_headers = ["*".to_string()];
|
||||
let all_allowed = req_headers.iter().all(|req_header| {
|
||||
allowed_headers
|
||||
.iter()
|
||||
.any(|allowed| allowed.to_lowercase() == "*" || allowed.to_lowercase() == *req_header)
|
||||
});
|
||||
assert!(all_allowed, "Wildcard should allow all headers");
|
||||
|
||||
// Test case 3: Specific header matching
|
||||
let allowed_headers = ["content-type".to_string(), "x-custom-header".to_string()];
|
||||
let all_allowed = req_headers
|
||||
.iter()
|
||||
.all(|req_header| allowed_headers.iter().any(|allowed| allowed.to_lowercase() == *req_header));
|
||||
assert!(all_allowed, "All requested headers should be allowed");
|
||||
|
||||
// Test case 4: Disallowed header
|
||||
let req_headers = ["content-type".to_string(), "x-forbidden-header".to_string()];
|
||||
let allowed_headers = ["content-type".to_string()];
|
||||
let all_allowed = req_headers
|
||||
.iter()
|
||||
.all(|req_header| allowed_headers.iter().any(|allowed| allowed.to_lowercase() == *req_header));
|
||||
assert!(!all_allowed, "Forbidden header should not be allowed");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cors_response_headers_structure() {
|
||||
use http::{HeaderMap, HeaderValue};
|
||||
|
||||
let mut cors_headers = HeaderMap::new();
|
||||
|
||||
// Simulate building CORS response headers
|
||||
let origin = "https://example.com";
|
||||
let methods = ["GET", "PUT", "POST"];
|
||||
let allowed_headers = ["Content-Type", "Authorization"];
|
||||
let expose_headers = ["ETag", "x-amz-version-id"];
|
||||
let max_age = 3600;
|
||||
|
||||
// Add headers
|
||||
cors_headers.insert("access-control-allow-origin", HeaderValue::from_str(origin).unwrap());
|
||||
cors_headers.insert("vary", HeaderValue::from_static("Origin"));
|
||||
|
||||
let methods_str = methods.join(", ");
|
||||
cors_headers.insert("access-control-allow-methods", HeaderValue::from_str(&methods_str).unwrap());
|
||||
|
||||
let headers_str = allowed_headers.join(", ");
|
||||
cors_headers.insert("access-control-allow-headers", HeaderValue::from_str(&headers_str).unwrap());
|
||||
|
||||
let expose_str = expose_headers.join(", ");
|
||||
cors_headers.insert("access-control-expose-headers", HeaderValue::from_str(&expose_str).unwrap());
|
||||
|
||||
cors_headers.insert("access-control-max-age", HeaderValue::from_str(&max_age.to_string()).unwrap());
|
||||
|
||||
// Verify all headers are present
|
||||
assert_eq!(cors_headers.get("access-control-allow-origin").unwrap(), origin);
|
||||
assert_eq!(cors_headers.get("vary").unwrap(), "Origin");
|
||||
assert_eq!(cors_headers.get("access-control-allow-methods").unwrap(), "GET, PUT, POST");
|
||||
assert_eq!(cors_headers.get("access-control-allow-headers").unwrap(), "Content-Type, Authorization");
|
||||
assert_eq!(cors_headers.get("access-control-expose-headers").unwrap(), "ETag, x-amz-version-id");
|
||||
assert_eq!(cors_headers.get("access-control-max-age").unwrap(), "3600");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cors_preflight_vs_actual_request() {
|
||||
use http::Method;
|
||||
|
||||
// Test that we can distinguish preflight from actual requests
|
||||
let preflight_method = Method::OPTIONS;
|
||||
let actual_method = Method::PUT;
|
||||
|
||||
assert_eq!(preflight_method, Method::OPTIONS);
|
||||
assert_ne!(actual_method, Method::OPTIONS);
|
||||
|
||||
// Preflight should check Access-Control-Request-Method
|
||||
// Actual request should use the actual method
|
||||
let is_preflight_1 = preflight_method == Method::OPTIONS;
|
||||
let is_preflight_2 = actual_method == Method::OPTIONS;
|
||||
|
||||
assert!(is_preflight_1);
|
||||
assert!(!is_preflight_2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_apply_cors_headers_no_origin() {
|
||||
// Test when no Origin header is present
|
||||
let headers = HeaderMap::new();
|
||||
let method = http::Method::GET;
|
||||
|
||||
// Should return None when no origin header
|
||||
let result = apply_cors_headers("test-bucket", &method, &headers).await;
|
||||
assert!(result.is_none(), "Should return None when no Origin header");
|
||||
}

    #[tokio::test]
    async fn test_apply_cors_headers_no_cors_config() {
        // Test when the bucket has no CORS configuration
        let mut headers = HeaderMap::new();
        headers.insert("origin", "https://example.com".parse().unwrap());
        let method = http::Method::GET;

        // Should return None when no CORS config exists.
        // Note: this test could fail if the bucket actually had a CORS config;
        // in a real scenario we'd use a mock or ensure the bucket doesn't exist.
        let _result = apply_cors_headers("non-existent-bucket-for-testing", &method, &headers).await;
        // The result depends on whether the bucket exists and has a CORS config,
        // so we only verify that the call doesn't panic.
    }

    #[tokio::test]
    async fn test_apply_cors_headers_unsupported_method() {
        // Test with an unsupported HTTP method
        let mut headers = HeaderMap::new();
        headers.insert("origin", "https://example.com".parse().unwrap());
        let method = http::Method::PATCH; // Unsupported method

        let result = apply_cors_headers("test-bucket", &method, &headers).await;
        assert!(result.is_none(), "Should return None for unsupported methods");
    }

    #[test]
    fn test_matches_origin_pattern_complex_wildcards() {
        // Test more complex wildcard scenarios
        assert!(matches_origin_pattern("https://*.example.com", "https://sub.example.com"));
        // Note: "https://*.example.com" also matches "https://api.sub.example.com" in our
        // implementation, because it only checks starts_with and ends_with. Real S3 may be stricter.

        // Wildcards in a middle position are also accepted by our implementation, although
        // that is not a standard S3 CORS pattern: "https://example.*.com" splits into
        // ["https://example.", ".com"], so "https://example.sub.com" matches because it
        // starts with "https://example." and ends with ".com". This is acceptable for our
        // use case, since S3 CORS patterns typically use the "https://*.example.com" form
        // (see the sketch after this test).
    }
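
    // The comments in the test above describe the wildcard rule: the pattern is
    // split on '*' into a prefix and a suffix, and an origin matches when it
    // starts with the prefix and ends with the suffix. A minimal sketch of that
    // rule, assuming a single '*' per pattern; `matches_origin_pattern_sketch`
    // is illustrative, not the crate's actual implementation.
    #[allow(dead_code)]
    fn matches_origin_pattern_sketch(pattern: &str, origin: &str) -> bool {
        match pattern.split_once('*') {
            // The origin must be long enough to contain both parts without overlap.
            Some((prefix, suffix)) => {
                origin.len() >= prefix.len() + suffix.len()
                    && origin.starts_with(prefix)
                    && origin.ends_with(suffix)
            }
            // No wildcard: require an exact match.
            None => pattern == origin,
        }
    }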
}

@@ -65,13 +65,23 @@ pub async fn del_opts(
 
     let vid = vid.map(|v| v.as_str().trim().to_owned());
 
-    if let Some(ref id) = vid
-        && *id != Uuid::nil().to_string()
-        && let Err(err) = Uuid::parse_str(id.as_str())
-    {
-        error!("del_opts: invalid version id: {} error: {}", id, err);
-        return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone()));
-    }
+    // Handle the AWS S3 special case: the string "null" represents the null version ID.
+    // When VersionId='null' is specified, it means delete the object whose version ID is null.
+    let vid = if let Some(ref id) = vid {
+        if id.eq_ignore_ascii_case("null") {
+            // Convert "null" to the Uuid::nil() string representation
+            Some(Uuid::nil().to_string())
+        } else {
+            // Validate the UUID format of other version IDs
+            if *id != Uuid::nil().to_string() && Uuid::parse_str(id.as_str()).is_err() {
+                error!("del_opts: invalid version id: {} error: invalid UUID format", id);
+                return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone()));
+            }
+            Some(id.clone())
+        }
+    } else {
+        None
+    };
 
     let mut opts = put_opts_from_headers(headers, metadata.clone()).map_err(|err| {
         error!("del_opts: invalid argument: {} error: {}", object, err);
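
// The hunk above normalizes the version ID before validating it. A condensed,
// hedged sketch of that mapping as a standalone function: it assumes the
// `uuid` crate is in scope, and `normalize_version_id` is illustrative rather
// than this crate's actual API.
fn normalize_version_id(vid: Option<&str>) -> Result<Option<String>, String> {
    match vid.map(str::trim) {
        None => Ok(None),
        // AWS S3 treats the literal string "null" (matched case-insensitively
        // here, as in the hunk above) as the null version ID, stored as Uuid::nil().
        Some(id) if id.eq_ignore_ascii_case("null") => Ok(Some(uuid::Uuid::nil().to_string())),
        // Uuid::nil()'s own string form and any parseable UUID pass through unchanged.
        Some(id) if id == uuid::Uuid::nil().to_string() || uuid::Uuid::parse_str(id).is_ok() => {
            Ok(Some(id.to_owned()))
        }
        Some(id) => Err(format!("invalid version id: {id}")),
    }
}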

@@ -704,6 +714,16 @@ mod tests {
         assert!(!opts.delete_prefix);
     }
 
+    #[tokio::test]
+    async fn test_del_opts_with_null_version_id() {
+        let headers = create_test_headers();
+        let metadata = create_test_metadata();
+        let result = del_opts("test-bucket", "test-object", Some("null".to_string()), &headers, metadata.clone()).await;
+        assert!(result.is_ok());
+        let result = del_opts("test-bucket", "test-object", Some("NULL".to_string()), &headers, metadata.clone()).await;
+        assert!(result.is_ok());
+    }
+
     #[tokio::test]
     async fn test_get_opts_basic() {
         let headers = create_test_headers();