Merge branch 'main' of github.com:rustfs/rustfs into feature/double-metadata

This commit is contained in:
houseme
2026-02-24 19:44:35 +08:00
25 changed files with 3869 additions and 1558 deletions

24
Cargo.lock generated
View File

@@ -1527,6 +1527,17 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831"
[[package]]
name = "clocksource"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "129026dd5a8a9592d96916258f3a5379589e513ea5e86aeb0bd2530286e44e9e"
dependencies = [
"libc",
"time",
"winapi",
]
[[package]] [[package]]
name = "cmake" name = "cmake"
version = "0.1.57" version = "0.1.57"
@@ -6457,6 +6468,17 @@ version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c8d0fd677905edcbeedbf2edb6494d676f0e98d54d5cf9bda0b061cb8fb8aba" checksum = "0c8d0fd677905edcbeedbf2edb6494d676f0e98d54d5cf9bda0b061cb8fb8aba"
[[package]]
name = "ratelimit"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36ea961700fd7260e7fa3701c8287d901b2172c51f9c1421fa0f21d7f7e184b7"
dependencies = [
"clocksource",
"parking_lot",
"thiserror 1.0.69",
]
[[package]] [[package]]
name = "rayon" name = "rayon"
version = "1.11.0" version = "1.11.0"
@@ -7136,6 +7158,7 @@ dependencies = [
"pin-project-lite", "pin-project-lite",
"quick-xml 0.39.2", "quick-xml 0.39.2",
"rand 0.10.0", "rand 0.10.0",
"ratelimit",
"reed-solomon-simd", "reed-solomon-simd",
"regex", "regex",
"reqwest 0.13.2", "reqwest 0.13.2",
@@ -7509,6 +7532,7 @@ dependencies = [
"rustfs-common", "rustfs-common",
"rustfs-ecstore", "rustfs-ecstore",
"s3s", "s3s",
"serde_json",
"snafu 0.8.9", "snafu 0.8.9",
"tokio", "tokio",
"tokio-util", "tokio-util",

View File

@@ -226,6 +226,7 @@ path-clean = "1.0.1"
pin-project-lite = "0.2.16" pin-project-lite = "0.2.16"
pretty_assertions = "1.4.1" pretty_assertions = "1.4.1"
rand = { version = "0.10.0", features = ["serde"] } rand = { version = "0.10.0", features = ["serde"] }
ratelimit = "0.10.0"
rayon = "1.11.0" rayon = "1.11.0"
reed-solomon-simd = { version = "3.1.0" } reed-solomon-simd = { version = "3.1.0" }
regex = { version = "1.12.3" } regex = { version = "1.12.3" }

View File

@@ -0,0 +1,76 @@
// Copyright 2026 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::common::{RustFSTestEnvironment, awscurl_delete, awscurl_get, awscurl_put, init_logging};
use serial_test::serial;
use tracing::info;
/// Test that deleting a group with members fails, and deleting an empty group succeeds.
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "requires awscurl and spawns a real RustFS server"]
async fn test_delete_group_requires_empty_membership() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    init_logging();
    let mut env = RustFSTestEnvironment::new().await?;
    env.start_rustfs_server(vec![]).await?;

    // Step 1: provision the user that will become the group's only member.
    let create_user_endpoint = format!("{}/rustfs/admin/v3/add-user?accessKey=testuser1", env.url);
    let new_user_payload = serde_json::json!({
        "secretKey": "testuser1secret",
        "status": "enabled"
    })
    .to_string();
    awscurl_put(&create_user_endpoint, &new_user_payload, &env.access_key, &env.secret_key).await?;
    info!("Created testuser1");

    // Step 2: create testgroup with testuser1 as a member.
    let membership_endpoint = format!("{}/rustfs/admin/v3/update-group-members", env.url);
    let join_payload = serde_json::json!({
        "group": "testgroup",
        "members": ["testuser1"],
        "isRemove": false,
        "groupStatus": "enabled"
    })
    .to_string();
    awscurl_put(&membership_endpoint, &join_payload, &env.access_key, &env.secret_key).await?;
    info!("Added testuser1 to testgroup");

    // Step 3: deleting the group while it still has members must be rejected.
    let group_endpoint = format!("{}/rustfs/admin/v3/group/testgroup", env.url);
    let premature_delete = awscurl_delete(&group_endpoint, &env.access_key, &env.secret_key).await;
    assert!(premature_delete.is_err(), "deleting a non-empty group should fail");
    info!("Delete of non-empty group correctly rejected");

    // Step 4: empty the group by removing its only member.
    let leave_payload = serde_json::json!({
        "group": "testgroup",
        "members": ["testuser1"],
        "isRemove": true,
        "groupStatus": "enabled"
    })
    .to_string();
    awscurl_put(&membership_endpoint, &leave_payload, &env.access_key, &env.secret_key).await?;
    info!("Removed testuser1 from testgroup");

    // Step 5: with no members left, deletion must succeed.
    awscurl_delete(&group_endpoint, &env.access_key, &env.secret_key).await?;
    info!("Deleted empty testgroup successfully");

    // Step 6: verify the group is gone.
    let lookup_endpoint = format!("{}/rustfs/admin/v3/group?group=testgroup", env.url);
    let lookup = awscurl_get(&lookup_endpoint, &env.access_key, &env.secret_key).await;
    assert!(lookup.is_err(), "group should no longer exist after deletion");
    info!("Confirmed testgroup no longer exists");
    Ok(())
}

View File

@@ -75,3 +75,7 @@ mod cluster_concurrency_test;
// PutObject / MultipartUpload with checksum (Content-MD5, x-amz-checksum-*) // PutObject / MultipartUpload with checksum (Content-MD5, x-amz-checksum-*)
#[cfg(test)] #[cfg(test)]
mod checksum_upload_test; mod checksum_upload_test;
// Group deletion tests
#[cfg(test)]
mod group_delete_test;

View File

@@ -111,6 +111,7 @@ google-cloud-storage = { workspace = true }
google-cloud-auth = { workspace = true } google-cloud-auth = { workspace = true }
aws-config = { workspace = true } aws-config = { workspace = true }
faster-hex = { workspace = true } faster-hex = { workspace = true }
ratelimit = { workspace = true }
[dev-dependencies] [dev-dependencies]
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }

View File

@@ -0,0 +1,16 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Per-bucket replication bandwidth throttling:
// `monitor` holds the shared throttle registry, `reader` wraps streams with it.
pub mod monitor;
pub mod reader;

View File

@@ -0,0 +1,321 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::bucket::bandwidth::reader::BucketOptions;
use ratelimit::{Error as RatelimitError, Ratelimiter};
use std::collections::HashMap;
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
use tracing::warn;
/// Token-bucket throttle for a single (bucket, replication-ARN) target.
/// Cloning is cheap: clones share the same underlying limiter via `Arc`.
#[derive(Clone)]
pub struct BucketThrottle {
    // Shared rate limiter; the Mutex serializes access to it across clones.
    limiter: Arc<Mutex<Ratelimiter>>,
    // Per-node bandwidth in bytes/sec (clamped to >= 1 at construction).
    pub node_bandwidth_per_sec: i64,
}
impl BucketThrottle {
    /// Build a throttle that refills `node_bandwidth_per_sec` tokens per
    /// second, with a burst capacity of one second's worth of tokens.
    /// Non-positive rates are clamped to 1 byte/s so the limiter can be built.
    fn new(node_bandwidth_per_sec: i64) -> Result<Self, RatelimitError> {
        let per_sec = node_bandwidth_per_sec.max(1);
        let tokens = per_sec as u64;
        let limiter = Ratelimiter::builder(tokens, Duration::from_secs(1))
            .max_tokens(tokens)
            .build()?;
        Ok(Self {
            limiter: Arc::new(Mutex::new(limiter)),
            node_bandwidth_per_sec: per_sec,
        })
    }

    /// Maximum number of tokens the bucket can hold (the burst size).
    pub fn burst(&self) -> u64 {
        let limiter = self.limiter.lock().unwrap_or_else(|poisoned| poisoned.into_inner());
        limiter.max_tokens()
    }

    /// Try to take `n` tokens from the limiter.
    ///
    /// The ratelimit crate (0.10.0) has no bulk-consumption API, and tokens
    /// are only refilled inside `try_wait`/`wait`. So we call `try_wait()`
    /// once — consuming a single token when available and triggering the
    /// internal refill — and then drain the remainder directly through
    /// `set_available()`.
    ///
    /// Returns `(deficit, rate, consumed)`: how many requested tokens could
    /// not be taken, the refill rate in tokens/sec, and how many tokens were
    /// actually taken.
    pub(crate) fn consume(&self, n: u64) -> (u64, f64, u64) {
        let limiter = self.limiter.lock().unwrap_or_else(|poisoned| {
            warn!("bucket throttle mutex poisoned, recovering");
            poisoned.into_inner()
        });
        if n == 0 {
            return (0, limiter.rate(), 0);
        }
        // One try_wait both takes a token (when available) and refills the bucket.
        let mut taken = u64::from(limiter.try_wait().is_ok());
        // Drain as much of the remainder as the bucket currently holds.
        let available = limiter.available();
        let bulk = (n - taken).min(available);
        if bulk > 0 {
            let _ = limiter.set_available(available - bulk);
            taken += bulk;
        }
        (n.saturating_sub(taken), limiter.rate(), taken)
    }
}
/// Node-wide registry of bandwidth throttles, keyed by (bucket, ARN).
pub struct Monitor {
    // Map from replication target to its per-node throttle.
    t_lock: RwLock<HashMap<BucketOptions, BucketThrottle>>,
    // Number of nodes the cluster-wide limit is split across (clamped >= 1).
    pub node_count: u64,
}
impl Monitor {
    /// Create a monitor for a cluster of `num_nodes` nodes (clamped to >= 1
    /// so bandwidth division never divides by zero).
    pub fn new(num_nodes: u64) -> Arc<Self> {
        Arc::new(Monitor {
            t_lock: RwLock::new(HashMap::new()),
            node_count: num_nodes.max(1),
        })
    }

    // Acquire the write lock, recovering from a poisoned lock.
    fn write_map(&self) -> std::sync::RwLockWriteGuard<'_, HashMap<BucketOptions, BucketThrottle>> {
        self.t_lock.write().unwrap_or_else(|e| {
            warn!("bucket monitor rwlock write poisoned, recovering");
            e.into_inner()
        })
    }

    // Acquire the read lock, recovering from a poisoned lock.
    fn read_map(&self) -> std::sync::RwLockReadGuard<'_, HashMap<BucketOptions, BucketThrottle>> {
        self.t_lock.read().unwrap_or_else(|e| {
            warn!("bucket monitor rwlock read poisoned, recovering");
            e.into_inner()
        })
    }

    /// Drop every throttle belonging to `bucket`, across all ARNs.
    pub fn delete_bucket(&self, bucket: &str) {
        self.write_map().retain(|opts, _| opts.name != bucket);
    }

    /// Drop the throttle for one (bucket, ARN) replication target.
    pub fn delete_bucket_throttle(&self, bucket: &str, arn: &str) {
        let key = BucketOptions {
            name: bucket.to_string(),
            replication_arn: arn.to_string(),
        };
        self.write_map().remove(&key);
    }

    /// Look up the throttle configured for `opts`, if any.
    pub fn throttle(&self, opts: &BucketOptions) -> Option<BucketThrottle> {
        self.read_map().get(opts).cloned()
    }

    /// Install (or replace) the bandwidth limit for a replication target.
    /// `limit` is the cluster-wide bytes/sec and is divided evenly across
    /// `node_count` nodes; non-positive limits are rejected with a warning.
    pub fn set_bandwidth_limit(&self, bucket: &str, arn: &str, limit: i64) {
        if limit <= 0 {
            warn!(
                bucket = bucket,
                arn = arn,
                limit = limit,
                "invalid bandwidth limit, must be positive; ignoring"
            );
            return;
        }
        let per_node = limit / self.node_count as i64;
        if per_node == 0 && limit > 0 {
            // Integer division rounded to zero; BucketThrottle::new clamps to 1.
            warn!(
                bucket = bucket,
                arn = arn,
                limit = limit,
                node_count = self.node_count,
                "bandwidth limit too small for cluster size, per-node limit will clamp to 1 byte/s"
            );
        }
        let key = BucketOptions {
            name: bucket.to_string(),
            replication_arn: arn.to_string(),
        };
        match BucketThrottle::new(per_node) {
            Ok(throttle) => {
                self.write_map().insert(key, throttle);
            }
            Err(e) => {
                warn!(
                    bucket = bucket,
                    arn = arn,
                    limit_bytes = per_node,
                    err = %e,
                    "failed to build bandwidth throttle, throttling disabled for this target"
                );
            }
        }
    }

    /// Whether a throttle is currently configured for (bucket, arn).
    pub fn is_throttled(&self, bucket: &str, arn: &str) -> bool {
        let key = BucketOptions {
            name: bucket.to_string(),
            replication_arn: arn.to_string(),
        };
        self.read_map().contains_key(&key)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // A 400 B/s cluster-wide limit split across 4 nodes yields 100 B/s per node.
    #[test]
    fn test_set_and_get_throttle_with_node_split() {
        let monitor = Monitor::new(4);
        monitor.set_bandwidth_limit("b1", "arn1", 400);
        let throttle = monitor
            .throttle(&BucketOptions {
                name: "b1".to_string(),
                replication_arn: "arn1".to_string(),
            })
            .expect("throttle should exist");
        assert_eq!(throttle.node_bandwidth_per_sec, 100);
        assert!(monitor.is_throttled("b1", "arn1"));
    }

    // Removing a specific (bucket, ARN) throttle makes it unthrottled again.
    #[test]
    fn test_delete_bucket_throttle() {
        let monitor = Monitor::new(2);
        monitor.set_bandwidth_limit("b1", "arn1", 200);
        assert!(monitor.is_throttled("b1", "arn1"));
        monitor.delete_bucket_throttle("b1", "arn1");
        assert!(!monitor.is_throttled("b1", "arn1"));
    }

    // delete_bucket clears every ARN for that bucket but leaves other buckets alone.
    #[test]
    fn test_delete_bucket_removes_all_arns() {
        let monitor = Monitor::new(1);
        monitor.set_bandwidth_limit("b1", "arn1", 100);
        monitor.set_bandwidth_limit("b1", "arn2", 100);
        monitor.set_bandwidth_limit("b2", "arn3", 100);
        monitor.delete_bucket("b1");
        assert!(!monitor.is_throttled("b1", "arn1"));
        assert!(!monitor.is_throttled("b1", "arn2"));
        assert!(monitor.is_throttled("b2", "arn3"));
    }

    // Zero and negative limits are rejected and install no throttle.
    #[test]
    fn test_set_bandwidth_limit_ignores_non_positive() {
        let monitor = Monitor::new(2);
        monitor.set_bandwidth_limit("b1", "arn1", 0);
        assert!(!monitor.is_throttled("b1", "arn1"));
        monitor.set_bandwidth_limit("b1", "arn1", -10);
        assert!(!monitor.is_throttled("b1", "arn1"));
    }

    // Requesting more tokens than the burst leaves a non-zero deficit.
    #[test]
    fn test_consume_returns_deficit_when_tokens_exhausted() {
        let throttle = BucketThrottle::new(100).expect("test");
        let (deficit, rate, _consumed) = throttle.consume(200);
        assert!(deficit > 0);
        assert!(rate > 0.0);
    }

    // After sleeping past one refill interval, a sub-burst request fully succeeds.
    // (The limiter only refills during try_wait/wait, hence the real sleep.)
    #[test]
    fn test_consume_no_deficit_when_tokens_sufficient() {
        let throttle = BucketThrottle::new(10000).expect("test");
        std::thread::sleep(std::time::Duration::from_millis(1100));
        let (deficit, _rate, _consumed) = throttle.consume(5000);
        assert_eq!(deficit, 0);
    }

    // Burst capacity equals the configured per-node bandwidth.
    #[test]
    fn test_burst_equals_bandwidth() {
        let throttle = BucketThrottle::new(500).expect("test");
        assert_eq!(throttle.burst(), 500);
    }

    // Cloned throttles share one limiter: concurrent consumers never
    // account for more than was requested (consumed + deficit == requested).
    #[test]
    fn test_concurrent_consume() {
        let throttle = BucketThrottle::new(10000).expect("test");
        std::thread::sleep(std::time::Duration::from_millis(1100));
        let mut handles = vec![];
        for _ in 0..10 {
            let t = throttle.clone();
            handles.push(std::thread::spawn(move || t.consume(100)));
        }
        let mut total_deficit = 0u64;
        let mut total_consumed = 0u64;
        for h in handles {
            let (deficit, _rate, consumed) = h.join().unwrap();
            total_consumed += consumed;
            total_deficit += deficit;
        }
        assert_eq!(total_consumed + total_deficit, 1000);
        assert!(total_consumed <= 10000);
    }

    // A zero rate is clamped to 1 byte/s rather than failing to build.
    #[test]
    fn test_zero_bandwidth_clamped_to_one() {
        let throttle = BucketThrottle::new(0).expect("test");
        assert_eq!(throttle.burst(), 1);
        assert_eq!(throttle.node_bandwidth_per_sec, 1);
    }

    // Negative rates are clamped the same way.
    #[test]
    fn test_negative_bandwidth_clamped_to_one() {
        let throttle = BucketThrottle::new(-100).expect("test");
        assert_eq!(throttle.burst(), 1);
        assert_eq!(throttle.node_bandwidth_per_sec, 1);
    }

    // Setting a limit for an existing (bucket, ARN) replaces the old throttle.
    #[test]
    fn test_update_bandwidth_limit_overrides_previous() {
        let monitor = Monitor::new(1);
        monitor.set_bandwidth_limit("b1", "arn1", 1000);
        let opts = BucketOptions {
            name: "b1".to_string(),
            replication_arn: "arn1".to_string(),
        };
        let t1 = monitor.throttle(&opts).expect("throttle should exist");
        assert_eq!(t1.burst(), 1000);
        assert_eq!(t1.node_bandwidth_per_sec, 1000);
        monitor.set_bandwidth_limit("b1", "arn1", 500);
        let t2 = monitor.throttle(&opts).expect("throttle should exist after update");
        assert_eq!(t2.burst(), 500);
        assert_eq!(t2.node_bandwidth_per_sec, 500);
    }
}

View File

@@ -0,0 +1,336 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::bucket::bandwidth::monitor::Monitor;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
use tokio::time::Sleep;
use tracing::{debug, warn};
/// Key identifying a replication target: bucket name plus replication ARN.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct BucketOptions {
    pub name: String,
    pub replication_arn: String,
}

/// Configuration for a [`MonitoredReader`].
pub struct MonitorReaderOptions {
    // Which bucket/ARN throttle to charge reads against.
    pub bucket_options: BucketOptions,
    // Header bytes still to be charged against the throttle before payload bytes.
    pub header_size: usize,
}

// An in-flight throttle sleep carried across poll_read calls.
struct WaitState {
    sleep: Pin<Box<Sleep>>,
}

/// `AsyncRead` wrapper that charges each read against the per-bucket
/// bandwidth throttle held by the shared [`Monitor`]; reads pass through
/// unchanged when no throttle is configured for the target.
pub struct MonitoredReader<R> {
    // Underlying reader being throttled.
    r: R,
    // Shared registry of per-(bucket, ARN) throttles.
    m: Arc<Monitor>,
    opts: MonitorReaderOptions,
    // Pending sleep left over from a poll that hit a token deficit.
    wait_state: std::sync::Mutex<Option<WaitState>>,
    // Reusable scratch buffer for limit-capped reads.
    temp_buf: Vec<u8>,
}
impl<R> MonitoredReader<R> {
pub fn new(m: Arc<Monitor>, r: R, opts: MonitorReaderOptions) -> Self {
let throttle = m.throttle(&opts.bucket_options);
debug!(
bucket = opts.bucket_options.name,
arn = opts.bucket_options.replication_arn,
throttle_active = throttle.is_some(),
limit_bps = throttle.as_ref().map(|t| t.node_bandwidth_per_sec).unwrap_or(0),
header_size = opts.header_size,
"MonitoredReader created"
);
MonitoredReader {
r,
m,
opts,
wait_state: std::sync::Mutex::new(None),
temp_buf: Vec::new(),
}
}
}
// Throttled read path: finish any pending throttle sleep first, then charge
// the limiter for this read, sleeping again when tokens run out.
impl<R: AsyncRead + Unpin> AsyncRead for MonitoredReader<R> {
    fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<std::io::Result<()>> {
        let this = self.get_mut();
        {
            // A previous poll may have parked a sleep to pay off a token
            // deficit; that sleep must complete before any further work.
            let mut guard = this.wait_state.lock().unwrap_or_else(|e| {
                warn!("MonitoredReader wait_state mutex poisoned, recovering");
                e.into_inner()
            });
            if let Some(ref mut ws) = *guard {
                match ws.sleep.as_mut().poll(cx) {
                    Poll::Pending => return Poll::Pending,
                    Poll::Ready(()) => {
                        // Sleep finished: clear it and fall through to retry.
                        *guard = None;
                        drop(guard);
                    }
                }
            }
        }
        // No throttle configured for this bucket/ARN: plain pass-through read.
        let throttle = match this.m.throttle(&this.opts.bucket_options) {
            Some(t) => t,
            None => return Pin::new(&mut this.r).poll_read(cx, buf),
        };
        let b = throttle.burst();
        debug_assert!(b >= 1, "burst must be at least 1");
        // Split this read into payload bytes (`need`) and limiter tokens
        // (`tokens`); header bytes are charged before payload bytes.
        // NOTE(review): calc_need_and_tokens mutates opts.header_size and
        // consume() is re-run on every poll — a read that returns Pending on
        // the sleep below appears to be charged (and to shrink the header)
        // again on the retry poll. Confirm this double-accounting is intended.
        let (need, tokens) = calc_need_and_tokens(b, buf.remaining(), &mut this.opts.header_size);
        let (deficit, rate, consumed) = throttle.consume(tokens);
        let need = need.min(consumed as usize);
        if deficit > 0 && rate > 0.0 {
            // Not enough tokens: sleep long enough for the deficit to refill.
            let duration = std::time::Duration::from_secs_f64(deficit as f64 / rate);
            debug!(
                tokens = tokens,
                deficit = deficit,
                rate = rate,
                sleep_ms = duration.as_millis() as u64,
                "bandwidth throttle sleep"
            );
            let mut sleep = Box::pin(tokio::time::sleep(duration));
            match sleep.as_mut().poll(cx) {
                Poll::Pending => {
                    // Park the sleep so the next poll resumes it.
                    *this.wait_state.lock().unwrap_or_else(|e| {
                        warn!("MonitoredReader wait_state mutex poisoned, recovering");
                        e.into_inner()
                    }) = Some(WaitState { sleep });
                    return Poll::Pending;
                }
                Poll::Ready(()) => {}
            }
        }
        // NOTE(review): when `need` is 0 (header-only charge, or nothing
        // consumed) this returns Ready with zero bytes filled, which
        // AsyncRead consumers interpret as EOF — verify callers never hit
        // header_size >= burst mid-stream.
        poll_limited_read(&mut this.r, cx, buf, need, &mut this.temp_buf)
    }
}
/// Split a read request into the payload bytes to actually read (`need`)
/// and the limiter tokens to charge (`tokens`).
///
/// Pending header bytes are charged before payload bytes:
/// * no header — plain payload read, capped at `burst`;
/// * header smaller than `burst` — the whole header is charged this round,
///   the leftover burst budget goes to payload, and `header_size` drops to 0;
/// * header at least `burst` — the entire burst is spent on the header, no
///   payload is read, and `header_size` shrinks by `burst`.
fn calc_need_and_tokens(burst: u64, need_upper: usize, header_size: &mut usize) -> (usize, u64) {
    let pending_header = *header_size as u64;
    if pending_header == 0 {
        let payload = need_upper.min(burst as usize);
        return (payload, payload as u64);
    }
    if pending_header < burst {
        *header_size = 0;
        let payload = ((burst - pending_header) as usize).min(need_upper);
        (payload, payload as u64 + pending_header)
    } else {
        *header_size -= burst as usize;
        (0, burst)
    }
}
/// Poll `r` into `buf`, reading at most `limit` bytes this round.
///
/// When the caller's buffer is larger than `limit`, the read is staged
/// through `reusable_buf` (resized to `limit`) and the filled bytes are
/// copied into `buf`, so the inner reader can never exceed the budget.
/// A `limit` of 0 (or a full buffer) yields `Ready(Ok(()))` with no bytes.
fn poll_limited_read<R: AsyncRead + Unpin>(
    r: &mut R,
    cx: &mut Context<'_>,
    buf: &mut ReadBuf<'_>,
    limit: usize,
    reusable_buf: &mut Vec<u8>,
) -> Poll<std::io::Result<()>> {
    if limit == 0 || buf.remaining() == 0 {
        return Poll::Ready(Ok(()));
    }
    if buf.remaining() <= limit {
        // The caller's buffer already fits within the budget: read directly.
        Pin::new(r).poll_read(cx, buf)
    } else {
        // Cap the read via the scratch buffer, then copy what was filled.
        reusable_buf.resize(limit, 0);
        let mut capped = ReadBuf::new(&mut reusable_buf[..limit]);
        let poll = Pin::new(r).poll_read(cx, &mut capped);
        if let Poll::Ready(Ok(())) = poll {
            buf.put_slice(capped.filled());
        }
        poll
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use futures_util::task::noop_waker_ref;
    use std::io;
    use std::pin::Pin;
    use std::task::Context;
    use tokio::io::AsyncReadExt;

    // In-memory AsyncRead that serves a fixed byte slice and then EOFs.
    #[derive(Default)]
    struct TestAsyncReader {
        data: Vec<u8>,
        pos: usize,
    }

    impl TestAsyncReader {
        fn new(data: &[u8]) -> Self {
            Self {
                data: data.to_vec(),
                pos: 0,
            }
        }
    }

    impl AsyncRead for TestAsyncReader {
        fn poll_read(mut self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
            if self.pos >= self.data.len() {
                // Ready with no bytes filled signals EOF.
                return Poll::Ready(Ok(()));
            }
            let remaining = self.data.len() - self.pos;
            let n = remaining.min(buf.remaining());
            buf.put_slice(&self.data[self.pos..self.pos + n]);
            self.pos += n;
            Poll::Ready(Ok(()))
        }
    }

    // With no throttle registered, the wrapper is a transparent pass-through.
    #[tokio::test]
    async fn test_monitored_reader_passthrough_when_throttle_absent() {
        let monitor = Monitor::new(1);
        let opts = MonitorReaderOptions {
            bucket_options: BucketOptions {
                name: "b1".to_string(),
                replication_arn: "arn1".to_string(),
            },
            header_size: 0,
        };
        let inner = TestAsyncReader::new(b"hello-world");
        let mut reader = MonitoredReader::new(monitor, inner, opts);
        let mut out = Vec::new();
        reader.read_to_end(&mut out).await.unwrap();
        assert_eq!(out, b"hello-world");
    }

    // poll_limited_read never reads past `limit` per call, and successive
    // calls append into the same ReadBuf.
    #[test]
    fn test_poll_limited_read_honors_limit() {
        let mut inner = TestAsyncReader::new(b"abcdef");
        let mut out = [0u8; 8];
        let mut reusable = Vec::new();
        let waker = noop_waker_ref();
        let mut cx = Context::from_waker(waker);
        let mut read_buf = ReadBuf::new(&mut out);
        let first = poll_limited_read(&mut inner, &mut cx, &mut read_buf, 3, &mut reusable);
        assert!(matches!(first, Poll::Ready(Ok(()))));
        assert_eq!(read_buf.filled(), b"abc");
        let second = poll_limited_read(&mut inner, &mut cx, &mut read_buf, 3, &mut reusable);
        assert!(matches!(second, Poll::Ready(Ok(()))));
        assert_eq!(read_buf.filled(), b"abcdef");
    }

    // header_size (25) exceeds the burst (10): the first poll charges the
    // header only — no payload bytes are read and header_size shrinks by burst.
    #[tokio::test]
    async fn test_header_only_consumption_skips_io() {
        let monitor = Monitor::new(1);
        monitor.set_bandwidth_limit("b1", "arn1", 10);
        let data = vec![0xAAu8; 20];
        let inner = TestAsyncReader::new(&data);
        let opts = MonitorReaderOptions {
            bucket_options: BucketOptions {
                name: "b1".to_string(),
                replication_arn: "arn1".to_string(),
            },
            header_size: 25,
        };
        let mut reader = MonitoredReader::new(monitor, inner, opts);
        let waker = noop_waker_ref();
        let mut cx = Context::from_waker(waker);
        let mut out = [0u8; 16];
        let mut read_buf = ReadBuf::new(&mut out);
        let poll = Pin::new(&mut reader).poll_read(&mut cx, &mut read_buf);
        assert!(matches!(poll, Poll::Ready(Ok(())) | Poll::Pending));
        assert!(read_buf.filled().is_empty());
        assert_eq!(reader.opts.header_size, 15);
    }

    // Throttled at 1 KiB/s, a 4 KiB body still arrives intact (just slower).
    #[tokio::test]
    async fn test_monitored_reader_with_throttle_reads_all_data() {
        let monitor = Monitor::new(1);
        monitor.set_bandwidth_limit("b1", "arn1", 1024);
        let data = vec![0xABu8; 4096];
        let inner = TestAsyncReader::new(&data);
        let opts = MonitorReaderOptions {
            bucket_options: BucketOptions {
                name: "b1".to_string(),
                replication_arn: "arn1".to_string(),
            },
            header_size: 0,
        };
        let mut reader = MonitoredReader::new(monitor, inner, opts);
        let mut out = Vec::new();
        reader.read_to_end(&mut out).await.unwrap();
        assert_eq!(out.len(), 4096);
        assert!(out.iter().all(|&b| b == 0xAB));
    }

    // A header smaller than the burst is fully charged and zeroed out while
    // the whole payload is still delivered.
    #[tokio::test]
    async fn test_monitored_reader_header_size_accounting() {
        let monitor = Monitor::new(1);
        monitor.set_bandwidth_limit("b1", "arn1", 100);
        let data = vec![0u8; 200];
        let inner = TestAsyncReader::new(&data);
        let opts = MonitorReaderOptions {
            bucket_options: BucketOptions {
                name: "b1".to_string(),
                replication_arn: "arn1".to_string(),
            },
            header_size: 50,
        };
        let mut reader = MonitoredReader::new(monitor, inner, opts);
        let mut out = Vec::new();
        reader.read_to_end(&mut out).await.unwrap();
        assert_eq!(out.len(), 200);
        assert_eq!(reader.opts.header_size, 0);
    }

    // Even the minimum 1 byte/s limit eventually drains a small body.
    #[tokio::test]
    async fn test_monitored_reader_very_small_limit() {
        let monitor = Monitor::new(1);
        monitor.set_bandwidth_limit("b1", "arn1", 1);
        let data = vec![0xFFu8; 10];
        let inner = TestAsyncReader::new(&data);
        let opts = MonitorReaderOptions {
            bucket_options: BucketOptions {
                name: "b1".to_string(),
                replication_arn: "arn1".to_string(),
            },
            header_size: 0,
        };
        let mut reader = MonitoredReader::new(monitor, inner, opts);
        let mut out = Vec::new();
        reader.read_to_end(&mut out).await.unwrap();
        assert_eq!(out.len(), 10);
    }
}

View File

@@ -21,6 +21,7 @@ use crate::bucket::target::ARN;
use crate::bucket::target::BucketTargetType; use crate::bucket::target::BucketTargetType;
use crate::bucket::target::{self, BucketTarget, BucketTargets, Credentials}; use crate::bucket::target::{self, BucketTarget, BucketTargets, Credentials};
use crate::bucket::versioning_sys::BucketVersioningSys; use crate::bucket::versioning_sys::BucketVersioningSys;
use crate::global::get_global_bucket_monitor;
use aws_credential_types::Credentials as SdkCredentials; use aws_credential_types::Credentials as SdkCredentials;
use aws_sdk_s3::config::Region as SdkRegion; use aws_sdk_s3::config::Region as SdkRegion;
use aws_sdk_s3::error::ProvideErrorMetadata; use aws_sdk_s3::error::ProvideErrorMetadata;
@@ -667,9 +668,19 @@ impl BucketTargetSys {
Ok(true) Ok(true)
} }
fn update_bandwidth_limit(&self, _bucket: &str, _arn: &str, _limit: i64) { fn update_bandwidth_limit(&self, bucket: &str, arn: &str, limit: i64) {
// Implementation for bandwidth limit update if let Some(bucket_monitor) = get_global_bucket_monitor() {
// This would interact with the global bucket monitor if limit == 0 {
bucket_monitor.delete_bucket_throttle(bucket, arn);
return;
}
bucket_monitor.set_bandwidth_limit(bucket, arn, limit);
} else {
error!(
"Global bucket monitor uninitialized; skipping bandwidth limit update for bucket '{}' and ARN '{}'",
bucket, arn
);
}
} }
pub async fn get_remote_target_client_by_arn(&self, _bucket: &str, arn: &str) -> Option<Arc<TargetClient>> { pub async fn get_remote_target_client_by_arn(&self, _bucket: &str, arn: &str) -> Option<Arc<TargetClient>> {
@@ -691,6 +702,7 @@ impl BucketTargetSys {
if let Some(existing_targets) = targets_map.remove(bucket) { if let Some(existing_targets) = targets_map.remove(bucket) {
for target in existing_targets { for target in existing_targets {
arn_remotes_map.remove(&target.arn); arn_remotes_map.remove(&target.arn);
self.update_bandwidth_limit(bucket, &target.arn, 0);
} }
} }

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
pub mod bandwidth;
pub mod bucket_target_sys; pub mod bucket_target_sys;
pub mod error; pub mod error;
pub mod lifecycle; pub mod lifecycle;

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use crate::bucket::bandwidth::reader::{BucketOptions, MonitorReaderOptions, MonitoredReader};
use crate::bucket::bucket_target_sys::{ use crate::bucket::bucket_target_sys::{
AdvancedPutOptions, BucketTargetSys, PutObjectOptions, PutObjectPartOptions, RemoveObjectOptions, TargetClient, AdvancedPutOptions, BucketTargetSys, PutObjectOptions, PutObjectPartOptions, RemoveObjectOptions, TargetClient,
}; };
@@ -28,16 +29,21 @@ use crate::error::{Error, Result, is_err_object_not_found, is_err_version_not_fo
use crate::event::name::EventName; use crate::event::name::EventName;
use crate::event_notification::{EventArgs, send_event}; use crate::event_notification::{EventArgs, send_event};
use crate::global::GLOBAL_LocalNodeName; use crate::global::GLOBAL_LocalNodeName;
use crate::global::get_global_bucket_monitor;
use crate::set_disk::get_lock_acquire_timeout; use crate::set_disk::get_lock_acquire_timeout;
use crate::store_api::{DeletedObject, ObjectInfo, ObjectOptions, ObjectToDelete, WalkOptions}; use crate::store_api::{DeletedObject, HTTPRangeSpec, ObjectInfo, ObjectOptions, ObjectToDelete, WalkOptions};
use crate::{StorageAPI, new_object_layer_fn}; use crate::{StorageAPI, new_object_layer_fn};
use aws_sdk_s3::error::SdkError; use aws_sdk_s3::error::SdkError;
use aws_sdk_s3::operation::head_object::HeadObjectOutput; use aws_sdk_s3::operation::head_object::HeadObjectOutput;
use aws_sdk_s3::primitives::ByteStream; use aws_sdk_s3::primitives::ByteStream;
use aws_sdk_s3::types::{CompletedPart, ObjectLockLegalHoldStatus}; use aws_sdk_s3::types::{CompletedPart, ObjectLockLegalHoldStatus};
use aws_smithy_types::body::SdkBody;
use byteorder::ByteOrder; use byteorder::ByteOrder;
use futures::future::join_all; use futures::future::join_all;
use futures::stream::StreamExt;
use http::HeaderMap; use http::HeaderMap;
use http_body::Frame;
use http_body_util::StreamBody;
use regex::Regex; use regex::Regex;
use rustfs_filemeta::{ use rustfs_filemeta::{
MrfReplicateEntry, REPLICATE_EXISTING, REPLICATE_EXISTING_DELETE, REPLICATION_RESET, ReplicateDecision, ReplicateObjectInfo, MrfReplicateEntry, REPLICATE_EXISTING, REPLICATE_EXISTING_DELETE, REPLICATION_RESET, ReplicateDecision, ReplicateObjectInfo,
@@ -61,10 +67,11 @@ use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use time::OffsetDateTime; use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339; use time::format_description::well_known::Rfc3339;
use tokio::io::{AsyncRead, AsyncReadExt}; use tokio::io::AsyncRead;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use tokio::task::JoinSet; use tokio::task::JoinSet;
use tokio::time::Duration as TokioDuration; use tokio::time::Duration as TokioDuration;
use tokio_util::io::ReaderStream;
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
use tracing::{error, info, warn}; use tracing::{error, info, warn};
@@ -74,6 +81,8 @@ const RESYNC_META_FORMAT: u16 = 1;
const RESYNC_META_VERSION: u16 = 1; const RESYNC_META_VERSION: u16 = 1;
const RESYNC_TIME_INTERVAL: TokioDuration = TokioDuration::from_secs(60); const RESYNC_TIME_INTERVAL: TokioDuration = TokioDuration::from_secs(60);
static WARNED_MONITOR_UNINIT: std::sync::Once = std::sync::Once::new();
#[derive(Debug, Clone, Default)] #[derive(Debug, Clone, Default)]
pub struct ResyncOpts { pub struct ResyncOpts {
pub bucket: String, pub bucket: String,
@@ -1956,20 +1965,16 @@ impl ReplicateObjectInfoExt for ReplicateObjectInfo {
let versioned = BucketVersioningSys::prefix_enabled(&bucket, &object).await; let versioned = BucketVersioningSys::prefix_enabled(&bucket, &object).await;
let version_suspended = BucketVersioningSys::prefix_suspended(&bucket, &object).await; let version_suspended = BucketVersioningSys::prefix_suspended(&bucket, &object).await;
let obj_opts = ObjectOptions {
version_id: self.version_id.map(|v| v.to_string()),
version_suspended,
versioned,
replication_request: true,
..Default::default()
};
let mut gr = match storage let mut gr = match storage
.get_object_reader( .get_object_reader(&bucket, &object, None, HeaderMap::new(), &obj_opts)
&bucket,
&object,
None,
HeaderMap::new(),
&ObjectOptions {
version_id: self.version_id.map(|v| v.to_string()),
version_suspended,
versioned,
replication_request: true,
..Default::default()
},
)
.await .await
{ {
Ok(gr) => gr, Ok(gr) => gr,
@@ -2078,34 +2083,26 @@ impl ReplicateObjectInfoExt for ReplicateObjectInfo {
} }
}; };
// TODO:bandwidth
if let Some(err) = if is_multipart { if let Some(err) = if is_multipart {
replicate_object_with_multipart(tgt_client.clone(), &tgt_client.bucket, &object, gr.stream, &object_info, put_opts) drop(gr);
.await replicate_object_with_multipart(MultipartReplicationContext {
.err() storage: storage.clone(),
cli: tgt_client.clone(),
src_bucket: &bucket,
dst_bucket: &tgt_client.bucket,
object: &object,
object_info: &object_info,
obj_opts: &obj_opts,
arn: &rinfo.arn,
put_opts,
})
.await
.err()
} else { } else {
// TODO: use stream gr.stream = wrap_with_bandwidth_monitor(gr.stream, &put_opts, &bucket, &rinfo.arn);
let body = match gr.read_all().await { let byte_stream = async_read_to_bytestream(gr.stream);
Ok(body) => body,
Err(e) => {
rinfo.replication_status = ReplicationStatusType::Failed;
rinfo.error = Some(e.to_string());
warn!("failed to read object for bucket:{} arn:{} error:{}", bucket, tgt_client.arn, e);
send_event(EventArgs {
event_name: EventName::ObjectReplicationNotTracked.as_ref().to_string(),
bucket_name: bucket.clone(),
object: object_info.clone(),
host: GLOBAL_LocalNodeName.to_string(),
user_agent: "Internal: [Replication]".to_string(),
..Default::default()
});
return rinfo;
}
};
let reader = ByteStream::from(body);
tgt_client tgt_client
.put_object(&tgt_client.bucket, &object, size, reader, &put_opts) .put_object(&tgt_client.bucket, &object, size, byte_stream, &put_opts)
.await .await
.map_err(|e| std::io::Error::other(e.to_string())) .map_err(|e| std::io::Error::other(e.to_string()))
.err() .err()
@@ -2161,20 +2158,16 @@ impl ReplicateObjectInfoExt for ReplicateObjectInfo {
let versioned = BucketVersioningSys::prefix_enabled(&bucket, &object).await; let versioned = BucketVersioningSys::prefix_enabled(&bucket, &object).await;
let version_suspended = BucketVersioningSys::prefix_suspended(&bucket, &object).await; let version_suspended = BucketVersioningSys::prefix_suspended(&bucket, &object).await;
let obj_opts = ObjectOptions {
version_id: self.version_id.map(|v| v.to_string()),
version_suspended,
versioned,
replication_request: true,
..Default::default()
};
let mut gr = match storage let mut gr = match storage
.get_object_reader( .get_object_reader(&bucket, &object, None, HeaderMap::new(), &obj_opts)
&bucket,
&object,
None,
HeaderMap::new(),
&ObjectOptions {
version_id: self.version_id.map(|v| v.to_string()),
version_suspended,
versioned,
replication_request: true,
..Default::default()
},
)
.await .await
{ {
Ok(gr) => gr, Ok(gr) => gr,
@@ -2370,39 +2363,27 @@ impl ReplicateObjectInfoExt for ReplicateObjectInfo {
return rinfo; return rinfo;
} }
}; };
if let Some(err) = if is_multipart { if let Some(err) = if is_multipart {
replicate_object_with_multipart( drop(gr);
tgt_client.clone(), replicate_object_with_multipart(MultipartReplicationContext {
&tgt_client.bucket, storage: storage.clone(),
&object, cli: tgt_client.clone(),
gr.stream, src_bucket: &bucket,
&object_info, dst_bucket: &tgt_client.bucket,
object: &object,
object_info: &object_info,
obj_opts: &obj_opts,
arn: &rinfo.arn,
put_opts, put_opts,
) })
.await .await
.err() .err()
} else { } else {
let body = match gr.read_all().await { gr.stream = wrap_with_bandwidth_monitor(gr.stream, &put_opts, &bucket, &rinfo.arn);
Ok(body) => body, let byte_stream = async_read_to_bytestream(gr.stream);
Err(e) => {
rinfo.replication_status = ReplicationStatusType::Failed;
rinfo.error = Some(e.to_string());
warn!("failed to read object for bucket:{} arn:{} error:{}", bucket, tgt_client.arn, e);
send_event(EventArgs {
event_name: EventName::ObjectReplicationNotTracked.as_ref().to_string(),
bucket_name: bucket.clone(),
object: object_info,
host: GLOBAL_LocalNodeName.to_string(),
user_agent: "Internal: [Replication]".to_string(),
..Default::default()
});
rinfo.duration = (OffsetDateTime::now_utc() - start_time).unsigned_abs();
return rinfo;
}
};
let reader = ByteStream::from(body);
tgt_client tgt_client
.put_object(&tgt_client.bucket, &object, size, reader, &put_opts) .put_object(&tgt_client.bucket, &object, size, byte_stream, &put_opts)
.await .await
.map_err(|e| std::io::Error::other(e.to_string())) .map_err(|e| std::io::Error::other(e.to_string()))
.err() .err()
@@ -2456,6 +2437,63 @@ static STANDARD_HEADERS: &[&str] = &[
headers::AMZ_SERVER_SIDE_ENCRYPTION, headers::AMZ_SERVER_SIDE_ENCRYPTION,
]; ];
fn calc_put_object_header_size(put_opts: &PutObjectOptions) -> usize {
let mut header_size: usize = 0;
for (key, value) in put_opts.header().iter() {
header_size += key.as_str().len();
header_size += value.as_bytes().len();
// Account for HTTP header formatting: ": " (2 bytes) and "\r\n" (2 bytes)
header_size += 4;
}
header_size
}
fn wrap_with_bandwidth_monitor_with_header(
stream: Box<dyn AsyncRead + Unpin + Send + Sync>,
bucket: &str,
arn: &str,
header_size: usize,
) -> Box<dyn AsyncRead + Unpin + Send + Sync> {
if let Some(monitor) = get_global_bucket_monitor() {
Box::new(MonitoredReader::new(
monitor,
stream,
MonitorReaderOptions {
bucket_options: BucketOptions {
name: bucket.to_string(),
replication_arn: arn.to_string(),
},
header_size,
},
))
} else {
WARNED_MONITOR_UNINIT.call_once(|| {
warn!(
"Global bucket monitor uninitialized; proceeding with unthrottled replication (bandwidth limits will be ignored)"
)
});
stream
}
}
fn wrap_with_bandwidth_monitor(
stream: Box<dyn AsyncRead + Unpin + Send + Sync>,
put_opts: &PutObjectOptions,
bucket: &str,
arn: &str,
) -> Box<dyn AsyncRead + Unpin + Send + Sync> {
let header_size = calc_put_object_header_size(put_opts);
wrap_with_bandwidth_monitor_with_header(stream, bucket, arn, header_size)
}
fn async_read_to_bytestream(reader: impl AsyncRead + Send + Sync + Unpin + 'static) -> ByteStream {
// Non-retryable: SDK-level retries are not supported for streaming bodies.
// Replication-level retry handles failures at a higher layer.
let stream = ReaderStream::new(reader);
let body = StreamBody::new(stream.map(|r| r.map(Frame::data)));
ByteStream::new(SdkBody::from_body_1_x(body))
}
fn is_standard_header(k: &str) -> bool { fn is_standard_header(k: &str) -> bool {
STANDARD_HEADERS.iter().any(|h| h.eq_ignore_ascii_case(k)) STANDARD_HEADERS.iter().any(|h| h.eq_ignore_ascii_case(k))
} }
@@ -2679,17 +2717,56 @@ fn put_replication_opts(sc: &str, object_info: &ObjectInfo) -> Result<(PutObject
Ok((put_op, is_multipart)) Ok((put_op, is_multipart))
} }
async fn replicate_object_with_multipart( fn part_range_spec_from_actual_size(offset: i64, part_size: i64) -> std::io::Result<(HTTPRangeSpec, i64)> {
if offset < 0 {
return Err(std::io::Error::other("invalid part offset"));
}
if part_size <= 0 {
return Err(std::io::Error::other(format!("invalid part size {part_size}")));
}
let end = offset
.checked_add(part_size - 1)
.ok_or_else(|| std::io::Error::other("part range overflow"))?;
let next_offset = end
.checked_add(1)
.ok_or_else(|| std::io::Error::other("part offset overflow"))?;
Ok((
HTTPRangeSpec {
is_suffix_length: false,
start: offset,
end,
},
next_offset,
))
}
struct MultipartReplicationContext<'a, S: StorageAPI> {
storage: Arc<S>,
cli: Arc<TargetClient>, cli: Arc<TargetClient>,
bucket: &str, src_bucket: &'a str,
object: &str, dst_bucket: &'a str,
reader: Box<dyn AsyncRead + Unpin + Send + Sync>, object: &'a str,
object_info: &ObjectInfo, object_info: &'a ObjectInfo,
opts: PutObjectOptions, obj_opts: &'a ObjectOptions,
) -> std::io::Result<()> { arn: &'a str,
put_opts: PutObjectOptions,
}
async fn replicate_object_with_multipart<S: StorageAPI>(ctx: MultipartReplicationContext<'_, S>) -> std::io::Result<()> {
let MultipartReplicationContext {
storage,
cli,
src_bucket,
dst_bucket,
object,
object_info,
obj_opts,
arn,
put_opts,
} = ctx;
let mut attempts = 1; let mut attempts = 1;
let upload_id = loop { let upload_id = loop {
match cli.create_multipart_upload(bucket, object, &opts).await { match cli.create_multipart_upload(dst_bucket, object, &put_opts).await {
Ok(id) => { Ok(id) => {
break id; break id;
} }
@@ -2708,19 +2785,30 @@ async fn replicate_object_with_multipart(
let mut uploaded_parts: Vec<CompletedPart> = Vec::new(); let mut uploaded_parts: Vec<CompletedPart> = Vec::new();
let mut reader = reader; let mut header_size = calc_put_object_header_size(&put_opts);
let mut offset: i64 = 0;
for part_info in object_info.parts.iter() { for part_info in object_info.parts.iter() {
let mut chunk = vec![0u8; part_info.actual_size as usize]; let part_size = part_info.actual_size;
AsyncReadExt::read_exact(&mut *reader, &mut chunk).await?; let (range_spec, next_offset) = part_range_spec_from_actual_size(offset, part_size)?;
offset = next_offset;
let part_reader = storage
.get_object_reader(src_bucket, object, Some(range_spec), HeaderMap::new(), obj_opts)
.await
.map_err(|e| std::io::Error::other(e.to_string()))?;
let part_stream = wrap_with_bandwidth_monitor_with_header(part_reader.stream, src_bucket, arn, header_size);
header_size = 0;
let byte_stream = async_read_to_bytestream(part_stream);
let object_part = cli let object_part = cli
.put_object_part( .put_object_part(
bucket, dst_bucket,
object, object,
&upload_id, &upload_id,
part_info.number as i32, part_info.number as i32,
part_info.actual_size, part_size,
ByteStream::from(chunk), byte_stream,
&PutObjectPartOptions { ..Default::default() }, &PutObjectPartOptions { ..Default::default() },
) )
.await .await
@@ -2748,7 +2836,7 @@ async fn replicate_object_with_multipart(
); );
cli.complete_multipart_upload( cli.complete_multipart_upload(
bucket, dst_bucket,
object, object,
&upload_id, &upload_id,
uploaded_parts, uploaded_parts,
@@ -2869,3 +2957,22 @@ fn get_replication_action(oi1: &ObjectInfo, oi2: &HeadObjectOutput, op_type: Rep
ReplicationAction::None ReplicationAction::None
} }
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_part_range_spec_from_actual_size() {
let (rs, next) = part_range_spec_from_actual_size(0, 10).unwrap();
assert_eq!(rs.start, 0);
assert_eq!(rs.end, 9);
assert_eq!(next, 10);
}
#[test]
fn test_part_range_spec_rejects_non_positive() {
assert!(part_range_spec_from_actual_size(0, 0).is_err());
assert!(part_range_spec_from_actual_size(0, -1).is_err());
}
}

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use crate::bucket::bandwidth::monitor::Monitor;
use crate::{ use crate::{
bucket::lifecycle::bucket_lifecycle_ops::LifecycleSys, bucket::lifecycle::bucket_lifecycle_ops::LifecycleSys,
disk::DiskStore, disk::DiskStore,
@@ -29,6 +30,7 @@ use std::{
}; };
use tokio::sync::{OnceCell, RwLock}; use tokio::sync::{OnceCell, RwLock};
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
use tracing::warn;
use uuid::Uuid; use uuid::Uuid;
pub const DISK_ASSUME_UNKNOWN_SIZE: u64 = 1 << 30; pub const DISK_ASSUME_UNKNOWN_SIZE: u64 = 1 << 30;
@@ -57,6 +59,20 @@ lazy_static! {
pub static ref GLOBAL_REGION: OnceLock<String> = OnceLock::new(); pub static ref GLOBAL_REGION: OnceLock<String> = OnceLock::new();
pub static ref GLOBAL_LOCAL_LOCK_CLIENT: OnceLock<Arc<dyn LockClient>> = OnceLock::new(); pub static ref GLOBAL_LOCAL_LOCK_CLIENT: OnceLock<Arc<dyn LockClient>> = OnceLock::new();
pub static ref GLOBAL_LOCK_CLIENTS: OnceLock<HashMap<String, Arc<dyn LockClient>>> = OnceLock::new(); pub static ref GLOBAL_LOCK_CLIENTS: OnceLock<HashMap<String, Arc<dyn LockClient>>> = OnceLock::new();
pub static ref GLOBAL_BUCKET_MONITOR: OnceLock<Arc<Monitor>> = OnceLock::new();
}
pub fn init_global_bucket_monitor(num_nodes: u64) {
if GLOBAL_BUCKET_MONITOR.set(Monitor::new(num_nodes)).is_err() {
warn!(
"global bucket monitor already initialized, ignoring re-initialization with num_nodes={}",
num_nodes
);
}
}
pub fn get_global_bucket_monitor() -> Option<Arc<Monitor>> {
GLOBAL_BUCKET_MONITOR.get().cloned()
} }
/// Global cancellation token for background services (data scanner and auto heal) /// Global cancellation token for background services (data scanner and auto heal)

View File

@@ -39,8 +39,9 @@ use crate::error::{
}; };
use crate::global::{ use crate::global::{
DISK_ASSUME_UNKNOWN_SIZE, DISK_FILL_FRACTION, DISK_MIN_INODES, DISK_RESERVE_FRACTION, GLOBAL_BOOT_TIME, DISK_ASSUME_UNKNOWN_SIZE, DISK_FILL_FRACTION, DISK_MIN_INODES, DISK_RESERVE_FRACTION, GLOBAL_BOOT_TIME,
GLOBAL_LOCAL_DISK_MAP, GLOBAL_LOCAL_DISK_SET_DRIVES, GLOBAL_TierConfigMgr, get_global_deployment_id, get_global_endpoints, GLOBAL_LOCAL_DISK_MAP, GLOBAL_LOCAL_DISK_SET_DRIVES, GLOBAL_TierConfigMgr, get_global_bucket_monitor,
is_dist_erasure, is_erasure_sd, set_global_deployment_id, set_object_layer, get_global_deployment_id, get_global_endpoints, init_global_bucket_monitor, is_dist_erasure, is_erasure_sd,
set_global_deployment_id, set_object_layer,
}; };
use crate::notification_sys::get_global_notification_sys; use crate::notification_sys::get_global_notification_sys;
use crate::pools::PoolMeta; use crate::pools::PoolMeta;
@@ -405,6 +406,9 @@ impl ECStore {
} }
} }
let num_nodes = get_global_endpoints().get_nodes().len() as u64;
init_global_bucket_monitor(num_nodes);
init_background_expiry(self.clone()).await; init_background_expiry(self.clone()).await;
TransitionState::init(self.clone()).await; TransitionState::init(self.clone()).await;
@@ -1523,6 +1527,9 @@ impl StorageAPI for ECStore {
// Delete the metadata // Delete the metadata
self.delete_all(RUSTFS_META_BUCKET, format!("{BUCKET_META_PREFIX}/{bucket}").as_str()) self.delete_all(RUSTFS_META_BUCKET, format!("{BUCKET_META_PREFIX}/{bucket}").as_str())
.await?; .await?;
if let Some(monitor) = get_global_bucket_monitor() {
monitor.delete_bucket(bucket);
}
Ok(()) Ok(())
} }

View File

@@ -38,6 +38,7 @@ http.workspace = true
object_store = { workspace = true } object_store = { workspace = true }
pin-project-lite.workspace = true pin-project-lite.workspace = true
s3s.workspace = true s3s.workspace = true
serde_json = { workspace = true }
snafu = { workspace = true, features = ["backtrace"] } snafu = { workspace = true, features = ["backtrace"] }
parking_lot.workspace = true parking_lot.workspace = true
tokio.workspace = true tokio.workspace = true

View File

@@ -40,15 +40,39 @@ use std::sync::Arc;
use std::task::Poll; use std::task::Poll;
use std::task::ready; use std::task::ready;
use tokio::io::AsyncRead; use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt;
use tokio_util::io::ReaderStream; use tokio_util::io::ReaderStream;
use tracing::info; use tracing::info;
use transform_stream::AsyncTryStream; use transform_stream::AsyncTryStream;
/// Maximum allowed object size for JSON DOCUMENT mode.
///
/// JSON DOCUMENT format requires loading the entire file into memory for DOM
/// parsing, so memory consumption grows linearly with file size. Objects
/// larger than this threshold are rejected with an error rather than risking
/// an OOM condition.
///
/// To process larger JSON files, convert the input to **JSON LINES** (NDJSON,
/// `type = LINES`), which supports line-by-line streaming with no memory
/// size limit.
///
/// Default: 128 MiB. This matches the AWS S3 Select limit for JSON DOCUMENT
/// inputs.
pub const MAX_JSON_DOCUMENT_BYTES: u64 = 128 * 1024 * 1024;
#[derive(Debug)] #[derive(Debug)]
pub struct EcObjectStore { pub struct EcObjectStore {
input: Arc<SelectObjectContentInput>, input: Arc<SelectObjectContentInput>,
need_convert: bool, need_convert: bool,
delimiter: String, delimiter: String,
/// True when the JSON input type is DOCUMENT (multi-line formatted JSON).
/// In that case the raw bytes are buffered and flattened to NDJSON before
/// being handed to DataFusion's Arrow JSON reader.
is_json_document: bool,
/// Optional JSON sub-path extracted from `FROM s3object.<path>` in the SQL
/// expression. When set, `flatten_json_document_to_ndjson` navigates to
/// this key in the root JSON object before flattening.
json_sub_path: Option<String>,
store: Arc<ECStore>, store: Arc<ECStore>,
} }
@@ -72,10 +96,31 @@ impl EcObjectStore {
(false, String::new()) (false, String::new())
}; };
// Detect JSON DOCUMENT type: the entire file is a single (possibly
// multi-line) JSON object/array, NOT newline-delimited JSON.
let is_json_document = input
.request
.input_serialization
.json
.as_ref()
.and_then(|j| j.type_.as_ref())
.map(|t| t.as_str() == "DOCUMENT")
.unwrap_or(false);
// Extract the JSON sub-path from the SQL expression, e.g.
// `SELECT … FROM s3object.employees e` → `Some("employees")`.
let json_sub_path = if is_json_document {
extract_json_sub_path_from_expression(&input.request.expression)
} else {
None
};
Ok(Self { Ok(Self {
input, input,
need_convert, need_convert,
delimiter, delimiter,
is_json_document,
json_sub_path,
store, store,
}) })
} }
@@ -110,39 +155,66 @@ impl ObjectStore for EcObjectStore {
source: "can not get object info".into(), source: "can not get object info".into(),
})?; })?;
let original_size = reader.object_info.size as u64;
let etag = reader.object_info.etag;
let attributes = Attributes::default();
let (payload, size) = if self.is_json_document {
// JSON DOCUMENT mode: gate on object size before doing any I/O.
//
// Small files (<= MAX_JSON_DOCUMENT_BYTES): build a lazy stream
// that defers all I/O and JSON parsing until DataFusion first
// polls it. Parsing runs inside spawn_blocking so the async
// runtime thread is never blocked.
//
// Large files (> MAX_JSON_DOCUMENT_BYTES): return an error
// immediately. JSON DOCUMENT relies on serde_json DOM parsing
// which must load the whole file into memory; rejecting oversized
// files upfront is safer than risking OOM. Users should convert
// their data to JSON LINES (NDJSON) format for large files.
if original_size > MAX_JSON_DOCUMENT_BYTES {
return Err(o_Error::Generic {
store: "EcObjectStore",
source: format!(
"JSON DOCUMENT object is {original_size} bytes, which exceeds the \
maximum allowed size of {MAX_JSON_DOCUMENT_BYTES} bytes \
({} MiB). Convert the input to JSON LINES (NDJSON) to process \
large files.",
MAX_JSON_DOCUMENT_BYTES / (1024 * 1024)
)
.into(),
});
}
let stream = json_document_ndjson_stream(reader.stream, original_size, self.json_sub_path.clone());
(object_store::GetResultPayload::Stream(stream), original_size)
} else if self.need_convert {
let stream = bytes_stream(
ReaderStream::with_capacity(ConvertStream::new(reader.stream, self.delimiter.clone()), DEFAULT_READ_BUFFER_SIZE),
original_size as usize,
)
.boxed();
(object_store::GetResultPayload::Stream(stream), original_size)
} else {
let stream = bytes_stream(
ReaderStream::with_capacity(reader.stream, DEFAULT_READ_BUFFER_SIZE),
original_size as usize,
)
.boxed();
(object_store::GetResultPayload::Stream(stream), original_size)
};
let meta = ObjectMeta { let meta = ObjectMeta {
location: location.clone(), location: location.clone(),
last_modified: Utc::now(), last_modified: Utc::now(),
size: reader.object_info.size as u64, size,
e_tag: reader.object_info.etag, e_tag: etag,
version: None, version: None,
}; };
let attributes = Attributes::default();
let payload = if self.need_convert {
object_store::GetResultPayload::Stream(
bytes_stream(
ReaderStream::with_capacity(
ConvertStream::new(reader.stream, self.delimiter.clone()),
DEFAULT_READ_BUFFER_SIZE,
),
reader.object_info.size as usize,
)
.boxed(),
)
} else {
object_store::GetResultPayload::Stream(
bytes_stream(
ReaderStream::with_capacity(reader.stream, DEFAULT_READ_BUFFER_SIZE),
reader.object_info.size as usize,
)
.boxed(),
)
};
Ok(GetResult { Ok(GetResult {
payload, payload,
meta, meta,
range: 0..reader.object_info.size as u64, range: 0..size,
attributes, attributes,
}) })
} }
@@ -241,6 +313,193 @@ fn replace_symbol(delimiter: &[u8], slice: &[u8]) -> Vec<u8> {
result result
} }
/// Extract the JSON sub-path from a SQL expression's FROM clause.
///
/// Given `SELECT e.name FROM s3object.employees e WHERE …` this returns
/// `Some("employees")`. Returns `None` when the FROM target is plain
/// `s3object` (no sub-path) or when the expression cannot be parsed.
fn extract_json_sub_path_from_expression(expression: &str) -> Option<String> {
// Find " FROM " (case-insensitive).
let lower = expression.to_lowercase();
let from_pos = lower.find(" from ")?;
let after_from = expression[from_pos + 6..].trim_start();
// Must start with "s3object" (case-insensitive, ASCII-only for the prefix).
const S3OBJECT_LOWER: &str = "s3object";
let mut chars = after_from.char_indices();
for expected in S3OBJECT_LOWER.chars() {
let (idx, actual) = chars.next()?;
if actual.to_ascii_lowercase() != expected {
return None;
}
// When we have consumed the full prefix, `idx` is the byte index of
// the current character; use it plus its UTF-8 length as the slice
// boundary for the remaining string.
if expected == 't' {
let end_of_prefix = idx + actual.len_utf8();
let after_s3object = &after_from[end_of_prefix..];
// If the very next character is '.' there is a sub-path.
if let Some(rest) = after_s3object.strip_prefix('.') {
let rest = rest.trim_start();
if rest.is_empty() {
return None;
}
// Support quoted identifiers: s3object."my.path" or s3object.'my path'
let mut chars = rest.chars();
if let Some(first) = chars.next()
&& (first == '"' || first == '\'')
{
let quote = first;
let inner = &rest[first.len_utf8()..];
if let Some(end) = inner.find(quote) {
let path = &inner[..end];
if !path.trim().is_empty() {
return Some(path.to_string());
}
}
// Quoted but no closing quote or empty: treat as no sub-path.
return None;
}
// Unquoted identifier: collect characters until whitespace, '[', or ']'.
let end = rest
.find(|c: char| c.is_whitespace() || c == '[' || c == ']')
.unwrap_or(rest.len());
let path = rest[..end].trim();
if !path.is_empty() {
return Some(path.to_string());
}
}
return None;
}
}
// We only reach here if the loop completed without hitting the 't'
// branch above, which would be unexpected given S3OBJECT_LOWER.
None
}
/// Build a lazy NDJSON stream from a JSON DOCUMENT reader.
///
/// `get_opts` calls this and returns immediately no I/O is performed until
/// DataFusion begins polling the returned stream. The pipeline is:
///
/// 1. **Read** the object bytes are read asynchronously from `stream` only
/// when the returned stream is first polled.
/// 2. **Parse** JSON deserialization runs inside
/// `tokio::task::spawn_blocking` so the async runtime is never blocked by
/// CPU-bound work, even for very large documents.
/// 3. **Yield** each NDJSON line (one per array element, or one line for a
/// scalar/object root) is yielded as a separate [`Bytes`] chunk, so
/// DataFusion can pipeline row processing as lines arrive.
fn json_document_ndjson_stream(
stream: Box<dyn tokio::io::AsyncRead + Unpin + Send + Sync>,
original_size: u64,
json_sub_path: Option<String>,
) -> futures_core::stream::BoxStream<'static, Result<Bytes>> {
AsyncTryStream::<Bytes, o_Error, _>::new(|mut y| async move {
pin_mut!(stream);
// ── 1. Read phase (lazy: only runs when the stream is polled) ────
let mut all_bytes = Vec::with_capacity(original_size as usize);
stream
.take(original_size)
.read_to_end(&mut all_bytes)
.await
.map_err(|e| o_Error::Generic {
store: "EcObjectStore",
source: Box::new(e),
})?;
// ── 2. Parse phase (blocking thread pool, non-blocking runtime) ──
let lines = tokio::task::spawn_blocking(move || parse_json_document_to_lines(&all_bytes, json_sub_path.as_deref()))
.await
.map_err(|e| o_Error::Generic {
store: "EcObjectStore",
source: e.to_string().into(),
})?
.map_err(|e| o_Error::Generic {
store: "EcObjectStore",
source: Box::new(e),
})?;
// ── 3. Yield phase (one Bytes per NDJSON line) ───────────────────
for line in lines {
y.yield_ok(line).await;
}
Ok(())
})
.boxed()
}
/// Parse a JSON DOCUMENT (a single JSON value, possibly multi-line) into a
/// list of NDJSON lines one [`Bytes`] per record.
///
/// `json_sub_path` when the SQL expression contains `FROM s3object.<key>`,
/// pass `Some(key)` to navigate into that key before flattening. For
/// example, given `{"employees":[{…},{…}]}` and `json_sub_path =
/// Some("employees")`, each element of the `employees` array becomes one
/// NDJSON line.
///
/// - A JSON array → one line per element.
/// - A JSON object (no sub-path match, or scalar root) → one line.
fn parse_json_document_to_lines(bytes: &[u8], json_sub_path: Option<&str>) -> std::io::Result<Vec<Bytes>> {
let root: serde_json::Value =
serde_json::from_slice(bytes).map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
// Navigate into the sub-path when the root is an object and a path was
// extracted from the SQL FROM clause (e.g. `FROM s3object.employees`).
let value = if let Some(path) = json_sub_path {
if let serde_json::Value::Object(ref obj) = root {
match obj.get(path) {
Some(sub) => sub.clone(),
// Path not found fall back to emitting the whole root object.
None => root,
}
} else {
// Root is already an array or scalar; ignore the path hint.
root
}
} else {
root
};
let mut lines: Vec<Bytes> = Vec::new();
match value {
serde_json::Value::Array(arr) => {
for item in arr {
let mut line = serde_json::to_vec(&item).map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
line.push(b'\n');
lines.push(Bytes::from(line));
}
}
other => {
let mut line = serde_json::to_vec(&other).map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
line.push(b'\n');
lines.push(Bytes::from(line));
}
}
Ok(lines)
}
/// Convert a JSON DOCUMENT to a single concatenated NDJSON [`Bytes`] blob.
///
/// This is a convenience wrapper around [`parse_json_document_to_lines`] used
/// by the unit tests. Production code uses `json_document_ndjson_stream`
/// instead, which streams lines lazily without constructing this intermediate
/// blob.
#[cfg(test)]
fn flatten_json_document_to_ndjson(bytes: &[u8], json_sub_path: Option<&str>) -> std::io::Result<Bytes> {
let lines = parse_json_document_to_lines(bytes, json_sub_path)?;
let total = lines.iter().map(|b| b.len()).sum();
let mut output = Vec::with_capacity(total);
for line in lines {
output.extend_from_slice(&line);
}
Ok(Bytes::from(output))
}
pub fn bytes_stream<S>(stream: S, content_length: usize) -> impl Stream<Item = Result<Bytes>> + Send + 'static pub fn bytes_stream<S>(stream: S, content_length: usize) -> impl Stream<Item = Result<Bytes>> + Send + 'static
where where
S: Stream<Item = Result<Bytes, std::io::Error>> + Send + 'static, S: Stream<Item = Result<Bytes, std::io::Error>> + Send + 'static,
@@ -265,7 +524,7 @@ where
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::replace_symbol; use super::{extract_json_sub_path_from_expression, flatten_json_document_to_ndjson, replace_symbol};
#[test] #[test]
fn test_replace() { fn test_replace() {
@@ -279,4 +538,163 @@ mod test {
Err(e) => eprintln!("Error converting to string: {e}"), Err(e) => eprintln!("Error converting to string: {e}"),
} }
} }
/// A JSON array is split into one NDJSON line per element.
#[test]
fn test_flatten_array_produces_one_line_per_element() {
let input = br#"[{"id":1,"name":"Alice"},{"id":2,"name":"Bob"}]"#;
let result = flatten_json_document_to_ndjson(input, None).expect("should succeed");
let text = std::str::from_utf8(&result).unwrap();
let lines: Vec<&str> = text.lines().collect();
assert_eq!(lines.len(), 2);
// Each line must be valid JSON
for line in &lines {
serde_json::from_str::<serde_json::Value>(line).expect("each line must be valid JSON");
}
// Spot-check field values
let first: serde_json::Value = serde_json::from_str(lines[0]).unwrap();
assert_eq!(first["id"], 1);
assert_eq!(first["name"], "Alice");
}
/// A single JSON object emits exactly one NDJSON line.
#[test]
fn test_flatten_single_object_produces_one_line() {
let input = br#"{"id":42,"value":"hello world"}"#;
let result = flatten_json_document_to_ndjson(input, None).expect("should succeed");
let text = std::str::from_utf8(&result).unwrap();
let lines: Vec<&str> = text.lines().collect();
assert_eq!(lines.len(), 1);
let parsed: serde_json::Value = serde_json::from_str(lines[0]).unwrap();
assert_eq!(parsed["id"], 42);
assert_eq!(parsed["value"], "hello world");
}
/// An empty JSON array produces empty output (zero bytes).
#[test]
fn test_flatten_empty_array_produces_no_output() {
let input = b"[]";
let result = flatten_json_document_to_ndjson(input, None).expect("should succeed");
assert!(result.is_empty(), "empty array should yield zero bytes");
}
/// A multi-line (pretty-printed) JSON document is flattened correctly.
#[test]
fn test_flatten_pretty_printed_document() {
let input = b"[\n {\"a\": 1},\n {\"a\": 2},\n {\"a\": 3}\n]";
let result = flatten_json_document_to_ndjson(input, None).expect("should succeed");
let text = std::str::from_utf8(&result).unwrap();
assert_eq!(text.lines().count(), 3);
}
/// Nested objects inside array elements are preserved as compact single-line JSON.
#[test]
fn test_flatten_array_with_nested_objects() {
let input = br#"[{"outer":{"inner":99}},{"outer":{"inner":100}}]"#;
let result = flatten_json_document_to_ndjson(input, None).expect("should succeed");
let text = std::str::from_utf8(&result).unwrap();
let lines: Vec<&str> = text.lines().collect();
assert_eq!(lines.len(), 2);
// Each line must not contain a newline mid-value
for line in &lines {
assert!(!line.is_empty());
let v: serde_json::Value = serde_json::from_str(line).unwrap();
assert!(v["outer"]["inner"].as_i64().unwrap() >= 99);
}
}
/// Each output line ends with exactly one newline (no blank lines between records).
#[test]
fn test_flatten_output_ends_with_newline_per_record() {
let input = br#"[{"x":1},{"x":2}]"#;
let result = flatten_json_document_to_ndjson(input, None).expect("should succeed");
let text = std::str::from_utf8(&result).unwrap();
// Exactly 2 newlines for 2 records
assert_eq!(text.chars().filter(|&c| c == '\n').count(), 2);
// No leading blank line
assert!(!text.starts_with('\n'));
}
/// Invalid JSON returns an `InvalidData` IO error.
#[test]
fn test_flatten_invalid_json_returns_error() {
let input = b"{ not valid json }";
let err = flatten_json_document_to_ndjson(input, None).expect_err("should fail on invalid JSON");
assert_eq!(err.kind(), std::io::ErrorKind::InvalidData);
}
/// Completely empty input returns an error (not valid JSON).
#[test]
fn test_flatten_empty_input_returns_error() {
let err = flatten_json_document_to_ndjson(b"", None).expect_err("empty bytes are not valid JSON");
assert_eq!(err.kind(), std::io::ErrorKind::InvalidData);
}
// ── sub-path navigation tests ─────────────────────────────────────────
/// `FROM s3object.employees` with a root JSON object navigates into the
/// `employees` array and emits one NDJSON line per element.
#[test]
fn test_flatten_sub_path_object_with_array() {
let input = br#"{"employees":[{"id":1,"name":"Alice","salary":75000},{"id":2,"name":"Bob","salary":65000}]}"#;
let result = flatten_json_document_to_ndjson(input, Some("employees")).expect("should succeed");
let text = std::str::from_utf8(&result).unwrap();
let lines: Vec<&str> = text.lines().collect();
assert_eq!(lines.len(), 2, "each employee should be its own NDJSON line");
let first: serde_json::Value = serde_json::from_str(lines[0]).unwrap();
assert_eq!(first["name"], "Alice");
assert_eq!(first["salary"], 75000);
let second: serde_json::Value = serde_json::from_str(lines[1]).unwrap();
assert_eq!(second["name"], "Bob");
}
/// Sub-path that does not exist in the root object falls back to emitting the
/// entire root object as one NDJSON line (graceful degradation).
#[test]
fn test_flatten_sub_path_missing_key_falls_back() {
let input = br#"{"employees":[]}"#;
let result = flatten_json_document_to_ndjson(input, Some("nonexistent")).expect("should succeed");
let text = std::str::from_utf8(&result).unwrap();
// Falls back to emitting the whole root object.
assert_eq!(text.lines().count(), 1);
let parsed: serde_json::Value = serde_json::from_str(text.trim_end()).unwrap();
assert!(parsed.get("employees").is_some(), "root object preserved");
}
/// Sub-path is ignored when the root is already an array.
#[test]
fn test_flatten_sub_path_ignored_for_root_array() {
let input = br#"[{"id":1},{"id":2}]"#;
let result = flatten_json_document_to_ndjson(input, Some("employees")).expect("should succeed");
let text = std::str::from_utf8(&result).unwrap();
// The root array is flattened directly regardless of the sub-path hint.
assert_eq!(text.lines().count(), 2);
}
// ── SQL path extraction tests ─────────────────────────────────────────
#[test]
fn test_extract_json_sub_path_basic() {
let sql = "SELECT e.name FROM s3object.employees e WHERE e.salary > 70000";
assert_eq!(extract_json_sub_path_from_expression(sql), Some("employees".to_string()));
}
#[test]
fn test_extract_json_sub_path_uppercase() {
let sql = "SELECT s.name FROM S3Object.records s";
assert_eq!(extract_json_sub_path_from_expression(sql), Some("records".to_string()));
}
#[test]
fn test_extract_json_sub_path_no_sub_path() {
let sql = "SELECT * FROM s3object WHERE s3object.age > 30";
assert_eq!(extract_json_sub_path_from_expression(sql), None);
}
#[test]
fn test_extract_json_sub_path_with_bracket() {
    // `FROM s3object.employees[*]` — the `[` terminates path collection.
    let sql = "SELECT e.name FROM s3object.employees[*] e";
    let extracted = extract_json_sub_path_from_expression(sql);
    assert_eq!(extracted.as_deref(), Some("employees"));
}
} }

View File

@@ -16,7 +16,6 @@ use crate::query::Context;
use crate::{QueryError, QueryResult, object_store::EcObjectStore}; use crate::{QueryError, QueryResult, object_store::EcObjectStore};
use datafusion::{ use datafusion::{
execution::{SessionStateBuilder, context::SessionState, runtime_env::RuntimeEnvBuilder}, execution::{SessionStateBuilder, context::SessionState, runtime_env::RuntimeEnvBuilder},
parquet::data_type::AsBytes,
prelude::SessionContext, prelude::SessionContext,
}; };
use object_store::{ObjectStore, memory::InMemory, path::Path}; use object_store::{ObjectStore, memory::InMemory, path::Path};
@@ -65,30 +64,36 @@ impl SessionCtxFactory {
let df_session_state = if self.is_test { let df_session_state = if self.is_test {
let store: Arc<dyn ObjectStore> = Arc::new(InMemory::new()); let store: Arc<dyn ObjectStore> = Arc::new(InMemory::new());
let data = b"id,name,age,department,salary
1,Alice,25,HR,5000 // Choose test data format based on what the request serialization specifies.
2,Bob,30,IT,6000 let data_bytes: &[u8] = if context.input.request.input_serialization.json.is_some() {
3,Charlie,35,Finance,7000 // NDJSON: one JSON object per line — usable for both LINES and DOCUMENT
4,Diana,22,Marketing,4500 // requests (DOCUMENT inputs are converted to NDJSON by EcObjectStore, but
5,Eve,28,IT,5500 // in test mode we bypass EcObjectStore, so we put NDJSON here directly).
6,Frank,40,Finance,8000 b"{\"id\":1,\"name\":\"Alice\",\"age\":25,\"department\":\"HR\",\"salary\":5000}\n\
7,Grace,26,HR,5200 {\"id\":2,\"name\":\"Bob\",\"age\":30,\"department\":\"IT\",\"salary\":6000}\n\
8,Henry,32,IT,6200 {\"id\":3,\"name\":\"Charlie\",\"age\":35,\"department\":\"Finance\",\"salary\":7000}\n\
9,Ivy,24,Marketing,4800 {\"id\":4,\"name\":\"Diana\",\"age\":22,\"department\":\"Marketing\",\"salary\":4500}\n\
10,Jack,38,Finance,7500"; {\"id\":5,\"name\":\"Eve\",\"age\":28,\"department\":\"IT\",\"salary\":5500}\n\
let data_bytes = data.as_bytes(); {\"id\":6,\"name\":\"Frank\",\"age\":40,\"department\":\"Finance\",\"salary\":8000}\n\
// let data = r#""year"╦"gender"╦"ethnicity"╦"firstname"╦"count"╦"rank" {\"id\":7,\"name\":\"Grace\",\"age\":26,\"department\":\"HR\",\"salary\":5200}\n\
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"SOPHIA"╦"119"╦"1" {\"id\":8,\"name\":\"Henry\",\"age\":32,\"department\":\"IT\",\"salary\":6200}\n\
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"CHLOE"╦"106"╦"2" {\"id\":9,\"name\":\"Ivy\",\"age\":24,\"department\":\"Marketing\",\"salary\":4800}\n\
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"EMILY"╦"93"╦"3" {\"id\":10,\"name\":\"Jack\",\"age\":38,\"department\":\"Finance\",\"salary\":7500}\n"
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"OLIVIA"╦"89"╦"4" } else {
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"EMMA"╦"75"╦"5" b"id,name,age,department,salary
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"ISABELLA"╦"67"╦"6" 1,Alice,25,HR,5000
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"TIFFANY"╦"54"╦"7" 2,Bob,30,IT,6000
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"ASHLEY"╦"52"╦"8" 3,Charlie,35,Finance,7000
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"FIONA"╦"48"╦"9" 4,Diana,22,Marketing,4500
// "2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"ANGELA"╦"47"╦"10""#; 5,Eve,28,IT,5500
// let data_bytes = Bytes::from(data); 6,Frank,40,Finance,8000
7,Grace,26,HR,5200
8,Henry,32,IT,6200
9,Ivy,24,Marketing,4800
10,Jack,38,Finance,7500"
};
let path = Path::from(context.input.key.clone()); let path = Path::from(context.input.key.clone());
store.put(&path, data_bytes.into()).await.map_err(|e| { store.put(&path, data_bytes.into()).await.map_err(|e| {
error!("put data into memory failed: {}", e.to_string()); error!("put data into memory failed: {}", e.to_string());
@@ -97,7 +102,7 @@ impl SessionCtxFactory {
df_session_state.with_object_store(&store_url, Arc::new(store)).build() df_session_state.with_object_store(&store_url, Arc::new(store)).build()
} else { } else {
let store = let store: EcObjectStore =
EcObjectStore::new(context.input.clone()).map_err(|_| QueryError::NotImplemented { err: String::new() })?; EcObjectStore::new(context.input.clone()).map_err(|_| QueryError::NotImplemented { err: String::new() })?;
df_session_state.with_object_store(&store_url, Arc::new(store)).build() df_session_state.with_object_store(&store_url, Arc::new(store)).build()
}; };

View File

@@ -218,7 +218,16 @@ impl SimpleQueryDispatcher {
(ListingOptions::new(Arc::new(file_format)).with_file_extension(".parquet"), false, false) (ListingOptions::new(Arc::new(file_format)).with_file_extension(".parquet"), false, false)
} else if self.input.request.input_serialization.json.is_some() { } else if self.input.request.input_serialization.json.is_some() {
let file_format = JsonFormat::default(); let file_format = JsonFormat::default();
(ListingOptions::new(Arc::new(file_format)).with_file_extension(".json"), false, false) // Use the actual file extension from the object key so that files stored
// with a `.jsonl` suffix (newline-delimited JSON) are also matched by
// DataFusion's listing/schema-inference logic. Falling back to ".json"
// preserves behaviour for keys that have no extension.
let file_ext = std::path::Path::new(&self.input.key)
.extension()
.and_then(|e| e.to_str())
.map(|e| format!(".{e}"))
.unwrap_or_else(|| ".json".to_string());
(ListingOptions::new(Arc::new(file_format)).with_file_extension(file_ext), false, false)
} else { } else {
return Err(QueryError::NotImplemented { return Err(QueryError::NotImplemented {
err: "not support this file type".to_string(), err: "not support this file type".to_string(),

View File

@@ -20,8 +20,8 @@ mod integration_tests {
query::{Context, Query}, query::{Context, Query},
}; };
use s3s::dto::{ use s3s::dto::{
CSVInput, CSVOutput, ExpressionType, FileHeaderInfo, InputSerialization, OutputSerialization, SelectObjectContentInput, CSVInput, CSVOutput, ExpressionType, FileHeaderInfo, InputSerialization, JSONInput, JSONOutput, JSONType,
SelectObjectContentRequest, OutputSerialization, SelectObjectContentInput, SelectObjectContentRequest,
}; };
use std::sync::Arc; use std::sync::Arc;
@@ -53,6 +53,36 @@ mod integration_tests {
} }
} }
/// Build a `SelectObjectContentInput` that targets a JSON DOCUMENT file.
/// `JSONType::DOCUMENT` is used so the NDJSON-flattening path in
/// `EcObjectStore` is exercised.
fn create_test_json_input(sql: &str) -> SelectObjectContentInput {
    // Request half: SQL expression plus JSON input/output serialization.
    let request = SelectObjectContentRequest {
        expression: sql.to_string(),
        expression_type: ExpressionType::from_static("SQL"),
        input_serialization: InputSerialization {
            json: Some(JSONInput {
                type_: Some(JSONType::from_static(JSONType::DOCUMENT)),
            }),
            ..Default::default()
        },
        output_serialization: OutputSerialization {
            json: Some(JSONOutput::default()),
            ..Default::default()
        },
        request_progress: None,
        scan_range: None,
    };
    // Envelope half: bucket/key addressing, no SSE-C material.
    SelectObjectContentInput {
        bucket: "test-bucket".to_string(),
        expected_bucket_owner: None,
        key: "test.json".to_string(),
        sse_customer_algorithm: None,
        sse_customer_key: None,
        sse_customer_key_md5: None,
        request,
    }
}
#[tokio::test] #[tokio::test]
async fn test_database_creation() { async fn test_database_creation() {
let input = create_test_input("SELECT * FROM S3Object"); let input = create_test_input("SELECT * FROM S3Object");
@@ -225,4 +255,170 @@ mod integration_tests {
assert!(result.is_ok()); assert!(result.is_ok());
} }
} }
// ──────────────────────────────────────────────
// JSON-input variants of all the above tests
// These exercise the JSONType::LINES (JSON lines) code path
// ──────────────────────────────────────────────
#[tokio::test]
async fn test_database_creation_json() {
    // The factory should build a database for a JSON-input request.
    let input = Arc::new(create_test_json_input("SELECT * FROM S3Object"));
    assert!(make_rustfsms(input, true).await.is_ok());
}
#[tokio::test]
async fn test_global_db_creation_json() {
    // The global DB accessor must also work for JSON-input requests.
    let input = create_test_json_input("SELECT * FROM S3Object");
    assert!(get_global_db(input.clone(), true).await.is_ok());
}
#[tokio::test]
async fn test_simple_select_query_json() {
    let sql = "SELECT * FROM S3Object";
    let input = create_test_json_input(sql);
    let db = get_global_db(input.clone(), true).await.unwrap();
    let query = Query::new(Context { input: Arc::new(input) }, sql.to_string());
    // Execute and drain the full result stream.
    let handle = db.execute(&query).await.expect("query should execute");
    assert!(handle.result().chunk_result().await.is_ok());
}
#[tokio::test]
async fn test_select_with_where_clause_json() {
    // Projection plus a WHERE filter over the JSON test data.
    let sql = "SELECT name, age FROM S3Object WHERE age > 30";
    let input = create_test_json_input(sql);
    let db = get_global_db(input.clone(), true).await.unwrap();
    let query = Query::new(Context { input: Arc::new(input) }, sql.to_string());
    assert!(db.execute(&query).await.is_ok());
}
#[tokio::test]
async fn test_select_with_aggregation_json() {
    let sql = "SELECT department, COUNT(*) as count FROM S3Object GROUP BY department";
    let input = create_test_json_input(sql);
    let db = get_global_db(input.clone(), true).await.unwrap();
    let query = Query::new(Context { input: Arc::new(input) }, sql.to_string());
    // Aggregation may fail without a real data source; either outcome is
    // acceptable — we only require that execution does not panic.
    let _ = db.execute(&query).await;
}
#[tokio::test]
async fn test_invalid_sql_syntax_json() {
    // Garbage SQL must be rejected with an error.
    let sql = "INVALID SQL SYNTAX";
    let input = create_test_json_input(sql);
    let db = get_global_db(input.clone(), true).await.unwrap();
    let query = Query::new(Context { input: Arc::new(input) }, sql.to_string());
    assert!(db.execute(&query).await.is_err());
}
#[tokio::test]
async fn test_multi_statement_error_json() {
    // Two statements in one request must fail with MultiStatement (num = 2).
    let sql = "SELECT * FROM S3Object; SELECT 1;";
    let input = create_test_json_input(sql);
    let db = get_global_db(input.clone(), true).await.unwrap();
    let query = Query::new(Context { input: Arc::new(input) }, sql.to_string());
    match db.execute(&query).await {
        Err(QueryError::MultiStatement { num, .. }) => assert_eq!(num, 2),
        _ => panic!("Expected MultiStatement error"),
    }
}
#[tokio::test]
async fn test_query_state_machine_workflow_json() {
    let sql = "SELECT * FROM S3Object";
    let input = create_test_json_input(sql);
    let db = get_global_db(input.clone(), true).await.unwrap();
    let query = Query::new(Context { input: Arc::new(input) }, sql.to_string());
    // Walk the explicit state-machine path: build → plan → execute.
    let state_machine = db
        .build_query_state_machine(query.clone())
        .await
        .expect("state machine should build");
    let logical_plan = db.build_logical_plan(state_machine.clone()).await;
    assert!(logical_plan.is_ok());
    if let Ok(Some(plan)) = logical_plan {
        assert!(db.execute_logical_plan(plan, state_machine).await.is_ok());
    }
}
#[tokio::test]
async fn test_query_with_limit_json() {
    let sql = "SELECT * FROM S3Object LIMIT 5";
    let input = create_test_json_input(sql);
    let db = get_global_db(input.clone(), true).await.unwrap();
    let query = Query::new(Context { input: Arc::new(input) }, sql.to_string());
    let handle = db.execute(&query).await.expect("query should execute");
    let batches = handle.result().chunk_result().await.unwrap();
    // LIMIT 5 caps the total row count across all record batches.
    let total_rows: usize = batches.iter().map(|b| b.num_rows()).sum();
    assert!(total_rows <= 5);
}
#[tokio::test]
async fn test_query_with_order_by_json() {
    // ORDER BY over the JSON test data should plan and execute.
    let sql = "SELECT name, age FROM S3Object ORDER BY age DESC";
    let input = create_test_json_input(sql);
    let db = get_global_db(input.clone(), true).await.unwrap();
    let query = Query::new(Context { input: Arc::new(input) }, sql.to_string());
    assert!(db.execute(&query).await.is_ok());
}
#[tokio::test]
async fn test_concurrent_queries_json() {
    let input = create_test_json_input("SELECT * FROM S3Object");
    let db = get_global_db(input.clone(), true).await.unwrap();
    // Spawn three queries with different LIMITs and run them concurrently.
    let handles: Vec<_> = (0..3)
        .map(|i| {
            let query = Query::new(
                Context {
                    input: Arc::new(input.clone()),
                },
                format!("SELECT * FROM S3Object LIMIT {}", i + 1),
            );
            let db_clone = db.clone();
            tokio::spawn(async move { db_clone.execute(&query).await })
        })
        .collect();
    // Every spawned query must complete successfully.
    for handle in handles {
        assert!(handle.await.unwrap().is_ok());
    }
}
} }

View File

@@ -51,6 +51,12 @@ pub fn register_group_management_route(r: &mut S3Router<AdminOperation>) -> std:
AdminOperation(&GetGroup {}), AdminOperation(&GetGroup {}),
)?; )?;
r.insert(
Method::DELETE,
format!("{}{}", ADMIN_PREFIX, "/v3/group/{group}").as_str(),
AdminOperation(&DeleteGroup {}),
)?;
r.insert( r.insert(
Method::PUT, Method::PUT,
format!("{}{}", ADMIN_PREFIX, "/v3/set-group-status").as_str(), format!("{}{}", ADMIN_PREFIX, "/v3/set-group-status").as_str(),
@@ -159,6 +165,89 @@ impl Operation for GetGroup {
} }
} }
/// Deletes an empty user group.
///
/// # Arguments
/// * `group` - The name of the group to delete
///
/// # Returns
/// - `200 OK` - Group deleted successfully
/// - `400 Bad Request` - Group name missing or invalid
/// - `401 Unauthorized` - Insufficient permissions
/// - `404 Not Found` - Group does not exist
/// - `409 Conflict` - Group contains members and cannot be deleted
/// - `500 Internal Server Error` - Server-side error
///
/// # Example
/// ```text
/// DELETE /rustfs/admin/v3/group/developers
/// ```
pub struct DeleteGroup {}
#[async_trait::async_trait]
impl Operation for DeleteGroup {
    /// Handles `DELETE /rustfs/admin/v3/group/{group}`.
    async fn call(&self, req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        warn!("handle DeleteGroup");
        // The request must carry credentials; anonymous access is rejected.
        let Some(input_cred) = req.credentials else {
            return Err(s3_error!(InvalidRequest, "get cred failed"));
        };
        let (cred, owner) =
            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
        // Require the admin remove-user-from-group action for group deletion.
        validate_admin_request(
            &req.headers,
            &cred,
            owner,
            false,
            vec![Action::AdminAction(AdminAction::RemoveUserFromGroupAdminAction)],
            req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0)),
        )
        .await?;
        // The group name comes from the `{group}` path parameter.
        let group = params
            .get("group")
            .ok_or_else(|| s3_error!(InvalidArgument, "missing group name in request"))?
            .trim();
        // Validate the group name format
        if group.is_empty() || group.len() > 256 {
            return Err(s3_error!(InvalidArgument, "invalid group name"));
        }
        // Sanity check the group name
        if group.contains(['/', '\\', '\0']) {
            return Err(s3_error!(InvalidArgument, "group name contains invalid characters"));
        }
        let Ok(iam_store) = rustfs_iam::get() else { return Err(s3_error!(InternalError, "iam not init")) };
        // NOTE(review): an empty member list appears to mean "delete the group
        // itself" in the IAM API — confirm against remove_users_from_group.
        iam_store.remove_users_from_group(group, vec![]).await.map_err(|e| {
            warn!("delete group failed, e: {:?}", e);
            // Map IAM errors onto the closest S3 error codes.
            match e {
                rustfs_iam::error::Error::GroupNotEmpty => {
                    s3_error!(InvalidRequest, "group is not empty")
                }
                rustfs_iam::error::Error::InvalidArgument => {
                    s3_error!(InvalidArgument, "{e}")
                }
                _ => {
                    if is_err_no_such_group(&e) {
                        s3_error!(NoSuchKey, "group '{group}' does not exist")
                    } else {
                        s3_error!(InternalError, "{e}")
                    }
                }
            }
        })?;
        // Success: 200 OK with an empty JSON body.
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
pub struct SetGroupStatus {} pub struct SetGroupStatus {}
#[async_trait::async_trait] #[async_trait::async_trait]
impl Operation for SetGroupStatus { impl Operation for SetGroupStatus {

View File

@@ -64,6 +64,7 @@ fn test_register_routes_cover_representative_admin_paths() {
assert_route(&router, Method::PUT, &admin_path("/v3/add-user")); assert_route(&router, Method::PUT, &admin_path("/v3/add-user"));
assert_route(&router, Method::PUT, &admin_path("/v3/set-user-status")); assert_route(&router, Method::PUT, &admin_path("/v3/set-user-status"));
assert_route(&router, Method::GET, &admin_path("/v3/groups")); assert_route(&router, Method::GET, &admin_path("/v3/groups"));
assert_route(&router, Method::DELETE, &admin_path("/v3/group/test-group"));
assert_route(&router, Method::PUT, &admin_path("/v3/update-group-members")); assert_route(&router, Method::PUT, &admin_path("/v3/update-group-members"));
assert_route(&router, Method::PUT, &admin_path("/v3/add-service-accounts")); assert_route(&router, Method::PUT, &admin_path("/v3/add-service-accounts"));
assert_route(&router, Method::GET, &admin_path("/v3/export-iam")); assert_route(&router, Method::GET, &admin_path("/v3/export-iam"));

View File

@@ -19,19 +19,22 @@ use crate::app::context::{AppContext, get_global_app_context};
use crate::auth::get_condition_values; use crate::auth::get_condition_values;
use crate::error::ApiError; use crate::error::ApiError;
use crate::server::RemoteAddr; use crate::server::RemoteAddr;
use crate::storage::access::authorize_request; use crate::storage::access::{ReqInfo, authorize_request};
use crate::storage::ecfs::{ use crate::storage::ecfs::{
default_owner, is_public_grant, parse_acl_json_or_canned_bucket, serialize_acl, stored_acl_from_canned_bucket, RUSTFS_OWNER, default_owner, is_public_grant, parse_acl_json_or_canned_bucket, serialize_acl, stored_acl_from_canned_bucket,
stored_acl_from_grant_headers, stored_acl_from_grant_headers, stored_acl_from_policy, stored_grant_to_dto, stored_owner_to_dto,
}; };
use crate::storage::helper::OperationHelper; use crate::storage::helper::OperationHelper;
use crate::storage::*; use crate::storage::*;
use futures::StreamExt;
use http::StatusCode;
use metrics::counter; use metrics::counter;
use rustfs_ecstore::bucket::{ use rustfs_ecstore::bucket::{
lifecycle::bucket_lifecycle_ops::validate_transition_tier, lifecycle::bucket_lifecycle_ops::validate_transition_tier,
metadata::{ metadata::{
BUCKET_ACL_CONFIG, BUCKET_LIFECYCLE_CONFIG, BUCKET_NOTIFICATION_CONFIG, BUCKET_POLICY_CONFIG, BUCKET_SSECONFIG, BUCKET_ACL_CONFIG, BUCKET_CORS_CONFIG, BUCKET_LIFECYCLE_CONFIG, BUCKET_NOTIFICATION_CONFIG, BUCKET_POLICY_CONFIG,
BUCKET_TAGGING_CONFIG, BUCKET_VERSIONING_CONFIG, BUCKET_PUBLIC_ACCESS_BLOCK_CONFIG, BUCKET_REPLICATION_CONFIG, BUCKET_SSECONFIG, BUCKET_TAGGING_CONFIG,
BUCKET_VERSIONING_CONFIG,
}, },
metadata_sys, metadata_sys,
policy_sys::PolicySys, policy_sys::PolicySys,
@@ -57,7 +60,7 @@ use s3s::dto::*;
use s3s::xml; use s3s::xml;
use s3s::{S3Error, S3ErrorCode, S3Request, S3Response, S3Result, s3_error}; use s3s::{S3Error, S3ErrorCode, S3Request, S3Response, S3Result, s3_error};
use std::{fmt::Display, sync::Arc}; use std::{fmt::Display, sync::Arc};
use tracing::{debug, info, instrument, warn}; use tracing::{debug, error, info, instrument, warn};
use urlencoding::encode; use urlencoding::encode;
pub type BucketUsecaseResult<T> = Result<T, ApiError>; pub type BucketUsecaseResult<T> = Result<T, ApiError>;
@@ -232,6 +235,72 @@ impl DefaultBucketUsecase {
result result
} }
/// Stores the bucket ACL, resolving the ACL source in precedence order:
/// explicit `AccessControlPolicy` body → `x-amz-grant-*` headers →
/// canned ACL header → `private` default.
pub async fn execute_put_bucket_acl(&self, req: S3Request<PutBucketAclInput>) -> S3Result<S3Response<PutBucketAclOutput>> {
    // Touch the app context's object store handle when one is attached
    // (kept for parity with the other bucket handlers).
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let PutBucketAclInput {
        bucket,
        acl,
        access_control_policy,
        grant_full_control,
        grant_read,
        grant_read_acp,
        grant_write,
        grant_write_acp,
        ..
    } = req.input;
    let Some(store) = new_object_layer_fn() else {
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    // Fail early if the bucket does not exist.
    store
        .get_bucket_info(&bucket, &BucketOptions::default())
        .await
        .map_err(ApiError::from)?;
    let owner = default_owner();
    // 1) An explicit AccessControlPolicy body takes highest precedence.
    let mut stored_acl = access_control_policy
        .as_ref()
        .map(|policy| stored_acl_from_policy(policy, &owner))
        .transpose()?;
    // 2) Otherwise fall back to the x-amz-grant-* headers.
    if stored_acl.is_none() {
        stored_acl = stored_acl_from_grant_headers(
            &owner,
            grant_read.map(|v| v.to_string()),
            grant_write.map(|v| v.to_string()),
            grant_read_acp.map(|v| v.to_string()),
            grant_write_acp.map(|v| v.to_string()),
            grant_full_control.map(|v| v.to_string()),
        )?;
    }
    // 3) Otherwise use the canned ACL header.
    if stored_acl.is_none()
        && let Some(canned) = acl
    {
        stored_acl = Some(stored_acl_from_canned_bucket(canned.as_str(), &owner));
    }
    // 4) Default to a private ACL when nothing was specified.
    let stored_acl = stored_acl.unwrap_or_else(|| stored_acl_from_canned_bucket(BucketCannedACL::PRIVATE, &owner));
    // Honor the bucket's public-access-block: reject public grants when
    // BlockPublicAcls is enabled.
    if let Ok((config, _)) = metadata_sys::get_public_access_block_config(&bucket).await
        && config.block_public_acls.unwrap_or(false)
        && stored_acl.grants.iter().any(is_public_grant)
    {
        return Err(s3_error!(AccessDenied, "Access Denied"));
    }
    // Persist the resolved ACL into the bucket metadata.
    let data = serialize_acl(&stored_acl)?;
    metadata_sys::update(&bucket, BUCKET_ACL_CONFIG, data)
        .await
        .map_err(ApiError::from)?;
    Ok(S3Response::new(PutBucketAclOutput::default()))
}
#[instrument(level = "debug", skip(self, req))] #[instrument(level = "debug", skip(self, req))]
pub async fn execute_delete_bucket(&self, mut req: S3Request<DeleteBucketInput>) -> S3Result<S3Response<DeleteBucketOutput>> { pub async fn execute_delete_bucket(&self, mut req: S3Request<DeleteBucketInput>) -> S3Result<S3Response<DeleteBucketOutput>> {
if let Some(context) = &self.context { if let Some(context) = &self.context {
@@ -299,6 +368,140 @@ impl DefaultBucketUsecase {
Ok(S3Response::new(HeadBucketOutput::default())) Ok(S3Response::new(HeadBucketOutput::default()))
} }
/// Returns the stored bucket ACL; a missing ACL config yields the default
/// `private` canned ACL for the default owner.
pub async fn execute_get_bucket_acl(&self, req: S3Request<GetBucketAclInput>) -> S3Result<S3Response<GetBucketAclOutput>> {
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let GetBucketAclInput { bucket, .. } = req.input;
    let Some(store) = new_object_layer_fn() else {
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    // Verify the bucket exists before reading its ACL.
    store
        .get_bucket_info(&bucket, &BucketOptions::default())
        .await
        .map_err(ApiError::from)?;
    let owner = default_owner();
    let stored_acl = match metadata_sys::get_bucket_acl_config(&bucket).await {
        Ok((acl, _)) => parse_acl_json_or_canned_bucket(&acl, &owner),
        Err(err) => {
            // Only a missing config falls back to `private`; any other
            // storage error is surfaced to the caller.
            if err != StorageError::ConfigNotFound {
                return Err(ApiError::from(err).into());
            }
            stored_acl_from_canned_bucket(BucketCannedACL::PRIVATE, &owner)
        }
    };
    // Present "Group" grantees first: the sort key is `false` for Group
    // grants, and `false` sorts before `true` (stable sort preserves the
    // rest of the order).
    let mut sorted_grants = stored_acl.grants.clone();
    sorted_grants.sort_by_key(|grant| grant.grantee.grantee_type != "Group");
    let grants = sorted_grants.iter().map(stored_grant_to_dto).collect();
    Ok(S3Response::new(GetBucketAclOutput {
        grants: Some(grants),
        owner: Some(stored_owner_to_dto(&stored_acl.owner)),
    }))
}
#[instrument(level = "debug", skip(self, req))]
pub async fn execute_get_bucket_location(
&self,
req: S3Request<GetBucketLocationInput>,
) -> S3Result<S3Response<GetBucketLocationOutput>> {
if let Some(context) = &self.context {
let _ = context.object_store();
}
let input = req.input;
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
store
.get_bucket_info(&input.bucket, &BucketOptions::default())
.await
.map_err(ApiError::from)?;
if let Some(region) = rustfs_ecstore::global::get_global_region() {
return Ok(S3Response::new(GetBucketLocationOutput {
location_constraint: Some(BucketLocationConstraint::from(region)),
}));
}
Ok(S3Response::new(GetBucketLocationOutput::default()))
}
/// Lists buckets visible to the caller.
///
/// Callers authorized for `ListAllMyBucketsAction` see every bucket;
/// otherwise each bucket is individually checked against
/// `ListBucketAction` / `GetBucketLocationAction` and only the authorized
/// subset is returned.
#[instrument(level = "debug", skip(self))]
pub async fn execute_list_buckets(&self, req: S3Request<ListBucketsInput>) -> S3Result<S3Response<ListBucketsOutput>> {
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let Some(store) = new_object_layer_fn() else {
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    let mut req = req;
    // Anonymous requests (no credentials / empty access key) are rejected.
    if req.credentials.as_ref().is_none_or(|cred| cred.access_key.is_empty()) {
        return Err(S3Error::with_message(S3ErrorCode::AccessDenied, "Access Denied"));
    }
    let bucket_infos = if let Err(e) = authorize_request(&mut req, Action::S3Action(S3Action::ListAllMyBucketsAction)).await {
        // Only AccessDenied triggers the per-bucket fallback; any other
        // authorization failure aborts the request.
        if e.code() != &S3ErrorCode::AccessDenied {
            return Err(e);
        }
        // Fallback: list everything, then keep only the buckets the caller
        // can access via ListBucket or GetBucketLocation.
        let mut list_bucket_infos = store.list_bucket(&BucketOptions::default()).await.map_err(ApiError::from)?;
        list_bucket_infos = futures::stream::iter(list_bucket_infos)
            .filter_map(|info| async {
                // Re-run authorization with the candidate bucket substituted
                // into the cloned request's ReqInfo.
                let mut req_clone = req.clone();
                let req_info = req_clone.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
                req_info.bucket = Some(info.name.clone());
                if authorize_request(&mut req_clone, Action::S3Action(S3Action::ListBucketAction))
                    .await
                    .is_ok()
                    || authorize_request(&mut req_clone, Action::S3Action(S3Action::GetBucketLocationAction))
                        .await
                        .is_ok()
                {
                    Some(info)
                } else {
                    None
                }
            })
            .collect()
            .await;
        // No visible buckets at all is reported as access denied.
        if list_bucket_infos.is_empty() {
            return Err(S3Error::with_message(S3ErrorCode::AccessDenied, "Access Denied"));
        }
        list_bucket_infos
    } else {
        store.list_bucket(&BucketOptions::default()).await.map_err(ApiError::from)?
    };
    // Project storage-layer bucket infos into the S3 DTO.
    let buckets: Vec<Bucket> = bucket_infos
        .iter()
        .map(|v| Bucket {
            creation_date: v.created.map(Timestamp::from),
            name: Some(v.name.clone()),
            ..Default::default()
        })
        .collect();
    Ok(S3Response::new(ListBucketsOutput {
        buckets: Some(buckets),
        owner: Some(RUSTFS_OWNER.to_owned()),
        ..Default::default()
    }))
}
pub async fn execute_delete_bucket_encryption( pub async fn execute_delete_bucket_encryption(
&self, &self,
req: S3Request<DeleteBucketEncryptionInput>, req: S3Request<DeleteBucketEncryptionInput>,
@@ -325,6 +528,33 @@ impl DefaultBucketUsecase {
Ok(S3Response::new(DeleteBucketEncryptionOutput::default())) Ok(S3Response::new(DeleteBucketEncryptionOutput::default()))
} }
#[instrument(level = "debug", skip(self))]
pub async fn execute_delete_bucket_cors(
    &self,
    req: S3Request<DeleteBucketCorsInput>,
) -> S3Result<S3Response<DeleteBucketCorsOutput>> {
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let bucket = req.input.bucket;
    let Some(store) = new_object_layer_fn() else {
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    // Deleting CORS on a missing bucket must report the bucket error.
    store
        .get_bucket_info(&bucket, &BucketOptions::default())
        .await
        .map_err(ApiError::from)?;
    // Drop the stored CORS configuration for this bucket.
    metadata_sys::delete(&bucket, BUCKET_CORS_CONFIG)
        .await
        .map_err(ApiError::from)?;
    Ok(S3Response::new(DeleteBucketCorsOutput {}))
}
#[instrument(level = "debug", skip(self))] #[instrument(level = "debug", skip(self))]
pub async fn execute_delete_bucket_lifecycle( pub async fn execute_delete_bucket_lifecycle(
&self, &self,
@@ -378,6 +608,34 @@ impl DefaultBucketUsecase {
Ok(S3Response::new(DeleteBucketPolicyOutput {})) Ok(S3Response::new(DeleteBucketPolicyOutput {}))
} }
pub async fn execute_delete_bucket_replication(
    &self,
    req: S3Request<DeleteBucketReplicationInput>,
) -> S3Result<S3Response<DeleteBucketReplicationOutput>> {
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let bucket = req.input.bucket;
    let Some(store) = new_object_layer_fn() else {
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    // The bucket must exist; otherwise surface the storage error.
    store
        .get_bucket_info(&bucket, &BucketOptions::default())
        .await
        .map_err(ApiError::from)?;
    // Remove the stored replication configuration.
    metadata_sys::delete(&bucket, BUCKET_REPLICATION_CONFIG)
        .await
        .map_err(ApiError::from)?;
    // TODO: remove targets
    info!(bucket = %bucket, "deleted bucket replication config");
    Ok(S3Response::new(DeleteBucketReplicationOutput::default()))
}
#[instrument(level = "debug", skip(self))] #[instrument(level = "debug", skip(self))]
pub async fn execute_delete_bucket_tagging( pub async fn execute_delete_bucket_tagging(
&self, &self,
@@ -396,6 +654,33 @@ impl DefaultBucketUsecase {
Ok(S3Response::new(DeleteBucketTaggingOutput {})) Ok(S3Response::new(DeleteBucketTaggingOutput {}))
} }
#[instrument(level = "debug", skip(self))]
pub async fn execute_delete_public_access_block(
    &self,
    req: S3Request<DeletePublicAccessBlockInput>,
) -> S3Result<S3Response<DeletePublicAccessBlockOutput>> {
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let bucket = req.input.bucket;
    let Some(store) = new_object_layer_fn() else {
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    store
        .get_bucket_info(&bucket, &BucketOptions::default())
        .await
        .map_err(ApiError::from)?;
    // Drop the stored public-access-block configuration.
    metadata_sys::delete(&bucket, BUCKET_PUBLIC_ACCESS_BLOCK_CONFIG)
        .await
        .map_err(ApiError::from)?;
    // S3 answers DeletePublicAccessBlock with 204 No Content.
    Ok(S3Response::with_status(DeletePublicAccessBlockOutput::default(), StatusCode::NO_CONTENT))
}
pub async fn execute_get_bucket_encryption( pub async fn execute_get_bucket_encryption(
&self, &self,
req: S3Request<GetBucketEncryptionInput>, req: S3Request<GetBucketEncryptionInput>,
@@ -428,6 +713,42 @@ impl DefaultBucketUsecase {
})) }))
} }
#[instrument(level = "debug", skip(self))]
pub async fn execute_get_bucket_cors(&self, req: S3Request<GetBucketCorsInput>) -> S3Result<S3Response<GetBucketCorsOutput>> {
if let Some(context) = &self.context {
let _ = context.object_store();
}
let GetBucketCorsInput { bucket, .. } = req.input;
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
store
.get_bucket_info(&bucket, &BucketOptions::default())
.await
.map_err(ApiError::from)?;
let cors_configuration = match metadata_sys::get_cors_config(&bucket).await {
Ok((config, _)) => config,
Err(err) => {
if err == StorageError::ConfigNotFound {
return Err(S3Error::with_message(
S3ErrorCode::NoSuchCORSConfiguration,
"The CORS configuration does not exist".to_string(),
));
}
warn!("get_cors_config err {:?}", &err);
return Err(ApiError::from(err).into());
}
};
Ok(S3Response::new(GetBucketCorsOutput {
cors_rules: Some(cors_configuration.cors_rules),
}))
}
#[instrument(level = "debug", skip(self))] #[instrument(level = "debug", skip(self))]
pub async fn execute_get_bucket_lifecycle_configuration( pub async fn execute_get_bucket_lifecycle_configuration(
&self, &self,
@@ -632,6 +953,44 @@ impl DefaultBucketUsecase {
Ok(S3Response::new(output)) Ok(S3Response::new(output))
} }
/// Returns the bucket's replication configuration.
///
/// # Errors
/// - `ReplicationConfigurationNotFoundError` when no configuration is stored
/// - `InternalError` when the object layer is not initialized
/// - any other storage error, mapped through `ApiError`
pub async fn execute_get_bucket_replication(
    &self,
    req: S3Request<GetBucketReplicationInput>,
) -> S3Result<S3Response<GetBucketReplicationOutput>> {
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let GetBucketReplicationInput { bucket, .. } = req.input;
    let Some(store) = new_object_layer_fn() else {
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    store
        .get_bucket_info(&bucket, &BucketOptions::default())
        .await
        .map_err(ApiError::from)?;
    let replication_configuration = match metadata_sys::get_replication_config(&bucket).await {
        Ok((cfg, _created)) => cfg,
        Err(err) => {
            // A missing config is an expected condition: answer with the
            // dedicated S3 error code without error-level logging
            // (consistent with execute_get_bucket_cors, which only logs
            // unexpected failures).
            if err == StorageError::ConfigNotFound {
                return Err(S3Error::with_message(
                    S3ErrorCode::ReplicationConfigurationNotFoundError,
                    "replication not found".to_string(),
                ));
            }
            error!("get_replication_config err {:?}", err);
            return Err(ApiError::from(err).into());
        }
    };
    Ok(S3Response::new(GetBucketReplicationOutput {
        replication_configuration: Some(replication_configuration),
    }))
}
#[instrument(level = "debug", skip(self))] #[instrument(level = "debug", skip(self))]
pub async fn execute_get_bucket_tagging( pub async fn execute_get_bucket_tagging(
&self, &self,
@@ -666,6 +1025,44 @@ impl DefaultBucketUsecase {
Ok(S3Response::new(GetBucketTaggingOutput { tag_set })) Ok(S3Response::new(GetBucketTaggingOutput { tag_set }))
} }
#[instrument(level = "debug", skip(self))]
pub async fn execute_get_public_access_block(
&self,
req: S3Request<GetPublicAccessBlockInput>,
) -> S3Result<S3Response<GetPublicAccessBlockOutput>> {
if let Some(context) = &self.context {
let _ = context.object_store();
}
let GetPublicAccessBlockInput { bucket, .. } = req.input;
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
store
.get_bucket_info(&bucket, &BucketOptions::default())
.await
.map_err(ApiError::from)?;
let config = match metadata_sys::get_public_access_block_config(&bucket).await {
Ok((config, _)) => config,
Err(err) => {
if err == StorageError::ConfigNotFound {
return Err(S3Error::with_message(
S3ErrorCode::Custom("NoSuchPublicAccessBlockConfiguration".into()),
"Public access block configuration does not exist".to_string(),
));
}
return Err(ApiError::from(err).into());
}
};
Ok(S3Response::new(GetPublicAccessBlockOutput {
public_access_block_configuration: Some(config),
}))
}
#[instrument(level = "debug", skip(self))] #[instrument(level = "debug", skip(self))]
pub async fn execute_get_bucket_versioning( pub async fn execute_get_bucket_versioning(
&self, &self,
@@ -889,6 +1286,100 @@ impl DefaultBucketUsecase {
Ok(S3Response::new(PutBucketPolicyOutput {})) Ok(S3Response::new(PutBucketPolicyOutput {}))
} }
#[instrument(level = "debug", skip(self))]
pub async fn execute_put_bucket_cors(&self, req: S3Request<PutBucketCorsInput>) -> S3Result<S3Response<PutBucketCorsOutput>> {
if let Some(context) = &self.context {
let _ = context.object_store();
}
let PutBucketCorsInput {
bucket,
cors_configuration,
..
} = req.input;
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
store
.get_bucket_info(&bucket, &BucketOptions::default())
.await
.map_err(ApiError::from)?;
let data = serialize_config(&cors_configuration)?;
metadata_sys::update(&bucket, BUCKET_CORS_CONFIG, data)
.await
.map_err(ApiError::from)?;
Ok(S3Response::new(PutBucketCorsOutput::default()))
}
pub async fn execute_put_bucket_replication(
&self,
req: S3Request<PutBucketReplicationInput>,
) -> S3Result<S3Response<PutBucketReplicationOutput>> {
if let Some(context) = &self.context {
let _ = context.object_store();
}
let PutBucketReplicationInput {
bucket,
replication_configuration,
..
} = req.input;
info!(bucket = %bucket, "updating bucket replication config");
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
store
.get_bucket_info(&bucket, &BucketOptions::default())
.await
.map_err(ApiError::from)?;
// TODO: check enable, versioning enable
let data = serialize_config(&replication_configuration)?;
metadata_sys::update(&bucket, BUCKET_REPLICATION_CONFIG, data)
.await
.map_err(ApiError::from)?;
Ok(S3Response::new(PutBucketReplicationOutput::default()))
}
#[instrument(level = "debug", skip(self))]
pub async fn execute_put_public_access_block(
&self,
req: S3Request<PutPublicAccessBlockInput>,
) -> S3Result<S3Response<PutPublicAccessBlockOutput>> {
if let Some(context) = &self.context {
let _ = context.object_store();
}
let PutPublicAccessBlockInput {
bucket,
public_access_block_configuration,
..
} = req.input;
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
store
.get_bucket_info(&bucket, &BucketOptions::default())
.await
.map_err(ApiError::from)?;
let data = serialize_config(&public_access_block_configuration)?;
metadata_sys::update(&bucket, BUCKET_PUBLIC_ACCESS_BLOCK_CONFIG, data)
.await
.map_err(ApiError::from)?;
Ok(S3Response::new(PutPublicAccessBlockOutput::default()))
}
#[instrument(level = "debug", skip(self))] #[instrument(level = "debug", skip(self))]
pub async fn execute_put_bucket_tagging( pub async fn execute_put_bucket_tagging(
&self, &self,
@@ -1197,6 +1688,34 @@ mod tests {
assert_eq!(err.code(), &S3ErrorCode::InternalError); assert_eq!(err.code(), &S3ErrorCode::InternalError);
} }
#[tokio::test]
async fn execute_delete_bucket_cors_returns_internal_error_when_store_uninitialized() {
let input = DeleteBucketCorsInput::builder()
.bucket("test-bucket".to_string())
.build()
.unwrap();
let req = build_request(input, Method::DELETE);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_delete_bucket_cors(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test]
async fn execute_delete_bucket_replication_returns_internal_error_when_store_uninitialized() {
let input = DeleteBucketReplicationInput::builder()
.bucket("test-bucket".to_string())
.build()
.unwrap();
let req = build_request(input, Method::DELETE);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_delete_bucket_replication(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test] #[tokio::test]
async fn execute_head_bucket_returns_internal_error_when_store_uninitialized() { async fn execute_head_bucket_returns_internal_error_when_store_uninitialized() {
let input = HeadBucketInput::builder().bucket("test-bucket".to_string()).build().unwrap(); let input = HeadBucketInput::builder().bucket("test-bucket".to_string()).build().unwrap();
@@ -1222,6 +1741,90 @@ mod tests {
assert_eq!(err.code(), &S3ErrorCode::InternalError); assert_eq!(err.code(), &S3ErrorCode::InternalError);
} }
#[tokio::test]
async fn execute_get_bucket_acl_returns_internal_error_when_store_uninitialized() {
let input = GetBucketAclInput::builder()
.bucket("test-bucket".to_string())
.build()
.unwrap();
let req = build_request(input, Method::GET);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_get_bucket_acl(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test]
async fn execute_get_bucket_location_returns_internal_error_when_store_uninitialized() {
let input = GetBucketLocationInput::builder()
.bucket("test-bucket".to_string())
.build()
.unwrap();
let req = build_request(input, Method::GET);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_get_bucket_location(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test]
async fn execute_get_bucket_cors_returns_internal_error_when_store_uninitialized() {
let input = GetBucketCorsInput::builder()
.bucket("test-bucket".to_string())
.build()
.unwrap();
let req = build_request(input, Method::GET);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_get_bucket_cors(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test]
async fn execute_delete_public_access_block_returns_internal_error_when_store_uninitialized() {
let input = DeletePublicAccessBlockInput::builder()
.bucket("test-bucket".to_string())
.build()
.unwrap();
let req = build_request(input, Method::DELETE);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_delete_public_access_block(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test]
async fn execute_get_bucket_replication_returns_internal_error_when_store_uninitialized() {
let input = GetBucketReplicationInput::builder()
.bucket("test-bucket".to_string())
.build()
.unwrap();
let req = build_request(input, Method::GET);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_get_bucket_replication(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test]
async fn execute_get_public_access_block_returns_internal_error_when_store_uninitialized() {
let input = GetPublicAccessBlockInput::builder()
.bucket("test-bucket".to_string())
.build()
.unwrap();
let req = build_request(input, Method::GET);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_get_public_access_block(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test] #[tokio::test]
async fn execute_get_bucket_versioning_returns_internal_error_when_store_uninitialized() { async fn execute_get_bucket_versioning_returns_internal_error_when_store_uninitialized() {
let input = GetBucketVersioningInput::builder() let input = GetBucketVersioningInput::builder()
@@ -1236,6 +1839,17 @@ mod tests {
assert_eq!(err.code(), &S3ErrorCode::InternalError); assert_eq!(err.code(), &S3ErrorCode::InternalError);
} }
#[tokio::test]
async fn execute_list_buckets_returns_internal_error_when_store_uninitialized() {
let input = ListBucketsInput::builder().build().unwrap();
let req = build_request(input, Method::GET);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_list_buckets(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test] #[tokio::test]
async fn execute_put_bucket_lifecycle_configuration_rejects_missing_configuration() { async fn execute_put_bucket_lifecycle_configuration_rejects_missing_configuration() {
let input = PutBucketLifecycleConfigurationInput::builder() let input = PutBucketLifecycleConfigurationInput::builder()
@@ -1265,6 +1879,68 @@ mod tests {
assert_eq!(err.code(), &S3ErrorCode::InternalError); assert_eq!(err.code(), &S3ErrorCode::InternalError);
} }
#[tokio::test]
async fn execute_put_bucket_cors_returns_internal_error_when_store_uninitialized() {
let input = PutBucketCorsInput::builder()
.bucket("test-bucket".to_string())
.cors_configuration(CORSConfiguration::default())
.build()
.unwrap();
let req = build_request(input, Method::PUT);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_put_bucket_cors(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test]
async fn execute_put_bucket_replication_returns_internal_error_when_store_uninitialized() {
let input = PutBucketReplicationInput::builder()
.bucket("test-bucket".to_string())
.replication_configuration(ReplicationConfiguration {
role: "arn:aws:iam::123456789012:role/test".to_string(),
rules: vec![],
})
.build()
.unwrap();
let req = build_request(input, Method::PUT);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_put_bucket_replication(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test]
async fn execute_put_bucket_acl_returns_internal_error_when_store_uninitialized() {
let input = PutBucketAclInput::builder()
.bucket("test-bucket".to_string())
.build()
.unwrap();
let req = build_request(input, Method::PUT);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_put_bucket_acl(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test]
async fn execute_put_public_access_block_returns_internal_error_when_store_uninitialized() {
let input = PutPublicAccessBlockInput::builder()
.bucket("test-bucket".to_string())
.public_access_block_configuration(PublicAccessBlockConfiguration::default())
.build()
.unwrap();
let req = build_request(input, Method::PUT);
let usecase = DefaultBucketUsecase::without_context();
let err = usecase.execute_put_public_access_block(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test] #[tokio::test]
async fn execute_list_objects_v2_rejects_negative_max_keys() { async fn execute_list_objects_v2_rejects_negative_max_keys() {
let input = ListObjectsV2Input::builder() let input = ListObjectsV2Input::builder()

View File

@@ -18,10 +18,12 @@
use crate::app::context::{AppContext, get_global_app_context}; use crate::app::context::{AppContext, get_global_app_context};
use crate::error::ApiError; use crate::error::ApiError;
use crate::storage::concurrency::get_concurrency_manager; use crate::storage::concurrency::get_concurrency_manager;
use crate::storage::ecfs::ManagedEncryptionMaterial; use crate::storage::ecfs::{ManagedEncryptionMaterial, RUSTFS_OWNER};
use crate::storage::entity; use crate::storage::entity;
use crate::storage::helper::OperationHelper; use crate::storage::helper::OperationHelper;
use crate::storage::options::{extract_metadata, get_complete_multipart_upload_opts, get_content_sha256, put_opts}; use crate::storage::options::{
copy_src_opts, extract_metadata, get_complete_multipart_upload_opts, get_content_sha256, parse_copy_source_range, put_opts,
};
use crate::storage::*; use crate::storage::*;
use bytes::Bytes; use bytes::Bytes;
use futures::StreamExt; use futures::StreamExt;
@@ -36,10 +38,10 @@ use rustfs_ecstore::client::object_api_utils::to_s3s_etag;
use rustfs_ecstore::compress::is_compressible; use rustfs_ecstore::compress::is_compressible;
use rustfs_ecstore::error::{StorageError, is_err_object_not_found, is_err_version_not_found}; use rustfs_ecstore::error::{StorageError, is_err_object_not_found, is_err_version_not_found};
use rustfs_ecstore::new_object_layer_fn; use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::set_disk::is_valid_storage_class; use rustfs_ecstore::set_disk::{MAX_PARTS_COUNT, is_valid_storage_class};
use rustfs_ecstore::store_api::{CompletePart, MultipartUploadResult, ObjectOptions, PutObjReader}; use rustfs_ecstore::store_api::{CompletePart, MultipartUploadResult, ObjectIO, ObjectOptions, PutObjReader};
use rustfs_filemeta::{ReplicationStatusType, ReplicationType}; use rustfs_filemeta::{ReplicationStatusType, ReplicationType};
use rustfs_rio::{CompressReader, EncryptReader, HashReader, Reader, WarpReader}; use rustfs_rio::{CompressReader, DecryptReader, EncryptReader, HashReader, Reader, WarpReader};
use rustfs_targets::EventName; use rustfs_targets::EventName;
use rustfs_utils::CompressionAlgorithm; use rustfs_utils::CompressionAlgorithm;
use rustfs_utils::http::{ use rustfs_utils::http::{
@@ -829,6 +831,317 @@ impl DefaultMultipartUsecase {
Ok(S3Response::new(output)) Ok(S3Response::new(output))
} }
pub async fn execute_list_multipart_uploads(
&self,
req: S3Request<ListMultipartUploadsInput>,
) -> S3Result<S3Response<ListMultipartUploadsOutput>> {
if let Some(context) = &self.context {
let _ = context.object_store();
}
let ListMultipartUploadsInput {
bucket,
prefix,
delimiter,
key_marker,
upload_id_marker,
max_uploads,
..
} = req.input;
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
let prefix = prefix.unwrap_or_default();
let max_uploads = max_uploads.map(|x| x as usize).unwrap_or(MAX_PARTS_COUNT);
if let Some(key_marker) = &key_marker
&& !key_marker.starts_with(prefix.as_str())
{
return Err(s3_error!(NotImplemented, "Invalid key marker"));
}
let result = store
.list_multipart_uploads(&bucket, &prefix, delimiter, key_marker, upload_id_marker, max_uploads)
.await
.map_err(ApiError::from)?;
let output = ListMultipartUploadsOutput {
bucket: Some(bucket),
prefix: Some(prefix),
delimiter: result.delimiter,
key_marker: result.key_marker,
upload_id_marker: result.upload_id_marker,
max_uploads: Some(result.max_uploads as i32),
is_truncated: Some(result.is_truncated),
uploads: Some(
result
.uploads
.into_iter()
.map(|u| MultipartUpload {
key: Some(u.object),
upload_id: Some(u.upload_id),
initiated: u.initiated.map(Timestamp::from),
..Default::default()
})
.collect(),
),
common_prefixes: Some(
result
.common_prefixes
.into_iter()
.map(|c| CommonPrefix { prefix: Some(c) })
.collect(),
),
..Default::default()
};
Ok(S3Response::new(output))
}
pub async fn execute_list_parts(&self, req: S3Request<ListPartsInput>) -> S3Result<S3Response<ListPartsOutput>> {
if let Some(context) = &self.context {
let _ = context.object_store();
}
let ListPartsInput {
bucket,
key,
upload_id,
part_number_marker,
max_parts,
..
} = req.input;
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
let part_number_marker = part_number_marker.map(|x| x as usize);
let max_parts = match max_parts {
Some(parts) => {
if !(1..=1000).contains(&parts) {
return Err(s3_error!(InvalidArgument, "max-parts must be between 1 and 1000"));
}
parts as usize
}
None => 1000,
};
let res = store
.list_object_parts(&bucket, &key, &upload_id, part_number_marker, max_parts, &ObjectOptions::default())
.await
.map_err(ApiError::from)?;
let output = ListPartsOutput {
bucket: Some(res.bucket),
key: Some(res.object),
upload_id: Some(res.upload_id),
parts: Some(
res.parts
.into_iter()
.map(|p| Part {
e_tag: p.etag.map(|etag| to_s3s_etag(&etag)),
last_modified: p.last_mod.map(Timestamp::from),
part_number: Some(p.part_num as i32),
size: Some(p.size as i64),
..Default::default()
})
.collect(),
),
owner: Some(RUSTFS_OWNER.to_owned()),
initiator: Some(Initiator {
id: RUSTFS_OWNER.id.clone(),
display_name: RUSTFS_OWNER.display_name.clone(),
}),
is_truncated: Some(res.is_truncated),
next_part_number_marker: res.next_part_number_marker.try_into().ok(),
max_parts: res.max_parts.try_into().ok(),
part_number_marker: res.part_number_marker.try_into().ok(),
storage_class: if res.storage_class.is_empty() {
None
} else {
Some(res.storage_class.into())
},
..Default::default()
};
Ok(S3Response::new(output))
}
    /// Handles `UploadPartCopy`: copies a (possibly ranged) slice of an existing
    /// source object into one part of an in-progress multipart upload.
    ///
    /// The copy pipeline is order-critical: source decrypt -> (optional)
    /// compression -> hash -> (optional) destination re-encrypt -> part write.
    #[instrument(level = "debug", skip(self, req))]
    pub async fn execute_upload_part_copy(
        &self,
        req: S3Request<UploadPartCopyInput>,
    ) -> S3Result<S3Response<UploadPartCopyOutput>> {
        if let Some(context) = &self.context {
            let _ = context.object_store();
        }
        let UploadPartCopyInput {
            bucket,
            key,
            copy_source,
            copy_source_range,
            part_number,
            upload_id,
            copy_source_if_match,
            copy_source_if_none_match,
            ..
        } = req.input;
        // Only bucket/key copy sources are supported; access points are not.
        let (src_bucket, src_key, src_version_id) = match copy_source {
            CopySource::AccessPoint { .. } => return Err(s3_error!(NotImplemented)),
            CopySource::Bucket {
                bucket: ref src_bucket,
                key: ref src_key,
                version_id,
            } => (src_bucket.to_string(), src_key.to_string(), version_id.map(|v| v.to_string())),
        };
        // Optional x-amz-copy-source-range ("bytes=start-end").
        let rs = if let Some(range_str) = copy_source_range {
            Some(parse_copy_source_range(&range_str)?)
        } else {
            None
        };
        let part_id = part_number as usize;
        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };
        // Validates that the destination multipart upload exists and yields its
        // user-defined metadata (compression / encryption markers).
        let mp_info = store
            .get_multipart_info(&bucket, &key, &upload_id, &ObjectOptions::default())
            .await
            .map_err(ApiError::from)?;
        let mut src_opts = copy_src_opts(&src_bucket, &src_key, &req.headers).map_err(ApiError::from)?;
        src_opts.version_id = src_version_id.clone();
        let h = http::HeaderMap::new();
        let get_opts = ObjectOptions {
            version_id: src_opts.version_id.clone(),
            versioned: src_opts.versioned,
            version_suspended: src_opts.version_suspended,
            ..Default::default()
        };
        // First read: only the source's object info is needed here (for the
        // precondition checks and range validation below).
        let src_reader = store
            .get_object_reader(&src_bucket, &src_key, rs.clone(), h, &get_opts)
            .await
            .map_err(ApiError::from)?;
        let mut src_info = src_reader.object_info;
        // x-amz-copy-source-if-match: fail unless the source ETag strongly matches.
        if let Some(if_match) = copy_source_if_match {
            if let Some(ref etag) = src_info.etag {
                if let Some(strong_etag) = if_match.into_etag() {
                    if ETag::Strong(etag.clone()) != strong_etag {
                        return Err(s3_error!(PreconditionFailed));
                    }
                } else {
                    return Err(s3_error!(PreconditionFailed));
                }
            } else {
                return Err(s3_error!(PreconditionFailed));
            }
        }
        // x-amz-copy-source-if-none-match: fail when the source ETag matches.
        if let Some(if_none_match) = copy_source_if_none_match
            && let Some(ref etag) = src_info.etag
            && let Some(strong_etag) = if_none_match.into_etag()
            && ETag::Strong(etag.clone()) == strong_etag
        {
            return Err(s3_error!(PreconditionFailed));
        }
        // Resolve the byte range against the uncompressed size when the source
        // is stored compressed, otherwise against the stored size.
        let (_start_offset, length) = if let Some(ref range_spec) = rs {
            let validation_size = match src_info.is_compressed_ok() {
                Ok((_, true)) => src_info.get_actual_size().unwrap_or(src_info.size),
                _ => src_info.size,
            };
            range_spec
                .get_offset_length(validation_size)
                .map_err(|e| S3Error::with_message(S3ErrorCode::InvalidRange, e.to_string()))?
        } else {
            (0, src_info.size)
        };
        let h = http::HeaderMap::new();
        let get_opts = ObjectOptions {
            version_id: src_opts.version_id.clone(),
            versioned: src_opts.versioned,
            version_suspended: src_opts.version_suspended,
            ..Default::default()
        };
        // Second read: this reader's stream is the actual copy source.
        let src_reader = store
            .get_object_reader(&src_bucket, &src_key, rs.clone(), h, &get_opts)
            .await
            .map_err(ApiError::from)?;
        let src_stream = src_reader.stream;
        // The destination upload was created with compression if this internal
        // metadata marker is present.
        let is_compressible = mp_info
            .user_defined
            .contains_key(format!("{RESERVED_METADATA_PREFIX_LOWER}compression").as_str());
        let mut reader: Box<dyn Reader> = Box::new(WarpReader::new(src_stream));
        // If the source object is under managed encryption, decrypt it first
        // and restore the original (plaintext) size when known.
        if let Some((key_bytes, nonce, original_size_opt)) =
            decrypt_managed_encryption_key(&src_bucket, &src_key, &src_info.user_defined).await?
        {
            reader = Box::new(DecryptReader::new(reader, key_bytes, nonce));
            if let Some(original) = original_size_opt {
                src_info.actual_size = original;
            }
        }
        let actual_size = length;
        let mut size = length;
        // Compression changes the on-wire size; SIZE_PRESERVE_LAYER tells the
        // outer HashReader the length is determined by the inner layer.
        if is_compressible {
            let hrd = HashReader::new(reader, size, actual_size, None, None, false).map_err(ApiError::from)?;
            reader = Box::new(CompressReader::new(hrd, CompressionAlgorithm::default()));
            size = HashReader::SIZE_PRESERVE_LAYER;
        }
        let mut reader = HashReader::new(reader, size, actual_size, None, None, false).map_err(ApiError::from)?;
        // Re-encrypt for the destination upload with a per-part nonce derived
        // from the upload's base nonce; -1 size because ciphertext length is
        // unknown up front.
        if let Some((key_bytes, base_nonce, _)) = decrypt_managed_encryption_key(&bucket, &key, &mp_info.user_defined).await? {
            let part_nonce = derive_part_nonce(base_nonce, part_id);
            let encrypt_reader = EncryptReader::new(reader, key_bytes, part_nonce);
            reader = HashReader::new(Box::new(encrypt_reader), -1, actual_size, None, None, false).map_err(ApiError::from)?;
        }
        let mut reader = PutObjReader::new(reader);
        // Destination part inherits the upload's user-defined metadata.
        let dst_opts = ObjectOptions {
            user_defined: mp_info.user_defined.clone(),
            ..Default::default()
        };
        let part_info = store
            .put_object_part(&bucket, &key, &upload_id, part_id, &mut reader, &dst_opts)
            .await
            .map_err(ApiError::from)?;
        let copy_part_result = CopyPartResult {
            e_tag: part_info.etag.map(|etag| to_s3s_etag(&etag)),
            last_modified: part_info.last_mod.map(Timestamp::from),
            ..Default::default()
        };
        let output = UploadPartCopyOutput {
            copy_part_result: Some(copy_part_result),
            copy_source_version_id: src_version_id,
            ..Default::default()
        };
        Ok(S3Response::new(output))
    }
} }
#[async_trait::async_trait] #[async_trait::async_trait]
@@ -936,6 +1249,52 @@ mod tests {
assert_eq!(err.code(), &S3ErrorCode::InvalidPart); assert_eq!(err.code(), &S3ErrorCode::InvalidPart);
} }
#[tokio::test]
async fn execute_list_multipart_uploads_returns_internal_error_when_store_uninitialized() {
let input = ListMultipartUploadsInput::builder()
.bucket("bucket".to_string())
.build()
.unwrap();
let req = build_request(input, Method::GET);
let err = make_usecase().execute_list_multipart_uploads(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test]
async fn execute_list_parts_returns_internal_error_when_store_uninitialized() {
let input = ListPartsInput::builder()
.bucket("bucket".to_string())
.key("object".to_string())
.upload_id("upload-id".to_string())
.build()
.unwrap();
let req = build_request(input, Method::GET);
let err = make_usecase().execute_list_parts(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test]
async fn execute_upload_part_copy_returns_internal_error_when_store_uninitialized() {
let input = UploadPartCopyInput::builder()
.bucket("bucket".to_string())
.key("object".to_string())
.copy_source(CopySource::Bucket {
bucket: "src-bucket".into(),
key: "src-object".into(),
version_id: None,
})
.part_number(1)
.upload_id("upload-id".to_string())
.build()
.unwrap();
let req = build_request(input, Method::PUT);
let err = make_usecase().execute_upload_part_copy(req).await.unwrap_err();
assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test] #[tokio::test]
async fn execute_upload_part_rejects_missing_body() { async fn execute_upload_part_rejects_missing_body() {
let input = UploadPartInput::builder() let input = UploadPartInput::builder()

View File

@@ -38,21 +38,25 @@ use datafusion::arrow::{
}; };
use futures::StreamExt; use futures::StreamExt;
use http::{HeaderMap, HeaderValue, StatusCode}; use http::{HeaderMap, HeaderValue, StatusCode};
use metrics::{counter, histogram};
use rustfs_ecstore::StorageAPI; use rustfs_ecstore::StorageAPI;
use rustfs_ecstore::bucket::{ use rustfs_ecstore::bucket::{
lifecycle::{ lifecycle::{
bucket_lifecycle_ops::{RestoreRequestOps, post_restore_opts}, bucket_lifecycle_ops::{RestoreRequestOps, post_restore_opts},
lifecycle::{self, TransitionOptions}, lifecycle::{self, TransitionOptions},
}, },
metadata::{BUCKET_VERSIONING_CONFIG, OBJECT_LOCK_CONFIG},
metadata_sys, metadata_sys,
object_lock::objectlock_sys::{BucketObjectLockSys, check_object_lock_for_deletion}, object_lock::objectlock_sys::{BucketObjectLockSys, check_object_lock_for_deletion, check_retention_for_modification},
quota::QuotaOperation, quota::QuotaOperation,
quota::checker::QuotaChecker, quota::checker::QuotaChecker,
replication::{ replication::{
DeletedObjectReplicationInfo, get_must_replicate_options, must_replicate, schedule_replication, DeletedObjectReplicationInfo, get_must_replicate_options, must_replicate, schedule_replication,
schedule_replication_delete, schedule_replication_delete,
}, },
tagging::decode_tags, tagging::{decode_tags, encode_tags},
utils::serialize,
versioning::VersioningApi,
versioning_sys::BucketVersioningSys, versioning_sys::BucketVersioningSys,
}; };
use rustfs_ecstore::client::object_api_utils::to_s3s_etag; use rustfs_ecstore::client::object_api_utils::to_s3s_etag;
@@ -60,7 +64,7 @@ use rustfs_ecstore::compress::{MIN_COMPRESSIBLE_SIZE, is_compressible};
use rustfs_ecstore::error::{StorageError, is_err_bucket_not_found, is_err_object_not_found, is_err_version_not_found}; use rustfs_ecstore::error::{StorageError, is_err_bucket_not_found, is_err_object_not_found, is_err_version_not_found};
use rustfs_ecstore::new_object_layer_fn; use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::set_disk::is_valid_storage_class; use rustfs_ecstore::set_disk::is_valid_storage_class;
use rustfs_ecstore::store_api::{HTTPRangeSpec, ObjectIO, ObjectInfo, ObjectOptions, PutObjReader}; use rustfs_ecstore::store_api::{BucketOptions, HTTPRangeSpec, ObjectIO, ObjectInfo, ObjectOptions, PutObjReader};
use rustfs_filemeta::{ use rustfs_filemeta::{
REPLICATE_INCOMING_DELETE, ReplicationStatusType, ReplicationType, RestoreStatusOps, VersionPurgeStatusType, REPLICATE_INCOMING_DELETE, ReplicationStatusType, ReplicationType, RestoreStatusOps, VersionPurgeStatusType,
parse_restore_obj_status, parse_restore_obj_status,
@@ -659,6 +663,401 @@ impl DefaultObjectUsecase {
result result
} }
    /// Handles `PutObjectAcl`: resolves the requested ACL and stores it as
    /// internal object metadata.
    ///
    /// ACL source precedence (first one present wins):
    /// 1. explicit `AccessControlPolicy` request body,
    /// 2. `x-amz-grant-*` headers,
    /// 3. canned ACL (`x-amz-acl`),
    /// 4. default `private`.
    pub async fn execute_put_object_acl(&self, req: S3Request<PutObjectAclInput>) -> S3Result<S3Response<PutObjectAclOutput>> {
        if let Some(context) = &self.context {
            let _ = context.object_store();
        }
        let PutObjectAclInput {
            bucket,
            key,
            acl,
            access_control_policy,
            grant_full_control,
            grant_read,
            grant_read_acp,
            grant_write,
            grant_write_acp,
            version_id,
            ..
        } = req.input;
        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };
        let opts: ObjectOptions = get_opts(&bucket, &key, version_id.clone(), None, &req.headers)
            .await
            .map_err(ApiError::from)?;
        // Also validates that the object (version) exists.
        let info = store.get_object_info(&bucket, &key, &opts).await.map_err(ApiError::from)?;
        let bucket_owner = default_owner();
        // Preserve the owner recorded in any previously stored ACL; fall back
        // to the bucket owner when none is stored or it fails to parse.
        let existing_owner = info
            .user_defined
            .get(INTERNAL_ACL_METADATA_KEY)
            .and_then(|acl| serde_json::from_str::<StoredAcl>(acl).ok())
            .map(|acl| acl.owner)
            .unwrap_or_else(|| bucket_owner.clone());
        // 1. Explicit policy body (errors propagate via transpose + ?).
        let mut stored_acl = access_control_policy
            .as_ref()
            .map(|policy| stored_acl_from_policy(policy, &existing_owner))
            .transpose()?;
        // 2. x-amz-grant-* headers.
        if stored_acl.is_none() {
            stored_acl = stored_acl_from_grant_headers(
                &existing_owner,
                grant_read.map(|v| v.to_string()),
                grant_write.map(|v| v.to_string()),
                grant_read_acp.map(|v| v.to_string()),
                grant_write_acp.map(|v| v.to_string()),
                grant_full_control.map(|v| v.to_string()),
            )?;
        }
        // 3. Canned ACL header.
        if stored_acl.is_none()
            && let Some(canned) = acl
        {
            stored_acl = Some(stored_acl_from_canned_object(canned.as_str(), &bucket_owner, &existing_owner));
        }
        // 4. Default: private.
        let stored_acl =
            stored_acl.unwrap_or_else(|| stored_acl_from_canned_object(ObjectCannedACL::PRIVATE, &bucket_owner, &existing_owner));
        // Enforce the bucket's public-access-block: reject public grants when
        // BlockPublicAcls is set. Config lookup failures are treated as "no
        // restriction" (best-effort check).
        if let Ok((config, _)) = metadata_sys::get_public_access_block_config(&bucket).await
            && config.block_public_acls.unwrap_or(false)
            && stored_acl.grants.iter().any(is_public_grant)
        {
            return Err(s3_error!(AccessDenied, "Access Denied"));
        }
        let acl_data = serialize_acl(&stored_acl)?;
        let mut eval_metadata = HashMap::new();
        eval_metadata.insert(INTERNAL_ACL_METADATA_KEY.to_string(), String::from_utf8_lossy(&acl_data).to_string());
        // mod_time is carried over so the metadata write targets the same
        // object version state that was just read.
        let popts = ObjectOptions {
            mod_time: info.mod_time,
            version_id: opts.version_id,
            eval_metadata: Some(eval_metadata),
            ..Default::default()
        };
        store.put_object_metadata(&bucket, &key, &popts).await.map_err(|e| {
            error!("put_object_metadata failed, {}", e.to_string());
            s3_error!(InternalError, "{}", e.to_string())
        })?;
        Ok(S3Response::new(PutObjectAclOutput::default()))
    }
    /// Handles `PutObjectLegalHold`: stores the legal-hold flag on an object
    /// version via metadata, emitting the PutLegalHold event on completion.
    ///
    /// Requires the bucket to exist and Object Lock to be enabled on it.
    pub async fn execute_put_object_legal_hold(
        &self,
        req: S3Request<PutObjectLegalHoldInput>,
    ) -> S3Result<S3Response<PutObjectLegalHoldOutput>> {
        if let Some(context) = &self.context {
            let _ = context.object_store();
        }
        // Helper tracks the operation for event notification / auditing.
        let mut helper = OperationHelper::new(&req, EventName::ObjectCreatedPutLegalHold, "s3:PutObjectLegalHold");
        let PutObjectLegalHoldInput {
            bucket,
            key,
            legal_hold,
            version_id,
            ..
        } = req.input.clone();
        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };
        // Verify the bucket exists; the info itself is not needed.
        let _ = store
            .get_bucket_info(&bucket, &BucketOptions::default())
            .await
            .map_err(ApiError::from)?;
        // Legal hold is only valid on buckets created with Object Lock.
        validate_bucket_object_lock_enabled(&bucket).await?;
        let opts: ObjectOptions = get_opts(&bucket, &key, version_id, None, &req.headers)
            .await
            .map_err(ApiError::from)?;
        // Converts the request body into the internal legal-hold metadata entry.
        let eval_metadata = parse_object_lock_legal_hold(legal_hold)?;
        // NOTE(review): opts.mod_time comes from get_opts (request-derived),
        // while execute_put_object_acl uses the object's info.mod_time here —
        // confirm this difference is intentional.
        let popts = ObjectOptions {
            mod_time: opts.mod_time,
            version_id: opts.version_id,
            eval_metadata: Some(eval_metadata),
            ..Default::default()
        };
        let info = store.put_object_metadata(&bucket, &key, &popts).await.map_err(|e| {
            error!("put_object_metadata failed, {}", e.to_string());
            s3_error!(InternalError, "{}", e.to_string())
        })?;
        let output = PutObjectLegalHoldOutput {
            request_charged: Some(RequestCharged::from_static(RequestCharged::REQUESTER)),
        };
        // Attach the resulting object info and the requested version id to the
        // event, then fire the notification (errors from it are ignored).
        let version_id = req.input.version_id.clone().unwrap_or_default();
        helper = helper.object(info).version_id(version_id);
        let result = Ok(S3Response::new(output));
        let _ = helper.complete(&result);
        result
    }
    /// Handles `PutObjectLockConfiguration`: persists the bucket's Object Lock
    /// configuration and force-enables versioning when it is not on yet.
    ///
    /// Object Lock can only be (re)configured on buckets that were created with
    /// Object Lock; if no existing lock config is found the request is rejected
    /// with `InvalidBucketState`.
    #[instrument(level = "debug", skip(self))]
    pub async fn execute_put_object_lock_configuration(
        &self,
        req: S3Request<PutObjectLockConfigurationInput>,
    ) -> S3Result<S3Response<PutObjectLockConfigurationOutput>> {
        if let Some(context) = &self.context {
            let _ = context.object_store();
        }
        let PutObjectLockConfigurationInput {
            bucket,
            object_lock_configuration,
            ..
        } = req.input;
        // A configuration body is mandatory for this operation.
        let Some(input_cfg) = object_lock_configuration else { return Err(s3_error!(InvalidArgument)) };
        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };
        // Verify the bucket exists.
        store
            .get_bucket_info(&bucket, &BucketOptions::default())
            .await
            .map_err(ApiError::from)?;
        // Only the existence of a prior lock config matters here: absence means
        // the bucket was created without Object Lock, which cannot be enabled
        // after the fact.
        match metadata_sys::get_object_lock_config(&bucket).await {
            Ok(_) => {}
            Err(err) => {
                if err == StorageError::ConfigNotFound {
                    return Err(S3Error::with_message(
                        S3ErrorCode::InvalidBucketState,
                        "Object Lock configuration cannot be enabled on existing buckets".to_string(),
                    ));
                }
                warn!("get_object_lock_config err {:?}", err);
                return Err(S3Error::with_message(
                    S3ErrorCode::InternalError,
                    "Failed to get bucket ObjectLockConfiguration".to_string(),
                ));
            }
        };
        let data = serialize(&input_cfg).map_err(|err| S3Error::with_message(S3ErrorCode::InternalError, format!("{}", err)))?;
        metadata_sys::update(&bucket, OBJECT_LOCK_CONFIG, data)
            .await
            .map_err(ApiError::from)?;
        // When Object Lock is enabled, automatically enable versioning if not already enabled.
        // This matches AWS S3 and MinIO behavior.
        let versioning_config = BucketVersioningSys::get(&bucket).await.map_err(ApiError::from)?;
        if !versioning_config.enabled() {
            let enable_versioning_config = VersioningConfiguration {
                status: Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED)),
                ..Default::default()
            };
            let versioning_data = serialize(&enable_versioning_config)
                .map_err(|err| S3Error::with_message(S3ErrorCode::InternalError, format!("{}", err)))?;
            metadata_sys::update(&bucket, BUCKET_VERSIONING_CONFIG, versioning_data)
                .await
                .map_err(ApiError::from)?;
        }
        Ok(S3Response::new(PutObjectLockConfigurationOutput::default()))
    }
/// Handles `PutObjectRetention`: validates the requested retention against the
/// object's current lock state (honoring the bypass-governance header), then
/// writes the retention settings through `put_object_metadata` and emits an
/// `ObjectCreatedPutRetention` event.
pub async fn execute_put_object_retention(
    &self,
    req: S3Request<PutObjectRetentionInput>,
) -> S3Result<S3Response<PutObjectRetentionOutput>> {
    // Touch the per-request object store context if one was injected.
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let mut helper = OperationHelper::new(&req, EventName::ObjectCreatedPutRetention, "s3:PutObjectRetention");
    let PutObjectRetentionInput {
        bucket,
        key,
        retention,
        version_id,
        ..
    } = req.input.clone();
    let Some(store) = new_object_layer_fn() else {
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    // Retention requires Object Lock to be enabled on the bucket.
    validate_bucket_object_lock_enabled(&bucket).await?;
    // The retain-until date the caller is asking for (None when absent).
    let new_retain_until = retention
        .as_ref()
        .and_then(|r| r.retain_until_date.as_ref())
        .map(|d| OffsetDateTime::from(d.clone()));
    // TODO(security): Known TOCTOU race condition (fix in future PR).
    //
    // There is a time-of-check-time-of-use (TOCTOU) window between the retention
    // check below (using get_object_info + check_retention_for_modification) and
    // the actual update performed later in put_object_metadata.
    //
    // In theory:
    //   * Thread A reads retention mode = GOVERNANCE and checks the bypass header.
    //   * Thread B updates retention to COMPLIANCE mode.
    //   * Thread A then proceeds to modify retention, still assuming GOVERNANCE,
    //     and effectively bypasses what is now COMPLIANCE mode.
    //
    // This would violate the S3 spec, which states that COMPLIANCE-mode retention
    // cannot be modified even with a bypass header.
    //
    // Possible fixes (to be implemented in a future change):
    //   1. Pass the expected retention mode down to the storage layer and verify
    //      it has not changed immediately before the update.
    //   2. Use optimistic concurrency (e.g., version/etag) so that the update
    //      fails if the object changed between check and update.
    //   3. Perform the retention check inside the same lock/transaction scope as
    //      the metadata update within the storage layer.
    //
    // Current mitigation: the storage layer provides a fast_lock_manager, which
    // offers some protection, but it does not fully eliminate this race.
    let check_opts: ObjectOptions = get_opts(&bucket, &key, version_id.clone(), None, &req.headers)
        .await
        .map_err(ApiError::from)?;
    // NOTE(review): a failed get_object_info here silently skips the retention
    // check — presumably so a lookup failure doesn't block the write; confirm
    // this is intended rather than falling through on transient errors.
    if let Ok(existing_obj_info) = store.get_object_info(&bucket, &key, &check_opts).await {
        let bypass_governance = has_bypass_governance_header(&req.headers);
        if let Some(block_reason) =
            check_retention_for_modification(&existing_obj_info.user_defined, new_retain_until, bypass_governance)
        {
            return Err(S3Error::with_message(S3ErrorCode::AccessDenied, block_reason.error_message()));
        }
    }
    // Translate the retention DTO into metadata entries for the storage layer.
    let eval_metadata = parse_object_lock_retention(retention)?;
    let mut opts: ObjectOptions = get_opts(&bucket, &key, version_id, None, &req.headers)
        .await
        .map_err(ApiError::from)?;
    opts.eval_metadata = Some(eval_metadata);
    let object_info = store.put_object_metadata(&bucket, &key, &opts).await.map_err(|e| {
        error!("put_object_metadata failed, {}", e.to_string());
        s3_error!(InternalError, "{}", e.to_string())
    })?;
    let output = PutObjectRetentionOutput {
        request_charged: Some(RequestCharged::from_static(RequestCharged::REQUESTER)),
    };
    // NOTE(review): when the request carries no version id this fabricates a
    // random UUID for the event notification, while sibling handlers use an
    // empty string (`unwrap_or_default`) — confirm which one event consumers
    // actually expect.
    let version_id = req.input.version_id.clone().unwrap_or_else(|| Uuid::new_v4().to_string());
    helper = helper.object(object_info).version_id(version_id);
    let result = Ok(S3Response::new(output));
    let _ = helper.complete(&result);
    result
}
/// Handles `PutObjectTagging`: validates the tag set against the AWS object
/// tagging limits, persists it on the object (or a specific version), and
/// invalidates any cached entry for that object off the request path.
///
/// Validation rules (per AWS S3 object tagging restrictions):
/// - at most 10 tags per object,
/// - tag keys: non-empty, at most 128 Unicode characters, unique,
/// - tag values: non-null, at most 256 Unicode characters.
#[instrument(level = "debug", skip(self, req))]
pub async fn execute_put_object_tagging(
    &self,
    req: S3Request<PutObjectTaggingInput>,
) -> S3Result<S3Response<PutObjectTaggingOutput>> {
    // Touch the per-request object store context if one was injected.
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let start_time = std::time::Instant::now();
    let mut helper = OperationHelper::new(&req, EventName::ObjectCreatedPutTagging, "s3:PutObjectTagging");
    let PutObjectTaggingInput {
        bucket,
        key: object,
        tagging,
        ..
    } = req.input.clone();
    // AWS allows at most 10 tags per object.
    if tagging.tag_set.len() > 10 {
        error!("Tag set exceeds maximum of 10 tags: {}", tagging.tag_set.len());
        return Err(s3_error!(InvalidTag, "Cannot have more than 10 tags per object"));
    }
    let Some(store) = new_object_layer_fn() else {
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    let mut tag_keys = std::collections::HashSet::with_capacity(tagging.tag_set.len());
    for tag in &tagging.tag_set {
        let key = tag.key.as_ref().filter(|k| !k.is_empty()).ok_or_else(|| {
            error!("Empty tag key");
            s3_error!(InvalidTag, "Tag key cannot be empty")
        })?;
        // AWS expresses the limits in Unicode characters, not bytes; counting
        // bytes (`len()`) would reject valid multi-byte keys/values.
        if key.chars().count() > 128 {
            error!("Tag key too long: {} bytes", key.len());
            return Err(s3_error!(InvalidTag, "Tag key is too long, maximum allowed length is 128 characters"));
        }
        let value = tag.value.as_ref().ok_or_else(|| {
            error!("Null tag value");
            s3_error!(InvalidTag, "Tag value cannot be null")
        })?;
        if value.chars().count() > 256 {
            error!("Tag value too long: {} bytes", value.len());
            return Err(s3_error!(InvalidTag, "Tag value is too long, maximum allowed length is 256 characters"));
        }
        // Duplicate keys are rejected, matching AWS behavior.
        if !tag_keys.insert(key) {
            error!("Duplicate tag key: {}", key);
            return Err(s3_error!(InvalidTag, "Cannot provide multiple Tags with the same key"));
        }
    }
    let tags = encode_tags(tagging.tag_set);
    debug!("Encoded tags: {}", tags);
    let version_id = req.input.version_id.clone();
    let opts = ObjectOptions {
        version_id: parse_object_version_id(version_id)?.map(Into::into),
        ..Default::default()
    };
    store.put_object_tags(&bucket, &object, &tags, &opts).await.map_err(|e| {
        error!("Failed to put object tags: {}", e);
        counter!("rustfs.put_object_tagging.failure").increment(1);
        ApiError::from(e)
    })?;
    // Invalidate the cached object entry asynchronously so the write path
    // isn't blocked on cache maintenance.
    let manager = get_concurrency_manager();
    let version_id = req.input.version_id.clone();
    let cache_key = ConcurrencyManager::make_cache_key(&bucket, &object, version_id.as_deref());
    tokio::spawn(async move {
        manager
            .invalidate_cache_versioned(&bucket, &object, version_id.as_deref())
            .await;
        debug!("Cache invalidated for tagged object: {}", cache_key);
    });
    counter!("rustfs.put_object_tagging.success").increment(1);
    let version_id_resp = req.input.version_id.clone().unwrap_or_default();
    helper = helper.version_id(version_id_resp);
    let result = Ok(S3Response::new(PutObjectTaggingOutput {
        version_id: req.input.version_id.clone(),
    }));
    let _ = helper.complete(&result);
    let duration = start_time.elapsed();
    histogram!("rustfs.object_tagging.operation.duration.seconds", "operation" => "put").record(duration.as_secs_f64());
    result
}
#[instrument( #[instrument(
level = "debug", level = "debug",
skip(self, req), skip(self, req),
@@ -1416,6 +1815,278 @@ impl DefaultObjectUsecase {
result result
} }
pub async fn execute_get_object_acl(&self, req: S3Request<GetObjectAclInput>) -> S3Result<S3Response<GetObjectAclOutput>> {
if let Some(context) = &self.context {
let _ = context.object_store();
}
let GetObjectAclInput {
bucket, key, version_id, ..
} = req.input;
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
let opts: ObjectOptions = get_opts(&bucket, &key, version_id.clone(), None, &req.headers)
.await
.map_err(ApiError::from)?;
let info = store.get_object_info(&bucket, &key, &opts).await.map_err(ApiError::from)?;
let bucket_owner = default_owner();
let object_owner = info
.user_defined
.get(INTERNAL_ACL_METADATA_KEY)
.and_then(|acl| serde_json::from_str::<StoredAcl>(acl).ok())
.map(|acl| acl.owner)
.unwrap_or_else(default_owner);
let stored_acl = info
.user_defined
.get(INTERNAL_ACL_METADATA_KEY)
.map(|acl| parse_acl_json_or_canned_object(acl, &bucket_owner, &object_owner))
.unwrap_or_else(|| stored_acl_from_canned_object(ObjectCannedACL::PRIVATE, &bucket_owner, &object_owner));
let mut sorted_grants = stored_acl.grants.clone();
sorted_grants.sort_by_key(|grant| grant.grantee.grantee_type != "Group");
let grants = sorted_grants.iter().map(stored_grant_to_dto).collect();
Ok(S3Response::new(GetObjectAclOutput {
grants: Some(grants),
owner: Some(stored_owner_to_dto(&stored_acl.owner)),
..Default::default()
}))
}
pub async fn execute_get_object_attributes(
&self,
req: S3Request<GetObjectAttributesInput>,
) -> S3Result<S3Response<GetObjectAttributesOutput>> {
if let Some(context) = &self.context {
let _ = context.object_store();
}
let mut helper = OperationHelper::new(&req, EventName::ObjectAccessedAttributes, "s3:GetObjectAttributes");
let GetObjectAttributesInput { bucket, key, .. } = req.input.clone();
let Some(store) = new_object_layer_fn() else {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};
if let Err(e) = store
.get_object_reader(&bucket, &key, None, HeaderMap::new(), &ObjectOptions::default())
.await
{
return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("{e}")));
}
let output = GetObjectAttributesOutput {
delete_marker: None,
object_parts: None,
..Default::default()
};
let version_id = req.input.version_id.clone().unwrap_or_default();
helper = helper
.object(ObjectInfo {
name: key.clone(),
bucket,
..Default::default()
})
.version_id(version_id);
let result = Ok(S3Response::new(output));
let _ = helper.complete(&result);
result
}
/// Handles `GetObjectLegalHold`: reads the legal-hold flag from the object's
/// user metadata, defaulting to `OFF` when the object carries none.
pub async fn execute_get_object_legal_hold(
    &self,
    req: S3Request<GetObjectLegalHoldInput>,
) -> S3Result<S3Response<GetObjectLegalHoldOutput>> {
    // Touch the per-request object store context if one was injected.
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let mut helper = OperationHelper::new(&req, EventName::ObjectAccessedGetLegalHold, "s3:GetObjectLegalHold");
    let GetObjectLegalHoldInput {
        bucket, key, version_id, ..
    } = req.input.clone();
    let Some(store) = new_object_layer_fn() else {
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    // Verify the bucket exists before checking its lock status.
    let _ = store
        .get_bucket_info(&bucket, &BucketOptions::default())
        .await
        .map_err(ApiError::from)?;
    // Legal hold is only meaningful on Object-Lock-enabled buckets.
    validate_bucket_object_lock_enabled(&bucket).await?;
    let opts: ObjectOptions = get_opts(&bucket, &key, version_id, None, &req.headers)
        .await
        .map_err(ApiError::from)?;
    let object_info = store.get_object_info(&bucket, &key, &opts).await.map_err(|e| {
        error!("get_object_info failed, {}", e.to_string());
        s3_error!(InternalError, "{}", e.to_string())
    })?;
    // Absent metadata means legal hold was never set: report OFF.
    let status = object_info
        .user_defined
        .get(AMZ_OBJECT_LOCK_LEGAL_HOLD_LOWER)
        .map(|v| v.as_str().to_string())
        .unwrap_or_else(|| ObjectLockLegalHoldStatus::OFF.to_string());
    let output = GetObjectLegalHoldOutput {
        legal_hold: Some(ObjectLockLegalHold {
            status: Some(ObjectLockLegalHoldStatus::from(status)),
        }),
    };
    // Report the request's version id (empty when unversioned) in the event,
    // consistent with the other read handlers; previously this fabricated a
    // random UUID, which was misleading to event consumers.
    let version_id = req.input.version_id.clone().unwrap_or_default();
    helper = helper.object(object_info).version_id(version_id);
    let result = Ok(S3Response::new(output));
    let _ = helper.complete(&result);
    result
}
/// Loads the bucket's Object Lock configuration, translating a missing
/// configuration into `ObjectLockConfigurationNotFoundError` and any other
/// failure into `InternalError`.
#[instrument(level = "debug", skip(self))]
pub async fn execute_get_object_lock_configuration(
    &self,
    req: S3Request<GetObjectLockConfigurationInput>,
) -> S3Result<S3Response<GetObjectLockConfigurationOutput>> {
    // Touch the per-request object store context if one was injected.
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let GetObjectLockConfigurationInput { bucket, .. } = req.input;
    let object_lock_configuration = match metadata_sys::get_object_lock_config(&bucket).await {
        Ok((cfg, _created)) => Some(cfg),
        Err(err) if err == StorageError::ConfigNotFound => {
            return Err(S3Error::with_message(
                S3ErrorCode::ObjectLockConfigurationNotFoundError,
                "Object Lock configuration does not exist for this bucket".to_string(),
            ));
        }
        Err(err) => {
            warn!("get_object_lock_config err {:?}", err);
            return Err(S3Error::with_message(
                S3ErrorCode::InternalError,
                "Failed to load Object Lock configuration".to_string(),
            ));
        }
    };
    Ok(S3Response::new(GetObjectLockConfigurationOutput {
        object_lock_configuration,
    }))
}
/// Handles `GetObjectRetention`: reads the retention mode and retain-until
/// date from the object's user metadata and emits an
/// `ObjectAccessedGetRetention` event.
pub async fn execute_get_object_retention(
    &self,
    req: S3Request<GetObjectRetentionInput>,
) -> S3Result<S3Response<GetObjectRetentionOutput>> {
    // Touch the per-request object store context if one was injected.
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let mut helper = OperationHelper::new(&req, EventName::ObjectAccessedGetRetention, "s3:GetObjectRetention");
    let GetObjectRetentionInput {
        bucket, key, version_id, ..
    } = req.input.clone();
    let Some(store) = new_object_layer_fn() else {
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    // Retention is only meaningful on Object-Lock-enabled buckets.
    validate_bucket_object_lock_enabled(&bucket).await?;
    let opts: ObjectOptions = get_opts(&bucket, &key, version_id, None, &req.headers)
        .await
        .map_err(ApiError::from)?;
    let object_info = store.get_object_info(&bucket, &key, &opts).await.map_err(|e| {
        error!("get_object_info failed, {}", e.to_string());
        s3_error!(InternalError, "{}", e.to_string())
    })?;
    // NOTE(review): these metadata keys are string literals while sibling code
    // (e.g. legal hold) uses AMZ_OBJECT_LOCK_* constants — consider unifying.
    let mode = object_info
        .user_defined
        .get("x-amz-object-lock-mode")
        .map(|v| ObjectLockRetentionMode::from(v.as_str().to_string()));
    // An absent or unparsable date simply yields no retain-until date.
    let retain_until_date = object_info
        .user_defined
        .get("x-amz-object-lock-retain-until-date")
        .and_then(|v| OffsetDateTime::parse(v.as_str(), &Rfc3339).ok())
        .map(Timestamp::from);
    let output = GetObjectRetentionOutput {
        retention: Some(ObjectLockRetention { mode, retain_until_date }),
    };
    let version_id = req.input.version_id.clone().unwrap_or_default();
    helper = helper.object(object_info).version_id(version_id);
    let result = Ok(S3Response::new(output));
    let _ = helper.complete(&result);
    result
}
/// Reads and decodes the tag set stored on an object (optionally a specific
/// version), mapping a missing object to `NoSuchKey`, and records
/// latency/success metrics.
#[instrument(level = "debug", skip(self, req))]
pub async fn execute_get_object_tagging(
    &self,
    req: S3Request<GetObjectTaggingInput>,
) -> S3Result<S3Response<GetObjectTaggingOutput>> {
    // Touch the per-request object store context if one was injected.
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let start_time = std::time::Instant::now();
    let GetObjectTaggingInput { bucket, key: object, .. } = req.input;
    info!("Starting get_object_tagging for bucket: {}, object: {}", bucket, object);
    let Some(store) = new_object_layer_fn() else {
        error!("Store not initialized");
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    let version_id = req.input.version_id.clone();
    let opts = ObjectOptions {
        version_id: parse_object_version_id(version_id)?.map(Into::into),
        ..Default::default()
    };
    // Fetch the raw (encoded) tag string for the requested version.
    let tags = match store.get_object_tags(&bucket, &object, &opts).await {
        Ok(tags) => tags,
        Err(e) if is_err_object_not_found(&e) => {
            error!("Object not found: {}", e);
            return Err(s3_error!(NoSuchKey));
        }
        Err(e) => {
            error!("Failed to get object tags: {}", e);
            return Err(ApiError::from(e).into());
        }
    };
    let tag_set = decode_tags(tags.as_str());
    debug!("Decoded tag set: {:?}", tag_set);
    counter!("rustfs.get_object_tagging.success").increment(1);
    let duration = start_time.elapsed();
    histogram!("rustfs.object_tagging.operation.duration.seconds", "operation" => "get").record(duration.as_secs_f64());
    Ok(S3Response::new(GetObjectTaggingOutput {
        tag_set,
        version_id: req.input.version_id.clone(),
    }))
}
#[instrument(level = "debug", skip(self, req))] #[instrument(level = "debug", skip(self, req))]
pub async fn execute_select_object_content( pub async fn execute_select_object_content(
&self, &self,
@@ -2031,6 +2702,64 @@ impl DefaultObjectUsecase {
result result
} }
/// Removes the tag set from an object (or a specific version), invalidates the
/// concurrency cache entry off the request path, and records latency/success
/// metrics plus an `ObjectCreatedDeleteTagging` event.
#[instrument(level = "debug", skip(self, req))]
pub async fn execute_delete_object_tagging(
    &self,
    req: S3Request<DeleteObjectTaggingInput>,
) -> S3Result<S3Response<DeleteObjectTaggingOutput>> {
    // Touch the per-request object store context if one was injected.
    if let Some(context) = &self.context {
        let _ = context.object_store();
    }
    let start_time = std::time::Instant::now();
    let mut helper = OperationHelper::new(&req, EventName::ObjectCreatedDeleteTagging, "s3:DeleteObjectTagging");
    let DeleteObjectTaggingInput {
        bucket,
        key: object,
        version_id,
        ..
    } = req.input.clone();
    let Some(store) = new_object_layer_fn() else {
        error!("Store not initialized");
        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
    };
    let opts = ObjectOptions {
        version_id: parse_object_version_id(version_id.clone())?.map(Into::into),
        ..Default::default()
    };
    if let Err(e) = store.delete_object_tags(&bucket, &object, &opts).await {
        error!("Failed to delete object tags: {}", e);
        return Err(ApiError::from(e).into());
    }
    // Invalidate the cached object entry asynchronously.
    let manager = get_concurrency_manager();
    let cached_version = version_id.clone();
    tokio::spawn(async move {
        manager
            .invalidate_cache_versioned(&bucket, &object, cached_version.as_deref())
            .await;
        debug!(
            "Cache invalidated for deleted tagged object: bucket={}, object={}, version_id={:?}",
            bucket, object, cached_version
        );
    });
    counter!("rustfs.delete_object_tagging.success").increment(1);
    helper = helper.version_id(version_id.clone().unwrap_or_default());
    let result = Ok(S3Response::new(DeleteObjectTaggingOutput { version_id }));
    let _ = helper.complete(&result);
    let duration = start_time.elapsed();
    histogram!("rustfs.object_tagging.operation.duration.seconds", "operation" => "delete").record(duration.as_secs_f64());
    result
}
#[instrument(level = "debug", skip(self, req))] #[instrument(level = "debug", skip(self, req))]
pub async fn execute_head_object(&self, req: S3Request<HeadObjectInput>) -> S3Result<S3Response<HeadObjectOutput>> { pub async fn execute_head_object(&self, req: S3Request<HeadObjectInput>) -> S3Result<S3Response<HeadObjectOutput>> {
if let Some(context) = &self.context { if let Some(context) = &self.context {
@@ -2639,6 +3368,180 @@ mod tests {
assert_eq!(err.code(), &S3ErrorCode::InvalidArgument); assert_eq!(err.code(), &S3ErrorCode::InvalidArgument);
} }
// The following tests verify that every read/delete handler fails fast with
// `InternalError` when the global object layer has not been initialized
// (i.e. `new_object_layer_fn()` returns `None`).

// DeleteObjectTagging must report InternalError when the store is missing.
#[tokio::test]
async fn execute_delete_object_tagging_returns_internal_error_when_store_uninitialized() {
    let input = DeleteObjectTaggingInput::builder()
        .bucket("test-bucket".to_string())
        .key("test-key".to_string())
        .build()
        .unwrap();
    let req = build_request(input, Method::DELETE);
    let usecase = DefaultObjectUsecase::without_context();
    let err = usecase.execute_delete_object_tagging(req).await.unwrap_err();
    assert_eq!(err.code(), &S3ErrorCode::InternalError);
}

// GetObjectAcl must report InternalError when the store is missing.
#[tokio::test]
async fn execute_get_object_acl_returns_internal_error_when_store_uninitialized() {
    let input = GetObjectAclInput::builder()
        .bucket("test-bucket".to_string())
        .key("test-key".to_string())
        .build()
        .unwrap();
    let req = build_request(input, Method::GET);
    let usecase = DefaultObjectUsecase::without_context();
    let err = usecase.execute_get_object_acl(req).await.unwrap_err();
    assert_eq!(err.code(), &S3ErrorCode::InternalError);
}

// GetObjectAttributes must report InternalError when the store is missing.
#[tokio::test]
async fn execute_get_object_attributes_returns_internal_error_when_store_uninitialized() {
    let input = GetObjectAttributesInput::builder()
        .bucket("test-bucket".to_string())
        .key("test-key".to_string())
        .build()
        .unwrap();
    let req = build_request(input, Method::GET);
    let usecase = DefaultObjectUsecase::without_context();
    let err = usecase.execute_get_object_attributes(req).await.unwrap_err();
    assert_eq!(err.code(), &S3ErrorCode::InternalError);
}

// GetObjectLegalHold must report InternalError when the store is missing.
#[tokio::test]
async fn execute_get_object_legal_hold_returns_internal_error_when_store_uninitialized() {
    let input = GetObjectLegalHoldInput::builder()
        .bucket("test-bucket".to_string())
        .key("test-key".to_string())
        .build()
        .unwrap();
    let req = build_request(input, Method::GET);
    let usecase = DefaultObjectUsecase::without_context();
    let err = usecase.execute_get_object_legal_hold(req).await.unwrap_err();
    assert_eq!(err.code(), &S3ErrorCode::InternalError);
}

// GetObjectRetention must report InternalError when the store is missing.
#[tokio::test]
async fn execute_get_object_retention_returns_internal_error_when_store_uninitialized() {
    let input = GetObjectRetentionInput::builder()
        .bucket("test-bucket".to_string())
        .key("test-key".to_string())
        .build()
        .unwrap();
    let req = build_request(input, Method::GET);
    let usecase = DefaultObjectUsecase::without_context();
    let err = usecase.execute_get_object_retention(req).await.unwrap_err();
    assert_eq!(err.code(), &S3ErrorCode::InternalError);
}

// GetObjectTagging must report InternalError when the store is missing.
#[tokio::test]
async fn execute_get_object_tagging_returns_internal_error_when_store_uninitialized() {
    let input = GetObjectTaggingInput::builder()
        .bucket("test-bucket".to_string())
        .key("test-key".to_string())
        .build()
        .unwrap();
    let req = build_request(input, Method::GET);
    let usecase = DefaultObjectUsecase::without_context();
    let err = usecase.execute_get_object_tagging(req).await.unwrap_err();
    assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
// The following tests verify that every write handler fails fast with
// `InternalError` when the global object layer has not been initialized.

// PutObjectAcl must report InternalError when the store is missing.
#[tokio::test]
async fn execute_put_object_acl_returns_internal_error_when_store_uninitialized() {
    let input = PutObjectAclInput::builder()
        .bucket("test-bucket".to_string())
        .key("test-key".to_string())
        .build()
        .unwrap();
    let req = build_request(input, Method::PUT);
    let usecase = DefaultObjectUsecase::without_context();
    let err = usecase.execute_put_object_acl(req).await.unwrap_err();
    assert_eq!(err.code(), &S3ErrorCode::InternalError);
}

// PutObjectLegalHold must report InternalError when the store is missing.
#[tokio::test]
async fn execute_put_object_legal_hold_returns_internal_error_when_store_uninitialized() {
    let input = PutObjectLegalHoldInput::builder()
        .bucket("test-bucket".to_string())
        .key("test-key".to_string())
        .build()
        .unwrap();
    let req = build_request(input, Method::PUT);
    let usecase = DefaultObjectUsecase::without_context();
    let err = usecase.execute_put_object_legal_hold(req).await.unwrap_err();
    assert_eq!(err.code(), &S3ErrorCode::InternalError);
}

// PutObjectLockConfiguration must report InternalError when the store is
// missing; a valid (enabled, rule-less) configuration is supplied so the
// handler reaches the store lookup.
#[tokio::test]
async fn execute_put_object_lock_configuration_returns_internal_error_when_store_uninitialized() {
    let input = PutObjectLockConfigurationInput::builder()
        .bucket("test-bucket".to_string())
        .object_lock_configuration(Some(ObjectLockConfiguration {
            object_lock_enabled: Some(ObjectLockEnabled::from_static(ObjectLockEnabled::ENABLED)),
            rule: None,
        }))
        .build()
        .unwrap();
    let req = build_request(input, Method::PUT);
    let usecase = DefaultObjectUsecase::without_context();
    let err = usecase.execute_put_object_lock_configuration(req).await.unwrap_err();
    assert_eq!(err.code(), &S3ErrorCode::InternalError);
}

// PutObjectRetention must report InternalError when the store is missing.
#[tokio::test]
async fn execute_put_object_retention_returns_internal_error_when_store_uninitialized() {
    let input = PutObjectRetentionInput::builder()
        .bucket("test-bucket".to_string())
        .key("test-key".to_string())
        .build()
        .unwrap();
    let req = build_request(input, Method::PUT);
    let usecase = DefaultObjectUsecase::without_context();
    let err = usecase.execute_put_object_retention(req).await.unwrap_err();
    assert_eq!(err.code(), &S3ErrorCode::InternalError);
}

// PutObjectTagging must report InternalError when the store is missing; a
// single valid tag is supplied so validation passes before the store check.
#[tokio::test]
async fn execute_put_object_tagging_returns_internal_error_when_store_uninitialized() {
    let input = PutObjectTaggingInput::builder()
        .bucket("test-bucket".to_string())
        .key("test-key".to_string())
        .tagging(Tagging {
            tag_set: vec![Tag {
                key: Some("k".to_string()),
                value: Some("v".to_string()),
            }],
        })
        .build()
        .unwrap();
    let req = build_request(input, Method::PUT);
    let usecase = DefaultObjectUsecase::without_context();
    let err = usecase.execute_put_object_tagging(req).await.unwrap_err();
    assert_eq!(err.code(), &S3ErrorCode::InternalError);
}
#[tokio::test] #[tokio::test]
async fn execute_head_object_rejects_range_with_part_number() { async fn execute_head_object_rejects_range_with_part_number() {
let input = HeadObjectInput::builder() let input = HeadObjectInput::builder()

View File

@@ -421,6 +421,22 @@ pub fn has_bypass_governance_header(headers: &http::HeaderMap) -> bool {
.unwrap_or(false) .unwrap_or(false)
} }
// Named helpers pinning each authorization check to its exact IAM action.
// These exist so the bucket-policy vs. object-ACL actions can't be silently
// swapped again at the call sites (and so tests can assert the mapping).

/// IAM action required to read a bucket policy.
fn get_bucket_policy_authorize_action() -> Action {
    Action::S3Action(S3Action::GetBucketPolicyAction)
}

/// IAM action required to read an object's ACL.
fn get_object_acl_authorize_action() -> Action {
    Action::S3Action(S3Action::GetObjectAclAction)
}

/// IAM action required to write a bucket policy.
fn put_bucket_policy_authorize_action() -> Action {
    Action::S3Action(S3Action::PutBucketPolicyAction)
}

/// IAM action required to write an object's ACL.
fn put_object_acl_authorize_action() -> Action {
    Action::S3Action(S3Action::PutObjectAclAction)
}
#[async_trait::async_trait] #[async_trait::async_trait]
impl S3Access for FS { impl S3Access for FS {
// /// Checks whether the current request has accesses to the resources. // /// Checks whether the current request has accesses to the resources.
@@ -865,7 +881,7 @@ impl S3Access for FS {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found"); let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
req_info.bucket = Some(req.input.bucket.clone()); req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetObjectAclAction)).await authorize_request(req, get_bucket_policy_authorize_action()).await
} }
/// Checks whether the GetBucketPolicyStatus request has accesses to the resources. /// Checks whether the GetBucketPolicyStatus request has accesses to the resources.
@@ -943,7 +959,7 @@ impl S3Access for FS {
req_info.object = Some(req.input.key.clone()); req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone(); req_info.version_id = req.input.version_id.clone();
authorize_request(req, Action::S3Action(S3Action::GetBucketPolicyAction)).await authorize_request(req, get_object_acl_authorize_action()).await
} }
/// Checks whether the GetObjectAttributes request has accesses to the resources. /// Checks whether the GetObjectAttributes request has accesses to the resources.
@@ -1270,7 +1286,7 @@ impl S3Access for FS {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found"); let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
req_info.bucket = Some(req.input.bucket.clone()); req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutObjectAclAction)).await authorize_request(req, put_bucket_policy_authorize_action()).await
} }
/// Checks whether the PutBucketReplication request has accesses to the resources. /// Checks whether the PutBucketReplication request has accesses to the resources.
@@ -1340,7 +1356,7 @@ impl S3Access for FS {
req_info.object = Some(req.input.key.clone()); req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone(); req_info.version_id = req.input.version_id.clone();
authorize_request(req, Action::S3Action(S3Action::PutBucketPolicyAction)).await authorize_request(req, put_object_acl_authorize_action()).await
} }
/// Checks whether the PutObjectLegalHold request has accesses to the resources. /// Checks whether the PutObjectLegalHold request has accesses to the resources.
@@ -1452,3 +1468,28 @@ impl S3Access for FS {
Ok(()) Ok(())
} }
} }
#[cfg(test)]
mod tests {
    use super::*;

    // Each test pins an authorize-action helper to the exact IAM action it
    // must map to, guarding against the get/put and bucket/object mix-ups
    // these helpers were introduced to prevent.

    #[test]
    fn get_bucket_policy_uses_get_bucket_policy_action() {
        assert_eq!(get_bucket_policy_authorize_action(), Action::S3Action(S3Action::GetBucketPolicyAction));
    }

    #[test]
    fn get_object_acl_uses_get_object_acl_action() {
        assert_eq!(get_object_acl_authorize_action(), Action::S3Action(S3Action::GetObjectAclAction));
    }

    #[test]
    fn put_bucket_policy_uses_put_bucket_policy_action() {
        assert_eq!(put_bucket_policy_authorize_action(), Action::S3Action(S3Action::PutBucketPolicyAction));
    }

    #[test]
    fn put_object_acl_uses_put_object_acl_action() {
        assert_eq!(put_object_acl_authorize_action(), Action::S3Action(S3Action::PutObjectAclAction));
    }
}

File diff suppressed because it is too large Load Diff