Compare commits

..

10 Commits

Author SHA1 Message Date
overtrue
7df97fb266 Merge branch 'main' into feat/net-mock-resolver 2025-10-15 21:26:07 +08:00
安正超
8f310cd4a8 test: allow mocking dns resolver (#656) 2025-10-15 21:24:03 +08:00
overtrue
e99da872ac test: allow mocking dns resolver 2025-10-15 20:53:56 +08:00
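
The two entries above carry only the title "test: allow mocking dns resolver". Purely as an illustrative sketch (not the actual change in #656), the usual way to make resolution mockable is to hide lookups behind a small trait so tests can inject canned addresses; all names below are hypothetical:

```rust
use std::io;
use std::net::{IpAddr, ToSocketAddrs};

// Hypothetical abstraction; the real RustFS networking code may differ.
trait Resolve {
    fn resolve(&self, host: &str) -> io::Result<Vec<IpAddr>>;
}

// Production path: delegate to the system resolver.
struct SystemResolver;

impl Resolve for SystemResolver {
    fn resolve(&self, host: &str) -> io::Result<Vec<IpAddr>> {
        Ok((host, 0).to_socket_addrs()?.map(|sa| sa.ip()).collect())
    }
}

// Test double: return fixed addresses so DNS-dependent code paths can be
// exercised without real lookups or network access.
struct MockResolver(Vec<IpAddr>);

impl Resolve for MockResolver {
    fn resolve(&self, _host: &str) -> io::Result<Vec<IpAddr>> {
        Ok(self.0.clone())
    }
}

fn main() -> io::Result<()> {
    let mock = MockResolver(vec!["127.0.0.1".parse().unwrap()]);
    assert_eq!(mock.resolve("node1.internal")?, vec!["127.0.0.1".parse::<IpAddr>().unwrap()]);
    Ok(())
}
```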
majinghe
8ed01a3e06 Refactor mnmd docker compose for extensibility (#652) 2025-10-15 03:48:05 +08:00
loverustfs
9e1739ed8d chore(docs): update README and README_ZH (#649) 2025-10-13 18:49:34 +08:00
loverustfs
7abbfc9c2c RustFS trending images
RustFS trending
2025-10-13 17:45:54 +08:00
安正超
639bf0c233 Revert "feat(append): implement object append operations with state tracking (#599)" (#646)
This reverts commit 4f73760a45.
2025-10-12 23:47:51 +08:00
Copilot
ad99019749 Add complete MNMD Docker deployment example with startup coordination and VolumeNotFound fix (#642)
* Initial plan

* Add MNMD Docker deployment example with 4 nodes x 4 drives

- Create docs/examples/mnmd/ directory structure
- Add docker-compose.yml with proper disk indexing (1..4)
- Add wait-and-start.sh for startup coordination
- Add README.md with usage instructions and alternatives
- Add CHECKLIST.md with step-by-step verification
- Fixes VolumeNotFound issue by using correct volume paths
- Implements health checks and startup ordering
- Uses service names for stable inter-node addressing

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add docs/examples README as index for deployment examples

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add automated test script for MNMD deployment

- Add test-deployment.sh with comprehensive validation
- Test container status, health, endpoints, connectivity
- Update README to reference test script
- Make script executable

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* improve code

* improve code

* improve dep crates `cargo shear --fix`

* upgrade aws-sdk-s3

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-10-12 13:15:14 +08:00
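
The wait-and-start.sh script and compose health checks described above are not part of this compare view, and the real script is shell. Purely to illustrate the coordination idea ("block until every peer answers on its port, then start"), and with hypothetical node names, the same logic in Rust would look roughly like this:

```rust
use std::net::{TcpStream, ToSocketAddrs};
use std::thread::sleep;
use std::time::Duration;

/// Wait until every peer accepts a TCP connection. A real startup script
/// would also add an overall timeout instead of retrying forever.
fn wait_for_peers(peers: &[&str]) {
    for peer in peers {
        loop {
            // Compose service names (e.g. "node1:9000") resolve on the shared
            // network, which is why stable service names are recommended for
            // inter-node addressing.
            let ready = peer
                .to_socket_addrs()
                .ok()
                .and_then(|mut addrs| addrs.next())
                .map(|addr| TcpStream::connect_timeout(&addr, Duration::from_secs(2)).is_ok())
                .unwrap_or(false);
            if ready {
                break;
            }
            sleep(Duration::from_secs(1));
        }
    }
}

fn main() {
    // Hypothetical names matching the 4-node x 4-drive layout described above.
    wait_for_peers(&["node1:9000", "node2:9000", "node3:9000", "node4:9000"]);
    // ...then start the local server process.
}
```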
houseme
aac9b1edb7 chore: improve event and docker-compose; improve the permissions of the endpoint health interface; upgrade otel from 0.30.0 to 0.31.0 (#620)
* feat: improve code for notify

* upgrade starshard version

* upgrade version

* Fix ETag format to comply with HTTP standards by wrapping with quotes (#592)

* Initial plan

* Fix ETag format to comply with HTTP standards by wrapping with quotes

Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>

* bugfix

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>
Co-authored-by: overtrue <anzhengchao@gmail.com>

* Improve lock (#596)

* improve lock

Signed-off-by: Mu junxiang <1948535941@qq.com>

* feat(tests): add wait_for_object_absence helper and improve lifecycle test reliability

Signed-off-by: Mu junxiang <1948535941@qq.com>

* chore: remove dirty docs

Signed-off-by: Mu junxiang <1948535941@qq.com>

---------

Signed-off-by: Mu junxiang <1948535941@qq.com>

* feat(append): implement object append operations with state tracking (#599)

* feat(append): implement object append operations with state tracking

Signed-off-by: junxiang Mu <1948535941@qq.com>

* chore: rebase

Signed-off-by: junxiang Mu <1948535941@qq.com>

---------

Signed-off-by: junxiang Mu <1948535941@qq.com>

* build(deps): upgrade s3s (#595)

Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>

* fix: validate mqtt broker

* improve code for `import`

* upgrade otel relation crates version

* fix:dep("jsonwebtoken") feature = 'rust_crypto'

* fix

* fix

* fix

* upgrade version

* improve code for ecfs

* chore: improve event and docker-compose; improve the permissions of the `endpoint` health interface

* fix

* fix

* fix

* fix

* improve code

* fix

---------

Signed-off-by: Mu junxiang <1948535941@qq.com>
Signed-off-by: junxiang Mu <1948535941@qq.com>
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>
Co-authored-by: overtrue <anzhengchao@gmail.com>
Co-authored-by: guojidan <63799833+guojidan@users.noreply.github.com>
Co-authored-by: Nugine <nugine@foxmail.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-10-11 09:08:25 +08:00
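
One piece folded into the commit above, "Fix ETag format to comply with HTTP standards by wrapping with quotes" (#592), comes down to the rule that HTTP entity tags are quoted strings (RFC 9110). A minimal sketch of the idea, not the project's actual helper:

```rust
/// Illustrative only: a bare digest such as d41d8cd98f00b204e9800998ecf8427e
/// must be returned as "d41d8cd98f00b204e9800998ecf8427e". Weak validators
/// (W/"...") and already-quoted values are passed through unchanged.
fn quote_etag(etag: &str) -> String {
    if etag.starts_with("W/\"") || (etag.starts_with('"') && etag.ends_with('"')) {
        etag.to_string()
    } else {
        format!("\"{etag}\"")
    }
}

fn main() {
    assert_eq!(quote_etag("d41d8cd98f00b204e9800998ecf8427e"), "\"d41d8cd98f00b204e9800998ecf8427e\"");
    assert_eq!(quote_etag("\"already-quoted\""), "\"already-quoted\"");
}
```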
weisd
5689311cff fix:#630 (#633) 2025-10-10 15:16:28 +08:00
63 changed files with 2780 additions and 5067 deletions

Cargo.lock (generated): 1081 changed lines; file diff suppressed because it is too large.

@@ -99,8 +99,10 @@ async-recursion = "1.1.1"
async-trait = "0.1.89"
async-compression = { version = "0.4.19" }
atomic_enum = "0.3.0"
aws-config = { version = "1.8.6" }
aws-sdk-s3 = { version = "1.106.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
aws-config = { version = "1.8.8" }
aws-credential-types = { version = "1.2.8" }
aws-smithy-types = { version = "1.3.3" }
aws-sdk-s3 = { version = "1.108.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
axum = "0.8.6"
axum-extra = "0.10.3"
axum-server = { version = "0.7.2", features = ["tls-rustls-no-provider"], default-features = false }
@@ -111,6 +113,7 @@ bytes = { version = "1.10.1", features = ["serde"] }
bytesize = "2.1.0"
byteorder = "1.5.0"
cfg-if = "1.0.3"
convert_case = "0.8.0"
crc-fast = "1.3.0"
chacha20poly1305 = { version = "0.10.1" }
chrono = { version = "0.4.42", features = ["serde"] }
@@ -119,18 +122,18 @@ const-str = { version = "0.7.0", features = ["std", "proc"] }
crc32fast = "1.5.0"
criterion = { version = "0.7", features = ["html_reports"] }
crossbeam-queue = "0.3.12"
dashmap = "6.1.0"
datafusion = "50.0.0"
datafusion = "50.1.0"
derive_builder = "0.20.2"
enumset = "1.1.10"
flatbuffers = "25.9.23"
flate2 = "1.1.2"
flexi_logger = { version = "0.31.4", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv"] }
flate2 = "1.1.4"
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv"] }
form_urlencoded = "1.2.2"
futures = "0.3.31"
futures-core = "0.3.31"
futures-util = "0.3.31"
glob = "0.3.3"
hashbrown = { version = "0.16.0", features = ["serde", "rayon"] }
hex-simd = "0.8.0"
highway = { version = "1.3.0" }
hickory-resolver = { version = "0.25.2", features = ["tls-ring"] }
@@ -146,8 +149,9 @@ http = "1.3.1"
http-body = "1.0.1"
humantime = "2.3.0"
ipnetwork = { version = "0.21.1", features = ["serde"] }
jsonwebtoken = "9.3.1"
jsonwebtoken = { version = "10.0.0", features = ["rust_crypto"] }
lazy_static = "1.5.0"
libc = "0.2.177"
libsystemd = { version = "0.7.2" }
local-ip-address = "0.6.5"
lz4 = "1.28.1"
@@ -158,39 +162,39 @@ mime_guess = "2.0.5"
moka = { version = "0.12.11", features = ["future"] }
netif = "0.1.6"
nix = { version = "0.30.1", features = ["fs"] }
nu-ansi-term = "0.50.1"
nu-ansi-term = "0.50.3"
num_cpus = { version = "1.17.0" }
nvml-wrapper = "0.11.0"
object_store = "0.12.4"
once_cell = "1.21.3"
opentelemetry = { version = "0.30.0" }
opentelemetry-appender-tracing = { version = "0.30.1", features = [
opentelemetry = { version = "0.31.0" }
opentelemetry-appender-tracing = { version = "0.31.1", features = [
"experimental_use_tracing_span_context",
"experimental_metadata_attributes",
"spec_unstable_logs_enabled"
] }
opentelemetry_sdk = { version = "0.30.0" }
opentelemetry-stdout = { version = "0.30.0" }
opentelemetry-otlp = { version = "0.30.0", default-features = false, features = [
opentelemetry_sdk = { version = "0.31.0" }
opentelemetry-stdout = { version = "0.31.0" }
opentelemetry-otlp = { version = "0.31.0", default-features = false, features = [
"grpc-tonic", "gzip-tonic", "trace", "metrics", "logs", "internal-logs"
] }
opentelemetry-semantic-conventions = { version = "0.30.0", features = [
opentelemetry-semantic-conventions = { version = "0.31.0", features = [
"semconv_experimental",
] }
parking_lot = "0.12.4"
parking_lot = "0.12.5"
path-absolutize = "3.1.1"
path-clean = "1.0.1"
blake3 = { version = "1.8.2" }
pbkdf2 = "0.12.2"
percent-encoding = "2.3.2"
pin-project-lite = "0.2.16"
prost = "0.14.1"
pretty_assertions = "1.4.1"
quick-xml = "0.38.3"
rand = "0.9.2"
rayon = "1.11.0"
rdkafka = { version = "0.38.0", features = ["tokio"] }
reed-solomon-simd = { version = "3.0.1" }
regex = { version = "1.11.3" }
regex = { version = "1.12.1" }
reqwest = { version = "0.12.23", default-features = false, features = [
"rustls-tls-webpki-roots",
"charset",
@@ -200,13 +204,13 @@ reqwest = { version = "0.12.23", default-features = false, features = [
"json",
"blocking",
] }
rmcp = { version = "0.6.4" }
rmcp = { version = "0.8.1" }
rmp = "0.8.14"
rmp-serde = "1.3.0"
rsa = "0.9.8"
rumqttc = { version = "0.25.0" }
rust-embed = { version = "8.7.2" }
rustfs-rsc = "2025.506.1"
rustc-hash = { version = "2.1.1" }
rustls = { version = "0.23.32", features = ["ring", "logging", "std", "tls12"], default-features = false }
rustls-pki-types = "1.12.0"
rustls-pemfile = "2.2.0"
@@ -225,6 +229,7 @@ smartstring = "1.0.1"
snafu = "0.8.9"
snap = "1.1.1"
socket2 = "0.6.0"
starshard = { version = "0.5.0", features = ["rayon", "async", "serde"] }
strum = { version = "0.27.2", features = ["derive"] }
sysinfo = "0.37.1"
sysctl = "0.7.1"
@@ -253,7 +258,7 @@ tower-http = { version = "0.6.6", features = ["cors"] }
tracing = "0.1.41"
tracing-core = "0.1.34"
tracing-error = "0.2.1"
tracing-opentelemetry = "0.31.0"
tracing-opentelemetry = "0.32.0"
tracing-subscriber = { version = "0.3.20", features = ["env-filter", "time"] }
transform-stream = "0.3.1"
url = "2.5.7"
@@ -269,7 +274,7 @@ wildmatch = { version = "2.5.0", features = ["serde"] }
zeroize = { version = "1.8.2", features = ["derive"] }
winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
zip = "5.1.1"
zip = "6.0.0"
zstd = "0.13.3"


@@ -172,8 +172,18 @@ RustFS is a community-driven project, and we appreciate all contributions. Check
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
</a>
## Github Trending Top
🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending top charts.
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
## License
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
**RustFS** is a trademark of RustFS, Inc. All other trademarks are the property of their respective owners.


@@ -122,6 +122,14 @@ RustFS 是一个社区驱动的项目,我们感谢所有的贡献。查看[贡
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
</a >
## Github 全球推荐榜
🚀 RustFS 受到了全世界开源爱好者和企业用户的喜欢多次登顶Github Trending全球榜。
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
## 许可证
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)


@@ -12,9 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::AuditEntry;
use crate::AuditResult;
use crate::AuditSystem;
use crate::{AuditEntry, AuditResult, AuditSystem};
use once_cell::sync::OnceCell;
use rustfs_ecstore::config::Config;
use std::sync::Arc;


@@ -17,9 +17,11 @@ use crate::AuditRegistry;
use crate::observability;
use crate::{AuditError, AuditResult};
use rustfs_ecstore::config::Config;
use rustfs_targets::store::{Key, Store};
use rustfs_targets::target::EntityTarget;
use rustfs_targets::{StoreError, Target, TargetError};
use rustfs_targets::{
StoreError, Target, TargetError,
store::{Key, Store},
target::EntityTarget,
};
use std::sync::Arc;
use tokio::sync::{Mutex, RwLock};
use tracing::{error, info, warn};
@@ -257,7 +259,7 @@ impl AuditSystem {
let target_id_clone = target_id.clone();
// Create EntityTarget for the audit log entry
let entity_target = rustfs_targets::target::EntityTarget {
let entity_target = EntityTarget {
object_name: entry.api.name.clone().unwrap_or_default(),
bucket_name: entry.api.bucket.clone().unwrap_or_default(),
event_name: rustfs_targets::EventName::ObjectCreatedPut, // Default, should be derived from entry
@@ -337,7 +339,7 @@ impl AuditSystem {
let mut success_count = 0;
let mut errors = Vec::new();
for entry in entries_clone {
let entity_target = rustfs_targets::target::EntityTarget {
let entity_target = EntityTarget {
object_name: entry.api.name.clone().unwrap_or_default(),
bucket_name: entry.api.bucket.clone().unwrap_or_default(),
event_name: rustfs_targets::EventName::ObjectCreatedPut,


@@ -49,5 +49,4 @@ uuid = { workspace = true }
base64 = { workspace = true }
rand = { workspace = true }
chrono = { workspace = true }
http.workspace = true
md5 = { workspace = true }


@@ -13,16 +13,25 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! 分片上传加密功能的分步测试用例
//!
//! 这个测试套件将验证分片上传加密功能的每一个步骤:
//! 1. 测试基础的单分片加密(验证加密基础逻辑)
//! 2. 测试多分片上传(验证分片拼接逻辑)
//! 3. 测试加密元数据的保存和读取
//! 4. 测试完整的分片上传加密流程
use super::common::LocalKMSTestEnvironment;
use crate::common::{TEST_BUCKET, init_logging};
use serial_test::serial;
use tracing::{debug, info};
/// 步骤1测试基础单文件加密功能确保SSE-S3在非分片场景下正常工作
#[tokio::test]
#[serial]
async fn test_step1_basic_single_file_encryption() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("🧪 step1: test basic single file encryption");
info!("🧪 步骤1测试基础单文件加密功能");
let mut kms_env = LocalKMSTestEnvironment::new().await?;
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
@@ -31,11 +40,11 @@ async fn test_step1_basic_single_file_encryption() -> Result<(), Box<dyn std::er
let s3_client = kms_env.base_env.create_s3_client();
kms_env.base_env.create_test_bucket(TEST_BUCKET).await?;
// test small file encryption (should inline store)
// 测试小文件加密(应该会内联存储)
let test_data = b"Hello, this is a small test file for SSE-S3!";
let object_key = "test-single-file-encrypted";
info!("📤 step1: upload small file ({}) with SSE-S3 encryption", test_data.len());
info!("📤 上传小文件({}字节启用SSE-S3加密", test_data.len());
let put_response = s3_client
.put_object()
.bucket(TEST_BUCKET)
@@ -45,41 +54,41 @@ async fn test_step1_basic_single_file_encryption() -> Result<(), Box<dyn std::er
.send()
.await?;
debug!("PUT response ETag: {:?}", put_response.e_tag());
debug!("PUT response SSE: {:?}", put_response.server_side_encryption());
debug!("PUT响应ETag: {:?}", put_response.e_tag());
debug!("PUT响应SSE: {:?}", put_response.server_side_encryption());
// verify PUT response contains correct encryption header
// 验证PUT响应包含正确的加密头
assert_eq!(
put_response.server_side_encryption(),
Some(&aws_sdk_s3::types::ServerSideEncryption::Aes256)
);
info!("📥 step1: download file and verify encryption status");
info!("📥 下载文件并验证加密状态");
let get_response = s3_client.get_object().bucket(TEST_BUCKET).key(object_key).send().await?;
debug!("GET response SSE: {:?}", get_response.server_side_encryption());
debug!("GET响应SSE: {:?}", get_response.server_side_encryption());
// verify GET response contains correct encryption header
// 验证GET响应包含正确的加密头
assert_eq!(
get_response.server_side_encryption(),
Some(&aws_sdk_s3::types::ServerSideEncryption::Aes256)
);
// verify data integrity
// 验证数据完整性
let downloaded_data = get_response.body.collect().await?.into_bytes();
assert_eq!(&downloaded_data[..], test_data);
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
info!("step1: basic single file encryption works as expected");
info!("步骤1通过基础单文件加密功能正常");
Ok(())
}
/// test basic multipart upload without encryption
/// 步骤2测试不加密的分片上传确保分片上传基础功能正常
#[tokio::test]
#[serial]
async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("🧪 step2: test basic multipart upload without encryption");
info!("🧪 步骤2测试不加密的分片上传");
let mut kms_env = LocalKMSTestEnvironment::new().await?;
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
@@ -93,16 +102,12 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
let total_parts = 2;
let total_size = part_size * total_parts;
// generate test data (with clear pattern for easy verification)
// 生成测试数据(有明显的模式便于验证)
let test_data: Vec<u8> = (0..total_size).map(|i| (i % 256) as u8).collect();
info!(
"🚀 step2: start multipart upload (no encryption) with {} parts, each {}MB",
total_parts,
part_size / (1024 * 1024)
);
info!("🚀 开始分片上传(无加密):{} parts每个 {}MB", total_parts, part_size / (1024 * 1024));
// step1: create multipart upload
// 步骤1创建分片上传
let create_multipart_output = s3_client
.create_multipart_upload()
.bucket(TEST_BUCKET)
@@ -111,16 +116,16 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
.await?;
let upload_id = create_multipart_output.upload_id().unwrap();
info!("📋 step2: create multipart upload, ID: {}", upload_id);
info!("📋 创建分片上传,ID: {}", upload_id);
// step2: upload each part
// 步骤2上传各个分片
let mut completed_parts = Vec::new();
for part_number in 1..=total_parts {
let start = (part_number - 1) * part_size;
let end = std::cmp::min(start + part_size, total_size);
let part_data = &test_data[start..end];
info!("📤 step2: upload part {} ({} bytes)", part_number, part_data.len());
info!("📤 上传分片 {} ({} bytes)", part_number, part_data.len());
let upload_part_output = s3_client
.upload_part()
@@ -140,15 +145,15 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
.build(),
);
debug!("step2: part {} uploaded, ETag: {}", part_number, etag);
debug!("分片 {} 上传完成,ETag: {}", part_number, etag);
}
// step3: complete multipart upload
// 步骤3完成分片上传
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
.set_parts(Some(completed_parts))
.build();
info!("🔗 step2: complete multipart upload");
info!("🔗 完成分片上传");
let complete_output = s3_client
.complete_multipart_upload()
.bucket(TEST_BUCKET)
@@ -158,16 +163,10 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
.send()
.await?;
debug!("step2: multipart upload completed, ETag: {:?}", complete_output.e_tag());
debug!("完成分片上传,ETag: {:?}", complete_output.e_tag());
// step4: verify multipart upload completed successfully
assert_eq!(
complete_output.e_tag().unwrap().to_string(),
format!("\"{}-{}-{}\"", object_key, upload_id, total_parts)
);
// verify data integrity
info!("📥 step2: download file and verify data integrity");
// 步骤4下载并验证
info!("📥 下载文件并验证数据完整性");
let get_response = s3_client.get_object().bucket(TEST_BUCKET).key(object_key).send().await?;
let downloaded_data = get_response.body.collect().await?.into_bytes();
@@ -175,16 +174,16 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
assert_eq!(&downloaded_data[..], &test_data[..]);
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
info!("step2: basic multipart upload without encryption works as expected");
info!("步骤2通过不加密的分片上传功能正常");
Ok(())
}
/// test multipart upload with SSE-S3 encryption
/// 步骤3测试分片上传 + SSE-S3加密重点测试
#[tokio::test]
#[serial]
async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("🧪 step3: test multipart upload with SSE-S3 encryption");
info!("🧪 步骤3测试分片上传 + SSE-S3加密");
let mut kms_env = LocalKMSTestEnvironment::new().await?;
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
@@ -198,16 +197,16 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::er
let total_parts = 2;
let total_size = part_size * total_parts;
// generate test data (with clear pattern for easy verification)
// 生成测试数据
let test_data: Vec<u8> = (0..total_size).map(|i| ((i / 1000) % 256) as u8).collect();
info!(
"🔐 step3: start multipart upload with SSE-S3 encryption: {} parts, each {}MB",
"🔐 开始分片上传SSE-S3加密{} parts,每个 {}MB",
total_parts,
part_size / (1024 * 1024)
);
// step1: create multipart upload and enable SSE-S3
// 步骤1创建分片上传并启用SSE-S3
let create_multipart_output = s3_client
.create_multipart_upload()
.bucket(TEST_BUCKET)
@@ -217,24 +216,24 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::er
.await?;
let upload_id = create_multipart_output.upload_id().unwrap();
info!("📋 step3: create multipart upload with SSE-S3 encryption, ID: {}", upload_id);
info!("📋 创建加密分片上传,ID: {}", upload_id);
// step2: verify CreateMultipartUpload response (SSE-S3 header should be included)
// 验证CreateMultipartUpload响应如果有SSE头的话
if let Some(sse) = create_multipart_output.server_side_encryption() {
debug!("CreateMultipartUpload response contains SSE header: {:?}", sse);
debug!("CreateMultipartUpload包含SSE响应: {:?}", sse);
assert_eq!(sse, &aws_sdk_s3::types::ServerSideEncryption::Aes256);
} else {
debug!("CreateMultipartUpload response does not contain SSE header (some implementations may return empty string)");
debug!("CreateMultipartUpload不包含SSE响应头某些实现中正常");
}
// step2: upload each part
// 步骤2上传各个分片
let mut completed_parts = Vec::new();
for part_number in 1..=total_parts {
let start = (part_number - 1) * part_size;
let end = std::cmp::min(start + part_size, total_size);
let part_data = &test_data[start..end];
info!("🔐 step3: upload encrypted part {} ({} bytes)", part_number, part_data.len());
info!("🔐 上传加密分片 {} ({} bytes)", part_number, part_data.len());
let upload_part_output = s3_client
.upload_part()
@@ -254,15 +253,15 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::er
.build(),
);
debug!("step3: part {} uploaded, ETag: {}", part_number, etag);
debug!("加密分片 {} 上传完成,ETag: {}", part_number, etag);
}
// step3: complete multipart upload
// 步骤3完成分片上传
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
.set_parts(Some(completed_parts))
.build();
info!("🔗 step3: complete multipart upload with SSE-S3 encryption");
info!("🔗 完成加密分片上传");
let complete_output = s3_client
.complete_multipart_upload()
.bucket(TEST_BUCKET)
@@ -272,46 +271,43 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::er
.send()
.await?;
debug!(
"step3: complete multipart upload with SSE-S3 encryption, ETag: {:?}",
complete_output.e_tag()
);
debug!("完成加密分片上传ETag: {:?}", complete_output.e_tag());
// step4: HEAD request to check metadata
info!("📋 step4: check object metadata");
// 步骤4HEAD请求检查元数据
info!("📋 检查对象元数据");
let head_response = s3_client.head_object().bucket(TEST_BUCKET).key(object_key).send().await?;
debug!("HEAD response SSE: {:?}", head_response.server_side_encryption());
debug!("HEAD response metadata: {:?}", head_response.metadata());
debug!("HEAD响应 SSE: {:?}", head_response.server_side_encryption());
debug!("HEAD响应 元数据: {:?}", head_response.metadata());
// step5: GET request to download and verify
info!("📥 step5: download encrypted file and verify");
// 步骤5GET请求下载并验证
info!("📥 下载加密文件并验证");
let get_response = s3_client.get_object().bucket(TEST_BUCKET).key(object_key).send().await?;
debug!("GET response SSE: {:?}", get_response.server_side_encryption());
debug!("GET响应 SSE: {:?}", get_response.server_side_encryption());
// step5: verify GET response contains SSE-S3 encryption header
// 🎯 关键验证GET响应必须包含SSE-S3加密头
assert_eq!(
get_response.server_side_encryption(),
Some(&aws_sdk_s3::types::ServerSideEncryption::Aes256)
);
// step5: verify downloaded data matches original test data
// 验证数据完整性
let downloaded_data = get_response.body.collect().await?.into_bytes();
assert_eq!(downloaded_data.len(), total_size);
assert_eq!(&downloaded_data[..], &test_data[..]);
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
info!("step3: multipart upload with SSE-S3 encryption function is normal");
info!("步骤3通过分片上传 + SSE-S3加密功能正常");
Ok(())
}
/// step4: test larger multipart upload with encryption (streaming encryption)
/// 步骤4测试更大的分片上传测试流式加密
#[tokio::test]
#[serial]
async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("🧪 step4: test larger multipart upload with encryption (streaming encryption)");
info!("🧪 步骤4测试大文件分片上传加密");
let mut kms_env = LocalKMSTestEnvironment::new().await?;
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
@@ -326,13 +322,13 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
let total_size = part_size * total_parts;
info!(
"🗂️ step4: generate large test data: {} parts, each {}MB, total {}MB",
"🗂️ 生成大文件测试数据:{} parts每个 {}MB总计 {}MB",
total_parts,
part_size / (1024 * 1024),
total_size / (1024 * 1024)
);
// step4: generate large test data (using complex pattern for verification)
// 生成大文件测试数据(使用复杂模式便于验证)
let test_data: Vec<u8> = (0..total_size)
.map(|i| {
let part_num = i / part_size;
@@ -341,9 +337,9 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
})
.collect();
info!("🔐 step4: start large multipart upload with encryption (SSE-S3)");
info!("🔐 开始大文件分片上传(SSE-S3加密)");
// step4: create multipart upload
// 创建分片上传
let create_multipart_output = s3_client
.create_multipart_upload()
.bucket(TEST_BUCKET)
@@ -353,9 +349,9 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
.await?;
let upload_id = create_multipart_output.upload_id().unwrap();
info!("📋 step4: create multipart upload with encryption (SSE-S3), ID: {}", upload_id);
info!("📋 创建大文件加密分片上传,ID: {}", upload_id);
// step4: upload parts
// 上传各个分片
let mut completed_parts = Vec::new();
for part_number in 1..=total_parts {
let start = (part_number - 1) * part_size;
@@ -363,7 +359,7 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
let part_data = &test_data[start..end];
info!(
"🔐 step4: upload part {} ({:.2}MB)",
"🔐 上传大文件加密分片 {} ({:.2}MB)",
part_number,
part_data.len() as f64 / (1024.0 * 1024.0)
);
@@ -386,15 +382,15 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
.build(),
);
debug!("step4: upload part {} completed, ETag: {}", part_number, etag);
debug!("大文件加密分片 {} 上传完成,ETag: {}", part_number, etag);
}
// step4: complete multipart upload
// 完成分片上传
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
.set_parts(Some(completed_parts))
.build();
info!("🔗 step4: complete multipart upload with encryption (SSE-S3)");
info!("🔗 完成大文件加密分片上传");
let complete_output = s3_client
.complete_multipart_upload()
.bucket(TEST_BUCKET)
@@ -404,46 +400,40 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
.send()
.await?;
debug!(
"step4: complete multipart upload with encryption (SSE-S3), ETag: {:?}",
complete_output.e_tag()
);
debug!("完成大文件加密分片上传ETag: {:?}", complete_output.e_tag());
// step4: download and verify
info!("📥 step4: download and verify large multipart upload with encryption (SSE-S3)");
// 下载并验证
info!("📥 下载大文件并验证");
let get_response = s3_client.get_object().bucket(TEST_BUCKET).key(object_key).send().await?;
// step4: verify encryption header
// 验证加密头
assert_eq!(
get_response.server_side_encryption(),
Some(&aws_sdk_s3::types::ServerSideEncryption::Aes256)
);
// step4: verify data integrity
// 验证数据完整性
let downloaded_data = get_response.body.collect().await?.into_bytes();
assert_eq!(downloaded_data.len(), total_size);
// step4: verify data matches original test data
// 逐字节验证数据(对于大文件更严格)
for (i, (&actual, &expected)) in downloaded_data.iter().zip(test_data.iter()).enumerate() {
if actual != expected {
panic!(
"step4: large multipart upload with encryption (SSE-S3) data mismatch at byte {}: actual={}, expected={}",
i, actual, expected
);
panic!("大文件数据在第{i}字节不匹配: 实际={actual}, 期待={expected}");
}
}
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
info!("step4: large multipart upload with encryption (SSE-S3) functionality normal");
info!("步骤4通过大文件分片上传加密功能正常");
Ok(())
}
/// step5: test all encryption types multipart upload
/// 步骤5测试所有加密类型的分片上传
#[tokio::test]
#[serial]
async fn test_step5_all_encryption_types_multipart() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
info!("🧪 step5: test all encryption types multipart upload");
info!("🧪 步骤5测试所有加密类型的分片上传");
let mut kms_env = LocalKMSTestEnvironment::new().await?;
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
@@ -456,8 +446,8 @@ async fn test_step5_all_encryption_types_multipart() -> Result<(), Box<dyn std::
let total_parts = 2;
let total_size = part_size * total_parts;
// step5: test SSE-KMS multipart upload
info!("🔐 step5: test SSE-KMS multipart upload");
// 测试SSE-KMS
info!("🔐 测试 SSE-KMS 分片上传");
test_multipart_encryption_type(
&s3_client,
TEST_BUCKET,
@@ -469,8 +459,8 @@ async fn test_step5_all_encryption_types_multipart() -> Result<(), Box<dyn std::
)
.await?;
// step5: test SSE-C multipart upload
info!("🔐 step5: test SSE-C multipart upload");
// 测试SSE-C
info!("🔐 测试 SSE-C 分片上传");
test_multipart_encryption_type(
&s3_client,
TEST_BUCKET,
@@ -483,7 +473,7 @@ async fn test_step5_all_encryption_types_multipart() -> Result<(), Box<dyn std::
.await?;
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
info!("step5: all encryption types multipart upload functionality normal");
info!("步骤5通过所有加密类型的分片上传功能正常");
Ok(())
}
@@ -493,7 +483,7 @@ enum EncryptionType {
SSEC,
}
/// step5: test specific encryption type multipart upload
/// 辅助函数:测试特定加密类型的分片上传
async fn test_multipart_encryption_type(
s3_client: &aws_sdk_s3::Client,
bucket: &str,
@@ -503,10 +493,10 @@ async fn test_multipart_encryption_type(
total_parts: usize,
encryption_type: EncryptionType,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// step5: generate test data
// 生成测试数据
let test_data: Vec<u8> = (0..total_size).map(|i| ((i * 7) % 256) as u8).collect();
// step5: prepare SSE-C key and MD5 (if needed)
// 准备SSE-C所需的密钥如果需要
let (sse_c_key, sse_c_md5) = if matches!(encryption_type, EncryptionType::SSEC) {
let key = "01234567890123456789012345678901";
let key_b64 = base64::Engine::encode(&base64::engine::general_purpose::STANDARD, key);
@@ -516,10 +506,9 @@ async fn test_multipart_encryption_type(
(None, None)
};
// step5: create multipart upload
info!("🔗 step5: create multipart upload with encryption {:?}", encryption_type);
info!("📋 创建分片上传 - {:?}", encryption_type);
// step5: create multipart upload request
// 创建分片上传
let mut create_request = s3_client.create_multipart_upload().bucket(bucket).key(object_key);
create_request = match encryption_type {
@@ -533,6 +522,7 @@ async fn test_multipart_encryption_type(
let create_multipart_output = create_request.send().await?;
let upload_id = create_multipart_output.upload_id().unwrap();
// 上传分片
let mut completed_parts = Vec::new();
for part_number in 1..=total_parts {
let start = (part_number - 1) * part_size;
@@ -547,7 +537,7 @@ async fn test_multipart_encryption_type(
.part_number(part_number as i32)
.body(aws_sdk_s3::primitives::ByteStream::from(part_data.to_vec()));
// step5: include SSE-C key and MD5 in each UploadPart request (if needed)
// SSE-C需要在每个UploadPart请求中包含密钥
if matches!(encryption_type, EncryptionType::SSEC) {
upload_request = upload_request
.sse_customer_algorithm("AES256")
@@ -564,11 +554,10 @@ async fn test_multipart_encryption_type(
.build(),
);
// step5: complete multipart upload request
debug!("🔗 step5: complete multipart upload part {} with etag {}", part_number, etag);
debug!("{:?} 分片 {} 上传完成", encryption_type, part_number);
}
// step5: complete multipart upload
// 完成分片上传
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
.set_parts(Some(completed_parts))
.build();
@@ -582,12 +571,10 @@ async fn test_multipart_encryption_type(
.send()
.await?;
// step5: download and verify multipart upload
info!("🔗 step5: download and verify multipart upload with encryption {:?}", encryption_type);
// 下载并验证
let mut get_request = s3_client.get_object().bucket(bucket).key(object_key);
// step5: include SSE-C key and MD5 in each GET request (if needed)
// SSE-C需要在GET请求中包含密钥
if matches!(encryption_type, EncryptionType::SSEC) {
get_request = get_request
.sse_customer_algorithm("AES256")
@@ -597,7 +584,7 @@ async fn test_multipart_encryption_type(
let get_response = get_request.send().await?;
// step5: verify encryption headers
// 验证加密头
match encryption_type {
EncryptionType::SSEKMS => {
assert_eq!(
@@ -610,15 +597,11 @@ async fn test_multipart_encryption_type(
}
}
// step5: verify data integrity
// 验证数据完整性
let downloaded_data = get_response.body.collect().await?.into_bytes();
assert_eq!(downloaded_data.len(), total_size);
assert_eq!(&downloaded_data[..], &test_data[..]);
// step5: verify data integrity
info!(
"✅ step5: verify data integrity for multipart upload with encryption {:?}",
encryption_type
);
info!("✅ {:?} 分片上传测试通过", encryption_type);
Ok(())
}

File diff suppressed because it is too large.

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod append;
mod conditional_writes;
mod lifecycle;
mod lock;


@@ -75,7 +75,6 @@ hyper-util.workspace = true
hyper-rustls.workspace = true
rustls.workspace = true
tokio = { workspace = true, features = ["io-util", "sync", "signal"] }
tokio-stream = { workspace = true }
tonic.workspace = true
xxhash-rust = { workspace = true, features = ["xxh64", "xxh3"] }
tower.workspace = true
@@ -89,8 +88,6 @@ rustfs-madmin.workspace = true
rustfs-workers.workspace = true
reqwest = { workspace = true }
aws-sdk-s3 = { workspace = true }
once_cell = { workspace = true }
rustfs-rsc = { workspace = true }
urlencoding = { workspace = true }
smallvec = { workspace = true }
shadow-rs.workspace = true
@@ -99,13 +96,11 @@ rustfs-utils = { workspace = true, features = ["full"] }
rustfs-rio.workspace = true
rustfs-signer.workspace = true
rustfs-checksums.workspace = true
futures-util.workspace = true
async-recursion.workspace = true
aws-credential-types = "1.2.6"
aws-smithy-types = "1.3.2"
parking_lot = "0.12"
moka = { version = "0.12", features = ["future"] }
aws-smithy-runtime-api = "1.9.0"
aws-credential-types = { workspace = true }
aws-smithy-types = { workspace = true }
parking_lot = { workspace = true }
moka = { workspace = true }
[target.'cfg(not(windows))'.dependencies]
nix = { workspace = true }


@@ -167,19 +167,8 @@ async fn write_data_blocks<W>(
where
W: tokio::io::AsyncWrite + Send + Sync + Unpin,
{
let available = get_data_block_len(en_blocks, data_blocks);
if available < length {
let block_sizes: Vec<usize> = en_blocks
.iter()
.take(data_blocks)
.map(|block| block.as_ref().map(|buf| buf.len()).unwrap_or(0))
.collect();
error!(
expected = length,
available,
?block_sizes,
"write_data_blocks get_data_block_len < length"
);
if get_data_block_len(en_blocks, data_blocks) < length {
error!("write_data_blocks get_data_block_len < length");
return Err(io::Error::new(ErrorKind::UnexpectedEof, "Not enough data blocks to write"));
}


@@ -33,7 +33,6 @@ pub mod file_cache;
pub mod global;
pub mod metrics_realtime;
pub mod notification_sys;
pub mod object_append;
pub mod pools;
pub mod rebalance;
pub mod rpc;


@@ -1,725 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::bitrot::{create_bitrot_reader, create_bitrot_writer};
use crate::erasure_coding::{Erasure, calc_shard_size};
use crate::error::{Error, StorageError};
use crate::store_api::ObjectInfo;
use rustfs_filemeta::TRANSITION_COMPLETE;
use rustfs_utils::HashAlgorithm;
use rustfs_utils::http::headers::{
AMZ_SERVER_SIDE_ENCRYPTION, AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM, AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5, AMZ_SERVER_SIDE_ENCRYPTION_KMS_CONTEXT, AMZ_SERVER_SIDE_ENCRYPTION_KMS_ID,
RESERVED_METADATA_PREFIX_LOWER,
};
use std::collections::HashSet;
/// Ensure the target object can accept append writes under current state.
pub fn validate_append_preconditions(bucket: &str, object: &str, info: &ObjectInfo) -> Result<(), Error> {
if info.is_compressed() {
return Err(StorageError::InvalidArgument(
bucket.to_string(),
object.to_string(),
"append is not supported for compressed objects".to_string(),
));
}
let encryption_headers = [
AMZ_SERVER_SIDE_ENCRYPTION,
AMZ_SERVER_SIDE_ENCRYPTION_KMS_ID,
AMZ_SERVER_SIDE_ENCRYPTION_KMS_CONTEXT,
AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
];
if encryption_headers
.iter()
.any(|header| info.user_defined.contains_key(*header) || info.user_defined.contains_key(&header.to_ascii_lowercase()))
{
return Err(StorageError::InvalidArgument(
bucket.to_string(),
object.to_string(),
"append is not supported for encrypted objects".to_string(),
));
}
if info.transitioned_object.status == TRANSITION_COMPLETE || !info.transitioned_object.tier.is_empty() {
return Err(StorageError::InvalidArgument(
bucket.to_string(),
object.to_string(),
"append is not supported for transitioned objects".to_string(),
));
}
Ok(())
}
/// Validate that the requested append position matches the current object length.
pub fn validate_append_position(bucket: &str, object: &str, info: &ObjectInfo, expected_position: i64) -> Result<(), Error> {
if expected_position != info.size {
return Err(StorageError::InvalidArgument(
bucket.to_string(),
object.to_string(),
format!("append position mismatch: provided {}, expected {}", expected_position, info.size),
));
}
Ok(())
}
pub struct InlineAppendContext<'a> {
pub existing_inline: Option<&'a [u8]>,
pub existing_plain: Option<&'a [u8]>,
pub existing_size: i64,
pub append_payload: &'a [u8],
pub erasure: &'a Erasure,
pub hash_algorithm: HashAlgorithm,
pub has_checksums: bool,
}
pub struct InlineAppendResult {
pub inline_data: Vec<u8>,
pub total_size: i64,
pub etag: String,
}
/// Decode inline payload using available checksum algorithms. Returns raw bytes when decoding fails but
/// the inline buffer already contains the plain payload.
pub async fn decode_inline_payload(
inline: &[u8],
size: usize,
erasure: &Erasure,
preferred: HashAlgorithm,
) -> Result<(Vec<u8>, HashAlgorithm), Error> {
match decode_inline_variants(inline, size, erasure, preferred).await {
Ok((data, algo)) => Ok((data, algo)),
Err(err) => {
if inline.len() >= size {
Ok((inline[..size].to_vec(), HashAlgorithm::None))
} else {
Err(err)
}
}
}
}
/// Append data to an inline object and return the re-encoded inline buffer.
pub async fn append_inline_data(ctx: InlineAppendContext<'_>) -> Result<InlineAppendResult, Error> {
let mut plain = Vec::with_capacity(ctx.existing_inline.map(|data| data.len()).unwrap_or(0) + ctx.append_payload.len());
let mut encode_algorithm = ctx.hash_algorithm.clone();
if let Some(existing_plain) = ctx.existing_plain {
if existing_plain.len() != ctx.existing_size as usize {
return Err(StorageError::other("existing plain payload length mismatch"));
}
plain.extend_from_slice(existing_plain);
} else if ctx.existing_size > 0 {
let inline = ctx
.existing_inline
.ok_or_else(|| StorageError::other("inline payload missing"))?;
let (decoded, detected_algo) =
decode_inline_payload(inline, ctx.existing_size as usize, ctx.erasure, ctx.hash_algorithm.clone()).await?;
encode_algorithm = detected_algo;
plain.extend_from_slice(&decoded);
} else if let Some(inline) = ctx.existing_inline {
plain.extend_from_slice(inline);
}
plain.extend_from_slice(ctx.append_payload);
let total_size = plain.len() as i64;
let etag = md5_hex(&plain);
if encode_algorithm == HashAlgorithm::None {
if ctx.has_checksums {
encode_algorithm = ctx.hash_algorithm.clone();
} else {
return Ok(InlineAppendResult {
inline_data: plain,
total_size,
etag,
});
}
}
let mut writer = create_bitrot_writer(
true,
None,
"",
"",
ctx.erasure.shard_file_size(total_size),
ctx.erasure.shard_size(),
encode_algorithm,
)
.await
.map_err(|e| StorageError::other(format!("failed to create inline writer: {e}")))?;
let mut remaining = plain.as_slice();
while !remaining.is_empty() {
let chunk_len = remaining.len().min(ctx.erasure.block_size);
writer
.write(&remaining[..chunk_len])
.await
.map_err(|e| StorageError::other(format!("failed to write inline data: {e}")))?;
remaining = &remaining[chunk_len..];
}
writer
.shutdown()
.await
.map_err(|e| StorageError::other(format!("failed to finalize inline writer: {e}")))?;
let inline_data = writer
.into_inline_data()
.ok_or_else(|| StorageError::other("inline writer did not return data"))?;
Ok(InlineAppendResult {
inline_data,
total_size,
etag,
})
}
fn md5_hex(data: &[u8]) -> String {
let digest = HashAlgorithm::Md5.hash_encode(data);
hex_from_bytes(digest.as_ref())
}
fn hex_from_bytes(bytes: &[u8]) -> String {
let mut out = String::with_capacity(bytes.len() * 2);
for byte in bytes {
use std::fmt::Write;
write!(&mut out, "{:02x}", byte).expect("write hex");
}
out
}
async fn decode_inline_variants(
inline: &[u8],
size: usize,
erasure: &Erasure,
preferred: HashAlgorithm,
) -> Result<(Vec<u8>, HashAlgorithm), Error> {
let mut tried = HashSet::new();
let candidates = [preferred, HashAlgorithm::HighwayHash256, HashAlgorithm::HighwayHash256S];
let mut last_err: Option<Error> = None;
for algo in candidates {
if !tried.insert(algo.clone()) {
continue;
}
match decode_inline_with_algo(inline, size, erasure, algo.clone()).await {
Ok(data) => return Ok((data, algo)),
Err(err) => last_err = Some(err),
}
}
Err(last_err.unwrap_or_else(|| StorageError::other("failed to decode inline data")))
}
async fn decode_inline_with_algo(inline: &[u8], size: usize, erasure: &Erasure, algo: HashAlgorithm) -> Result<Vec<u8>, Error> {
let total_len = inline
.len()
.max(erasure.shard_file_size(size as i64).max(size as i64) as usize);
let mut reader = create_bitrot_reader(Some(inline), None, "", "", 0, total_len, erasure.shard_size(), algo)
.await
.map_err(|e| StorageError::other(format!("failed to create inline reader: {e}")))?
.ok_or_else(|| StorageError::other("inline reader unavailable"))?;
let mut out = Vec::with_capacity(size);
while out.len() < size {
let remaining = size - out.len();
let plain_chunk = remaining.min(erasure.block_size);
let shard_payload = calc_shard_size(plain_chunk, erasure.data_shards).max(1);
let mut buf = vec![0u8; shard_payload];
let read = reader
.read(&mut buf)
.await
.map_err(|e| StorageError::other(format!("failed to read inline data: {e}")))?;
if read == 0 {
return Err(StorageError::other("incomplete inline data read"));
}
let copy_len = remaining.min(read);
out.extend_from_slice(&buf[..copy_len]);
}
Ok(out)
}
/// Background task to spill inline data to segmented format
pub struct InlineSpillProcessor {
pub disks: Vec<Option<crate::disk::DiskStore>>,
pub write_quorum: usize,
}
impl InlineSpillProcessor {
pub fn new(disks: Vec<Option<crate::disk::DiskStore>>, write_quorum: usize) -> Self {
Self { disks, write_quorum }
}
/// Process a single spill operation from InlinePendingSpill to SegmentedActive
pub async fn process_spill(
&self,
bucket: &str,
object: &str,
mut fi: rustfs_filemeta::FileInfo,
mut parts_metadata: Vec<rustfs_filemeta::FileInfo>,
epoch: u64,
) -> Result<(), Error> {
use rustfs_filemeta::AppendStateKind;
use tracing::{debug, error, info, warn};
// Verify we're in the correct state
let current_state = fi.get_append_state();
if current_state.state != AppendStateKind::InlinePendingSpill {
warn!(
bucket = bucket,
object = object,
current_state = ?current_state.state,
"Spill processor called on object not in InlinePendingSpill state"
);
return Ok(());
}
// Check epoch to ensure we're processing the correct version
if current_state.epoch != epoch {
debug!(
bucket = bucket,
object = object,
current_epoch = current_state.epoch,
expected_epoch = epoch,
"Spill operation skipped due to epoch mismatch"
);
return Ok(());
}
info!(
bucket = bucket,
object = object,
size = fi.size,
epoch = epoch,
"Starting inline data spill to segmented format"
);
// Extract inline data
let inline_data = fi
.data
.clone()
.ok_or_else(|| StorageError::other("Cannot spill object without inline data"))?;
// Create erasure encoder
let erasure = Erasure::new(fi.erasure.data_blocks, fi.erasure.parity_blocks, fi.erasure.block_size);
// Decode inline data to plain data
let hash_algorithm = fi
.parts
.first()
.map(|part| fi.erasure.get_checksum_info(part.number).algorithm)
.unwrap_or(HashAlgorithm::HighwayHash256);
let plain_data = match decode_inline_payload(&inline_data, fi.size as usize, &erasure, hash_algorithm.clone()).await {
Ok((plain, _detected_algo)) => plain,
Err(err) => {
error!(
bucket = bucket,
object = object,
error = ?err,
"Failed to decode inline data during spill"
);
return Err(StorageError::other(format!("Failed to decode inline data for spill: {err}")));
}
};
// Generate data directory for the object
let data_dir = uuid::Uuid::new_v4();
// Create temporary directory for the spill operation
let tmp_root = format!("{}x{}", uuid::Uuid::new_v4(), time::OffsetDateTime::now_utc().unix_timestamp());
let tmp_path = format!("{tmp_root}/{}/part.1", data_dir);
// Encode and write the data to all disks
match self.write_segmented_data(&plain_data, &tmp_path, &erasure).await {
Ok(_) => {
// Move from temp to permanent location
let final_path = format!("{}/part.1", data_dir);
if let Err(err) = self.move_temp_to_final(&tmp_path, &final_path).await {
error!(
bucket = bucket,
object = object,
error = ?err,
"Failed to move spilled data to final location"
);
// Clean up temp files
let _ = self.cleanup_temp_files(&tmp_path).await;
return Err(err);
}
// Update file metadata
fi.data_dir = Some(data_dir);
fi.data = None; // Remove inline data
fi.metadata.remove(&format!("{}inline-data", RESERVED_METADATA_PREFIX_LOWER));
// Update append state to SegmentedActive
let mut new_state = current_state;
new_state.state = AppendStateKind::SegmentedActive;
new_state.epoch = new_state.epoch.saturating_add(1);
new_state.pending_segments.clear();
fi.set_append_state(&new_state)
.map_err(|err| StorageError::other(format!("Failed to update append state after spill: {err}")))?;
// Update all parts metadata
for meta in parts_metadata.iter_mut() {
if !meta.is_valid() {
continue;
}
meta.data_dir = Some(data_dir);
meta.data = None;
meta.metadata = fi.metadata.clone();
meta.metadata
.remove(&format!("{}inline-data", RESERVED_METADATA_PREFIX_LOWER));
}
// Write updated metadata back to disks
// TODO: Implement metadata write-back logic
// This would typically involve writing the updated FileInfo to all disks
info!(
bucket = bucket,
object = object,
data_dir = ?data_dir,
new_epoch = new_state.epoch,
"Successfully spilled inline data to segmented format"
);
Ok(())
}
Err(err) => {
error!(
bucket = bucket,
object = object,
error = ?err,
"Failed to write segmented data during spill"
);
// Clean up temp files
let _ = self.cleanup_temp_files(&tmp_path).await;
Err(err)
}
}
}
async fn write_segmented_data(&self, data: &[u8], tmp_path: &str, _erasure: &Erasure) -> Result<(), Error> {
use tracing::debug;
// TODO: Implement proper erasure encoding and writing to disks
// This is a placeholder implementation
debug!(
data_len = data.len(),
path = tmp_path,
"Writing segmented data (placeholder implementation)"
);
// For now, just return success - full implementation would:
// 1. Create bitrot writers for each disk
// 2. Erasure encode the data
// 3. Write each shard to its corresponding disk
Ok(())
}
async fn move_temp_to_final(&self, tmp_path: &str, final_path: &str) -> Result<(), Error> {
use tracing::debug;
// TODO: Implement moving temp files to final location
debug!(
tmp_path = tmp_path,
final_path = final_path,
"Moving temp files to final location (placeholder)"
);
Ok(())
}
async fn cleanup_temp_files(&self, tmp_path: &str) -> Result<(), Error> {
use tracing::debug;
// TODO: Implement temp file cleanup
debug!(tmp_path = tmp_path, "Cleaning up temp files (placeholder)");
Ok(())
}
}
/// Trigger background spill processing for an object
pub fn trigger_spill_process(
bucket: String,
object: String,
fi: rustfs_filemeta::FileInfo,
parts_metadata: Vec<rustfs_filemeta::FileInfo>,
epoch: u64,
disks: Vec<Option<crate::disk::DiskStore>>,
write_quorum: usize,
) {
use tracing::error;
tokio::spawn(async move {
let processor = InlineSpillProcessor::new(disks, write_quorum);
if let Err(err) = processor.process_spill(&bucket, &object, fi, parts_metadata, epoch).await {
error!(
bucket = bucket,
object = object,
epoch = epoch,
error = ?err,
"Background spill process failed"
);
}
});
}
#[cfg(test)]
mod tests {
use super::*;
use rustfs_utils::HashAlgorithm;
fn make_object_info() -> ObjectInfo {
ObjectInfo {
bucket: "test-bucket".to_string(),
name: "obj".to_string(),
..Default::default()
}
}
#[test]
fn rejects_compressed_objects() {
let mut info = make_object_info();
info.user_defined
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}compression"), "zstd".to_string());
let err = validate_append_preconditions("test-bucket", "obj", &info).unwrap_err();
matches!(err, StorageError::InvalidArgument(..))
.then_some(())
.expect("expected invalid argument");
}
#[test]
fn rejects_encrypted_objects() {
let mut info = make_object_info();
info.user_defined
.insert("x-amz-server-side-encryption".to_string(), "AES256".to_string());
let err = validate_append_preconditions("test-bucket", "obj", &info).unwrap_err();
matches!(err, StorageError::InvalidArgument(..))
.then_some(())
.expect("expected invalid argument");
}
#[test]
fn rejects_transitioned_objects() {
let mut info = make_object_info();
info.transitioned_object.tier = "GLACIER".to_string();
info.transitioned_object.status = TRANSITION_COMPLETE.to_string();
let err = validate_append_preconditions("test-bucket", "obj", &info).unwrap_err();
matches!(err, StorageError::InvalidArgument(..))
.then_some(())
.expect("expected invalid argument");
}
#[test]
fn accepts_plain_objects() {
let info = make_object_info();
validate_append_preconditions("test-bucket", "obj", &info).expect("append should be allowed");
}
#[test]
fn rejects_position_mismatch() {
let mut info = make_object_info();
info.size = 10;
let err = validate_append_position("test-bucket", "obj", &info, 5).unwrap_err();
matches!(err, StorageError::InvalidArgument(..))
.then_some(())
.expect("expected invalid argument");
}
fn make_inline_erasure() -> Erasure {
Erasure::new(1, 0, 1024)
}
async fn encode_inline(data: &[u8], erasure: &Erasure) -> Vec<u8> {
let mut writer = create_bitrot_writer(
true,
None,
"",
"",
erasure.shard_file_size(data.len() as i64),
erasure.shard_size(),
HashAlgorithm::HighwayHash256,
)
.await
.unwrap();
let mut remaining = data;
while !remaining.is_empty() {
let chunk_len = remaining.len().min(erasure.block_size);
writer.write(&remaining[..chunk_len]).await.unwrap();
remaining = &remaining[chunk_len..];
}
writer.shutdown().await.unwrap();
writer.into_inline_data().unwrap()
}
async fn decode_inline(encoded: &[u8], size: usize, erasure: &Erasure) -> Vec<u8> {
let mut reader =
create_bitrot_reader(Some(encoded), None, "", "", 0, size, erasure.shard_size(), HashAlgorithm::HighwayHash256)
.await
.unwrap()
.unwrap();
let mut out = Vec::with_capacity(size);
while out.len() < size {
let remaining = size - out.len();
let mut buf = vec![0u8; erasure.block_size.min(remaining.max(1))];
let read = reader.read(&mut buf).await.unwrap();
if read == 0 {
break;
}
out.extend_from_slice(&buf[..read.min(remaining)]);
}
out
}
#[tokio::test]
async fn append_inline_combines_payloads() {
let erasure = make_inline_erasure();
let existing_plain = b"hello";
let encoded = encode_inline(existing_plain, &erasure).await;
let ctx = InlineAppendContext {
existing_inline: Some(&encoded),
existing_plain: None,
existing_size: existing_plain.len() as i64,
append_payload: b" world",
erasure: &erasure,
hash_algorithm: HashAlgorithm::HighwayHash256,
has_checksums: true,
};
let result = append_inline_data(ctx).await.expect("inline append to succeed");
assert_eq!(result.total_size, 11);
assert_eq!(result.etag, md5_hex(b"hello world"));
let decoded = decode_inline(&result.inline_data, result.total_size as usize, &erasure).await;
assert_eq!(decoded, b"hello world");
}
#[tokio::test]
async fn decode_inline_handles_padded_shards() {
let erasure = Erasure::new(1, 0, 1024);
let plain = b"hello";
let mut padded = vec![0u8; calc_shard_size(plain.len(), erasure.data_shards)];
padded[..plain.len()].copy_from_slice(plain);
let mut writer = create_bitrot_writer(
true,
None,
"",
"",
erasure.shard_file_size(plain.len() as i64),
erasure.shard_size(),
HashAlgorithm::HighwayHash256,
)
.await
.unwrap();
writer.write(&padded).await.unwrap();
writer.shutdown().await.unwrap();
let inline = writer.into_inline_data().unwrap();
let (decoded, algo) = decode_inline_payload(&inline, plain.len(), &erasure, HashAlgorithm::HighwayHash256)
.await
.expect("inline decode should succeed");
assert_eq!(decoded, plain);
assert_eq!(algo, HashAlgorithm::HighwayHash256);
}
#[tokio::test]
async fn append_inline_handles_empty_original() {
let erasure = make_inline_erasure();
let ctx = InlineAppendContext {
existing_inline: None,
existing_plain: None,
existing_size: 0,
append_payload: b"data",
erasure: &erasure,
hash_algorithm: HashAlgorithm::HighwayHash256,
has_checksums: true,
};
let result = append_inline_data(ctx).await.expect("inline append to succeed");
assert_eq!(result.total_size, 4);
assert_eq!(result.etag, md5_hex(b"data"));
let decoded = decode_inline(&result.inline_data, result.total_size as usize, &erasure).await;
assert_eq!(decoded, b"data");
}
#[tokio::test]
async fn append_inline_without_checksums_uses_raw_bytes() {
let erasure = Erasure::new(1, 0, 1024);
let existing = b"hello";
let ctx = InlineAppendContext {
existing_inline: Some(existing),
existing_plain: None,
existing_size: existing.len() as i64,
append_payload: b" world",
erasure: &erasure,
hash_algorithm: HashAlgorithm::HighwayHash256,
has_checksums: false,
};
let result = append_inline_data(ctx).await.expect("inline append to succeed");
assert_eq!(result.total_size, 11);
assert_eq!(result.etag, md5_hex(b"hello world"));
assert_eq!(result.inline_data, b"hello world");
}
#[tokio::test]
async fn append_inline_decodes_bitrot_without_checksums() {
let erasure = Erasure::new(1, 0, 1024);
let existing_plain = b"hello";
let encoded = encode_inline(existing_plain, &erasure).await;
let ctx = InlineAppendContext {
existing_inline: Some(&encoded),
existing_plain: None,
existing_size: existing_plain.len() as i64,
append_payload: b" world",
erasure: &erasure,
hash_algorithm: HashAlgorithm::HighwayHash256,
has_checksums: false,
};
let result = append_inline_data(ctx).await.expect("inline append to succeed");
assert_eq!(result.total_size, 11);
assert_eq!(result.etag, md5_hex(b"hello world"));
let decoded = decode_inline(&result.inline_data, result.total_size as usize, &erasure).await;
assert_eq!(decoded, b"hello world");
}
}

File diff suppressed because it is too large.

@@ -602,14 +602,6 @@ impl StorageAPI for Sets {
(del_objects, del_errs)
}
async fn complete_append(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).complete_append(bucket, object, opts).await
}
async fn abort_append(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).abort_append(bucket, object, opts).await
}
async fn list_object_parts(
&self,
bucket: &str,


@@ -1709,17 +1709,6 @@ impl StorageAPI for ECStore {
// Ok((del_objects, del_errs))
}
async fn complete_append(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
let object = encode_dir_object(object);
let (pinfo, _) = self.internal_get_pool_info_existing_with_opts(bucket, &object, opts).await?;
self.pools[pinfo.index].complete_append(bucket, &object, opts).await
}
async fn abort_append(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
let object = encode_dir_object(object);
let (pinfo, _) = self.internal_get_pool_info_existing_with_opts(bucket, &object, opts).await?;
self.pools[pinfo.index].abort_append(bucket, &object, opts).await
}
#[tracing::instrument(skip(self))]
async fn list_object_parts(
&self,


@@ -328,8 +328,6 @@ pub struct ObjectOptions {
pub max_parity: bool,
pub mod_time: Option<OffsetDateTime>,
pub part_number: Option<usize>,
pub append_object: bool,
pub append_position: Option<i64>,
pub delete_prefix: bool,
pub delete_prefix_object: bool,
@@ -658,15 +656,6 @@ impl ObjectInfo {
})
.collect();
let append_state = fi.get_append_state();
let pending_length: i64 = append_state.pending_segments.iter().map(|seg| seg.length).sum();
let logical_size = append_state.committed_length.saturating_add(pending_length);
let actual_size_meta = fi
.metadata
.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size"))
.and_then(|o| o.parse::<i64>().ok())
.unwrap_or(logical_size);
ObjectInfo {
bucket: bucket.to_string(),
name,
@@ -676,7 +665,7 @@ impl ObjectInfo {
version_id,
delete_marker: fi.deleted,
mod_time: fi.mod_time,
size: logical_size,
size: fi.size,
parts,
is_latest: fi.is_latest,
user_tags,
@@ -688,7 +677,6 @@ impl ObjectInfo {
inlined,
user_defined: metadata,
transitioned_object,
actual_size: actual_size_meta,
..Default::default()
}
}
@@ -1200,10 +1188,6 @@ pub trait StorageAPI: ObjectIO + Debug {
opts: ObjectOptions,
) -> (Vec<DeletedObject>, Vec<Option<Error>>);
async fn complete_append(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
async fn abort_append(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
// TransitionObject TODO:
// RestoreTransitionedObject TODO:


@@ -30,7 +30,6 @@ crc32fast = { workspace = true }
rmp.workspace = true
rmp-serde.workspace = true
serde.workspace = true
serde_json.workspace = true
time.workspace = true
uuid = { workspace = true, features = ["v4", "fast-rng", "serde"] }
tokio = { workspace = true, features = ["io-util", "macros", "sync"] }


@@ -1,541 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::{Error, Result};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use uuid::Uuid;
const APPEND_STATE_META_KEY: &str = "x-rustfs-internal-append-state";
/// Tracks the state of append-enabled objects.
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct AppendState {
pub state: AppendStateKind,
pub epoch: u64,
pub committed_length: i64,
pub pending_segments: Vec<AppendSegment>,
}
/// Represents individual append segments that still need consolidation.
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct AppendSegment {
pub offset: i64,
pub length: i64,
pub data_dir: Option<Uuid>,
pub etag: Option<String>,
pub epoch: u64,
}
/// Possible append lifecycle states for an object version.
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub enum AppendStateKind {
#[default]
Disabled,
Inline,
InlinePendingSpill,
SegmentedActive,
SegmentedSealed,
}
/// Persist the provided append state into object metadata.
pub fn set_append_state(metadata: &mut HashMap<String, String>, state: &AppendState) -> Result<()> {
let encoded = serde_json::to_string(state).map_err(Error::other)?;
metadata.insert(APPEND_STATE_META_KEY.to_string(), encoded);
Ok(())
}
/// Remove the append state marker from metadata.
pub fn clear_append_state(metadata: &mut HashMap<String, String>) {
metadata.remove(APPEND_STATE_META_KEY);
}
/// Load append state stored in metadata, if any.
pub fn get_append_state(metadata: &HashMap<String, String>) -> Result<Option<AppendState>> {
let raw = match metadata.get(APPEND_STATE_META_KEY) {
Some(val) if !val.is_empty() => val,
_ => return Ok(None),
};
let decoded = serde_json::from_str(raw).map_err(Error::other)?;
Ok(Some(decoded))
}
/// Complete append operations by consolidating pending segments and sealing the object
pub fn complete_append_operation(state: &mut AppendState) -> Result<()> {
match state.state {
AppendStateKind::SegmentedActive => {
// Move all pending segments data to main parts and seal
state.committed_length += state.pending_segments.iter().map(|s| s.length).sum::<i64>();
state.pending_segments.clear();
state.state = AppendStateKind::SegmentedSealed;
state.epoch = state.epoch.saturating_add(1);
Ok(())
}
AppendStateKind::Inline => {
// Inline objects are always immediately committed, just seal them
state.state = AppendStateKind::SegmentedSealed; // Transition to sealed
state.epoch = state.epoch.saturating_add(1);
Ok(())
}
AppendStateKind::InlinePendingSpill => {
// Wait for spill to complete, then seal
// In practice, this might need to trigger the spill completion first
state.state = AppendStateKind::SegmentedSealed;
state.pending_segments.clear();
state.epoch = state.epoch.saturating_add(1);
Ok(())
}
AppendStateKind::SegmentedSealed | AppendStateKind::Disabled => {
// Already sealed or disabled
Err(Error::other("Cannot complete append on sealed or disabled object"))
}
}
}
/// Abort append operations by discarding pending segments and returning to sealed state
pub fn abort_append_operation(state: &mut AppendState) -> Result<()> {
match state.state {
AppendStateKind::SegmentedActive => {
// Discard all pending segments and seal
state.pending_segments.clear();
state.state = AppendStateKind::SegmentedSealed;
state.epoch = state.epoch.saturating_add(1);
Ok(())
}
AppendStateKind::Inline => {
// Inline data is already committed, just seal
state.state = AppendStateKind::SegmentedSealed;
state.epoch = state.epoch.saturating_add(1);
Ok(())
}
AppendStateKind::InlinePendingSpill => {
// Cancel spill and keep inline data, then seal
state.state = AppendStateKind::SegmentedSealed;
state.pending_segments.clear();
state.epoch = state.epoch.saturating_add(1);
Ok(())
}
AppendStateKind::SegmentedSealed | AppendStateKind::Disabled => {
// Already sealed or disabled
Err(Error::other("Cannot abort append on sealed or disabled object"))
}
}
}
/// Check if an append operation can be completed
pub fn can_complete_append(state: &AppendState) -> bool {
matches!(
state.state,
AppendStateKind::Inline | AppendStateKind::InlinePendingSpill | AppendStateKind::SegmentedActive
)
}
/// Check if an append operation can be aborted
pub fn can_abort_append(state: &AppendState) -> bool {
matches!(
state.state,
AppendStateKind::Inline | AppendStateKind::InlinePendingSpill | AppendStateKind::SegmentedActive
)
}
/// Verify epoch for optimistic concurrency control
pub fn verify_append_epoch(current_state: &AppendState, expected_epoch: u64) -> Result<()> {
if current_state.epoch != expected_epoch {
Err(Error::other(format!(
"Append operation conflict: expected epoch {}, found {}",
expected_epoch, current_state.epoch
)))
} else {
Ok(())
}
}
/// Prepare next append operation by incrementing epoch
pub fn prepare_next_append(state: &mut AppendState) {
state.epoch = state.epoch.saturating_add(1);
}
/// Validate that a new append segment doesn't conflict with existing segments
pub fn validate_new_segment(state: &AppendState, new_offset: i64, new_length: i64) -> Result<()> {
let new_end = new_offset + new_length;
// Check it doesn't overlap with committed data
if new_offset < state.committed_length {
return Err(Error::other(format!(
"New segment overlaps with committed data: offset {} < committed_length {}",
new_offset, state.committed_length
)));
}
// Check it doesn't overlap with existing pending segments
for existing in &state.pending_segments {
let existing_start = existing.offset;
let existing_end = existing.offset + existing.length;
// Check for any overlap
if new_offset < existing_end && new_end > existing_start {
return Err(Error::other(format!(
"New segment [{}, {}) overlaps with existing segment [{}, {})",
new_offset, new_end, existing_start, existing_end
)));
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::fileinfo::FileInfo;
#[test]
fn append_state_roundtrip_in_metadata() {
let mut metadata = HashMap::new();
let state = AppendState {
state: AppendStateKind::SegmentedActive,
epoch: 42,
committed_length: 2048,
pending_segments: vec![AppendSegment {
offset: 2048,
length: 512,
data_dir: Some(Uuid::new_v4()),
etag: Some("abc123".to_string()),
epoch: 0,
}],
};
set_append_state(&mut metadata, &state).expect("persist append state");
assert!(metadata.contains_key(APPEND_STATE_META_KEY));
let decoded = get_append_state(&metadata)
.expect("decode append state")
.expect("state present");
assert_eq!(decoded, state);
clear_append_state(&mut metadata);
assert!(!metadata.contains_key(APPEND_STATE_META_KEY));
assert!(get_append_state(&metadata).unwrap().is_none());
}
#[test]
fn fileinfo_append_state_migration_compatibility() {
// Test old inline data object
let mut inline_fi = FileInfo {
size: 1024,
..Default::default()
};
inline_fi.set_inline_data();
let state = inline_fi.get_append_state();
assert_eq!(state.state, AppendStateKind::Inline);
assert_eq!(state.committed_length, 1024);
assert!(state.pending_segments.is_empty());
assert!(inline_fi.is_appendable());
assert!(!inline_fi.has_pending_appends());
// Test old regular object
let regular_fi = FileInfo {
size: 2048,
..Default::default()
};
// No inline_data marker
let state = regular_fi.get_append_state();
assert_eq!(state.state, AppendStateKind::SegmentedSealed);
assert_eq!(state.committed_length, 2048);
assert!(state.pending_segments.is_empty());
assert!(!regular_fi.is_appendable());
assert!(!regular_fi.has_pending_appends());
// Test explicit append state
let mut append_fi = FileInfo::default();
let explicit_state = AppendState {
state: AppendStateKind::SegmentedActive,
epoch: 5,
committed_length: 1500,
pending_segments: vec![AppendSegment {
offset: 1500,
length: 300,
data_dir: Some(Uuid::new_v4()),
etag: Some("def456".to_string()),
epoch: 0,
}],
};
append_fi.set_append_state(&explicit_state).expect("set explicit state");
let retrieved_state = append_fi.get_append_state();
assert_eq!(retrieved_state, explicit_state);
assert!(append_fi.is_appendable());
assert!(append_fi.has_pending_appends());
}
#[test]
fn append_state_transitions() {
// Test state transition validation
assert_eq!(AppendStateKind::default(), AppendStateKind::Disabled);
let inline_state = AppendState {
state: AppendStateKind::Inline,
..Default::default()
};
let spill_state = AppendState {
state: AppendStateKind::InlinePendingSpill,
..Default::default()
};
let active_state = AppendState {
state: AppendStateKind::SegmentedActive,
..Default::default()
};
let sealed_state = AppendState {
state: AppendStateKind::SegmentedSealed,
..Default::default()
};
// Verify serialization works for all states
for state in [inline_state, spill_state, active_state, sealed_state] {
let mut metadata = HashMap::new();
set_append_state(&mut metadata, &state).expect("serialize state");
let decoded = get_append_state(&metadata).unwrap().unwrap();
assert_eq!(decoded, state);
}
}
#[test]
fn complete_append_transitions() {
// Test completing SegmentedActive with pending segments
let mut active_state = AppendState {
state: AppendStateKind::SegmentedActive,
epoch: 5,
committed_length: 1000,
pending_segments: vec![
AppendSegment {
offset: 1000,
length: 200,
data_dir: Some(Uuid::new_v4()),
etag: Some("abc123".to_string()),
epoch: 0,
},
AppendSegment {
offset: 1200,
length: 300,
data_dir: Some(Uuid::new_v4()),
etag: Some("def456".to_string()),
epoch: 0,
},
],
};
assert!(can_complete_append(&active_state));
complete_append_operation(&mut active_state).expect("complete should succeed");
assert_eq!(active_state.state, AppendStateKind::SegmentedSealed);
assert_eq!(active_state.committed_length, 1500); // 1000 + 200 + 300
assert!(active_state.pending_segments.is_empty());
assert_eq!(active_state.epoch, 6);
// Test completing Inline state
let mut inline_state = AppendState {
state: AppendStateKind::Inline,
epoch: 2,
committed_length: 500,
..Default::default()
};
assert!(can_complete_append(&inline_state));
complete_append_operation(&mut inline_state).expect("complete should succeed");
assert_eq!(inline_state.state, AppendStateKind::SegmentedSealed);
assert_eq!(inline_state.committed_length, 500); // Unchanged
assert_eq!(inline_state.epoch, 3);
// Test completing already sealed state should fail
let mut sealed_state = AppendState {
state: AppendStateKind::SegmentedSealed,
..Default::default()
};
assert!(!can_complete_append(&sealed_state));
assert!(complete_append_operation(&mut sealed_state).is_err());
}
#[test]
fn abort_append_transitions() {
// Test aborting SegmentedActive with pending segments
let mut active_state = AppendState {
state: AppendStateKind::SegmentedActive,
epoch: 3,
committed_length: 800,
pending_segments: vec![AppendSegment {
offset: 800,
length: 400,
data_dir: Some(Uuid::new_v4()),
etag: Some("xyz789".to_string()),
epoch: 0,
}],
};
assert!(can_abort_append(&active_state));
abort_append_operation(&mut active_state).expect("abort should succeed");
assert_eq!(active_state.state, AppendStateKind::SegmentedSealed);
assert_eq!(active_state.committed_length, 800); // Unchanged, pending discarded
assert!(active_state.pending_segments.is_empty());
assert_eq!(active_state.epoch, 4);
// Test aborting InlinePendingSpill
let mut spill_state = AppendState {
state: AppendStateKind::InlinePendingSpill,
epoch: 1,
committed_length: 100,
pending_segments: vec![],
};
assert!(can_abort_append(&spill_state));
abort_append_operation(&mut spill_state).expect("abort should succeed");
assert_eq!(spill_state.state, AppendStateKind::SegmentedSealed);
assert_eq!(spill_state.committed_length, 100);
assert_eq!(spill_state.epoch, 2);
// Test aborting disabled state should fail
let mut disabled_state = AppendState {
state: AppendStateKind::Disabled,
..Default::default()
};
assert!(!can_abort_append(&disabled_state));
assert!(abort_append_operation(&mut disabled_state).is_err());
}
#[test]
fn epoch_validation() {
let state = AppendState {
state: AppendStateKind::SegmentedActive,
epoch: 10,
committed_length: 1000,
pending_segments: vec![],
};
// Valid epoch should succeed
assert!(verify_append_epoch(&state, 10).is_ok());
// Invalid epoch should fail
assert!(verify_append_epoch(&state, 9).is_err());
assert!(verify_append_epoch(&state, 11).is_err());
// Error message should contain epoch information
let error = verify_append_epoch(&state, 5).unwrap_err();
let error_msg = error.to_string();
assert!(error_msg.contains("expected epoch 5"));
assert!(error_msg.contains("found 10"));
}
#[test]
fn next_append_preparation() {
let mut state = AppendState {
state: AppendStateKind::SegmentedActive,
epoch: 5,
committed_length: 1000,
pending_segments: vec![],
};
prepare_next_append(&mut state);
assert_eq!(state.epoch, 6);
// Test saturation behavior
let mut max_state = AppendState {
epoch: u64::MAX,
..Default::default()
};
prepare_next_append(&mut max_state);
assert_eq!(max_state.epoch, u64::MAX); // Should saturate, not overflow
}
#[test]
fn segment_validation() {
let state = AppendState {
state: AppendStateKind::SegmentedActive,
epoch: 3,
committed_length: 1000,
pending_segments: vec![
AppendSegment {
offset: 1000,
length: 200,
data_dir: Some(Uuid::new_v4()),
etag: Some("abc123".to_string()),
epoch: 0,
},
AppendSegment {
offset: 1300,
length: 300,
data_dir: Some(Uuid::new_v4()),
etag: Some("def456".to_string()),
epoch: 0,
},
],
};
// Valid segment after existing segments
assert!(validate_new_segment(&state, 1600, 100).is_ok());
// Valid segment filling gap between committed and first pending
assert!(validate_new_segment(&state, 1200, 100).is_ok());
// Invalid segment overlapping with committed data
assert!(validate_new_segment(&state, 900, 200).is_err());
let error = validate_new_segment(&state, 900, 200).unwrap_err();
assert!(error.to_string().contains("overlaps with committed data"));
// Invalid segment overlapping with first pending segment
assert!(validate_new_segment(&state, 1100, 100).is_err());
let error = validate_new_segment(&state, 1100, 100).unwrap_err();
assert!(error.to_string().contains("overlaps with existing segment"));
// Invalid segment overlapping with second pending segment
assert!(validate_new_segment(&state, 1400, 100).is_err());
// Edge case: segment exactly touching committed data (should be valid)
assert!(validate_new_segment(&state, 1000, 0).is_ok());
// Edge case: segment exactly touching existing segment (should be valid)
assert!(validate_new_segment(&state, 1200, 0).is_ok());
}
#[test]
fn segment_validation_edge_cases() {
let empty_state = AppendState {
state: AppendStateKind::SegmentedActive,
epoch: 1,
committed_length: 500,
pending_segments: vec![],
};
// First segment after committed data
assert!(validate_new_segment(&empty_state, 500, 100).is_ok());
assert!(validate_new_segment(&empty_state, 600, 200).is_ok());
// Zero-length segments (edge case)
assert!(validate_new_segment(&empty_state, 500, 0).is_ok());
// Segment exactly at committed boundary
assert!(validate_new_segment(&empty_state, 499, 1).is_err());
assert!(validate_new_segment(&empty_state, 500, 1).is_ok());
}
}
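
For reviewers tracing the revert, here is a minimal sketch (not part of the change set) of how the deleted metadata helpers above fit together, assuming the module's items are in scope as in its own tests:

```rust
// Hypothetical round-trip of the reverted append-state helpers.
fn append_state_roundtrip_example() -> Result<()> {
    let mut metadata = std::collections::HashMap::new();
    let mut state = AppendState {
        state: AppendStateKind::SegmentedActive,
        epoch: 1,
        committed_length: 1024,
        pending_segments: vec![AppendSegment {
            offset: 1024,
            length: 256,
            data_dir: None,
            etag: None,
            epoch: 0,
        }],
    };

    // Persist into metadata and read it back.
    set_append_state(&mut metadata, &state)?;
    assert_eq!(get_append_state(&metadata)?, Some(state.clone()));

    // Sealing folds the pending segment into committed_length and bumps the epoch.
    complete_append_operation(&mut state)?;
    assert_eq!(state.committed_length, 1280);
    assert_eq!(state.state, AppendStateKind::SegmentedSealed);
    assert_eq!(state.epoch, 2);
    Ok(())
}
```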


@@ -494,96 +494,6 @@ impl FileInfo {
ReplicationStatusType::Empty
}
}
/// Get the append state for this FileInfo, with migration compatibility
pub fn get_append_state(&self) -> crate::append::AppendState {
use crate::append::{AppendState, AppendStateKind, get_append_state};
// Try to load from metadata first
if let Ok(Some(state)) = get_append_state(&self.metadata) {
return state;
}
// Migration compatibility: determine state based on existing data
if self.inline_data() {
// Has inline data, treat as Inline state
AppendState {
state: AppendStateKind::Inline,
epoch: 0,
committed_length: self.size,
pending_segments: Vec::new(),
}
} else {
// No inline data, treat as SegmentedSealed (traditional object)
AppendState {
state: AppendStateKind::SegmentedSealed,
epoch: 0,
committed_length: self.size,
pending_segments: Vec::new(),
}
}
}
/// Set the append state for this FileInfo
pub fn set_append_state(&mut self, state: &crate::append::AppendState) -> crate::error::Result<()> {
crate::append::set_append_state(&mut self.metadata, state)
}
/// Check if this object supports append operations
pub fn is_appendable(&self) -> bool {
use crate::append::AppendStateKind;
match self.get_append_state().state {
AppendStateKind::Disabled => false,
AppendStateKind::Inline | AppendStateKind::InlinePendingSpill | AppendStateKind::SegmentedActive => true,
AppendStateKind::SegmentedSealed => false,
}
}
/// Check if this object has pending append operations
pub fn has_pending_appends(&self) -> bool {
use crate::append::AppendStateKind;
matches!(
self.get_append_state().state,
AppendStateKind::InlinePendingSpill | AppendStateKind::SegmentedActive
)
}
/// Complete all pending append operations and seal the object
pub fn complete_append(&mut self) -> crate::error::Result<()> {
let mut append_state = self.get_append_state();
crate::append::complete_append_operation(&mut append_state)?;
self.set_append_state(&append_state)?;
// Update file size to reflect completed operation
if append_state.state == crate::append::AppendStateKind::SegmentedSealed {
self.size = append_state.committed_length;
}
Ok(())
}
/// Abort all pending append operations and seal the object
pub fn abort_append(&mut self) -> crate::error::Result<()> {
let mut append_state = self.get_append_state();
crate::append::abort_append_operation(&mut append_state)?;
self.set_append_state(&append_state)?;
// Update file size to only include committed data
if append_state.state == crate::append::AppendStateKind::SegmentedSealed {
self.size = append_state.committed_length;
}
Ok(())
}
/// Check if append operations can be completed for this object
pub fn can_complete_append(&self) -> bool {
crate::append::can_complete_append(&self.get_append_state())
}
/// Check if append operations can be aborted for this object
pub fn can_abort_append(&self) -> bool {
crate::append::can_abort_append(&self.get_append_state())
}
}
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
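
As a companion sketch (again not from the change set), the FileInfo-level wrappers being removed above could be driven roughly like this, with a legacy inline object migrating to AppendStateKind::Inline and then being sealed:

```rust
// Hypothetical usage of the removed FileInfo append wrappers.
fn seal_legacy_inline_object() -> crate::error::Result<()> {
    let mut fi = FileInfo {
        size: 1024,
        ..Default::default()
    };
    fi.set_inline_data(); // migration path: inline data is treated as an Inline append state

    assert!(fi.is_appendable());
    assert!(!fi.has_pending_appends());

    fi.complete_append()?; // seals the object; size stays at the committed length
    assert_eq!(fi.size, 1024);
    assert!(!fi.is_appendable());
    Ok(())
}
```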


@@ -169,6 +169,9 @@ impl InlineData {
}
pub fn remove(&mut self, remove_keys: Vec<Uuid>) -> Result<bool> {
let buf = self.after_version();
if buf.is_empty() {
return Ok(false);
}
let mut cur = Cursor::new(buf);
let mut fields_len = rmp::decode::read_map_len(&mut cur)? as usize;


@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod append;
mod error;
pub mod fileinfo;
mod filemeta;
@@ -23,7 +22,6 @@ mod replication;
pub mod test_data;
pub use append::*;
pub use error::*;
pub use fileinfo::*;
pub use filemeta::*;


@@ -87,7 +87,7 @@ pub fn generate_jwt<T: Serialize>(claims: &T, secret: &str) -> std::result::Resu
jsonwebtoken::encode(&header, &claims, &EncodingKey::from_secret(secret.as_bytes()))
}
pub fn extract_claims<T: DeserializeOwned>(
pub fn extract_claims<T: DeserializeOwned + Clone>(
token: &str,
secret: &str,
) -> std::result::Result<jsonwebtoken::TokenData<T>, jsonwebtoken::errors::Error> {
@@ -193,7 +193,7 @@ mod tests {
assert_eq!(error.to_string(), "secret key length is too short");
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
struct Claims {
sub: String,
company: String,


@@ -32,14 +32,17 @@ rustfs-utils = { workspace = true, features = ["path", "sys"] }
rustfs-targets = { workspace = true }
async-trait = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
dashmap = { workspace = true }
futures = { workspace = true }
form_urlencoded = { workspace = true }
hashbrown = { workspace = true }
once_cell = { workspace = true }
quick-xml = { workspace = true, features = ["serialize", "async-tokio"] }
rayon = { workspace = true }
rumqttc = { workspace = true }
rustc-hash = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
starshard = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["rt-multi-thread", "sync", "time"] }
tracing = { workspace = true }


@@ -13,9 +13,9 @@
// limitations under the License.
use chrono::{DateTime, Utc};
use hashbrown::HashMap;
use rustfs_targets::EventName;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use url::form_urlencoded;
/// Represents the identity of the user who triggered the event


@@ -14,6 +14,7 @@
use crate::Event;
use async_trait::async_trait;
use hashbrown::HashSet;
use rumqttc::QoS;
use rustfs_config::notify::{ENV_NOTIFY_MQTT_KEYS, ENV_NOTIFY_WEBHOOK_KEYS, NOTIFY_MQTT_KEYS, NOTIFY_WEBHOOK_KEYS};
use rustfs_config::{
@@ -27,7 +28,6 @@ use rustfs_targets::{
error::TargetError,
target::{mqtt::MQTTArgs, webhook::WebhookArgs},
};
use std::collections::HashSet;
use std::time::Duration;
use tracing::{debug, warn};
use url::Url;


@@ -15,13 +15,13 @@
use crate::{
Event, error::NotificationError, notifier::EventNotifier, registry::TargetRegistry, rules::BucketNotificationConfig, stream,
};
use hashbrown::HashMap;
use rustfs_ecstore::config::{Config, KVS};
use rustfs_targets::EventName;
use rustfs_targets::arn::TargetID;
use rustfs_targets::store::{Key, Store};
use rustfs_targets::target::EntityTarget;
use rustfs_targets::{StoreError, Target};
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::{Duration, Instant};
@@ -212,11 +212,6 @@ impl NotificationSystem {
return Ok(());
}
// if let Err(e) = rustfs_ecstore::config::com::save_server_config(store, &new_config).await {
// error!("Failed to save config: {}", e);
// return Err(NotificationError::SaveConfig(e.to_string()));
// }
info!("Configuration updated. Reloading system...");
self.reload_config(new_config).await
}


@@ -13,19 +13,20 @@
// limitations under the License.
use crate::{error::NotificationError, event::Event, rules::RulesMap};
use dashmap::DashMap;
use hashbrown::HashMap;
use rustfs_targets::EventName;
use rustfs_targets::Target;
use rustfs_targets::arn::TargetID;
use rustfs_targets::target::EntityTarget;
use std::{collections::HashMap, sync::Arc};
use starshard::AsyncShardedHashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::{debug, error, info, instrument, warn};
/// Manages event notification to targets based on rules
pub struct EventNotifier {
target_list: Arc<RwLock<TargetList>>,
bucket_rules_map: Arc<DashMap<String, RulesMap>>,
bucket_rules_map: Arc<AsyncShardedHashMap<String, RulesMap, rustc_hash::FxBuildHasher>>,
}
impl Default for EventNotifier {
@@ -39,7 +40,7 @@ impl EventNotifier {
pub fn new() -> Self {
EventNotifier {
target_list: Arc::new(RwLock::new(TargetList::new())),
bucket_rules_map: Arc::new(DashMap::new()),
bucket_rules_map: Arc::new(AsyncShardedHashMap::new(0)),
}
}
@@ -58,7 +59,7 @@ impl EventNotifier {
/// This method removes all rules associated with the specified bucket name.
/// It will log a message indicating the removal of rules.
pub async fn remove_rules_map(&self, bucket_name: &str) {
if self.bucket_rules_map.remove(bucket_name).is_some() {
if self.bucket_rules_map.remove(&bucket_name.to_string()).await.is_some() {
info!("Removed all notification rules for bucket: {}", bucket_name);
}
}
@@ -76,21 +77,21 @@ impl EventNotifier {
/// Adds a rules map for a bucket
pub async fn add_rules_map(&self, bucket_name: &str, rules_map: RulesMap) {
if rules_map.is_empty() {
self.bucket_rules_map.remove(bucket_name);
self.bucket_rules_map.remove(&bucket_name.to_string()).await;
} else {
self.bucket_rules_map.insert(bucket_name.to_string(), rules_map);
self.bucket_rules_map.insert(bucket_name.to_string(), rules_map).await;
}
info!("Added rules for bucket: {}", bucket_name);
}
/// Gets the rules map for a specific bucket.
pub fn get_rules_map(&self, bucket_name: &str) -> Option<RulesMap> {
self.bucket_rules_map.get(bucket_name).map(|r| r.clone())
pub async fn get_rules_map(&self, bucket_name: &str) -> Option<RulesMap> {
self.bucket_rules_map.get(&bucket_name.to_string()).await
}
/// Removes notification rules for a bucket
pub async fn remove_notification(&self, bucket_name: &str) {
self.bucket_rules_map.remove(bucket_name);
self.bucket_rules_map.remove(&bucket_name.to_string()).await;
info!("Removed notification rules for bucket: {}", bucket_name);
}
@@ -113,7 +114,7 @@ impl EventNotifier {
/// Return `true` if at least one matching notification rule exists.
pub async fn has_subscriber(&self, bucket_name: &str, event_name: &EventName) -> bool {
// Rules to check if the bucket exists
if let Some(rules_map) = self.bucket_rules_map.get(bucket_name) {
if let Some(rules_map) = self.bucket_rules_map.get(&bucket_name.to_string()).await {
// A composite event (such as ObjectCreatedAll) is expanded to multiple single events.
// We need to check whether any of these single events have the rules configured.
rules_map.has_subscriber(event_name)
@@ -129,7 +130,7 @@ impl EventNotifier {
let bucket_name = &event.s3.bucket.name;
let object_key = &event.s3.object.key;
let event_name = event.event_name;
if let Some(rules) = self.bucket_rules_map.get(bucket_name) {
if let Some(rules) = self.bucket_rules_map.get(bucket_name).await {
let target_ids = rules.match_rules(event_name, object_key);
if target_ids.is_empty() {
debug!("No matching targets for event in bucket: {}", bucket_name);


@@ -15,13 +15,13 @@
use crate::Event;
use crate::factory::{MQTTTargetFactory, TargetFactory, WebhookTargetFactory};
use futures::stream::{FuturesUnordered, StreamExt};
use hashbrown::{HashMap, HashSet};
use rustfs_config::notify::NOTIFY_ROUTE_PREFIX;
use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX};
use rustfs_ecstore::config::{Config, KVS};
use rustfs_targets::Target;
use rustfs_targets::TargetError;
use rustfs_targets::target::ChannelTargetType;
use std::collections::{HashMap, HashSet};
use tracing::{debug, error, info, warn};
/// Registry for managing target factories


@@ -17,10 +17,10 @@ use super::xml_config::ParseConfigError as BucketNotificationConfigError;
use crate::rules::NotificationConfiguration;
use crate::rules::pattern_rules;
use crate::rules::target_id_set;
use hashbrown::HashMap;
use rustfs_targets::EventName;
use rustfs_targets::arn::TargetID;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::io::Read;
/// Configuration for bucket notifications.


@@ -14,9 +14,10 @@
use super::pattern;
use super::target_id_set::TargetIdSet;
use hashbrown::HashMap;
use rayon::prelude::*;
use rustfs_targets::arn::TargetID;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// PatternRules - Event rule that maps object name patterns to TargetID collections.
/// `event.Rules` (map[string]TargetIDSet) in the Go code
@@ -43,13 +44,19 @@ impl PatternRules {
/// Returns all TargetIDs that match the object name.
pub fn match_targets(&self, object_name: &str) -> TargetIdSet {
let mut matched_targets = TargetIdSet::new();
for (pattern_str, target_set) in &self.rules {
if pattern::match_simple(pattern_str, object_name) {
matched_targets.extend(target_set.iter().cloned());
}
}
matched_targets
self.rules
.par_iter()
.filter_map(|(pattern_str, target_set)| {
if pattern::match_simple(pattern_str, object_name) {
Some(target_set.iter().cloned().collect::<TargetIdSet>())
} else {
None
}
})
.reduce(TargetIdSet::new, |mut acc, set| {
acc.extend(set);
acc
})
}
pub fn is_empty(&self) -> bool {
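
The rewrite of match_targets above swaps the sequential loop for rayon's par_iter + filter_map + reduce. A self-contained sketch of that pattern, with plain strings standing in for pattern::match_simple and TargetID (both hypothetical stand-ins):

```rust
use rayon::prelude::*;
use std::collections::{HashMap, HashSet};

// Each rule maps a pattern to a set of target names; matching sets are merged in parallel.
fn matched_targets(rules: &HashMap<String, HashSet<String>>, object_name: &str) -> HashSet<String> {
    rules
        .par_iter()
        .filter_map(|(pattern, targets)| {
            // stand-in for pattern::match_simple(pattern, object_name)
            object_name.starts_with(pattern.as_str()).then(|| targets.clone())
        })
        .reduce(HashSet::new, |mut acc, set| {
            acc.extend(set);
            acc
        })
}
```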


@@ -14,10 +14,10 @@
use super::pattern_rules::PatternRules;
use super::target_id_set::TargetIdSet;
use hashbrown::HashMap;
use rustfs_targets::EventName;
use rustfs_targets::arn::TargetID;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// RulesMap - Rule mapping organized by event name.
/// `event.RulesMap` (map[Name]Rules) in the corresponding Go code


@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use hashbrown::HashSet;
use rustfs_targets::arn::TargetID;
use std::collections::HashSet;
/// TargetIDSet - A collection representation of TargetID.
pub type TargetIdSet = HashSet<TargetID>;


@@ -13,10 +13,10 @@
// limitations under the License.
use super::pattern;
use hashbrown::HashSet;
use rustfs_targets::EventName;
use rustfs_targets::arn::{ARN, ArnError, TargetIDError};
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::io::Read;
use thiserror::Error;


@@ -228,7 +228,7 @@ mod tests {
use jsonwebtoken::{Algorithm, DecodingKey, Validation, decode};
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize, Clone)]
struct Claims {
sub: String,
exp: usize,


@@ -59,7 +59,7 @@ pub fn generate_jwt<T: Serialize>(claims: &T, secret: &str) -> std::result::Resu
jsonwebtoken::encode(&header, &claims, &EncodingKey::from_secret(secret.as_bytes()))
}
pub fn extract_claims<T: DeserializeOwned>(
pub fn extract_claims<T: DeserializeOwned + Clone>(
token: &str,
secret: &str,
) -> std::result::Result<jsonwebtoken::TokenData<T>, jsonwebtoken::errors::Error> {


@@ -13,7 +13,7 @@ documentation = "https://docs.rs/rustfs-target/latest/rustfs_target/"
[dependencies]
rustfs-config = { workspace = true, features = ["notify", "constants", "audit"] }
rustfs-utils = { workspace = true, features = ["sys"] }
rustfs-utils = { workspace = true, features = ["sys", "notify"] }
async-trait = { workspace = true }
reqwest = { workspace = true }
rumqttc = { workspace = true }


@@ -0,0 +1,70 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Check if MQTT Broker is available
/// # Arguments
/// * `broker_url` - URL of MQTT Broker, for example `mqtt://localhost:1883`
/// * `topic` - Topic for testing connections
/// # Returns
/// * `Ok(())` - If the connection is successful
/// * `Err(String)` - If the connection fails, contains an error message
///
/// # Example
/// ```rust,no_run
/// #[tokio::main]
/// async fn main() {
/// let result = rustfs_targets::check_mqtt_broker_available("mqtt://localhost:1883", "test/topic").await;
/// if result.is_ok() {
/// println!("MQTT Broker is available");
/// } else {
/// println!("MQTT Broker is not available: {}", result.err().unwrap());
/// }
/// }
/// ```
/// # Note
/// Requires the `rumqttc` and `url` dependencies in `Cargo.toml`:
/// ```toml
/// [dependencies]
/// rumqttc = "0.25.0"
/// url = "2.5.7"
/// tokio = { version = "1", features = ["full"] }
/// ```
pub async fn check_mqtt_broker_available(broker_url: &str, topic: &str) -> Result<(), String> {
use rumqttc::{AsyncClient, MqttOptions, QoS};
let url = rustfs_utils::parse_url(broker_url).map_err(|e| format!("Broker URL parsing failed:{e}"))?;
let url = url.url();
match url.scheme() {
"tcp" | "ssl" | "ws" | "wss" | "mqtt" | "mqtts" | "tls" | "tcps" => {}
_ => return Err("unsupported broker url scheme".to_string()),
}
let host = url.host_str().ok_or("Broker is missing host")?;
let port = url.port().unwrap_or(1883);
let mut mqtt_options = MqttOptions::new("rustfs_check", host, port);
mqtt_options.set_keep_alive(std::time::Duration::from_secs(5));
let (client, mut eventloop) = AsyncClient::new(mqtt_options, 1);
// Try to connect and subscribe
client
.subscribe(topic, QoS::AtLeastOnce)
.await
.map_err(|e| format!("MQTT subscription failed:{e}"))?;
// Wait for eventloop to receive at least one event
match tokio::time::timeout(std::time::Duration::from_secs(3), eventloop.poll()).await {
Ok(Ok(_)) => Ok(()),
Ok(Err(e)) => Err(format!("MQTT connection failed:{e}")),
Err(_) => Err("MQTT connection timeout".to_string()),
}
}


@@ -13,11 +13,13 @@
// limitations under the License.
pub mod arn;
mod check;
pub mod error;
mod event_name;
pub mod store;
pub mod target;
pub use check::check_mqtt_broker_available;
pub use error::{StoreError, TargetError};
pub use event_name::EventName;
use serde::{Deserialize, Serialize};


@@ -324,7 +324,7 @@ async fn run_mqtt_event_loop(
Ok(Err(e)) => Err(e),
Err(_) => {
debug!(target_id = %target_id, "MQTT poll timed out (EVENT_LOOP_POLL_TIMEOUT) while not connected or status pending.");
Err(rumqttc::ConnectionError::NetworkTimeout)
Err(ConnectionError::NetworkTimeout)
}
}
} else {
@@ -376,7 +376,7 @@ async fn run_mqtt_event_loop(
connected_status.store(false, Ordering::SeqCst);
error!(target_id = %target_id, error = %e, "Error from MQTT event loop poll");
if matches!(e, rumqttc::ConnectionError::NetworkTimeout) && (!initial_connection_established || !connected_status.load(Ordering::SeqCst)) {
if matches!(e, ConnectionError::NetworkTimeout) && (!initial_connection_established || !connected_status.load(Ordering::SeqCst)) {
warn!(target_id = %target_id, "Timeout during initial poll or pending state, will retry.");
continue;
}
@@ -395,8 +395,8 @@ async fn run_mqtt_event_loop(
error!(target_id = %target_id, error = %e, "Fatal MQTT error, terminating event loop.");
break;
}
// rumqttc's eventloop.poll() may return Err and terminate after some errors,
// Or it will handle reconnection internally. The continue here will make select! wait again.
// rumqttc's eventloop.poll() may return Err and terminate after some errors,
// Or it will handle reconnection internally. To continue here will make select! wait again.
// If the error is temporary and rumqttc is handling reconnection, poll() should eventually succeed or return a different error again.
// Sleep briefly to avoid busy cycles in case of rapid failure.
tokio::time::sleep(Duration::from_secs(1)).await;


@@ -29,14 +29,16 @@ base64-simd = { workspace = true, optional = true }
blake3 = { workspace = true, optional = true }
brotli = { workspace = true, optional = true }
bytes = { workspace = true, optional = true }
crc32fast = { workspace = true }
crc32fast = { workspace = true, optional = true }
flate2 = { workspace = true, optional = true }
futures = { workspace = true, optional = true }
hashbrown = { workspace = true, optional = true }
hex-simd = { workspace = true, optional = true }
highway = { workspace = true, optional = true }
hickory-resolver = { workspace = true, optional = true }
hmac = { workspace = true, optional = true }
hyper = { workspace = true, optional = true }
libc = { workspace = true, optional = true }
local-ip-address = { workspace = true, optional = true }
lz4 = { workspace = true, optional = true }
md-5 = { workspace = true, optional = true }
@@ -53,7 +55,7 @@ s3s = { workspace = true, optional = true }
serde = { workspace = true, optional = true }
sha1 = { workspace = true, optional = true }
sha2 = { workspace = true, optional = true }
convert_case = "0.8.0"
convert_case = { workspace = true, optional = true }
siphasher = { workspace = true, optional = true }
snap = { workspace = true, optional = true }
sysinfo = { workspace = true, optional = true }
@@ -83,13 +85,13 @@ tls = ["dep:rustls", "dep:rustls-pemfile", "dep:rustls-pki-types"] # tls charac
net = ["ip", "dep:url", "dep:netif", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:hickory-resolver", "dep:moka", "dep:thiserror", "dep:tokio"] # network features with DNS resolver
io = ["dep:tokio"]
path = []
notify = ["dep:hyper", "dep:s3s"] # file system notification features
notify = ["dep:hyper", "dep:s3s", "dep:hashbrown", "dep:thiserror", "dep:serde", "dep:libc"] # file system notification features
compress = ["dep:flate2", "dep:brotli", "dep:snap", "dep:lz4", "dep:zstd"]
string = ["dep:regex", "dep:rand"]
crypto = ["dep:base64-simd", "dep:hex-simd", "dep:hmac", "dep:hyper", "dep:sha1"]
hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde", "dep:siphasher", "dep:hex-simd", "dep:base64-simd"]
hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde", "dep:siphasher", "dep:hex-simd", "dep:base64-simd", "dep:crc32fast"]
os = ["dep:nix", "dep:tempfile", "winapi"] # operating system utilities
integration = [] # integration test features
sys = ["dep:sysinfo"] # system information features
http = []
full = ["ip", "tls", "net", "io", "hash", "os", "integration", "path", "crypto", "string", "compress", "sys", "notify","http"] # all features
http = ["dep:convert_case"]
full = ["ip", "tls", "net", "io", "hash", "os", "integration", "path", "crypto", "string", "compress", "sys", "notify", "http"] # all features


@@ -420,7 +420,6 @@ mod tests {
}
#[tokio::test]
#[ignore]
async fn test_invalid_domain_resolution() {
let resolver = LayeredDnsResolver::new().await.unwrap();


@@ -16,14 +16,16 @@ use bytes::Bytes;
use futures::pin_mut;
use futures::{Stream, StreamExt};
use std::io::Error;
use std::net::Ipv6Addr;
use std::sync::{LazyLock, Mutex};
use std::{
collections::{HashMap, HashSet},
fmt::Display,
net::{IpAddr, SocketAddr, TcpListener, ToSocketAddrs},
time::{Duration, Instant},
};
use std::{
net::Ipv6Addr,
sync::{Arc, LazyLock, Mutex, RwLock},
};
use tracing::{error, info};
use transform_stream::AsyncTryStream;
use url::{Host, Url};
@@ -51,6 +53,41 @@ impl DnsCacheEntry {
static DNS_CACHE: LazyLock<Mutex<HashMap<String, DnsCacheEntry>>> = LazyLock::new(|| Mutex::new(HashMap::new()));
const DNS_CACHE_TTL: Duration = Duration::from_secs(300); // 5 minutes
type DynDnsResolver = dyn Fn(&str) -> std::io::Result<HashSet<IpAddr>> + Send + Sync + 'static;
static CUSTOM_DNS_RESOLVER: LazyLock<RwLock<Option<Arc<DynDnsResolver>>>> = LazyLock::new(|| RwLock::new(None));
fn resolve_domain(domain: &str) -> std::io::Result<HashSet<IpAddr>> {
if let Some(resolver) = CUSTOM_DNS_RESOLVER.read().unwrap().clone() {
return resolver(domain);
}
(domain, 0)
.to_socket_addrs()
.map(|v| v.map(|v| v.ip()).collect::<HashSet<_>>())
.map_err(Error::other)
}
#[cfg(test)]
fn clear_dns_cache() {
if let Ok(mut cache) = DNS_CACHE.lock() {
cache.clear();
}
}
#[cfg(test)]
pub fn set_mock_dns_resolver<F>(resolver: F)
where
F: Fn(&str) -> std::io::Result<HashSet<IpAddr>> + Send + Sync + 'static,
{
*CUSTOM_DNS_RESOLVER.write().unwrap() = Some(Arc::new(resolver));
clear_dns_cache();
}
#[cfg(test)]
pub fn reset_dns_resolver() {
*CUSTOM_DNS_RESOLVER.write().unwrap() = None;
clear_dns_cache();
}
/// helper for validating if the provided arg is an ip address.
pub fn is_socket_addr(addr: &str) -> bool {
@@ -93,10 +130,7 @@ pub fn is_local_host(host: Host<&str>, port: u16, local_port: u16) -> std::io::R
let local_set: HashSet<IpAddr> = LOCAL_IPS.iter().copied().collect();
let is_local_host = match host {
Host::Domain(domain) => {
let ips = match (domain, 0).to_socket_addrs().map(|v| v.map(|v| v.ip()).collect::<Vec<_>>()) {
Ok(ips) => ips,
Err(err) => return Err(Error::other(err)),
};
let ips = resolve_domain(domain)?.into_iter().collect::<Vec<_>>();
ips.iter().any(|ip| local_set.contains(ip))
}
@@ -130,30 +164,31 @@ pub async fn get_host_ip(host: Host<&str>) -> std::io::Result<HashSet<IpAddr>> {
// }
// }
// Check cache first
if let Ok(mut cache) = DNS_CACHE.lock() {
if let Some(entry) = cache.get(domain) {
if !entry.is_expired(DNS_CACHE_TTL) {
return Ok(entry.ips.clone());
if CUSTOM_DNS_RESOLVER.read().unwrap().is_none() {
if let Ok(mut cache) = DNS_CACHE.lock() {
if let Some(entry) = cache.get(domain) {
if !entry.is_expired(DNS_CACHE_TTL) {
return Ok(entry.ips.clone());
}
// Remove expired entry
cache.remove(domain);
}
// Remove expired entry
cache.remove(domain);
}
}
info!("Cache miss for domain {domain}, querying system resolver.");
// Fallback to standard resolution when DNS resolver is not available
match (domain, 0)
.to_socket_addrs()
.map(|v| v.map(|v| v.ip()).collect::<HashSet<_>>())
{
match resolve_domain(domain) {
Ok(ips) => {
// Cache the result
if let Ok(mut cache) = DNS_CACHE.lock() {
cache.insert(domain.to_string(), DnsCacheEntry::new(ips.clone()));
// Limit cache size to prevent memory bloat
if cache.len() > 1000 {
cache.retain(|_, v| !v.is_expired(DNS_CACHE_TTL));
if CUSTOM_DNS_RESOLVER.read().unwrap().is_none() {
// Cache the result
if let Ok(mut cache) = DNS_CACHE.lock() {
cache.insert(domain.to_string(), DnsCacheEntry::new(ips.clone()));
// Limit cache size to prevent memory bloat
if cache.len() > 1000 {
cache.retain(|_, v| !v.is_expired(DNS_CACHE_TTL));
}
}
}
info!("System query for domain {domain}: {:?}", ips);
@@ -292,6 +327,21 @@ mod test {
use super::*;
use crate::init_global_dns_resolver;
use std::net::{Ipv4Addr, Ipv6Addr};
use std::{collections::HashSet, io::Error as IoError};
fn mock_resolver(domain: &str) -> std::io::Result<HashSet<IpAddr>> {
match domain {
"localhost" => Ok([
IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)),
]
.into_iter()
.collect()),
"example.org" => Ok([IpAddr::V4(Ipv4Addr::new(192, 0, 2, 10))].into_iter().collect()),
"invalid.nonexistent.domain.example" => Err(IoError::other("mock DNS failure")),
_ => Ok(HashSet::new()),
}
}
#[test]
fn test_is_socket_addr() {
@@ -349,7 +399,7 @@ mod test {
let invalid_cases = [
("localhost", "invalid socket address"),
("", "invalid socket address"),
("example.org:54321", "host in server address should be this server"),
("203.0.113.1:54321", "host in server address should be this server"),
("8.8.8.8:53", "host in server address should be this server"),
(":-10", "invalid port value"),
("invalid:port", "invalid port value"),
@@ -369,6 +419,8 @@ mod test {
#[test]
fn test_is_local_host() {
set_mock_dns_resolver(mock_resolver);
// Test localhost domain
let localhost_host = Host::Domain("localhost");
assert!(is_local_host(localhost_host, 0, 0).unwrap());
@@ -393,10 +445,13 @@ mod test {
// Test invalid domain should return error
let invalid_host = Host::Domain("invalid.nonexistent.domain.example");
assert!(is_local_host(invalid_host, 0, 0).is_err());
reset_dns_resolver();
}
#[tokio::test]
async fn test_get_host_ip() {
set_mock_dns_resolver(mock_resolver);
match init_global_dns_resolver().await {
Ok(_) => {}
Err(e) => {
@@ -427,16 +482,9 @@ mod test {
// Test invalid domain
let invalid_host = Host::Domain("invalid.nonexistent.domain.example");
match get_host_ip(invalid_host.clone()).await {
Ok(ips) => {
// Depending on DNS resolver behavior, it might return empty set or error
assert!(ips.is_empty(), "Expected empty IP set for invalid domain, got: {ips:?}");
}
Err(_) => {
error!("Expected error for invalid domain");
} // Expected error
}
assert!(get_host_ip(invalid_host).await.is_err());
reset_dns_resolver();
}
#[test]
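
Beyond the tests shown in this diff, the resolver hooks are designed for an install / exercise / reset sequence. A minimal, hypothetical extra test in the same module could look like this:

```rust
#[test]
fn is_local_host_with_mocked_resolver() {
    // Install a mock that only knows about localhost.
    set_mock_dns_resolver(|domain| {
        let mut ips = HashSet::new();
        if domain == "localhost" {
            ips.insert(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)));
        }
        Ok(ips)
    });

    assert!(is_local_host(Host::Domain("localhost"), 0, 0).unwrap());

    // Restore the default resolver so later tests hit the real system path.
    reset_dns_resolver();
}
```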


@@ -12,9 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod net;
use hashbrown::HashMap;
use hyper::HeaderMap;
use s3s::{S3Request, S3Response};
use std::collections::HashMap;
pub use net::*;
/// Extract request parameters from S3Request, mainly header information.
#[allow(dead_code)]


@@ -0,0 +1,533 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use regex::Regex;
use serde::{Deserialize, Serialize};
use std::net::IpAddr;
use std::path::Path;
use std::sync::LazyLock;
use thiserror::Error;
use url::Url;
// Lazy static for the host label regex.
static HOST_LABEL_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?$").unwrap());
/// NetError represents errors that can occur in network operations.
#[derive(Error, Debug)]
pub enum NetError {
#[error("invalid argument")]
InvalidArgument,
#[error("invalid hostname")]
InvalidHost,
#[error("missing '[' in host")]
MissingBracket,
#[error("parse error: {0}")]
ParseError(String),
#[error("unexpected scheme: {0}")]
UnexpectedScheme(String),
#[error("scheme appears with empty host")]
SchemeWithEmptyHost,
}
// Host represents a network host with IP/name and port.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Host {
pub name: String,
pub port: Option<u16>, // Using Option<u16> to represent if port is set, similar to IsPortSet.
}
// Implementation of Host methods.
impl Host {
// is_empty returns true if the host name is empty.
pub fn is_empty(&self) -> bool {
self.name.is_empty()
}
// equal checks if two hosts are equal by comparing their string representations.
pub fn equal(&self, other: &Host) -> bool {
self.to_string() == other.to_string()
}
}
impl std::fmt::Display for Host {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.port {
Some(p) => write!(f, "{}:{}", self.name, p),
None => write!(f, "{}", self.name),
}
}
}
// parse_host parses a string into a Host, with validation similar to Go's ParseHost.
pub fn parse_host(s: &str) -> Result<Host, NetError> {
if s.is_empty() {
return Err(NetError::InvalidArgument);
}
// is_valid_host validates the host string, checking for IP or hostname validity.
let is_valid_host = |host: &str| -> bool {
if host.is_empty() {
return true;
}
if host.parse::<IpAddr>().is_ok() {
return true;
}
if !(1..=253).contains(&host.len()) {
return false;
}
for (i, label) in host.split('.').enumerate() {
if i + 1 == host.split('.').count() && label.is_empty() {
continue;
}
if !(1..=63).contains(&label.len()) || !HOST_LABEL_REGEX.is_match(label) {
return false;
}
}
true
};
// Split host and port, similar to net.SplitHostPort.
let (host_str, port_str) = s.rsplit_once(':').map_or((s, ""), |(h, p)| (h, p));
let port = if !port_str.is_empty() {
Some(port_str.parse().map_err(|_| NetError::ParseError(port_str.to_string()))?)
} else {
None
};
// Trim IPv6 brackets if present.
let host = trim_ipv6(host_str)?;
// Handle IPv6 zone identifier.
let trimmed_host = host.split('%').next().unwrap_or(&host);
if !is_valid_host(trimmed_host) {
return Err(NetError::InvalidHost);
}
Ok(Host { name: host, port })
}
// trim_ipv6 removes square brackets from IPv6 addresses, similar to Go's trimIPv6.
fn trim_ipv6(host: &str) -> Result<String, NetError> {
if host.ends_with(']') {
if !host.starts_with('[') {
return Err(NetError::MissingBracket);
}
Ok(host[1..host.len() - 1].to_string())
} else {
Ok(host.to_string())
}
}
// URL is a wrapper around url::Url for custom handling.
#[derive(Debug, Clone)]
pub struct ParsedURL(pub Url);
impl ParsedURL {
/// is_empty returns true if the URL is empty or "about:blank".
pub fn is_empty(&self) -> bool {
self.0.as_str() == "" || (self.0.scheme() == "about" && self.0.path() == "blank")
}
/// hostname returns the hostname of the URL.
pub fn hostname(&self) -> String {
self.0.host_str().unwrap_or("").to_string()
}
/// port returns the port of the URL as a string, defaulting to "80" for http and "443" for https if not set.
pub fn port(&self) -> String {
match self.0.port() {
Some(p) => p.to_string(),
None => match self.0.scheme() {
"http" => "80".to_string(),
"https" => "443".to_string(),
_ => "".to_string(),
},
}
}
/// scheme returns the scheme of the URL.
pub fn scheme(&self) -> &str {
self.0.scheme()
}
/// url returns a reference to the underlying Url.
pub fn url(&self) -> &Url {
&self.0
}
}
impl std::fmt::Display for ParsedURL {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut url = self.0.clone();
if let Some(host) = url.host_str().map(|h| h.to_string()) {
if let Some(port) = url.port() {
if (url.scheme() == "http" && port == 80) || (url.scheme() == "https" && port == 443) {
url.set_host(Some(&host)).unwrap();
url.set_port(None).unwrap();
}
}
}
let mut s = url.to_string();
// If the URL ends with a slash and the path is just "/", remove the trailing slash.
if s.ends_with('/') && url.path() == "/" {
s.pop();
}
write!(f, "{}", s)
}
}
impl serde::Serialize for ParsedURL {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
impl<'de> serde::Deserialize<'de> for ParsedURL {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s: String = serde::Deserialize::deserialize(deserializer)?;
if s.is_empty() {
Ok(ParsedURL(Url::parse("about:blank").unwrap()))
} else {
parse_url(&s).map_err(serde::de::Error::custom)
}
}
}
// parse_url parses a string into a ParsedURL, with host validation and path cleaning.
pub fn parse_url(s: &str) -> Result<ParsedURL, NetError> {
if let Some(scheme_end) = s.find("://") {
if s[scheme_end + 3..].starts_with('/') {
let scheme = &s[..scheme_end];
if !scheme.is_empty() {
return Err(NetError::SchemeWithEmptyHost);
}
}
}
let mut uu = Url::parse(s).map_err(|e| NetError::ParseError(e.to_string()))?;
if uu.host_str().is_none_or(|h| h.is_empty()) {
if uu.scheme() != "" {
return Err(NetError::SchemeWithEmptyHost);
}
} else {
let port_str = uu.port().map(|p| p.to_string()).unwrap_or_else(|| match uu.scheme() {
"http" => "80".to_string(),
"https" => "443".to_string(),
_ => "".to_string(),
});
if !port_str.is_empty() {
let host_port = format!("{}:{}", uu.host_str().unwrap(), port_str);
parse_host(&host_port)?; // Validate host.
}
}
// Clean path: Use Url's path_segments to normalize.
if !uu.path().is_empty() {
// Url automatically cleans paths, but we ensure trailing slash if original had it.
let mut cleaned_path = String::new();
for comp in Path::new(uu.path()).components() {
use std::path::Component;
match comp {
Component::RootDir => cleaned_path.push('/'),
Component::Normal(s) => {
if !cleaned_path.ends_with('/') {
cleaned_path.push('/');
}
cleaned_path.push_str(&s.to_string_lossy());
}
_ => {}
}
}
if s.ends_with('/') && !cleaned_path.ends_with('/') {
cleaned_path.push('/');
}
if cleaned_path.is_empty() {
cleaned_path.push('/');
}
uu.set_path(&cleaned_path);
}
Ok(ParsedURL(uu))
}
#[allow(dead_code)]
/// parse_http_url parses a string into a ParsedURL, ensuring the scheme is http or https.
pub fn parse_http_url(s: &str) -> Result<ParsedURL, NetError> {
let u = parse_url(s)?;
match u.0.scheme() {
"http" | "https" => Ok(u),
_ => Err(NetError::UnexpectedScheme(u.0.scheme().to_string())),
}
}
#[allow(dead_code)]
/// is_network_or_host_down checks if an error indicates network or host down, considering timeouts.
pub fn is_network_or_host_down(err: &std::io::Error, expect_timeouts: bool) -> bool {
if err.kind() == std::io::ErrorKind::TimedOut {
return !expect_timeouts;
}
// Simplified checks based on Go logic; adapt for Rust as needed
let err_str = err.to_string().to_lowercase();
err_str.contains("connection reset by peer")
|| err_str.contains("connection timed out")
|| err_str.contains("broken pipe")
|| err_str.contains("use of closed network connection")
}
#[allow(dead_code)]
/// is_conn_reset_err checks if an error indicates a connection reset by peer.
pub fn is_conn_reset_err(err: &std::io::Error) -> bool {
err.to_string().contains("connection reset by peer") || matches!(err.raw_os_error(), Some(libc::ECONNRESET))
}
#[allow(dead_code)]
/// is_conn_refused_err checks if an error indicates a connection refused.
pub fn is_conn_refused_err(err: &std::io::Error) -> bool {
err.to_string().contains("connection refused") || matches!(err.raw_os_error(), Some(libc::ECONNREFUSED))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn parse_host_with_empty_string_returns_error() {
let result = parse_host("");
assert!(matches!(result, Err(NetError::InvalidArgument)));
}
#[test]
fn parse_host_with_valid_ipv4() {
let result = parse_host("192.168.1.1:8080");
assert!(result.is_ok());
let host = result.unwrap();
assert_eq!(host.name, "192.168.1.1");
assert_eq!(host.port, Some(8080));
}
#[test]
fn parse_host_with_valid_hostname() {
let result = parse_host("example.com:443");
assert!(result.is_ok());
let host = result.unwrap();
assert_eq!(host.name, "example.com");
assert_eq!(host.port, Some(443));
}
#[test]
fn parse_host_with_ipv6_brackets() {
let result = parse_host("[::1]:8080");
assert!(result.is_ok());
let host = result.unwrap();
assert_eq!(host.name, "::1");
assert_eq!(host.port, Some(8080));
}
#[test]
fn parse_host_with_invalid_ipv6_missing_bracket() {
let result = parse_host("::1]:8080");
assert!(matches!(result, Err(NetError::MissingBracket)));
}
#[test]
fn parse_host_with_invalid_hostname() {
let result = parse_host("invalid..host:80");
assert!(matches!(result, Err(NetError::InvalidHost)));
}
#[test]
fn parse_host_without_port() {
let result = parse_host("example.com");
assert!(result.is_ok());
let host = result.unwrap();
assert_eq!(host.name, "example.com");
assert_eq!(host.port, None);
}
#[test]
fn host_is_empty_when_name_is_empty() {
let host = Host {
name: "".to_string(),
port: None,
};
assert!(host.is_empty());
}
#[test]
fn host_is_not_empty_when_name_present() {
let host = Host {
name: "example.com".to_string(),
port: Some(80),
};
assert!(!host.is_empty());
}
#[test]
fn host_to_string_with_port() {
let host = Host {
name: "example.com".to_string(),
port: Some(80),
};
assert_eq!(host.to_string(), "example.com:80");
}
#[test]
fn host_to_string_without_port() {
let host = Host {
name: "example.com".to_string(),
port: None,
};
assert_eq!(host.to_string(), "example.com");
}
#[test]
fn host_equal_when_same() {
let host1 = Host {
name: "example.com".to_string(),
port: Some(80),
};
let host2 = Host {
name: "example.com".to_string(),
port: Some(80),
};
assert!(host1.equal(&host2));
}
#[test]
fn host_not_equal_when_different() {
let host1 = Host {
name: "example.com".to_string(),
port: Some(80),
};
let host2 = Host {
name: "example.com".to_string(),
port: Some(443),
};
assert!(!host1.equal(&host2));
}
#[test]
fn parse_url_with_valid_http_url() {
let result = parse_url("http://example.com/path");
assert!(result.is_ok());
let parsed = result.unwrap();
assert_eq!(parsed.hostname(), "example.com");
assert_eq!(parsed.port(), "80");
}
#[test]
fn parse_url_with_valid_https_url() {
let result = parse_url("https://example.com:443/path");
assert!(result.is_ok());
let parsed = result.unwrap();
assert_eq!(parsed.hostname(), "example.com");
assert_eq!(parsed.port(), "443");
}
#[test]
fn parse_url_with_scheme_but_empty_host() {
let result = parse_url("http:///path");
assert!(matches!(result, Err(NetError::SchemeWithEmptyHost)));
}
#[test]
fn parse_url_with_invalid_host() {
let result = parse_url("http://invalid..host/path");
assert!(matches!(result, Err(NetError::InvalidHost)));
}
#[test]
fn parse_url_with_path_cleaning() {
let result = parse_url("http://example.com//path/../path/");
assert!(result.is_ok());
let parsed = result.unwrap();
assert_eq!(parsed.0.path(), "/path/");
}
#[test]
fn parse_http_url_with_http_scheme() {
let result = parse_http_url("http://example.com");
assert!(result.is_ok());
}
#[test]
fn parse_http_url_with_https_scheme() {
let result = parse_http_url("https://example.com");
assert!(result.is_ok());
}
#[test]
fn parse_http_url_with_invalid_scheme() {
let result = parse_http_url("ftp://example.com");
assert!(matches!(result, Err(NetError::UnexpectedScheme(_))));
}
#[test]
fn parsed_url_is_empty_when_url_is_empty() {
let url = ParsedURL(Url::parse("about:blank").unwrap());
assert!(url.is_empty());
}
#[test]
fn parsed_url_hostname() {
let url = ParsedURL(Url::parse("http://example.com:8080").unwrap());
assert_eq!(url.hostname(), "example.com");
}
#[test]
fn parsed_url_port() {
let url = ParsedURL(Url::parse("http://example.com:8080").unwrap());
assert_eq!(url.port(), "8080");
}
#[test]
fn parsed_url_to_string_removes_default_ports() {
let url = ParsedURL(Url::parse("http://example.com:80").unwrap());
assert_eq!(url.to_string(), "http://example.com");
}
#[test]
fn is_network_or_host_down_with_timeout() {
let err = std::io::Error::new(std::io::ErrorKind::TimedOut, "timeout");
assert!(is_network_or_host_down(&err, false));
}
#[test]
fn is_network_or_host_down_with_expected_timeout() {
let err = std::io::Error::new(std::io::ErrorKind::TimedOut, "timeout");
assert!(!is_network_or_host_down(&err, true));
}
#[test]
fn is_conn_reset_err_with_reset_message() {
let err = std::io::Error::other("connection reset by peer");
assert!(is_conn_reset_err(&err));
}
#[test]
fn is_conn_refused_err_with_refused_message() {
let err = std::io::Error::other("connection refused");
assert!(is_conn_refused_err(&err));
}
}

0
deploy/data/dev/.gitkeep Normal file
View File

0
deploy/data/pro/.gitkeep Normal file
View File

View File

@@ -30,7 +30,7 @@ services:
- "9000:9000" # S3 API port
- "9001:9001" # Console port
environment:
- RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3
- RUSTFS_VOLUMES=/data/rustfs{0..3} # Define 4 storage volumes
- RUSTFS_ADDRESS=0.0.0.0:9000
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
- RUSTFS_CONSOLE_ENABLE=true
@@ -43,12 +43,9 @@ services:
- RUSTFS_TLS_PATH=/opt/tls
- RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
volumes:
- rustfs_data_0:/data/rustfs0
- rustfs_data_1:/data/rustfs1
- rustfs_data_2:/data/rustfs2
- rustfs_data_3:/data/rustfs3
- logs_data:/app/logs
- .docker/tls/:/opt/tls # TLS configuration, you should create tls directory and put your tls files in it and then specify the path here
- deploy/data/pro:/data
- deploy/logs:/app/logs
- deploy/data/certs/:/opt/tls # TLS configuration, you should create tls directory and put your tls files in it and then specify the path here
networks:
- rustfs-network
restart: unless-stopped
@@ -78,7 +75,7 @@ services:
- "9010:9000" # S3 API port
- "9011:9001" # Console port
environment:
- RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1
- RUSTFS_VOLUMES=/data/rustfs{1..4}
- RUSTFS_ADDRESS=0.0.0.0:9000
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
- RUSTFS_CONSOLE_ENABLE=true
@@ -90,7 +87,7 @@ services:
- RUSTFS_LOG_LEVEL=debug
volumes:
- .:/app # Mount source code to /app for development
- rustfs_dev_data:/data
- deploy/data/dev:/data
networks:
- rustfs-network
restart: unless-stopped
@@ -98,7 +95,7 @@ services:
test:
[
"CMD",
"sh", "-c",
"sh", "-c",
"curl -f http://localhost:9000/health && curl -f http://localhost:9001/health"
]
interval: 30s
@@ -239,5 +236,5 @@ volumes:
driver: local
redis_data:
driver: local
logs_data:
logs:
driver: local

View File

@@ -1,147 +0,0 @@
# Append Write Design
This document captures the current design of the append-write feature in RustFS so that new contributors can quickly understand the moving parts, data flows, and testing expectations.
## Goals & Non-Goals
### Goals
- Allow clients to append payloads to existing objects without re-uploading the full body.
- Support inline objects and spill seamlessly into segmented layout once thresholds are exceeded.
- Preserve strong read-after-write semantics via optimistic concurrency controls (ETag / epoch).
- Expose minimal S3-compatible surface area (`x-amz-object-append`, `x-amz-append-position`, `x-amz-append-action`).
### Non-Goals
- Full multipart-upload parity; append is intentionally simpler and serialized per object.
- Cross-object transactions; each object is isolated.
- Rebalancing or background compaction (future work).
## State Machine
Append state is persisted inside `FileInfo.metadata` under `x-rustfs-internal-append-state` and serialized as `AppendState` (`crates/filemeta/src/append.rs`).
```
Disabled --(initial PUT w/o append)--> SegmentedSealed
Inline --(inline append)--> Inline / InlinePendingSpill
InlinePendingSpill --(spill success)--> SegmentedActive
SegmentedActive --(Complete)--> SegmentedSealed
SegmentedActive --(Abort)--> SegmentedSealed
SegmentedSealed --(new append)--> SegmentedActive
```
Definitions:
- **Inline**: Object data fully stored in metadata (`FileInfo.data`).
- **InlinePendingSpill**: Inline data after append exceeded inline threshold; awaiting spill to disk.
- **SegmentedActive**: Object data lives in erasure-coded part(s) plus one or more pending append segments on disk (`append/<epoch>/<uuid>`).
- **SegmentedSealed**: No pending segments; logical content equals committed parts.
`AppendState` fields (a Rust sketch follows this list):
- `state`: current state enum (see above).
- `epoch`: monotonically increasing counter for concurrency control.
- `committed_length`: logical size already durable in the base parts/inline region.
- `pending_segments`: ordered list of `AppendSegment { offset, length, data_dir, etag, epoch }`.
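For orientation, here is a minimal sketch of what this metadata could look like in Rust. Field and variant names follow the list above; the actual definitions in `crates/filemeta/src/append.rs` (serde attributes, exact types) may differ.
```rust
use serde::{Deserialize, Serialize};

/// Lifecycle states from the diagram above (names per this document;
/// the real enum lives in `crates/filemeta/src/append.rs`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AppendObjectState {
    Disabled,
    Inline,
    InlinePendingSpill,
    SegmentedActive,
    SegmentedSealed,
}

/// One pending, not yet consolidated append segment stored on disk
/// under `append/<epoch>/<segment_uuid>/`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AppendSegment {
    pub offset: u64,      // logical offset where this segment begins
    pub length: u64,      // number of bytes appended in this segment
    pub data_dir: String, // segment directory (UUID) holding the erasure shards
    pub etag: String,     // ETag of the appended payload
    pub epoch: u64,       // epoch at which the segment was written
}

/// Serialized into `FileInfo.metadata` under `x-rustfs-internal-append-state`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AppendState {
    pub state: AppendObjectState,
    pub epoch: u64,                           // bumped on append/complete/abort
    pub committed_length: u64,                // bytes durable in base parts/inline data
    pub pending_segments: Vec<AppendSegment>, // ordered by offset
}
```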
## Metadata & Storage Layout
### Inline Objects
- Inline payload stored in `FileInfo.data`.
- Hash metadata maintained through `append_inline_data` (re-encoding with bitrot writer when checksums exist).
- When spilling is required, inline data is decoded, appended, and re-encoded into erasure shards written to a per-disk temporary path `append/<epoch>/<segment_id>/part.1` before being renamed into the primary data directory.
### Segmented Objects
- Base object content is represented by standard erasure-coded parts (`FileInfo.parts`, `FileInfo.data_dir`).
- Pending append segments live under `<object>/append/<epoch>/<segment_uuid>/part.1` (per disk).
- Each append stores segment metadata (`etag`, `offset`, `length`) inside `AppendState.pending_segments` and updates `FileInfo.size` to include pending bytes.
- Aggregate ETag is recomputed using multipart MD5 helper (`get_complete_multipart_md5`).
### Metadata Writes
- `SetDisks::write_unique_file_info` persists `FileInfo` updates to the quorum of disks.
- During spill/append/complete/abort, all mirrored `FileInfo` copies within `parts_metadata` are updated to keep nodes consistent.
- Abort ensures inline markers are cleared (`x-rustfs-internal-inline-data`) and `FileInfo.data = None` to avoid stale inline reads.
## Request Flows
### Append (Inline Path)
1. Handler (`rustfs/src/storage/ecfs.rs`) validates headers and fills `ObjectOptions.append_*`.
2. `SetDisks::append_inline_object` verifies append position using `AppendState` snapshot.
3. Existing inline payload decoded (if checksums present) and appended in-memory (`append_inline_data`).
4. Storage class decision determines whether to remain inline or spill.
5. Inline success updates `FileInfo.data`, metadata, `AppendState` (state `Inline`, lengths updated).
6. Spill path delegates to `spill_inline_into_segmented` (see segmented path below).
### Append (Segmented Path)
1. `SetDisks::append_segmented_object` validates state (must be `SegmentedActive` or `SegmentedSealed`).
2. Snapshot the expected offset = committed length + sum of pending segment lengths (see the sketch after this list).
3. Payload encoded using erasure coding; shards written to temp volume; renamed into `append/<epoch>/<segment_uuid>` under object data directory.
4. New `AppendSegment` pushed, `AppendState.epoch` incremented, aggregated ETag recalculated.
5. `FileInfo.size` reflects committed + pending bytes; metadata persisted across quorum.
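A hedged sketch of the position check in step 2, reusing the `AppendState` sketch above; the real `SetDisks::append_segmented_object` additionally handles quorum reads, locking, and storage-error mapping.
```rust
/// Offset the next append must target: committed bytes plus all pending segments.
fn expected_append_offset(state: &AppendState) -> u64 {
    state.committed_length + state.pending_segments.iter().map(|s| s.length).sum::<u64>()
}

/// Reject stale `x-amz-append-position` values before any data is written.
fn validate_append_position(state: &AppendState, requested: u64) -> Result<(), String> {
    let expected = expected_append_offset(state);
    if requested == expected {
        Ok(())
    } else {
        // The S3 layer maps this to an HTTP 400 response.
        Err(format!("append position {requested} does not match expected offset {expected}"))
    }
}
```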
### GET / Range Reads
1. `SetDisks::get_object_with_fileinfo` inspects `AppendState`.
2. Reads committed data from inline or erasure parts (ignoring inline buffers once segmented).
3. If the requested range includes pending segments, the loader fetches each segment via `load_pending_segment`, decodes shards, and streams the appended bytes (a small range-selection sketch follows this list).
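As a sketch of the bookkeeping in step 3 (not the real loader), the pending segments touched by a byte range can be selected by plain interval intersection over the `AppendState` sketch above:
```rust
/// Indices of pending segments overlapping the half-open byte range [start, end).
fn pending_segments_in_range(state: &AppendState, start: u64, end: u64) -> Vec<usize> {
    state
        .pending_segments
        .iter()
        .enumerate()
        .filter(|(_, seg)| seg.offset < end && seg.offset + seg.length > start)
        .map(|(i, _)| i)
        .collect()
}
```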
### Complete Append (`x-amz-append-action: complete`)
1. `complete_append_object` fetches current `FileInfo`, ensures pending segments exist.
2. Entire logical object (committed + pending) streamed through `VecAsyncWriter` (TODO: potential optimization) to produce contiguous payload.
3. Inline spill routine (`spill_inline_into_segmented`) consolidates data into primary part, sets state `SegmentedSealed`, clears pending list, updates `committed_length`.
4. Pending segment directories removed and quorum metadata persisted.
### Abort Append (`x-amz-append-action: abort`)
1. `abort_append_object` removes pending segment directories.
2. Ensures `committed_length` matches actual durable data (inline length or sum of parts); logs and corrects if mismatch is found.
3. Clears pending list, sets state `SegmentedSealed`, bumps epoch, removes inline markers/data.
4. Persists metadata and returns base ETag (multipart MD5 of committed parts).
## Error Handling & Recovery
- All disk writes go through quorum helpers (`reduce_write_quorum_errs`, `reduce_read_quorum_errs`) and propagate `StorageError` variants for HTTP mapping.
- Append operations are single-threaded per object via locking in higher layers (`fast_lock_manager` in `SetDisks::put_object`).
- On spill/append rename failure, temp directories are cleaned up; operation aborts without mutating metadata.
- Abort path now realigns `committed_length` if metadata drifted (observed during development) and strips inline remnants to prevent stale reads.
- Pending segments are only removed once metadata update succeeds; no partial deletion is performed ahead of state persistence.
## Concurrency
- Append requests rely on an exact `x-amz-append-position` match to ensure the client has an up-to-date view (an example request is sketched after this list).
- Optional header `If-Match` is honored in S3 handler before actual append (shared with regular PUT path).
- `AppendState.epoch` increments after each append/complete/abort; future work may expose it for stronger optimistic control.
- e2e test `append_segments_concurrency_then_complete` verifies that simultaneous appends result in exactly one success; the loser receives 400.
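For illustration only, a client-side sketch of one append call using the headers listed above (here with `reqwest`; the e2e tests use their own client). The `x-amz-object-append` value and the absence of request signing are assumptions — real S3 clients must sign the request (SigV4).
```rust
use reqwest::Client;

/// Append `payload` at `position` to `bucket/key`.
/// Sketch only: SigV4 signing and error-body parsing are omitted.
async fn append_once(
    endpoint: &str,
    bucket: &str,
    key: &str,
    position: u64,
    payload: Vec<u8>,
) -> Result<(), reqwest::Error> {
    let url = format!("{endpoint}/{bucket}/{key}");
    let resp = Client::new()
        .put(&url)
        .header("x-amz-object-append", "true") // assumed flag value
        .header("x-amz-append-position", position.to_string())
        .body(payload)
        .send()
        .await?;
    // A stale position comes back as HTTP 400; refresh the offset and retry.
    resp.error_for_status()?;
    Ok(())
}
```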
## Key Modules
- `crates/ecstore/src/set_disk.rs`: core implementation (inline append, spill, segmented append, complete, abort, GET integration).
- `crates/ecstore/src/erasure_coding/{encode,decode}.rs`: encode/decode helpers used by append pipeline.
- `crates/filemeta/src/append.rs`: metadata schema + helper functions.
- `rustfs/src/storage/ecfs.rs`: HTTP/S3 layer that parses headers and routes to append operations.
## Testing Strategy
### Unit Tests
- `crates/filemeta/src/append.rs` covers serialization and state transitions.
- `crates/ecstore/src/set_disk.rs` contains lower-level utilities and regression tests for metadata helpers.
- Additional unit coverage is recommended for spill/append failure paths (e.g., injected rename failures).
### End-to-End Tests (`cargo test --package e2e_test append`)
- Inline append success, wrong position, precondition failures.
- Segmented append success, wrong position, wrong ETag.
- Spill threshold transition (`append_threshold_crossing_inline_to_segmented`).
- Pending segment streaming (`append_range_requests_across_segments`).
- Complete append consolidates pending segments.
- Abort append discards pending data and allows new append.
- Concurrency: two clients racing to append, followed by additional append + complete.
### Tooling Considerations
- `make clippy` must pass; the append code relies on async operations and custom logging.
- `make test` / `cargo nextest run` recommended before submitting PRs.
- Use `RUST_LOG=rustfs_ecstore=debug` when debugging append flows; targeted `info!`/`warn!` logs are emitted during spill/abort.
## Future Work
- Streamed consolidation in `complete_append_object` to avoid buffering entire logical object.
- Throttling or automatic `Complete` when pending segments exceed size/quantity thresholds.
- Stronger epoch exposure to clients (header-based conflict detection).
- Automated cleanup or garbage collection for orphaned `append/*` directories.
---
For questions or design discussions, drop a note in the append-write channel or ping the storage team.

60
docs/examples/README.md Normal file
View File

@@ -0,0 +1,60 @@
# RustFS Deployment Examples
This directory contains practical deployment examples and configurations for RustFS.
## Available Examples
### [MNMD (Multi-Node Multi-Drive)](./mnmd/)
Complete Docker Compose example for deploying RustFS in a 4-node, 4-drive-per-node configuration.
**Features:**
- Proper disk indexing (1..4) to avoid VolumeNotFound errors
- Startup coordination via `wait-and-start.sh` script
- Service discovery using Docker service names
- Health checks with alternatives for different base images
- Comprehensive documentation and verification checklist
**Use Case:** Production-ready multi-node deployment for high availability and performance.
**Quick Start:**
```bash
cd docs/examples/mnmd
docker-compose up -d
```
**See also:**
- [MNMD README](./mnmd/README.md) - Detailed usage guide
- [MNMD CHECKLIST](./mnmd/CHECKLIST.md) - Step-by-step verification
## Other Deployment Examples
For additional deployment examples, see:
- [`examples/`](/examples/) - Root-level examples directory with:
- `docker-quickstart.sh` - Quick start script for basic deployments
- `enhanced-docker-deployment.sh` - Advanced deployment scenarios
- `docker-comprehensive.yml` - Docker Compose with multiple profiles
- [`.docker/compose/`](/.docker/compose/) - Docker Compose configurations:
- `docker-compose.cluster.yaml` - Basic cluster setup
- `docker-compose.observability.yaml` - Observability stack integration
## Related Documentation
- [Console & Endpoint Service Separation](../console-separation.md)
- [Environment Variables](../ENVIRONMENT_VARIABLES.md)
- [Performance Testing](../PERFORMANCE_TESTING.md)
## Contributing
When adding new examples:
1. Create a dedicated subdirectory under `docs/examples/`
2. Include a comprehensive README.md
3. Provide working configuration files
4. Add verification steps or checklists
5. Document common issues and troubleshooting
## Support
For issues or questions:
- GitHub Issues: https://github.com/rustfs/rustfs/issues
- Documentation: https://rustfs.io

View File

@@ -0,0 +1,329 @@
# MNMD Deployment Checklist
This checklist provides step-by-step verification for deploying RustFS in MNMD (Multi-Node Multi-Drive) mode using
Docker.
## Pre-Deployment Checks
### 1. System Requirements
- [ ] Docker Engine 20.10+ installed
- [ ] Docker Compose 2.0+ installed
- [ ] At least 8GB RAM available
- [ ] At least 40GB disk space available (for 4 nodes × 4 volumes)
Verify with:
```bash
docker --version
docker-compose --version
free -h
df -h
```
### 2. File System Checks
- [ ] Using XFS, ext4, or another suitable filesystem (not NFS for production)
- [ ] File system supports extended attributes
Verify with:
```bash
df -T | grep -E '(xfs|ext4)'
```
### 3. Permissions and SELinux
- [ ] Current user is in `docker` group or can run `sudo docker`
- [ ] SELinux is properly configured (if enabled)
Verify with:
```bash
groups | grep docker
getenforce # If enabled, should show "Permissive" or "Enforcing" with proper policies
```
### 4. Network Configuration
- [ ] Ports 9000-9031 are available
- [ ] No firewall blocking Docker bridge network
Verify with:
```bash
# Check if ports are free
netstat -tuln | grep -E ':(9000|9001|9010|9011|9020|9021|9030|9031)'
# Should return nothing if ports are free
```
### 5. Files Present
- [ ] `docker-compose.yml` exists in current directory
Verify with:
```bash
cd docs/examples/mnmd
ls -la
chmod +x wait-and-start.sh # If needed
```
## Deployment Steps
### 1. Start the Cluster
- [ ] Navigate to the example directory
- [ ] Pull the latest RustFS image
- [ ] Start the cluster
```bash
cd docs/examples/mnmd
docker-compose pull
docker-compose up -d
```
### 2. Monitor Startup
- [ ] Watch container logs during startup
- [ ] Verify no VolumeNotFound errors
- [ ] Check that peer discovery completes
```bash
# Watch all logs
docker-compose logs -f
# Watch specific node
docker-compose logs -f rustfs-node1
# Look for successful startup messages
docker-compose logs | grep -i "ready\|listening\|started"
```
### 3. Verify Container Status
- [ ] All 4 containers are running
- [ ] All 4 containers show as healthy
```bash
docker-compose ps
# Expected output: 4 containers in "Up" state with "healthy" status
```
### 4. Check Health Endpoints
- [ ] API health endpoints respond on all nodes
- [ ] Console health endpoints respond on all nodes
```bash
# Test API endpoints
curl http://localhost:9000/health
curl http://localhost:9010/health
curl http://localhost:9020/health
curl http://localhost:9030/health
# Test Console endpoints
curl http://localhost:9001/health
curl http://localhost:9011/health
curl http://localhost:9021/health
curl http://localhost:9031/health
# All should return successful health status
```
## Post-Deployment Verification
### 1. In-Container Checks
- [ ] Data directories exist
- [ ] Directories have correct permissions
- [ ] RustFS process is running
```bash
# Check node1
docker exec rustfs-node1 ls -la /data/
docker exec rustfs-node1 ps aux | grep rustfs
# Verify all 4 data directories exist
docker exec rustfs-node1 ls -d /data/rustfs{1..4}
```
### 2. DNS and Network Validation
- [ ] Service names resolve correctly
- [ ] Inter-node connectivity works
```bash
# DNS resolution test
docker exec rustfs-node1 nslookup rustfs-node2
docker exec rustfs-node1 nslookup rustfs-node3
docker exec rustfs-node1 nslookup rustfs-node4
# Connectivity test (using nc if available)
docker exec rustfs-node1 nc -zv rustfs-node2 9000
docker exec rustfs-node1 nc -zv rustfs-node3 9000
docker exec rustfs-node1 nc -zv rustfs-node4 9000
# Or using telnet/curl
docker exec rustfs-node1 curl -v http://rustfs-node2:9000/health
```
### 3. Volume Configuration Validation
- [ ] RUSTFS_VOLUMES environment variable is correct
- [ ] All 16 endpoints are configured (4 nodes × 4 drives)
```bash
# Check environment variable
docker exec rustfs-node1 env | grep RUSTFS_VOLUMES
# Expected output:
# RUSTFS_VOLUMES=http://rustfs-node{1...4}:9000/data/rustfs{1...4}
```
### 4. Cluster Functionality
- [ ] Can list buckets via API
- [ ] Can create a bucket
- [ ] Can upload an object
- [ ] Can download an object
```bash
# Configure AWS CLI or s3cmd
export AWS_ACCESS_KEY_ID=rustfsadmin
export AWS_SECRET_ACCESS_KEY=rustfsadmin
# Using AWS CLI (if installed)
aws --endpoint-url http://localhost:9000 s3 mb s3://test-bucket
aws --endpoint-url http://localhost:9000 s3 ls
echo "test content" > test.txt
aws --endpoint-url http://localhost:9000 s3 cp test.txt s3://test-bucket/
aws --endpoint-url http://localhost:9000 s3 ls s3://test-bucket/
aws --endpoint-url http://localhost:9000 s3 cp s3://test-bucket/test.txt downloaded.txt
cat downloaded.txt
# Or using curl
curl -X PUT http://localhost:9000/test-bucket \
-H "Host: localhost:9000" \
--user rustfsadmin:rustfsadmin
```
### 5. Healthcheck Verification
- [ ] Docker reports all services as healthy
- [ ] Healthcheck scripts work in containers
```bash
# Check Docker health status
docker inspect rustfs-node1 --format='{{.State.Health.Status}}'
docker inspect rustfs-node2 --format='{{.State.Health.Status}}'
docker inspect rustfs-node3 --format='{{.State.Health.Status}}'
docker inspect rustfs-node4 --format='{{.State.Health.Status}}'
# All should return "healthy"
# Test healthcheck command manually
docker exec rustfs-node1 nc -z localhost 9000
echo $? # Should be 0
```
## Troubleshooting Checks
### If VolumeNotFound Error Occurs
- [ ] Verify volume indexing starts at 1, not 0
- [ ] Check that RUSTFS_VOLUMES matches mounted paths
- [ ] Ensure all /data/rustfs{1..4} directories exist
```bash
# Check mounted volumes
docker inspect rustfs-node1 | jq '.[].Mounts'
# Verify directories in container
docker exec rustfs-node1 ls -la /data/
```
### If Healthcheck Fails
- [ ] Check if `nc` is available in the image
- [ ] Try alternative healthcheck (curl/wget)
- [ ] Increase `start_period` in docker-compose.yml
```bash
# Check if nc is available
docker exec rustfs-node1 which nc
# Test healthcheck manually
docker exec rustfs-node1 nc -z localhost 9000
# Check logs for errors
docker-compose logs rustfs-node1 | grep -i error
```
### If Startup Takes Too Long
- [ ] Check peer discovery timeout in logs
- [ ] Verify network connectivity between nodes
- [ ] Consider increasing timeout in wait-and-start.sh
```bash
# Check startup logs
docker-compose logs rustfs-node1 | grep -i "waiting\|peer\|timeout"
# Check network
docker network inspect mnmd_rustfs-mnmd
```
### If Containers Crash or Restart
- [ ] Review container logs
- [ ] Check resource usage (CPU/Memory)
- [ ] Verify no port conflicts
```bash
# View last crash logs
docker-compose logs --tail=100 rustfs-node1
# Check resource usage
docker stats --no-stream
# Check restart count
docker-compose ps
```
## Cleanup Checklist
When done testing:
- [ ] Stop the cluster: `docker-compose down`
- [ ] Remove volumes (optional, destroys data): `docker-compose down -v`
- [ ] Clean up dangling images: `docker image prune`
- [ ] Verify ports are released: `netstat -tuln | grep -E ':(9000|9001|9010|9011|9020|9021|9030|9031)'`
## Production Deployment Additional Checks
Before deploying to production:
- [ ] Change default credentials (RUSTFS_ACCESS_KEY, RUSTFS_SECRET_KEY)
- [ ] Configure TLS certificates
- [ ] Set up proper logging and monitoring
- [ ] Configure backups for volumes
- [ ] Review and adjust resource limits
- [ ] Set up external load balancer (if needed)
- [ ] Document disaster recovery procedures
- [ ] Test failover scenarios
- [ ] Verify data persistence after container restart
## Summary
This checklist ensures:
- ✓ Correct disk indexing (1..4 instead of 0..3)
- ✓ Proper startup coordination via wait-and-start.sh
- ✓ Service discovery via Docker service names
- ✓ Health checks function correctly
- ✓ All 16 endpoints (4 nodes × 4 drives) are operational
- ✓ No VolumeNotFound errors occur
For more details, see [README.md](./README.md) in this directory.

View File

@@ -0,0 +1,268 @@
# RustFS MNMD (Multi-Node Multi-Drive) Docker Example
This directory contains a complete, ready-to-use MNMD deployment example for RustFS with 4 nodes and 4 drives per node (
4x4 configuration).
## Overview
This example addresses common deployment issues including:
- **VolumeNotFound errors** - Fixed by using correct disk indexing (`/data/rustfs{1...4}` instead of
`/data/rustfs{0...3}`)
- **Startup race conditions** - Solved with a simple `sleep` command in each service.
- **Service discovery** - Uses Docker service names (`rustfs-node{1..4}`) instead of hard-coded IPs
- **Health checks** - Implements proper health monitoring with `nc` (with alternatives documented)
## Quick Start
From this directory (`docs/examples/mnmd`), run:
```bash
# Start the cluster
docker-compose up -d
# Check the status
docker-compose ps
# View logs
docker-compose logs -f
# Test the deployment
curl http://localhost:9000/health
curl http://localhost:9001/health
# Run comprehensive tests
./test-deployment.sh
# Stop the cluster
docker-compose down
# Clean up volumes (WARNING: deletes all data)
docker-compose down -v
```
## Configuration Details
### Volume Configuration
The example uses the following volume configuration:
```bash
RUSTFS_VOLUMES=http://rustfs-node{1...4}:9000/data/rustfs{1...4}
```
This expands to 16 endpoints (4 nodes × 4 drives):
- Node 1: `/data/rustfs1`, `/data/rustfs2`, `/data/rustfs3`, `/data/rustfs4`
- Node 2: `/data/rustfs1`, `/data/rustfs2`, `/data/rustfs3`, `/data/rustfs4`
- Node 3: `/data/rustfs1`, `/data/rustfs2`, `/data/rustfs3`, `/data/rustfs4`
- Node 4: `/data/rustfs1`, `/data/rustfs2`, `/data/rustfs3`, `/data/rustfs4`
**Important:** Disk indexing starts at 1 to match the mounted paths (`/data/rustfs1..4`).
### Port Mappings
| Node | API Port | Console Port |
|-------|----------|--------------|
| node1 | 9000 | 9001 |
| node2 | 9010 | 9011 |
| node3 | 9020 | 9021 |
| node4 | 9030 | 9031 |
### Startup Coordination
To prevent race conditions during startup where nodes might not find each other, a simple `sleep 3` command is added to
each service's command. This provides a brief delay, allowing the network and other services to initialize before RustFS
starts. For more complex scenarios, a more robust health-check dependency or an external entrypoint script might be
required.
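The delay lives directly in the node template's `command` (see `docker-compose.yml` in this directory); a health-check based `depends_on` is one possible stricter alternative, shown commented out below as an assumption rather than part of this example:
```yaml
# Excerpt from the node template in this directory's docker-compose.yml
command: ["sh", "-c", "sleep 3 && rustfs"]

# Possible stricter alternative (not used here): wait on another node's healthcheck
# depends_on:
#   rustfs-node1:
#     condition: service_healthy
```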
### Health Checks
Default health check using `nc` (netcat):
```yaml
healthcheck:
test: [ "CMD-SHELL", "nc -z localhost 9000 || exit 1" ]
interval: 10s
timeout: 5s
retries: 3
start_period: 30s
```
#### Alternative Health Checks
If your base image lacks `nc`, use one of these alternatives:
**Using curl:**
```yaml
healthcheck:
test: [ "CMD-SHELL", "curl -f http://localhost:9000/health || exit 1" ]
interval: 10s
timeout: 5s
retries: 3
start_period: 30s
```
**Using wget:**
```yaml
healthcheck:
test: [ "CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1" ]
interval: 10s
timeout: 5s
retries: 3
start_period: 30s
```
### Brace Expansion Alternatives
If the `{1...4}` range syntax is not expanded correctly in your environment, replace it with explicit endpoints:
```yaml
environment:
- RUSTFS_VOLUMES=http://rustfs-node1:9000/data/rustfs1,http://rustfs-node1:9000/data/rustfs2,http://rustfs-node1:9000/data/rustfs3,http://rustfs-node1:9000/data/rustfs4,http://rustfs-node2:9000/data/rustfs1,http://rustfs-node2:9000/data/rustfs2,http://rustfs-node2:9000/data/rustfs3,http://rustfs-node2:9000/data/rustfs4,http://rustfs-node3:9000/data/rustfs1,http://rustfs-node3:9000/data/rustfs2,http://rustfs-node3:9000/data/rustfs3,http://rustfs-node3:9000/data/rustfs4,http://rustfs-node4:9000/data/rustfs1,http://rustfs-node4:9000/data/rustfs2,http://rustfs-node4:9000/data/rustfs3,http://rustfs-node4:9000/data/rustfs4
```
## Using RUSTFS_CMD
The `RUSTFS_CMD` environment variable provides a fallback when no command is specified:
```yaml
environment:
- RUSTFS_CMD=rustfs # Default fallback command
```
This allows the entrypoint to execute the correct command when Docker doesn't provide one.
## Testing the Deployment
After starting the cluster, verify it's working:
### Automated Testing
Use the provided test script for comprehensive validation:
```bash
./test-deployment.sh
```
This script tests:
- Container status (4/4 running)
- Health checks (4/4 healthy)
- API endpoints (4 ports)
- Console endpoints (4 ports)
- Inter-node connectivity
- Data directory existence
### Manual Testing
For manual verification:
```bash
# 1. Check all containers are healthy
docker-compose ps
# 2. Test API endpoints
for port in 9000 9010 9020 9030; do
echo "Testing port $port..."
curl -s http://localhost:${port}/health | jq '.'
done
# 3. Test console endpoints
for port in 9001 9011 9021 9031; do
echo "Testing console port $port..."
curl -s http://localhost:${port}/health | jq '.'
done
# 4. Check inter-node connectivity
docker exec rustfs-node1 nc -zv rustfs-node2 9000
docker exec rustfs-node1 nc -zv rustfs-node3 9000
docker exec rustfs-node1 nc -zv rustfs-node4 9000
```
## Troubleshooting
### VolumeNotFound Error
**Symptom:** Error message about `/data/rustfs0` not found.
**Solution:** This example uses `/data/rustfs{1...4}` indexing to match the mounted Docker volumes. Ensure your
`RUSTFS_VOLUMES` configuration starts at index 1, not 0.
### Health Check Failures
**Symptom:** Containers show as unhealthy.
**Solutions:**
1. Check if `nc` is available: `docker exec rustfs-node1 which nc`
2. Use alternative health checks (curl/wget) as documented above
3. Increase `start_period` if nodes need more time to initialize
### Startup Timeouts
**Symptom:** Services timeout waiting for peers.
**Solutions:**
1. Check logs: `docker-compose logs rustfs-node1`
2. Verify network connectivity: `docker-compose exec rustfs-node1 ping rustfs-node2`
3. Consider increasing the `sleep` duration in the `docker-compose.yml` `command` directive if a longer delay is needed.
### Permission Issues
**Symptom:** Cannot create directories or write data.
**Solution:** Ensure volumes have correct permissions or set `RUSTFS_UID` and `RUSTFS_GID` environment variables.
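For example (the values below are illustrative; match them to the owner of the mounted host paths):
```yaml
environment:
  - RUSTFS_UID=1000   # illustrative: UID that owns the mounted data directories
  - RUSTFS_GID=1000   # illustrative: matching GID
```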
## Advanced Configuration
### Custom Credentials
Replace default credentials in production:
```yaml
environment:
- RUSTFS_ACCESS_KEY=your_access_key
- RUSTFS_SECRET_KEY=your_secret_key
```
### TLS Configuration
Add TLS certificates:
```yaml
volumes:
- ./certs:/opt/tls:ro
environment:
- RUSTFS_TLS_PATH=/opt/tls
```
### Resource Limits
Add resource constraints:
```yaml
deploy:
resources:
limits:
cpus: '2'
memory: 4G
reservations:
cpus: '1'
memory: 2G
```
## See Also
- [CHECKLIST.md](./CHECKLIST.md) - Step-by-step verification guide
- [../../console-separation.md](../../console-separation.md) - Console & endpoint service separation guide
- [../../../examples/docker-comprehensive.yml](../../../examples/docker-comprehensive.yml) - More deployment examples
- [Issue #618](https://github.com/rustfs/rustfs/issues/618) - Original VolumeNotFound issue
## References
- RustFS Documentation: https://rustfs.io
- Docker Compose Documentation: https://docs.docker.com/compose/

View File

@@ -0,0 +1,121 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MNMD (Multi-Node Multi-Drive) Docker Compose Example
# 4 nodes x 4 drives configuration
# This example demonstrates a complete, ready-to-use MNMD deployment
# addressing startup coordination and VolumeNotFound issues.
x-node-template: &node-template
image: rustfs/rustfs:latest
environment:
# Use service names and correct disk indexing (1..4 to match mounted paths)
- RUSTFS_VOLUMES=http://rustfs-node{1...4}:9000/data/rustfs{1...4}
- RUSTFS_ADDRESS=0.0.0.0:9000
- RUSTFS_CONSOLE_ENABLE=true
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
- RUSTFS_EXTERNAL_ADDRESS=0.0.0.0:9000 # Same as internal since no port mapping
- RUSTFS_ACCESS_KEY=rustfsadmin
- RUSTFS_SECRET_KEY=rustfsadmin
- RUSTFS_CMD=rustfs
command: ["sh", "-c", "sleep 3 && rustfs"]
healthcheck:
test:
[
"CMD",
"sh", "-c",
"curl -f http://localhost:9000/health && curl -f http://localhost:9001/health"
]
interval: 10s
timeout: 5s
retries: 3
start_period: 30s
networks:
- rustfs-mnmd
services:
rustfs-node1:
<<: *node-template
container_name: rustfs-node1
hostname: rustfs-node1
ports:
- "9000:9000" # API endpoint
- "9001:9001" # Console
volumes:
- node1-data1:/data/rustfs1
- node1-data2:/data/rustfs2
- node1-data3:/data/rustfs3
- node1-data4:/data/rustfs4
rustfs-node2:
<<: *node-template
container_name: rustfs-node2
hostname: rustfs-node2
ports:
- "9010:9000" # API endpoint
- "9011:9001" # Console
volumes:
- node2-data1:/data/rustfs1
- node2-data2:/data/rustfs2
- node2-data3:/data/rustfs3
- node2-data4:/data/rustfs4
rustfs-node3:
<<: *node-template
container_name: rustfs-node3
hostname: rustfs-node3
ports:
- "9020:9000" # API endpoint
- "9021:9001" # Console
volumes:
- node3-data1:/data/rustfs1
- node3-data2:/data/rustfs2
- node3-data3:/data/rustfs3
- node3-data4:/data/rustfs4
rustfs-node4:
<<: *node-template
container_name: rustfs-node4
hostname: rustfs-node4
ports:
- "9030:9000" # API endpoint
- "9031:9001" # Console
volumes:
- node4-data1:/data/rustfs1
- node4-data2:/data/rustfs2
- node4-data3:/data/rustfs3
- node4-data4:/data/rustfs4
networks:
rustfs-mnmd:
driver: bridge
volumes:
node1-data1:
node1-data2:
node1-data3:
node1-data4:
node2-data1:
node2-data2:
node2-data3:
node2-data4:
node3-data1:
node3-data2:
node3-data3:
node3-data4:
node4-data1:
node4-data2:
node4-data3:
node4-data4:

View File

@@ -0,0 +1,172 @@
#!/bin/bash
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# test-deployment.sh - Quick test script for MNMD deployment
# Usage: ./test-deployment.sh
set -e
# Colors for output
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
echo "========================================="
echo "RustFS MNMD Deployment Test"
echo "========================================="
echo ""
# Test 1: Check if all containers are running
echo "Test 1: Checking container status..."
RUNNING=$(docker-compose ps | grep -c "Up" || true) # grep -c already prints 0 when nothing matches
if [ "$RUNNING" -eq 4 ]; then
echo -e "${GREEN}✓ All 4 containers are running${NC}"
else
echo -e "${RED}✗ Only $RUNNING/4 containers are running${NC}"
docker-compose ps
exit 1
fi
echo ""
# Test 2: Check health status
echo "Test 2: Checking health status..."
HEALTHY=0
for node in rustfs-node1 rustfs-node2 rustfs-node3 rustfs-node4; do
STATUS=$(docker inspect "$node" --format='{{.State.Health.Status}}' 2>/dev/null || echo "unknown")
if [ "$STATUS" = "healthy" ]; then
echo -e " ${GREEN}$node is healthy${NC}"
HEALTHY=$((HEALTHY + 1))
elif [ "$STATUS" = "starting" ]; then
echo -e " ${YELLOW}$node is starting (wait a moment)${NC}"
else
echo -e " ${RED}$node status: $STATUS${NC}"
fi
done
if [ "$HEALTHY" -eq 4 ]; then
echo -e "${GREEN}✓ All containers are healthy${NC}"
elif [ "$HEALTHY" -gt 0 ]; then
echo -e "${YELLOW}$HEALTHY/4 containers are healthy (some may still be starting)${NC}"
else
echo -e "${RED}✗ No containers are healthy${NC}"
exit 1
fi
echo ""
# Test 3: Check API endpoints
echo "Test 3: Testing API endpoints..."
PORTS=(9000 9010 9020 9030)
API_SUCCESS=0
for port in "${PORTS[@]}"; do
if curl -sf http://localhost:${port}/health >/dev/null 2>&1; then
echo -e " ${GREEN}✓ API on port $port is responding${NC}"
API_SUCCESS=$((API_SUCCESS + 1))
else
echo -e " ${RED}✗ API on port $port is not responding${NC}"
fi
done
if [ "$API_SUCCESS" -eq 4 ]; then
echo -e "${GREEN}✓ All API endpoints are working${NC}"
else
echo -e "${YELLOW}$API_SUCCESS/4 API endpoints are working${NC}"
fi
echo ""
# Test 4: Check Console endpoints
echo "Test 4: Testing Console endpoints..."
CONSOLE_PORTS=(9001 9011 9021 9031)
CONSOLE_SUCCESS=0
for port in "${CONSOLE_PORTS[@]}"; do
if curl -sf http://localhost:${port}/health >/dev/null 2>&1; then
echo -e " ${GREEN}✓ Console on port $port is responding${NC}"
CONSOLE_SUCCESS=$((CONSOLE_SUCCESS + 1))
else
echo -e " ${RED}✗ Console on port $port is not responding${NC}"
fi
done
if [ "$CONSOLE_SUCCESS" -eq 4 ]; then
echo -e "${GREEN}✓ All Console endpoints are working${NC}"
else
echo -e "${YELLOW}$CONSOLE_SUCCESS/4 Console endpoints are working${NC}"
fi
echo ""
# Test 5: Check inter-node connectivity
echo "Test 5: Testing inter-node connectivity..."
CONN_SUCCESS=0
for node in rustfs-node2 rustfs-node3 rustfs-node4; do
if docker exec rustfs-node1 nc -z "$node" 9000 2>/dev/null; then
echo -e " ${GREEN}✓ node1 → $node connection OK${NC}"
CONN_SUCCESS=$((CONN_SUCCESS + 1))
else
echo -e " ${RED}✗ node1 → $node connection failed${NC}"
fi
done
if [ "$CONN_SUCCESS" -eq 3 ]; then
echo -e "${GREEN}✓ All inter-node connections are working${NC}"
else
echo -e "${YELLOW}$CONN_SUCCESS/3 inter-node connections are working${NC}"
fi
echo ""
# Test 6: Verify data directories
echo "Test 6: Verifying data directories..."
DIR_SUCCESS=0
for i in {1..4}; do
if docker exec rustfs-node1 test -d "/data/rustfs${i}"; then
DIR_SUCCESS=$((DIR_SUCCESS + 1))
else
echo -e " ${RED}✗ /data/rustfs${i} not found in node1${NC}"
fi
done
if [ "$DIR_SUCCESS" -eq 4 ]; then
echo -e "${GREEN}✓ All data directories exist${NC}"
else
echo -e "${RED}✗ Only $DIR_SUCCESS/4 data directories exist${NC}"
fi
echo ""
# Summary
echo "========================================="
echo "Test Summary"
echo "========================================="
echo "Containers running: $RUNNING/4"
echo "Healthy containers: $HEALTHY/4"
echo "API endpoints: $API_SUCCESS/4"
echo "Console endpoints: $CONSOLE_SUCCESS/4"
echo "Inter-node connections: $CONN_SUCCESS/3"
echo "Data directories: $DIR_SUCCESS/4"
echo ""
TOTAL=$((RUNNING + HEALTHY + API_SUCCESS + CONSOLE_SUCCESS + CONN_SUCCESS + DIR_SUCCESS))
MAX_SCORE=23
if [ "$TOTAL" -eq "$MAX_SCORE" ]; then
echo -e "${GREEN}✓ All tests passed! Deployment is working correctly.${NC}"
exit 0
elif [ "$TOTAL" -ge 20 ]; then
echo -e "${YELLOW}⚠ Most tests passed. Some components may still be starting up.${NC}"
echo " Try running this script again in a few moments."
exit 0
else
echo -e "${RED}✗ Some tests failed. Check the output above and logs for details.${NC}"
echo " Run 'docker-compose logs' for more information."
exit 1
fi

BIN
docs/rustfs-trending.jpg Normal file

Binary file not shown (new image, 36 KiB)

View File

@@ -78,7 +78,6 @@ matchit = { workspace = true }
md5.workspace = true
mime_guess = { workspace = true }
opentelemetry = { workspace = true }
percent-encoding = { workspace = true }
pin-project-lite.workspace = true
reqwest = { workspace = true }
rustls = { workspace = true }

View File

@@ -12,21 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
use crate::admin::router::Operation;
use crate::auth::{check_key_valid, get_session_token};
use http::{HeaderMap, StatusCode};
use matchit::Params;
use rustfs_config::notify::{NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS};
use rustfs_config::{ENABLE_KEY, EnableState};
use rustfs_notify::rules::{BucketNotificationConfig, PatternRules};
use rustfs_targets::EventName;
use rustfs_targets::check_mqtt_broker_available;
use s3s::header::CONTENT_LENGTH;
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use serde::{Deserialize, Serialize};
use serde_urlencoded::from_bytes;
use std::collections::HashMap;
use std::future::Future;
use std::io::{Error, ErrorKind};
use std::net::SocketAddr;
@@ -36,12 +31,6 @@ use tokio::time::{Duration, sleep};
use tracing::{debug, error, info, warn};
use url::Url;
#[derive(Debug, Deserialize)]
struct BucketQuery {
#[serde(rename = "bucketName")]
bucket_name: String,
}
#[derive(Debug, Deserialize)]
pub struct KeyValue {
pub key: String,
@@ -177,6 +166,7 @@ impl Operation for NotificationTarget {
let mut client_cert_val = None;
let mut client_key_val = None;
let mut qos_val = None;
let mut topic_val = String::new();
for kv in notification_body.key_values.iter() {
if !allowed_keys.contains(kv.key.as_str()) {
@@ -190,6 +180,16 @@ impl Operation for NotificationTarget {
if kv.key == "endpoint" {
endpoint_val = Some(kv.value.clone());
}
if target_type == NOTIFY_MQTT_SUB_SYS {
if kv.key == rustfs_config::MQTT_BROKER {
endpoint_val = Some(kv.value.clone());
}
if kv.key == rustfs_config::MQTT_TOPIC {
topic_val = kv.value.clone();
}
}
if kv.key == "queue_dir" {
queue_dir_val = Some(kv.value.clone());
}
@@ -236,12 +236,15 @@ impl Operation for NotificationTarget {
}
if target_type == NOTIFY_MQTT_SUB_SYS {
let endpoint = endpoint_val.ok_or_else(|| s3_error!(InvalidArgument, "endpoint is required"))?;
let url = Url::parse(&endpoint).map_err(|e| s3_error!(InvalidArgument, "invalid endpoint url: {}", e))?;
match url.scheme() {
"tcp" | "ssl" | "ws" | "wss" | "mqtt" | "mqtts" => {}
_ => return Err(s3_error!(InvalidArgument, "unsupported broker url scheme")),
let endpoint = endpoint_val.ok_or_else(|| s3_error!(InvalidArgument, "broker endpoint is required"))?;
if topic_val.is_empty() {
return Err(s3_error!(InvalidArgument, "topic is required"));
}
// Check MQTT Broker availability
if let Err(e) = check_mqtt_broker_available(&endpoint, &topic_val).await {
return Err(s3_error!(InvalidArgument, "MQTT Broker unavailable: {}", e));
}
if let Some(queue_dir) = queue_dir_val {
validate_queue_dir(&queue_dir).await?;
if let Some(qos) = qos_val {
@@ -420,113 +423,3 @@ fn extract_target_params<'a>(params: &'a Params<'_, '_>) -> S3Result<(&'a str, &
let target_name = extract_param(params, "target_name")?;
Ok((target_type, target_name))
}
/// Set notification rules for buckets
pub struct SetBucketNotification {}
#[async_trait::async_trait]
impl Operation for SetBucketNotification {
async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
// 1. Analyze query parameters
let query: BucketQuery = from_bytes(req.uri.query().unwrap_or("").as_bytes())
.map_err(|e| s3_error!(InvalidArgument, "invalid query parameters: {}", e))?;
// 2. Permission verification
let Some(input_cred) = &req.credentials else {
return Err(s3_error!(InvalidRequest, "credentials not found"));
};
let (_cred, _owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
// 3. Get notification system instance
let Some(ns) = rustfs_notify::global::notification_system() else {
return Err(s3_error!(InternalError, "notification system not initialized"));
};
// 4. The parsing request body is BucketNotificationConfig
let mut input = req.input;
let body = input.store_all_unlimited().await.map_err(|e| {
warn!("failed to read request body: {:?}", e);
s3_error!(InvalidRequest, "failed to read request body")
})?;
let config: BucketNotificationConfig = serde_json::from_slice(&body)
.map_err(|e| s3_error!(InvalidArgument, "invalid json body for bucket notification config: {}", e))?;
// 5. Load bucket notification configuration
info!("Loading notification config for bucket '{}'", &query.bucket_name);
ns.load_bucket_notification_config(&query.bucket_name, &config)
.await
.map_err(|e| {
error!("failed to load bucket notification config: {}", e);
S3Error::with_message(S3ErrorCode::InternalError, format!("failed to load bucket notification config: {e}"))
})?;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
}
}
/// Get notification rules for buckets
#[derive(Serialize)]
struct BucketRulesResponse {
rules: HashMap<EventName, PatternRules>,
}
pub struct GetBucketNotification {}
#[async_trait::async_trait]
impl Operation for GetBucketNotification {
async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
let query: BucketQuery = from_bytes(req.uri.query().unwrap_or("").as_bytes())
.map_err(|e| s3_error!(InvalidArgument, "invalid query parameters: {}", e))?;
let Some(input_cred) = &req.credentials else {
return Err(s3_error!(InvalidRequest, "credentials not found"));
};
let (_cred, _owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
let Some(ns) = rustfs_notify::global::notification_system() else {
return Err(s3_error!(InternalError, "notification system not initialized"));
};
let rules_map = ns.notifier.get_rules_map(&query.bucket_name);
let response = BucketRulesResponse {
rules: rules_map.unwrap_or_default().inner().clone(),
};
let data = serde_json::to_vec(&response)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("failed to serialize rules: {e}")))?;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header))
}
}
/// Remove all notification rules for a bucket
pub struct RemoveBucketNotification {}
#[async_trait::async_trait]
impl Operation for RemoveBucketNotification {
async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
let query: BucketQuery = from_bytes(req.uri.query().unwrap_or("").as_bytes())
.map_err(|e| s3_error!(InvalidArgument, "invalid query parameters: {}", e))?;
let Some(input_cred) = &req.credentials else {
return Err(s3_error!(InvalidRequest, "credentials not found"));
};
let (_cred, _owner) =
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
let Some(ns) = rustfs_notify::global::notification_system() else {
return Err(s3_error!(InternalError, "notification system not initialized"));
};
info!("Removing notification config for bucket '{}'", &query.bucket_name);
ns.remove_bucket_notification_config(&query.bucket_name).await;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
}
}

View File

@@ -23,10 +23,7 @@ pub mod utils;
use handlers::{
GetReplicationMetricsHandler, HealthCheckHandler, ListRemoteTargetHandler, RemoveRemoteTargetHandler, SetRemoteTargetHandler,
bucket_meta,
event::{
GetBucketNotification, ListNotificationTargets, NotificationTarget, RemoveBucketNotification, RemoveNotificationTarget,
SetBucketNotification,
},
event::{ListNotificationTargets, ListTargetsArns, NotificationTarget, RemoveNotificationTarget},
group, kms, kms_dynamic, kms_keys, policies, pools, rebalance,
service_account::{AddServiceAccount, DeleteServiceAccount, InfoServiceAccount, ListServiceAccount, UpdateServiceAccount},
sts, tier, user,
@@ -519,25 +516,7 @@ fn register_user_route(r: &mut S3Router<AdminOperation>) -> std::io::Result<()>
r.insert(
Method::GET,
format!("{}{}", ADMIN_PREFIX, "/v3/target/arns").as_str(),
AdminOperation(&ListNotificationTargets {}),
)?;
r.insert(
Method::POST,
format!("{}{}", ADMIN_PREFIX, "/v3/target-set-bucket").as_str(),
AdminOperation(&SetBucketNotification {}),
)?;
r.insert(
Method::POST,
format!("{}{}", ADMIN_PREFIX, "/v3/target-get-bucket").as_str(),
AdminOperation(&GetBucketNotification {}),
)?;
r.insert(
Method::POST,
format!("{}{}", ADMIN_PREFIX, "/v3/target-remove-bucket").as_str(),
AdminOperation(&RemoveBucketNotification {}),
AdminOperation(&ListTargetsArns {}),
)?;
Ok(())

View File

@@ -92,6 +92,10 @@ where
T: Operation,
{
fn is_match(&self, method: &Method, uri: &Uri, headers: &HeaderMap, _: &mut Extensions) -> bool {
if method == Method::GET && uri.path() == "/health" {
return true;
}
// AssumeRole
if method == Method::POST && uri.path() == "/" {
if let Some(val) = headers.get(header::CONTENT_TYPE) {
@@ -104,36 +108,13 @@ where
uri.path().starts_with(ADMIN_PREFIX) || uri.path().starts_with(RPC_PREFIX) || uri.path().starts_with(CONSOLE_PREFIX)
}
async fn call(&self, req: S3Request<Body>) -> S3Result<S3Response<Body>> {
if self.console_enabled && req.uri.path().starts_with(CONSOLE_PREFIX) {
if let Some(console_router) = &self.console_router {
let mut console_router = console_router.clone();
let req = convert_request(req);
let result = console_router.call(req).await;
return match result {
Ok(resp) => Ok(convert_response(resp)),
Err(e) => Err(s3_error!(InternalError, "{}", e)),
};
}
return Err(s3_error!(InternalError, "console is not enabled"));
}
let uri = format!("{}|{}", &req.method, req.uri.path());
// warn!("get uri {}", &uri);
if let Ok(mat) = self.router.at(&uri) {
let op: &T = mat.value;
let mut resp = op.call(req, mat.params).await?;
resp.status = Some(resp.output.0);
return Ok(resp.map_output(|x| x.1));
}
return Err(s3_error!(NotImplemented));
}
// check_access before call
async fn check_access(&self, req: &mut S3Request<Body>) -> S3Result<()> {
// Allow unauthenticated access to health check
if req.method == Method::GET && req.uri.path() == "/health" {
return Ok(());
}
// Allow unauthenticated access to console static files if console is enabled
if self.console_enabled && req.uri.path().starts_with(CONSOLE_PREFIX) {
return Ok(());
}
@@ -156,6 +137,31 @@ where
None => Err(s3_error!(AccessDenied, "Signature is required")),
}
}
async fn call(&self, req: S3Request<Body>) -> S3Result<S3Response<Body>> {
if self.console_enabled && req.uri.path().starts_with(CONSOLE_PREFIX) {
if let Some(console_router) = &self.console_router {
let mut console_router = console_router.clone();
let req = convert_request(req);
let result = console_router.call(req).await;
return match result {
Ok(resp) => Ok(convert_response(resp)),
Err(e) => Err(s3_error!(InternalError, "{}", e)),
};
}
return Err(s3_error!(InternalError, "console is not enabled"));
}
let uri = format!("{}|{}", &req.method, req.uri.path());
if let Ok(mat) = self.router.at(&uri) {
let op: &T = mat.value;
let mut resp = op.call(req, mat.params).await?;
resp.status = Some(resp.output.0);
return Ok(resp.map_output(|x| x.1));
}
Err(s3_error!(NotImplemented))
}
}
#[async_trait::async_trait]

View File

@@ -18,6 +18,11 @@ use rustfs_config::DEFAULT_DELIMITER;
use rustfs_ecstore::config::GLOBAL_SERVER_CONFIG;
use tracing::{error, info, warn};
/// Start the audit system.
/// This function checks if the audit subsystem is configured in the global server configuration.
/// If configured, it initializes and starts the audit system.
/// If not configured, it skips the initialization.
/// It also handles cases where the audit system is already running or if the global configuration is not loaded.
pub(crate) async fn start_audit_system() -> AuditResult<()> {
info!(
target: "rustfs::main::start_audit_system",
@@ -94,6 +99,10 @@ pub(crate) async fn start_audit_system() -> AuditResult<()> {
}
}
/// Stop the audit system.
/// This function checks if the audit system is initialized and running.
/// If it is running, it prepares to stop the system, stops it, and records the stop time.
/// If the system is already stopped or not initialized, it logs a warning and returns.
pub(crate) async fn stop_audit_system() -> AuditResult<()> {
if let Some(system) = audit_system() {
let state = system.get_state().await;

View File

@@ -12,136 +12,112 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use super::access::authorize_request;
use super::options::del_opts;
use super::options::extract_metadata;
use super::options::put_opts;
use crate::auth::get_condition_values;
use crate::error::ApiError;
use crate::storage::access::ReqInfo;
use crate::storage::options::copy_dst_opts;
use crate::storage::options::copy_src_opts;
use crate::storage::options::get_complete_multipart_upload_opts;
use crate::storage::options::{extract_metadata_from_mime_with_object_name, get_opts, parse_copy_source_range};
use bytes::Bytes;
use chrono::DateTime;
use chrono::Utc;
use datafusion::arrow::csv::WriterBuilder as CsvWriterBuilder;
use datafusion::arrow::json::WriterBuilder as JsonWriterBuilder;
use datafusion::arrow::json::writer::JsonArray;
use http::StatusCode;
use rustfs_ecstore::bucket::metadata_sys::get_replication_config;
use rustfs_ecstore::bucket::object_lock::objectlock_sys::BucketObjectLockSys;
use rustfs_ecstore::bucket::replication::DeletedObjectReplicationInfo;
use rustfs_ecstore::bucket::replication::REPLICATE_INCOMING_DELETE;
use rustfs_ecstore::bucket::replication::ReplicationConfigurationExt;
use rustfs_ecstore::bucket::replication::check_replicate_delete;
use rustfs_ecstore::bucket::replication::get_must_replicate_options;
use rustfs_ecstore::bucket::replication::must_replicate;
use rustfs_ecstore::bucket::replication::schedule_replication;
use rustfs_ecstore::bucket::replication::schedule_replication_delete;
use rustfs_ecstore::bucket::versioning::VersioningApi;
use rustfs_ecstore::disk::error::DiskError;
use rustfs_ecstore::disk::error_reduce::is_all_buckets_not_found;
use rustfs_ecstore::error::is_err_bucket_not_found;
use rustfs_ecstore::error::is_err_object_not_found;
use rustfs_ecstore::error::is_err_version_not_found;
use rustfs_ecstore::set_disk::MAX_PARTS_COUNT;
use rustfs_ecstore::store_api::ObjectInfo;
use rustfs_filemeta::ReplicationStatusType;
use rustfs_filemeta::ReplicationType;
use rustfs_filemeta::VersionPurgeStatusType;
use rustfs_s3select_api::object_store::bytes_stream;
use rustfs_s3select_api::query::Context;
use rustfs_s3select_api::query::Query;
use rustfs_s3select_query::get_global_db;
// use rustfs_ecstore::store_api::RESERVED_METADATA_PREFIX;
use crate::storage::{
access::{ReqInfo, authorize_request},
options::{
copy_dst_opts, copy_src_opts, del_opts, extract_metadata, extract_metadata_from_mime_with_object_name,
get_complete_multipart_upload_opts, get_opts, parse_copy_source_range, put_opts,
},
};
use base64::{Engine, engine::general_purpose::STANDARD as BASE64_STANDARD};
use bytes::Bytes;
use chrono::{DateTime, Utc};
use datafusion::arrow::{
csv::WriterBuilder as CsvWriterBuilder, json::WriterBuilder as JsonWriterBuilder, json::writer::JsonArray,
};
use futures::StreamExt;
use http::HeaderMap;
use rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::validate_transition_tier;
use rustfs_ecstore::bucket::lifecycle::lifecycle::Lifecycle;
use rustfs_ecstore::bucket::metadata::BUCKET_LIFECYCLE_CONFIG;
use rustfs_ecstore::bucket::metadata::BUCKET_NOTIFICATION_CONFIG;
use rustfs_ecstore::bucket::metadata::BUCKET_POLICY_CONFIG;
use rustfs_ecstore::bucket::metadata::BUCKET_REPLICATION_CONFIG;
use rustfs_ecstore::bucket::metadata::BUCKET_SSECONFIG;
use rustfs_ecstore::bucket::metadata::BUCKET_TAGGING_CONFIG;
use rustfs_ecstore::bucket::metadata::BUCKET_VERSIONING_CONFIG;
use rustfs_ecstore::bucket::metadata::OBJECT_LOCK_CONFIG;
use rustfs_ecstore::bucket::metadata_sys;
use rustfs_ecstore::bucket::policy_sys::PolicySys;
use rustfs_ecstore::bucket::tagging::decode_tags;
use rustfs_ecstore::bucket::tagging::encode_tags;
use rustfs_ecstore::bucket::utils::serialize;
use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
use rustfs_ecstore::client::object_api_utils::format_etag;
use rustfs_ecstore::compress::MIN_COMPRESSIBLE_SIZE;
use rustfs_ecstore::compress::is_compressible;
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::set_disk::{DEFAULT_READ_BUFFER_SIZE, is_valid_storage_class};
use rustfs_ecstore::store_api::BucketOptions;
use rustfs_ecstore::store_api::CompletePart;
use rustfs_ecstore::store_api::DeleteBucketOptions;
use rustfs_ecstore::store_api::HTTPRangeSpec;
use rustfs_ecstore::store_api::MakeBucketOptions;
use rustfs_ecstore::store_api::MultipartUploadResult;
use rustfs_ecstore::store_api::ObjectIO;
use rustfs_ecstore::store_api::ObjectOptions;
use rustfs_ecstore::store_api::ObjectToDelete;
use rustfs_ecstore::store_api::PutObjReader;
use rustfs_ecstore::store_api::StorageAPI;
use rustfs_filemeta::fileinfo::ObjectPartInfo;
use rustfs_kms::DataKey;
use rustfs_kms::service_manager::get_global_encryption_service;
use rustfs_kms::types::{EncryptionMetadata, ObjectEncryptionContext};
use http::{HeaderMap, StatusCode};
use rustfs_ecstore::{
bucket::{
lifecycle::{bucket_lifecycle_ops::validate_transition_tier, lifecycle::Lifecycle},
metadata::{
BUCKET_LIFECYCLE_CONFIG, BUCKET_NOTIFICATION_CONFIG, BUCKET_POLICY_CONFIG, BUCKET_REPLICATION_CONFIG,
BUCKET_SSECONFIG, BUCKET_TAGGING_CONFIG, BUCKET_VERSIONING_CONFIG, OBJECT_LOCK_CONFIG,
},
metadata_sys,
metadata_sys::get_replication_config,
object_lock::objectlock_sys::BucketObjectLockSys,
policy_sys::PolicySys,
replication::{
DeletedObjectReplicationInfo, REPLICATE_INCOMING_DELETE, ReplicationConfigurationExt, check_replicate_delete,
get_must_replicate_options, must_replicate, schedule_replication, schedule_replication_delete,
},
tagging::{decode_tags, encode_tags},
utils::serialize,
versioning::VersioningApi,
versioning_sys::BucketVersioningSys,
},
client::object_api_utils::format_etag,
compress::{MIN_COMPRESSIBLE_SIZE, is_compressible},
disk::{error::DiskError, error_reduce::is_all_buckets_not_found},
error::{StorageError, is_err_bucket_not_found, is_err_object_not_found, is_err_version_not_found},
new_object_layer_fn,
set_disk::{DEFAULT_READ_BUFFER_SIZE, MAX_PARTS_COUNT, is_valid_storage_class},
store_api::{
BucketOptions,
CompletePart,
DeleteBucketOptions,
HTTPRangeSpec,
MakeBucketOptions,
MultipartUploadResult,
ObjectIO,
ObjectInfo,
ObjectOptions,
ObjectToDelete,
PutObjReader,
StorageAPI,
// RESERVED_METADATA_PREFIX,
},
};
use rustfs_filemeta::{ReplicationStatusType, ReplicationType, VersionPurgeStatusType, fileinfo::ObjectPartInfo};
use rustfs_kms::{
DataKey,
service_manager::get_global_encryption_service,
types::{EncryptionMetadata, ObjectEncryptionContext},
};
use rustfs_notify::global::notifier_instance;
use rustfs_policy::auth;
use rustfs_policy::policy::action::Action;
use rustfs_policy::policy::action::S3Action;
use rustfs_policy::policy::{BucketPolicy, BucketPolicyArgs, Validator};
use rustfs_rio::CompressReader;
use rustfs_rio::EtagReader;
use rustfs_rio::HashReader;
use rustfs_rio::Reader;
use rustfs_rio::WarpReader;
use rustfs_rio::{DecryptReader, EncryptReader, HardLimitReader};
use rustfs_targets::EventName;
use rustfs_targets::arn::{TargetID, TargetIDError};
use rustfs_utils::CompressionAlgorithm;
use rustfs_utils::http::AMZ_BUCKET_REPLICATION_STATUS;
use rustfs_utils::http::headers::RESERVED_METADATA_PREFIX_LOWER;
use rustfs_utils::http::headers::{AMZ_DECODED_CONTENT_LENGTH, AMZ_OBJECT_TAGGING};
use rustfs_utils::path::is_dir_object;
use rustfs_utils::path::path_join_buf;
use rustfs_policy::{
auth,
policy::{
action::{Action, S3Action},
{BucketPolicy, BucketPolicyArgs, Validator},
},
};
use rustfs_rio::{CompressReader, DecryptReader, EncryptReader, EtagReader, HardLimitReader, HashReader, Reader, WarpReader};
use rustfs_s3select_api::{
object_store::bytes_stream,
query::{Context, Query},
};
use rustfs_s3select_query::get_global_db;
use rustfs_targets::{
EventName,
arn::{TargetID, TargetIDError},
};
use rustfs_utils::{
CompressionAlgorithm,
http::{
AMZ_BUCKET_REPLICATION_STATUS,
headers::{AMZ_DECODED_CONTENT_LENGTH, AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX_LOWER},
},
path::{is_dir_object, path_join_buf},
};
use rustfs_zip::CompressionFormat;
use s3s::S3;
use s3s::S3Error;
use s3s::S3ErrorCode;
use s3s::S3Result;
use s3s::dto::*;
use s3s::s3_error;
use s3s::{S3Request, S3Response};
use std::collections::HashMap;
use std::fmt::Debug;
use std::path::Path;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::LazyLock;
use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339;
use tokio::io::AsyncRead;
use tokio::sync::mpsc;
use s3s::{S3, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, dto::*, s3_error};
use std::{
collections::HashMap,
fmt::Debug,
path::Path,
str::FromStr,
sync::{Arc, LazyLock},
};
use time::{OffsetDateTime, format_description::well_known::Rfc3339};
use tokio::{io::AsyncRead, sync::mpsc};
use tokio_stream::wrappers::ReceiverStream;
use tokio_tar::Archive;
use tokio_util::io::ReaderStream;
use tokio_util::io::StreamReader;
use tracing::debug;
use tracing::error;
use tracing::info;
use tracing::warn;
use tokio_util::io::{ReaderStream, StreamReader};
use tracing::{debug, error, info, warn};
use uuid::Uuid;
macro_rules! try_ {
@@ -2312,92 +2288,10 @@ impl S3 for FS {
let mt = metadata.clone();
let mt2 = metadata.clone();
let append_flag = req.headers.get("x-amz-object-append");
let append_action_header = req.headers.get("x-amz-append-action");
let mut append_requested = false;
let mut append_position: Option<i64> = None;
if let Some(flag_value) = append_flag {
let flag_str = flag_value.to_str().map_err(|_| {
S3Error::with_message(S3ErrorCode::InvalidArgument, "invalid x-amz-object-append header".to_string())
})?;
if flag_str.eq_ignore_ascii_case("true") {
append_requested = true;
let position_value = req.headers.get("x-amz-append-position").ok_or_else(|| {
S3Error::with_message(
S3ErrorCode::InvalidArgument,
"x-amz-append-position header required when x-amz-object-append is true".to_string(),
)
})?;
let position_str = position_value.to_str().map_err(|_| {
S3Error::with_message(S3ErrorCode::InvalidArgument, "invalid x-amz-append-position header".to_string())
})?;
let position = position_str.parse::<i64>().map_err(|_| {
S3Error::with_message(
S3ErrorCode::InvalidArgument,
"x-amz-append-position must be a non-negative integer".to_string(),
)
})?;
if position < 0 {
return Err(S3Error::with_message(
S3ErrorCode::InvalidArgument,
"x-amz-append-position must be a non-negative integer".to_string(),
));
}
append_position = Some(position);
} else if !flag_str.eq_ignore_ascii_case("false") {
return Err(S3Error::with_message(
S3ErrorCode::InvalidArgument,
"x-amz-object-append must be 'true' or 'false'".to_string(),
));
}
}
let mut append_action: Option<String> = None;
if let Some(action_value) = append_action_header {
let action_str = action_value.to_str().map_err(|_| {
S3Error::with_message(S3ErrorCode::InvalidArgument, "invalid x-amz-append-action header".to_string())
})?;
append_action = Some(action_str.to_ascii_lowercase());
}
let mut opts: ObjectOptions = put_opts(&bucket, &key, version_id, &req.headers, mt)
.await
.map_err(ApiError::from)?;
if append_requested {
opts.append_object = true;
opts.append_position = append_position;
}
if let Some(action) = append_action {
if append_requested {
return Err(S3Error::with_message(
S3ErrorCode::InvalidArgument,
"x-amz-object-append cannot be combined with x-amz-append-action".to_string(),
));
}
let obj_info = match action.as_str() {
"complete" => store.complete_append(&bucket, &key, &opts).await,
"abort" => store.abort_append(&bucket, &key, &opts).await,
_ => {
return Err(S3Error::with_message(
S3ErrorCode::InvalidArgument,
"x-amz-append-action must be 'complete' or 'abort'".to_string(),
));
}
}
.map_err(ApiError::from)?;
let output = PutObjectOutput {
e_tag: obj_info.etag.clone(),
version_id: obj_info.version_id.map(|v| v.to_string()),
..Default::default()
};
return Ok(S3Response::new(output));
}
let repoptions =
get_must_replicate_options(&mt2, "".to_string(), ReplicationStatusType::Empty, ReplicationType::Object, opts.clone());
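
For reference, the handler lines above drive appends purely through request headers: `x-amz-object-append: true` with a non-negative `x-amz-append-position` performs an append write, while `x-amz-append-action` set to `complete` or `abort` finalizes or discards the append state and cannot be combined with the append flag. The sketch below only assembles such a header set with the `http` crate; the helper name `append_headers` is invented for illustration, nothing is signed or sent, and the header names are taken from the code above rather than from any published S3 extension.

```rust
use http::{HeaderMap, HeaderValue};

/// Illustrative helper (hypothetical): build the append-related headers that
/// the handler above validates. A data-appending PUT carries the append flag
/// plus a position; a finalizing call sends only an append action.
fn append_headers(position: Option<u64>, action: Option<&str>) -> Result<HeaderMap, String> {
    let mut headers = HeaderMap::new();
    match (position, action) {
        // Append more data at the given offset.
        (Some(pos), None) => {
            headers.insert("x-amz-object-append", HeaderValue::from_static("true"));
            headers.insert(
                "x-amz-append-position",
                HeaderValue::from_str(&pos.to_string()).map_err(|e| e.to_string())?,
            );
        }
        // Seal or discard the appendable object; the handler rejects any other action value.
        (None, Some(action)) if action == "complete" || action == "abort" => {
            headers.insert("x-amz-append-action", HeaderValue::from_str(action).map_err(|e| e.to_string())?);
        }
        _ => return Err("send either an append position or a single valid append action".to_string()),
    }
    Ok(headers)
}
```

A client following this scheme would issue ordinary PutObject requests with the first header pair as the position advances, then one final request carrying only `x-amz-append-action: complete`; the action branch above returns before the normal upload path runs, so that last request's body is not processed.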


@@ -58,7 +58,7 @@ export RUSTFS_EXTERNAL_ADDRESS=":9000"
#export RUSTFS_OBS_METER_INTERVAL=1 # Sampling interval in seconds
#export RUSTFS_OBS_SERVICE_NAME=rustfs # Service name
#export RUSTFS_OBS_SERVICE_VERSION=0.1.0 # Service version
export RUSTFS_OBS_ENVIRONMENT=production # Environment name
export RUSTFS_OBS_ENVIRONMENT=develop # Environment name
export RUSTFS_OBS_LOGGER_LEVEL=info # Log level, supports trace, debug, info, warn, error
export RUSTFS_OBS_LOCAL_LOGGING_ENABLED=true # Whether to enable local logging
export RUSTFS_OBS_LOG_DIRECTORY="$current_dir/deploy/logs" # Log directory
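
Because the observability settings in this hunk are plain environment variables, they can be sanity-checked before the server is started. The snippet below is a hypothetical stand-alone probe written for illustration, not the configuration loader rustfs actually uses; it merely echoes the RUSTFS_OBS_* variables exported above so an operator can confirm what the process will inherit.

```rust
use std::env;

// Hypothetical probe: print the observability variables from the script above,
// or note when one is unset.
fn main() {
    for key in [
        "RUSTFS_OBS_ENVIRONMENT",
        "RUSTFS_OBS_LOGGER_LEVEL",
        "RUSTFS_OBS_LOCAL_LOGGING_ENABLED",
        "RUSTFS_OBS_LOG_DIRECTORY",
    ] {
        match env::var(key) {
            Ok(value) => println!("{key}={value}"),
            Err(_) => println!("{key} is not set"),
        }
    }
}
```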
@@ -123,6 +123,10 @@ export RUSTFS_COMPRESSION_ENABLED=true # Whether to enable compression
#export RUSTFS_REGION="us-east-1"
export RUSTFS_ENABLE_SCANNER=false
export RUSTFS_ENABLE_HEAL=false
# Event message configuration
#export RUSTFS_EVENT_CONFIG="./deploy/config/event.example.toml"