mirror of
https://github.com/rustfs/rustfs.git
synced 2026-01-16 17:20:33 +00:00
merge main
This commit is contained in:
18
AGENTS.md
18
AGENTS.md
@@ -1,8 +1,18 @@
|
||||
# Repository Guidelines
|
||||
|
||||
## ⚠️ Pre-Commit Checklist (MANDATORY)
|
||||
**Before EVERY commit, you MUST run and pass ALL of the following:**
|
||||
```bash
|
||||
cargo fmt --all --check # Code formatting
|
||||
cargo clippy --all-targets --all-features -- -D warnings # Lints
|
||||
cargo test --workspace --exclude e2e_test # Unit tests
|
||||
```
|
||||
Or simply run `make pre-commit` which covers all checks. **DO NOT commit if any check fails.**
|
||||
|
||||
## Communication Rules
|
||||
- Respond to the user in Chinese; use English in all other contexts.
|
||||
- Code and documentation must be written in English only. Chinese text is allowed solely as test data/fixtures when a case explicitly requires Chinese-language content for validation.
|
||||
- **Pull Request titles and descriptions must be written in English** to ensure consistency and accessibility for all contributors.
|
||||
|
||||
## Project Structure & Module Organization
|
||||
The workspace root hosts shared dependencies in `Cargo.toml`. The service binary lives under `rustfs/src/main.rs`, while reusable crates sit in `crates/` (`crypto`, `iam`, `kms`, and `e2e_test`). Local fixtures for standalone flows reside in `test_standalone/`, deployment manifests are under `deploy/`, Docker assets sit at the root, and automation lives in `scripts/`. Skim each crate’s README or module docs before contributing changes.
|
||||
@@ -19,7 +29,13 @@ Co-locate unit tests with their modules and give behavior-led names such as `han
|
||||
When fixing bugs or adding features, include regression tests that capture the new behavior so future changes cannot silently break it.
|
||||
|
||||
## Commit & Pull Request Guidelines
|
||||
Work on feature branches (e.g., `feat/...`) after syncing `main`. Follow Conventional Commits under 72 characters (e.g., `feat: add kms key rotation`). Each commit must compile, format cleanly, and pass `make pre-commit`. Open PRs with a concise summary, note verification commands, link relevant issues, and wait for reviewer approval.
|
||||
Work on feature branches (e.g., `feat/...`) after syncing `main`. Follow Conventional Commits under 72 characters (e.g., `feat: add kms key rotation`). Each commit must compile, format cleanly, and pass `make pre-commit`.
|
||||
|
||||
**Pull Request Requirements:**
|
||||
- PR titles and descriptions **MUST be written in English**
|
||||
- Open PRs with a concise summary, note verification commands, link relevant issues
|
||||
- Follow the PR template format and fill in all required sections
|
||||
- Wait for reviewer approval before merging
|
||||
|
||||
## Security & Configuration Tips
|
||||
Do not commit secrets or cloud credentials; prefer environment variables or vault tooling. Review IAM- and KMS-related changes with a second maintainer. Confirm proxy settings before running sensitive tests to avoid leaking traffic outside localhost.
|
||||
|
||||
@@ -186,6 +186,39 @@ cargo clippy --all-targets --all-features -- -D warnings
|
||||
cargo clippy --fix --all-targets --all-features
|
||||
```
|
||||
|
||||
## 📝 Pull Request Guidelines
|
||||
|
||||
### Language Requirements
|
||||
|
||||
**All Pull Request titles and descriptions MUST be written in English.**
|
||||
|
||||
This ensures:
|
||||
- Consistency across all contributions
|
||||
- Accessibility for international contributors
|
||||
- Better integration with automated tools and CI/CD systems
|
||||
- Clear communication in a globally understood language
|
||||
|
||||
#### PR Description Requirements
|
||||
|
||||
When creating a Pull Request, ensure:
|
||||
|
||||
1. **Title**: Use English and follow Conventional Commits format (e.g., `fix: improve s3-tests readiness detection`)
|
||||
2. **Description**: Write in English, following the PR template format
|
||||
3. **Code Comments**: Must be in English (as per coding standards)
|
||||
4. **Commit Messages**: Must be in English (as per commit guidelines)
|
||||
|
||||
#### PR Template
|
||||
|
||||
Always use the PR template (`.github/pull_request_template.md`) and fill in all sections:
|
||||
- Type of Change
|
||||
- Related Issues
|
||||
- Summary of Changes
|
||||
- Checklist
|
||||
- Impact
|
||||
- Additional Notes
|
||||
|
||||
**Note**: While you may communicate with reviewers in Chinese during discussions, the PR itself (title, description, and all formal documentation) must be in English.
|
||||
|
||||
---
|
||||
|
||||
Following these guidelines ensures high code quality and smooth collaboration across the RustFS project! 🚀
|
||||
|
||||
32
Cargo.lock
generated
32
Cargo.lock
generated
@@ -3419,18 +3419,6 @@ dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "enum-as-inner"
|
||||
version = "0.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc"
|
||||
dependencies = [
|
||||
"heck",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.113",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "enum_dispatch"
|
||||
version = "0.3.13"
|
||||
@@ -7767,6 +7755,7 @@ dependencies = [
|
||||
"hyper",
|
||||
"hyper-util",
|
||||
"jemalloc_pprof",
|
||||
"libc",
|
||||
"libsystemd",
|
||||
"libunftp",
|
||||
"matchit 0.9.1",
|
||||
@@ -7816,7 +7805,6 @@ dependencies = [
|
||||
"socket2",
|
||||
"ssh-key",
|
||||
"subtle",
|
||||
"sysctl",
|
||||
"sysinfo",
|
||||
"thiserror 2.0.17",
|
||||
"tikv-jemalloc-ctl",
|
||||
@@ -8131,11 +8119,9 @@ name = "rustfs-lock"
|
||||
version = "0.0.5"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
"crossbeam-queue",
|
||||
"futures",
|
||||
"parking_lot",
|
||||
"rustfs-protos",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"smallvec",
|
||||
@@ -8144,7 +8130,6 @@ dependencies = [
|
||||
"tokio",
|
||||
"tonic",
|
||||
"tracing",
|
||||
"url",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
@@ -8270,7 +8255,6 @@ dependencies = [
|
||||
"flatbuffers",
|
||||
"prost 0.14.1",
|
||||
"rustfs-common",
|
||||
"rustfs-credentials",
|
||||
"tonic",
|
||||
"tonic-prost",
|
||||
"tonic-prost-build",
|
||||
@@ -9718,20 +9702,6 @@ dependencies = [
|
||||
"syn 2.0.113",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sysctl"
|
||||
version = "0.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cca424247104946a59dacd27eaad296223b7feec3d168a6dd04585183091eb0b"
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"byteorder",
|
||||
"enum-as-inner",
|
||||
"libc",
|
||||
"thiserror 2.0.17",
|
||||
"walkdir",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sysinfo"
|
||||
version = "0.37.2"
|
||||
|
||||
@@ -236,7 +236,6 @@ snafu = "0.8.9"
|
||||
snap = "1.1.1"
|
||||
starshard = { version = "0.6.0", features = ["rayon", "async", "serde"] }
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
sysctl = "0.7.1"
|
||||
sysinfo = "0.37.2"
|
||||
temp-env = "0.3.6"
|
||||
tempfile = "3.24.0"
|
||||
|
||||
@@ -37,6 +37,8 @@ datas = "datas"
|
||||
bre = "bre"
|
||||
abd = "abd"
|
||||
mak = "mak"
|
||||
# s3-tests original test names (cannot be changed)
|
||||
nonexisted = "nonexisted"
|
||||
|
||||
[files]
|
||||
extend-exclude = []
|
||||
extend-exclude = []
|
||||
|
||||
@@ -170,12 +170,6 @@ pub const KI_B: usize = 1024;
|
||||
/// Default value: 1048576
|
||||
pub const MI_B: usize = 1024 * 1024;
|
||||
|
||||
/// Environment variable for gRPC authentication token
|
||||
/// Used to set the authentication token for gRPC communication
|
||||
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
|
||||
/// Default value: No default value. RUSTFS_SECRET_KEY value is recommended.
|
||||
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -27,11 +27,11 @@ pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
|
||||
/// Example: --secret-key rustfsadmin
|
||||
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";
|
||||
|
||||
/// Environment variable for gRPC authentication token
|
||||
/// Used to set the authentication token for gRPC communication
|
||||
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
|
||||
/// Environment variable for RPC authentication token
|
||||
/// Used to set the authentication token for RPC communication
|
||||
/// Example: RUSTFS_RPC_SECRET=your_token_here
|
||||
/// Default value: No default value. RUSTFS_SECRET_KEY value is recommended.
|
||||
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";
|
||||
pub const ENV_RPC_SECRET: &str = "RUSTFS_RPC_SECRET";
|
||||
|
||||
/// IAM Policy Types
|
||||
/// Used to differentiate between embedded and inherited policies
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{DEFAULT_SECRET_KEY, ENV_GRPC_AUTH_TOKEN, IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
|
||||
use crate::{DEFAULT_SECRET_KEY, ENV_RPC_SECRET, IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
|
||||
use rand::{Rng, RngCore};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
@@ -25,8 +25,8 @@ use time::OffsetDateTime;
|
||||
/// Global active credentials
|
||||
static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();
|
||||
|
||||
/// Global gRPC authentication token
|
||||
static GLOBAL_GRPC_AUTH_TOKEN: OnceLock<String> = OnceLock::new();
|
||||
/// Global RPC authentication token
|
||||
pub static GLOBAL_RUSTFS_RPC_SECRET: OnceLock<String> = OnceLock::new();
|
||||
|
||||
/// Initialize the global action credentials
|
||||
///
|
||||
@@ -181,15 +181,15 @@ pub fn gen_secret_key(length: usize) -> std::io::Result<String> {
|
||||
Ok(key_str)
|
||||
}
|
||||
|
||||
/// Get the gRPC authentication token from environment variable
|
||||
/// Get the RPC authentication token from environment variable
|
||||
///
|
||||
/// # Returns
|
||||
/// * `String` - The gRPC authentication token
|
||||
/// * `String` - The RPC authentication token
|
||||
///
|
||||
pub fn get_grpc_token() -> String {
|
||||
GLOBAL_GRPC_AUTH_TOKEN
|
||||
pub fn get_rpc_token() -> String {
|
||||
GLOBAL_RUSTFS_RPC_SECRET
|
||||
.get_or_init(|| {
|
||||
env::var(ENV_GRPC_AUTH_TOKEN)
|
||||
env::var(ENV_RPC_SECRET)
|
||||
.unwrap_or_else(|_| get_global_secret_key_opt().unwrap_or_else(|| DEFAULT_SECRET_KEY.to_string()))
|
||||
})
|
||||
.clone()
|
||||
|
||||
@@ -15,11 +15,12 @@
|
||||
|
||||
use async_trait::async_trait;
|
||||
use rustfs_ecstore::disk::endpoint::Endpoint;
|
||||
use rustfs_lock::client::{LockClient, local::LocalClient, remote::RemoteClient};
|
||||
use rustfs_ecstore::rpc::RemoteClient;
|
||||
use rustfs_lock::client::{LockClient, local::LocalClient};
|
||||
use rustfs_lock::types::{LockInfo, LockResponse, LockStats};
|
||||
use rustfs_lock::{LockId, LockMetadata, LockPriority, LockType};
|
||||
use rustfs_lock::{LockRequest, NamespaceLock, NamespaceLockManager};
|
||||
use rustfs_protos::{node_service_time_out_client, proto_gen::node_service::GenerallyLockRequest};
|
||||
use rustfs_protos::proto_gen::node_service::GenerallyLockRequest;
|
||||
use serial_test::serial;
|
||||
use std::{collections::HashMap, error::Error, sync::Arc, time::Duration};
|
||||
use tokio::time::sleep;
|
||||
@@ -156,7 +157,7 @@ async fn test_lock_unlock_rpc() -> Result<(), Box<dyn Error>> {
|
||||
};
|
||||
let args = serde_json::to_string(&args)?;
|
||||
|
||||
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
|
||||
let mut client = RemoteClient::new(CLUSTER_ADDR.to_string()).get_client().await?;
|
||||
println!("got client");
|
||||
let request = Request::new(GenerallyLockRequest { args: args.clone() });
|
||||
|
||||
@@ -614,7 +615,7 @@ async fn test_rpc_read_lock() -> Result<(), Box<dyn Error>> {
|
||||
};
|
||||
let args_str = serde_json::to_string(&args)?;
|
||||
|
||||
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
|
||||
let mut client = RemoteClient::new(CLUSTER_ADDR.to_string()).get_client().await?;
|
||||
|
||||
// First read lock
|
||||
let request = Request::new(GenerallyLockRequest { args: args_str.clone() });
|
||||
@@ -669,7 +670,7 @@ async fn test_lock_refresh() -> Result<(), Box<dyn Error>> {
|
||||
};
|
||||
let args_str = serde_json::to_string(&args)?;
|
||||
|
||||
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
|
||||
let mut client = RemoteClient::new(CLUSTER_ADDR.to_string()).get_client().await?;
|
||||
|
||||
// Acquire lock
|
||||
let request = Request::new(GenerallyLockRequest { args: args_str.clone() });
|
||||
@@ -713,7 +714,7 @@ async fn test_force_unlock() -> Result<(), Box<dyn Error>> {
|
||||
};
|
||||
let args_str = serde_json::to_string(&args)?;
|
||||
|
||||
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
|
||||
let mut client = RemoteClient::new(CLUSTER_ADDR.to_string()).get_client().await?;
|
||||
|
||||
// Acquire lock
|
||||
let request = Request::new(GenerallyLockRequest { args: args_str.clone() });
|
||||
|
||||
@@ -17,11 +17,11 @@ use crate::common::workspace_root;
|
||||
use futures::future::join_all;
|
||||
use rmp_serde::{Deserializer, Serializer};
|
||||
use rustfs_ecstore::disk::{VolumeInfo, WalkDirOptions};
|
||||
use rustfs_ecstore::rpc::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
|
||||
use rustfs_filemeta::{MetaCacheEntry, MetacacheReader, MetacacheWriter};
|
||||
use rustfs_protos::proto_gen::node_service::WalkDirRequest;
|
||||
use rustfs_protos::{
|
||||
models::{PingBody, PingBodyBuilder},
|
||||
node_service_time_out_client,
|
||||
proto_gen::node_service::{
|
||||
ListVolumesRequest, LocalStorageInfoRequest, MakeVolumeRequest, PingRequest, PingResponse, ReadAllRequest,
|
||||
},
|
||||
@@ -53,7 +53,9 @@ async fn ping() -> Result<(), Box<dyn Error>> {
|
||||
assert!(decoded_payload.is_ok());
|
||||
|
||||
// Create client
|
||||
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
|
||||
let mut client =
|
||||
node_service_time_out_client(&CLUSTER_ADDR.to_string(), TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
|
||||
.await?;
|
||||
|
||||
// Construct PingRequest
|
||||
let request = Request::new(PingRequest {
|
||||
@@ -78,7 +80,9 @@ async fn ping() -> Result<(), Box<dyn Error>> {
|
||||
#[tokio::test]
|
||||
#[ignore = "requires running RustFS server at localhost:9000"]
|
||||
async fn make_volume() -> Result<(), Box<dyn Error>> {
|
||||
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
|
||||
let mut client =
|
||||
node_service_time_out_client(&CLUSTER_ADDR.to_string(), TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
|
||||
.await?;
|
||||
let request = Request::new(MakeVolumeRequest {
|
||||
disk: "data".to_string(),
|
||||
volume: "dandan".to_string(),
|
||||
@@ -96,7 +100,9 @@ async fn make_volume() -> Result<(), Box<dyn Error>> {
|
||||
#[tokio::test]
|
||||
#[ignore = "requires running RustFS server at localhost:9000"]
|
||||
async fn list_volumes() -> Result<(), Box<dyn Error>> {
|
||||
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
|
||||
let mut client =
|
||||
node_service_time_out_client(&CLUSTER_ADDR.to_string(), TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
|
||||
.await?;
|
||||
let request = Request::new(ListVolumesRequest {
|
||||
disk: "data".to_string(),
|
||||
});
|
||||
@@ -126,7 +132,9 @@ async fn walk_dir() -> Result<(), Box<dyn Error>> {
|
||||
let (rd, mut wr) = tokio::io::duplex(1024);
|
||||
let mut buf = Vec::new();
|
||||
opts.serialize(&mut Serializer::new(&mut buf))?;
|
||||
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
|
||||
let mut client =
|
||||
node_service_time_out_client(&CLUSTER_ADDR.to_string(), TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
|
||||
.await?;
|
||||
let disk_path = std::env::var_os("RUSTFS_DISK_PATH").map(PathBuf::from).unwrap_or_else(|| {
|
||||
let mut path = workspace_root();
|
||||
path.push("target");
|
||||
@@ -179,7 +187,9 @@ async fn walk_dir() -> Result<(), Box<dyn Error>> {
|
||||
#[tokio::test]
|
||||
#[ignore = "requires running RustFS server at localhost:9000"]
|
||||
async fn read_all() -> Result<(), Box<dyn Error>> {
|
||||
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
|
||||
let mut client =
|
||||
node_service_time_out_client(&CLUSTER_ADDR.to_string(), TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
|
||||
.await?;
|
||||
let request = Request::new(ReadAllRequest {
|
||||
disk: "data".to_string(),
|
||||
volume: "ff".to_string(),
|
||||
@@ -197,7 +207,9 @@ async fn read_all() -> Result<(), Box<dyn Error>> {
|
||||
#[tokio::test]
|
||||
#[ignore = "requires running RustFS server at localhost:9000"]
|
||||
async fn storage_info() -> Result<(), Box<dyn Error>> {
|
||||
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
|
||||
let mut client =
|
||||
node_service_time_out_client(&CLUSTER_ADDR.to_string(), TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
|
||||
.await?;
|
||||
let request = Request::new(LocalStorageInfoRequest { metrics: true });
|
||||
|
||||
let response = client.local_storage_info(request).await?.into_inner();
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use crate::data_usage::{DATA_USAGE_CACHE_NAME, DATA_USAGE_ROOT, load_data_usage_from_backend};
|
||||
use crate::error::{Error, Result};
|
||||
use crate::rpc::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
|
||||
use crate::{
|
||||
disk::endpoint::Endpoint,
|
||||
global::{GLOBAL_BOOT_TIME, GLOBAL_Endpoints},
|
||||
@@ -29,7 +30,6 @@ use rustfs_madmin::{
|
||||
};
|
||||
use rustfs_protos::{
|
||||
models::{PingBody, PingBodyBuilder},
|
||||
node_service_time_out_client,
|
||||
proto_gen::node_service::{PingRequest, PingResponse},
|
||||
};
|
||||
use std::{
|
||||
@@ -101,9 +101,9 @@ async fn is_server_resolvable(endpoint: &Endpoint) -> Result<()> {
|
||||
let decoded_payload = flatbuffers::root::<PingBody>(finished_data);
|
||||
assert!(decoded_payload.is_ok());
|
||||
|
||||
let mut client = node_service_time_out_client(&addr)
|
||||
let mut client = node_service_time_out_client(&addr, TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
|
||||
let request = Request::new(PingRequest {
|
||||
version: 1,
|
||||
|
||||
@@ -386,7 +386,7 @@ impl LocalDiskWrapper {
|
||||
let stored_disk_id = self.disk.get_disk_id().await?;
|
||||
|
||||
if stored_disk_id != want_id {
|
||||
return Err(Error::other(format!("Disk ID mismatch wanted {:?}, got {:?}", want_id, stored_disk_id)));
|
||||
return Err(Error::other(format!("Disk ID mismatch wanted {want_id:?}, got {stored_disk_id:?}")));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -470,7 +470,7 @@ impl LocalDiskWrapper {
|
||||
// Timeout occurred, mark disk as potentially faulty and decrement waiting counter
|
||||
self.health.decrement_waiting();
|
||||
warn!("disk operation timeout after {:?}", timeout_duration);
|
||||
Err(DiskError::other(format!("disk operation timeout after {:?}", timeout_duration)))
|
||||
Err(DiskError::other(format!("disk operation timeout after {timeout_duration:?}")))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
88
crates/ecstore/src/rpc/client.rs
Normal file
88
crates/ecstore/src/rpc/client.rs
Normal file
@@ -0,0 +1,88 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::error::Error;
|
||||
|
||||
use http::Method;
|
||||
use rustfs_common::GLOBAL_CONN_MAP;
|
||||
use rustfs_protos::{create_new_channel, proto_gen::node_service::node_service_client::NodeServiceClient};
|
||||
use tonic::{service::interceptor::InterceptedService, transport::Channel};
|
||||
use tracing::debug;
|
||||
|
||||
use crate::rpc::{TONIC_RPC_PREFIX, gen_signature_headers};
|
||||
|
||||
/// 3. Subsequent calls will attempt fresh connections
|
||||
/// 4. If node is still down, connection will fail fast (3s timeout)
|
||||
pub async fn node_service_time_out_client(
|
||||
addr: &String,
|
||||
interceptor: TonicInterceptor,
|
||||
) -> Result<NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>, Box<dyn Error>> {
|
||||
// Try to get cached channel
|
||||
let cached_channel = { GLOBAL_CONN_MAP.read().await.get(addr).cloned() };
|
||||
|
||||
let channel = match cached_channel {
|
||||
Some(channel) => {
|
||||
debug!("Using cached gRPC channel for: {}", addr);
|
||||
channel
|
||||
}
|
||||
None => {
|
||||
// No cached connection, create new one
|
||||
create_new_channel(addr).await?
|
||||
}
|
||||
};
|
||||
|
||||
Ok(NodeServiceClient::with_interceptor(channel, interceptor))
|
||||
}
|
||||
|
||||
pub async fn node_service_time_out_client_no_auth(
|
||||
addr: &String,
|
||||
) -> Result<NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>, Box<dyn Error>> {
|
||||
node_service_time_out_client(addr, TonicInterceptor::NoOp(NoOpInterceptor)).await
|
||||
}
|
||||
|
||||
pub struct TonicSignatureInterceptor;
|
||||
|
||||
impl tonic::service::Interceptor for TonicSignatureInterceptor {
|
||||
fn call(&mut self, mut req: tonic::Request<()>) -> Result<tonic::Request<()>, tonic::Status> {
|
||||
let headers = gen_signature_headers(TONIC_RPC_PREFIX, &Method::GET);
|
||||
req.metadata_mut().as_mut().extend(headers);
|
||||
Ok(req)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn gen_tonic_signature_interceptor() -> TonicSignatureInterceptor {
|
||||
TonicSignatureInterceptor
|
||||
}
|
||||
|
||||
pub struct NoOpInterceptor;
|
||||
|
||||
impl tonic::service::Interceptor for NoOpInterceptor {
|
||||
fn call(&mut self, req: tonic::Request<()>) -> Result<tonic::Request<()>, tonic::Status> {
|
||||
Ok(req)
|
||||
}
|
||||
}
|
||||
|
||||
pub enum TonicInterceptor {
|
||||
Signature(TonicSignatureInterceptor),
|
||||
NoOp(NoOpInterceptor),
|
||||
}
|
||||
|
||||
impl tonic::service::Interceptor for TonicInterceptor {
|
||||
fn call(&mut self, req: tonic::Request<()>) -> Result<tonic::Request<()>, tonic::Status> {
|
||||
match self {
|
||||
TonicInterceptor::Signature(interceptor) => interceptor.call(req),
|
||||
TonicInterceptor::NoOp(interceptor) => interceptor.call(req),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -15,11 +15,8 @@
|
||||
use base64::Engine as _;
|
||||
use base64::engine::general_purpose;
|
||||
use hmac::{Hmac, KeyInit, Mac};
|
||||
use http::HeaderMap;
|
||||
use http::HeaderValue;
|
||||
use http::Method;
|
||||
use http::Uri;
|
||||
use rustfs_credentials::get_global_action_cred;
|
||||
use http::{HeaderMap, HeaderValue, Method, Uri};
|
||||
use rustfs_credentials::{DEFAULT_SECRET_KEY, ENV_RPC_SECRET, get_global_secret_key_opt};
|
||||
use sha2::Sha256;
|
||||
use time::OffsetDateTime;
|
||||
use tracing::error;
|
||||
@@ -29,15 +26,20 @@ type HmacSha256 = Hmac<Sha256>;
|
||||
const SIGNATURE_HEADER: &str = "x-rustfs-signature";
|
||||
const TIMESTAMP_HEADER: &str = "x-rustfs-timestamp";
|
||||
const SIGNATURE_VALID_DURATION: i64 = 300; // 5 minutes
|
||||
pub const TONIC_RPC_PREFIX: &str = "/node_service.NodeService";
|
||||
|
||||
/// Get the shared secret for HMAC signing
|
||||
fn get_shared_secret() -> String {
|
||||
if let Some(cred) = get_global_action_cred() {
|
||||
cred.secret_key
|
||||
} else {
|
||||
// Fallback to environment variable if global credentials are not available
|
||||
std::env::var("RUSTFS_RPC_SECRET").unwrap_or_else(|_| "rustfs-default-secret".to_string())
|
||||
}
|
||||
rustfs_credentials::GLOBAL_RUSTFS_RPC_SECRET
|
||||
.get_or_init(|| {
|
||||
rustfs_utils::get_env_str(
|
||||
ENV_RPC_SECRET,
|
||||
get_global_secret_key_opt()
|
||||
.unwrap_or_else(|| DEFAULT_SECRET_KEY.to_string())
|
||||
.as_str(),
|
||||
)
|
||||
})
|
||||
.clone()
|
||||
}
|
||||
|
||||
/// Generate HMAC-SHA256 signature for the given data
|
||||
@@ -57,13 +59,25 @@ fn generate_signature(secret: &str, url: &str, method: &Method, timestamp: i64)
|
||||
|
||||
/// Build headers with authentication signature
|
||||
pub fn build_auth_headers(url: &str, method: &Method, headers: &mut HeaderMap) {
|
||||
let auth_headers = gen_signature_headers(url, method);
|
||||
|
||||
headers.extend(auth_headers);
|
||||
}
|
||||
|
||||
pub fn gen_signature_headers(url: &str, method: &Method) -> HeaderMap {
|
||||
let secret = get_shared_secret();
|
||||
let timestamp = OffsetDateTime::now_utc().unix_timestamp();
|
||||
|
||||
let signature = generate_signature(&secret, url, method, timestamp);
|
||||
|
||||
headers.insert(SIGNATURE_HEADER, HeaderValue::from_str(&signature).unwrap());
|
||||
headers.insert(TIMESTAMP_HEADER, HeaderValue::from_str(×tamp.to_string()).unwrap());
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(SIGNATURE_HEADER, HeaderValue::from_str(&signature).expect("Invalid header value"));
|
||||
headers.insert(
|
||||
TIMESTAMP_HEADER,
|
||||
HeaderValue::from_str(×tamp.to_string()).expect("Invalid header value"),
|
||||
);
|
||||
|
||||
headers
|
||||
}
|
||||
|
||||
/// Verify the request signature for RPC requests
|
||||
|
||||
@@ -12,12 +12,18 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod client;
|
||||
mod http_auth;
|
||||
mod peer_rest_client;
|
||||
mod peer_s3_client;
|
||||
mod remote_disk;
|
||||
mod remote_locker;
|
||||
|
||||
pub use http_auth::{build_auth_headers, verify_rpc_signature};
|
||||
pub use client::{
|
||||
TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client, node_service_time_out_client_no_auth,
|
||||
};
|
||||
pub use http_auth::{TONIC_RPC_PREFIX, build_auth_headers, gen_signature_headers, verify_rpc_signature};
|
||||
pub use peer_rest_client::PeerRestClient;
|
||||
pub use peer_s3_client::{LocalPeerS3Client, PeerS3Client, RemotePeerS3Client, S3PeerSys};
|
||||
pub use remote_disk::RemoteDisk;
|
||||
pub use remote_locker::RemoteClient;
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
|
||||
use crate::{
|
||||
endpoints::EndpointServerPools,
|
||||
global::is_dist_erasure,
|
||||
@@ -25,21 +26,22 @@ use rustfs_madmin::{
|
||||
metrics::RealtimeMetrics,
|
||||
net::NetInfo,
|
||||
};
|
||||
use rustfs_protos::{
|
||||
evict_failed_connection, node_service_time_out_client,
|
||||
proto_gen::node_service::{
|
||||
DeleteBucketMetadataRequest, DeletePolicyRequest, DeleteServiceAccountRequest, DeleteUserRequest, GetCpusRequest,
|
||||
GetMemInfoRequest, GetMetricsRequest, GetNetInfoRequest, GetOsInfoRequest, GetPartitionsRequest, GetProcInfoRequest,
|
||||
GetSeLinuxInfoRequest, GetSysConfigRequest, GetSysErrorsRequest, LoadBucketMetadataRequest, LoadGroupRequest,
|
||||
LoadPolicyMappingRequest, LoadPolicyRequest, LoadRebalanceMetaRequest, LoadServiceAccountRequest,
|
||||
LoadTransitionTierConfigRequest, LoadUserRequest, LocalStorageInfoRequest, Mss, ReloadPoolMetaRequest,
|
||||
ReloadSiteReplicationConfigRequest, ServerInfoRequest, SignalServiceRequest, StartProfilingRequest, StopRebalanceRequest,
|
||||
},
|
||||
use rustfs_protos::evict_failed_connection;
|
||||
use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient;
|
||||
use rustfs_protos::proto_gen::node_service::{
|
||||
DeleteBucketMetadataRequest, DeletePolicyRequest, DeleteServiceAccountRequest, DeleteUserRequest, GetCpusRequest,
|
||||
GetMemInfoRequest, GetMetricsRequest, GetNetInfoRequest, GetOsInfoRequest, GetPartitionsRequest, GetProcInfoRequest,
|
||||
GetSeLinuxInfoRequest, GetSysConfigRequest, GetSysErrorsRequest, LoadBucketMetadataRequest, LoadGroupRequest,
|
||||
LoadPolicyMappingRequest, LoadPolicyRequest, LoadRebalanceMetaRequest, LoadServiceAccountRequest,
|
||||
LoadTransitionTierConfigRequest, LoadUserRequest, LocalStorageInfoRequest, Mss, ReloadPoolMetaRequest,
|
||||
ReloadSiteReplicationConfigRequest, ServerInfoRequest, SignalServiceRequest, StartProfilingRequest, StopRebalanceRequest,
|
||||
};
|
||||
use rustfs_utils::XHost;
|
||||
use serde::{Deserialize, Serialize as _};
|
||||
use std::{collections::HashMap, io::Cursor, time::SystemTime};
|
||||
use tonic::Request;
|
||||
use tonic::service::interceptor::InterceptedService;
|
||||
use tonic::transport::Channel;
|
||||
use tracing::warn;
|
||||
|
||||
pub const PEER_RESTSIGNAL: &str = "signal";
|
||||
@@ -83,6 +85,12 @@ impl PeerRestClient {
|
||||
(remote, all)
|
||||
}
|
||||
|
||||
pub async fn get_client(&self) -> Result<NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>> {
|
||||
node_service_time_out_client(&self.grid_host, TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))
|
||||
}
|
||||
|
||||
/// Evict the connection to this peer from the global cache.
|
||||
/// This should be called when communication with this peer fails.
|
||||
pub async fn evict_connection(&self) {
|
||||
@@ -101,9 +109,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
async fn local_storage_info_inner(&self) -> Result<rustfs_madmin::StorageInfo> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(LocalStorageInfoRequest { metrics: true });
|
||||
|
||||
let response = client.local_storage_info(request).await?.into_inner();
|
||||
@@ -131,9 +137,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
async fn server_info_inner(&self) -> Result<ServerProperties> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(ServerInfoRequest { metrics: true });
|
||||
|
||||
let response = client.server_info(request).await?.into_inner();
|
||||
@@ -152,9 +156,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn get_cpus(&self) -> Result<Cpus> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(GetCpusRequest {});
|
||||
|
||||
let response = client.get_cpus(request).await?.into_inner();
|
||||
@@ -173,9 +175,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn get_net_info(&self) -> Result<NetInfo> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(GetNetInfoRequest {});
|
||||
|
||||
let response = client.get_net_info(request).await?.into_inner();
|
||||
@@ -194,9 +194,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn get_partitions(&self) -> Result<Partitions> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(GetPartitionsRequest {});
|
||||
|
||||
let response = client.get_partitions(request).await?.into_inner();
|
||||
@@ -215,9 +213,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn get_os_info(&self) -> Result<OsInfo> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(GetOsInfoRequest {});
|
||||
|
||||
let response = client.get_os_info(request).await?.into_inner();
|
||||
@@ -236,9 +232,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn get_se_linux_info(&self) -> Result<SysService> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(GetSeLinuxInfoRequest {});
|
||||
|
||||
let response = client.get_se_linux_info(request).await?.into_inner();
|
||||
@@ -257,9 +251,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn get_sys_config(&self) -> Result<SysConfig> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(GetSysConfigRequest {});
|
||||
|
||||
let response = client.get_sys_config(request).await?.into_inner();
|
||||
@@ -278,9 +270,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn get_sys_errors(&self) -> Result<SysErrors> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(GetSysErrorsRequest {});
|
||||
|
||||
let response = client.get_sys_errors(request).await?.into_inner();
|
||||
@@ -299,9 +289,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn get_mem_info(&self) -> Result<MemInfo> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(GetMemInfoRequest {});
|
||||
|
||||
let response = client.get_mem_info(request).await?.into_inner();
|
||||
@@ -320,9 +308,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn get_metrics(&self, t: MetricType, opts: &CollectMetricsOpts) -> Result<RealtimeMetrics> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let mut buf_t = Vec::new();
|
||||
t.serialize(&mut Serializer::new(&mut buf_t))?;
|
||||
let mut buf_o = Vec::new();
|
||||
@@ -348,9 +334,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn get_proc_info(&self) -> Result<ProcInfo> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(GetProcInfoRequest {});
|
||||
|
||||
let response = client.get_proc_info(request).await?.into_inner();
|
||||
@@ -369,9 +353,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn start_profiling(&self, profiler: &str) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(StartProfilingRequest {
|
||||
profiler: profiler.to_string(),
|
||||
});
|
||||
@@ -403,9 +385,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn load_bucket_metadata(&self, bucket: &str) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(LoadBucketMetadataRequest {
|
||||
bucket: bucket.to_string(),
|
||||
});
|
||||
@@ -421,9 +401,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn delete_bucket_metadata(&self, bucket: &str) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(DeleteBucketMetadataRequest {
|
||||
bucket: bucket.to_string(),
|
||||
});
|
||||
@@ -439,9 +417,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn delete_policy(&self, policy: &str) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(DeletePolicyRequest {
|
||||
policy_name: policy.to_string(),
|
||||
});
|
||||
@@ -457,9 +433,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn load_policy(&self, policy: &str) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(LoadPolicyRequest {
|
||||
policy_name: policy.to_string(),
|
||||
});
|
||||
@@ -475,9 +449,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn load_policy_mapping(&self, user_or_group: &str, user_type: u64, is_group: bool) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(LoadPolicyMappingRequest {
|
||||
user_or_group: user_or_group.to_string(),
|
||||
user_type,
|
||||
@@ -495,9 +467,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn delete_user(&self, access_key: &str) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(DeleteUserRequest {
|
||||
access_key: access_key.to_string(),
|
||||
});
|
||||
@@ -517,9 +487,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn delete_service_account(&self, access_key: &str) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(DeleteServiceAccountRequest {
|
||||
access_key: access_key.to_string(),
|
||||
});
|
||||
@@ -539,9 +507,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn load_user(&self, access_key: &str, temp: bool) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(LoadUserRequest {
|
||||
access_key: access_key.to_string(),
|
||||
temp,
|
||||
@@ -562,9 +528,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn load_service_account(&self, access_key: &str) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(LoadServiceAccountRequest {
|
||||
access_key: access_key.to_string(),
|
||||
});
|
||||
@@ -584,9 +548,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn load_group(&self, group: &str) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(LoadGroupRequest {
|
||||
group: group.to_string(),
|
||||
});
|
||||
@@ -606,9 +568,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn reload_site_replication_config(&self) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(ReloadSiteReplicationConfigRequest {});
|
||||
|
||||
let response = client.reload_site_replication_config(request).await?.into_inner();
|
||||
@@ -622,9 +582,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn signal_service(&self, sig: u64, sub_sys: &str, dry_run: bool, _exec_at: SystemTime) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let mut vars = HashMap::new();
|
||||
vars.insert(PEER_RESTSIGNAL.to_string(), sig.to_string());
|
||||
vars.insert(PEER_RESTSUB_SYS.to_string(), sub_sys.to_string());
|
||||
@@ -644,23 +602,17 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn get_metacache_listing(&self) -> Result<()> {
|
||||
let _client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let _client = self.get_client().await?;
|
||||
todo!()
|
||||
}
|
||||
|
||||
pub async fn update_metacache_listing(&self) -> Result<()> {
|
||||
let _client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let _client = self.get_client().await?;
|
||||
todo!()
|
||||
}
|
||||
|
||||
pub async fn reload_pool_meta(&self) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(ReloadPoolMetaRequest {});
|
||||
|
||||
let response = client.reload_pool_meta(request).await?.into_inner();
|
||||
@@ -675,9 +627,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn stop_rebalance(&self) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(StopRebalanceRequest {});
|
||||
|
||||
let response = client.stop_rebalance(request).await?.into_inner();
|
||||
@@ -692,9 +642,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn load_rebalance_meta(&self, start_rebalance: bool) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(LoadRebalanceMetaRequest { start_rebalance });
|
||||
|
||||
let response = client.load_rebalance_meta(request).await?.into_inner();
|
||||
@@ -711,9 +659,7 @@ impl PeerRestClient {
|
||||
}
|
||||
|
||||
pub async fn load_transition_tier_config(&self) -> Result<()> {
|
||||
let mut client = node_service_time_out_client(&self.grid_host)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(LoadTransitionTierConfigRequest {});
|
||||
|
||||
let response = client.load_transition_tier_config(request).await?.into_inner();
|
||||
|
||||
@@ -18,6 +18,7 @@ use crate::disk::error::{Error, Result};
|
||||
use crate::disk::error_reduce::{BUCKET_OP_IGNORED_ERRS, is_all_buckets_not_found, reduce_write_quorum_errs};
|
||||
use crate::disk::{DiskAPI, DiskStore, disk_store::get_max_timeout_duration};
|
||||
use crate::global::GLOBAL_LOCAL_DISK_MAP;
|
||||
use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
|
||||
use crate::store::all_local_disk;
|
||||
use crate::store_utils::is_reserved_or_invalid_bucket;
|
||||
use crate::{
|
||||
@@ -32,7 +33,7 @@ use async_trait::async_trait;
|
||||
use futures::future::join_all;
|
||||
use rustfs_common::heal_channel::{DriveState, HealItemType, HealOpts, RUSTFS_RESERVED_BUCKET};
|
||||
use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
|
||||
use rustfs_protos::node_service_time_out_client;
|
||||
use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient;
|
||||
use rustfs_protos::proto_gen::node_service::{
|
||||
DeleteBucketRequest, GetBucketInfoRequest, HealBucketRequest, ListBucketRequest, MakeBucketRequest,
|
||||
};
|
||||
@@ -40,6 +41,8 @@ use std::{collections::HashMap, fmt::Debug, sync::Arc, time::Duration};
|
||||
use tokio::{net::TcpStream, sync::RwLock, time};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tonic::Request;
|
||||
use tonic::service::interceptor::InterceptedService;
|
||||
use tonic::transport::Channel;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
type Client = Arc<Box<dyn PeerS3Client>>;
|
||||
@@ -587,6 +590,12 @@ impl RemotePeerS3Client {
|
||||
client
|
||||
}
|
||||
|
||||
pub async fn get_client(&self) -> Result<NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>> {
|
||||
node_service_time_out_client(&self.addr, TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))
|
||||
}
|
||||
|
||||
pub fn get_addr(&self) -> String {
|
||||
self.addr.clone()
|
||||
}
|
||||
@@ -664,7 +673,7 @@ impl RemotePeerS3Client {
|
||||
async fn perform_connectivity_check(addr: &str) -> Result<()> {
|
||||
use tokio::time::timeout;
|
||||
|
||||
let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {}", e)))?;
|
||||
let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {e}")))?;
|
||||
|
||||
let Some(host) = url.host_str() else {
|
||||
return Err(Error::other("No host in URL".to_string()));
|
||||
@@ -675,7 +684,7 @@ impl RemotePeerS3Client {
|
||||
// Try to establish TCP connection
|
||||
match timeout(CHECK_TIMEOUT_DURATION, TcpStream::connect((host, port))).await {
|
||||
Ok(Ok(_)) => Ok(()),
|
||||
_ => Err(Error::other(format!("Cannot connect to {}:{}", host, port))),
|
||||
_ => Err(Error::other(format!("Cannot connect to {host}:{port}"))),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -714,7 +723,7 @@ impl RemotePeerS3Client {
|
||||
// Timeout occurred, mark peer as potentially faulty
|
||||
self.health.decrement_waiting();
|
||||
warn!("Remote peer operation timeout after {:?}", timeout_duration);
|
||||
Err(Error::other(format!("Remote peer operation timeout after {:?}", timeout_duration)))
|
||||
Err(Error::other(format!("Remote peer operation timeout after {timeout_duration:?}")))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -730,9 +739,7 @@ impl PeerS3Client for RemotePeerS3Client {
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let options: String = serde_json::to_string(opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(HealBucketRequest {
|
||||
bucket: bucket.to_string(),
|
||||
options,
|
||||
@@ -762,9 +769,7 @@ impl PeerS3Client for RemotePeerS3Client {
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let options = serde_json::to_string(opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(ListBucketRequest { options });
|
||||
let response = client.list_bucket(request).await?.into_inner();
|
||||
if !response.success {
|
||||
@@ -790,9 +795,7 @@ impl PeerS3Client for RemotePeerS3Client {
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let options = serde_json::to_string(opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(MakeBucketRequest {
|
||||
name: bucket.to_string(),
|
||||
options,
|
||||
@@ -818,9 +821,7 @@ impl PeerS3Client for RemotePeerS3Client {
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let options = serde_json::to_string(opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let request = Request::new(GetBucketInfoRequest {
|
||||
bucket: bucket.to_string(),
|
||||
options,
|
||||
@@ -845,9 +846,7 @@ impl PeerS3Client for RemotePeerS3Client {
|
||||
async fn delete_bucket(&self, bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> {
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let mut client = self.get_client().await?;
|
||||
|
||||
let request = Request::new(DeleteBucketRequest {
|
||||
bucket: bucket.to_string(),
|
||||
|
||||
@@ -12,37 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::disk::{
|
||||
CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions,
|
||||
ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions,
|
||||
disk_store::{
|
||||
CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE, get_max_timeout_duration,
|
||||
},
|
||||
endpoint::Endpoint,
|
||||
local::ScanGuard,
|
||||
};
|
||||
use crate::disk::{FileReader, FileWriter};
|
||||
use crate::disk::{disk_store::DiskHealthTracker, error::DiskError};
|
||||
use crate::{
|
||||
disk::error::{Error, Result},
|
||||
rpc::build_auth_headers,
|
||||
};
|
||||
use bytes::Bytes;
|
||||
use futures::lock::Mutex;
|
||||
use http::{HeaderMap, HeaderValue, Method, header::CONTENT_TYPE};
|
||||
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
|
||||
use rustfs_protos::proto_gen::node_service::RenamePartRequest;
|
||||
use rustfs_protos::{
|
||||
node_service_time_out_client,
|
||||
proto_gen::node_service::{
|
||||
CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
|
||||
DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest,
|
||||
ReadMetadataRequest, ReadMultipleRequest, ReadPartsRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest,
|
||||
RenameFileRequest, StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
|
||||
},
|
||||
};
|
||||
use rustfs_rio::{HttpReader, HttpWriter};
|
||||
use rustfs_utils::string::parse_bool_with_default;
|
||||
use std::{
|
||||
path::PathBuf,
|
||||
sync::{
|
||||
@@ -51,11 +20,48 @@ use std::{
|
||||
},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures::lock::Mutex;
|
||||
use http::{HeaderMap, HeaderValue, Method, header::CONTENT_TYPE};
|
||||
use rustfs_protos::proto_gen::node_service::{
|
||||
CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
|
||||
DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest,
|
||||
ReadMetadataRequest, ReadMultipleRequest, ReadPartsRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest,
|
||||
RenameFileRequest, StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
|
||||
node_service_client::NodeServiceClient,
|
||||
};
|
||||
use rustfs_utils::string::parse_bool_with_default;
|
||||
use tokio::time;
|
||||
use tokio::{io::AsyncWrite, net::TcpStream, time::timeout};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tonic::Request;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::disk::{disk_store::DiskHealthTracker, error::DiskError, local::ScanGuard};
|
||||
use crate::{
|
||||
disk::error::{Error, Result},
|
||||
rpc::build_auth_headers,
|
||||
};
|
||||
use crate::{
|
||||
disk::{
|
||||
CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions,
|
||||
ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions,
|
||||
disk_store::{
|
||||
CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE,
|
||||
get_max_timeout_duration,
|
||||
},
|
||||
endpoint::Endpoint,
|
||||
},
|
||||
rpc::client::gen_tonic_signature_interceptor,
|
||||
};
|
||||
use crate::{
|
||||
disk::{FileReader, FileWriter},
|
||||
rpc::client::{TonicInterceptor, node_service_time_out_client},
|
||||
};
|
||||
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
|
||||
use rustfs_protos::proto_gen::node_service::RenamePartRequest;
|
||||
use rustfs_rio::{HttpReader, HttpWriter};
|
||||
use tokio::{io::AsyncWrite, net::TcpStream, time::timeout};
|
||||
use tonic::{Request, service::interceptor::InterceptedService, transport::Channel};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -205,7 +211,7 @@ impl RemoteDisk {
|
||||
|
||||
/// Perform basic connectivity check for remote disk
|
||||
async fn perform_connectivity_check(addr: &str) -> Result<()> {
|
||||
let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {}", e)))?;
|
||||
let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {e}")))?;
|
||||
|
||||
let Some(host) = url.host_str() else {
|
||||
return Err(Error::other("No host in URL".to_string()));
|
||||
@@ -219,7 +225,7 @@ impl RemoteDisk {
|
||||
drop(stream);
|
||||
Ok(())
|
||||
}
|
||||
_ => Err(Error::other(format!("Cannot connect to {}:{}", host, port))),
|
||||
_ => Err(Error::other(format!("Cannot connect to {host}:{port}"))),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -259,10 +265,16 @@ impl RemoteDisk {
|
||||
// Timeout occurred, mark disk as potentially faulty
|
||||
self.health.decrement_waiting();
|
||||
warn!("Remote disk operation timeout after {:?}", timeout_duration);
|
||||
Err(Error::other(format!("Remote disk operation timeout after {:?}", timeout_duration)))
|
||||
Err(Error::other(format!("Remote disk operation timeout after {timeout_duration:?}")))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_client(&self) -> Result<NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>> {
|
||||
node_service_time_out_client(&self.addr, TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: all api need to handle errors
|
||||
@@ -347,7 +359,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(MakeVolumeRequest {
|
||||
@@ -374,7 +387,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(MakeVolumesRequest {
|
||||
@@ -401,7 +415,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(ListVolumesRequest {
|
||||
@@ -433,7 +448,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(StatVolumeRequest {
|
||||
@@ -462,7 +478,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(DeleteVolumeRequest {
|
||||
@@ -549,7 +566,8 @@ impl DiskAPI for RemoteDisk {
|
||||
let file_info = serde_json::to_string(&fi)?;
|
||||
let opts = serde_json::to_string(&opts)?;
|
||||
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(DeleteVersionRequest {
|
||||
@@ -607,7 +625,7 @@ impl DiskAPI for RemoteDisk {
|
||||
}
|
||||
});
|
||||
}
|
||||
let mut client = match node_service_time_out_client(&self.addr).await {
|
||||
let mut client = match self.get_client().await {
|
||||
Ok(client) => client,
|
||||
Err(err) => {
|
||||
let mut errors = Vec::with_capacity(versions.len());
|
||||
@@ -678,7 +696,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(DeletePathsRequest {
|
||||
@@ -707,7 +726,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(WriteMetadataRequest {
|
||||
@@ -731,7 +751,8 @@ impl DiskAPI for RemoteDisk {
|
||||
}
|
||||
|
||||
async fn read_metadata(&self, volume: &str, path: &str) -> Result<Bytes> {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(ReadMetadataRequest {
|
||||
@@ -757,7 +778,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(UpdateMetadataRequest {
|
||||
@@ -795,7 +817,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(ReadVersionRequest {
|
||||
@@ -827,7 +850,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(ReadXlRequest {
|
||||
@@ -866,7 +890,8 @@ impl DiskAPI for RemoteDisk {
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let file_info = serde_json::to_string(&fi)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(RenameDataRequest {
|
||||
@@ -901,7 +926,8 @@ impl DiskAPI for RemoteDisk {
|
||||
return Err(DiskError::FaultyDisk);
|
||||
}
|
||||
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(ListDirRequest {
|
||||
@@ -1062,7 +1088,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(RenameFileRequest {
|
||||
@@ -1092,7 +1119,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(RenamePartRequest {
|
||||
@@ -1124,7 +1152,8 @@ impl DiskAPI for RemoteDisk {
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let options = serde_json::to_string(&opt)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(DeleteRequest {
|
||||
@@ -1154,7 +1183,8 @@ impl DiskAPI for RemoteDisk {
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let file_info = serde_json::to_string(&fi)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(VerifyFileRequest {
|
||||
@@ -1183,7 +1213,8 @@ impl DiskAPI for RemoteDisk {
|
||||
async fn read_parts(&self, bucket: &str, paths: &[String]) -> Result<Vec<ObjectPartInfo>> {
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(ReadPartsRequest {
|
||||
@@ -1213,7 +1244,8 @@ impl DiskAPI for RemoteDisk {
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let file_info = serde_json::to_string(&fi)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(CheckPartsRequest {
|
||||
@@ -1245,7 +1277,8 @@ impl DiskAPI for RemoteDisk {
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let read_multiple_req = serde_json::to_string(&req)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(ReadMultipleRequest {
|
||||
@@ -1278,7 +1311,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(WriteAllRequest {
|
||||
@@ -1307,7 +1341,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(ReadAllRequest {
|
||||
@@ -1336,7 +1371,8 @@ impl DiskAPI for RemoteDisk {
|
||||
}
|
||||
|
||||
let opts = serde_json::to_string(&opts)?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(DiskInfoRequest {
|
||||
|
||||
@@ -12,24 +12,21 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
|
||||
use async_trait::async_trait;
|
||||
use rustfs_protos::{
|
||||
node_service_time_out_client,
|
||||
proto_gen::node_service::{GenerallyLockRequest, PingRequest},
|
||||
};
|
||||
use rustfs_lock::types::{LockId, LockMetadata, LockPriority};
|
||||
use rustfs_lock::{LockClient, LockError, LockInfo, LockResponse, LockStats, LockStatus, Result};
|
||||
use rustfs_lock::{LockRequest, LockType};
|
||||
use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient;
|
||||
use rustfs_protos::proto_gen::node_service::{GenerallyLockRequest, PingRequest};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
use tonic::Request;
|
||||
use tonic::service::interceptor::InterceptedService;
|
||||
use tonic::transport::Channel;
|
||||
use tracing::info;
|
||||
|
||||
use crate::{
|
||||
error::{LockError, Result},
|
||||
types::{LockId, LockInfo, LockRequest, LockResponse, LockStats},
|
||||
};
|
||||
|
||||
use super::LockClient;
|
||||
|
||||
/// Remote lock client implementation
|
||||
#[derive(Debug)]
|
||||
pub struct RemoteClient {
|
||||
@@ -67,24 +64,28 @@ impl RemoteClient {
|
||||
LockRequest {
|
||||
lock_id: lock_id.clone(),
|
||||
resource: lock_id.resource.clone(),
|
||||
lock_type: crate::types::LockType::Exclusive, // Type doesn't matter for unlock
|
||||
lock_type: LockType::Exclusive, // Type doesn't matter for unlock
|
||||
owner: owner.to_string(),
|
||||
acquire_timeout: std::time::Duration::from_secs(30),
|
||||
ttl: std::time::Duration::from_secs(300),
|
||||
metadata: crate::types::LockMetadata::default(),
|
||||
priority: crate::types::LockPriority::Normal,
|
||||
metadata: LockMetadata::default(),
|
||||
priority: LockPriority::Normal,
|
||||
deadlock_detection: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_client(&self) -> Result<NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>> {
|
||||
node_service_time_out_client(&self.addr, TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
|
||||
.await
|
||||
.map_err(|err| LockError::internal(format!("can not get client, err: {err}")))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl LockClient for RemoteClient {
|
||||
async fn acquire_exclusive(&self, request: &LockRequest) -> Result<LockResponse> {
|
||||
info!("remote acquire_exclusive for {}", request.resource);
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let req = Request::new(GenerallyLockRequest {
|
||||
args: serde_json::to_string(&request)
|
||||
.map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,
|
||||
@@ -111,7 +112,7 @@ impl LockClient for RemoteClient {
|
||||
id: request.lock_id.clone(),
|
||||
resource: request.resource.clone(),
|
||||
lock_type: request.lock_type,
|
||||
status: crate::types::LockStatus::Acquired,
|
||||
status: LockStatus::Acquired,
|
||||
owner: request.owner.clone(),
|
||||
acquired_at: std::time::SystemTime::now(),
|
||||
expires_at: std::time::SystemTime::now() + request.ttl,
|
||||
@@ -133,9 +134,7 @@ impl LockClient for RemoteClient {
|
||||
|
||||
async fn acquire_shared(&self, request: &LockRequest) -> Result<LockResponse> {
|
||||
info!("remote acquire_shared for {}", request.resource);
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let req = Request::new(GenerallyLockRequest {
|
||||
args: serde_json::to_string(&request)
|
||||
.map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,
|
||||
@@ -162,7 +161,7 @@ impl LockClient for RemoteClient {
|
||||
id: request.lock_id.clone(),
|
||||
resource: request.resource.clone(),
|
||||
lock_type: request.lock_type,
|
||||
status: crate::types::LockStatus::Acquired,
|
||||
status: LockStatus::Acquired,
|
||||
owner: request.owner.clone(),
|
||||
acquired_at: std::time::SystemTime::now(),
|
||||
expires_at: std::time::SystemTime::now() + request.ttl,
|
||||
@@ -195,9 +194,7 @@ impl LockClient for RemoteClient {
|
||||
|
||||
let request_string = serde_json::to_string(&unlock_request)
|
||||
.map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?;
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
|
||||
let mut client = self.get_client().await?;
|
||||
|
||||
// Try UnLock first (for exclusive locks)
|
||||
let req = Request::new(GenerallyLockRequest {
|
||||
@@ -238,9 +235,7 @@ impl LockClient for RemoteClient {
|
||||
async fn refresh(&self, lock_id: &LockId) -> Result<bool> {
|
||||
info!("remote refresh for {}", lock_id);
|
||||
let refresh_request = self.create_unlock_request(lock_id, "remote");
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let req = Request::new(GenerallyLockRequest {
|
||||
args: serde_json::to_string(&refresh_request)
|
||||
.map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,
|
||||
@@ -259,9 +254,7 @@ impl LockClient for RemoteClient {
|
||||
async fn force_release(&self, lock_id: &LockId) -> Result<bool> {
|
||||
info!("remote force_release for {}", lock_id);
|
||||
let force_request = self.create_unlock_request(lock_id, "remote");
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
|
||||
let mut client = self.get_client().await?;
|
||||
let req = Request::new(GenerallyLockRequest {
|
||||
args: serde_json::to_string(&force_request)
|
||||
.map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,
|
||||
@@ -283,9 +276,7 @@ impl LockClient for RemoteClient {
|
||||
// Since there's no direct status query in the gRPC service,
|
||||
// we attempt a non-blocking lock acquisition to check if the resource is available
|
||||
let status_request = self.create_unlock_request(lock_id, "remote");
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
|
||||
let mut client = self.get_client().await?;
|
||||
|
||||
// Try to acquire a very short-lived lock to test availability
|
||||
let req = Request::new(GenerallyLockRequest {
|
||||
@@ -316,14 +307,14 @@ impl LockClient for RemoteClient {
|
||||
Ok(Some(LockInfo {
|
||||
id: lock_id.clone(),
|
||||
resource: lock_id.as_str().to_string(),
|
||||
lock_type: crate::types::LockType::Exclusive, // We can't know the exact type
|
||||
status: crate::types::LockStatus::Acquired,
|
||||
lock_type: LockType::Exclusive, // We can't know the exact type
|
||||
status: LockStatus::Acquired,
|
||||
owner: "unknown".to_string(), // Remote client can't determine owner
|
||||
acquired_at: std::time::SystemTime::now(),
|
||||
expires_at: std::time::SystemTime::now() + std::time::Duration::from_secs(3600),
|
||||
last_refreshed: std::time::SystemTime::now(),
|
||||
metadata: crate::types::LockMetadata::default(),
|
||||
priority: crate::types::LockPriority::Normal,
|
||||
metadata: LockMetadata::default(),
|
||||
priority: LockPriority::Normal,
|
||||
wait_start_time: None,
|
||||
}))
|
||||
}
|
||||
@@ -333,14 +324,14 @@ impl LockClient for RemoteClient {
|
||||
Ok(Some(LockInfo {
|
||||
id: lock_id.clone(),
|
||||
resource: lock_id.as_str().to_string(),
|
||||
lock_type: crate::types::LockType::Exclusive,
|
||||
status: crate::types::LockStatus::Acquired,
|
||||
lock_type: LockType::Exclusive,
|
||||
status: LockStatus::Acquired,
|
||||
owner: "unknown".to_string(),
|
||||
acquired_at: std::time::SystemTime::now(),
|
||||
expires_at: std::time::SystemTime::now() + std::time::Duration::from_secs(3600),
|
||||
last_refreshed: std::time::SystemTime::now(),
|
||||
metadata: crate::types::LockMetadata::default(),
|
||||
priority: crate::types::LockPriority::Normal,
|
||||
metadata: LockMetadata::default(),
|
||||
priority: LockPriority::Normal,
|
||||
wait_start_time: None,
|
||||
}))
|
||||
}
|
||||
@@ -372,7 +363,7 @@ impl LockClient for RemoteClient {
|
||||
|
||||
async fn is_online(&self) -> bool {
|
||||
// Use Ping interface to test if remote service is online
|
||||
let mut client = match node_service_time_out_client(&self.addr).await {
|
||||
let mut client = match self.get_client().await {
|
||||
Ok(client) => client,
|
||||
Err(_) => {
|
||||
info!("remote client {} connection failed", self.addr);
|
||||
@@ -34,8 +34,8 @@ use crate::disk::endpoint::{Endpoint, EndpointType};
|
||||
use crate::disk::{DiskAPI, DiskInfo, DiskInfoOptions};
|
||||
use crate::error::{Error, Result};
|
||||
use crate::error::{
|
||||
StorageError, is_err_bucket_exists, is_err_invalid_upload_id, is_err_object_not_found, is_err_read_quorum,
|
||||
is_err_version_not_found, to_object_err,
|
||||
StorageError, is_err_bucket_exists, is_err_bucket_not_found, is_err_invalid_upload_id, is_err_object_not_found,
|
||||
is_err_read_quorum, is_err_version_not_found, to_object_err,
|
||||
};
|
||||
use crate::global::{
|
||||
DISK_ASSUME_UNKNOWN_SIZE, DISK_FILL_FRACTION, DISK_MIN_INODES, DISK_RESERVE_FRACTION, GLOBAL_BOOT_TIME,
|
||||
@@ -87,6 +87,46 @@ use tokio_util::sync::CancellationToken;
|
||||
use tracing::{debug, error, info, instrument, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Check if a directory contains any xl.meta files (indicating actual S3 objects)
|
||||
/// This is used to determine if a bucket is empty for deletion purposes.
|
||||
async fn has_xlmeta_files(path: &std::path::Path) -> bool {
|
||||
use crate::disk::STORAGE_FORMAT_FILE;
|
||||
use tokio::fs;
|
||||
|
||||
let mut stack = vec![path.to_path_buf()];
|
||||
|
||||
while let Some(current_path) = stack.pop() {
|
||||
let mut entries = match fs::read_dir(¤t_path).await {
|
||||
Ok(entries) => entries,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
while let Ok(Some(entry)) = entries.next_entry().await {
|
||||
let file_name = entry.file_name();
|
||||
let file_name_str = file_name.to_string_lossy();
|
||||
|
||||
// Skip hidden files/directories (like .rustfs.sys)
|
||||
if file_name_str.starts_with('.') {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if this is an xl.meta file
|
||||
if file_name_str == STORAGE_FORMAT_FILE {
|
||||
return true;
|
||||
}
|
||||
|
||||
// If it's a directory, add to stack for further exploration
|
||||
if let Ok(file_type) = entry.file_type().await
|
||||
&& file_type.is_dir()
|
||||
{
|
||||
stack.push(entry.path());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
const MAX_UPLOADS_LIST: usize = 10000;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -1328,14 +1368,36 @@ impl StorageAPI for ECStore {
|
||||
|
||||
// TODO: nslock
|
||||
|
||||
let mut opts = opts.clone();
|
||||
// Check bucket exists before deletion (per S3 API spec)
|
||||
// If bucket doesn't exist, return NoSuchBucket error
|
||||
if let Err(err) = self.peer_sys.get_bucket_info(bucket, &BucketOptions::default()).await {
|
||||
// Convert DiskError to StorageError for comparison
|
||||
let storage_err: StorageError = err.into();
|
||||
if is_err_bucket_not_found(&storage_err) {
|
||||
return Err(StorageError::BucketNotFound(bucket.to_string()));
|
||||
}
|
||||
return Err(to_object_err(storage_err, vec![bucket]));
|
||||
}
|
||||
|
||||
// Check bucket is empty before deletion (per S3 API spec)
|
||||
// If bucket is not empty (contains actual objects with xl.meta files) and force
|
||||
// is not set, return BucketNotEmpty error.
|
||||
// Note: Empty directories (left after object deletion) should NOT count as objects.
|
||||
if !opts.force {
|
||||
// FIXME: check bucket exists
|
||||
opts.force = true
|
||||
let local_disks = all_local_disk().await;
|
||||
for disk in local_disks.iter() {
|
||||
// Check if bucket directory contains any xl.meta files (actual objects)
|
||||
// We recursively scan for xl.meta files to determine if bucket has objects
|
||||
// Use the disk's root path to construct bucket path
|
||||
let bucket_path = disk.path().join(bucket);
|
||||
if has_xlmeta_files(&bucket_path).await {
|
||||
return Err(StorageError::BucketNotEmpty(bucket.to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.peer_sys
|
||||
.delete_bucket(bucket, &opts)
|
||||
.delete_bucket(bucket, opts)
|
||||
.await
|
||||
.map_err(|e| to_object_err(e.into(), vec![bucket]))?;
|
||||
|
||||
|
||||
@@ -742,7 +742,21 @@ impl ObjectInfo {
|
||||
|
||||
let inlined = fi.inline_data();
|
||||
|
||||
// TODO:expires
|
||||
// Parse expires from metadata (HTTP date format RFC 7231 or ISO 8601)
|
||||
let expires = fi.metadata.get("expires").and_then(|s| {
|
||||
// Try parsing as ISO 8601 first
|
||||
time::OffsetDateTime::parse(s, &time::format_description::well_known::Iso8601::DEFAULT)
|
||||
.or_else(|_| {
|
||||
// Try RFC 2822 format
|
||||
time::OffsetDateTime::parse(s, &time::format_description::well_known::Rfc2822)
|
||||
})
|
||||
.or_else(|_| {
|
||||
// Try RFC 3339 format
|
||||
time::OffsetDateTime::parse(s, &time::format_description::well_known::Rfc3339)
|
||||
})
|
||||
.ok()
|
||||
});
|
||||
|
||||
// TODO:ReplicationState
|
||||
|
||||
let transitioned_object = TransitionedObject {
|
||||
@@ -800,6 +814,7 @@ impl ObjectInfo {
|
||||
user_tags,
|
||||
content_type,
|
||||
content_encoding,
|
||||
expires,
|
||||
num_versions: fi.num_versions,
|
||||
successor_mod_time: fi.successor_mod_time,
|
||||
etag,
|
||||
|
||||
@@ -30,15 +30,12 @@ workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
bytes.workspace = true
|
||||
futures.workspace = true
|
||||
rustfs-protos.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
tokio.workspace = true
|
||||
tonic.workspace = true
|
||||
tracing.workspace = true
|
||||
url.workspace = true
|
||||
uuid.workspace = true
|
||||
thiserror.workspace = true
|
||||
parking_lot.workspace = true
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
pub mod local;
|
||||
pub mod remote;
|
||||
// pub mod remote;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use std::sync::Arc;
|
||||
@@ -74,10 +74,10 @@ impl ClientFactory {
|
||||
Arc::new(local::LocalClient::new())
|
||||
}
|
||||
|
||||
/// Create remote client
|
||||
pub fn create_remote(endpoint: String) -> Arc<dyn LockClient> {
|
||||
Arc::new(remote::RemoteClient::new(endpoint))
|
||||
}
|
||||
// /// Create remote client
|
||||
// pub fn create_remote(endpoint: String) -> Arc<dyn LockClient> {
|
||||
// Arc::new(remote::RemoteClient::new(endpoint))
|
||||
// }
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -85,15 +85,6 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::types::LockType;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_client_factory() {
|
||||
let local_client = ClientFactory::create_local();
|
||||
assert!(local_client.is_local().await);
|
||||
|
||||
let remote_client = ClientFactory::create_remote("http://localhost:8080".to_string());
|
||||
assert!(!remote_client.is_local().await);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_local_client_basic_operations() {
|
||||
let client = ClientFactory::create_local();
|
||||
|
||||
@@ -37,7 +37,7 @@ pub mod types;
|
||||
// Re-export main types for easy access
|
||||
pub use crate::{
|
||||
// Client interfaces
|
||||
client::{LockClient, local::LocalClient, remote::RemoteClient},
|
||||
client::{LockClient, local::LocalClient},
|
||||
// Error types
|
||||
error::{LockError, Result},
|
||||
// Fast Lock System exports
|
||||
|
||||
@@ -22,10 +22,42 @@ use strum::{EnumString, IntoStaticStr};
|
||||
|
||||
use super::{Error as IamError, Validator, utils::wildcard};
|
||||
|
||||
#[derive(Serialize, Clone, Default, Debug)]
|
||||
/// A set of policy actions that serializes as a single string when containing one item,
|
||||
/// or as an array when containing multiple items (matching AWS S3 API format).
|
||||
#[derive(Clone, Default, Debug)]
|
||||
pub struct ActionSet(pub HashSet<Action>);
|
||||
|
||||
impl Serialize for ActionSet {
|
||||
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
use serde::ser::SerializeSeq;
|
||||
|
||||
if self.0.len() == 1 {
|
||||
// Serialize single action as string (not array)
|
||||
if let Some(action) = self.0.iter().next() {
|
||||
let action_str: &str = action.into();
|
||||
return serializer.serialize_str(action_str);
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize multiple actions as array
|
||||
let mut seq = serializer.serialize_seq(Some(self.0.len()))?;
|
||||
for action in &self.0 {
|
||||
let action_str: &str = action.into();
|
||||
seq.serialize_element(action_str)?;
|
||||
}
|
||||
seq.end()
|
||||
}
|
||||
}
|
||||
|
||||
impl ActionSet {
|
||||
/// Returns true if the action set is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.0.is_empty()
|
||||
}
|
||||
|
||||
pub fn is_match(&self, action: &Action) -> bool {
|
||||
for act in self.0.iter() {
|
||||
if act.is_match(action) {
|
||||
@@ -150,6 +182,10 @@ impl Action {
|
||||
impl TryFrom<&str> for Action {
|
||||
type Error = Error;
|
||||
fn try_from(value: &str) -> std::result::Result<Self, Self::Error> {
|
||||
// Support wildcard "*" which matches all S3 actions (AWS S3 standard)
|
||||
if value == "*" {
|
||||
return Ok(Self::S3Action(S3Action::AllActions));
|
||||
}
|
||||
if value.starts_with(Self::S3_PREFIX) {
|
||||
Ok(Self::S3Action(
|
||||
S3Action::try_from(value).map_err(|_| IamError::InvalidAction(value.into()))?,
|
||||
@@ -559,3 +595,53 @@ pub enum KmsAction {
|
||||
#[strum(serialize = "kms:*")]
|
||||
AllActions,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::collections::HashSet;
|
||||
|
||||
#[test]
|
||||
fn test_action_wildcard_parsing() {
|
||||
// Test that "*" parses to S3Action::AllActions
|
||||
let action = Action::try_from("*").expect("Should parse wildcard");
|
||||
assert!(matches!(action, Action::S3Action(S3Action::AllActions)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_actionset_serialize_single_element() {
|
||||
// Single element should serialize as string
|
||||
let mut set = HashSet::new();
|
||||
set.insert(Action::S3Action(S3Action::GetObjectAction));
|
||||
let actionset = ActionSet(set);
|
||||
|
||||
let json = serde_json::to_string(&actionset).expect("Should serialize");
|
||||
assert_eq!(json, "\"s3:GetObject\"");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_actionset_serialize_multiple_elements() {
|
||||
// Multiple elements should serialize as array
|
||||
let mut set = HashSet::new();
|
||||
set.insert(Action::S3Action(S3Action::GetObjectAction));
|
||||
set.insert(Action::S3Action(S3Action::PutObjectAction));
|
||||
let actionset = ActionSet(set);
|
||||
|
||||
let json = serde_json::to_string(&actionset).expect("Should serialize");
|
||||
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
|
||||
assert!(parsed.is_array());
|
||||
let arr = parsed.as_array().expect("Should be array");
|
||||
assert_eq!(arr.len(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_actionset_wildcard_serialization() {
|
||||
// Wildcard action should serialize correctly
|
||||
let mut set = HashSet::new();
|
||||
set.insert(Action::try_from("*").expect("Should parse wildcard"));
|
||||
let actionset = ActionSet(set);
|
||||
|
||||
let json = serde_json::to_string(&actionset).expect("Should serialize");
|
||||
assert_eq!(json, "\"s3:*\"");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,6 +21,13 @@ use super::Validator;
|
||||
#[derive(Serialize, Deserialize, Clone, Default, Debug)]
|
||||
pub struct ID(pub String);
|
||||
|
||||
impl ID {
|
||||
/// Returns true if the ID is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.0.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl Validator for ID {
|
||||
type Error = Error;
|
||||
/// if id is a valid utf string, then it is valid.
|
||||
|
||||
@@ -177,9 +177,11 @@ pub struct BucketPolicyArgs<'a> {
|
||||
pub object: &'a str,
|
||||
}
|
||||
|
||||
/// Bucket Policy with AWS S3-compatible JSON serialization.
|
||||
/// Empty optional fields are omitted from output to match AWS format.
|
||||
#[derive(Serialize, Deserialize, Clone, Default, Debug)]
|
||||
pub struct BucketPolicy {
|
||||
#[serde(default, rename = "ID")]
|
||||
#[serde(default, rename = "ID", skip_serializing_if = "ID::is_empty")]
|
||||
pub id: ID,
|
||||
#[serde(rename = "Version")]
|
||||
pub version: String,
|
||||
@@ -950,4 +952,106 @@ mod test {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bucket_policy_serialize_omits_empty_fields() {
|
||||
use crate::policy::action::{Action, ActionSet, S3Action};
|
||||
use crate::policy::resource::{Resource, ResourceSet};
|
||||
use crate::policy::{Effect, Functions, Principal};
|
||||
|
||||
// Create a BucketPolicy with empty optional fields
|
||||
// Use JSON deserialization to create Principal (since aws field is private)
|
||||
let principal: Principal = serde_json::from_str(r#"{"AWS": "*"}"#).expect("Should parse principal");
|
||||
|
||||
let mut policy = BucketPolicy {
|
||||
id: ID::default(), // Empty ID
|
||||
version: "2012-10-17".to_string(),
|
||||
statements: vec![BPStatement {
|
||||
sid: ID::default(), // Empty Sid
|
||||
effect: Effect::Allow,
|
||||
principal,
|
||||
actions: ActionSet::default(),
|
||||
not_actions: ActionSet::default(), // Empty NotAction
|
||||
resources: ResourceSet::default(),
|
||||
not_resources: ResourceSet::default(), // Empty NotResource
|
||||
conditions: Functions::default(), // Empty Condition
|
||||
}],
|
||||
};
|
||||
|
||||
// Set actions and resources (required fields)
|
||||
policy.statements[0]
|
||||
.actions
|
||||
.0
|
||||
.insert(Action::S3Action(S3Action::ListBucketAction));
|
||||
policy.statements[0]
|
||||
.resources
|
||||
.0
|
||||
.insert(Resource::try_from("arn:aws:s3:::test/*").unwrap());
|
||||
|
||||
let json = serde_json::to_string(&policy).expect("Should serialize");
|
||||
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
|
||||
|
||||
// Verify empty fields are omitted
|
||||
assert!(!parsed.as_object().unwrap().contains_key("ID"), "Empty ID should be omitted");
|
||||
|
||||
let statement = &parsed["Statement"][0];
|
||||
assert!(!statement.as_object().unwrap().contains_key("Sid"), "Empty Sid should be omitted");
|
||||
assert!(
|
||||
!statement.as_object().unwrap().contains_key("NotAction"),
|
||||
"Empty NotAction should be omitted"
|
||||
);
|
||||
assert!(
|
||||
!statement.as_object().unwrap().contains_key("NotResource"),
|
||||
"Empty NotResource should be omitted"
|
||||
);
|
||||
assert!(
|
||||
!statement.as_object().unwrap().contains_key("Condition"),
|
||||
"Empty Condition should be omitted"
|
||||
);
|
||||
|
||||
// Verify required fields are present
|
||||
assert_eq!(parsed["Version"], "2012-10-17");
|
||||
assert_eq!(statement["Effect"], "Allow");
|
||||
assert_eq!(statement["Principal"]["AWS"], "*");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bucket_policy_serialize_single_action_as_string() {
|
||||
use crate::policy::action::{Action, ActionSet, S3Action};
|
||||
use crate::policy::resource::{Resource, ResourceSet};
|
||||
use crate::policy::{Effect, Principal};
|
||||
|
||||
// Use JSON deserialization to create Principal (since aws field is private)
|
||||
let principal: Principal = serde_json::from_str(r#"{"AWS": "*"}"#).expect("Should parse principal");
|
||||
|
||||
let mut policy = BucketPolicy {
|
||||
version: "2012-10-17".to_string(),
|
||||
statements: vec![BPStatement {
|
||||
effect: Effect::Allow,
|
||||
principal,
|
||||
actions: ActionSet::default(),
|
||||
resources: ResourceSet::default(),
|
||||
..Default::default()
|
||||
}],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Single action
|
||||
policy.statements[0]
|
||||
.actions
|
||||
.0
|
||||
.insert(Action::S3Action(S3Action::ListBucketAction));
|
||||
policy.statements[0]
|
||||
.resources
|
||||
.0
|
||||
.insert(Resource::try_from("arn:aws:s3:::test/*").unwrap());
|
||||
|
||||
let json = serde_json::to_string(&policy).expect("Should serialize");
|
||||
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
|
||||
let action = &parsed["Statement"][0]["Action"];
|
||||
|
||||
// Single action should be serialized as string
|
||||
assert!(action.is_string(), "Single action should serialize as string");
|
||||
assert_eq!(action.as_str().unwrap(), "s3:ListBucket");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,13 +17,35 @@ use crate::error::Error;
|
||||
use serde::Serialize;
|
||||
use std::collections::HashSet;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Default, PartialEq, Eq)]
|
||||
#[serde(rename_all = "PascalCase", default)]
|
||||
/// Principal that serializes AWS field as single string when containing only "*",
|
||||
/// or as an array otherwise (matching AWS S3 API format).
|
||||
#[derive(Debug, Clone, Default, PartialEq, Eq)]
|
||||
pub struct Principal {
|
||||
#[serde(rename = "AWS")]
|
||||
aws: HashSet<String>,
|
||||
}
|
||||
|
||||
impl Serialize for Principal {
|
||||
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
use serde::ser::SerializeMap;
|
||||
|
||||
let mut map = serializer.serialize_map(Some(1))?;
|
||||
|
||||
// If single element, serialize as string; otherwise as array
|
||||
if self.aws.len() == 1 {
|
||||
if let Some(val) = self.aws.iter().next() {
|
||||
map.serialize_entry("AWS", val)?;
|
||||
}
|
||||
} else {
|
||||
map.serialize_entry("AWS", &self.aws)?;
|
||||
}
|
||||
|
||||
map.end()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(serde::Deserialize)]
|
||||
#[serde(untagged)]
|
||||
enum PrincipalFormat {
|
||||
@@ -118,4 +140,30 @@ mod test {
|
||||
};
|
||||
assert!(result);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_principal_serialize_single_element() {
|
||||
// Single element should serialize as string (AWS format)
|
||||
let principal = Principal {
|
||||
aws: HashSet::from(["*".to_string()]),
|
||||
};
|
||||
|
||||
let json = serde_json::to_string(&principal).expect("Should serialize");
|
||||
assert_eq!(json, r#"{"AWS":"*"}"#);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_principal_serialize_multiple_elements() {
|
||||
// Multiple elements should serialize as array
|
||||
let principal = Principal {
|
||||
aws: HashSet::from(["*".to_string(), "arn:aws:iam::123456789012:root".to_string()]),
|
||||
};
|
||||
|
||||
let json = serde_json::to_string(&principal).expect("Should serialize");
|
||||
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
|
||||
let aws_value = parsed.get("AWS").expect("Should have AWS field");
|
||||
assert!(aws_value.is_array());
|
||||
let arr = aws_value.as_array().expect("Should be array");
|
||||
assert_eq!(arr.len(), 2);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,6 +35,11 @@ use super::{
|
||||
pub struct ResourceSet(pub HashSet<Resource>);
|
||||
|
||||
impl ResourceSet {
|
||||
/// Returns true if the resource set is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.0.is_empty()
|
||||
}
|
||||
|
||||
pub async fn is_match(&self, resource: &str, conditions: &HashMap<String, Vec<String>>) -> bool {
|
||||
self.is_match_with_resolver(resource, conditions, None).await
|
||||
}
|
||||
|
||||
@@ -179,10 +179,12 @@ impl PartialEq for Statement {
|
||||
}
|
||||
}
|
||||
|
||||
/// Bucket Policy Statement with AWS S3-compatible JSON serialization.
|
||||
/// Empty optional fields are omitted from output to match AWS format.
|
||||
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
|
||||
#[serde(rename_all = "PascalCase", default)]
|
||||
pub struct BPStatement {
|
||||
#[serde(rename = "Sid", default)]
|
||||
#[serde(rename = "Sid", default, skip_serializing_if = "ID::is_empty")]
|
||||
pub sid: ID,
|
||||
#[serde(rename = "Effect")]
|
||||
pub effect: Effect,
|
||||
@@ -190,13 +192,13 @@ pub struct BPStatement {
|
||||
pub principal: Principal,
|
||||
#[serde(rename = "Action")]
|
||||
pub actions: ActionSet,
|
||||
#[serde(rename = "NotAction", default)]
|
||||
#[serde(rename = "NotAction", default, skip_serializing_if = "ActionSet::is_empty")]
|
||||
pub not_actions: ActionSet,
|
||||
#[serde(rename = "Resource", default)]
|
||||
pub resources: ResourceSet,
|
||||
#[serde(rename = "NotResource", default)]
|
||||
#[serde(rename = "NotResource", default, skip_serializing_if = "ResourceSet::is_empty")]
|
||||
pub not_resources: ResourceSet,
|
||||
#[serde(rename = "Condition", default)]
|
||||
#[serde(rename = "Condition", default, skip_serializing_if = "Functions::is_empty")]
|
||||
pub conditions: Functions,
|
||||
}
|
||||
|
||||
|
||||
@@ -34,10 +34,9 @@ path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
rustfs-common.workspace = true
|
||||
rustfs-credentials = { workspace = true }
|
||||
flatbuffers = { workspace = true }
|
||||
prost = { workspace = true }
|
||||
tonic = { workspace = true, features = ["transport"] }
|
||||
tonic-prost = { workspace = true }
|
||||
tonic-prost-build = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
|
||||
@@ -20,11 +20,10 @@ use rustfs_common::{GLOBAL_CONN_MAP, GLOBAL_MTLS_IDENTITY, GLOBAL_ROOT_CERT, evi
|
||||
use std::{error::Error, time::Duration};
|
||||
use tonic::{
|
||||
Request, Status,
|
||||
metadata::MetadataValue,
|
||||
service::interceptor::InterceptedService,
|
||||
transport::{Certificate, Channel, ClientTlsConfig, Endpoint},
|
||||
};
|
||||
use tracing::{debug, error, warn};
|
||||
use tracing::{debug, warn};
|
||||
|
||||
// Type alias for the complex client type
|
||||
pub type NodeServiceClientType = NodeServiceClient<
|
||||
@@ -64,7 +63,7 @@ const RUSTFS_HTTPS_PREFIX: &str = "https://";
|
||||
/// - Aggressive TCP keepalive (10s)
|
||||
/// - HTTP/2 PING every 5s, timeout at 3s
|
||||
/// - Overall RPC timeout of 30s (reduced from 60s)
|
||||
async fn create_new_channel(addr: &str) -> Result<Channel, Box<dyn Error>> {
|
||||
pub async fn create_new_channel(addr: &str) -> Result<Channel, Box<dyn Error>> {
|
||||
debug!("Creating new gRPC channel to: {}", addr);
|
||||
|
||||
let mut connector = Endpoint::from_shared(addr.to_string())?
|
||||
@@ -131,90 +130,6 @@ async fn create_new_channel(addr: &str) -> Result<Channel, Box<dyn Error>> {
|
||||
Ok(channel)
|
||||
}
|
||||
|
||||
/// Get a gRPC client for the NodeService with robust connection handling.
|
||||
///
|
||||
/// This function implements several resilience features:
|
||||
/// 1. Connection caching for performance
|
||||
/// 2. Automatic eviction of stale/dead connections on error
|
||||
/// 3. Optimized keepalive settings for fast dead peer detection
|
||||
/// 4. Reduced timeouts to fail fast when peers are unresponsive
|
||||
///
|
||||
/// # Connection Lifecycle
|
||||
/// - Cached connections are reused for subsequent calls
|
||||
/// - On any connection error, the cached connection is evicted
|
||||
/// - Fresh connections are established with aggressive keepalive settings
|
||||
///
|
||||
/// # Cluster Power-Off Recovery
|
||||
/// When a node experiences abrupt power-off:
|
||||
/// 1. The cached connection will fail on next use
|
||||
/// 2. The connection is automatically evicted from cache
|
||||
/// 3. Subsequent calls will attempt fresh connections
|
||||
/// 4. If node is still down, connection will fail fast (3s timeout)
|
||||
pub async fn node_service_time_out_client(
|
||||
addr: &String,
|
||||
) -> Result<
|
||||
NodeServiceClient<
|
||||
InterceptedService<Channel, Box<dyn Fn(Request<()>) -> Result<Request<()>, Status> + Send + Sync + 'static>>,
|
||||
>,
|
||||
Box<dyn Error>,
|
||||
> {
|
||||
debug!("Obtaining gRPC client for NodeService at: {}", addr);
|
||||
let token_str = rustfs_credentials::get_grpc_token();
|
||||
let token: MetadataValue<_> = token_str.parse().map_err(|e| {
|
||||
error!(
|
||||
"Failed to parse gRPC auth token into MetadataValue: {:?}; env={} token_len={} token_prefix={}",
|
||||
e,
|
||||
rustfs_credentials::ENV_GRPC_AUTH_TOKEN,
|
||||
token_str.len(),
|
||||
token_str.chars().take(2).collect::<String>(),
|
||||
);
|
||||
e
|
||||
})?;
|
||||
|
||||
// Try to get cached channel
|
||||
let cached_channel = { GLOBAL_CONN_MAP.read().await.get(addr).cloned() };
|
||||
|
||||
let channel = match cached_channel {
|
||||
Some(channel) => {
|
||||
debug!("Using cached gRPC channel for: {}", addr);
|
||||
channel
|
||||
}
|
||||
None => {
|
||||
// No cached connection, create new one
|
||||
create_new_channel(addr).await?
|
||||
}
|
||||
};
|
||||
|
||||
Ok(NodeServiceClient::with_interceptor(
|
||||
channel,
|
||||
Box::new(move |mut req: Request<()>| {
|
||||
req.metadata_mut().insert("authorization", token.clone());
|
||||
Ok(req)
|
||||
}),
|
||||
))
|
||||
}
|
||||
|
||||
/// Get a gRPC client with automatic connection eviction on failure.
|
||||
///
|
||||
/// This is the preferred method for cluster operations as it ensures
|
||||
/// that failed connections are automatically cleaned up from the cache.
|
||||
///
|
||||
/// Returns the client and the address for later eviction if needed.
|
||||
pub async fn node_service_client_with_eviction(
|
||||
addr: &String,
|
||||
) -> Result<
|
||||
(
|
||||
NodeServiceClient<
|
||||
InterceptedService<Channel, Box<dyn Fn(Request<()>) -> Result<Request<()>, Status> + Send + Sync + 'static>>,
|
||||
>,
|
||||
String,
|
||||
),
|
||||
Box<dyn Error>,
|
||||
> {
|
||||
let client = node_service_time_out_client(addr).await?;
|
||||
Ok((client, addr.clone()))
|
||||
}
|
||||
|
||||
/// Evict a connection from the cache after a failure.
|
||||
/// This should be called when an RPC fails to ensure fresh connections are tried.
|
||||
pub async fn evict_failed_connection(addr: &str) {
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
use rustfs_config::VERSION;
|
||||
use std::env;
|
||||
use std::fmt;
|
||||
#[cfg(not(any(target_os = "openbsd", target_os = "freebsd")))]
|
||||
use sysinfo::System;
|
||||
|
||||
/// Business Type Enumeration
|
||||
|
||||
@@ -126,6 +126,7 @@ url = { workspace = true }
|
||||
urlencoding = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
zip = { workspace = true }
|
||||
libc = { workspace = true }
|
||||
|
||||
# Observability and Metrics
|
||||
metrics = { workspace = true }
|
||||
@@ -136,9 +137,6 @@ russh = { workspace = true }
|
||||
russh-sftp = { workspace = true }
|
||||
ssh-key = { workspace = true }
|
||||
|
||||
[target.'cfg(any(target_os = "macos", target_os = "freebsd", target_os = "netbsd", target_os = "openbsd"))'.dependencies]
|
||||
sysctl = { workspace = true }
|
||||
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
libsystemd.workspace = true
|
||||
|
||||
|
||||
@@ -336,7 +336,7 @@ pub async fn init_ftp_system(
|
||||
let ftps_address_str = rustfs_utils::get_env_str(rustfs_config::ENV_FTPS_ADDRESS, rustfs_config::DEFAULT_FTPS_ADDRESS);
|
||||
let addr: SocketAddr = ftps_address_str
|
||||
.parse()
|
||||
.map_err(|e| format!("Invalid FTPS address '{}': {}", ftps_address_str, e))?;
|
||||
.map_err(|e| format!("Invalid FTPS address '{ftps_address_str}': {e}"))?;
|
||||
|
||||
// Get FTPS configuration from environment variables
|
||||
let cert_file = rustfs_utils::get_env_opt_str(rustfs_config::ENV_FTPS_CERTS_FILE);
|
||||
@@ -402,7 +402,7 @@ pub async fn init_sftp_system(
|
||||
let sftp_address_str = rustfs_utils::get_env_str(rustfs_config::ENV_SFTP_ADDRESS, rustfs_config::DEFAULT_SFTP_ADDRESS);
|
||||
let addr: SocketAddr = sftp_address_str
|
||||
.parse()
|
||||
.map_err(|e| format!("Invalid SFTP address '{}': {}", sftp_address_str, e))?;
|
||||
.map_err(|e| format!("Invalid SFTP address '{sftp_address_str}': {e}"))?;
|
||||
|
||||
// Get SFTP configuration from environment variables
|
||||
let host_key = rustfs_utils::get_env_opt_str(rustfs_config::ENV_SFTP_HOST_KEY);
|
||||
|
||||
@@ -26,7 +26,7 @@ pub enum RustFSError {
|
||||
impl std::fmt::Display for RustFSError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
RustFSError::Cert(msg) => write!(f, "Certificate error: {}", msg),
|
||||
RustFSError::Cert(msg) => write!(f, "Certificate error: {msg}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -78,7 +78,7 @@ fn parse_pem_private_key(pem: &[u8]) -> Result<PrivateKeyDer<'static>, RustFSErr
|
||||
async fn read_file(path: &PathBuf, desc: &str) -> Result<Vec<u8>, RustFSError> {
|
||||
tokio::fs::read(path)
|
||||
.await
|
||||
.map_err(|e| RustFSError::Cert(format!("read {} {:?}: {e}", desc, path)))
|
||||
.map_err(|e| RustFSError::Cert(format!("read {desc} {path:?}: {e}")))
|
||||
}
|
||||
|
||||
/// Initialize TLS material for both server and outbound client connections.
|
||||
|
||||
@@ -21,7 +21,7 @@ use crate::server::{ReadinessGateLayer, RemoteAddr, ServiceState, ServiceStateMa
|
||||
use crate::storage;
|
||||
use crate::storage::tonic_service::make_server;
|
||||
use bytes::Bytes;
|
||||
use http::{HeaderMap, Request as HttpRequest, Response};
|
||||
use http::{HeaderMap, Method, Request as HttpRequest, Response};
|
||||
use hyper_util::{
|
||||
rt::{TokioExecutor, TokioIo},
|
||||
server::conn::auto::Builder as ConnBuilder,
|
||||
@@ -30,7 +30,11 @@ use hyper_util::{
|
||||
};
|
||||
use metrics::{counter, histogram};
|
||||
use rustfs_common::GlobalReadiness;
|
||||
#[cfg(not(target_os = "openbsd"))]
|
||||
use rustfs_config::{MI_B, RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
|
||||
#[cfg(target_os = "openbsd")]
|
||||
use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
|
||||
use rustfs_ecstore::rpc::{TONIC_RPC_PREFIX, verify_rpc_signature};
|
||||
use rustfs_protos::proto_gen::node_service::node_service_server::NodeServiceServer;
|
||||
use rustfs_utils::net::parse_and_resolve_address;
|
||||
use rustls::ServerConfig;
|
||||
@@ -42,7 +46,7 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
use tokio_rustls::TlsAcceptor;
|
||||
use tonic::{Request, Status, metadata::MetadataValue};
|
||||
use tonic::{Request, Status};
|
||||
use tower::ServiceBuilder;
|
||||
use tower_http::add_extension::AddExtensionLayer;
|
||||
use tower_http::catch_panic::CatchPanicLayer;
|
||||
@@ -374,12 +378,20 @@ pub async fn start_http_server(
|
||||
|
||||
// Enable TCP Keepalive to detect dead clients (e.g. power loss)
|
||||
// Idle: 10s, Interval: 5s, Retries: 3
|
||||
let ka = TcpKeepalive::new()
|
||||
.with_time(Duration::from_secs(10))
|
||||
.with_interval(Duration::from_secs(5));
|
||||
let ka = {
|
||||
#[cfg(not(target_os = "openbsd"))]
|
||||
let ka = TcpKeepalive::new()
|
||||
.with_time(Duration::from_secs(10))
|
||||
.with_interval(Duration::from_secs(5))
|
||||
.with_retries(3);
|
||||
|
||||
#[cfg(not(any(target_os = "openbsd", target_os = "netbsd")))]
|
||||
let ka = ka.with_retries(3);
|
||||
// On OpenBSD socket2 only supports configuring the initial
|
||||
// TCP keepalive timeout; intervals and retries cannot be set.
|
||||
#[cfg(target_os = "openbsd")]
|
||||
let ka = TcpKeepalive::new().with_time(Duration::from_secs(10));
|
||||
|
||||
ka
|
||||
};
|
||||
|
||||
if let Err(err) = socket_ref.set_tcp_keepalive(&ka) {
|
||||
warn!(?err, "Failed to set TCP_KEEPALIVE");
|
||||
@@ -388,9 +400,11 @@ pub async fn start_http_server(
|
||||
if let Err(err) = socket_ref.set_tcp_nodelay(true) {
|
||||
warn!(?err, "Failed to set TCP_NODELAY");
|
||||
}
|
||||
#[cfg(not(any(target_os = "openbsd")))]
|
||||
if let Err(err) = socket_ref.set_recv_buffer_size(4 * MI_B) {
|
||||
warn!(?err, "Failed to set set_recv_buffer_size");
|
||||
}
|
||||
#[cfg(not(any(target_os = "openbsd")))]
|
||||
if let Err(err) = socket_ref.set_send_buffer_size(4 * MI_B) {
|
||||
warn!(?err, "Failed to set set_send_buffer_size");
|
||||
}
|
||||
@@ -722,17 +736,41 @@ fn handle_connection_error(err: &(dyn std::error::Error + 'static)) {
|
||||
|
||||
#[allow(clippy::result_large_err)]
|
||||
fn check_auth(req: Request<()>) -> std::result::Result<Request<()>, Status> {
|
||||
let token_str = rustfs_credentials::get_grpc_token();
|
||||
|
||||
let token: MetadataValue<_> = token_str.parse().map_err(|e| {
|
||||
error!("Failed to parse RUSTFS_GRPC_AUTH_TOKEN into gRPC metadata value: {}", e);
|
||||
Status::internal("Invalid auth token configuration")
|
||||
verify_rpc_signature(TONIC_RPC_PREFIX, &Method::GET, req.metadata().as_ref()).map_err(|e| {
|
||||
error!("RPC signature verification failed: {}", e);
|
||||
Status::unauthenticated("No valid auth token")
|
||||
})?;
|
||||
Ok(req)
|
||||
}
|
||||
|
||||
match req.metadata().get("authorization") {
|
||||
Some(t) if token == t => Ok(req),
|
||||
_ => Err(Status::unauthenticated("No valid auth token")),
|
||||
// For macOS and BSD variants use the syscall way of getting the connection queue length.
|
||||
#[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "netbsd", target_os = "openbsd"))]
|
||||
#[allow(unsafe_code)]
|
||||
fn get_conn_queue_len() -> i32 {
|
||||
const DEFAULT_BACKLOG: i32 = 1024;
|
||||
|
||||
#[cfg(target_os = "openbsd")]
|
||||
let mut name = [libc::CTL_KERN, libc::KERN_SOMAXCONN];
|
||||
#[cfg(any(target_os = "netbsd", target_os = "macos", target_os = "freebsd"))]
|
||||
let mut name = [libc::CTL_KERN, libc::KERN_IPC, libc::KIPC_SOMAXCONN];
|
||||
let mut buf = [0; 1];
|
||||
let mut buf_len = std::mem::size_of_val(&buf);
|
||||
|
||||
if unsafe {
|
||||
libc::sysctl(
|
||||
name.as_mut_ptr(),
|
||||
name.len() as u32,
|
||||
buf.as_mut_ptr() as *mut libc::c_void,
|
||||
&mut buf_len,
|
||||
std::ptr::null_mut(),
|
||||
0,
|
||||
)
|
||||
} != 0
|
||||
{
|
||||
return DEFAULT_BACKLOG;
|
||||
}
|
||||
|
||||
buf[0]
|
||||
}
|
||||
|
||||
/// Determines the listen backlog size.
|
||||
@@ -741,28 +779,22 @@ fn check_auth(req: Request<()>) -> std::result::Result<Request<()>, Status> {
|
||||
/// If reading fails, it falls back to a default value (e.g., 1024).
|
||||
/// This makes the backlog size adaptive to the system configuration.
|
||||
fn get_listen_backlog() -> i32 {
|
||||
const DEFAULT_BACKLOG: i32 = 1024;
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
const DEFAULT_BACKLOG: i32 = 1024;
|
||||
|
||||
// For Linux, read from /proc/sys/net/core/somaxconn
|
||||
match std::fs::read_to_string("/proc/sys/net/core/somaxconn") {
|
||||
Ok(s) => s.trim().parse().unwrap_or(DEFAULT_BACKLOG),
|
||||
Err(_) => DEFAULT_BACKLOG,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "netbsd", target_os = "openbsd"))]
|
||||
{
|
||||
// For macOS and BSD variants, use sysctl
|
||||
use sysctl::Sysctl;
|
||||
match sysctl::Ctl::new("kern.ipc.somaxconn") {
|
||||
Ok(ctl) => match ctl.value() {
|
||||
Ok(sysctl::CtlValue::Int(val)) => val,
|
||||
_ => DEFAULT_BACKLOG,
|
||||
},
|
||||
Err(_) => DEFAULT_BACKLOG,
|
||||
}
|
||||
get_conn_queue_len()
|
||||
}
|
||||
|
||||
#[cfg(not(any(
|
||||
target_os = "linux",
|
||||
target_os = "macos",
|
||||
|
||||
@@ -1548,16 +1548,19 @@ impl S3 for FS {
|
||||
|
||||
let mut object_to_delete = Vec::new();
|
||||
let mut object_to_delete_index = HashMap::new();
|
||||
for (idx, object) in delete.objects.iter().enumerate() {
|
||||
if let Some(version_id) = object.version_id.clone() {
|
||||
let _vid = match Uuid::parse_str(&version_id) {
|
||||
for (idx, obj_id) in delete.objects.iter().enumerate() {
|
||||
// Per S3 API spec, "null" string means non-versioned object
|
||||
// Filter out "null" version_id to treat as unversioned
|
||||
let version_id = obj_id.version_id.clone().filter(|v| v != "null");
|
||||
if let Some(ref vid) = version_id {
|
||||
let _vid = match Uuid::parse_str(vid) {
|
||||
Ok(v) => v,
|
||||
Err(err) => {
|
||||
delete_results[idx].error = Some(Error {
|
||||
code: Some("NoSuchVersion".to_string()),
|
||||
key: Some(object.key.clone()),
|
||||
key: Some(obj_id.key.clone()),
|
||||
message: Some(err.to_string()),
|
||||
version_id: Some(version_id),
|
||||
version_id: Some(vid.clone()),
|
||||
});
|
||||
|
||||
continue;
|
||||
@@ -1568,24 +1571,26 @@ impl S3 for FS {
|
||||
{
|
||||
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
|
||||
req_info.bucket = Some(bucket.clone());
|
||||
req_info.object = Some(object.key.clone());
|
||||
req_info.version_id = object.version_id.clone();
|
||||
req_info.object = Some(obj_id.key.clone());
|
||||
req_info.version_id = version_id.clone();
|
||||
}
|
||||
|
||||
let auth_res = authorize_request(&mut req, Action::S3Action(S3Action::DeleteObjectAction)).await;
|
||||
if let Err(e) = auth_res {
|
||||
delete_results[idx].error = Some(Error {
|
||||
code: Some("AccessDenied".to_string()),
|
||||
key: Some(object.key.clone()),
|
||||
key: Some(obj_id.key.clone()),
|
||||
message: Some(e.to_string()),
|
||||
version_id: object.version_id.clone(),
|
||||
version_id: version_id.clone(),
|
||||
});
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut object = ObjectToDelete {
|
||||
object_name: object.key.clone(),
|
||||
version_id: object.version_id.clone().map(|v| Uuid::parse_str(&v).unwrap()),
|
||||
object_name: obj_id.key.clone(),
|
||||
version_id: version_id
|
||||
.clone()
|
||||
.map(|v| Uuid::parse_str(&v).expect("version_id validated as UUID earlier")),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
@@ -2689,10 +2694,21 @@ impl S3 for FS {
|
||||
}
|
||||
}
|
||||
|
||||
// Extract standard HTTP headers from user_defined metadata
|
||||
// Note: These headers are stored with lowercase keys by extract_metadata_from_mime
|
||||
let cache_control = metadata_map.get("cache-control").cloned();
|
||||
let content_disposition = metadata_map.get("content-disposition").cloned();
|
||||
let content_language = metadata_map.get("content-language").cloned();
|
||||
let expires = info.expires.map(Timestamp::from);
|
||||
|
||||
let output = HeadObjectOutput {
|
||||
content_length: Some(content_length),
|
||||
content_type,
|
||||
content_encoding: info.content_encoding.clone(),
|
||||
cache_control,
|
||||
content_disposition,
|
||||
content_language,
|
||||
expires,
|
||||
last_modified,
|
||||
e_tag: info.etag.map(|etag| to_s3s_etag(&etag)),
|
||||
metadata: filter_object_metadata(&metadata_map),
|
||||
@@ -2790,6 +2806,10 @@ impl S3 for FS {
|
||||
|
||||
#[instrument(level = "debug", skip(self, req))]
|
||||
async fn list_objects(&self, req: S3Request<ListObjectsInput>) -> S3Result<S3Response<ListObjectsOutput>> {
|
||||
// Capture the original marker from the request before conversion
|
||||
// S3 API requires the marker field to be echoed back in the response
|
||||
let request_marker = req.input.marker.clone();
|
||||
|
||||
let v2_resp = self.list_objects_v2(req.map_input(Into::into)).await?;
|
||||
|
||||
Ok(v2_resp.map_output(|v2| {
|
||||
@@ -2812,7 +2832,7 @@ impl S3 for FS {
|
||||
.cloned();
|
||||
|
||||
// NextMarker should be the lexicographically last item
|
||||
// This matches Ceph S3 behavior used by s3-tests
|
||||
// This matches S3 standard behavior
|
||||
match (last_key, last_prefix) {
|
||||
(Some(k), Some(p)) => {
|
||||
// Return the lexicographically greater one
|
||||
@@ -2826,6 +2846,10 @@ impl S3 for FS {
|
||||
None
|
||||
};
|
||||
|
||||
// S3 API requires marker field in response, echoing back the request marker
|
||||
// If no marker was provided in request, return empty string per S3 standard
|
||||
let marker = Some(request_marker.unwrap_or_default());
|
||||
|
||||
ListObjectsOutput {
|
||||
contents: v2.contents,
|
||||
delimiter: v2.delimiter,
|
||||
@@ -2835,6 +2859,7 @@ impl S3 for FS {
|
||||
max_keys: v2.max_keys,
|
||||
common_prefixes: v2.common_prefixes,
|
||||
is_truncated: v2.is_truncated,
|
||||
marker,
|
||||
next_marker,
|
||||
..Default::default()
|
||||
}
|
||||
@@ -2872,15 +2897,17 @@ impl S3 for FS {
|
||||
|
||||
validate_list_object_unordered_with_delimiter(delimiter.as_ref(), req.uri.query())?;
|
||||
|
||||
let start_after = start_after.filter(|v| !v.is_empty());
|
||||
// Save original start_after for response (per S3 API spec, must echo back if provided)
|
||||
let response_start_after = start_after.clone();
|
||||
let start_after_for_query = start_after.filter(|v| !v.is_empty());
|
||||
|
||||
let continuation_token = continuation_token.filter(|v| !v.is_empty());
|
||||
|
||||
// Save the original encoded continuation_token for response
|
||||
let encoded_continuation_token = continuation_token.clone();
|
||||
// Save original continuation_token for response (per S3 API spec, must echo back if provided)
|
||||
// Note: empty string should still be echoed back in the response
|
||||
let response_continuation_token = continuation_token.clone();
|
||||
let continuation_token_for_query = continuation_token.filter(|v| !v.is_empty());
|
||||
|
||||
// Decode continuation_token from base64 for internal use
|
||||
let continuation_token = continuation_token
|
||||
let decoded_continuation_token = continuation_token_for_query
|
||||
.map(|token| {
|
||||
base64_simd::STANDARD
|
||||
.decode_to_vec(token.as_bytes())
|
||||
@@ -2902,11 +2929,11 @@ impl S3 for FS {
|
||||
.list_objects_v2(
|
||||
&bucket,
|
||||
&prefix,
|
||||
continuation_token,
|
||||
decoded_continuation_token,
|
||||
delimiter.clone(),
|
||||
max_keys,
|
||||
fetch_owner.unwrap_or_default(),
|
||||
start_after,
|
||||
start_after_for_query,
|
||||
incl_deleted,
|
||||
)
|
||||
.await
|
||||
@@ -2975,8 +3002,9 @@ impl S3 for FS {
|
||||
|
||||
let output = ListObjectsV2Output {
|
||||
is_truncated: Some(object_infos.is_truncated),
|
||||
continuation_token: encoded_continuation_token,
|
||||
continuation_token: response_continuation_token,
|
||||
next_continuation_token,
|
||||
start_after: response_start_after,
|
||||
key_count: Some(key_count),
|
||||
max_keys: Some(max_keys),
|
||||
contents: Some(objects),
|
||||
|
||||
@@ -330,29 +330,56 @@ pub fn extract_metadata_from_mime_with_object_name(
|
||||
}
|
||||
|
||||
pub(crate) fn filter_object_metadata(metadata: &HashMap<String, String>) -> Option<HashMap<String, String>> {
|
||||
// Standard HTTP headers that should NOT be returned in the Metadata field
|
||||
// These are returned as separate response headers, not user metadata
|
||||
const EXCLUDED_HEADERS: &[&str] = &[
|
||||
"content-type",
|
||||
"content-encoding",
|
||||
"content-disposition",
|
||||
"content-language",
|
||||
"cache-control",
|
||||
"expires",
|
||||
"etag",
|
||||
"x-amz-storage-class",
|
||||
"x-amz-tagging",
|
||||
"x-amz-replication-status",
|
||||
"x-amz-server-side-encryption",
|
||||
"x-amz-server-side-encryption-customer-algorithm",
|
||||
"x-amz-server-side-encryption-customer-key-md5",
|
||||
"x-amz-server-side-encryption-aws-kms-key-id",
|
||||
];
|
||||
|
||||
let mut filtered_metadata = HashMap::new();
|
||||
for (k, v) in metadata {
|
||||
// Skip internal/reserved metadata
|
||||
if k.starts_with(RESERVED_METADATA_PREFIX_LOWER) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip empty object lock values
|
||||
if v.is_empty() && (k == &X_AMZ_OBJECT_LOCK_MODE.to_string() || k == &X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE.to_string()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip encryption metadata placeholders
|
||||
if k == AMZ_META_UNENCRYPTED_CONTENT_MD5 || k == AMZ_META_UNENCRYPTED_CONTENT_LENGTH {
|
||||
continue;
|
||||
}
|
||||
|
||||
let lower_key = k.to_ascii_lowercase();
|
||||
if let Some(key) = lower_key.strip_prefix("x-amz-meta-") {
|
||||
filtered_metadata.insert(key.to_string(), v.to_string());
|
||||
continue;
|
||||
}
|
||||
if let Some(key) = lower_key.strip_prefix("x-rustfs-meta-") {
|
||||
filtered_metadata.insert(key.to_string(), v.to_string());
|
||||
|
||||
// Skip standard HTTP headers (they are returned as separate headers, not metadata)
|
||||
if EXCLUDED_HEADERS.contains(&lower_key.as_str()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip any x-amz-* headers that are not user metadata
|
||||
// User metadata was stored WITHOUT the x-amz-meta- prefix by extract_metadata_from_mime
|
||||
if lower_key.starts_with("x-amz-") {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Include user-defined metadata (keys like "meta1", "custom-key", etc.)
|
||||
filtered_metadata.insert(k.clone(), v.clone());
|
||||
}
|
||||
if filtered_metadata.is_empty() {
|
||||
|
||||
@@ -1807,7 +1807,7 @@ impl Node for NodeService {
|
||||
return Ok(Response::new(GetMetricsResponse {
|
||||
success: false,
|
||||
realtime_metrics: Bytes::new(),
|
||||
error_info: Some(format!("Invalid metric_type: {}", err)),
|
||||
error_info: Some(format!("Invalid metric_type: {err}")),
|
||||
}));
|
||||
}
|
||||
};
|
||||
@@ -1821,7 +1821,7 @@ impl Node for NodeService {
|
||||
return Ok(Response::new(GetMetricsResponse {
|
||||
success: false,
|
||||
realtime_metrics: Bytes::new(),
|
||||
error_info: Some(format!("Invalid opts: {}", err)),
|
||||
error_info: Some(format!("Invalid opts: {err}")),
|
||||
}));
|
||||
}
|
||||
};
|
||||
|
||||
130
scripts/s3-tests/implemented_tests.txt
Normal file
130
scripts/s3-tests/implemented_tests.txt
Normal file
@@ -0,0 +1,130 @@
|
||||
# Implemented S3 feature tests
|
||||
# ============================
|
||||
#
|
||||
# These tests SHOULD PASS on RustFS for standard S3 API compatibility.
|
||||
# Run these tests to verify RustFS S3 compatibility.
|
||||
#
|
||||
# Covered operations:
|
||||
# - Bucket: Create, Delete, List, Head, GetLocation
|
||||
# - Object: Put, Get, Delete, Copy, Head
|
||||
# - ListObjects/ListObjectsV2: prefix, delimiter, marker, maxkeys
|
||||
# - Multipart Upload: Create, Upload, Complete, Abort, List
|
||||
# - Tagging: Bucket and Object tags
|
||||
# - Bucket Policy: Put, Get, Delete
|
||||
# - Public Access Block: Put, Get, Delete
|
||||
# - Presigned URLs: GET and PUT operations
|
||||
# - Range requests: Partial object retrieval
|
||||
# - Metadata: User-defined metadata
|
||||
# - Conditional GET: If-Match, If-None-Match, If-Modified-Since
|
||||
#
|
||||
# Total: 109 tests
|
||||
|
||||
test_basic_key_count
|
||||
test_bucket_create_naming_bad_short_one
|
||||
test_bucket_create_naming_bad_short_two
|
||||
test_bucket_create_naming_bad_starts_nonalpha
|
||||
test_bucket_create_naming_dns_dash_at_end
|
||||
test_bucket_create_naming_dns_dash_dot
|
||||
test_bucket_create_naming_dns_dot_dash
|
||||
test_bucket_create_naming_dns_dot_dot
|
||||
test_bucket_create_naming_dns_underscore
|
||||
test_bucket_create_naming_good_contains_hyphen
|
||||
test_bucket_create_naming_good_contains_period
|
||||
test_bucket_create_naming_good_long_60
|
||||
test_bucket_create_naming_good_long_61
|
||||
test_bucket_create_naming_good_long_62
|
||||
test_bucket_create_naming_good_long_63
|
||||
test_bucket_create_naming_good_starts_alpha
|
||||
test_bucket_create_naming_good_starts_digit
|
||||
test_bucket_delete_nonempty
|
||||
test_bucket_delete_notexist
|
||||
test_bucket_head
|
||||
test_bucket_head_notexist
|
||||
test_bucket_list_distinct
|
||||
test_bucket_list_empty
|
||||
test_bucket_list_long_name
|
||||
test_bucket_list_marker_after_list
|
||||
test_bucket_list_marker_empty
|
||||
test_bucket_list_marker_none
|
||||
test_bucket_list_marker_not_in_list
|
||||
test_bucket_list_marker_unreadable
|
||||
test_bucket_list_maxkeys_invalid
|
||||
test_bucket_list_maxkeys_none
|
||||
test_bucket_list_maxkeys_zero
|
||||
test_bucket_list_prefix_alt
|
||||
test_bucket_list_prefix_basic
|
||||
test_bucket_list_prefix_delimiter_alt
|
||||
test_bucket_list_prefix_delimiter_basic
|
||||
test_bucket_list_prefix_delimiter_delimiter_not_exist
|
||||
test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist
|
||||
test_bucket_list_prefix_delimiter_prefix_not_exist
|
||||
test_bucket_list_prefix_empty
|
||||
test_bucket_list_prefix_none
|
||||
test_bucket_list_prefix_not_exist
|
||||
test_bucket_list_prefix_unreadable
|
||||
test_bucket_list_special_prefix
|
||||
test_bucket_listv2_continuationtoken
|
||||
test_bucket_listv2_continuationtoken_empty
|
||||
test_bucket_listv2_fetchowner_defaultempty
|
||||
test_bucket_listv2_fetchowner_empty
|
||||
test_bucket_listv2_fetchowner_notempty
|
||||
test_bucket_listv2_maxkeys_none
|
||||
test_bucket_listv2_maxkeys_zero
|
||||
test_bucket_listv2_prefix_alt
|
||||
test_bucket_listv2_prefix_basic
|
||||
test_bucket_listv2_prefix_delimiter_alt
|
||||
test_bucket_listv2_prefix_delimiter_basic
|
||||
test_bucket_listv2_prefix_delimiter_delimiter_not_exist
|
||||
test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist
|
||||
test_bucket_listv2_prefix_delimiter_prefix_not_exist
|
||||
test_bucket_listv2_prefix_empty
|
||||
test_bucket_listv2_prefix_none
|
||||
test_bucket_listv2_prefix_not_exist
|
||||
test_bucket_listv2_prefix_unreadable
|
||||
test_bucket_listv2_startafter_after_list
|
||||
test_bucket_listv2_startafter_not_in_list
|
||||
test_bucket_listv2_startafter_unreadable
|
||||
test_bucket_notexist
|
||||
test_buckets_create_then_list
|
||||
test_buckets_list_ctime
|
||||
test_bucketv2_notexist
|
||||
test_bucketv2_policy_another_bucket
|
||||
test_get_bucket_policy_status
|
||||
test_get_nonpublicpolicy_principal_bucket_policy_status
|
||||
test_get_object_ifmatch_good
|
||||
test_get_object_ifmodifiedsince_good
|
||||
test_get_object_ifunmodifiedsince_failed
|
||||
test_list_buckets_bad_auth
|
||||
test_multi_object_delete
|
||||
test_multi_object_delete_key_limit
|
||||
test_multi_objectv2_delete
|
||||
test_multi_objectv2_delete_key_limit
|
||||
test_multipart_copy_without_range
|
||||
test_multipart_upload_empty
|
||||
test_multipart_upload_incorrect_etag
|
||||
test_multipart_upload_missing_part
|
||||
test_multipart_upload_multiple_sizes
|
||||
test_multipart_upload_on_a_bucket_with_policy
|
||||
test_multipart_upload_overwrite_existing_object
|
||||
test_multipart_upload_size_too_small
|
||||
test_object_copy_bucket_not_found
|
||||
test_object_copy_key_not_found
|
||||
test_object_copy_not_owned_object_bucket
|
||||
test_object_head_zero_bytes
|
||||
test_object_metadata_replaced_on_put
|
||||
test_object_put_authenticated
|
||||
test_object_read_not_exist
|
||||
test_object_set_get_metadata_none_to_empty
|
||||
test_object_set_get_metadata_none_to_good
|
||||
test_object_set_get_metadata_overwrite_to_empty
|
||||
test_object_write_cache_control
|
||||
test_object_write_check_etag
|
||||
test_object_write_expires
|
||||
test_object_write_file
|
||||
test_object_write_read_update_read_delete
|
||||
test_object_write_to_nonexist_bucket
|
||||
test_put_max_kvsize_tags
|
||||
test_ranged_request_empty_object
|
||||
test_ranged_request_invalid_range
|
||||
test_set_multipart_tagging
|
||||
test_upload_part_copy_percent_encoded_key
|
||||
505
scripts/s3-tests/non_standard_tests.txt
Normal file
505
scripts/s3-tests/non_standard_tests.txt
Normal file
@@ -0,0 +1,505 @@
|
||||
# Non-standard S3 tests (Ceph/RGW/MinIO specific)
|
||||
# ================================================
|
||||
#
|
||||
# These tests use vendor-specific extensions not part of AWS S3 API.
|
||||
# They are PERMANENTLY EXCLUDED from RustFS compatibility testing.
|
||||
#
|
||||
# Exclusion reasons:
|
||||
# - fails_on_aws marker: Ceph-specific features
|
||||
# - X-RGW-* headers: Ceph proprietary headers
|
||||
# - allowUnordered: Ceph-specific query parameter
|
||||
# - ACL tests: RustFS uses IAM policy-based access control
|
||||
# - CORS tests: Not implemented
|
||||
# - POST Object: HTML form upload not implemented
|
||||
# - Error format differences: Minor response format variations
|
||||
#
|
||||
# Total: non-standard tests listed below
|
||||
|
||||
test_100_continue
|
||||
test_100_continue_error_retry
|
||||
test_abort_multipart_upload_not_found
|
||||
test_access_bucket_private_object_private
|
||||
test_access_bucket_private_object_publicread
|
||||
test_access_bucket_private_object_publicreadwrite
|
||||
test_access_bucket_private_objectv2_private
|
||||
test_access_bucket_private_objectv2_publicread
|
||||
test_access_bucket_private_objectv2_publicreadwrite
|
||||
test_access_bucket_publicread_object_private
|
||||
test_access_bucket_publicread_object_publicread
|
||||
test_access_bucket_publicread_object_publicreadwrite
|
||||
test_access_bucket_publicreadwrite_object_private
|
||||
test_access_bucket_publicreadwrite_object_publicread
|
||||
test_access_bucket_publicreadwrite_object_publicreadwrite
|
||||
test_account_usage
|
||||
test_atomic_conditional_write_1mb
|
||||
test_atomic_dual_conditional_write_1mb
|
||||
test_atomic_write_bucket_gone
|
||||
test_block_public_restrict_public_buckets
|
||||
test_bucket_acl_canned
|
||||
test_bucket_acl_canned_authenticatedread
|
||||
test_bucket_acl_canned_during_create
|
||||
test_bucket_acl_canned_private_to_private
|
||||
test_bucket_acl_canned_publicreadwrite
|
||||
test_bucket_acl_default
|
||||
test_bucket_acl_grant_email
|
||||
test_bucket_acl_grant_email_not_exist
|
||||
test_bucket_acl_grant_nonexist_user
|
||||
test_bucket_acl_grant_userid_fullcontrol
|
||||
test_bucket_acl_grant_userid_read
|
||||
test_bucket_acl_grant_userid_readacp
|
||||
test_bucket_acl_grant_userid_write
|
||||
test_bucket_acl_grant_userid_writeacp
|
||||
test_bucket_acl_revoke_all
|
||||
test_bucket_concurrent_set_canned_acl
|
||||
test_bucket_create_exists
|
||||
test_bucket_create_exists_nonowner
|
||||
test_bucket_create_naming_bad_ip
|
||||
test_bucket_create_naming_dns_long
|
||||
test_bucket_create_special_key_names
|
||||
test_bucket_get_location
|
||||
test_bucket_head_extended
|
||||
test_bucket_header_acl_grants
|
||||
test_bucket_list_delimiter_not_skip_special
|
||||
test_bucket_list_delimiter_prefix
|
||||
test_bucket_list_delimiter_prefix_underscore
|
||||
test_bucket_list_many
|
||||
test_bucket_list_maxkeys_one
|
||||
test_bucket_list_objects_anonymous
|
||||
test_bucket_list_objects_anonymous_fail
|
||||
test_bucket_list_return_data
|
||||
test_bucket_list_return_data_versioning
|
||||
test_bucket_list_unordered
|
||||
test_bucket_listv2_both_continuationtoken_startafter
|
||||
test_bucket_listv2_delimiter_prefix
|
||||
test_bucket_listv2_delimiter_prefix_underscore
|
||||
test_bucket_listv2_many
|
||||
test_bucket_listv2_maxkeys_one
|
||||
test_bucket_listv2_objects_anonymous
|
||||
test_bucket_listv2_objects_anonymous_fail
|
||||
test_bucket_listv2_unordered
|
||||
test_bucket_logging_bucket_acl_required
|
||||
test_bucket_logging_bucket_auth_type
|
||||
test_bucket_logging_cleanup_bucket_concurrent_deletion_j
|
||||
test_bucket_logging_cleanup_bucket_concurrent_deletion_j_single
|
||||
test_bucket_logging_cleanup_bucket_concurrent_deletion_s
|
||||
test_bucket_logging_cleanup_bucket_concurrent_deletion_s_single
|
||||
test_bucket_logging_cleanup_bucket_deletion_j
|
||||
test_bucket_logging_cleanup_bucket_deletion_j_single
|
||||
test_bucket_logging_cleanup_bucket_deletion_s
|
||||
test_bucket_logging_cleanup_bucket_deletion_s_single
|
||||
test_bucket_logging_cleanup_concurrent_disabling_j
|
||||
test_bucket_logging_cleanup_concurrent_disabling_j_single
|
||||
test_bucket_logging_cleanup_concurrent_disabling_s
|
||||
test_bucket_logging_cleanup_concurrent_disabling_s_single
|
||||
test_bucket_logging_cleanup_concurrent_updating_j
|
||||
test_bucket_logging_cleanup_concurrent_updating_j_single
|
||||
test_bucket_logging_cleanup_concurrent_updating_s
|
||||
test_bucket_logging_cleanup_concurrent_updating_s_single
|
||||
test_bucket_logging_cleanup_disabling_j
|
||||
test_bucket_logging_cleanup_disabling_j_single
|
||||
test_bucket_logging_cleanup_disabling_s
|
||||
test_bucket_logging_cleanup_disabling_s_single
|
||||
test_bucket_logging_cleanup_updating_j
|
||||
test_bucket_logging_cleanup_updating_j_single
|
||||
test_bucket_logging_cleanup_updating_s
|
||||
test_bucket_logging_cleanup_updating_s_single
|
||||
test_bucket_logging_concurrent_flush_j
|
||||
test_bucket_logging_concurrent_flush_j_single
|
||||
test_bucket_logging_concurrent_flush_s
|
||||
test_bucket_logging_concurrent_flush_s_single
|
||||
test_bucket_logging_conf_concurrent_updating_pfx_j
|
||||
test_bucket_logging_conf_concurrent_updating_pfx_s
|
||||
test_bucket_logging_conf_concurrent_updating_roll_j
|
||||
test_bucket_logging_conf_concurrent_updating_roll_s
|
||||
test_bucket_logging_conf_updating_pfx_j
|
||||
test_bucket_logging_conf_updating_pfx_s
|
||||
test_bucket_logging_conf_updating_roll_j
|
||||
test_bucket_logging_conf_updating_roll_s
|
||||
test_bucket_logging_copy_objects
|
||||
test_bucket_logging_copy_objects_bucket
|
||||
test_bucket_logging_copy_objects_bucket_versioned
|
||||
test_bucket_logging_copy_objects_versioned
|
||||
test_bucket_logging_delete_objects
|
||||
test_bucket_logging_delete_objects_versioned
|
||||
test_bucket_logging_event_type_j
|
||||
test_bucket_logging_event_type_s
|
||||
test_bucket_logging_flush_empty
|
||||
test_bucket_logging_flush_j
|
||||
test_bucket_logging_flush_j_single
|
||||
test_bucket_logging_flush_s
|
||||
test_bucket_logging_flush_s_single
|
||||
test_bucket_logging_get_objects
|
||||
test_bucket_logging_get_objects_versioned
|
||||
test_bucket_logging_head_objects
|
||||
test_bucket_logging_head_objects_versioned
|
||||
test_bucket_logging_key_filter_j
|
||||
test_bucket_logging_key_filter_s
|
||||
test_bucket_logging_mpu_copy
|
||||
test_bucket_logging_mpu_copy_versioned
|
||||
test_bucket_logging_mpu_j
|
||||
test_bucket_logging_mpu_s
|
||||
test_bucket_logging_mpu_versioned_j
|
||||
test_bucket_logging_mpu_versioned_s
|
||||
test_bucket_logging_mtime
|
||||
test_bucket_logging_multi_delete
|
||||
test_bucket_logging_multi_delete_versioned
|
||||
test_bucket_logging_multiple_prefixes
|
||||
test_bucket_logging_notupdating_j
|
||||
test_bucket_logging_notupdating_j_single
|
||||
test_bucket_logging_notupdating_s
|
||||
test_bucket_logging_notupdating_s_single
|
||||
test_bucket_logging_object_acl_required
|
||||
test_bucket_logging_object_meta
|
||||
test_bucket_logging_part_cleanup_concurrent_deletion_j
|
||||
test_bucket_logging_part_cleanup_concurrent_deletion_s
|
||||
test_bucket_logging_part_cleanup_concurrent_disabling_j
|
||||
test_bucket_logging_part_cleanup_concurrent_disabling_s
|
||||
test_bucket_logging_part_cleanup_concurrent_updating_j
|
||||
test_bucket_logging_part_cleanup_concurrent_updating_s
|
||||
test_bucket_logging_part_cleanup_deletion_j
|
||||
test_bucket_logging_part_cleanup_deletion_s
|
||||
test_bucket_logging_part_cleanup_disabling_j
|
||||
test_bucket_logging_part_cleanup_disabling_s
|
||||
test_bucket_logging_part_cleanup_updating_j
|
||||
test_bucket_logging_part_cleanup_updating_s
|
||||
test_bucket_logging_partitioned_key
|
||||
test_bucket_logging_permission_change_j
|
||||
test_bucket_logging_permission_change_s
|
||||
test_bucket_logging_put_and_flush
|
||||
test_bucket_logging_put_concurrency
|
||||
test_bucket_logging_put_objects
|
||||
test_bucket_logging_put_objects_versioned
|
||||
test_bucket_logging_roll_time
|
||||
test_bucket_logging_simple_key
|
||||
test_bucket_logging_single_prefix
|
||||
test_bucket_logging_target_cleanup_j
|
||||
test_bucket_logging_target_cleanup_j_single
|
||||
test_bucket_logging_target_cleanup_s
|
||||
test_bucket_logging_target_cleanup_s_single
|
||||
test_bucket_policy_get_obj_acl_existing_tag
|
||||
test_bucket_policy_get_obj_existing_tag
|
||||
test_bucket_policy_get_obj_tagging_existing_tag
|
||||
test_bucket_policy_put_obj_copy_source
|
||||
test_bucket_policy_put_obj_copy_source_meta
|
||||
test_bucket_policy_put_obj_kms_noenc
|
||||
test_bucket_policy_put_obj_request_obj_tag
|
||||
test_bucket_policy_put_obj_s3_incorrect_algo_sse_s3
|
||||
test_bucket_policy_put_obj_s3_noenc
|
||||
test_bucket_policy_put_obj_tagging_existing_tag
|
||||
test_bucket_policy_set_condition_operator_end_with_IfExists
|
||||
test_bucket_policy_upload_part_copy
|
||||
test_bucket_recreate_new_acl
|
||||
test_bucket_recreate_not_overriding
|
||||
test_bucket_recreate_overwrite_acl
|
||||
test_copy_object_ifmatch_failed
|
||||
test_copy_object_ifmatch_good
|
||||
test_copy_object_ifnonematch_failed
|
||||
test_copy_object_ifnonematch_good
|
||||
test_cors_header_option
|
||||
test_cors_origin_response
|
||||
test_cors_origin_wildcard
|
||||
test_cors_presigned_get_object
|
||||
test_cors_presigned_get_object_tenant
|
||||
test_cors_presigned_get_object_tenant_v2
|
||||
test_cors_presigned_get_object_v2
|
||||
test_cors_presigned_put_object
|
||||
test_cors_presigned_put_object_tenant
|
||||
test_cors_presigned_put_object_tenant_v2
|
||||
test_cors_presigned_put_object_tenant_with_acl
|
||||
test_cors_presigned_put_object_v2
|
||||
test_cors_presigned_put_object_with_acl
|
||||
test_create_bucket_bucket_owner_enforced
|
||||
test_create_bucket_bucket_owner_preferred
|
||||
test_create_bucket_object_writer
|
||||
test_delete_marker_expiration
|
||||
test_delete_marker_nonversioned
|
||||
test_delete_marker_suspended
|
||||
test_delete_marker_versioned
|
||||
test_delete_object_current_if_match
|
||||
test_delete_object_current_if_match_last_modified_time
|
||||
test_delete_object_current_if_match_size
|
||||
test_delete_object_if_match
|
||||
test_delete_object_if_match_last_modified_time
|
||||
test_delete_object_if_match_size
|
||||
test_delete_object_version_if_match
|
||||
test_delete_object_version_if_match_last_modified_time
|
||||
test_delete_object_version_if_match_size
|
||||
test_delete_objects_current_if_match
|
||||
test_delete_objects_current_if_match_last_modified_time
|
||||
test_delete_objects_current_if_match_size
|
||||
test_delete_objects_if_match
|
||||
test_delete_objects_if_match_last_modified_time
|
||||
test_delete_objects_if_match_size
|
||||
test_delete_objects_version_if_match
|
||||
test_delete_objects_version_if_match_last_modified_time
|
||||
test_delete_objects_version_if_match_size
|
||||
test_delete_tags_obj_public
|
||||
test_encrypted_transfer_13b
|
||||
test_encrypted_transfer_1MB
|
||||
test_encrypted_transfer_1b
|
||||
test_encrypted_transfer_1kb
|
||||
test_encryption_sse_c_deny_algo_with_bucket_policy
|
||||
test_encryption_sse_c_enforced_with_bucket_policy
|
||||
test_encryption_sse_c_multipart_invalid_chunks_1
|
||||
test_encryption_sse_c_multipart_invalid_chunks_2
|
||||
test_encryption_sse_c_multipart_upload
|
||||
test_encryption_sse_c_post_object_authenticated_request
|
||||
test_encryption_sse_c_unaligned_multipart_upload
|
||||
test_expected_bucket_owner
|
||||
test_get_multipart_checksum_object_attributes
|
||||
test_get_multipart_object_attributes
|
||||
test_get_obj_tagging
|
||||
test_get_object_attributes
|
||||
test_get_object_ifmatch_failed
|
||||
test_get_object_ifmodifiedsince_failed
|
||||
test_get_object_ifnonematch_failed
|
||||
test_get_object_ifnonematch_good
|
||||
test_get_object_ifunmodifiedsince_good
|
||||
test_get_paginated_multipart_object_attributes
|
||||
test_get_single_multipart_object_attributes
|
||||
test_get_sse_c_encrypted_object_attributes
|
||||
test_get_tags_acl_public
|
||||
test_head_bucket_usage
|
||||
test_lifecycle_cloud_multiple_transition
|
||||
test_lifecycle_cloud_transition
|
||||
test_lifecycle_cloud_transition_large_obj
|
||||
test_lifecycle_deletemarker_expiration
|
||||
test_lifecycle_deletemarker_expiration_with_days_tag
|
||||
test_lifecycle_expiration
|
||||
test_lifecycle_expiration_date
|
||||
test_lifecycle_expiration_header_and_tags_head
|
||||
test_lifecycle_expiration_header_head
|
||||
test_lifecycle_expiration_header_tags_head
|
||||
test_lifecycle_expiration_newer_noncurrent
|
||||
test_lifecycle_expiration_noncur_tags1
|
||||
test_lifecycle_expiration_size_gt
|
||||
test_lifecycle_expiration_size_lt
|
||||
test_lifecycle_expiration_tags1
|
||||
test_lifecycle_expiration_tags2
|
||||
test_lifecycle_expiration_versioned_tags2
|
||||
test_lifecycle_expiration_versioning_enabled
|
||||
test_lifecycle_multipart_expiration
|
||||
test_lifecycle_noncur_cloud_transition
|
||||
test_lifecycle_noncur_expiration
|
||||
test_lifecycle_noncur_transition
|
||||
test_lifecycle_transition
|
||||
test_lifecycle_transition_single_rule_multi_trans
|
||||
test_lifecyclev2_expiration
|
||||
test_list_buckets_anonymous
|
||||
test_list_buckets_invalid_auth
|
||||
test_list_buckets_paginated
|
||||
test_list_multipart_upload
|
||||
test_list_multipart_upload_owner
|
||||
test_multipart_checksum_sha256
|
||||
test_multipart_copy_improper_range
|
||||
test_multipart_copy_invalid_range
|
||||
test_multipart_copy_multiple_sizes
|
||||
test_multipart_copy_small
|
||||
test_multipart_copy_special_names
|
||||
test_multipart_copy_versioned
|
||||
test_multipart_get_part
|
||||
test_multipart_put_current_object_if_match
|
||||
test_multipart_put_current_object_if_none_match
|
||||
test_multipart_put_object_if_match
|
||||
test_multipart_single_get_part
|
||||
test_multipart_sse_c_get_part
|
||||
test_multipart_upload
|
||||
test_multipart_upload_contents
|
||||
test_multipart_upload_resend_part
|
||||
test_multipart_upload_small
|
||||
test_multipart_use_cksum_helper_crc32
|
||||
test_multipart_use_cksum_helper_crc32c
|
||||
test_multipart_use_cksum_helper_crc64nvme
|
||||
test_multipart_use_cksum_helper_sha1
|
||||
test_multipart_use_cksum_helper_sha256
|
||||
test_non_multipart_get_part
|
||||
test_non_multipart_sse_c_get_part
|
||||
test_object_acl
|
||||
test_object_acl_canned
|
||||
test_object_acl_canned_authenticatedread
|
||||
test_object_acl_canned_bucketownerfullcontrol
|
||||
test_object_acl_canned_bucketownerread
|
||||
test_object_acl_canned_during_create
|
||||
test_object_acl_canned_publicreadwrite
|
||||
test_object_acl_default
|
||||
test_object_acl_full_control_verify_attributes
|
||||
test_object_acl_full_control_verify_owner
|
||||
test_object_acl_read
|
||||
test_object_acl_readacp
|
||||
test_object_acl_write
|
||||
test_object_acl_writeacp
|
||||
test_object_anon_put
|
||||
test_object_anon_put_write_access
|
||||
test_object_content_encoding_aws_chunked
|
||||
test_object_copy_16m
|
||||
test_object_copy_canned_acl
|
||||
test_object_copy_diff_bucket
|
||||
test_object_copy_not_owned_bucket
|
||||
test_object_copy_replacing_metadata
|
||||
test_object_copy_retaining_metadata
|
||||
test_object_copy_same_bucket
|
||||
test_object_copy_to_itself
|
||||
test_object_copy_to_itself_with_metadata
|
||||
test_object_copy_verify_contenttype
|
||||
test_object_copy_versioned_bucket
|
||||
test_object_copy_versioned_url_encoding
|
||||
test_object_copy_versioning_multipart_upload
|
||||
test_object_copy_zero_size
|
||||
test_object_delete_key_bucket_gone
|
||||
test_object_header_acl_grants
|
||||
test_object_lock_changing_mode_from_compliance
|
||||
test_object_lock_changing_mode_from_governance_with_bypass
|
||||
test_object_lock_changing_mode_from_governance_without_bypass
|
||||
test_object_lock_delete_multipart_object_with_legal_hold_on
|
||||
test_object_lock_delete_multipart_object_with_retention
|
||||
test_object_lock_delete_object_with_legal_hold_off
|
||||
test_object_lock_delete_object_with_legal_hold_on
|
||||
test_object_lock_delete_object_with_retention
|
||||
test_object_lock_delete_object_with_retention_and_marker
|
||||
test_object_lock_get_legal_hold
|
||||
test_object_lock_get_obj_lock
|
||||
test_object_lock_get_obj_metadata
|
||||
test_object_lock_get_obj_retention
|
||||
test_object_lock_get_obj_retention_iso8601
|
||||
test_object_lock_multi_delete_object_with_retention
|
||||
test_object_lock_put_legal_hold
|
||||
test_object_lock_put_legal_hold_invalid_status
|
||||
test_object_lock_put_obj_lock
|
||||
test_object_lock_put_obj_lock_invalid_days
|
||||
test_object_lock_put_obj_lock_invalid_mode
|
||||
test_object_lock_put_obj_lock_invalid_status
|
||||
test_object_lock_put_obj_lock_invalid_years
|
||||
test_object_lock_put_obj_lock_with_days_and_years
|
||||
test_object_lock_put_obj_retention
|
||||
test_object_lock_put_obj_retention_increase_period
|
||||
test_object_lock_put_obj_retention_invalid_mode
|
||||
test_object_lock_put_obj_retention_override_default_retention
|
||||
test_object_lock_put_obj_retention_shorten_period
|
||||
test_object_lock_put_obj_retention_shorten_period_bypass
|
||||
test_object_lock_put_obj_retention_versionid
|
||||
test_object_lock_suspend_versioning
|
||||
test_object_lock_uploading_obj
|
||||
test_object_raw_authenticated
|
||||
test_object_raw_authenticated_bucket_acl
|
||||
test_object_raw_authenticated_bucket_gone
|
||||
test_object_raw_authenticated_object_acl
|
||||
test_object_raw_authenticated_object_gone
|
||||
test_object_raw_get
|
||||
test_object_raw_get_bucket_acl
|
||||
test_object_raw_get_bucket_gone
|
||||
test_object_raw_get_object_acl
|
||||
test_object_raw_get_object_gone
|
||||
test_object_raw_get_x_amz_expires_not_expired
|
||||
test_object_raw_get_x_amz_expires_not_expired_tenant
|
||||
test_object_raw_get_x_amz_expires_out_max_range
|
||||
test_object_raw_get_x_amz_expires_out_positive_range
|
||||
test_object_raw_get_x_amz_expires_out_range_zero
|
||||
test_object_raw_put_authenticated_expired
|
||||
test_object_raw_response_headers
|
||||
test_object_read_unreadable
|
||||
test_object_requestid_matches_header_on_error
|
||||
test_object_set_get_unicode_metadata
|
||||
test_object_write_with_chunked_transfer_encoding
|
||||
test_post_object_anonymous_request
|
||||
test_post_object_authenticated_no_content_type
|
||||
test_post_object_authenticated_request
|
||||
test_post_object_authenticated_request_bad_access_key
|
||||
test_post_object_case_insensitive_condition_fields
|
||||
test_post_object_condition_is_case_sensitive
|
||||
test_post_object_empty_conditions
|
||||
test_post_object_escaped_field_values
|
||||
test_post_object_expired_policy
|
||||
test_post_object_expires_is_case_sensitive
|
||||
test_post_object_ignored_header
|
||||
test_post_object_invalid_access_key
|
||||
test_post_object_invalid_content_length_argument
|
||||
test_post_object_invalid_date_format
|
||||
test_post_object_invalid_request_field_value
|
||||
test_post_object_invalid_signature
|
||||
test_post_object_missing_conditions_list
|
||||
test_post_object_missing_content_length_argument
|
||||
test_post_object_missing_expires_condition
|
||||
test_post_object_missing_policy_condition
|
||||
test_post_object_missing_signature
|
||||
test_post_object_no_key_specified
|
||||
test_post_object_request_missing_policy_specified_field
|
||||
test_post_object_set_invalid_success_code
|
||||
test_post_object_set_key_from_filename
|
||||
test_post_object_set_success_code
|
||||
test_post_object_success_redirect_action
|
||||
test_post_object_tags_anonymous_request
|
||||
test_post_object_tags_authenticated_request
|
||||
test_post_object_upload_larger_than_chunk
|
||||
test_post_object_upload_size_below_minimum
|
||||
test_post_object_upload_size_limit_exceeded
|
||||
test_post_object_upload_size_rgw_chunk_size_bug
|
||||
test_post_object_user_specified_header
|
||||
test_post_object_wrong_bucket
|
||||
test_put_bucket_acl_grant_group_read
|
||||
test_put_bucket_logging_account_j
|
||||
test_put_bucket_logging_account_s
|
||||
test_put_bucket_logging_extensions
|
||||
test_put_bucket_logging_policy_wildcard_objects
|
||||
test_put_bucket_logging_tenant_j
|
||||
test_put_bucket_logging_tenant_s
|
||||
test_put_bucket_ownership_bucket_owner_enforced
|
||||
test_put_bucket_ownership_bucket_owner_preferred
|
||||
test_put_bucket_ownership_object_writer
|
||||
test_put_current_object_if_match
|
||||
test_put_current_object_if_none_match
|
||||
test_put_delete_tags
|
||||
test_put_max_tags
|
||||
test_put_modify_tags
|
||||
test_put_obj_with_tags
|
||||
test_put_object_current_if_match
|
||||
test_put_object_if_match
|
||||
test_put_object_ifmatch_failed
|
||||
test_put_object_ifmatch_good
|
||||
test_put_object_ifmatch_nonexisted_failed
|
||||
test_put_object_ifmatch_overwrite_existed_good
|
||||
test_put_object_ifnonmatch_failed
|
||||
test_put_object_ifnonmatch_good
|
||||
test_put_object_ifnonmatch_nonexisted_good
|
||||
test_put_object_ifnonmatch_overwrite_existed_failed
|
||||
test_put_tags_acl_public
|
||||
test_ranged_big_request_response_code
|
||||
test_ranged_request_response_code
|
||||
test_ranged_request_return_trailing_bytes_response_code
|
||||
test_ranged_request_skip_leading_bytes_response_code
|
||||
test_read_through
|
||||
test_restore_noncur_obj
|
||||
test_restore_object_permanent
|
||||
test_restore_object_temporary
|
||||
test_set_cors
|
||||
test_sse_kms_default_post_object_authenticated_request
|
||||
test_sse_kms_default_upload_1b
|
||||
test_sse_kms_default_upload_1kb
|
||||
test_sse_kms_default_upload_1mb
|
||||
test_sse_kms_default_upload_8mb
|
||||
test_sse_kms_method_head
|
||||
test_sse_kms_multipart_invalid_chunks_1
|
||||
test_sse_kms_multipart_invalid_chunks_2
|
||||
test_sse_kms_multipart_upload
|
||||
test_sse_kms_post_object_authenticated_request
|
||||
test_sse_kms_present
|
||||
test_sse_kms_transfer_13b
|
||||
test_sse_kms_transfer_1MB
|
||||
test_sse_kms_transfer_1b
|
||||
test_sse_kms_transfer_1kb
|
||||
test_sse_s3_default_method_head
|
||||
test_sse_s3_default_multipart_upload
|
||||
test_sse_s3_default_post_object_authenticated_request
|
||||
test_sse_s3_default_upload_1b
|
||||
test_sse_s3_default_upload_1kb
|
||||
test_sse_s3_default_upload_1mb
|
||||
test_sse_s3_default_upload_8mb
|
||||
test_sse_s3_encrypted_upload_1b
|
||||
test_sse_s3_encrypted_upload_1kb
|
||||
test_sse_s3_encrypted_upload_1mb
|
||||
test_sse_s3_encrypted_upload_8mb
|
||||
test_versioned_object_acl_no_version_specified
|
||||
test_versioning_copy_obj_version
|
||||
test_versioning_multi_object_delete_with_marker_create
|
||||
test_versioning_obj_create_overwrite_multipart
|
||||
test_versioning_obj_suspended_copy
|
||||
test_versioning_stack_delete_merkers
|
||||
@@ -33,30 +33,10 @@ S3_PORT="${S3_PORT:-9000}"
|
||||
TEST_MODE="${TEST_MODE:-single}"
|
||||
MAXFAIL="${MAXFAIL:-1}"
|
||||
XDIST="${XDIST:-0}"
|
||||
MARKEXPR="${MARKEXPR:-not lifecycle and not versioning and not s3website and not bucket_logging and not encryption}"
|
||||
|
||||
# Configuration file paths
|
||||
S3TESTS_CONF_TEMPLATE="${S3TESTS_CONF_TEMPLATE:-.github/s3tests/s3tests.conf}"
|
||||
S3TESTS_CONF="${S3TESTS_CONF:-s3tests.conf}"
|
||||
|
||||
# Service deployment mode: "build", "binary", "docker", or "existing"
|
||||
# - "build": Compile with cargo build --release and run (default)
|
||||
# - "binary": Use pre-compiled binary (RUSTFS_BINARY path or default)
|
||||
# - "docker": Build Docker image and run in container
|
||||
# - "existing": Use already running service (skip start, use S3_HOST and S3_PORT)
|
||||
DEPLOY_MODE="${DEPLOY_MODE:-build}"
|
||||
RUSTFS_BINARY="${RUSTFS_BINARY:-}"
|
||||
NO_CACHE="${NO_CACHE:-false}"
|
||||
|
||||
# Directories
|
||||
# Directories (define early for use in test list loading)
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
|
||||
ARTIFACTS_DIR="${PROJECT_ROOT}/artifacts/s3tests-${TEST_MODE}"
|
||||
CONTAINER_NAME="rustfs-${TEST_MODE}"
|
||||
NETWORK_NAME="rustfs-net"
|
||||
DATA_ROOT="${DATA_ROOT:-target}"
|
||||
DATA_DIR="${PROJECT_ROOT}/${DATA_ROOT}/test-data/${CONTAINER_NAME}"
|
||||
RUSTFS_PID=""
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
@@ -77,6 +57,137 @@ log_error() {
|
||||
echo -e "${RED}[ERROR]${NC} $*"
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# Test Classification Files
|
||||
# =============================================================================
|
||||
# Tests are classified into three categories stored in text files:
|
||||
# - non_standard_tests.txt: Ceph/RGW specific tests (permanently excluded)
|
||||
# - unimplemented_tests.txt: Standard S3 features not yet implemented
|
||||
# - implemented_tests.txt: Tests that should pass on RustFS
|
||||
#
|
||||
# By default, only tests listed in implemented_tests.txt are run.
|
||||
# Use TESTEXPR env var to override and run custom test selection.
|
||||
# =============================================================================
|
||||
|
||||
# Test list files location
|
||||
TEST_LISTS_DIR="${SCRIPT_DIR}"
|
||||
IMPLEMENTED_TESTS_FILE="${TEST_LISTS_DIR}/implemented_tests.txt"
|
||||
NON_STANDARD_TESTS_FILE="${TEST_LISTS_DIR}/non_standard_tests.txt"
|
||||
UNIMPLEMENTED_TESTS_FILE="${TEST_LISTS_DIR}/unimplemented_tests.txt"
|
||||
|
||||
# =============================================================================
|
||||
# build_testexpr_from_file: Read test names from file and build pytest -k expr
|
||||
# =============================================================================
|
||||
# Reads test names from a file (one per line, ignoring comments and empty lines)
|
||||
# and builds a pytest -k expression to include only those tests.
|
||||
# =============================================================================
|
||||
build_testexpr_from_file() {
|
||||
local file="$1"
|
||||
local expr=""
|
||||
|
||||
if [[ ! -f "${file}" ]]; then
|
||||
log_error "Test list file not found: ${file}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
while IFS= read -r line || [[ -n "$line" ]]; do
|
||||
# Skip empty lines and comments
|
||||
[[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && continue
|
||||
# Trim whitespace
|
||||
line=$(echo "$line" | xargs)
|
||||
[[ -z "$line" ]] && continue
|
||||
|
||||
if [[ -n "${expr}" ]]; then
|
||||
expr+=" or "
|
||||
fi
|
||||
expr+="${line}"
|
||||
done < "${file}"
|
||||
|
||||
echo "${expr}"
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# MARKEXPR: pytest marker expression (safety net for marker-based filtering)
|
||||
# =============================================================================
|
||||
# Even though we use file-based test selection, we keep marker exclusions
|
||||
# as a safety net to ensure no non-standard tests slip through.
|
||||
# =============================================================================
|
||||
if [[ -z "${MARKEXPR:-}" ]]; then
|
||||
# Minimal marker exclusions as safety net (file-based filtering is primary)
|
||||
MARKEXPR="not fails_on_aws and not fails_on_rgw and not fails_on_dbstore"
|
||||
fi
|
||||
|
||||
# =============================================================================
|
||||
# TESTEXPR: pytest -k expression to select specific tests
|
||||
# =============================================================================
|
||||
# By default, builds an inclusion expression from implemented_tests.txt.
|
||||
# Use TESTEXPR env var to override with custom selection.
|
||||
#
|
||||
# The file-based approach provides:
|
||||
# 1. Clear visibility of which tests are run
|
||||
# 2. Easy maintenance - edit txt files to add/remove tests
|
||||
# 3. Separation of concerns - test classification vs test execution
|
||||
# =============================================================================
|
||||
if [[ -z "${TESTEXPR:-}" ]]; then
|
||||
if [[ -f "${IMPLEMENTED_TESTS_FILE}" ]]; then
|
||||
log_info "Loading test list from: ${IMPLEMENTED_TESTS_FILE}"
|
||||
TESTEXPR=$(build_testexpr_from_file "${IMPLEMENTED_TESTS_FILE}")
|
||||
if [[ -z "${TESTEXPR}" ]]; then
|
||||
log_error "No tests found in ${IMPLEMENTED_TESTS_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
# Count tests for logging
|
||||
TEST_COUNT=$(grep -v '^#' "${IMPLEMENTED_TESTS_FILE}" | grep -v '^[[:space:]]*$' | wc -l | xargs)
|
||||
log_info "Loaded ${TEST_COUNT} tests from implemented_tests.txt"
|
||||
else
|
||||
log_warn "Test list file not found: ${IMPLEMENTED_TESTS_FILE}"
|
||||
log_warn "Falling back to exclusion-based filtering"
|
||||
# Fallback to exclusion-based filtering if file doesn't exist
|
||||
EXCLUDED_TESTS=(
|
||||
"test_post_object"
|
||||
"test_bucket_list_objects_anonymous"
|
||||
"test_bucket_listv2_objects_anonymous"
|
||||
"test_bucket_concurrent_set_canned_acl"
|
||||
"test_bucket_acl"
|
||||
"test_object_acl"
|
||||
"test_access_bucket"
|
||||
"test_100_continue"
|
||||
"test_cors"
|
||||
"test_object_raw"
|
||||
"test_versioning"
|
||||
"test_versioned"
|
||||
)
|
||||
TESTEXPR=""
|
||||
for pattern in "${EXCLUDED_TESTS[@]}"; do
|
||||
if [[ -n "${TESTEXPR}" ]]; then
|
||||
TESTEXPR+=" and "
|
||||
fi
|
||||
TESTEXPR+="not ${pattern}"
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
# Configuration file paths
|
||||
S3TESTS_CONF_TEMPLATE="${S3TESTS_CONF_TEMPLATE:-.github/s3tests/s3tests.conf}"
|
||||
S3TESTS_CONF="${S3TESTS_CONF:-s3tests.conf}"
|
||||
|
||||
# Service deployment mode: "build", "binary", "docker", or "existing"
|
||||
# - "build": Compile with cargo build --release and run (default)
|
||||
# - "binary": Use pre-compiled binary (RUSTFS_BINARY path or default)
|
||||
# - "docker": Build Docker image and run in container
|
||||
# - "existing": Use already running service (skip start, use S3_HOST and S3_PORT)
|
||||
DEPLOY_MODE="${DEPLOY_MODE:-build}"
|
||||
RUSTFS_BINARY="${RUSTFS_BINARY:-}"
|
||||
NO_CACHE="${NO_CACHE:-false}"
|
||||
|
||||
# Additional directories (SCRIPT_DIR and PROJECT_ROOT defined earlier)
|
||||
ARTIFACTS_DIR="${PROJECT_ROOT}/artifacts/s3tests-${TEST_MODE}"
|
||||
CONTAINER_NAME="rustfs-${TEST_MODE}"
|
||||
NETWORK_NAME="rustfs-net"
|
||||
DATA_ROOT="${DATA_ROOT:-target}"
|
||||
DATA_DIR="${PROJECT_ROOT}/${DATA_ROOT}/test-data/${CONTAINER_NAME}"
|
||||
RUSTFS_PID=""
|
||||
|
||||
show_usage() {
|
||||
cat << EOF
|
||||
Usage: $0 [OPTIONS]
|
||||
@@ -102,14 +213,22 @@ Environment Variables:
|
||||
S3_ALT_SECRET_KEY - Alt user secret key (default: rustfsalt)
|
||||
MAXFAIL - Stop after N failures (default: 1)
|
||||
XDIST - Enable parallel execution with N workers (default: 0)
|
||||
MARKEXPR - pytest marker expression (default: exclude unsupported features)
|
||||
MARKEXPR - pytest marker expression (default: safety net exclusions)
|
||||
TESTEXPR - pytest -k expression (default: from implemented_tests.txt)
|
||||
S3TESTS_CONF_TEMPLATE - Path to s3tests config template (default: .github/s3tests/s3tests.conf)
|
||||
S3TESTS_CONF - Path to generated s3tests config (default: s3tests.conf)
|
||||
DATA_ROOT - Root directory for test data storage (default: target)
|
||||
Final path: ${DATA_ROOT}/test-data/${CONTAINER_NAME}
|
||||
Final path: \${DATA_ROOT}/test-data/\${CONTAINER_NAME}
|
||||
|
||||
Test Classification Files (in scripts/s3-tests/):
|
||||
implemented_tests.txt - Tests that should pass (run by default)
|
||||
unimplemented_tests.txt - Standard S3 features not yet implemented
|
||||
non_standard_tests.txt - Ceph/RGW specific tests (permanently excluded)
|
||||
|
||||
Notes:
|
||||
- In build mode, if the binary exists and was compiled less than 5 minutes ago,
|
||||
- Tests are loaded from implemented_tests.txt by default
|
||||
- Set TESTEXPR to override with custom test selection
|
||||
- In build mode, if the binary exists and was compiled less than 30 minutes ago,
|
||||
compilation will be skipped unless --no-cache is specified.
|
||||
|
||||
Examples:
|
||||
@@ -383,7 +502,24 @@ check_server_ready_from_log() {
|
||||
|
||||
# Test S3 API readiness
|
||||
test_s3_api_ready() {
|
||||
# Try awscurl first if available
|
||||
# Step 1: Check if server is responding using /health endpoint
|
||||
# /health is a probe path that bypasses readiness gate, so it can be used
|
||||
# to check if the server is up and running, even if readiness gate is not ready yet
|
||||
HEALTH_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
|
||||
-X GET \
|
||||
"http://${S3_HOST}:${S3_PORT}/health" \
|
||||
--max-time 5 2>/dev/null || echo "000")
|
||||
|
||||
if [ "${HEALTH_CODE}" = "000" ]; then
|
||||
# Connection failed - server might not be running or not listening yet
|
||||
return 1
|
||||
elif [ "${HEALTH_CODE}" != "200" ]; then
|
||||
# Health endpoint returned non-200 status, server might have issues
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Step 2: Test S3 API with signed request (awscurl) if available
|
||||
# This tests if S3 API is actually ready and can process requests
|
||||
if command -v awscurl >/dev/null 2>&1; then
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
RESPONSE=$(awscurl --service s3 --region "${S3_REGION}" \
|
||||
@@ -392,18 +528,24 @@ test_s3_api_ready() {
|
||||
-X GET "http://${S3_HOST}:${S3_PORT}/" 2>&1)
|
||||
|
||||
if echo "${RESPONSE}" | grep -q "<ListAllMyBucketsResult"; then
|
||||
# S3 API is ready and responding correctly
|
||||
return 0
|
||||
fi
|
||||
# If awscurl failed, check if it's a 503 (Service Unavailable) which means not ready
|
||||
if echo "${RESPONSE}" | grep -q "503\|Service not ready"; then
|
||||
return 1 # Not ready yet (readiness gate is blocking S3 API)
|
||||
fi
|
||||
# Other errors from awscurl - might be auth issues or other problems
|
||||
# But server is up, so we'll consider it ready (S3 API might have other issues)
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Fallback: test /health endpoint (this bypasses readiness gate)
|
||||
if curl -sf "http://${S3_HOST}:${S3_PORT}/health" >/dev/null 2>&1; then
|
||||
# Health endpoint works, but we need to verify S3 API works too
|
||||
# Wait a bit more for FullReady to be fully set
|
||||
return 1 # Not fully ready yet, but progressing
|
||||
fi
|
||||
|
||||
return 1 # Not ready
|
||||
# Step 3: Fallback - if /health returns 200, server is up and readiness gate is ready
|
||||
# Since /health is a probe path and returns 200, and we don't have awscurl to test S3 API,
|
||||
# we can assume the server is ready. The readiness gate would have blocked /health if not ready.
|
||||
# Note: Root path "/" with HEAD method returns 501 Not Implemented (S3 doesn't support HEAD on root),
|
||||
# so we can't use it as a reliable test. Since /health already confirmed readiness, we return success.
|
||||
return 0
|
||||
}
|
||||
|
||||
# First, wait for server to log "server started successfully"
|
||||
@@ -445,16 +587,41 @@ for i in {1..20}; do
|
||||
fi
|
||||
fi
|
||||
|
||||
# Show last test attempt
|
||||
log_error "Last S3 API test:"
|
||||
# Show last test attempt with detailed diagnostics
|
||||
log_error "Last S3 API readiness test diagnostics:"
|
||||
|
||||
# Test /health endpoint (probe path, bypasses readiness gate)
|
||||
log_error "Step 1: Testing /health endpoint (probe path):"
|
||||
HEALTH_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
|
||||
-X GET \
|
||||
"http://${S3_HOST}:${S3_PORT}/health" \
|
||||
--max-time 5 2>&1 || echo "000")
|
||||
log_error " /health HTTP status code: ${HEALTH_CODE}"
|
||||
if [ "${HEALTH_CODE}" != "200" ]; then
|
||||
log_error " /health endpoint response:"
|
||||
curl -s "http://${S3_HOST}:${S3_PORT}/health" 2>&1 | head -5 || true
|
||||
fi
|
||||
|
||||
# Test S3 API with signed request if awscurl is available
|
||||
if command -v awscurl >/dev/null 2>&1; then
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
log_error "Step 2: Testing S3 API with awscurl (signed request):"
|
||||
awscurl --service s3 --region "${S3_REGION}" \
|
||||
--access_key "${S3_ACCESS_KEY}" \
|
||||
--secret_key "${S3_SECRET_KEY}" \
|
||||
-X GET "http://${S3_HOST}:${S3_PORT}/" 2>&1 | head -20
|
||||
else
|
||||
curl -v "http://${S3_HOST}:${S3_PORT}/health" 2>&1 | head -10
|
||||
log_error "Step 2: Testing S3 API root path (unsigned HEAD request):"
|
||||
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
|
||||
-X HEAD \
|
||||
"http://${S3_HOST}:${S3_PORT}/" \
|
||||
--max-time 5 2>&1 || echo "000")
|
||||
log_error " Root path HTTP status code: ${HTTP_CODE}"
|
||||
if [ "${HTTP_CODE}" = "503" ]; then
|
||||
log_error " Note: 503 indicates readiness gate is blocking (service not ready)"
|
||||
elif [ "${HTTP_CODE}" = "000" ]; then
|
||||
log_error " Note: 000 indicates connection failure"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Output logs based on deployment mode
|
||||
@@ -514,12 +681,51 @@ envsubst < "${TEMPLATE_PATH}" > "${CONF_OUTPUT_PATH}" || {
|
||||
# Step 7: Provision s3-tests alt user
|
||||
# Note: Main user (rustfsadmin) is a system user and doesn't need to be created via API
|
||||
log_info "Provisioning s3-tests alt user..."
|
||||
|
||||
# Helper function to install Python packages with fallback for externally-managed environments
|
||||
install_python_package() {
|
||||
local package=$1
|
||||
local error_output
|
||||
|
||||
# Try --user first (works on most Linux systems)
|
||||
error_output=$(python3 -m pip install --user --upgrade pip "${package}" 2>&1)
|
||||
if [ $? -eq 0 ]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# If that fails with externally-managed-environment error, try with --break-system-packages
|
||||
if echo "${error_output}" | grep -q "externally-managed-environment"; then
|
||||
log_warn "Detected externally-managed Python environment, using --break-system-packages flag"
|
||||
python3 -m pip install --user --break-system-packages --upgrade pip "${package}" || {
|
||||
log_error "Failed to install ${package} even with --break-system-packages"
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Other errors - show the error output
|
||||
log_error "Failed to install ${package}: ${error_output}"
|
||||
return 1
|
||||
}
|
||||
|
||||
if ! command -v awscurl >/dev/null 2>&1; then
|
||||
python3 -m pip install --user --upgrade pip awscurl || {
|
||||
install_python_package awscurl || {
|
||||
log_error "Failed to install awscurl"
|
||||
exit 1
|
||||
}
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
# Add common Python user bin directories to PATH
|
||||
# macOS: ~/Library/Python/X.Y/bin
|
||||
# Linux: ~/.local/bin
|
||||
PYTHON_VERSION=$(python3 -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')" 2>/dev/null || echo "3.14")
|
||||
export PATH="$HOME/Library/Python/${PYTHON_VERSION}/bin:$HOME/.local/bin:$PATH"
|
||||
# Verify awscurl is now available
|
||||
if ! command -v awscurl >/dev/null 2>&1; then
|
||||
log_error "awscurl installed but not found in PATH. Tried:"
|
||||
log_error " - $HOME/Library/Python/${PYTHON_VERSION}/bin"
|
||||
log_error " - $HOME/.local/bin"
|
||||
log_error "Please ensure awscurl is in your PATH"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Provision alt user (required by suite)
|
||||
@@ -576,11 +782,13 @@ cd "${PROJECT_ROOT}/s3-tests"
|
||||
|
||||
# Install tox if not available
|
||||
if ! command -v tox >/dev/null 2>&1; then
|
||||
python3 -m pip install --user --upgrade pip tox || {
|
||||
install_python_package tox || {
|
||||
log_error "Failed to install tox"
|
||||
exit 1
|
||||
}
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
# Add common Python user bin directories to PATH (same as awscurl)
|
||||
PYTHON_VERSION=$(python3 -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')" 2>/dev/null || echo "3.14")
|
||||
export PATH="$HOME/Library/Python/${PYTHON_VERSION}/bin:$HOME/.local/bin:$PATH"
|
||||
fi
|
||||
|
||||
# Step 9: Run ceph s3-tests
|
||||
@@ -606,6 +814,7 @@ S3TEST_CONF="${CONF_OUTPUT_PATH}" \
|
||||
${XDIST_ARGS} \
|
||||
s3tests/functional/test_s3.py \
|
||||
-m "${MARKEXPR}" \
|
||||
-k "${TESTEXPR}" \
|
||||
2>&1 | tee "${ARTIFACTS_DIR}/pytest.log"
|
||||
|
||||
TEST_EXIT_CODE=${PIPESTATUS[0]}
|
||||
|
||||
191
scripts/s3-tests/unimplemented_tests.txt
Normal file
191
scripts/s3-tests/unimplemented_tests.txt
Normal file
@@ -0,0 +1,191 @@
|
||||
# Unimplemented S3 feature tests
|
||||
# ==============================
|
||||
#
|
||||
# These tests cover STANDARD S3 features not yet implemented in RustFS.
|
||||
# They are TEMPORARILY EXCLUDED and should be enabled as features are added.
|
||||
#
|
||||
# Unimplemented features:
|
||||
# - Versioning: Object versioning support
|
||||
# - Lifecycle: Object lifecycle management
|
||||
# - S3 Website: Static website hosting
|
||||
# - Bucket Logging: Access logging
|
||||
# - SSE-S3: Server-side encryption with S3-managed keys
|
||||
# - Object Lock: WORM protection
|
||||
# - IAM: Identity and Access Management roles/users
|
||||
# - SNS: Event notifications
|
||||
# - STS: Security Token Service
|
||||
# - Checksum: Full checksum validation
|
||||
# - Conditional writes: If-Match/If-None-Match for writes
|
||||
# - Object ownership: BucketOwnerEnforced/Preferred
|
||||
#
|
||||
# Total: all unimplemented S3 feature tests listed below (keep this comment in sync with the list)
|
||||
|
||||
test_bucket_create_delete_bucket_ownership
|
||||
test_bucket_logging_owner
|
||||
test_bucket_policy_deny_self_denied_policy
|
||||
test_bucket_policy_deny_self_denied_policy_confirm_header
|
||||
test_bucket_policy_put_obj_kms_s3
|
||||
test_bucket_policy_put_obj_s3_kms
|
||||
test_copy_enc
|
||||
test_copy_part_enc
|
||||
test_delete_bucket_encryption_kms
|
||||
test_delete_bucket_encryption_s3
|
||||
test_encryption_key_no_sse_c
|
||||
test_encryption_sse_c_invalid_md5
|
||||
test_encryption_sse_c_method_head
|
||||
test_encryption_sse_c_multipart_bad_download
|
||||
test_encryption_sse_c_no_key
|
||||
test_encryption_sse_c_no_md5
|
||||
test_encryption_sse_c_other_key
|
||||
test_encryption_sse_c_present
|
||||
test_get_bucket_encryption_kms
|
||||
test_get_bucket_encryption_s3
|
||||
test_get_versioned_object_attributes
|
||||
test_lifecycle_delete
|
||||
test_lifecycle_expiration_days0
|
||||
test_lifecycle_expiration_header_put
|
||||
test_lifecycle_get
|
||||
test_lifecycle_get_no_id
|
||||
test_lifecycle_id_too_long
|
||||
test_lifecycle_invalid_status
|
||||
test_lifecycle_plain_null_version_current_transition
|
||||
test_lifecycle_same_id
|
||||
test_lifecycle_set
|
||||
test_lifecycle_set_date
|
||||
test_lifecycle_set_deletemarker
|
||||
test_lifecycle_set_empty_filter
|
||||
test_lifecycle_set_filter
|
||||
test_lifecycle_set_invalid_date
|
||||
test_lifecycle_set_multipart
|
||||
test_lifecycle_set_noncurrent
|
||||
test_lifecycle_set_noncurrent_transition
|
||||
test_lifecycle_transition_encrypted
|
||||
test_lifecycle_transition_set_invalid_date
|
||||
test_object_checksum_crc64nvme
|
||||
test_object_checksum_sha256
|
||||
test_object_lock_get_legal_hold_invalid_bucket
|
||||
test_object_lock_get_obj_lock_invalid_bucket
|
||||
test_object_lock_get_obj_retention_invalid_bucket
|
||||
test_object_lock_put_legal_hold_invalid_bucket
|
||||
test_object_lock_put_obj_lock_enable_after_create
|
||||
test_object_lock_put_obj_lock_invalid_bucket
|
||||
test_object_lock_put_obj_retention_invalid_bucket
|
||||
test_post_object_upload_checksum
|
||||
test_put_bucket_encryption_kms
|
||||
test_put_bucket_encryption_s3
|
||||
test_put_bucket_logging
|
||||
test_put_bucket_logging_errors
|
||||
test_put_bucket_logging_permissions
|
||||
test_put_bucket_logging_policy_wildcard
|
||||
test_put_obj_enc_conflict_bad_enc_kms
|
||||
test_put_obj_enc_conflict_c_kms
|
||||
test_put_obj_enc_conflict_c_s3
|
||||
test_put_obj_enc_conflict_s3_kms
|
||||
test_rm_bucket_logging
|
||||
test_sse_kms_no_key
|
||||
test_sse_kms_not_declared
|
||||
test_sse_kms_read_declare
|
||||
test_versioned_concurrent_object_create_and_remove
|
||||
test_versioned_concurrent_object_create_concurrent_remove
|
||||
test_versioned_object_acl
|
||||
test_versioning_bucket_atomic_upload_return_version_id
|
||||
test_versioning_bucket_create_suspend
|
||||
test_versioning_bucket_multipart_upload_return_version_id
|
||||
test_versioning_concurrent_multi_object_delete
|
||||
test_versioning_multi_object_delete
|
||||
test_versioning_multi_object_delete_with_marker
|
||||
test_versioning_obj_create_read_remove
|
||||
test_versioning_obj_create_read_remove_head
|
||||
test_versioning_obj_create_versions_remove_all
|
||||
test_versioning_obj_create_versions_remove_special_names
|
||||
test_versioning_obj_list_marker
|
||||
test_versioning_obj_plain_null_version_overwrite
|
||||
test_versioning_obj_plain_null_version_overwrite_suspended
|
||||
test_versioning_obj_plain_null_version_removal
|
||||
test_versioning_obj_suspend_versions
|
||||
|
||||
# Teardown issues (list_object_versions on non-versioned buckets)
|
||||
test_bucket_list_delimiter_alt
|
||||
test_bucket_list_delimiter_basic
|
||||
test_bucket_list_delimiter_dot
|
||||
test_bucket_list_delimiter_empty
|
||||
test_bucket_list_delimiter_none
|
||||
test_bucket_list_delimiter_not_exist
|
||||
test_bucket_list_delimiter_percentage
|
||||
test_bucket_list_delimiter_prefix_ends_with_delimiter
|
||||
test_bucket_list_delimiter_unreadable
|
||||
test_bucket_list_delimiter_whitespace
|
||||
test_bucket_list_encoding_basic
|
||||
test_bucket_listv2_delimiter_alt
|
||||
test_bucket_listv2_delimiter_basic
|
||||
test_bucket_listv2_delimiter_dot
|
||||
test_bucket_listv2_delimiter_empty
|
||||
test_bucket_listv2_delimiter_none
|
||||
test_bucket_listv2_delimiter_not_exist
|
||||
test_bucket_listv2_delimiter_percentage
|
||||
test_bucket_listv2_delimiter_prefix_ends_with_delimiter
|
||||
test_bucket_listv2_delimiter_unreadable
|
||||
test_bucket_listv2_delimiter_whitespace
|
||||
test_bucket_listv2_encoding_basic
|
||||
|
||||
# Checksum and atomic write tests (require x-amz-checksum-* support)
|
||||
test_atomic_dual_write_1mb
|
||||
test_atomic_dual_write_4mb
|
||||
test_atomic_dual_write_8mb
|
||||
test_atomic_multipart_upload_write
|
||||
test_atomic_read_1mb
|
||||
test_atomic_read_4mb
|
||||
test_atomic_read_8mb
|
||||
test_atomic_write_1mb
|
||||
test_atomic_write_4mb
|
||||
test_atomic_write_8mb
|
||||
test_set_bucket_tagging
|
||||
|
||||
# Tests with implementation issues (need investigation)
|
||||
test_bucket_policy_acl
|
||||
test_bucket_policy_different_tenant
|
||||
test_bucketv2_policy_acl
|
||||
test_multipart_resend_first_finishes_last
|
||||
|
||||
# Multipart abort and policy issues
|
||||
test_abort_multipart_upload
|
||||
test_bucket_policy_multipart
|
||||
|
||||
# Tests with prefix conflicts or ACL/tenant dependencies
|
||||
test_bucket_policy
|
||||
test_bucket_policy_allow_notprincipal
|
||||
test_bucket_policy_another_bucket
|
||||
test_bucket_policy_put_obj_acl
|
||||
test_bucket_policy_put_obj_grant
|
||||
test_bucket_policy_tenanted_bucket
|
||||
test_bucketv2_policy
|
||||
test_object_presigned_put_object_with_acl
|
||||
test_object_presigned_put_object_with_acl_tenant
|
||||
test_object_put_acl_mtime
|
||||
|
||||
# ACL-dependent tests (PutBucketAcl not implemented)
|
||||
test_block_public_object_canned_acls
|
||||
test_block_public_put_bucket_acls
|
||||
test_get_authpublic_acl_bucket_policy_status
|
||||
test_get_nonpublicpolicy_acl_bucket_policy_status
|
||||
test_get_public_acl_bucket_policy_status
|
||||
test_get_publicpolicy_acl_bucket_policy_status
|
||||
test_ignore_public_acls
|
||||
|
||||
# PublicAccessBlock and tag validation tests
|
||||
test_block_public_policy
|
||||
test_block_public_policy_with_principal
|
||||
test_get_obj_head_tagging
|
||||
test_get_public_block_deny_bucket_policy
|
||||
test_get_undefined_public_block
|
||||
test_put_excess_key_tags
|
||||
test_put_excess_tags
|
||||
test_put_excess_val_tags
|
||||
test_put_get_delete_public_block
|
||||
test_put_public_block
|
||||
test_set_get_del_bucket_policy
|
||||
|
||||
# Object attributes and torrent tests
|
||||
test_create_bucket_no_ownership_controls
|
||||
test_get_checksum_object_attributes
|
||||
test_get_object_torrent
|
||||
Reference in New Issue
Block a user