Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 01:30:33 +00:00)
fix(targets): make target removal and reload transactional; prevent reappearing entries (#662)
* feat: improve code for notify
* upgrade starshard version
* upgrade version
* Fix ETag format to comply with HTTP standards by wrapping with quotes (#592)
  * Initial plan
  * Fix ETag format to comply with HTTP standards by wrapping with quotes
    Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>
  * bufigx
  ---------
  Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
  Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>
  Co-authored-by: overtrue <anzhengchao@gmail.com>
* Improve lock (#596)
  * improve lock
    Signed-off-by: Mu junxiang <1948535941@qq.com>
  * feat(tests): add wait_for_object_absence helper and improve lifecycle test reliability
    Signed-off-by: Mu junxiang <1948535941@qq.com>
  * chore: remove dirty docs
    Signed-off-by: Mu junxiang <1948535941@qq.com>
  ---------
  Signed-off-by: Mu junxiang <1948535941@qq.com>
* feat(append): implement object append operations with state tracking (#599)
  * feat(append): implement object append operations with state tracking
    Signed-off-by: junxiang Mu <1948535941@qq.com>
  * chore: rebase
    Signed-off-by: junxiang Mu <1948535941@qq.com>
  ---------
  Signed-off-by: junxiang Mu <1948535941@qq.com>
* build(deps): upgrade s3s (#595)
  Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
* fix: validate mqtt broker
* improve code for `import`
* fix
* improve
* remove logger from `rustfs-obs` crate
* remove code for config Observability
* fix
* improve code
* fix comment
* up
* up
* upgrade version
* fix
* fmt
* upgrade tokio version to 1.48.0
* upgrade `datafusion` and `reed-solomon-simd` version
* fix
* fmt
* improve code for notify webhook example
* improve code
* fix
* fix
* fmt

---------

Signed-off-by: Mu junxiang <1948535941@qq.com>
Signed-off-by: junxiang Mu <1948535941@qq.com>
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>
Co-authored-by: overtrue <anzhengchao@gmail.com>
Co-authored-by: guojidan <63799833+guojidan@users.noreply.github.com>
Co-authored-by: Nugine <nugine@foxmail.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
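The core of this change: target removal and reload previously mutated a clone of the loaded configuration in place, so an entry deleted or disabled in one step could be re-inserted by a later merge and "reappear" after reload. The fix rebuilds each config section from its default entry plus only the targets that were successfully created in the current reload, then overwrites the stored section wholesale. A minimal sketch of that rebuild, with hypothetical `Kvs`/`Section` aliases standing in for `rustfs_ecstore::config::KVS` and the section maps:

    use std::collections::HashMap;

    type Kvs = HashMap<String, String>; // key/value settings for one target instance
    type Section = HashMap<String, Kvs>; // instance id -> settings

    // Rebuild a section from "defaults + successful instances" and replace it
    // as a whole; anything not recreated in this reload simply never returns.
    fn rebuild_section(default_cfg: Option<Kvs>, successes: Section) -> Option<Section> {
        let mut section = Section::new();
        if let Some(d) = default_cfg.filter(|d| !d.is_empty()) {
            section.insert("_".to_string(), d); // the default entry keeps the `_` key
        }
        section.extend(successes);
        if section.is_empty() { None } else { Some(section) } // None => drop the section
    }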
Cargo.lock (generated): 415 lines changed. File diff suppressed because it is too large.
Cargo.toml: 27 lines changed.
@@ -28,7 +28,7 @@ members = [
 "crates/madmin", # Management dashboard and admin API interface
 "crates/notify", # Notification system for events
 "crates/obs", # Observability utilities
-"crates/policy",# Policy management
+"crates/policy", # Policy management
 "crates/protos", # Protocol buffer definitions
 "crates/rio", # Rust I/O utilities and abstractions
 "crates/targets", # Target-specific configurations and utilities
@@ -113,17 +113,17 @@ brotli = "8.0.2"
 bytes = { version = "1.10.1", features = ["serde"] }
 bytesize = "2.1.0"
 byteorder = "1.5.0"
-cfg-if = "1.0.3"
+cfg-if = "1.0.4"
 convert_case = "0.8.0"
 crc-fast = "1.3.0"
 chacha20poly1305 = { version = "0.10.1" }
 chrono = { version = "0.4.42", features = ["serde"] }
-clap = { version = "4.5.48", features = ["derive", "env"] }
+clap = { version = "4.5.49", features = ["derive", "env"] }
 const-str = { version = "0.7.0", features = ["std", "proc"] }
 crc32fast = "1.5.0"
 criterion = { version = "0.7", features = ["html_reports"] }
 crossbeam-queue = "0.3.12"
-datafusion = "50.1.0"
+datafusion = "50.2.0"
 derive_builder = "0.20.2"
 enumset = "1.1.10"
 flatbuffers = "25.9.23"
@@ -193,10 +193,9 @@ pretty_assertions = "1.4.1"
 quick-xml = "0.38.3"
 rand = "0.9.2"
 rayon = "1.11.0"
-rdkafka = { version = "0.38.0", features = ["tokio"] }
-reed-solomon-simd = { version = "3.0.1" }
-regex = { version = "1.12.1" }
-reqwest = { version = "0.12.23", default-features = false, features = [
+reed-solomon-simd = { version = "3.1.0" }
+regex = { version = "1.12.2" }
+reqwest = { version = "0.12.24", default-features = false, features = [
 "rustls-tls-webpki-roots",
 "charset",
 "http2",
@@ -206,9 +205,9 @@ reqwest = { version = "0.12.23", default-features = false, features = [
 "blocking",
 ] }
 rmcp = { version = "0.8.1" }
-rmp = "0.8.14"
-rmp-serde = "1.3.0"
-rsa = "0.9.8"
+rmp = { version = "0.8.14" }
+rmp-serde = { version = "1.3.0" }
+rsa = { version = "0.9.8" }
 rumqttc = { version = "0.25.0" }
 rust-embed = { version = "8.7.2" }
 rustc-hash = { version = "2.1.1" }
@@ -229,7 +228,7 @@ smallvec = { version = "1.15.1", features = ["serde"] }
 smartstring = "1.0.1"
 snafu = "0.8.9"
 snap = "1.1.1"
-socket2 = "0.6.0"
+socket2 = "0.6.1"
 starshard = { version = "0.5.0", features = ["rayon", "async", "serde"] }
 strum = { version = "0.27.2", features = ["derive"] }
 sysinfo = "0.37.1"
@@ -245,7 +244,7 @@ time = { version = "0.3.44", features = [
 "macros",
 "serde",
 ] }
-tokio = { version = "1.47.1", features = ["fs", "rt-multi-thread"] }
+tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
 tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
 tokio-stream = { version = "0.1.17" }
 tokio-tar = "0.3.1"
@@ -256,7 +255,7 @@ tonic-prost = { version = "0.14.2" }
 tonic-prost-build = { version = "0.14.2" }
 tower = { version = "0.5.2", features = ["timeout"] }
 tower-http = { version = "0.6.6", features = ["cors"] }
-tracing = "0.1.41"
+tracing = { version = "0.1.41" }
 tracing-core = "0.1.34"
 tracing-error = "0.2.1"
 tracing-opentelemetry = "0.32.0"
@@ -37,7 +37,6 @@ thiserror = { workspace = true }
 tokio = { workspace = true, features = ["sync", "fs", "rt-multi-thread", "rt", "time", "macros"] }
 tracing = { workspace = true, features = ["std", "attributes"] }
 url = { workspace = true }
-once_cell = { workspace = true }
 rumqttc = { workspace = true }
 
 [lints]
@@ -13,13 +13,12 @@
 // limitations under the License.
 
 use crate::{AuditEntry, AuditResult, AuditSystem};
-use once_cell::sync::OnceCell;
 use rustfs_ecstore::config::Config;
-use std::sync::Arc;
+use std::sync::{Arc, OnceLock};
 use tracing::{error, warn};
 
 /// Global audit system instance
-static AUDIT_SYSTEM: OnceCell<Arc<AuditSystem>> = OnceCell::new();
+static AUDIT_SYSTEM: OnceLock<Arc<AuditSystem>> = OnceLock::new();
 
 /// Initialize the global audit system
 pub fn init_audit_system() -> Arc<AuditSystem> {
@@ -21,8 +21,8 @@
 //! - Error rate monitoring
 //! - Queue depth monitoring
 
-use std::sync::Arc;
 use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::{Arc, OnceLock};
 use std::time::{Duration, Instant};
 use tokio::sync::RwLock;
 use tracing::info;
@@ -312,7 +312,7 @@ impl PerformanceValidation {
 }
 
 /// Global metrics instance
-static GLOBAL_METRICS: once_cell::sync::OnceCell<Arc<AuditMetrics>> = once_cell::sync::OnceCell::new();
+static GLOBAL_METRICS: OnceLock<Arc<AuditMetrics>> = OnceLock::new();
 
 /// Get or initialize the global metrics instance
 pub fn global_metrics() -> Arc<AuditMetrics> {
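Note on the `once_cell` changes above: these globals move from `once_cell::sync::OnceCell` to the standard library's `std::sync::OnceLock` (stable since Rust 1.70), dropping a dependency at these call sites with an essentially identical API. A minimal sketch, using a stand-in metrics type:

    use std::sync::{Arc, OnceLock};

    struct AuditMetrics; // stand-in for the real metrics type

    static GLOBAL_METRICS: OnceLock<Arc<AuditMetrics>> = OnceLock::new();

    // `get_or_init` runs the closure at most once, even under concurrent first calls.
    fn global_metrics() -> Arc<AuditMetrics> {
        GLOBAL_METRICS.get_or_init(|| Arc::new(AuditMetrics)).clone()
    }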
@@ -12,20 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use crate::AuditEntry;
-use crate::{AuditError, AuditResult};
-use futures::StreamExt;
-use futures::stream::FuturesUnordered;
-use rustfs_config::audit::AUDIT_ROUTE_PREFIX;
+use crate::{AuditEntry, AuditError, AuditResult};
+use futures::{StreamExt, stream::FuturesUnordered};
 use rustfs_config::{
 DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR,
 MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN, WEBHOOK_BATCH_SIZE,
 WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_HTTP_TIMEOUT, WEBHOOK_MAX_RETRY, WEBHOOK_QUEUE_DIR,
-WEBHOOK_QUEUE_LIMIT, WEBHOOK_RETRY_INTERVAL,
+WEBHOOK_QUEUE_LIMIT, WEBHOOK_RETRY_INTERVAL, audit::AUDIT_ROUTE_PREFIX,
 };
 use rustfs_ecstore::config::{Config, KVS};
-use rustfs_targets::target::{ChannelTargetType, TargetType, mqtt::MQTTArgs, webhook::WebhookArgs};
-use rustfs_targets::{Target, TargetError};
+use rustfs_targets::{
+Target, TargetError,
+target::{ChannelTargetType, TargetType, mqtt::MQTTArgs, webhook::WebhookArgs},
+};
 use std::collections::{HashMap, HashSet};
 use std::sync::Arc;
 use std::time::Duration;
@@ -68,7 +67,10 @@ impl AuditRegistry {
 
 // A collection of asynchronous tasks for concurrently executing target creation
 let mut tasks = FuturesUnordered::new();
-let mut final_config = config.clone();
+// let final_config = config.clone();
 
+// Record the defaults for each segment so that the segment can eventually be rebuilt
+let mut section_defaults: HashMap<String, KVS> = HashMap::new();
+
 // Supported target types for audit
 let target_types = vec![ChannelTargetType::Webhook.as_str(), ChannelTargetType::Mqtt.as_str()];
@@ -80,11 +82,14 @@ impl AuditRegistry {
 info!(target_type = %target_type, "Starting audit target type processing");
 
 // 2. Prepare the configuration source
-let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}");
+let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase();
 let file_configs = config.0.get(&section_name).cloned().unwrap_or_default();
 let default_cfg = file_configs.get(DEFAULT_DELIMITER).cloned().unwrap_or_default();
 debug!(?default_cfg, "Retrieved default configuration");
 
+// Save defaults for eventual write back
+section_defaults.insert(section_name.clone(), default_cfg.clone());
+
 // Get valid fields for the target type
 let valid_fields = match target_type {
 "webhook" => get_webhook_valid_fields(),
@@ -101,7 +106,7 @@ impl AuditRegistry {
 let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new();
 
 for (env_key, env_value) in &all_env {
-let audit_prefix = format!("{ENV_PREFIX}AUDIT_{}", target_type.to_uppercase());
+let audit_prefix = format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}").to_uppercase();
 if !env_key.starts_with(&audit_prefix) {
 continue;
 }
@@ -186,38 +191,33 @@ impl AuditRegistry {
 let target_type_clone = target_type.to_string();
 let id_clone = id.clone();
 let merged_config_arc = Arc::new(merged_config.clone());
-let final_config_arc = Arc::new(final_config.clone());
 
 let task = tokio::spawn(async move {
 let result = create_audit_target(&target_type_clone, &id_clone, &merged_config_arc).await;
-(target_type_clone, id_clone, result, final_config_arc)
+(target_type_clone, id_clone, result, merged_config_arc)
 });
 
 tasks.push(task);
 
 // Update final config with successful instance
-final_config
-.0
-.entry(section_name.clone())
-.or_default()
-.insert(id, merged_config);
+// final_config.0.entry(section_name.clone()).or_default().insert(id, merged_config);
 } else {
 info!(instance_id = %id, "Skipping disabled audit target, will be removed from final configuration");
 // Remove disabled target from final configuration
-final_config.0.entry(section_name.clone()).or_default().remove(&id);
+// final_config.0.entry(section_name.clone()).or_default().remove(&id);
 }
 }
 }
 
 // 6. Concurrently execute all creation tasks and collect results
 let mut successful_targets = Vec::new();
+let mut successful_configs = Vec::new();
 while let Some(task_result) = tasks.next().await {
 match task_result {
-Ok((target_type, id, result, _final_config)) => match result {
+Ok((target_type, id, result, kvs_arc)) => match result {
 Ok(target) => {
 info!(target_type = %target_type, instance_id = %id, "Created audit target successfully");
 successful_targets.push(target);
+successful_configs.push((target_type, id, kvs_arc));
 }
 Err(e) => {
 error!(target_type = %target_type, instance_id = %id, error = %e, "Failed to create audit target");
@@ -229,21 +229,67 @@ impl AuditRegistry {
 }
 }
 
-// 7. Save the new configuration to the system
-let Some(store) = rustfs_ecstore::new_object_layer_fn() else {
-return Err(AuditError::ServerNotInitialized(
-"Failed to save target configuration: server storage not initialized".to_string(),
-));
-};
-
-match rustfs_ecstore::config::com::save_server_config(store, &final_config).await {
-Ok(_) => info!("New audit configuration saved to system successfully"),
-Err(e) => {
-error!(error = %e, "Failed to save new audit configuration");
-return Err(AuditError::SaveConfig(e.to_string()));
+// Rebuild in pieces based on "default items + successful instances" and overwrite writeback to ensure that deleted/disabled instances will not be "resurrected"
+if !successful_configs.is_empty() || !section_defaults.is_empty() {
+info!("Prepare to rebuild and save target configurations to the system configuration...");
+
+// Aggregate successful instances into segments
+let mut successes_by_section: HashMap<String, HashMap<String, KVS>> = HashMap::new();
+for (target_type, id, kvs) in successful_configs {
+let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase();
+successes_by_section
+.entry(section_name)
+.or_default()
+.insert(id.to_lowercase(), (*kvs).clone());
+}
+
+let mut new_config = config.clone();
+
+// Collection of segments that need to be processed: Collect all segments where default items exist or where successful instances exist
+let mut sections: HashSet<String> = HashSet::new();
+sections.extend(section_defaults.keys().cloned());
+sections.extend(successes_by_section.keys().cloned());
+
+for section_name in sections {
+let mut section_map: HashMap<String, KVS> = HashMap::new();
+
+// The default entry (if present) is written back to `_`
+if let Some(default_cfg) = section_defaults.get(&section_name) {
+if !default_cfg.is_empty() {
+section_map.insert(DEFAULT_DELIMITER.to_string(), default_cfg.clone());
+}
+}
+
+// Successful instance write back
+if let Some(instances) = successes_by_section.get(&section_name) {
+for (id, kvs) in instances {
+section_map.insert(id.clone(), kvs.clone());
+}
+}
+
+// Empty segments are removed and non-empty segments are replaced as a whole.
+if section_map.is_empty() {
+new_config.0.remove(&section_name);
+} else {
+new_config.0.insert(section_name, section_map);
+}
+}
+
+// 7. Save the new configuration to the system
+let Some(store) = rustfs_ecstore::new_object_layer_fn() else {
+return Err(AuditError::ServerNotInitialized(
+"Failed to save target configuration: server storage not initialized".to_string(),
+));
+};
+
+match rustfs_ecstore::config::com::save_server_config(store, &new_config).await {
+Ok(_) => info!("New audit configuration saved to system successfully"),
+Err(e) => {
+error!(error = %e, "Failed to save new audit configuration");
+return Err(AuditError::SaveConfig(e.to_string()));
+}
 }
 }
 
 Ok(successful_targets)
 }
 
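The hunk above is where the reload becomes effectively transactional: nothing is written back unless there is something to rebuild (`successful_configs` or `section_defaults` non-empty), and each touched section is replaced as a unit rather than patched. A toy model of why overwriting beats in-place patching (plain `HashMap`s standing in for the stored config; hypothetical section and instance names):

    use std::collections::HashMap;

    fn main() {
        let mut stored: HashMap<&str, Vec<&str>> = HashMap::new();
        stored.insert("audit_webhook", vec!["_", "primary", "stale"]);

        // Reload: only `primary` was recreated; the section is rebuilt from
        // the default entry plus successes and then overwritten, not merged.
        stored.insert("audit_webhook", vec!["_", "primary"]);

        assert!(!stored["audit_webhook"].contains(&"stale")); // cannot resurrect
    }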
@@ -12,10 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use crate::AuditEntry;
-use crate::AuditRegistry;
-use crate::observability;
-use crate::{AuditError, AuditResult};
+use crate::{AuditEntry, AuditError, AuditRegistry, AuditResult, observability};
 use rustfs_ecstore::config::Config;
 use rustfs_targets::{
 StoreError, Target, TargetError,
@@ -1,98 +0,0 @@
-// Copyright 2024 RustFS Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Observability Keys
-
-pub const ENV_OBS_ENDPOINT: &str = "RUSTFS_OBS_ENDPOINT";
-pub const ENV_OBS_USE_STDOUT: &str = "RUSTFS_OBS_USE_STDOUT";
-pub const ENV_OBS_SAMPLE_RATIO: &str = "RUSTFS_OBS_SAMPLE_RATIO";
-pub const ENV_OBS_METER_INTERVAL: &str = "RUSTFS_OBS_METER_INTERVAL";
-pub const ENV_OBS_SERVICE_NAME: &str = "RUSTFS_OBS_SERVICE_NAME";
-pub const ENV_OBS_SERVICE_VERSION: &str = "RUSTFS_OBS_SERVICE_VERSION";
-pub const ENV_OBS_ENVIRONMENT: &str = "RUSTFS_OBS_ENVIRONMENT";
-pub const ENV_OBS_LOGGER_LEVEL: &str = "RUSTFS_OBS_LOGGER_LEVEL";
-pub const ENV_OBS_LOCAL_LOGGING_ENABLED: &str = "RUSTFS_OBS_LOCAL_LOGGING_ENABLED";
-pub const ENV_OBS_LOG_DIRECTORY: &str = "RUSTFS_OBS_LOG_DIRECTORY";
-pub const ENV_OBS_LOG_FILENAME: &str = "RUSTFS_OBS_LOG_FILENAME";
-pub const ENV_OBS_LOG_ROTATION_SIZE_MB: &str = "RUSTFS_OBS_LOG_ROTATION_SIZE_MB";
-pub const ENV_OBS_LOG_ROTATION_TIME: &str = "RUSTFS_OBS_LOG_ROTATION_TIME";
-pub const ENV_OBS_LOG_KEEP_FILES: &str = "RUSTFS_OBS_LOG_KEEP_FILES";
-
-/// Log pool capacity for async logging
-pub const ENV_OBS_LOG_POOL_CAPA: &str = "RUSTFS_OBS_LOG_POOL_CAPA";
-
-/// Log message capacity for async logging
-pub const ENV_OBS_LOG_MESSAGE_CAPA: &str = "RUSTFS_OBS_LOG_MESSAGE_CAPA";
-
-/// Log flush interval in milliseconds for async logging
-pub const ENV_OBS_LOG_FLUSH_MS: &str = "RUSTFS_OBS_LOG_FLUSH_MS";
-
-/// Default values for log pool
-pub const DEFAULT_OBS_LOG_POOL_CAPA: usize = 10240;
-
-/// Default values for message capacity
-pub const DEFAULT_OBS_LOG_MESSAGE_CAPA: usize = 32768;
-
-/// Default values for flush interval in milliseconds
-pub const DEFAULT_OBS_LOG_FLUSH_MS: u64 = 200;
-
-/// Audit logger queue capacity environment variable key
-pub const ENV_AUDIT_LOGGER_QUEUE_CAPACITY: &str = "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY";
-
-/// Default values for observability configuration
-pub const DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY: usize = 10000;
-
-/// Default values for observability configuration
-// ### Supported Environment Values
-// - `production` - Secure file-only logging
-// - `development` - Full debugging with stdout
-// - `test` - Test environment with stdout support
-// - `staging` - Staging environment with stdout support
-pub const DEFAULT_OBS_ENVIRONMENT_PRODUCTION: &str = "production";
-pub const DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT: &str = "development";
-pub const DEFAULT_OBS_ENVIRONMENT_TEST: &str = "test";
-pub const DEFAULT_OBS_ENVIRONMENT_STAGING: &str = "staging";
-
-#[cfg(test)]
-mod tests {
-use super::*;
-
-#[test]
-fn test_env_keys() {
-assert_eq!(ENV_OBS_ENDPOINT, "RUSTFS_OBS_ENDPOINT");
-assert_eq!(ENV_OBS_USE_STDOUT, "RUSTFS_OBS_USE_STDOUT");
-assert_eq!(ENV_OBS_SAMPLE_RATIO, "RUSTFS_OBS_SAMPLE_RATIO");
-assert_eq!(ENV_OBS_METER_INTERVAL, "RUSTFS_OBS_METER_INTERVAL");
-assert_eq!(ENV_OBS_SERVICE_NAME, "RUSTFS_OBS_SERVICE_NAME");
-assert_eq!(ENV_OBS_SERVICE_VERSION, "RUSTFS_OBS_SERVICE_VERSION");
-assert_eq!(ENV_OBS_ENVIRONMENT, "RUSTFS_OBS_ENVIRONMENT");
-assert_eq!(ENV_OBS_LOGGER_LEVEL, "RUSTFS_OBS_LOGGER_LEVEL");
-assert_eq!(ENV_OBS_LOCAL_LOGGING_ENABLED, "RUSTFS_OBS_LOCAL_LOGGING_ENABLED");
-assert_eq!(ENV_OBS_LOG_DIRECTORY, "RUSTFS_OBS_LOG_DIRECTORY");
-assert_eq!(ENV_OBS_LOG_FILENAME, "RUSTFS_OBS_LOG_FILENAME");
-assert_eq!(ENV_OBS_LOG_ROTATION_SIZE_MB, "RUSTFS_OBS_LOG_ROTATION_SIZE_MB");
-assert_eq!(ENV_OBS_LOG_ROTATION_TIME, "RUSTFS_OBS_LOG_ROTATION_TIME");
-assert_eq!(ENV_OBS_LOG_KEEP_FILES, "RUSTFS_OBS_LOG_KEEP_FILES");
-assert_eq!(ENV_AUDIT_LOGGER_QUEUE_CAPACITY, "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY");
-}
-
-#[test]
-fn test_default_values() {
-assert_eq!(DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY, 10000);
-assert_eq!(DEFAULT_OBS_ENVIRONMENT_PRODUCTION, "production");
-assert_eq!(DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT, "development");
-assert_eq!(DEFAULT_OBS_ENVIRONMENT_TEST, "test");
-assert_eq!(DEFAULT_OBS_ENVIRONMENT_STAGING, "staging");
-}
-}
@@ -1,28 +0,0 @@
-// Copyright 2024 RustFS Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// RUSTFS_SINKS_FILE_PATH
-pub const ENV_SINKS_FILE_PATH: &str = "RUSTFS_SINKS_FILE_PATH";
-// RUSTFS_SINKS_FILE_BUFFER_SIZE
-pub const ENV_SINKS_FILE_BUFFER_SIZE: &str = "RUSTFS_SINKS_FILE_BUFFER_SIZE";
-// RUSTFS_SINKS_FILE_FLUSH_INTERVAL_MS
-pub const ENV_SINKS_FILE_FLUSH_INTERVAL_MS: &str = "RUSTFS_SINKS_FILE_FLUSH_INTERVAL_MS";
-// RUSTFS_SINKS_FILE_FLUSH_THRESHOLD
-pub const ENV_SINKS_FILE_FLUSH_THRESHOLD: &str = "RUSTFS_SINKS_FILE_FLUSH_THRESHOLD";
-
-pub const DEFAULT_SINKS_FILE_BUFFER_SIZE: usize = 8192;
-
-pub const DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS: u64 = 1000;
-
-pub const DEFAULT_SINKS_FILE_FLUSH_THRESHOLD: usize = 100;
@@ -1,27 +0,0 @@
-// Copyright 2024 RustFS Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// RUSTFS_SINKS_KAFKA_BROKERS
-pub const ENV_SINKS_KAFKA_BROKERS: &str = "RUSTFS_SINKS_KAFKA_BROKERS";
-pub const ENV_SINKS_KAFKA_TOPIC: &str = "RUSTFS_SINKS_KAFKA_TOPIC";
-// batch_size
-pub const ENV_SINKS_KAFKA_BATCH_SIZE: &str = "RUSTFS_SINKS_KAFKA_BATCH_SIZE";
-// batch_timeout_ms
-pub const ENV_SINKS_KAFKA_BATCH_TIMEOUT_MS: &str = "RUSTFS_SINKS_KAFKA_BATCH_TIMEOUT_MS";
-
-// brokers
-pub const DEFAULT_SINKS_KAFKA_BROKERS: &str = "localhost:9092";
-pub const DEFAULT_SINKS_KAFKA_TOPIC: &str = "rustfs-sinks";
-pub const DEFAULT_SINKS_KAFKA_BATCH_SIZE: usize = 100;
-pub const DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS: u64 = 1000;
@@ -12,12 +12,87 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-mod config;
-mod file;
-mod kafka;
-mod webhook;
-
-pub use config::*;
-pub use file::*;
-pub use kafka::*;
-pub use webhook::*;
+// Observability Keys
+
+pub const ENV_OBS_ENDPOINT: &str = "RUSTFS_OBS_ENDPOINT";
+pub const ENV_OBS_USE_STDOUT: &str = "RUSTFS_OBS_USE_STDOUT";
+pub const ENV_OBS_SAMPLE_RATIO: &str = "RUSTFS_OBS_SAMPLE_RATIO";
+pub const ENV_OBS_METER_INTERVAL: &str = "RUSTFS_OBS_METER_INTERVAL";
+pub const ENV_OBS_SERVICE_NAME: &str = "RUSTFS_OBS_SERVICE_NAME";
+pub const ENV_OBS_SERVICE_VERSION: &str = "RUSTFS_OBS_SERVICE_VERSION";
+pub const ENV_OBS_ENVIRONMENT: &str = "RUSTFS_OBS_ENVIRONMENT";
+pub const ENV_OBS_LOGGER_LEVEL: &str = "RUSTFS_OBS_LOGGER_LEVEL";
+pub const ENV_OBS_LOCAL_LOGGING_ENABLED: &str = "RUSTFS_OBS_LOCAL_LOGGING_ENABLED";
+pub const ENV_OBS_LOG_DIRECTORY: &str = "RUSTFS_OBS_LOG_DIRECTORY";
+pub const ENV_OBS_LOG_FILENAME: &str = "RUSTFS_OBS_LOG_FILENAME";
+pub const ENV_OBS_LOG_ROTATION_SIZE_MB: &str = "RUSTFS_OBS_LOG_ROTATION_SIZE_MB";
+pub const ENV_OBS_LOG_ROTATION_TIME: &str = "RUSTFS_OBS_LOG_ROTATION_TIME";
+pub const ENV_OBS_LOG_KEEP_FILES: &str = "RUSTFS_OBS_LOG_KEEP_FILES";
+
+/// Log pool capacity for async logging
+pub const ENV_OBS_LOG_POOL_CAPA: &str = "RUSTFS_OBS_LOG_POOL_CAPA";
+
+/// Log message capacity for async logging
+pub const ENV_OBS_LOG_MESSAGE_CAPA: &str = "RUSTFS_OBS_LOG_MESSAGE_CAPA";
+
+/// Log flush interval in milliseconds for async logging
+pub const ENV_OBS_LOG_FLUSH_MS: &str = "RUSTFS_OBS_LOG_FLUSH_MS";
+
+/// Default values for log pool
+pub const DEFAULT_OBS_LOG_POOL_CAPA: usize = 10240;
+
+/// Default values for message capacity
+pub const DEFAULT_OBS_LOG_MESSAGE_CAPA: usize = 32768;
+
+/// Default values for flush interval in milliseconds
+pub const DEFAULT_OBS_LOG_FLUSH_MS: u64 = 200;
+
+/// Audit logger queue capacity environment variable key
+pub const ENV_AUDIT_LOGGER_QUEUE_CAPACITY: &str = "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY";
+
+/// Default values for observability configuration
+pub const DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY: usize = 10000;
+
+/// Default values for observability configuration
+// ### Supported Environment Values
+// - `production` - Secure file-only logging
+// - `development` - Full debugging with stdout
+// - `test` - Test environment with stdout support
+// - `staging` - Staging environment with stdout support
+pub const DEFAULT_OBS_ENVIRONMENT_PRODUCTION: &str = "production";
+pub const DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT: &str = "development";
+pub const DEFAULT_OBS_ENVIRONMENT_TEST: &str = "test";
+pub const DEFAULT_OBS_ENVIRONMENT_STAGING: &str = "staging";
+
+#[cfg(test)]
+mod tests {
+use super::*;
+
+#[test]
+fn test_env_keys() {
+assert_eq!(ENV_OBS_ENDPOINT, "RUSTFS_OBS_ENDPOINT");
+assert_eq!(ENV_OBS_USE_STDOUT, "RUSTFS_OBS_USE_STDOUT");
+assert_eq!(ENV_OBS_SAMPLE_RATIO, "RUSTFS_OBS_SAMPLE_RATIO");
+assert_eq!(ENV_OBS_METER_INTERVAL, "RUSTFS_OBS_METER_INTERVAL");
+assert_eq!(ENV_OBS_SERVICE_NAME, "RUSTFS_OBS_SERVICE_NAME");
+assert_eq!(ENV_OBS_SERVICE_VERSION, "RUSTFS_OBS_SERVICE_VERSION");
+assert_eq!(ENV_OBS_ENVIRONMENT, "RUSTFS_OBS_ENVIRONMENT");
+assert_eq!(ENV_OBS_LOGGER_LEVEL, "RUSTFS_OBS_LOGGER_LEVEL");
+assert_eq!(ENV_OBS_LOCAL_LOGGING_ENABLED, "RUSTFS_OBS_LOCAL_LOGGING_ENABLED");
+assert_eq!(ENV_OBS_LOG_DIRECTORY, "RUSTFS_OBS_LOG_DIRECTORY");
+assert_eq!(ENV_OBS_LOG_FILENAME, "RUSTFS_OBS_LOG_FILENAME");
+assert_eq!(ENV_OBS_LOG_ROTATION_SIZE_MB, "RUSTFS_OBS_LOG_ROTATION_SIZE_MB");
+assert_eq!(ENV_OBS_LOG_ROTATION_TIME, "RUSTFS_OBS_LOG_ROTATION_TIME");
+assert_eq!(ENV_OBS_LOG_KEEP_FILES, "RUSTFS_OBS_LOG_KEEP_FILES");
+assert_eq!(ENV_AUDIT_LOGGER_QUEUE_CAPACITY, "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY");
+}
+
+#[test]
+fn test_default_values() {
+assert_eq!(DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY, 10000);
+assert_eq!(DEFAULT_OBS_ENVIRONMENT_PRODUCTION, "production");
+assert_eq!(DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT, "development");
+assert_eq!(DEFAULT_OBS_ENVIRONMENT_TEST, "test");
+assert_eq!(DEFAULT_OBS_ENVIRONMENT_STAGING, "staging");
+}
+}
@@ -1,28 +0,0 @@
-// Copyright 2024 RustFS Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// RUSTFS_SINKS_WEBHOOK_ENDPOINT
-pub const ENV_SINKS_WEBHOOK_ENDPOINT: &str = "RUSTFS_SINKS_WEBHOOK_ENDPOINT";
-// RUSTFS_SINKS_WEBHOOK_AUTH_TOKEN
-pub const ENV_SINKS_WEBHOOK_AUTH_TOKEN: &str = "RUSTFS_SINKS_WEBHOOK_AUTH_TOKEN";
-// max_retries
-pub const ENV_SINKS_WEBHOOK_MAX_RETRIES: &str = "RUSTFS_SINKS_WEBHOOK_MAX_RETRIES";
-// retry_delay_ms
-pub const ENV_SINKS_WEBHOOK_RETRY_DELAY_MS: &str = "RUSTFS_SINKS_WEBHOOK_RETRY_DELAY_MS";
-
-// Default values for webhook sink configuration
-pub const DEFAULT_SINKS_WEBHOOK_ENDPOINT: &str = "http://localhost:8080";
-pub const DEFAULT_SINKS_WEBHOOK_AUTH_TOKEN: &str = "";
-pub const DEFAULT_SINKS_WEBHOOK_MAX_RETRIES: usize = 3;
-pub const DEFAULT_SINKS_WEBHOOK_RETRY_DELAY_MS: u64 = 100;
@@ -49,4 +49,4 @@ uuid = { workspace = true }
 base64 = { workspace = true }
 rand = { workspace = true }
 chrono = { workspace = true }
 md5 = { workspace = true }
@@ -635,7 +635,7 @@ impl KmsBackend for LocalKmsBackend {
 }
 
 async fn encrypt(&self, request: EncryptRequest) -> Result<EncryptResponse> {
-let encrypt_request = crate::types::EncryptRequest {
+let encrypt_request = EncryptRequest {
 key_id: request.key_id.clone(),
 plaintext: request.plaintext,
 encryption_context: request.encryption_context,
@@ -719,14 +719,14 @@ impl KmsBackend for LocalKmsBackend {
 .client
 .load_master_key(key_id)
 .await
-.map_err(|_| crate::error::KmsError::key_not_found(format!("Key {key_id} not found")))?;
+.map_err(|_| KmsError::key_not_found(format!("Key {key_id} not found")))?;
 
 let (deletion_date_str, deletion_date_dt) = if request.force_immediate.unwrap_or(false) {
 // For immediate deletion, actually delete the key from filesystem
 let key_path = self.client.master_key_path(key_id);
 tokio::fs::remove_file(&key_path)
 .await
-.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to delete key file: {e}")))?;
+.map_err(|e| KmsError::internal_error(format!("Failed to delete key file: {e}")))?;
 
 // Remove from cache
 let mut cache = self.client.key_cache.write().await;
@@ -756,9 +756,7 @@ impl KmsBackend for LocalKmsBackend {
 // Schedule for deletion (default 30 days)
 let days = request.pending_window_in_days.unwrap_or(30);
 if !(7..=30).contains(&days) {
-return Err(crate::error::KmsError::invalid_parameter(
-"pending_window_in_days must be between 7 and 30".to_string(),
-));
+return Err(KmsError::invalid_parameter("pending_window_in_days must be between 7 and 30".to_string()));
 }
 
 let deletion_date = chrono::Utc::now() + chrono::Duration::days(days as i64);
@@ -772,16 +770,16 @@ impl KmsBackend for LocalKmsBackend {
 let key_path = self.client.master_key_path(key_id);
 let content = tokio::fs::read(&key_path)
 .await
-.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to read key file: {e}")))?;
-let stored_key: crate::backends::local::StoredMasterKey = serde_json::from_slice(&content)
-.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to parse stored key: {e}")))?;
+.map_err(|e| KmsError::internal_error(format!("Failed to read key file: {e}")))?;
+let stored_key: StoredMasterKey =
+serde_json::from_slice(&content).map_err(|e| KmsError::internal_error(format!("Failed to parse stored key: {e}")))?;
 
 // Decrypt the existing key material to preserve it
 let existing_key_material = if let Some(ref cipher) = self.client.master_cipher {
-let nonce = aes_gcm::Nonce::from_slice(&stored_key.nonce);
+let nonce = Nonce::from_slice(&stored_key.nonce);
 cipher
 .decrypt(nonce, stored_key.encrypted_key_material.as_ref())
-.map_err(|e| crate::error::KmsError::cryptographic_error("decrypt", e.to_string()))?
+.map_err(|e| KmsError::cryptographic_error("decrypt", e.to_string()))?
 } else {
 stored_key.encrypted_key_material
 };
@@ -820,10 +818,10 @@ impl KmsBackend for LocalKmsBackend {
 .client
 .load_master_key(key_id)
 .await
-.map_err(|_| crate::error::KmsError::key_not_found(format!("Key {key_id} not found")))?;
+.map_err(|_| KmsError::key_not_found(format!("Key {key_id} not found")))?;
 
 if master_key.status != KeyStatus::PendingDeletion {
-return Err(crate::error::KmsError::invalid_key_state(format!("Key {key_id} is not pending deletion")));
+return Err(KmsError::invalid_key_state(format!("Key {key_id} is not pending deletion")));
 }
 
 // Cancel the deletion by resetting the state
@@ -68,7 +68,7 @@ async fn main() -> Result<(), NotificationError> {
 key: WEBHOOK_QUEUE_DIR.to_string(),
 value: current_root
 .clone()
-.join("../../deploy/logs/notify/webhook")
+.join("../../deploy/logs/notify")
 .to_str()
 .unwrap()
 .to_string(),
@@ -120,11 +120,7 @@ async fn main() -> Result<(), NotificationError> {
 },
 KV {
 key: MQTT_QUEUE_DIR.to_string(),
-value: current_root
-.join("../../deploy/logs/notify/mqtt")
-.to_str()
-.unwrap()
-.to_string(),
+value: current_root.join("../../deploy/logs/notify").to_str().unwrap().to_string(),
 hidden_if_empty: false,
 },
 KV {
@@ -137,7 +133,7 @@ async fn main() -> Result<(), NotificationError> {
 let mqtt_kvs = KVS(mqtt_kvs_vec);
 let mut mqtt_targets = std::collections::HashMap::new();
 mqtt_targets.insert(DEFAULT_TARGET.to_string(), mqtt_kvs);
-config.0.insert(NOTIFY_MQTT_SUB_SYS.to_string(), mqtt_targets);
+// config.0.insert(NOTIFY_MQTT_SUB_SYS.to_string(), mqtt_targets);
 
 // Load the configuration and initialize the system
 *system.config.write().await = config;
@@ -28,6 +28,7 @@ use rustfs_targets::EventName;
 use rustfs_targets::arn::TargetID;
 use std::sync::Arc;
 use std::time::Duration;
+use tokio::time::sleep;
 use tracing::info;
 
 #[tokio::main]
@@ -68,7 +69,7 @@ async fn main() -> Result<(), NotificationError> {
 key: WEBHOOK_QUEUE_DIR.to_string(),
 value: current_root
 .clone()
-.join("../../deploy/logs/notify/webhook")
+.join("../../deploy/logs/notify")
 .to_str()
 .unwrap()
 .to_string(),
@@ -91,7 +92,7 @@ async fn main() -> Result<(), NotificationError> {
 system.init().await?;
 info!("✅ System initialized with Webhook target.");
 
-tokio::time::sleep(Duration::from_secs(1)).await;
+sleep(Duration::from_secs(1)).await;
 
 // --- Dynamically update system configuration: Add an MQTT Target ---
 info!("\n---> Dynamically adding MQTT target...");
@@ -129,11 +130,7 @@ async fn main() -> Result<(), NotificationError> {
 },
 KV {
 key: MQTT_QUEUE_DIR.to_string(),
-value: current_root
-.join("../../deploy/logs/notify/mqtt")
-.to_str()
-.unwrap()
-.to_string(),
+value: current_root.join("../../deploy/logs/notify").to_str().unwrap().to_string(),
 hidden_if_empty: false,
 },
 KV {
@@ -152,7 +149,7 @@ async fn main() -> Result<(), NotificationError> {
 .await?;
 info!("✅ MQTT target added and system reloaded.");
 
-tokio::time::sleep(Duration::from_secs(1)).await;
+sleep(Duration::from_secs(1)).await;
 
 // --- Loading and managing Bucket configurations ---
 info!("\n---> Loading bucket notification config...");
@@ -176,7 +173,7 @@ async fn main() -> Result<(), NotificationError> {
 system.send_event(event).await;
 info!("✅ Event sent. Both Webhook and MQTT targets should receive it.");
 
-tokio::time::sleep(Duration::from_secs(2)).await;
+sleep(Duration::from_secs(2)).await;
 
 // --- Dynamically remove configuration ---
 info!("\n---> Dynamically removing Webhook target...");
@@ -188,5 +185,6 @@ async fn main() -> Result<(), NotificationError> {
 info!("✅ Bucket 'my-bucket' config removed.");
 
 info!("\nDemo completed successfully");
+sleep(Duration::from_secs(1)).await;
 Ok(())
 }
@@ -12,19 +12,20 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use axum::routing::get;
 use axum::{
 Router,
 extract::Json,
+extract::Query,
 http::{HeaderMap, Response, StatusCode},
-routing::post,
+routing::{get, post},
 };
+use rustfs_utils::parse_and_resolve_address;
+use serde::Deserialize;
 use serde_json::Value;
 use std::net::SocketAddr;
+use std::sync::atomic::{AtomicU64, Ordering};
 use std::time::{SystemTime, UNIX_EPOCH};
+use tokio::net::TcpListener;
-use axum::extract::Query;
-use serde::Deserialize;
 
 #[derive(Deserialize)]
 struct ResetParams {
@@ -32,9 +33,6 @@ struct ResetParams {
 }
 
 // Define a global variable and count the number of data received
-use rustfs_utils::parse_and_resolve_address;
-use std::sync::atomic::{AtomicU64, Ordering};
-use tokio::net::TcpListener;
 
 static WEBHOOK_COUNT: AtomicU64 = AtomicU64::new(0);
 
@@ -296,8 +296,8 @@ impl NotificationSystem {
 info!("Removing config for target {} of type {}", target_name, target_type);
 self.update_config_and_reload(|config| {
 let mut changed = false;
-if let Some(targets) = config.0.get_mut(target_type) {
-if targets.remove(target_name).is_some() {
+if let Some(targets) = config.0.get_mut(&target_type.to_lowercase()) {
+if targets.remove(&target_name.to_lowercase()).is_some() {
 changed = true;
 }
 if targets.is_empty() {
@@ -307,6 +307,7 @@ impl NotificationSystem {
 if !changed {
 info!("Target {} of type {} not found, no changes made.", target_name, target_type);
 }
+debug!("Config after remove: {:?}", config);
 changed
 })
 .await
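Removal now lowercases both the section key (the target type) and the target name before the lookup, matching the lowercased keys the reload path writes back; previously a mixed-case name could miss the entry and leave it in place to reappear. A sketch of the normalized removal, using a hypothetical map-of-maps model of the config:

    use std::collections::HashMap;

    // Remove `target_name` from the `target_type` section, normalizing case the
    // same way the reload path does when it writes sections back.
    fn remove_target(
        config: &mut HashMap<String, HashMap<String, String>>,
        target_type: &str,
        target_name: &str,
    ) -> bool {
        let section = target_type.to_lowercase();
        let mut changed = false;
        if let Some(targets) = config.get_mut(&section) {
            changed = targets.remove(&target_name.to_lowercase()).is_some();
            if targets.is_empty() {
                config.remove(&section); // drop the now-empty section entirely
            }
        }
        changed
    }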
@@ -16,12 +16,9 @@ use crate::Event;
 use crate::factory::{MQTTTargetFactory, TargetFactory, WebhookTargetFactory};
 use futures::stream::{FuturesUnordered, StreamExt};
 use hashbrown::{HashMap, HashSet};
-use rustfs_config::notify::NOTIFY_ROUTE_PREFIX;
-use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX};
+use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, notify::NOTIFY_ROUTE_PREFIX};
 use rustfs_ecstore::config::{Config, KVS};
-use rustfs_targets::Target;
-use rustfs_targets::TargetError;
-use rustfs_targets::target::ChannelTargetType;
+use rustfs_targets::{Target, TargetError, target::ChannelTargetType};
 use tracing::{debug, error, info, warn};
 
 /// Registry for managing target factories
@@ -90,7 +87,9 @@ impl TargetRegistry {
 let all_env: Vec<(String, String)> = std::env::vars().filter(|(key, _)| key.starts_with(ENV_PREFIX)).collect();
 // A collection of asynchronous tasks for concurrently executing target creation
 let mut tasks = FuturesUnordered::new();
-let mut final_config = config.clone(); // Clone a configuration for aggregating the final result
+// let final_config = config.clone(); // Clone a configuration for aggregating the final result
+// Record the defaults for each segment so that the segment can eventually be rebuilt
+let mut section_defaults: HashMap<String, KVS> = HashMap::new();
 // 1. Traverse all registered plants and process them by target type
 for (target_type, factory) in &self.factories {
 tracing::Span::current().record("target_type", target_type.as_str());
@@ -98,12 +97,15 @@ impl TargetRegistry {
 
 // 2. Prepare the configuration source
 // 2.1. Get the configuration segment in the file, e.g. 'notify_webhook'
-let section_name = format!("{NOTIFY_ROUTE_PREFIX}{target_type}");
+let section_name = format!("{NOTIFY_ROUTE_PREFIX}{target_type}").to_lowercase();
 let file_configs = config.0.get(&section_name).cloned().unwrap_or_default();
 // 2.2. Get the default configuration for that type
 let default_cfg = file_configs.get(DEFAULT_DELIMITER).cloned().unwrap_or_default();
 debug!(?default_cfg, "Get the default configuration");
 
+// Save defaults for eventual write back
+section_defaults.insert(section_name.clone(), default_cfg.clone());
+
 // *** Optimization point 1: Get all legitimate fields of the current target type ***
 let valid_fields = factory.get_valid_fields();
 debug!(?valid_fields, "Get the legitimate configuration fields");
@@ -111,7 +113,9 @@ impl TargetRegistry {
 // 3. Resolve instance IDs and configuration overrides from environment variables
 let mut instance_ids_from_env = HashSet::new();
 // 3.1. Instance discovery: Based on the '..._ENABLE_INSTANCEID' format
-let enable_prefix = format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}_{ENABLE_KEY}_").to_uppercase();
+let enable_prefix =
+format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}{ENABLE_KEY}{DEFAULT_DELIMITER}")
+.to_uppercase();
 for (key, value) in &all_env {
 if value.eq_ignore_ascii_case(rustfs_config::EnableState::One.as_str())
 || value.eq_ignore_ascii_case(rustfs_config::EnableState::On.as_str())
@@ -128,14 +132,14 @@ impl TargetRegistry {
|
|||||||
|
|
||||||
// 3.2. Parse all relevant environment variable configurations
|
// 3.2. Parse all relevant environment variable configurations
|
||||||
// 3.2.1. Build environment variable prefixes such as 'RUSTFS_NOTIFY_WEBHOOK_'
|
// 3.2.1. Build environment variable prefixes such as 'RUSTFS_NOTIFY_WEBHOOK_'
|
||||||
let env_prefix = format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}_").to_uppercase();
|
let env_prefix = format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}").to_uppercase();
|
||||||
// 3.2.2. 'env_overrides' is used to store configurations parsed from environment variables in the format: {instance id -> {field -> value}}
|
// 3.2.2. 'env_overrides' is used to store configurations parsed from environment variables in the format: {instance id -> {field -> value}}
|
||||||
let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new();
|
let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new();
|
||||||
for (key, value) in &all_env {
|
for (key, value) in &all_env {
|
||||||
if let Some(rest) = key.strip_prefix(&env_prefix) {
|
if let Some(rest) = key.strip_prefix(&env_prefix) {
|
||||||
// Use rsplitn to split from the right side to properly extract the INSTANCE_ID at the end
|
// Use rsplitn to split from the right side to properly extract the INSTANCE_ID at the end
|
||||||
// Format: <FIELD_NAME>_<INSTANCE_ID> or <FIELD_NAME>
|
// Format: <FIELD_NAME>_<INSTANCE_ID> or <FIELD_NAME>
|
||||||
let mut parts = rest.rsplitn(2, '_');
|
let mut parts = rest.rsplitn(2, DEFAULT_DELIMITER);
|
||||||
|
|
||||||
// The first part from the right is INSTANCE_ID
|
// The first part from the right is INSTANCE_ID
|
||||||
let instance_id_part = parts.next().unwrap_or(DEFAULT_DELIMITER);
|
let instance_id_part = parts.next().unwrap_or(DEFAULT_DELIMITER);
|
||||||
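The environment parsing above leans on `rsplitn` so that the instance id binds from the right of the key. A standalone sketch of that split follows; the constant values are assumptions for illustration (the crate defines its own `ENV_PREFIX` and `DEFAULT_DELIMITER`), and the real code additionally validates fields and instance ids discovered this way:

    // Minimal sketch; constants are illustrative, not the crate's actual values.
    const ENV_PREFIX: &str = "RUSTFS_";
    const DEFAULT_DELIMITER: &str = "_";

    /// Split a "ENDPOINT_PRIMARY"-style suffix into (field, instance id).
    fn split_field_and_instance(rest: &str) -> (String, String) {
        let mut parts = rest.rsplitn(2, DEFAULT_DELIMITER);
        // rsplitn yields the right-most piece first, so the instance id comes out first
        let instance = parts.next().unwrap_or(DEFAULT_DELIMITER).to_lowercase();
        let field = parts.next().unwrap_or("").to_lowercase();
        (field, instance)
    }

    fn main() {
        let key = "RUSTFS_NOTIFY_WEBHOOK_ENDPOINT_PRIMARY";
        let env_prefix = format!("{ENV_PREFIX}NOTIFY_WEBHOOK{DEFAULT_DELIMITER}").to_uppercase();
        if let Some(rest) = key.strip_prefix(&env_prefix) {
            let (field, instance) = split_field_and_instance(rest);
            assert_eq!((field.as_str(), instance.as_str()), ("endpoint", "primary"));
        }
    }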
@@ -224,7 +228,7 @@ impl TargetRegistry {
             } else {
                 info!(instance_id = %id, "Skip the disabled target and will be removed from the final configuration");
                 // Remove disabled target from final configuration
-                final_config.0.entry(section_name.clone()).or_default().remove(&id);
+                // final_config.0.entry(section_name.clone()).or_default().remove(&id);
             }
         }
     }
@@ -246,15 +250,50 @@ impl TargetRegistry {
     }

     // 7. Aggregate new configuration and write back to system configuration
-    if !successful_configs.is_empty() {
+    if !successful_configs.is_empty() || !section_defaults.is_empty() {
         info!(
             "Prepare to update {} successfully created target configurations to the system configuration...",
             successful_configs.len()
         );
-        let mut new_config = config.clone();
+        let mut successes_by_section: HashMap<String, HashMap<String, KVS>> = HashMap::new();

         for (target_type, id, kvs) in successful_configs {
             let section_name = format!("{NOTIFY_ROUTE_PREFIX}{target_type}").to_lowercase();
-            new_config.0.entry(section_name).or_default().insert(id, (*kvs).clone());
+            successes_by_section
+                .entry(section_name)
+                .or_default()
+                .insert(id.to_lowercase(), (*kvs).clone());
+        }
+
+        let mut new_config = config.clone();
+        // Collection of segments that need to be processed: Collect all segments where default items exist or where successful instances exist
+        let mut sections: HashSet<String> = HashSet::new();
+        sections.extend(section_defaults.keys().cloned());
+        sections.extend(successes_by_section.keys().cloned());
+
+        for section in sections {
+            let mut section_map: std::collections::HashMap<String, KVS> = std::collections::HashMap::new();
+            // Add default item
+            if let Some(default_kvs) = section_defaults.get(&section) {
+                if !default_kvs.is_empty() {
+                    section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone());
+                }
+            }
+
+            // Add successful instance item
+            if let Some(instances) = successes_by_section.get(&section) {
+                for (id, kvs) in instances {
+                    section_map.insert(id.clone(), kvs.clone());
+                }
+            }
+
+            // Empty sections are removed and non-empty sections are replaced entirely.
+            if section_map.is_empty() {
+                new_config.0.remove(&section);
+            } else {
+                new_config.0.insert(section, section_map);
+            }
+        }
     }

     let Some(store) = rustfs_ecstore::global::new_object_layer_fn() else {
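The hunk above is the heart of the fix: instead of surgically removing entries from a shared config (which let removed targets reappear when another code path re-wrote the section), every touched section is rebuilt from scratch and swapped in whole. A self-contained sketch of that rebuild, using a plain `HashMap` stand-in (`Kvs`) for the crate's `KVS` type and `"_"` as an assumed default-entry key:

    use std::collections::{HashMap, HashSet};

    type Kvs = HashMap<String, String>; // stand-in for the crate's KVS type

    /// Rebuild each section from defaults plus currently-successful instances.
    /// Anything not re-inserted here (e.g. a removed target) cannot reappear.
    fn rebuild_sections(
        config: &mut HashMap<String, HashMap<String, Kvs>>,
        section_defaults: &HashMap<String, Kvs>,
        successes_by_section: &HashMap<String, HashMap<String, Kvs>>,
    ) {
        let mut sections: HashSet<String> = HashSet::new();
        sections.extend(section_defaults.keys().cloned());
        sections.extend(successes_by_section.keys().cloned());

        for section in sections {
            let mut section_map: HashMap<String, Kvs> = HashMap::new();
            if let Some(defaults) = section_defaults.get(&section) {
                if !defaults.is_empty() {
                    section_map.insert("_".to_string(), defaults.clone());
                }
            }
            if let Some(instances) = successes_by_section.get(&section) {
                for (id, kvs) in instances {
                    section_map.insert(id.clone(), kvs.clone());
                }
            }
            // Replace the whole section atomically; empty sections disappear.
            if section_map.is_empty() {
                config.remove(&section);
            } else {
                config.insert(section, section_map);
            }
        }
    }

Replacing the section wholesale is what makes removal and reload behave transactionally: the final state depends only on what was successfully created in this pass, not on the mutation order of earlier passes.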
@@ -29,17 +29,12 @@ documentation = "https://docs.rs/rustfs-obs/latest/rustfs_obs/"
 workspace = true

 [features]
-default = ["file"]
+default = []
-file = []
 gpu = ["dep:nvml-wrapper"]
-webhook = ["dep:reqwest"]
-kafka = ["dep:rdkafka"]

 [dependencies]
 rustfs-config = { workspace = true, features = ["constants", "observability"] }
 rustfs-utils = { workspace = true, features = ["ip", "path"] }
-async-trait = { workspace = true }
-chrono = { workspace = true }
 flexi_logger = { workspace = true }
 nu-ansi-term = { workspace = true }
 nvml-wrapper = { workspace = true, optional = true }
@@ -57,24 +52,9 @@ tracing-error = { workspace = true }
 tracing-opentelemetry = { workspace = true }
 tracing-subscriber = { workspace = true, features = ["registry", "std", "fmt", "env-filter", "tracing-log", "time", "local-time", "json"] }
 tokio = { workspace = true, features = ["sync", "fs", "rt-multi-thread", "rt", "time", "macros"] }
-reqwest = { workspace = true, optional = true }
-serde_json = { workspace = true }
 sysinfo = { workspace = true }
 thiserror = { workspace = true }

-
-# Only enable kafka features and related dependencies on Linux
-[target.'cfg(target_os = "linux")'.dependencies]
-rdkafka = { workspace = true, features = ["tokio"], optional = true }
-

 [dev-dependencies]
-chrono = { workspace = true }
-opentelemetry = { workspace = true }
-opentelemetry_sdk = { workspace = true, features = ["rt-tokio"] }
-opentelemetry-stdout = { workspace = true }
-opentelemetry-otlp = { workspace = true, features = ["grpc-tonic"] }
-opentelemetry-semantic-conventions = { workspace = true, features = ["semconv_experimental"] }
 tokio = { workspace = true, features = ["full"] }
-tracing = { workspace = true, features = ["std", "attributes"] }
-tracing-subscriber = { workspace = true, features = ["registry", "std", "fmt"] }
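With the `file`, `webhook`, and `kafka` features gone from this manifest, the Linux-only `rdkafka` dependency block disappears as well. Code consuming that dependency would have sat behind a double gate roughly like this (an illustrative sketch, not code from this repository):

    // Compiled only when the (now removed) `kafka` feature was enabled on Linux.
    #[cfg(all(feature = "kafka", target_os = "linux"))]
    fn spawn_kafka_sink() {
        // rdkafka-backed sink construction lived behind this gate
    }

    #[cfg(not(all(feature = "kafka", target_os = "linux")))]
    fn spawn_kafka_sink() {
        // no-op fallback on other platforms / feature sets
    }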
@@ -21,29 +21,4 @@ service_name = "rustfs"
 service_version = "0.1.0"
 environments = "develop"
 logger_level = "debug"
 local_logging_enabled = true # Default is false if not specified
-
-
-#[[sinks]]
-#type = "Kafka"
-#bootstrap_servers = "localhost:9092"
-#topic = "logs"
-#batch_size = 100 # Default is 100 if not specified
-#batch_timeout_ms = 100 # Default is 1000ms if not specified
-#
-#[[sinks]]
-#type = "Webhook"
-#endpoint = "http://localhost:8080/webhook"
-#auth_token = ""
-#batch_size = 100 # Default is 3 if not specified
-#batch_timeout_ms = 100 # Default is 100ms if not specified
-
-[[sinks]]
-type = "File"
-path = "deploy/logs/rustfs.log"
-buffer_size = 102 # Default is 8192 bytes if not specified
-flush_interval_ms = 1000
-flush_threshold = 100
-
-[logger]
-queue_capacity = 10000
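All sink and logger sections leave the example config; what remains are the OTel fields at the top, and the endpoint now arrives programmatically or through the environment. A sketch of the env-or-fallback lookup (the variable name here is illustrative; the crate reads its own `ENV_OBS_ENDPOINT` constant, whose literal value is not visible in this diff):

    use std::env;

    fn otlp_endpoint() -> Option<String> {
        env::var("RUSTFS_OBS_ENDPOINT")
            .ok()
            .filter(|s| !s.trim().is_empty())
            // fall back to the endpoint used in the updated example
            .or_else(|| Some("http://localhost:4317".to_string()))
    }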
@@ -13,33 +13,25 @@
 // limitations under the License.

 use opentelemetry::global;
-use rustfs_obs::{BaseLogEntry, ServerLogEntry, SystemObserver, get_logger, init_obs, log_info};
+use rustfs_obs::{SystemObserver, init_obs};
-use std::collections::HashMap;
 use std::time::{Duration, SystemTime};
-use tracing::{error, info, instrument};
+use tracing::{Level, error, info, instrument};
-use tracing_core::Level;

 #[tokio::main]
 async fn main() {
-    let obs_conf = Some("crates/obs/examples/config.toml".to_string());
+    let obs_conf = Some("http://localhost:4317".to_string());
-    let (_logger, _guard) = init_obs(obs_conf).await;
+    let _guard = init_obs(obs_conf).await;
     let span = tracing::span!(Level::INFO, "main");
     let _enter = span.enter();
     info!("Program starts");
     // Simulate the operation
     tokio::time::sleep(Duration::from_millis(100)).await;
-    run(
+    run("service-demo".to_string()).await;
-        "service-demo".to_string(),
-        "object-demo".to_string(),
-        "user-demo".to_string(),
-        "service-demo".to_string(),
-    )
-    .await;
     info!("Program ends");
 }

 #[instrument(fields(bucket, object, user))]
-async fn run(bucket: String, object: String, user: String, service_name: String) {
+async fn run(service_name: String) {
     let start_time = SystemTime::now();
     info!("Log module initialization is completed service_name: {:?}", service_name);

@@ -56,21 +48,6 @@ async fn run(bucket: String, object: String, user: String, service_name: String)
         Err(e) => error!("Failed to initialize process observer: {:?}", e),
     }

-    let base_entry = BaseLogEntry::new()
-        .message(Some("run logger api_handler info".to_string()))
-        .request_id(Some("request_id".to_string()))
-        .timestamp(chrono::DateTime::from(start_time))
-        .tags(Some(HashMap::default()));
-
-    let server_entry = ServerLogEntry::new(Level::INFO, "api_handler".to_string())
-        .with_base(base_entry)
-        .user_id(Some(user.clone()))
-        .add_field("operation".to_string(), "login".to_string())
-        .add_field("bucket".to_string(), bucket.clone())
-        .add_field("object".to_string(), object.clone());
-
-    let result = get_logger().lock().await.log_server_entry(server_entry).await;
-    info!("Logging is completed {:?}", result);
     put_object("bucket".to_string(), "object".to_string(), "user".to_string()).await;
     info!("Logging is completed");
     tokio::time::sleep(Duration::from_secs(2)).await;
@@ -97,8 +74,6 @@ async fn put_object(bucket: String, object: String, user: String) {
         start_time.elapsed().unwrap().as_secs_f64()
     );

-    let result = log_info("put_object logger info", "put_object").await;
-    info!("put_object is completed {:?}", result);
     // Simulate the operation
     tokio::time::sleep(Duration::from_millis(100)).await;

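Distilled from the updated example: `init_obs` now takes an optional endpoint string instead of a config-file path, and returns a single guard rather than a `(logger, guard)` pair. A minimal sketch under those assumptions:

    use rustfs_obs::init_obs;
    use tracing::info;

    #[tokio::main]
    async fn main() {
        // One guard; keep it alive for the whole program so telemetry flushes.
        let _guard = init_obs(Some("http://localhost:4317".to_string())).await;
        info!("observability initialized");
    }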
@@ -13,16 +13,9 @@
 // limitations under the License.

 use rustfs_config::observability::{
-    DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY, DEFAULT_SINKS_FILE_BUFFER_SIZE, DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS,
+    ENV_OBS_ENDPOINT, ENV_OBS_ENVIRONMENT, ENV_OBS_LOCAL_LOGGING_ENABLED, ENV_OBS_LOG_DIRECTORY, ENV_OBS_LOG_FILENAME,
-    DEFAULT_SINKS_FILE_FLUSH_THRESHOLD, DEFAULT_SINKS_KAFKA_BATCH_SIZE, DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS,
+    ENV_OBS_LOG_KEEP_FILES, ENV_OBS_LOG_ROTATION_SIZE_MB, ENV_OBS_LOG_ROTATION_TIME, ENV_OBS_LOGGER_LEVEL,
-    DEFAULT_SINKS_KAFKA_BROKERS, DEFAULT_SINKS_KAFKA_TOPIC, DEFAULT_SINKS_WEBHOOK_AUTH_TOKEN, DEFAULT_SINKS_WEBHOOK_ENDPOINT,
+    ENV_OBS_METER_INTERVAL, ENV_OBS_SAMPLE_RATIO, ENV_OBS_SERVICE_NAME, ENV_OBS_SERVICE_VERSION, ENV_OBS_USE_STDOUT,
-    DEFAULT_SINKS_WEBHOOK_MAX_RETRIES, DEFAULT_SINKS_WEBHOOK_RETRY_DELAY_MS, ENV_AUDIT_LOGGER_QUEUE_CAPACITY, ENV_OBS_ENDPOINT,
-    ENV_OBS_ENVIRONMENT, ENV_OBS_LOCAL_LOGGING_ENABLED, ENV_OBS_LOG_DIRECTORY, ENV_OBS_LOG_FILENAME, ENV_OBS_LOG_KEEP_FILES,
-    ENV_OBS_LOG_ROTATION_SIZE_MB, ENV_OBS_LOG_ROTATION_TIME, ENV_OBS_LOGGER_LEVEL, ENV_OBS_METER_INTERVAL, ENV_OBS_SAMPLE_RATIO,
-    ENV_OBS_SERVICE_NAME, ENV_OBS_SERVICE_VERSION, ENV_OBS_USE_STDOUT, ENV_SINKS_FILE_BUFFER_SIZE,
-    ENV_SINKS_FILE_FLUSH_INTERVAL_MS, ENV_SINKS_FILE_FLUSH_THRESHOLD, ENV_SINKS_FILE_PATH, ENV_SINKS_KAFKA_BATCH_SIZE,
-    ENV_SINKS_KAFKA_BATCH_TIMEOUT_MS, ENV_SINKS_KAFKA_BROKERS, ENV_SINKS_KAFKA_TOPIC, ENV_SINKS_WEBHOOK_AUTH_TOKEN,
-    ENV_SINKS_WEBHOOK_ENDPOINT, ENV_SINKS_WEBHOOK_MAX_RETRIES, ENV_SINKS_WEBHOOK_RETRY_DELAY_MS,
 };
 use rustfs_config::{
     APP_NAME, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, DEFAULT_LOG_ROTATION_SIZE_MB, DEFAULT_LOG_ROTATION_TIME,
@@ -145,167 +138,10 @@ impl Default for OtelConfig {
     }
 }

-/// Kafka Sink Configuration - Add batch parameters
-#[derive(Debug, Deserialize, Serialize, Clone)]
-pub struct KafkaSinkConfig {
-    pub brokers: String,
-    pub topic: String,
-    pub batch_size: Option<usize>,     // Batch size, default 100
-    pub batch_timeout_ms: Option<u64>, // Batch timeout time, default 1000ms
-}
-
-impl KafkaSinkConfig {
-    pub fn new() -> Self {
-        Self::default()
-    }
-}
-
-impl Default for KafkaSinkConfig {
-    fn default() -> Self {
-        Self {
-            brokers: env::var(ENV_SINKS_KAFKA_BROKERS)
-                .ok()
-                .filter(|s| !s.trim().is_empty())
-                .unwrap_or_else(|| DEFAULT_SINKS_KAFKA_BROKERS.to_string()),
-            topic: env::var(ENV_SINKS_KAFKA_TOPIC)
-                .ok()
-                .filter(|s| !s.trim().is_empty())
-                .unwrap_or_else(|| DEFAULT_SINKS_KAFKA_TOPIC.to_string()),
-            batch_size: env::var(ENV_SINKS_KAFKA_BATCH_SIZE)
-                .ok()
-                .and_then(|v| v.parse().ok())
-                .or(Some(DEFAULT_SINKS_KAFKA_BATCH_SIZE)),
-            batch_timeout_ms: env::var(ENV_SINKS_KAFKA_BATCH_TIMEOUT_MS)
-                .ok()
-                .and_then(|v| v.parse().ok())
-                .or(Some(DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS)),
-        }
-    }
-}
-
-/// Webhook Sink Configuration - Add Retry Parameters
-#[derive(Debug, Deserialize, Serialize, Clone)]
-pub struct WebhookSinkConfig {
-    pub endpoint: String,
-    pub auth_token: String,
-    pub max_retries: Option<usize>,  // Maximum number of retry times, default 3
-    pub retry_delay_ms: Option<u64>, // Retry the delay cardinality, default 100ms
-}
-
-impl WebhookSinkConfig {
-    pub fn new() -> Self {
-        Self::default()
-    }
-}
-
-impl Default for WebhookSinkConfig {
-    fn default() -> Self {
-        Self {
-            endpoint: env::var(ENV_SINKS_WEBHOOK_ENDPOINT)
-                .ok()
-                .filter(|s| !s.trim().is_empty())
-                .unwrap_or_else(|| DEFAULT_SINKS_WEBHOOK_ENDPOINT.to_string()),
-            auth_token: env::var(ENV_SINKS_WEBHOOK_AUTH_TOKEN)
-                .ok()
-                .filter(|s| !s.trim().is_empty())
-                .unwrap_or_else(|| DEFAULT_SINKS_WEBHOOK_AUTH_TOKEN.to_string()),
-            max_retries: env::var(ENV_SINKS_WEBHOOK_MAX_RETRIES)
-                .ok()
-                .and_then(|v| v.parse().ok())
-                .or(Some(DEFAULT_SINKS_WEBHOOK_MAX_RETRIES)),
-            retry_delay_ms: env::var(ENV_SINKS_WEBHOOK_RETRY_DELAY_MS)
-                .ok()
-                .and_then(|v| v.parse().ok())
-                .or(Some(DEFAULT_SINKS_WEBHOOK_RETRY_DELAY_MS)),
-        }
-    }
-}
-
-/// File Sink Configuration - Add buffering parameters
-#[derive(Debug, Deserialize, Serialize, Clone)]
-pub struct FileSinkConfig {
-    pub path: String,
-    pub buffer_size: Option<usize>,     // Write buffer size, default 8192
-    pub flush_interval_ms: Option<u64>, // Refresh interval time, default 1000ms
-    pub flush_threshold: Option<usize>, // Refresh threshold, default 100 logs
-}
-
-impl FileSinkConfig {
-    pub fn new() -> Self {
-        Self::default()
-    }
-}
-
-impl Default for FileSinkConfig {
-    fn default() -> Self {
-        Self {
-            path: get_log_directory_to_string(ENV_SINKS_FILE_PATH),
-            buffer_size: env::var(ENV_SINKS_FILE_BUFFER_SIZE)
-                .ok()
-                .and_then(|v| v.parse().ok())
-                .or(Some(DEFAULT_SINKS_FILE_BUFFER_SIZE)),
-            flush_interval_ms: env::var(ENV_SINKS_FILE_FLUSH_INTERVAL_MS)
-                .ok()
-                .and_then(|v| v.parse().ok())
-                .or(Some(DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS)),
-            flush_threshold: env::var(ENV_SINKS_FILE_FLUSH_THRESHOLD)
-                .ok()
-                .and_then(|v| v.parse().ok())
-                .or(Some(DEFAULT_SINKS_FILE_FLUSH_THRESHOLD)),
-        }
-    }
-}
-
-/// Sink configuration collection
-#[derive(Debug, Clone, Serialize, Deserialize)]
-#[serde(tag = "type")]
-pub enum SinkConfig {
-    File(FileSinkConfig),
-    Kafka(KafkaSinkConfig),
-    Webhook(WebhookSinkConfig),
-}
-
-impl SinkConfig {
-    pub fn new() -> Self {
-        Self::File(FileSinkConfig::new())
-    }
-}
-
-impl Default for SinkConfig {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-/// Logger Configuration
-#[derive(Debug, Deserialize, Serialize, Clone)]
-pub struct LoggerConfig {
-    pub queue_capacity: Option<usize>,
-}
-
-impl LoggerConfig {
-    pub fn new() -> Self {
-        Self {
-            queue_capacity: env::var(ENV_AUDIT_LOGGER_QUEUE_CAPACITY)
-                .ok()
-                .and_then(|v| v.parse().ok())
-                .or(Some(DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY)),
-        }
-    }
-}
-
-impl Default for LoggerConfig {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
 /// Overall application configuration
-/// Add observability, sinks, and logger configuration
+/// Add observability configuration
 ///
 /// Observability: OpenTelemetry configuration
-/// Sinks: Kafka, Webhook, File sink configuration
-/// Logger: Logger configuration
 ///
 /// # Example
 /// ```
@@ -316,8 +152,6 @@ impl Default for LoggerConfig {
 #[derive(Debug, Deserialize, Clone)]
 pub struct AppConfig {
     pub observability: OtelConfig,
-    pub sinks: Vec<SinkConfig>,
-    pub logger: Option<LoggerConfig>,
 }

 impl AppConfig {
@@ -328,16 +162,12 @@ impl AppConfig {
     pub fn new() -> Self {
         Self {
             observability: OtelConfig::default(),
-            sinks: vec![SinkConfig::default()],
-            logger: Some(LoggerConfig::default()),
         }
     }

     pub fn new_with_endpoint(endpoint: Option<String>) -> Self {
         Self {
             observability: OtelConfig::extract_otel_config_from_env(endpoint),
-            sinks: vec![SinkConfig::new()],
-            logger: Some(LoggerConfig::new()),
         }
     }
 }
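The deleted sink configs all used the same env-or-default idiom, which survives elsewhere in `OtelConfig`. A minimal sketch of the pattern (the key and default below are illustrative, not the crate's real constants):

    use std::env;

    fn batch_size_from_env() -> Option<usize> {
        env::var("RUSTFS_SINKS_KAFKA_BATCH_SIZE")
            .ok()
            .and_then(|v| v.parse().ok()) // silently ignore unparsable values
            .or(Some(100))                // fall back to the documented default
    }

Note the asymmetry the removed code relied on: string fields used `filter(!is_empty)` plus `unwrap_or_else`, while numeric fields used `parse().ok()` plus `or(Some(DEFAULT))`, so an empty or malformed value always degrades to the default instead of erroring.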
@@ -1,88 +0,0 @@
-// Copyright 2024 RustFS Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use crate::entry::ObjectVersion;
-use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
-
-/// Args - defines the arguments for API operations
-/// Args is used to define the arguments for API operations.
-///
-/// # Example
-/// ```
-/// use rustfs_obs::Args;
-/// use std::collections::HashMap;
-///
-/// let args = Args::new()
-///     .set_bucket(Some("my-bucket".to_string()))
-///     .set_object(Some("my-object".to_string()))
-///     .set_version_id(Some("123".to_string()))
-///     .set_metadata(Some(HashMap::new()));
-/// ```
-#[derive(Debug, Clone, Serialize, Deserialize, Default, Eq, PartialEq)]
-pub struct Args {
-    #[serde(rename = "bucket", skip_serializing_if = "Option::is_none")]
-    pub bucket: Option<String>,
-    #[serde(rename = "object", skip_serializing_if = "Option::is_none")]
-    pub object: Option<String>,
-    #[serde(rename = "versionId", skip_serializing_if = "Option::is_none")]
-    pub version_id: Option<String>,
-    #[serde(rename = "objects", skip_serializing_if = "Option::is_none")]
-    pub objects: Option<Vec<ObjectVersion>>,
-    #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")]
-    pub metadata: Option<HashMap<String, String>>,
-}
-
-impl Args {
-    /// Create a new Args object
-    pub fn new() -> Self {
-        Args {
-            bucket: None,
-            object: None,
-            version_id: None,
-            objects: None,
-            metadata: None,
-        }
-    }
-
-    /// Set the bucket
-    pub fn set_bucket(mut self, bucket: Option<String>) -> Self {
-        self.bucket = bucket;
-        self
-    }
-
-    /// Set the object
-    pub fn set_object(mut self, object: Option<String>) -> Self {
-        self.object = object;
-        self
-    }
-
-    /// Set the version ID
-    pub fn set_version_id(mut self, version_id: Option<String>) -> Self {
-        self.version_id = version_id;
-        self
-    }
-
-    /// Set the objects
-    pub fn set_objects(mut self, objects: Option<Vec<ObjectVersion>>) -> Self {
-        self.objects = objects;
-        self
-    }
-
-    /// Set the metadata
-    pub fn set_metadata(mut self, metadata: Option<HashMap<String, String>>) -> Self {
-        self.metadata = metadata;
-        self
-    }
-}
@@ -1,467 +0,0 @@
-// Copyright 2024 RustFS Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use crate::{BaseLogEntry, LogRecord, ObjectVersion};
-use chrono::{DateTime, Utc};
-use serde::{Deserialize, Serialize};
-use serde_json::Value;
-use std::collections::HashMap;
-
-/// API details structure
-/// ApiDetails is used to define the details of an API operation
-///
-/// The `ApiDetails` structure contains the following fields:
-/// - `name` - the name of the API operation
-/// - `bucket` - the bucket name
-/// - `object` - the object name
-/// - `objects` - the list of objects
-/// - `status` - the status of the API operation
-/// - `status_code` - the status code of the API operation
-/// - `input_bytes` - the input bytes
-/// - `output_bytes` - the output bytes
-/// - `header_bytes` - the header bytes
-/// - `time_to_first_byte` - the time to first byte
-/// - `time_to_first_byte_in_ns` - the time to first byte in nanoseconds
-/// - `time_to_response` - the time to response
-/// - `time_to_response_in_ns` - the time to response in nanoseconds
-///
-/// The `ApiDetails` structure contains the following methods:
-/// - `new` - create a new `ApiDetails` with default values
-/// - `set_name` - set the name
-/// - `set_bucket` - set the bucket
-/// - `set_object` - set the object
-/// - `set_objects` - set the objects
-/// - `set_status` - set the status
-/// - `set_status_code` - set the status code
-/// - `set_input_bytes` - set the input bytes
-/// - `set_output_bytes` - set the output bytes
-/// - `set_header_bytes` - set the header bytes
-/// - `set_time_to_first_byte` - set the time to first byte
-/// - `set_time_to_first_byte_in_ns` - set the time to first byte in nanoseconds
-/// - `set_time_to_response` - set the time to response
-/// - `set_time_to_response_in_ns` - set the time to response in nanoseconds
-///
-/// # Example
-/// ```
-/// use rustfs_obs::ApiDetails;
-/// use rustfs_obs::ObjectVersion;
-///
-/// let api = ApiDetails::new()
-///     .set_name(Some("GET".to_string()))
-///     .set_bucket(Some("my-bucket".to_string()))
-///     .set_object(Some("my-object".to_string()))
-///     .set_objects(vec![ObjectVersion::new_with_object_name("my-object".to_string())])
-///     .set_status(Some("OK".to_string()))
-///     .set_status_code(Some(200))
-///     .set_input_bytes(100)
-///     .set_output_bytes(200)
-///     .set_header_bytes(Some(50))
-///     .set_time_to_first_byte(Some("100ms".to_string()))
-///     .set_time_to_first_byte_in_ns(Some("100000000ns".to_string()))
-///     .set_time_to_response(Some("200ms".to_string()))
-///     .set_time_to_response_in_ns(Some("200000000ns".to_string()));
-/// ```
-#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)]
-pub struct ApiDetails {
-    #[serde(rename = "name", skip_serializing_if = "Option::is_none")]
-    pub name: Option<String>,
-    #[serde(rename = "bucket", skip_serializing_if = "Option::is_none")]
-    pub bucket: Option<String>,
-    #[serde(rename = "object", skip_serializing_if = "Option::is_none")]
-    pub object: Option<String>,
-    #[serde(rename = "objects", skip_serializing_if = "Vec::is_empty", default)]
-    pub objects: Vec<ObjectVersion>,
-    #[serde(rename = "status", skip_serializing_if = "Option::is_none")]
-    pub status: Option<String>,
-    #[serde(rename = "statusCode", skip_serializing_if = "Option::is_none")]
-    pub status_code: Option<i32>,
-    #[serde(rename = "rx")]
-    pub input_bytes: i64,
-    #[serde(rename = "tx")]
-    pub output_bytes: i64,
-    #[serde(rename = "txHeaders", skip_serializing_if = "Option::is_none")]
-    pub header_bytes: Option<i64>,
-    #[serde(rename = "timeToFirstByte", skip_serializing_if = "Option::is_none")]
-    pub time_to_first_byte: Option<String>,
-    #[serde(rename = "timeToFirstByteInNS", skip_serializing_if = "Option::is_none")]
-    pub time_to_first_byte_in_ns: Option<String>,
-    #[serde(rename = "timeToResponse", skip_serializing_if = "Option::is_none")]
-    pub time_to_response: Option<String>,
-    #[serde(rename = "timeToResponseInNS", skip_serializing_if = "Option::is_none")]
-    pub time_to_response_in_ns: Option<String>,
-}
-
-impl ApiDetails {
-    /// Create a new `ApiDetails` with default values
-    pub fn new() -> Self {
-        ApiDetails {
-            name: None,
-            bucket: None,
-            object: None,
-            objects: Vec::new(),
-            status: None,
-            status_code: None,
-            input_bytes: 0,
-            output_bytes: 0,
-            header_bytes: None,
-            time_to_first_byte: None,
-            time_to_first_byte_in_ns: None,
-            time_to_response: None,
-            time_to_response_in_ns: None,
-        }
-    }
-
-    /// Set the name
-    pub fn set_name(mut self, name: Option<String>) -> Self {
-        self.name = name;
-        self
-    }
-
-    /// Set the bucket
-    pub fn set_bucket(mut self, bucket: Option<String>) -> Self {
-        self.bucket = bucket;
-        self
-    }
-
-    /// Set the object
-    pub fn set_object(mut self, object: Option<String>) -> Self {
-        self.object = object;
-        self
-    }
-
-    /// Set the objects
-    pub fn set_objects(mut self, objects: Vec<ObjectVersion>) -> Self {
-        self.objects = objects;
-        self
-    }
-
-    /// Set the status
-    pub fn set_status(mut self, status: Option<String>) -> Self {
-        self.status = status;
-        self
-    }
-
-    /// Set the status code
-    pub fn set_status_code(mut self, status_code: Option<i32>) -> Self {
-        self.status_code = status_code;
-        self
-    }
-
-    /// Set the input bytes
-    pub fn set_input_bytes(mut self, input_bytes: i64) -> Self {
-        self.input_bytes = input_bytes;
-        self
-    }
-
-    /// Set the output bytes
-    pub fn set_output_bytes(mut self, output_bytes: i64) -> Self {
-        self.output_bytes = output_bytes;
-        self
-    }
-
-    /// Set the header bytes
-    pub fn set_header_bytes(mut self, header_bytes: Option<i64>) -> Self {
-        self.header_bytes = header_bytes;
-        self
-    }
-
-    /// Set the time to first byte
-    pub fn set_time_to_first_byte(mut self, time_to_first_byte: Option<String>) -> Self {
-        self.time_to_first_byte = time_to_first_byte;
-        self
-    }
-
-    /// Set the time to first byte in nanoseconds
-    pub fn set_time_to_first_byte_in_ns(mut self, time_to_first_byte_in_ns: Option<String>) -> Self {
-        self.time_to_first_byte_in_ns = time_to_first_byte_in_ns;
-        self
-    }
-
-    /// Set the time to response
-    pub fn set_time_to_response(mut self, time_to_response: Option<String>) -> Self {
-        self.time_to_response = time_to_response;
-        self
-    }
-
-    /// Set the time to response in nanoseconds
-    pub fn set_time_to_response_in_ns(mut self, time_to_response_in_ns: Option<String>) -> Self {
-        self.time_to_response_in_ns = time_to_response_in_ns;
-        self
-    }
-}
-
-/// Entry - audit entry logs
-/// AuditLogEntry is used to define the structure of an audit log entry
-///
-/// The `AuditLogEntry` structure contains the following fields:
-/// - `base` - the base log entry
-/// - `version` - the version of the audit log entry
-/// - `deployment_id` - the deployment ID
-/// - `event` - the event
-/// - `entry_type` - the type of audit message
-/// - `api` - the API details
-/// - `remote_host` - the remote host
-/// - `user_agent` - the user agent
-/// - `req_path` - the request path
-/// - `req_host` - the request host
-/// - `req_claims` - the request claims
-/// - `req_query` - the request query
-/// - `req_header` - the request header
-/// - `resp_header` - the response header
-/// - `access_key` - the access key
-/// - `parent_user` - the parent user
-/// - `error` - the error
-///
-/// The `AuditLogEntry` structure contains the following methods:
-/// - `new` - create a new `AuditEntry` with default values
-/// - `new_with_values` - create a new `AuditEntry` with version, time, event and api details
-/// - `with_base` - set the base log entry
-/// - `set_version` - set the version
-/// - `set_deployment_id` - set the deployment ID
-/// - `set_event` - set the event
-/// - `set_entry_type` - set the entry type
-/// - `set_api` - set the API details
-/// - `set_remote_host` - set the remote host
-/// - `set_user_agent` - set the user agent
-/// - `set_req_path` - set the request path
-/// - `set_req_host` - set the request host
-/// - `set_req_claims` - set the request claims
-/// - `set_req_query` - set the request query
-/// - `set_req_header` - set the request header
-/// - `set_resp_header` - set the response header
-/// - `set_access_key` - set the access key
-/// - `set_parent_user` - set the parent user
-/// - `set_error` - set the error
-///
-/// # Example
-/// ```
-/// use rustfs_obs::AuditLogEntry;
-/// use rustfs_obs::ApiDetails;
-/// use std::collections::HashMap;
-///
-/// let entry = AuditLogEntry::new()
-///     .set_version("1.0".to_string())
-///     .set_deployment_id(Some("123".to_string()))
-///     .set_event("event".to_string())
-///     .set_entry_type(Some("type".to_string()))
-///     .set_api(ApiDetails::new())
-///     .set_remote_host(Some("remote-host".to_string()))
-///     .set_user_agent(Some("user-agent".to_string()))
-///     .set_req_path(Some("req-path".to_string()))
-///     .set_req_host(Some("req-host".to_string()))
-///     .set_req_claims(Some(HashMap::new()))
-///     .set_req_query(Some(HashMap::new()))
-///     .set_req_header(Some(HashMap::new()))
-///     .set_resp_header(Some(HashMap::new()))
-///     .set_access_key(Some("access-key".to_string()))
-///     .set_parent_user(Some("parent-user".to_string()))
-///     .set_error(Some("error".to_string()));
-/// ```
-#[derive(Debug, Serialize, Deserialize, Clone, Default)]
-pub struct AuditLogEntry {
-    #[serde(flatten)]
-    pub base: BaseLogEntry,
-    pub version: String,
-    #[serde(rename = "deploymentid", skip_serializing_if = "Option::is_none")]
-    pub deployment_id: Option<String>,
-    pub event: String,
-    // Class of audit message - S3, admin ops, bucket management
-    #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
-    pub entry_type: Option<String>,
-    pub api: ApiDetails,
-    #[serde(rename = "remotehost", skip_serializing_if = "Option::is_none")]
-    pub remote_host: Option<String>,
-    #[serde(rename = "userAgent", skip_serializing_if = "Option::is_none")]
-    pub user_agent: Option<String>,
-    #[serde(rename = "requestPath", skip_serializing_if = "Option::is_none")]
-    pub req_path: Option<String>,
-    #[serde(rename = "requestHost", skip_serializing_if = "Option::is_none")]
-    pub req_host: Option<String>,
-    #[serde(rename = "requestClaims", skip_serializing_if = "Option::is_none")]
-    pub req_claims: Option<HashMap<String, Value>>,
-    #[serde(rename = "requestQuery", skip_serializing_if = "Option::is_none")]
-    pub req_query: Option<HashMap<String, String>>,
-    #[serde(rename = "requestHeader", skip_serializing_if = "Option::is_none")]
-    pub req_header: Option<HashMap<String, String>>,
-    #[serde(rename = "responseHeader", skip_serializing_if = "Option::is_none")]
-    pub resp_header: Option<HashMap<String, String>>,
-    #[serde(rename = "accessKey", skip_serializing_if = "Option::is_none")]
-    pub access_key: Option<String>,
-    #[serde(rename = "parentUser", skip_serializing_if = "Option::is_none")]
-    pub parent_user: Option<String>,
-    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
-    pub error: Option<String>,
-}
-
-impl AuditLogEntry {
-    /// Create a new `AuditEntry` with default values
-    pub fn new() -> Self {
-        AuditLogEntry {
-            base: BaseLogEntry::new(),
-            version: String::new(),
-            deployment_id: None,
-            event: String::new(),
-            entry_type: None,
-            api: ApiDetails::new(),
-            remote_host: None,
-            user_agent: None,
-            req_path: None,
-            req_host: None,
-            req_claims: None,
-            req_query: None,
-            req_header: None,
-            resp_header: None,
-            access_key: None,
-            parent_user: None,
-            error: None,
-        }
-    }
-
-    /// Create a new `AuditEntry` with version, time, event and api details
-    pub fn new_with_values(version: String, time: DateTime<Utc>, event: String, api: ApiDetails) -> Self {
-        let mut base = BaseLogEntry::new();
-        base.timestamp = time;
-
-        AuditLogEntry {
-            base,
-            version,
-            deployment_id: None,
-            event,
-            entry_type: None,
-            api,
-            remote_host: None,
-            user_agent: None,
-            req_path: None,
-            req_host: None,
-            req_claims: None,
-            req_query: None,
-            req_header: None,
-            resp_header: None,
-            access_key: None,
-            parent_user: None,
-            error: None,
-        }
-    }
-
-    /// Set the base log entry
-    pub fn with_base(mut self, base: BaseLogEntry) -> Self {
-        self.base = base;
-        self
-    }
-
-    /// Set the version
-    pub fn set_version(mut self, version: String) -> Self {
-        self.version = version;
-        self
-    }
-
-    /// Set the deployment ID
-    pub fn set_deployment_id(mut self, deployment_id: Option<String>) -> Self {
-        self.deployment_id = deployment_id;
-        self
-    }
-
-    /// Set the event
-    pub fn set_event(mut self, event: String) -> Self {
-        self.event = event;
-        self
-    }
-
-    /// Set the entry type
-    pub fn set_entry_type(mut self, entry_type: Option<String>) -> Self {
-        self.entry_type = entry_type;
-        self
-    }
-
-    /// Set the API details
-    pub fn set_api(mut self, api: ApiDetails) -> Self {
-        self.api = api;
-        self
-    }
-
-    /// Set the remote host
-    pub fn set_remote_host(mut self, remote_host: Option<String>) -> Self {
-        self.remote_host = remote_host;
-        self
-    }
-
-    /// Set the user agent
-    pub fn set_user_agent(mut self, user_agent: Option<String>) -> Self {
-        self.user_agent = user_agent;
-        self
-    }
-
-    /// Set the request path
-    pub fn set_req_path(mut self, req_path: Option<String>) -> Self {
-        self.req_path = req_path;
-        self
-    }
-
-    /// Set the request host
-    pub fn set_req_host(mut self, req_host: Option<String>) -> Self {
-        self.req_host = req_host;
-        self
-    }
-
-    /// Set the request claims
-    pub fn set_req_claims(mut self, req_claims: Option<HashMap<String, Value>>) -> Self {
-        self.req_claims = req_claims;
-        self
-    }
-
-    /// Set the request query
-    pub fn set_req_query(mut self, req_query: Option<HashMap<String, String>>) -> Self {
-        self.req_query = req_query;
-        self
-    }
-
-    /// Set the request header
-    pub fn set_req_header(mut self, req_header: Option<HashMap<String, String>>) -> Self {
-        self.req_header = req_header;
-        self
-    }
-
-    /// Set the response header
-    pub fn set_resp_header(mut self, resp_header: Option<HashMap<String, String>>) -> Self {
-        self.resp_header = resp_header;
-        self
-    }
-
-    /// Set the access key
-    pub fn set_access_key(mut self, access_key: Option<String>) -> Self {
-        self.access_key = access_key;
-        self
-    }
-
-    /// Set the parent user
-    pub fn set_parent_user(mut self, parent_user: Option<String>) -> Self {
-        self.parent_user = parent_user;
-        self
-    }
-
-    /// Set the error
-    pub fn set_error(mut self, error: Option<String>) -> Self {
-        self.error = error;
-        self
-    }
-}
-
-impl LogRecord for AuditLogEntry {
-    fn to_json(&self) -> String {
-        serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
-    }
-
-    fn get_timestamp(&self) -> DateTime<Utc> {
-        self.base.timestamp
-    }
-}
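Both deleted entry types embed `BaseLogEntry` via `#[serde(flatten)]`, which hoists the base fields to the top level of the serialized JSON object rather than nesting them. A tiny demonstration with stand-in types (assumes `serde` with the `derive` feature and `serde_json`; these are not the crate's types):

    use serde::Serialize;

    #[derive(Serialize)]
    struct Base {
        time: String,
    }

    #[derive(Serialize)]
    struct Entry {
        #[serde(flatten)]
        base: Base,
        event: String,
    }

    fn main() {
        let e = Entry {
            base: Base { time: "2024-01-01T00:00:00Z".into() },
            event: "s3:PutObject".into(),
        };
        // Flattening hoists `time` next to `event`:
        // {"time":"2024-01-01T00:00:00Z","event":"s3:PutObject"}
        println!("{}", serde_json::to_string(&e).unwrap());
    }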
@@ -1,106 +0,0 @@
-// Copyright 2024 RustFS Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use chrono::{DateTime, Utc};
-use serde::{Deserialize, Serialize};
-use serde_json::Value;
-use std::collections::HashMap;
-
-/// Base log entry structure shared by all log types
-/// This structure is used to serialize log entries to JSON
-/// and send them to the log sinks
-/// This structure is also used to deserialize log entries from JSON
-/// This structure is also used to store log entries in the database
-/// This structure is also used to query log entries from the database
-///
-/// The `BaseLogEntry` structure contains the following fields:
-/// - `timestamp` - the timestamp of the log entry
-/// - `request_id` - the request ID of the log entry
-/// - `message` - the message of the log entry
-/// - `tags` - the tags of the log entry
-///
-/// The `BaseLogEntry` structure contains the following methods:
-/// - `new` - create a new `BaseLogEntry` with default values
-/// - `message` - set the message
-/// - `request_id` - set the request ID
-/// - `tags` - set the tags
-/// - `timestamp` - set the timestamp
-///
-/// # Example
-/// ```
-/// use rustfs_obs::BaseLogEntry;
-/// use chrono::{DateTime, Utc};
-/// use std::collections::HashMap;
-///
-/// let timestamp = Utc::now();
-/// let request = Some("req-123".to_string());
-/// let message = Some("This is a log message".to_string());
-/// let tags = Some(HashMap::new());
-///
-/// let entry = BaseLogEntry::new()
-///     .timestamp(timestamp)
-///     .request_id(request)
-///     .message(message)
-///     .tags(tags);
-/// ```
-#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Default)]
-pub struct BaseLogEntry {
-    #[serde(rename = "time")]
-    pub timestamp: DateTime<Utc>,
-
-    #[serde(rename = "requestID", skip_serializing_if = "Option::is_none")]
-    pub request_id: Option<String>,
-
-    #[serde(rename = "message", skip_serializing_if = "Option::is_none")]
-    pub message: Option<String>,
-
-    #[serde(rename = "tags", skip_serializing_if = "Option::is_none")]
-    pub tags: Option<HashMap<String, Value>>,
-}
-
-impl BaseLogEntry {
-    /// Create a new BaseLogEntry with default values
-    pub fn new() -> Self {
-        BaseLogEntry {
-            timestamp: Utc::now(),
-            request_id: None,
-            message: None,
-            tags: None,
-        }
-    }
-
-    /// Set the message
-    pub fn message(mut self, message: Option<String>) -> Self {
-        self.message = message;
-        self
-    }
-
-    /// Set the request ID
-    pub fn request_id(mut self, request_id: Option<String>) -> Self {
-        self.request_id = request_id;
-        self
-    }
-
-    /// Set the tags
-    pub fn tags(mut self, tags: Option<HashMap<String, Value>>) -> Self {
-        self.tags = tags;
-        self
-    }
-
-    /// Set the timestamp
-    pub fn timestamp(mut self, timestamp: DateTime<Utc>) -> Self {
-        self.timestamp = timestamp;
-        self
-    }
-}
@@ -1,158 +0,0 @@
|
|||||||
// Copyright 2024 RustFS Team
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
pub(crate) mod args;
|
|
||||||
pub(crate) mod audit;
|
|
||||||
pub(crate) mod base;
|
|
||||||
pub(crate) mod unified;
|
|
||||||
|
|
||||||
use serde::de::Error;
|
|
||||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
|
||||||
use tracing_core::Level;
|
|
||||||
|
|
||||||
/// ObjectVersion is used across multiple modules
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
|
|
||||||
pub struct ObjectVersion {
|
|
||||||
#[serde(rename = "name")]
|
|
||||||
pub object_name: String,
|
|
||||||
#[serde(rename = "versionId", skip_serializing_if = "Option::is_none")]
|
|
||||||
pub version_id: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ObjectVersion {
|
|
||||||
/// Create a new ObjectVersion object
|
|
||||||
pub fn new() -> Self {
|
|
||||||
ObjectVersion {
|
|
||||||
object_name: String::new(),
|
|
||||||
version_id: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a new ObjectVersion with object name
|
|
||||||
pub fn new_with_object_name(object_name: String) -> Self {
|
|
||||||
ObjectVersion {
|
|
||||||
object_name,
|
|
||||||
version_id: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the object name
|
|
||||||
pub fn set_object_name(mut self, object_name: String) -> Self {
|
|
||||||
self.object_name = object_name;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
    /// Set the version ID
    pub fn set_version_id(mut self, version_id: Option<String>) -> Self {
        self.version_id = version_id;
        self
    }
}

impl Default for ObjectVersion {
    fn default() -> Self {
        Self::new()
    }
}

/// Log kind/level enum
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub enum LogKind {
    #[serde(rename = "INFO")]
    #[default]
    Info,
    #[serde(rename = "WARNING")]
    Warning,
    #[serde(rename = "ERROR")]
    Error,
    #[serde(rename = "FATAL")]
    Fatal,
}

/// Trait for types that can be serialized to JSON and have a timestamp
/// This trait is used by `ServerLogEntry` to convert the log entry to JSON
/// and get the timestamp of the log entry
/// This trait is implemented by `ServerLogEntry`
///
/// # Example
/// ```
/// use rustfs_obs::LogRecord;
/// use chrono::{DateTime, Utc};
/// use rustfs_obs::ServerLogEntry;
/// use tracing_core::Level;
///
/// let log_entry = ServerLogEntry::new(Level::INFO, "api_handler".to_string());
/// let json = log_entry.to_json();
/// let timestamp = log_entry.get_timestamp();
/// ```
pub trait LogRecord {
    fn to_json(&self) -> String;
    fn get_timestamp(&self) -> chrono::DateTime<chrono::Utc>;
}

/// Wrapper for `tracing_core::Level` to implement `Serialize` and `Deserialize`
/// for `ServerLogEntry`
/// This is necessary because `tracing_core::Level` does not implement `Serialize`
/// and `Deserialize`
/// This is a workaround to allow `ServerLogEntry` to be serialized and deserialized
/// using `serde`
///
/// # Example
/// ```
/// use rustfs_obs::SerializableLevel;
/// use tracing_core::Level;
///
/// let level = Level::INFO;
/// let serializable_level = SerializableLevel::from(level);
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SerializableLevel(pub Level);

impl From<Level> for SerializableLevel {
    fn from(level: Level) -> Self {
        SerializableLevel(level)
    }
}

impl From<SerializableLevel> for Level {
    fn from(serializable_level: SerializableLevel) -> Self {
        serializable_level.0
    }
}

impl Serialize for SerializableLevel {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(self.0.as_str())
    }
}

impl<'de> Deserialize<'de> for SerializableLevel {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        match s.as_str() {
            "TRACE" => Ok(SerializableLevel(Level::TRACE)),
            "DEBUG" => Ok(SerializableLevel(Level::DEBUG)),
            "INFO" => Ok(SerializableLevel(Level::INFO)),
            "WARN" => Ok(SerializableLevel(Level::WARN)),
            "ERROR" => Ok(SerializableLevel(Level::ERROR)),
            _ => Err(D::Error::custom("unknown log level")),
        }
    }
}
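For reference, the wrapper round-trips through its string form; a minimal sketch, assuming `serde_json` as the serializer and the pre-refactor `rustfs_obs` re-exports:

// Serialize Level::WARN to "\"WARN\"" and parse it back through the wrapper.
use rustfs_obs::SerializableLevel;
use tracing_core::Level;

fn main() {
    let level = SerializableLevel::from(Level::WARN);
    let json = serde_json::to_string(&level).unwrap(); // -> "\"WARN\""
    let back: SerializableLevel = serde_json::from_str(&json).unwrap();
    assert_eq!(Level::from(back), Level::WARN);
}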
@@ -1,301 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{AuditLogEntry, BaseLogEntry, LogKind, LogRecord, SerializableLevel};
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use tracing_core::Level;

/// Server log entry with structured fields
/// ServerLogEntry is used to log structured log entries from the server
///
/// The `ServerLogEntry` structure contains the following fields:
/// - `base` - the base log entry
/// - `level` - the log level
/// - `source` - the source of the log entry
/// - `user_id` - the user ID
/// - `fields` - the structured fields of the log entry
///
/// The `ServerLogEntry` structure contains the following methods:
/// - `new` - create a new `ServerLogEntry` with specified level and source
/// - `with_base` - set the base log entry
/// - `user_id` - set the user ID
/// - `fields` - set the fields
/// - `add_field` - add a field
///
/// # Example
/// ```
/// use rustfs_obs::ServerLogEntry;
/// use tracing_core::Level;
///
/// let entry = ServerLogEntry::new(Level::INFO, "test_module".to_string())
///     .user_id(Some("user-456".to_string()))
///     .add_field("operation".to_string(), "login".to_string());
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ServerLogEntry {
    #[serde(flatten)]
    pub base: BaseLogEntry,

    pub level: SerializableLevel,
    pub source: String,

    #[serde(rename = "userId", skip_serializing_if = "Option::is_none")]
    pub user_id: Option<String>,

    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub fields: Vec<(String, String)>,
}

impl ServerLogEntry {
    /// Create a new ServerLogEntry with specified level and source
    pub fn new(level: Level, source: String) -> Self {
        ServerLogEntry {
            base: BaseLogEntry::new(),
            level: SerializableLevel(level),
            source,
            user_id: None,
            fields: Vec::new(),
        }
    }

    /// Set the base log entry
    pub fn with_base(mut self, base: BaseLogEntry) -> Self {
        self.base = base;
        self
    }

    /// Set the user ID
    pub fn user_id(mut self, user_id: Option<String>) -> Self {
        self.user_id = user_id;
        self
    }

    /// Set fields
    pub fn fields(mut self, fields: Vec<(String, String)>) -> Self {
        self.fields = fields;
        self
    }

    /// Add a field
    pub fn add_field(mut self, key: String, value: String) -> Self {
        self.fields.push((key, value));
        self
    }
}

impl LogRecord for ServerLogEntry {
    fn to_json(&self) -> String {
        serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
    }

    fn get_timestamp(&self) -> DateTime<Utc> {
        self.base.timestamp
    }
}

/// Console log entry structure
/// ConsoleLogEntry is used to log console log entries
/// The `ConsoleLogEntry` structure contains the following fields:
/// - `base` - the base log entry
/// - `level` - the log level
/// - `console_msg` - the console message
/// - `node_name` - the node name
/// - `err` - the error message
///
/// The `ConsoleLogEntry` structure contains the following methods:
/// - `new` - create a new `ConsoleLogEntry`
/// - `new_with_console_msg` - create a new `ConsoleLogEntry` with console message and node name
/// - `with_base` - set the base log entry
/// - `set_level` - set the log level
/// - `set_node_name` - set the node name
/// - `set_console_msg` - set the console message
/// - `set_err` - set the error message
///
/// # Example
/// ```
/// use rustfs_obs::ConsoleLogEntry;
///
/// let entry = ConsoleLogEntry::new_with_console_msg("Test message".to_string(), "node-123".to_string());
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConsoleLogEntry {
    #[serde(flatten)]
    pub base: BaseLogEntry,

    pub level: LogKind,
    pub console_msg: String,
    pub node_name: String,

    #[serde(skip)]
    pub err: Option<String>,
}

impl ConsoleLogEntry {
    /// Create a new ConsoleLogEntry
    pub fn new() -> Self {
        ConsoleLogEntry {
            base: BaseLogEntry::new(),
            level: LogKind::Info,
            console_msg: String::new(),
            node_name: String::new(),
            err: None,
        }
    }

    /// Create a new ConsoleLogEntry with console message and node name
    pub fn new_with_console_msg(console_msg: String, node_name: String) -> Self {
        ConsoleLogEntry {
            base: BaseLogEntry::new(),
            level: LogKind::Info,
            console_msg,
            node_name,
            err: None,
        }
    }

    /// Set the base log entry
    pub fn with_base(mut self, base: BaseLogEntry) -> Self {
        self.base = base;
        self
    }

    /// Set the log level
    pub fn set_level(mut self, level: LogKind) -> Self {
        self.level = level;
        self
    }

    /// Set the node name
    pub fn set_node_name(mut self, node_name: String) -> Self {
        self.node_name = node_name;
        self
    }

    /// Set the console message
    pub fn set_console_msg(mut self, console_msg: String) -> Self {
        self.console_msg = console_msg;
        self
    }

    /// Set the error message
    pub fn set_err(mut self, err: Option<String>) -> Self {
        self.err = err;
        self
    }
}

impl Default for ConsoleLogEntry {
    fn default() -> Self {
        Self::new()
    }
}

impl LogRecord for ConsoleLogEntry {
    fn to_json(&self) -> String {
        serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
    }

    fn get_timestamp(&self) -> DateTime<Utc> {
        self.base.timestamp
    }
}

/// Unified log entry type
/// UnifiedLogEntry is used to log different types of log entries
///
/// The `UnifiedLogEntry` enum contains the following variants:
/// - `Server` - a server log entry
/// - `Audit` - an audit log entry
/// - `Console` - a console log entry
///
/// The `UnifiedLogEntry` enum contains the following methods:
/// - `to_json` - convert the log entry to JSON
/// - `get_timestamp` - get the timestamp of the log entry
///
/// # Example
/// ```
/// use rustfs_obs::{UnifiedLogEntry, ServerLogEntry};
/// use tracing_core::Level;
///
/// let server_entry = ServerLogEntry::new(Level::INFO, "test_module".to_string());
/// let unified = UnifiedLogEntry::Server(server_entry);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum UnifiedLogEntry {
    #[serde(rename = "server")]
    Server(ServerLogEntry),

    #[serde(rename = "audit")]
    Audit(Box<AuditLogEntry>),

    #[serde(rename = "console")]
    Console(ConsoleLogEntry),
}

impl LogRecord for UnifiedLogEntry {
    fn to_json(&self) -> String {
        match self {
            UnifiedLogEntry::Server(entry) => entry.to_json(),
            UnifiedLogEntry::Audit(entry) => entry.to_json(),
            UnifiedLogEntry::Console(entry) => entry.to_json(),
        }
    }

    fn get_timestamp(&self) -> DateTime<Utc> {
        match self {
            UnifiedLogEntry::Server(entry) => entry.get_timestamp(),
            UnifiedLogEntry::Audit(entry) => entry.get_timestamp(),
            UnifiedLogEntry::Console(entry) => entry.get_timestamp(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_base_log_entry() {
        let base = BaseLogEntry::new()
            .request_id(Some("req-123".to_string()))
            .message(Some("Test message".to_string()));

        assert_eq!(base.request_id, Some("req-123".to_string()));
        assert_eq!(base.message, Some("Test message".to_string()));
    }

    #[test]
    fn test_server_log_entry() {
        let entry = ServerLogEntry::new(Level::INFO, "test_module".to_string())
            .user_id(Some("user-456".to_string()))
            .add_field("operation".to_string(), "login".to_string());

        assert_eq!(entry.level.0, Level::INFO);
        assert_eq!(entry.source, "test_module");
        assert_eq!(entry.user_id, Some("user-456".to_string()));
        assert_eq!(entry.fields.len(), 1);
        assert_eq!(entry.fields[0], ("operation".to_string(), "login".to_string()));
    }

    #[test]
    fn test_unified_log_entry_json() {
        let server_entry = ServerLogEntry::new(Level::INFO, "test_source".to_string());
        let unified = UnifiedLogEntry::Server(server_entry);

        let json = unified.to_json();
        assert!(json.contains("test_source"));
    }
}
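Because of `#[serde(tag = "type")]` and the flattened base, the serialized form carries an inline discriminator rather than an outer wrapper object; a minimal sketch, assuming the pre-refactor `rustfs_obs` re-exports used in the tests above:

// Prints something like {"type":"server","level":"INFO","source":"test_source",...}
// (the remaining keys come from the flattened BaseLogEntry).
use rustfs_obs::{LogRecord, ServerLogEntry, UnifiedLogEntry};
use tracing_core::Level;

fn main() {
    let entry = UnifiedLogEntry::Server(ServerLogEntry::new(Level::INFO, "test_source".to_string()));
    println!("{}", entry.to_json());
}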
@@ -12,9 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use crate::logger::InitLogStatus;
+use crate::AppConfig;
 use crate::telemetry::{OtelGuard, init_telemetry};
-use crate::{AppConfig, Logger, get_global_logger, init_global_logger};
 use std::sync::{Arc, Mutex};
 use tokio::sync::{OnceCell, SetError};
 use tracing::{error, info};
@@ -61,46 +60,14 @@ pub enum GlobalError {
 ///
 /// # #[tokio::main]
 /// # async fn main() {
-/// let (logger, guard) = init_obs(None).await;
+/// let guard = init_obs(None).await;
 /// # }
 /// ```
-pub async fn init_obs(endpoint: Option<String>) -> (Arc<tokio::sync::Mutex<Logger>>, OtelGuard) {
+pub async fn init_obs(endpoint: Option<String>) -> OtelGuard {
     // Load the configuration file
     let config = AppConfig::new_with_endpoint(endpoint);
 
-    let guard = init_telemetry(&config.observability);
-
-    let logger = init_global_logger(&config).await;
-    let obs_config = config.observability.clone();
-    tokio::spawn(async move {
-        let result = InitLogStatus::init_start_log(&obs_config).await;
-        match result {
-            Ok(_) => {
-                info!("Logger initialized successfully");
-            }
-            Err(e) => {
-                error!("Failed to initialize logger: {}", e);
-            }
-        }
-    });
-
-    (logger, guard)
-}
-
-/// Get the global logger instance
-/// This function returns a reference to the global logger instance.
-///
-/// # Returns
-/// A reference to the global logger instance
-///
-/// # Example
-/// ```no_run
-/// use rustfs_obs::get_logger;
-///
-/// let logger = get_logger();
-/// ```
-pub fn get_logger() -> &'static Arc<tokio::sync::Mutex<Logger>> {
-    get_global_logger()
+    init_telemetry(&config.observability)
 }
 
 /// Set the global guard for OpenTelemetry
@@ -117,7 +84,7 @@ pub fn get_logger() -> &'static Arc<tokio::sync::Mutex<Logger>> {
 /// use rustfs_obs::{ init_obs, set_global_guard};
 ///
 /// async fn init() -> Result<(), Box<dyn std::error::Error>> {
-/// let (_, guard) = init_obs(None).await;
+/// let guard = init_obs(None).await;
 /// set_global_guard(guard)?;
 /// Ok(())
 /// }
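After this change `init_obs` only builds and returns the telemetry guard, so caller-side wiring shrinks to two calls; a minimal sketch of the new shape, matching the updated doc examples above:

// The logger tuple is gone: init_obs now yields just the OtelGuard,
// which is then stored globally so it lives for the process lifetime.
use rustfs_obs::{init_obs, set_global_guard};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let guard = init_obs(None).await;
    set_global_guard(guard)?;
    Ok(())
}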
@@ -18,10 +18,7 @@
 //!
 //! ## feature mark
 //!
-//! - `file`: enable file logging enabled by default
 //! - `gpu`: gpu monitoring function
-//! - `kafka`: enable kafka metric output
-//! - `webhook`: enable webhook notifications
 //! - `full`: includes all functions
 //!
 //! to enable gpu monitoring add in cargo toml
@@ -41,27 +38,15 @@
 ///
 /// # #[tokio::main]
 /// # async fn main() {
-/// let (logger, guard) = init_obs(None).await;
+/// # let guard = init_obs(None).await;
 /// # }
 /// ```
 mod config;
-mod entry;
 mod global;
-mod logger;
 mod metrics;
-mod sinks;
 mod system;
 mod telemetry;
-mod worker;
 
-pub use config::{AppConfig, LoggerConfig, OtelConfig, SinkConfig};
+pub use config::AppConfig;
-pub use entry::args::Args;
-pub use entry::audit::{ApiDetails, AuditLogEntry};
-pub use entry::base::BaseLogEntry;
-pub use entry::unified::{ConsoleLogEntry, ServerLogEntry, UnifiedLogEntry};
-pub use entry::{LogKind, LogRecord, ObjectVersion, SerializableLevel};
 pub use global::*;
-pub use logger::Logger;
-pub use logger::{get_global_logger, init_global_logger, start_logger};
-pub use logger::{log_debug, log_error, log_info, log_trace, log_warn, log_with_context};
 pub use system::SystemObserver;
@@ -1,490 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::sinks::Sink;
use crate::{
    AppConfig, AuditLogEntry, BaseLogEntry, ConsoleLogEntry, GlobalError, OtelConfig, ServerLogEntry, UnifiedLogEntry, sinks,
};
use rustfs_config::{APP_NAME, ENVIRONMENT, SERVICE_VERSION};
use std::sync::Arc;
use std::time::SystemTime;
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::sync::{Mutex, OnceCell};
use tracing_core::Level;

// Add the global instance at the module level
static GLOBAL_LOGGER: OnceCell<Arc<Mutex<Logger>>> = OnceCell::const_new();

/// Server log processor
#[derive(Debug)]
pub struct Logger {
    sender: Sender<UnifiedLogEntry>, // Log sending channel
    queue_capacity: usize,
}

impl Logger {
    /// Create a new Logger instance
    /// Returns the Logger and its corresponding Receiver
    pub fn new(config: &AppConfig) -> (Self, Receiver<UnifiedLogEntry>) {
        // Get the queue capacity from configuration, or use the default value 10000
        let queue_capacity = config.logger.as_ref().and_then(|l| l.queue_capacity).unwrap_or(10000);
        let (sender, receiver) = mpsc::channel(queue_capacity);
        (Logger { sender, queue_capacity }, receiver)
    }

    /// Get the queue capacity
    /// This function returns the queue capacity.
    /// # Returns
    /// The queue capacity
    /// # Example
    /// ```
    /// use rustfs_obs::Logger;
    /// async fn example(logger: &Logger) {
    ///     let _ = logger.get_queue_capacity();
    /// }
    /// ```
    pub fn get_queue_capacity(&self) -> usize {
        self.queue_capacity
    }

    /// Log a server entry
    #[tracing::instrument(skip(self), fields(log_source = "logger_server"))]
    pub async fn log_server_entry(&self, entry: ServerLogEntry) -> Result<(), GlobalError> {
        self.log_entry(UnifiedLogEntry::Server(entry)).await
    }

    /// Log an audit entry
    #[tracing::instrument(skip(self), fields(log_source = "logger_audit"))]
    pub async fn log_audit_entry(&self, entry: AuditLogEntry) -> Result<(), GlobalError> {
        self.log_entry(UnifiedLogEntry::Audit(Box::new(entry))).await
    }

    /// Log a console entry
    #[tracing::instrument(skip(self), fields(log_source = "logger_console"))]
    pub async fn log_console_entry(&self, entry: ConsoleLogEntry) -> Result<(), GlobalError> {
        self.log_entry(UnifiedLogEntry::Console(entry)).await
    }

    /// Asynchronous logging of unified log entries
    #[tracing::instrument(skip_all, fields(log_source = "logger"))]
    pub async fn log_entry(&self, entry: UnifiedLogEntry) -> Result<(), GlobalError> {
        // Extract information for tracing based on entry type
        match &entry {
            UnifiedLogEntry::Server(server) => {
                tracing::Span::current()
                    .record("log_level", server.level.0.as_str())
                    .record("log_message", server.base.message.as_deref().unwrap_or("log message not set"))
                    .record("source", &server.source);

                // Generate a tracing event based on the log level
                match server.level.0 {
                    Level::ERROR => {
                        tracing::error!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or(""));
                    }
                    Level::WARN => {
                        tracing::warn!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or(""));
                    }
                    Level::INFO => {
                        tracing::info!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or(""));
                    }
                    Level::DEBUG => {
                        tracing::debug!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or(""));
                    }
                    Level::TRACE => {
                        tracing::trace!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or(""));
                    }
                }
            }
            UnifiedLogEntry::Audit(audit) => {
                tracing::info!(
                    target: "audit_logs",
                    event = %audit.event,
                    api = %audit.api.name.as_deref().unwrap_or("unknown"),
                    message = %audit.base.message.as_deref().unwrap_or("")
                );
            }
            UnifiedLogEntry::Console(console) => {
                let level_str = match console.level {
                    crate::LogKind::Info => "INFO",
                    crate::LogKind::Warning => "WARN",
                    crate::LogKind::Error => "ERROR",
                    crate::LogKind::Fatal => "FATAL",
                };

                tracing::info!(
                    target: "console_logs",
                    level = %level_str,
                    node = %console.node_name,
                    message = %console.console_msg
                );
            }
        }

        // Send logs to the async queue with improved error handling
        match self.sender.try_send(entry) {
            Ok(_) => Ok(()),
            Err(mpsc::error::TrySendError::Full(entry)) => {
                // Processing strategy when the queue is full
                tracing::warn!("Log queue full, applying backpressure");
                match tokio::time::timeout(std::time::Duration::from_millis(500), self.sender.send(entry)).await {
                    Ok(Ok(_)) => Ok(()),
                    Ok(Err(_)) => Err(GlobalError::SendFailed("Channel closed")),
                    Err(_) => Err(GlobalError::Timeout("Queue backpressure timeout")),
                }
            }
            Err(mpsc::error::TrySendError::Closed(_)) => Err(GlobalError::SendFailed("Logger channel closed")),
        }
    }

    /// Write log with context information
    /// This function writes log messages with context information.
    ///
    /// # Parameters
    /// - `message`: Message to be logged
    /// - `source`: Source of the log
    /// - `level`: Log level
    /// - `request_id`: Request ID
    /// - `user_id`: User ID
    /// - `fields`: Additional fields
    ///
    /// # Returns
    /// Result indicating whether the operation was successful
    ///
    /// # Example
    /// ```
    /// use tracing_core::Level;
    /// use rustfs_obs::Logger;
    ///
    /// async fn example(logger: &Logger) {
    ///     let _ = logger.write_with_context("This is an information message", "example", Level::INFO, Some("req-12345".to_string()), Some("user-6789".to_string()), vec![("endpoint".to_string(), "/api/v1/data".to_string())]).await;
    /// }
    /// ```
    pub async fn write_with_context(
        &self,
        message: &str,
        source: &str,
        level: Level,
        request_id: Option<String>,
        user_id: Option<String>,
        fields: Vec<(String, String)>,
    ) -> Result<(), GlobalError> {
        let base = BaseLogEntry::new().message(Some(message.to_string())).request_id(request_id);

        let server_entry = ServerLogEntry::new(level, source.to_string())
            .user_id(user_id)
            .fields(fields)
            .with_base(base);

        self.log_server_entry(server_entry).await
    }

    /// Write log
    /// This function writes log messages.
    /// # Parameters
    /// - `message`: Message to be logged
    /// - `source`: Source of the log
    /// - `level`: Log level
    ///
    /// # Returns
    /// Result indicating whether the operation was successful
    ///
    /// # Example
    /// ```
    /// use rustfs_obs::Logger;
    /// use tracing_core::Level;
    ///
    /// async fn example(logger: &Logger) {
    ///     let _ = logger.write("This is an information message", "example", Level::INFO).await;
    /// }
    /// ```
    pub async fn write(&self, message: &str, source: &str, level: Level) -> Result<(), GlobalError> {
        self.write_with_context(message, source, level, None, None, Vec::new()).await
    }

    /// Shutdown the logger
    /// This function shuts down the logger.
    ///
    /// # Returns
    /// Result indicating whether the operation was successful
    ///
    /// # Example
    /// ```
    /// use rustfs_obs::Logger;
    ///
    /// async fn example(logger: Logger) {
    ///     let _ = logger.shutdown().await;
    /// }
    /// ```
    pub async fn shutdown(self) -> Result<(), GlobalError> {
        drop(self.sender); // Close the sending end so the receiver knows no new messages will arrive
        Ok(())
    }
}
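The queue-full branch in `log_entry` above is a try-then-bounded-wait pattern; reduced to a standalone sketch with plain tokio primitives (names and the 500 ms bound are illustrative):

// Try a non-blocking send first; if the bounded channel is full, wait at
// most a fixed time for capacity instead of silently dropping the entry.
use tokio::sync::mpsc;
use tokio::time::{Duration, timeout};

async fn send_with_backpressure<T>(tx: &mpsc::Sender<T>, msg: T) -> Result<(), &'static str> {
    match tx.try_send(msg) {
        Ok(()) => Ok(()),
        Err(mpsc::error::TrySendError::Full(msg)) => {
            match timeout(Duration::from_millis(500), tx.send(msg)).await {
                Ok(Ok(())) => Ok(()),
                Ok(Err(_)) => Err("channel closed"),
                Err(_) => Err("backpressure timeout"),
            }
        }
        Err(mpsc::error::TrySendError::Closed(_)) => Err("channel closed"),
    }
}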
/// Start the log module
/// This function starts the log module.
/// It initializes the logger and starts the worker to process logs.
/// # Parameters
/// - `config`: Configuration information
/// - `sinks`: A vector of Sink instances
/// # Returns
/// The global logger instance
/// # Example
/// ```no_run
/// use rustfs_obs::{AppConfig, start_logger};
///
/// let config = AppConfig::default();
/// let sinks = vec![];
/// let logger = start_logger(&config, sinks);
/// ```
pub fn start_logger(config: &AppConfig, sinks: Vec<Arc<dyn Sink>>) -> Logger {
    let (logger, receiver) = Logger::new(config);
    tokio::spawn(crate::worker::start_worker(receiver, sinks));
    logger
}

/// Initialize the global logger instance
/// This function initializes the global logger instance and returns a reference to it.
/// If the logger has been initialized before, it will return the existing logger instance.
///
/// # Parameters
/// - `config`: Configuration information
///
/// # Returns
/// A reference to the global logger instance
///
/// # Example
/// ```
/// use rustfs_obs::{AppConfig, init_global_logger};
///
/// let config = AppConfig::default();
/// let logger = init_global_logger(&config);
/// ```
pub async fn init_global_logger(config: &AppConfig) -> Arc<Mutex<Logger>> {
    let sinks = sinks::create_sinks(config).await;
    let logger = Arc::new(Mutex::new(start_logger(config, sinks)));
    GLOBAL_LOGGER.set(logger.clone()).expect("Logger already initialized");
    logger
}

/// Get the global logger instance
///
/// This function returns a reference to the global logger instance.
///
/// # Returns
/// A reference to the global logger instance
///
/// # Example
/// ```no_run
/// use rustfs_obs::get_global_logger;
///
/// let logger = get_global_logger();
/// ```
pub fn get_global_logger() -> &'static Arc<Mutex<Logger>> {
    GLOBAL_LOGGER.get().expect("Logger not initialized")
}

/// Log information
/// This function logs information messages.
///
/// # Parameters
/// - `message`: Message to be logged
/// - `source`: Source of the log
///
/// # Returns
/// Result indicating whether the operation was successful
///
/// # Example
/// ```no_run
/// use rustfs_obs::log_info;
///
/// async fn example() {
///     let _ = log_info("This is an information message", "example").await;
/// }
/// ```
pub async fn log_info(message: &str, source: &str) -> Result<(), GlobalError> {
    get_global_logger().lock().await.write(message, source, Level::INFO).await
}

/// Log error
/// This function logs error messages.
/// # Parameters
/// - `message`: Message to be logged
/// - `source`: Source of the log
/// # Returns
/// Result indicating whether the operation was successful
/// # Example
/// ```no_run
/// use rustfs_obs::log_error;
///
/// async fn example() {
///     let _ = log_error("This is an error message", "example").await;
/// }
/// ```
pub async fn log_error(message: &str, source: &str) -> Result<(), GlobalError> {
    get_global_logger().lock().await.write(message, source, Level::ERROR).await
}

/// Log warning
/// This function logs warning messages.
/// # Parameters
/// - `message`: Message to be logged
/// - `source`: Source of the log
/// # Returns
/// Result indicating whether the operation was successful
///
/// # Example
/// ```no_run
/// use rustfs_obs::log_warn;
///
/// async fn example() {
///     let _ = log_warn("This is a warning message", "example").await;
/// }
/// ```
pub async fn log_warn(message: &str, source: &str) -> Result<(), GlobalError> {
    get_global_logger().lock().await.write(message, source, Level::WARN).await
}

/// Log debug
/// This function logs debug messages.
/// # Parameters
/// - `message`: Message to be logged
/// - `source`: Source of the log
/// # Returns
/// Result indicating whether the operation was successful
///
/// # Example
/// ```no_run
/// use rustfs_obs::log_debug;
///
/// async fn example() {
///     let _ = log_debug("This is a debug message", "example").await;
/// }
/// ```
pub async fn log_debug(message: &str, source: &str) -> Result<(), GlobalError> {
    get_global_logger().lock().await.write(message, source, Level::DEBUG).await
}

/// Log trace
/// This function logs trace messages.
/// # Parameters
/// - `message`: Message to be logged
/// - `source`: Source of the log
///
/// # Returns
/// Result indicating whether the operation was successful
///
/// # Example
/// ```no_run
/// use rustfs_obs::log_trace;
///
/// async fn example() {
///     let _ = log_trace("This is a trace message", "example").await;
/// }
/// ```
pub async fn log_trace(message: &str, source: &str) -> Result<(), GlobalError> {
    get_global_logger().lock().await.write(message, source, Level::TRACE).await
}

/// Log with context information
/// This function logs messages with context information.
/// # Parameters
/// - `message`: Message to be logged
/// - `source`: Source of the log
/// - `level`: Log level
/// - `request_id`: Request ID
/// - `user_id`: User ID
/// - `fields`: Additional fields
/// # Returns
/// Result indicating whether the operation was successful
/// # Example
/// ```no_run
/// use tracing_core::Level;
/// use rustfs_obs::log_with_context;
///
/// async fn example() {
///     let _ = log_with_context("This is an information message", "example", Level::INFO, Some("req-12345".to_string()), Some("user-6789".to_string()), vec![("endpoint".to_string(), "/api/v1/data".to_string())]).await;
/// }
/// ```
pub async fn log_with_context(
    message: &str,
    source: &str,
    level: Level,
    request_id: Option<String>,
    user_id: Option<String>,
    fields: Vec<(String, String)>,
) -> Result<(), GlobalError> {
    get_global_logger()
        .lock()
        .await
        .write_with_context(message, source, level, request_id, user_id, fields)
        .await
}

/// Log initialization status
#[derive(Debug)]
pub(crate) struct InitLogStatus {
    pub timestamp: SystemTime,
    pub service_name: String,
    pub version: String,
    pub environment: String,
}

impl Default for InitLogStatus {
    fn default() -> Self {
        Self {
            timestamp: SystemTime::now(),
            service_name: String::from(APP_NAME),
            version: SERVICE_VERSION.to_string(),
            environment: ENVIRONMENT.to_string(),
        }
    }
}

impl InitLogStatus {
    pub fn new_config(config: &OtelConfig) -> Self {
        let config = config.clone();
        let environment = config.environment.unwrap_or(ENVIRONMENT.to_string());
        let version = config.service_version.unwrap_or(SERVICE_VERSION.to_string());
        Self {
            timestamp: SystemTime::now(),
            service_name: String::from(APP_NAME),
            version,
            environment,
        }
    }

    pub async fn init_start_log(config: &OtelConfig) -> Result<(), GlobalError> {
        let status = Self::new_config(config);
        log_init_state(Some(status)).await
    }
}

/// Log initialization details during system startup
async fn log_init_state(status: Option<InitLogStatus>) -> Result<(), GlobalError> {
    let status = status.unwrap_or_default();

    let base_entry = BaseLogEntry::new()
        .timestamp(chrono::DateTime::from(status.timestamp))
        .message(Some(format!(
            "Service initialization started - {} v{} in {}",
            status.service_name, status.version, status.environment
        )))
        .request_id(Some("system_init".to_string()));

    let server_entry = ServerLogEntry::new(Level::INFO, "system_initialization".to_string())
        .with_base(base_entry)
        .user_id(Some("system".to_string()));

    get_global_logger().lock().await.log_server_entry(server_entry).await?;
    Ok(())
}
@@ -1,178 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::sinks::Sink;
use crate::{LogRecord, UnifiedLogEntry};
use async_trait::async_trait;
use std::sync::Arc;
use tokio::fs::OpenOptions;
use tokio::io;
use tokio::io::AsyncWriteExt;

/// File Sink Implementation
pub struct FileSink {
    path: String,
    buffer_size: usize,
    writer: Arc<tokio::sync::Mutex<io::BufWriter<tokio::fs::File>>>,
    entry_count: std::sync::atomic::AtomicUsize,
    last_flush: std::sync::atomic::AtomicU64,
    flush_interval_ms: u64, // Time between flushes
    flush_threshold: usize, // Number of entries before flush
}

impl FileSink {
    /// Create a new FileSink instance
    pub async fn new(
        path: String,
        buffer_size: usize,
        flush_interval_ms: u64,
        flush_threshold: usize,
    ) -> Result<Self, io::Error> {
        // Check if the file exists
        let file_exists = tokio::fs::metadata(&path).await.is_ok();
        // If the file does not exist, create its parent directory
        if !file_exists {
            tokio::fs::create_dir_all(std::path::Path::new(&path).parent().unwrap()).await?;
            tracing::debug!("File does not exist, creating it. Path: {:?}", path)
        }
        let file = if file_exists {
            // If the file exists, open it in append mode
            tracing::debug!("FileSink: File exists, opening in append mode. Path: {:?}", path);
            OpenOptions::new().append(true).create(true).open(&path).await?
        } else {
            // If the file does not exist, create it
            tracing::debug!("FileSink: File does not exist, creating a new file.");
            // Create the file and write a header or initial content if needed
            OpenOptions::new().create(true).truncate(true).write(true).open(&path).await?
        };
        let writer = io::BufWriter::with_capacity(buffer_size, file);
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_millis() as u64;
        Ok(FileSink {
            path,
            buffer_size,
            writer: Arc::new(tokio::sync::Mutex::new(writer)),
            entry_count: std::sync::atomic::AtomicUsize::new(0),
            last_flush: std::sync::atomic::AtomicU64::new(now),
            flush_interval_ms,
            flush_threshold,
        })
    }

    #[allow(dead_code)]
    async fn initialize_writer(&mut self) -> io::Result<()> {
        let file = tokio::fs::File::create(&self.path).await?;

        // Use buffer_size to create a buffered writer with the specified capacity
        let buf_writer = io::BufWriter::with_capacity(self.buffer_size, file);

        // Replace the original writer with a new Mutex
        self.writer = Arc::new(tokio::sync::Mutex::new(buf_writer));
        Ok(())
    }

    // Get the current buffer size
    #[allow(dead_code)]
    pub fn buffer_size(&self) -> usize {
        self.buffer_size
    }

    // Dynamically adjust the buffer size
    #[allow(dead_code)]
    pub async fn set_buffer_size(&mut self, new_size: usize) -> io::Result<()> {
        if self.buffer_size != new_size {
            self.buffer_size = new_size;
            // Reinitialize the writer directly, without checking is_some()
            self.initialize_writer().await?;
        }
        Ok(())
    }

    // Check if flushing is needed based on count or time
    fn should_flush(&self) -> bool {
        // Check entry count threshold
        if self.entry_count.load(std::sync::atomic::Ordering::Relaxed) >= self.flush_threshold {
            return true;
        }

        // Check time threshold
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_millis() as u64;

        let last = self.last_flush.load(std::sync::atomic::Ordering::Relaxed);
        now - last >= self.flush_interval_ms
    }
}
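Construction is a single awaited call; a hedged sketch (the path and tuning numbers below are illustrative, not the crate's defaults):

// Build a FileSink with an 8 KiB write buffer that flushes every 100
// entries or at least once per second, whichever comes first.
#[tokio::main]
async fn main() -> std::io::Result<()> {
    let sink = FileSink::new(
        "/var/log/rustfs/rustfs.log".to_string(), // hypothetical path
        8 * 1024, // buffer_size in bytes
        1_000,    // flush_interval_ms
        100,      // flush_threshold in entries
    )
    .await?;
    let _ = sink; // normally handed to the worker alongside the other sinks
    Ok(())
}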
#[async_trait]
impl Sink for FileSink {
    async fn write(&self, entry: &UnifiedLogEntry) {
        let line = format!("{entry:?}\n");
        let mut writer = self.writer.lock().await;

        if let Err(e) = writer.write_all(line.as_bytes()).await {
            eprintln!(
                "Failed to write log to file {}: {}, entry timestamp: {:?}",
                self.path,
                e,
                entry.get_timestamp()
            );
            return;
        }

        // Only flush periodically to improve performance.
        // Increment the entry count
        self.entry_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

        // Check if we should flush
        if self.should_flush() {
            if let Err(e) = writer.flush().await {
                eprintln!("Failed to flush log file {}: {}", self.path, e);
                return;
            }

            // Reset counters
            self.entry_count.store(0, std::sync::atomic::Ordering::Relaxed);

            let now = std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_millis() as u64;

            self.last_flush.store(now, std::sync::atomic::Ordering::Relaxed);
        }
    }
}

impl Drop for FileSink {
    fn drop(&mut self) {
        let writer = self.writer.clone();
        let path = self.path.clone();

        tokio::task::spawn_blocking(move || {
            let rt = tokio::runtime::Runtime::new().unwrap();
            rt.block_on(async {
                let mut writer = writer.lock().await;
                if let Err(e) = writer.flush().await {
                    eprintln!("Failed to flush log file {path}: {e}");
                }
            });
        });
    }
}
@@ -1,179 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::sinks::Sink;
use crate::{LogRecord, UnifiedLogEntry};
use async_trait::async_trait;
use std::sync::Arc;

/// Kafka Sink Implementation
pub struct KafkaSink {
    producer: rdkafka::producer::FutureProducer,
    topic: String,
    batch_size: usize,
    batch_timeout_ms: u64,
    entries: Arc<tokio::sync::Mutex<Vec<UnifiedLogEntry>>>,
    last_flush: Arc<std::sync::atomic::AtomicU64>,
}

impl KafkaSink {
    /// Create a new KafkaSink instance
    pub fn new(producer: rdkafka::producer::FutureProducer, topic: String, batch_size: usize, batch_timeout_ms: u64) -> Self {
        // Create Arc-wrapped values first
        let entries = Arc::new(tokio::sync::Mutex::new(Vec::with_capacity(batch_size)));
        let last_flush = Arc::new(std::sync::atomic::AtomicU64::new(
            std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_millis() as u64,
        ));
        let sink = KafkaSink {
            producer: producer.clone(),
            topic: topic.clone(),
            batch_size,
            batch_timeout_ms,
            entries: entries.clone(),
            last_flush: last_flush.clone(),
        };

        // Start the background flusher
        tokio::spawn(Self::periodic_flush(producer, topic, entries, last_flush, batch_timeout_ms));

        sink
    }

    /// Add a getter method to read the batch_timeout_ms field
    #[allow(dead_code)]
    pub fn batch_timeout(&self) -> u64 {
        self.batch_timeout_ms
    }

    /// Add a method to dynamically adjust the timeout if needed
    #[allow(dead_code)]
    pub fn set_batch_timeout(&mut self, new_timeout_ms: u64) {
        self.batch_timeout_ms = new_timeout_ms;
    }

    async fn periodic_flush(
        producer: rdkafka::producer::FutureProducer,
        topic: String,
        entries: Arc<tokio::sync::Mutex<Vec<UnifiedLogEntry>>>,
        last_flush: Arc<std::sync::atomic::AtomicU64>,
        timeout_ms: u64,
    ) {
        loop {
            tokio::time::sleep(tokio::time::Duration::from_millis(timeout_ms / 2)).await;

            let now = std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_millis() as u64;

            let last = last_flush.load(std::sync::atomic::Ordering::Relaxed);

            if now - last >= timeout_ms {
                let mut batch = entries.lock().await;
                if !batch.is_empty() {
                    Self::send_batch(&producer, &topic, batch.drain(..).collect()).await;
                    last_flush.store(now, std::sync::atomic::Ordering::Relaxed);
                }
            }
        }
    }

    async fn send_batch(producer: &rdkafka::producer::FutureProducer, topic: &str, entries: Vec<UnifiedLogEntry>) {
        for entry in entries {
            let payload = match serde_json::to_string(&entry) {
                Ok(p) => p,
                Err(e) => {
                    eprintln!("Failed to serialize log entry: {e}");
                    continue;
                }
            };

            let span_id = entry.get_timestamp().to_rfc3339();

            let _ = producer
                .send(
                    rdkafka::producer::FutureRecord::to(topic).payload(&payload).key(&span_id),
                    std::time::Duration::from_secs(5),
                )
                .await;
        }
    }
}
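Wiring the sink up mirrors what `sinks/mod.rs` does; a hedged sketch using the same rdkafka config keys (broker address, topic, and batch numbers are assumptions for illustration):

// Note: KafkaSink::new spawns the periodic flusher with tokio::spawn,
// so this must run inside a Tokio runtime.
fn build_kafka_sink() -> Result<KafkaSink, rdkafka::error::KafkaError> {
    let producer: rdkafka::producer::FutureProducer = rdkafka::config::ClientConfig::new()
        .set("bootstrap.servers", "localhost:9092") // assumed broker address
        .set("message.timeout.ms", "5000")
        .create()?;
    // Batch up to 100 entries, or flush at least every 2 seconds.
    Ok(KafkaSink::new(producer, "rustfs-logs".to_string(), 100, 2_000))
}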
#[async_trait]
impl Sink for KafkaSink {
    async fn write(&self, entry: &UnifiedLogEntry) {
        let mut batch = self.entries.lock().await;
        batch.push(entry.clone());

        let should_flush_by_size = batch.len() >= self.batch_size;
        let should_flush_by_time = {
            let now = std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_millis() as u64;
            let last = self.last_flush.load(std::sync::atomic::Ordering::Relaxed);
            now - last >= self.batch_timeout_ms
        };

        if should_flush_by_size || should_flush_by_time {
            // Existing flush logic
            let entries_to_send: Vec<UnifiedLogEntry> = batch.drain(..).collect();
            let producer = self.producer.clone();
            let topic = self.topic.clone();

            self.last_flush.store(
                std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .unwrap()
                    .as_millis() as u64,
                std::sync::atomic::Ordering::Relaxed,
            );

            tokio::spawn(async move {
                KafkaSink::send_batch(&producer, &topic, entries_to_send).await;
            });
        }
    }
}

impl Drop for KafkaSink {
    fn drop(&mut self) {
        // Perform any necessary cleanup here
        // For example, you might want to flush any remaining entries
        let producer = self.producer.clone();
        let topic = self.topic.clone();
        let entries = self.entries.clone();
        let last_flush = self.last_flush.clone();

        tokio::spawn(async move {
            let mut batch = entries.lock().await;
            if !batch.is_empty() {
                KafkaSink::send_batch(&producer, &topic, batch.drain(..).collect()).await;
                last_flush.store(
                    std::time::SystemTime::now()
                        .duration_since(std::time::UNIX_EPOCH)
                        .unwrap()
                        .as_millis() as u64,
                    std::sync::atomic::Ordering::Relaxed,
                );
            }
        });

        eprintln!("Dropping KafkaSink with topic: {0}", self.topic);
    }
}
@@ -1,123 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{AppConfig, SinkConfig, UnifiedLogEntry};
use async_trait::async_trait;
use std::sync::Arc;

#[cfg(feature = "file")]
mod file;
#[cfg(all(feature = "kafka", target_os = "linux"))]
mod kafka;
#[cfg(feature = "webhook")]
mod webhook;

/// Sink Trait definition, asynchronously write logs
#[async_trait]
pub trait Sink: Send + Sync {
    async fn write(&self, entry: &UnifiedLogEntry);
}
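The trait has a single method, so a custom destination is a one-impl affair; a minimal sketch written as if inside this module (stderr target chosen purely for illustration):

// A trivial sink that writes each entry's JSON form to stderr.
// to_json() comes from crate::LogRecord, which must be in scope.
struct StderrSink;

#[async_trait]
impl Sink for StderrSink {
    async fn write(&self, entry: &UnifiedLogEntry) {
        use crate::LogRecord;
        eprintln!("{}", entry.to_json());
    }
}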
/// Create a list of Sink instances
pub async fn create_sinks(config: &AppConfig) -> Vec<Arc<dyn Sink>> {
    let mut sinks: Vec<Arc<dyn Sink>> = Vec::new();

    for sink_config in &config.sinks {
        match sink_config {
            #[cfg(all(feature = "kafka", target_os = "linux"))]
            SinkConfig::Kafka(kafka_config) => {
                match rdkafka::config::ClientConfig::new()
                    .set("bootstrap.servers", &kafka_config.brokers)
                    .set("message.timeout.ms", "5000")
                    .create()
                {
                    Ok(producer) => {
                        sinks.push(Arc::new(kafka::KafkaSink::new(
                            producer,
                            kafka_config.topic.clone(),
                            kafka_config
                                .batch_size
                                .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_KAFKA_BATCH_SIZE),
                            kafka_config
                                .batch_timeout_ms
                                .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS),
                        )));
                        tracing::info!("Kafka sink created for topic: {}", kafka_config.topic);
                    }
                    Err(e) => {
                        tracing::error!("Failed to create Kafka producer: {}", e);
                    }
                }
            }

            #[cfg(feature = "webhook")]
            SinkConfig::Webhook(webhook_config) => {
                sinks.push(Arc::new(webhook::WebhookSink::new(
                    webhook_config.endpoint.clone(),
                    webhook_config.auth_token.clone(),
                    webhook_config
                        .max_retries
                        .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_WEBHOOK_MAX_RETRIES),
                    webhook_config
                        .retry_delay_ms
                        .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_WEBHOOK_RETRY_DELAY_MS),
                )));
                tracing::info!("Webhook sink created for endpoint: {}", webhook_config.endpoint);
            }
            #[cfg(feature = "file")]
            SinkConfig::File(file_config) => {
                tracing::debug!("FileSink: Using path: {}", file_config.path);
                match file::FileSink::new(
                    std::path::Path::new(&file_config.path)
                        .join(rustfs_config::DEFAULT_SINK_FILE_LOG_FILE)
                        .to_string_lossy()
                        .to_string(),
                    file_config
                        .buffer_size
                        .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_FILE_BUFFER_SIZE),
                    file_config
                        .flush_interval_ms
                        .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS),
                    file_config
                        .flush_threshold
                        .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_FILE_FLUSH_THRESHOLD),
                )
                .await
                {
                    Ok(sink) => {
                        sinks.push(Arc::new(sink));
                        tracing::info!("File sink created for path: {}", file_config.path);
                    }
                    Err(e) => {
                        tracing::error!("Failed to create File sink: {}", e);
                    }
                }
            }
            #[cfg(any(not(feature = "kafka"), not(target_os = "linux")))]
            SinkConfig::Kafka(_) => {
                tracing::warn!("Kafka sink is configured but the 'kafka' feature is not enabled");
            }
            #[cfg(not(feature = "webhook"))]
            SinkConfig::Webhook(_) => {
                tracing::warn!("Webhook sink is configured but the 'webhook' feature is not enabled");
            }
            #[cfg(not(feature = "file"))]
            SinkConfig::File(_) => {
                tracing::warn!("File sink is configured but the 'file' feature is not enabled");
            }
        }
    }

    sinks
}
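Downstream, the worker consumes the channel and fans each entry out to every sink in this list; roughly (a simplified sketch of what `crate::worker` does, not its exact code):

// Each sink applies its own batching/flushing policy inside write().
async fn dispatch(entry: &UnifiedLogEntry, sinks: &[Arc<dyn Sink>]) {
    for sink in sinks {
        sink.write(entry).await;
    }
}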
@@ -1,84 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::UnifiedLogEntry;
use crate::sinks::Sink;
use async_trait::async_trait;

/// Webhook Sink Implementation
pub struct WebhookSink {
    endpoint: String,
    auth_token: String,
    client: reqwest::Client,
    max_retries: usize,
    retry_delay_ms: u64,
}

impl WebhookSink {
    pub fn new(endpoint: String, auth_token: String, max_retries: usize, retry_delay_ms: u64) -> Self {
        WebhookSink {
            endpoint,
            auth_token,
            client: reqwest::Client::builder()
                .timeout(std::time::Duration::from_secs(10))
                .build()
                .unwrap_or_else(|_| reqwest::Client::new()),
            max_retries,
            retry_delay_ms,
        }
    }
}

#[async_trait]
impl Sink for WebhookSink {
    async fn write(&self, entry: &UnifiedLogEntry) {
        let mut retries = 0;
        let url = self.endpoint.clone();
        let entry_clone = entry.clone();
        let auth_value = reqwest::header::HeaderValue::from_str(format!("Bearer {}", self.auth_token.clone()).as_str()).unwrap();
        while retries < self.max_retries {
            match self
                .client
                .post(&url)
                .header(reqwest::header::AUTHORIZATION, auth_value.clone())
                .json(&entry_clone)
                .send()
                .await
            {
                Ok(response) if response.status().is_success() => {
                    return;
                }
                _ => {
                    retries += 1;
                    if retries < self.max_retries {
                        tokio::time::sleep(tokio::time::Duration::from_millis(
                            self.retry_delay_ms * (1 << retries), // Exponential backoff
                        ))
                        .await;
                    }
                }
            }
        }

        eprintln!("Failed to send log to webhook after {0} retries", self.max_retries);
    }
}
|
|
||||||
|
|
||||||
impl Drop for WebhookSink {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
// Perform any necessary cleanup here
|
|
||||||
// For example, you might want to log that the sink is being dropped
|
|
||||||
eprintln!("Dropping WebhookSink with URL: {0}", self.endpoint);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
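Note: the retry loop in the removed `WebhookSink::write` doubles its delay on every failed attempt via `retry_delay_ms * (1 << retries)`. A standalone sketch of that schedule (plain arithmetic, no project types):

    fn backoff_delays_ms(base_ms: u64, max_retries: usize) -> Vec<u64> {
        // After failure n the code sleeps base * 2^n, but only while n < max_retries,
        // so the last failure exits the loop without sleeping.
        (1..max_retries).map(|n| base_ms * (1u64 << n)).collect()
    }

    fn main() {
        // With base = 100ms and 4 retries the sleeps are 200, 400, 800 ms.
        assert_eq!(backoff_delays_ms(100, 4), vec![200, 400, 800]);
        println!("{:?}", backoff_delays_ms(100, 4));
    }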
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use crate::OtelConfig;
+use crate::config::OtelConfig;
 use flexi_logger::{
     Age, Cleanup, Criterion, DeferredNow, FileSpec, LogSpecification, Naming, Record, WriteMode,
     WriteMode::{AsyncWith, BufferAndFlush},
@@ -63,7 +63,8 @@ use tracing_subscriber::{EnvFilter, Layer, layer::SubscriberExt, util::Subscribe
 /// - The tracer provider (for distributed tracing)
 /// - The meter provider (for metrics collection)
 /// - The logger provider (for structured logging)
-// Implement Debug trait correctly, rather than using derive, as some fields may not have implemented Debug
+///
+/// Implement Debug trait correctly, rather than using derive, as some fields may not have implemented Debug
 pub struct OtelGuard {
     tracer_provider: Option<SdkTracerProvider>,
     meter_provider: Option<SdkMeterProvider>,
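Note: the promoted doc comment explains why `OtelGuard` implements `Debug` by hand instead of deriving it — the provider fields may not be `Debug` themselves. A minimal sketch of that pattern (field types are placeholders, not the OpenTelemetry SDK types):

    use std::fmt;

    struct NotDebug; // stands in for an SDK provider type without a Debug impl

    struct OtelGuardSketch {
        tracer_provider: Option<NotDebug>,
    }

    impl fmt::Debug for OtelGuardSketch {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // Report only whether the field is set, never its contents.
            f.debug_struct("OtelGuardSketch")
                .field("tracer_provider", &self.tracer_provider.is_some())
                .finish()
        }
    }

    fn main() {
        println!("{:?}", OtelGuardSketch { tracer_provider: Some(NotDebug) });
    }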
@@ -1,27 +0,0 @@
-// Copyright 2024 RustFS Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use crate::{UnifiedLogEntry, sinks::Sink};
-use std::sync::Arc;
-use tokio::sync::mpsc::Receiver;
-
-/// Start the log processing worker thread
-pub(crate) async fn start_worker(receiver: Receiver<UnifiedLogEntry>, sinks: Vec<Arc<dyn Sink>>) {
-    let mut receiver = receiver;
-    while let Some(entry) = receiver.recv().await {
-        for sink in &sinks {
-            sink.write(&entry).await;
-        }
-    }
-}
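Note: the removed `start_worker` is a plain mpsc fan-out — one receiver, every entry written to every sink in order. A hedged usage sketch with toy types (the real `UnifiedLogEntry` and `Sink` differ; `Sink::write` is async in the crate, sync here for brevity):

    use std::sync::Arc;
    use tokio::sync::mpsc;

    #[derive(Clone, Debug)]
    struct Entry(String); // stand-in for UnifiedLogEntry

    trait Sink: Send + Sync {
        fn write(&self, entry: &Entry);
    }

    struct StdoutSink;
    impl Sink for StdoutSink {
        fn write(&self, entry: &Entry) {
            println!("{:?}", entry);
        }
    }

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = mpsc::channel::<Entry>(1024);
        let sinks: Vec<Arc<dyn Sink>> = vec![Arc::new(StdoutSink)];

        // Same shape as the removed start_worker: drain the channel, fan out to sinks.
        let worker = tokio::spawn(async move {
            while let Some(entry) = rx.recv().await {
                for sink in &sinks {
                    sink.write(&entry);
                }
            }
        });

        tx.send(Entry("hello".into())).await.unwrap();
        drop(tx); // closing the sender lets the worker loop finish
        worker.await.unwrap();
    }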
@@ -13,18 +13,14 @@
 // limitations under the License.
 
 use bytes::Bytes;
-use futures::pin_mut;
-use futures::{Stream, StreamExt};
-use std::io::Error;
+use futures::{Stream, StreamExt, pin_mut};
 use std::{
     collections::{HashMap, HashSet},
     fmt::Display,
-    net::{IpAddr, SocketAddr, TcpListener, ToSocketAddrs},
-    time::{Duration, Instant},
-};
-use std::{
-    net::Ipv6Addr,
+    io::Error,
+    net::{IpAddr, Ipv6Addr, SocketAddr, TcpListener, ToSocketAddrs},
     sync::{Arc, LazyLock, Mutex, RwLock},
+    time::{Duration, Instant},
 };
 use tracing::{error, info};
 use transform_stream::AsyncTryStream;
@@ -57,6 +57,7 @@ rustfs-protos = { workspace = true }
 rustfs-s3select-query = { workspace = true }
 rustfs-targets = { workspace = true }
 rustfs-kms = { workspace = true }
+rustfs-lock.workspace = true
 atoi = { workspace = true }
 atomic_enum = { workspace = true }
 axum.workspace = true
@@ -69,7 +70,9 @@ chrono = { workspace = true }
 clap = { workspace = true }
 datafusion = { workspace = true }
 const-str = { workspace = true }
+flatbuffers.workspace = true
 futures.workspace = true
+futures-util.workspace = true
 hyper.workspace = true
 hyper-util.workspace = true
 http.workspace = true
@@ -80,6 +83,7 @@ mime_guess = { workspace = true }
 opentelemetry = { workspace = true }
 pin-project-lite.workspace = true
 reqwest = { workspace = true }
+rmp-serde.workspace = true
 rustls = { workspace = true }
 rust-embed = { workspace = true, features = ["interpolate-folder-path"] }
 s3s.workspace = true
@@ -116,10 +120,7 @@ url = { workspace = true }
 urlencoding = { workspace = true }
 uuid = { workspace = true }
 zip = { workspace = true }
-futures-util.workspace = true
-rmp-serde.workspace = true
-flatbuffers.workspace = true
-rustfs-lock.workspace = true
 
 [target.'cfg(any(target_os = "macos", target_os = "freebsd", target_os = "netbsd", target_os = "openbsd"))'.dependencies]
 sysctl = { workspace = true }
@@ -129,7 +130,7 @@ sysctl = { workspace = true }
 libsystemd.workspace = true
 
 [target.'cfg(all(target_os = "linux", target_env = "gnu"))'.dependencies]
-tikv-jemallocator = "0.6"
+tikv-jemallocator = "0.6.1"
 
 [target.'cfg(all(target_os = "linux", target_env = "musl"))'.dependencies]
 mimalloc = "0.1"
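Note: the Cargo.toml hunks above pin `tikv-jemallocator` for gnu-Linux and `mimalloc` for musl. The usual way such target-specific allocator dependencies are wired in is a cfg-gated `#[global_allocator]`; a sketch follows — whether rustfs does exactly this is not shown in this diff and is an assumption.

    // In main.rs: pick the allocator to match the target-specific dependency.
    #[cfg(all(target_os = "linux", target_env = "gnu"))]
    #[global_allocator]
    static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

    #[cfg(all(target_os = "linux", target_env = "musl"))]
    #[global_allocator]
    static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

    fn main() {
        // Heap allocations below go through the selected allocator.
        let v: Vec<u8> = Vec::with_capacity(1024);
        println!("allocated {} bytes of capacity", v.capacity());
    }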
@@ -127,7 +127,7 @@ async fn async_main() -> Result<()> {
     init_license(opt.license.clone());
 
     // Initialize Observability
-    let (_logger, guard) = init_obs(Some(opt.clone().obs_endpoint)).await;
+    let guard = init_obs(Some(opt.clone().obs_endpoint)).await;
 
     // print startup logo
     info!("{}", LOGO);
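Note: `init_obs` now returns a single guard instead of a `(logger, guard)` pair; the caller only needs to keep the guard alive until shutdown so exporters flush on drop. A sketch of the RAII shape (illustrative types, not the rustfs-obs API):

    struct ObsGuard;

    impl Drop for ObsGuard {
        fn drop(&mut self) {
            // In the real guard this would flush/shutdown tracer, meter and logger providers.
            eprintln!("flushing telemetry pipelines");
        }
    }

    async fn init_obs(_endpoint: Option<String>) -> ObsGuard {
        ObsGuard
    }

    #[tokio::main]
    async fn main() {
        // Bind the guard to a name that lives for the whole of main;
        // `let _ = init_obs(...)` would drop it immediately and flush too early.
        let _guard = init_obs(Some("http://localhost:4317".into())).await;
        println!("server running");
    } // _guard dropped here -> telemetry flushed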
@@ -117,7 +117,7 @@ use tokio::{io::AsyncRead, sync::mpsc};
 use tokio_stream::wrappers::ReceiverStream;
 use tokio_tar::Archive;
 use tokio_util::io::{ReaderStream, StreamReader};
-use tracing::{debug, error, info, warn};
+use tracing::{debug, error, info, instrument, warn};
 use uuid::Uuid;
 
 macro_rules! try_ {
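Note: the rest of this diff swaps `#[tracing::instrument(...)]` for the shorter `#[instrument(...)]`; with `instrument` added to the `use tracing::{...}` list above, the two attributes are identical. A compilable sketch:

    use tracing::{info, instrument};

    // Equivalent to #[tracing::instrument(level = "debug")];
    // shown on a free function for brevity rather than a trait method.
    #[instrument(level = "debug")]
    fn lookup(bucket: &str) -> usize {
        info!("inside the span for {bucket}");
        bucket.len()
    }

    fn main() {
        tracing_subscriber::fmt().with_max_level(tracing::Level::DEBUG).init();
        lookup("my-bucket");
    }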
@@ -505,7 +505,7 @@ async fn get_validated_store(bucket: &str) -> S3Result<Arc<rustfs_ecstore::store
 
 #[async_trait::async_trait]
 impl S3 for FS {
-    #[tracing::instrument(
+    #[instrument(
         level = "debug",
         skip(self, req),
         fields(start_time=?time::OffsetDateTime::now_utc())
@@ -538,7 +538,7 @@ impl S3 for FS {
         let event_args = rustfs_notify::event::EventArgs {
             event_name: EventName::BucketCreated,
             bucket_name: bucket.clone(),
-            object: rustfs_ecstore::store_api::ObjectInfo { ..Default::default() },
+            object: ObjectInfo { ..Default::default() },
             req_params: rustfs_utils::extract_req_params_header(&req.headers),
             resp_elements: rustfs_utils::extract_resp_elements(&S3Response::new(output.clone())),
             version_id: String::new(),
@@ -555,7 +555,7 @@ impl S3 for FS {
     }
 
     /// Copy an object from one location to another
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn copy_object(&self, req: S3Request<CopyObjectInput>) -> S3Result<S3Response<CopyObjectOutput>> {
         let CopyObjectInput {
             copy_source,
@@ -709,7 +709,7 @@ impl S3 for FS {
         src_info.put_object_reader = Some(PutObjReader::new(reader));
 
         // check quota
-        // TODO: src metadada
+        // TODO: src metadata
 
         for (k, v) in compress_metadata {
             src_info.user_defined.insert(k, v);
@@ -923,7 +923,7 @@ impl S3 for FS {
     }
 
     /// Delete a bucket
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn delete_bucket(&self, req: S3Request<DeleteBucketInput>) -> S3Result<S3Response<DeleteBucketOutput>> {
         let input = req.input;
         // TODO: DeleteBucketInput doesn't have force parameter?
@@ -945,7 +945,7 @@ impl S3 for FS {
         let event_args = rustfs_notify::event::EventArgs {
             event_name: EventName::BucketRemoved,
             bucket_name: input.bucket,
-            object: rustfs_ecstore::store_api::ObjectInfo { ..Default::default() },
+            object: ObjectInfo { ..Default::default() },
             req_params: rustfs_utils::extract_req_params_header(&req.headers),
             resp_elements: rustfs_utils::extract_resp_elements(&S3Response::new(DeleteBucketOutput {})),
             version_id: String::new(),
@@ -962,7 +962,7 @@ impl S3 for FS {
     }
 
     /// Delete an object
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn delete_object(&self, mut req: S3Request<DeleteObjectInput>) -> S3Result<S3Response<DeleteObjectOutput>> {
         let DeleteObjectInput {
             bucket, key, version_id, ..
@@ -1069,7 +1069,7 @@ impl S3 for FS {
         let event_args = rustfs_notify::event::EventArgs {
             event_name,
             bucket_name: bucket.clone(),
-            object: rustfs_ecstore::store_api::ObjectInfo {
+            object: ObjectInfo {
                 name: key.clone(),
                 bucket: bucket.clone(),
                 ..Default::default()
@@ -1090,7 +1090,7 @@ impl S3 for FS {
     }
 
     /// Delete multiple objects
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn delete_objects(&self, req: S3Request<DeleteObjectsInput>) -> S3Result<S3Response<DeleteObjectsOutput>> {
         let DeleteObjectsInput { bucket, delete, .. } = req.input;
 
@@ -1329,7 +1329,7 @@ impl S3 for FS {
         let event_args = rustfs_notify::event::EventArgs {
             event_name,
             bucket_name: bucket.clone(),
-            object: rustfs_ecstore::store_api::ObjectInfo {
+            object: ObjectInfo {
                 name: dobj.object_name,
                 bucket: bucket.clone(),
                 ..Default::default()
@@ -1350,7 +1350,7 @@ impl S3 for FS {
     }
 
     /// Get bucket location
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn get_bucket_location(&self, req: S3Request<GetBucketLocationInput>) -> S3Result<S3Response<GetBucketLocationOutput>> {
         // mc get 1
         let input = req.input;
@@ -1375,7 +1375,7 @@ impl S3 for FS {
     }
 
     /// Get bucket notification
-    #[tracing::instrument(
+    #[instrument(
         level = "debug",
         skip(self, req),
         fields(start_time=?time::OffsetDateTime::now_utc())
@@ -1435,9 +1435,9 @@ impl S3 for FS {
             .map_err(ApiError::from)?;
 
         let info = reader.object_info;
-        tracing::debug!(object_size = info.size, part_count = info.parts.len(), "GET object metadata snapshot");
+        debug!(object_size = info.size, part_count = info.parts.len(), "GET object metadata snapshot");
         for part in &info.parts {
-            tracing::debug!(
+            debug!(
                 part_number = part.number,
                 part_size = part.size,
                 part_actual_size = part.actual_size,
@@ -1487,7 +1487,7 @@ impl S3 for FS {
         let mut managed_encryption_applied = false;
         let mut managed_original_size: Option<i64> = None;
 
-        tracing::debug!(
+        debug!(
             "GET object metadata check: stored_sse_algorithm={:?}, stored_sse_key_md5={:?}, provided_sse_key={:?}",
             stored_sse_algorithm,
             stored_sse_key_md5,
@@ -1501,20 +1501,16 @@ impl S3 for FS {
             // Each part needs to be decrypted individually, which requires storage layer changes
             // Note: Single part objects also have info.parts.len() == 1, but they are not true multipart uploads
             if info.parts.len() > 1 {
-                tracing::warn!(
+                warn!(
                     "SSE-C multipart object detected with {} parts. Currently, multipart SSE-C upload parts are not encrypted during upload_part, so no decryption is needed during GET.",
                     info.parts.len()
                 );
 
                 // Verify that the provided key MD5 matches the stored MD5 for security
                 if let Some(stored_md5) = stored_sse_key_md5 {
-                    tracing::debug!("SSE-C MD5 comparison: provided='{}', stored='{}'", sse_key_md5_provided, stored_md5);
+                    debug!("SSE-C MD5 comparison: provided='{}', stored='{}'", sse_key_md5_provided, stored_md5);
                     if sse_key_md5_provided != stored_md5 {
-                        tracing::error!(
-                            "SSE-C key MD5 mismatch: provided='{}', stored='{}'",
-                            sse_key_md5_provided,
-                            stored_md5
-                        );
+                        error!("SSE-C key MD5 mismatch: provided='{}', stored='{}'", sse_key_md5_provided, stored_md5);
                         return Err(
                             ApiError::from(StorageError::other("SSE-C key does not match object encryption key")).into()
                         );
@@ -1532,13 +1528,9 @@ impl S3 for FS {
             } else {
                 // Verify that the provided key MD5 matches the stored MD5
                 if let Some(stored_md5) = stored_sse_key_md5 {
-                    tracing::debug!("SSE-C MD5 comparison: provided='{}', stored='{}'", sse_key_md5_provided, stored_md5);
+                    debug!("SSE-C MD5 comparison: provided='{}', stored='{}'", sse_key_md5_provided, stored_md5);
                     if sse_key_md5_provided != stored_md5 {
-                        tracing::error!(
-                            "SSE-C key MD5 mismatch: provided='{}', stored='{}'",
-                            sse_key_md5_provided,
-                            stored_md5
-                        );
+                        error!("SSE-C key MD5 mismatch: provided='{}', stored='{}'", sse_key_md5_provided, stored_md5);
                         return Err(
                             ApiError::from(StorageError::other("SSE-C key does not match object encryption key")).into()
                         );
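Note: for context on the MD5 check above — with SSE-C the client sends the raw key plus an MD5 digest of it, and the server compares that digest with the one recorded at write time before decrypting. A hedged sketch using the `md-5` and `base64` crates; these crate choices are assumptions, and the actual rustfs implementation is not shown in this diff.

    use base64::{Engine as _, engine::general_purpose::STANDARD};
    use md5::{Digest, Md5};

    /// Compute the base64(MD5(key)) form that travels in
    /// x-amz-server-side-encryption-customer-key-MD5.
    fn ssec_key_md5(key: &[u8]) -> String {
        let digest = Md5::new_with_prefix(key).finalize();
        STANDARD.encode(digest)
    }

    fn main() {
        let key = [0u8; 32]; // SSE-C keys are 256-bit
        let provided = ssec_key_md5(&key);
        let stored = ssec_key_md5(&key);
        // Mirrors the check in the hunk above: mismatch -> reject the request.
        assert_eq!(provided, stored, "SSE-C key does not match object encryption key");
        println!("key MD5 ok: {provided}");
    }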
@@ -1613,14 +1605,13 @@ impl S3 for FS {
         let response_content_length = if stored_sse_algorithm.is_some() {
             if let Some(original_size_str) = info.user_defined.get("x-amz-server-side-encryption-customer-original-size") {
                 let original_size = original_size_str.parse::<i64>().unwrap_or(content_length);
-                tracing::info!(
+                info!(
                     "SSE-C decryption: using original size {} instead of encrypted size {}",
-                    original_size,
-                    content_length
+                    original_size, content_length
                 );
                 original_size
             } else {
-                tracing::debug!("SSE-C decryption: no original size found, using content_length {}", content_length);
+                debug!("SSE-C decryption: no original size found, using content_length {}", content_length);
                 content_length
             }
         } else if managed_encryption_applied {
@@ -1629,7 +1620,7 @@ impl S3 for FS {
             content_length
         };
 
-        tracing::info!("Final response_content_length: {}", response_content_length);
+        info!("Final response_content_length: {}", response_content_length);
 
         if stored_sse_algorithm.is_some() || managed_encryption_applied {
             let limit_reader = HardLimitReader::new(Box::new(WarpReader::new(final_stream)), response_content_length);
@@ -1639,7 +1630,7 @@ impl S3 for FS {
         // For SSE-C encrypted objects, don't use bytes_stream to limit the stream
         // because DecryptReader needs to read all encrypted data to produce decrypted output
         let body = if stored_sse_algorithm.is_some() || managed_encryption_applied {
-            tracing::info!("Managed SSE: Using unlimited stream for decryption");
+            info!("Managed SSE: Using unlimited stream for decryption");
             Some(StreamingBlob::wrap(ReaderStream::with_capacity(final_stream, DEFAULT_READ_BUFFER_SIZE)))
         } else {
             Some(StreamingBlob::wrap(bytes_stream(
@@ -1702,7 +1693,7 @@ impl S3 for FS {
         Ok(S3Response::new(output))
     }
 
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn head_bucket(&self, req: S3Request<HeadBucketInput>) -> S3Result<S3Response<HeadBucketOutput>> {
         let input = req.input;
 
@@ -1719,7 +1710,7 @@ impl S3 for FS {
         Ok(S3Response::new(HeadBucketOutput::default()))
     }
 
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn head_object(&self, req: S3Request<HeadObjectInput>) -> S3Result<S3Response<HeadObjectOutput>> {
         // mc get 2
         let HeadObjectInput {
@@ -1838,7 +1829,7 @@ impl S3 for FS {
         Ok(S3Response::new(output))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn list_buckets(&self, req: S3Request<ListBucketsInput>) -> S3Result<S3Response<ListBucketsOutput>> {
         // mc ls
 
@@ -1893,7 +1884,7 @@ impl S3 for FS {
         Ok(S3Response::new(output))
     }
 
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn list_objects(&self, req: S3Request<ListObjectsInput>) -> S3Result<S3Response<ListObjectsOutput>> {
         let v2_resp = self.list_objects_v2(req.map_input(Into::into)).await?;
 
@@ -1909,7 +1900,7 @@ impl S3 for FS {
         }))
     }
 
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn list_objects_v2(&self, req: S3Request<ListObjectsV2Input>) -> S3Result<S3Response<ListObjectsV2Output>> {
         // warn!("list_objects_v2 req {:?}", &req.input);
         let ListObjectsV2Input {
@@ -2082,7 +2073,7 @@ impl S3 for FS {
         Ok(S3Response::new(output))
     }
 
-    // #[tracing::instrument(level = "debug", skip(self, req))]
+    // #[instrument(level = "debug", skip(self, req))]
     async fn put_object(&self, req: S3Request<PutObjectInput>) -> S3Result<S3Response<PutObjectOutput>> {
         if req
             .headers
@@ -2143,17 +2134,17 @@
 
         // TDD: Get bucket default encryption configuration
         let bucket_sse_config = metadata_sys::get_sse_config(&bucket).await.ok();
-        tracing::debug!("TDD: bucket_sse_config={:?}", bucket_sse_config);
+        debug!("TDD: bucket_sse_config={:?}", bucket_sse_config);
 
         // TDD: Determine effective encryption configuration (request overrides bucket default)
         let original_sse = server_side_encryption.clone();
         let effective_sse = server_side_encryption.or_else(|| {
             bucket_sse_config.as_ref().and_then(|(config, _timestamp)| {
-                tracing::debug!("TDD: Processing bucket SSE config: {:?}", config);
+                debug!("TDD: Processing bucket SSE config: {:?}", config);
                 config.rules.first().and_then(|rule| {
-                    tracing::debug!("TDD: Processing SSE rule: {:?}", rule);
+                    debug!("TDD: Processing SSE rule: {:?}", rule);
                     rule.apply_server_side_encryption_by_default.as_ref().map(|sse| {
-                        tracing::debug!("TDD: Found SSE default: {:?}", sse);
+                        debug!("TDD: Found SSE default: {:?}", sse);
                         match sse.sse_algorithm.as_str() {
                             "AES256" => ServerSideEncryption::from_static(ServerSideEncryption::AES256),
                             "aws:kms" => ServerSideEncryption::from_static(ServerSideEncryption::AWS_KMS),
@@ -2163,7 +2154,7 @@ impl S3 for FS {
                 })
             })
         });
-        tracing::debug!("TDD: effective_sse={:?} (original={:?})", effective_sse, original_sse);
+        debug!("TDD: effective_sse={:?} (original={:?})", effective_sse, original_sse);
 
         let mut effective_kms_key_id = ssekms_key_id.or_else(|| {
             bucket_sse_config.as_ref().and_then(|(config, _timestamp)| {
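Note: the `effective_sse` resolution above is an `Option::or_else` chain — the request value wins, otherwise the first bucket rule's default applies. Stripped of project types, the pattern looks like this (toy config shape, assumed for illustration):

    #[derive(Debug, Clone, PartialEq)]
    enum SseAlgorithm {
        Aes256,
        AwsKms,
    }

    struct BucketSseRule {
        default_algorithm: Option<SseAlgorithm>,
    }

    struct BucketSseConfig {
        rules: Vec<BucketSseRule>,
    }

    fn effective_sse(request: Option<SseAlgorithm>, bucket: Option<&BucketSseConfig>) -> Option<SseAlgorithm> {
        // Request parameter takes precedence; fall back to the bucket's first rule.
        request.or_else(|| {
            bucket
                .and_then(|config| config.rules.first())
                .and_then(|rule| rule.default_algorithm.clone())
        })
    }

    fn main() {
        let bucket = BucketSseConfig {
            rules: vec![BucketSseRule { default_algorithm: Some(SseAlgorithm::Aes256) }],
        };
        assert_eq!(effective_sse(None, Some(&bucket)), Some(SseAlgorithm::Aes256));
        assert_eq!(effective_sse(Some(SseAlgorithm::AwsKms), Some(&bucket)), Some(SseAlgorithm::AwsKms));
        println!("override logic ok");
    }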
@@ -2356,7 +2347,7 @@ impl S3 for FS {
         Ok(S3Response::new(output))
     }
 
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn create_multipart_upload(
         &self,
         req: S3Request<CreateMultipartUploadInput>,
@@ -2397,17 +2388,17 @@ impl S3 for FS {
 
         // TDD: Get bucket SSE configuration for multipart upload
         let bucket_sse_config = metadata_sys::get_sse_config(&bucket).await.ok();
-        tracing::debug!("TDD: Got bucket SSE config for multipart: {:?}", bucket_sse_config);
+        debug!("TDD: Got bucket SSE config for multipart: {:?}", bucket_sse_config);
 
         // TDD: Determine effective encryption (request parameters override bucket defaults)
         let original_sse = server_side_encryption.clone();
         let effective_sse = server_side_encryption.or_else(|| {
             bucket_sse_config.as_ref().and_then(|(config, _timestamp)| {
-                tracing::debug!("TDD: Processing bucket SSE config for multipart: {:?}", config);
+                debug!("TDD: Processing bucket SSE config for multipart: {:?}", config);
                 config.rules.first().and_then(|rule| {
-                    tracing::debug!("TDD: Processing SSE rule for multipart: {:?}", rule);
+                    debug!("TDD: Processing SSE rule for multipart: {:?}", rule);
                     rule.apply_server_side_encryption_by_default.as_ref().map(|sse| {
-                        tracing::debug!("TDD: Found SSE default for multipart: {:?}", sse);
+                        debug!("TDD: Found SSE default for multipart: {:?}", sse);
                         match sse.sse_algorithm.as_str() {
                             "AES256" => ServerSideEncryption::from_static(ServerSideEncryption::AES256),
                             "aws:kms" => ServerSideEncryption::from_static(ServerSideEncryption::AWS_KMS),
@@ -2417,7 +2408,7 @@ impl S3 for FS {
                 })
             })
         });
-        tracing::debug!("TDD: effective_sse for multipart={:?} (original={:?})", effective_sse, original_sse);
+        debug!("TDD: effective_sse for multipart={:?} (original={:?})", effective_sse, original_sse);
 
         let _original_kms_key_id = ssekms_key_id.clone();
         let mut effective_kms_key_id = ssekms_key_id.or_else(|| {
@@ -2496,7 +2487,7 @@ impl S3 for FS {
         let event_args = rustfs_notify::event::EventArgs {
             event_name: EventName::ObjectCreatedCompleteMultipartUpload,
             bucket_name: bucket_name.clone(),
-            object: rustfs_ecstore::store_api::ObjectInfo {
+            object: ObjectInfo {
                 name: object_name,
                 bucket: bucket_name,
                 ..Default::default()
@@ -2516,7 +2507,7 @@ impl S3 for FS {
         Ok(S3Response::new(output))
     }
 
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn upload_part(&self, req: S3Request<UploadPartInput>) -> S3Result<S3Response<UploadPartOutput>> {
         let UploadPartInput {
             body,
@@ -2582,7 +2573,7 @@ impl S3 for FS {
             .await?
             .is_some();
 
-        // If managed encryption will be applied and we have Content-Length, buffer the entire body
+        // If managed encryption will be applied, and we have Content-Length, buffer the entire body
         // This is necessary because encryption changes the data size, which causes Content-Length mismatches
         if will_apply_managed_encryption && size.is_some() {
             let mut total = 0i64;
@@ -2687,7 +2678,7 @@ impl S3 for FS {
         Ok(S3Response::new(output))
     }
 
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn upload_part_copy(&self, req: S3Request<UploadPartCopyInput>) -> S3Result<S3Response<UploadPartCopyOutput>> {
         let UploadPartCopyInput {
             bucket,
@@ -2880,7 +2871,7 @@ impl S3 for FS {
         Ok(S3Response::new(output))
     }
 
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn list_parts(&self, req: S3Request<ListPartsInput>) -> S3Result<S3Response<ListPartsOutput>> {
         let ListPartsInput {
             bucket,
@@ -3013,7 +3004,7 @@ impl S3 for FS {
         Ok(S3Response::new(output))
     }
 
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn complete_multipart_upload(
         &self,
         req: S3Request<CompleteMultipartUploadInput>,
@@ -3044,26 +3035,24 @@ impl S3 for FS {
         };
 
         // TDD: Get multipart info to extract encryption configuration before completing
-        tracing::info!(
+        info!(
             "TDD: Attempting to get multipart info for bucket={}, key={}, upload_id={}",
-            bucket,
-            key,
-            upload_id
+            bucket, key, upload_id
         );
         let multipart_info = store
             .get_multipart_info(&bucket, &key, &upload_id, &ObjectOptions::default())
             .await
             .map_err(ApiError::from)?;
 
-        tracing::info!("TDD: Got multipart info successfully");
-        tracing::info!("TDD: Multipart info metadata: {:?}", multipart_info.user_defined);
+        info!("TDD: Got multipart info successfully");
+        info!("TDD: Multipart info metadata: {:?}", multipart_info.user_defined);
 
         // TDD: Extract encryption information from multipart upload metadata
         let server_side_encryption = multipart_info
             .user_defined
             .get("x-amz-server-side-encryption")
             .map(|s| ServerSideEncryption::from(s.clone()));
-        tracing::info!(
+        info!(
             "TDD: Raw encryption from metadata: {:?} -> parsed: {:?}",
             multipart_info.user_defined.get("x-amz-server-side-encryption"),
             server_side_encryption
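Note: `complete_multipart_upload` above recovers the encryption headers from the upload's stored `user_defined` metadata rather than from the complete request, since clients do not resend them at completion. A small sketch of that lookup over a plain map (key names as in the hunk above):

    use std::collections::HashMap;

    fn main() {
        // Stand-in for multipart_info.user_defined, captured at create_multipart_upload time.
        let mut user_defined: HashMap<String, String> = HashMap::new();
        user_defined.insert("x-amz-server-side-encryption".into(), "aws:kms".into());
        user_defined.insert(
            "x-amz-server-side-encryption-aws-kms-key-id".into(),
            "my-key-id".into(), // hypothetical key id, for illustration only
        );

        let sse = user_defined.get("x-amz-server-side-encryption").cloned();
        let kms_key_id = user_defined
            .get("x-amz-server-side-encryption-aws-kms-key-id")
            .cloned();

        // Both end up echoed on CompleteMultipartUploadOutput in the diff above.
        println!("SSE={sse:?}, KMS key={kms_key_id:?}");
    }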
@@ -3074,10 +3063,9 @@ impl S3 for FS {
             .get("x-amz-server-side-encryption-aws-kms-key-id")
             .cloned();
 
-        tracing::info!(
+        info!(
             "TDD: Extracted encryption info - SSE: {:?}, KMS Key: {:?}",
-            server_side_encryption,
-            ssekms_key_id
+            server_side_encryption, ssekms_key_id
         );
 
         let obj_info = store
@@ -3086,10 +3074,9 @@ impl S3 for FS {
             .await
             .map_err(ApiError::from)?;
 
-        tracing::info!(
+        info!(
             "TDD: Creating output with SSE: {:?}, KMS Key: {:?}",
-            server_side_encryption,
-            ssekms_key_id
+            server_side_encryption, ssekms_key_id
         );
         let output = CompleteMultipartUploadOutput {
             bucket: Some(bucket.clone()),
@@ -3100,10 +3087,9 @@ impl S3 for FS {
             ssekms_key_id, // TDD: Return KMS key ID if present
             ..Default::default()
         };
-        tracing::info!(
+        info!(
             "TDD: Created output: SSE={:?}, KMS={:?}",
-            output.server_side_encryption,
-            output.ssekms_key_id
+            output.server_side_encryption, output.ssekms_key_id
         );
 
         let mt2 = HashMap::new();
@@ -3116,15 +3102,14 @@ impl S3 for FS {
             warn!("need multipart replication");
             schedule_replication(obj_info, store, dsc, ReplicationType::Object).await;
         }
-        tracing::info!(
+        info!(
             "TDD: About to return S3Response with output: SSE={:?}, KMS={:?}",
-            output.server_side_encryption,
-            output.ssekms_key_id
+            output.server_side_encryption, output.ssekms_key_id
         );
         Ok(S3Response::new(output))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn abort_multipart_upload(
         &self,
         req: S3Request<AbortMultipartUploadInput>,
@@ -3146,7 +3131,7 @@ impl S3 for FS {
         Ok(S3Response::new(AbortMultipartUploadOutput { ..Default::default() }))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn get_bucket_tagging(&self, req: S3Request<GetBucketTaggingInput>) -> S3Result<S3Response<GetBucketTaggingOutput>> {
         let bucket = req.input.bucket.clone();
         // check bucket exists.
@@ -3169,7 +3154,7 @@ impl S3 for FS {
         Ok(S3Response::new(GetBucketTaggingOutput { tag_set }))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn put_bucket_tagging(&self, req: S3Request<PutBucketTaggingInput>) -> S3Result<S3Response<PutBucketTaggingOutput>> {
         let PutBucketTaggingInput { bucket, tagging, .. } = req.input;
 
@@ -3191,7 +3176,7 @@ impl S3 for FS {
         Ok(S3Response::new(Default::default()))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn delete_bucket_tagging(
         &self,
         req: S3Request<DeleteBucketTaggingInput>,
@@ -3205,7 +3190,7 @@ impl S3 for FS {
         Ok(S3Response::new(DeleteBucketTaggingOutput {}))
     }
 
-    #[tracing::instrument(level = "debug", skip(self, req))]
+    #[instrument(level = "debug", skip(self, req))]
     async fn put_object_tagging(&self, req: S3Request<PutObjectTaggingInput>) -> S3Result<S3Response<PutObjectTaggingOutput>> {
         let PutObjectTaggingInput {
             bucket,
@@ -3270,7 +3255,7 @@ impl S3 for FS {
         let event_args = rustfs_notify::event::EventArgs {
             event_name: EventName::ObjectCreatedPutTagging,
             bucket_name: bucket.clone(),
-            object: rustfs_ecstore::store_api::ObjectInfo {
+            object: ObjectInfo {
                 name: object.clone(),
                 bucket,
                 ..Default::default()
@@ -3290,7 +3275,7 @@ impl S3 for FS {
         Ok(S3Response::new(PutObjectTaggingOutput { version_id: None }))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn get_object_tagging(&self, req: S3Request<GetObjectTaggingInput>) -> S3Result<S3Response<GetObjectTaggingOutput>> {
         let GetObjectTaggingInput { bucket, key: object, .. } = req.input;
 
@@ -3312,7 +3297,7 @@ impl S3 for FS {
         }))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
    async fn delete_object_tagging(
         &self,
         req: S3Request<DeleteObjectTaggingInput>,
@@ -3337,7 +3322,7 @@ impl S3 for FS {
         let event_args = rustfs_notify::event::EventArgs {
             event_name: EventName::ObjectCreatedDeleteTagging,
             bucket_name: bucket.clone(),
-            object: rustfs_ecstore::store_api::ObjectInfo {
+            object: ObjectInfo {
                 name: object.clone(),
                 bucket,
                 ..Default::default()
@@ -3357,7 +3342,7 @@ impl S3 for FS {
         Ok(S3Response::new(DeleteObjectTaggingOutput { version_id: None }))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn get_bucket_versioning(
         &self,
         req: S3Request<GetBucketVersioningInput>,
@@ -3380,7 +3365,7 @@ impl S3 for FS {
         }))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn put_bucket_versioning(
         &self,
         req: S3Request<PutBucketVersioningInput>,
@@ -3537,7 +3522,7 @@ impl S3 for FS {
         Ok(S3Response::new(DeleteBucketPolicyOutput {}))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn get_bucket_lifecycle_configuration(
         &self,
         req: S3Request<GetBucketLifecycleConfigurationInput>,
@@ -3570,7 +3555,7 @@ impl S3 for FS {
         }))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn put_bucket_lifecycle_configuration(
         &self,
         req: S3Request<PutBucketLifecycleConfigurationInput>,
@@ -3604,7 +3589,7 @@ impl S3 for FS {
         Ok(S3Response::new(PutBucketLifecycleConfigurationOutput::default()))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn delete_bucket_lifecycle(
         &self,
         req: S3Request<DeleteBucketLifecycleInput>,
@@ -3709,7 +3694,7 @@ impl S3 for FS {
         Ok(S3Response::new(DeleteBucketEncryptionOutput::default()))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn get_object_lock_configuration(
         &self,
         req: S3Request<GetObjectLockConfigurationInput>,
@@ -3731,7 +3716,7 @@ impl S3 for FS {
         }))
     }
 
-    #[tracing::instrument(level = "debug", skip(self))]
+    #[instrument(level = "debug", skip(self))]
     async fn put_object_lock_configuration(
         &self,
         req: S3Request<PutObjectLockConfigurationInput>,
@@ -3777,7 +3762,7 @@ impl S3 for FS {
             .await
             .map_err(ApiError::from)?;
 
-        let rcfg = match metadata_sys::get_replication_config(&bucket).await {
+        let rcfg = match get_replication_config(&bucket).await {
             Ok((cfg, _created)) => Some(cfg),
             Err(err) => {
                 error!("get_replication_config err {:?}", err);
@@ -4107,7 +4092,7 @@ impl S3 for FS {
         let event_args = rustfs_notify::event::EventArgs {
             event_name: EventName::ObjectAccessedAttributes,
             bucket_name: bucket.clone(),
-            object: rustfs_ecstore::store_api::ObjectInfo {
+            object: ObjectInfo {
                 name: key.clone(),
                 bucket,
                 ..Default::default()
@@ -68,11 +68,6 @@ export RUSTFS_OBS_LOG_POOL_CAPA=10240
 export RUSTFS_OBS_LOG_MESSAGE_CAPA=32768
 export RUSTFS_OBS_LOG_FLUSH_MS=300
 
-export RUSTFS_SINKS_FILE_PATH="$current_dir/deploy/logs"
-export RUSTFS_SINKS_FILE_BUFFER_SIZE=12
-export RUSTFS_SINKS_FILE_FLUSH_INTERVAL_MS=1000
-export RUSTFS_SINKS_FILE_FLUSH_THRESHOLD=100
-
 #tokio runtime
 export RUSTFS_RUNTIME_WORKER_THREADS=16
 export RUSTFS_RUNTIME_MAX_BLOCKING_THREADS=1024
@@ -82,21 +77,7 @@ export RUSTFS_RUNTIME_THREAD_STACK_SIZE=1024*1024
 export RUSTFS_RUNTIME_THREAD_KEEP_ALIVE=60
 export RUSTFS_RUNTIME_GLOBAL_QUEUE_INTERVAL=31
 
-#
-# Kafka sink configuration
-#export RUSTFS_SINKS_KAFKA_BROKERS=localhost:9092
-#export RUSTFS_SINKS_KAFKA_TOPIC=logs
-#export RUSTFS_SINKS_KAFKA_BATCH_SIZE=100
-#export RUSTFS_SINKS_KAFKA_BATCH_TIMEOUT_MS=1000
-#
-# Webhook sink configuration
-#export RUSTFS_SINKS_WEBHOOK_ENDPOINT=http://localhost:8080/webhook
-#export RUSTFS_SINKS_WEBHOOK_AUTH_TOKEN=you-auth-token
-#export RUSTFS_SINKS_WEBHOOK_BATCH_SIZE=100
-#export RUSTFS_SINKS_WEBHOOK_BATCH_TIMEOUT_MS=1000
-#
-#export RUSTFS_LOGGER_QUEUE_CAPACITY=10
-
 export OTEL_INSTRUMENTATION_NAME="rustfs"
 export OTEL_INSTRUMENTATION_VERSION="0.1.1"
 export OTEL_INSTRUMENTATION_SCHEMA_URL="https://opentelemetry.io/schemas/1.31.0"
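Note: the `RUSTFS_RUNTIME_*` exports kept above shape the Tokio runtime. A hedged sketch of how such values can feed `tokio::runtime::Builder`; the variable names match the script, but the mapping to rustfs's actual startup code is an assumption.

    use std::env;

    fn env_usize(key: &str, default: usize) -> usize {
        env::var(key).ok().and_then(|v| v.parse().ok()).unwrap_or(default)
    }

    fn main() {
        let runtime = tokio::runtime::Builder::new_multi_thread()
            .worker_threads(env_usize("RUSTFS_RUNTIME_WORKER_THREADS", 16))
            .max_blocking_threads(env_usize("RUSTFS_RUNTIME_MAX_BLOCKING_THREADS", 1024))
            .enable_all()
            .build()
            .expect("failed to build tokio runtime");

        runtime.block_on(async {
            println!("runtime up with configured thread counts");
        });
    }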