Reconstructing Notify module

houseme
2025-06-19 15:40:48 +08:00
parent e6b019c29d
commit c658d88d25
51 changed files with 5845 additions and 4469 deletions

Cargo.lock generated
View File

@@ -506,6 +506,16 @@ dependencies = [
"zbus 5.7.1",
]
[[package]]
name = "assert-json-diff"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "async-broadcast"
version = "0.7.2"
@@ -1829,6 +1839,15 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
[[package]]
name = "colored"
version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fde0e0ec90c9dfb3b4b1a0891a7dcd0e2bffde2f7efed5fe7c9bb00e5bfb915e"
dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "combine"
version = "4.6.7"
@@ -3545,12 +3564,6 @@ dependencies = [
"syn 2.0.103",
]
[[package]]
name = "dotenvy"
version = "0.15.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"
[[package]]
name = "dpi"
version = "0.1.2"
@@ -3578,12 +3591,6 @@ version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813"
[[package]]
name = "dyn-clone"
version = "1.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c7a8fb8a9fbf66c1f703fe16184d10ca0ee9d23be5b4436400408ba54a95005"
[[package]]
name = "e2e_test"
version = "0.0.1"
@@ -5139,7 +5146,6 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
dependencies = [
"autocfg",
"hashbrown 0.12.3",
"serde",
]
[[package]]
@@ -5150,7 +5156,6 @@ checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e"
dependencies = [
"equivalent",
"hashbrown 0.15.4",
"serde",
]
[[package]]
@@ -5984,6 +5989,30 @@ dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "mockito"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7760e0e418d9b7e5777c0374009ca4c93861b9066f18cb334a20ce50ab63aa48"
dependencies = [
"assert-json-diff",
"bytes",
"colored",
"futures-util",
"http 1.3.1",
"http-body 1.0.1",
"http-body-util",
"hyper 1.6.0",
"hyper-util",
"log",
"rand 0.9.1",
"regex",
"serde_json",
"serde_urlencoded",
"similar",
"tokio",
]
[[package]]
name = "muda"
version = "0.11.5"
@@ -7581,6 +7610,7 @@ checksum = "331e97a1af0bf59823e6eadffe373d7b27f485be8748f71471c662c1f269b7fb"
dependencies = [
"memchr",
"serde",
"tokio",
]
[[package]]
@@ -7914,26 +7944,6 @@ dependencies = [
"readme-rustdocifier",
]
[[package]]
name = "ref-cast"
version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a0ae411dbe946a674d89546582cea4ba2bb8defac896622d6496f14c23ba5cf"
dependencies = [
"ref-cast-impl",
]
[[package]]
name = "ref-cast-impl"
version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.103",
]
[[package]]
name = "regex"
version = "1.11.1"
@@ -8371,32 +8381,6 @@ dependencies = [
"serde_json",
]
[[package]]
name = "rustfs-event"
version = "0.0.1"
dependencies = [
"async-trait",
"common",
"ecstore",
"once_cell",
"reqwest",
"rumqttc",
"rustfs-config",
"rustfs-notify",
"serde",
"serde_json",
"serde_with",
"smallvec",
"strum",
"thiserror 2.0.12",
"tokio",
"tokio-util",
"tracing",
"url",
"urlencoding",
"uuid",
]
[[package]]
name = "rustfs-filemeta"
version = "0.0.1"
@@ -8444,25 +8428,27 @@ version = "0.0.1"
dependencies = [
"async-trait",
"axum",
"common",
"dotenvy",
"chrono",
"const-str",
"ecstore",
"libc",
"mockito",
"once_cell",
"quick-xml",
"reqwest",
"rumqttc",
"rustfs-config",
"serde",
"serde_json",
"serde_with",
"smallvec",
"snap",
"strum",
"thiserror 2.0.12",
"tokio",
"tokio-util",
"tracing",
"tracing-subscriber",
"url",
"urlencoding",
"uuid",
"wildmatch",
]
[[package]]
@@ -8845,18 +8831,6 @@ dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "schemars"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f"
dependencies = [
"dyn-clone",
"ref-cast",
"serde",
"serde_json",
]
[[package]]
name = "scopeguard"
version = "1.2.0"
@@ -9105,37 +9079,6 @@ dependencies = [
"serde",
]
[[package]]
name = "serde_with"
version = "3.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42"
dependencies = [
"base64 0.22.1",
"chrono",
"hex",
"indexmap 1.9.3",
"indexmap 2.9.0",
"schemars",
"serde",
"serde_derive",
"serde_json",
"serde_with_macros",
"time",
]
[[package]]
name = "serde_with_macros"
version = "3.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77"
dependencies = [
"darling",
"proc-macro2",
"quote",
"syn 2.0.103",
]
[[package]]
name = "serde_yaml"
version = "0.9.34+deprecated"
@@ -9345,6 +9288,12 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
[[package]]
name = "similar"
version = "2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa"
[[package]]
name = "simple_asn1"
version = "0.6.3"
@@ -11081,6 +11030,15 @@ dependencies = [
"rustix 0.38.44",
]
[[package]]
name = "wildmatch"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68ce1ab1f8c62655ebe1350f589c61e505cf94d385bc6a12899442d9081e71fd"
dependencies = [
"serde",
]
[[package]]
name = "winapi"
version = "0.3.9"

View File

@@ -3,13 +3,14 @@ members = [
"appauth", # Application authentication and authorization
"cli/rustfs-gui", # Graphical user interface client
"common/common", # Shared utilities and data structures
"crates/filemeta", # File metadata management
"common/lock", # Distributed locking implementation
"common/protos", # Protocol buffer definitions
"common/workers", # Worker thread pools and task scheduling
"crates/config", # Configuration management
"crates/event", # Event handling and processing
"crates/notify", # Notification system for events
"crates/obs", # Observability utilities
"crates/rio", # Rust I/O utilities and abstractions
"crates/utils", # Utility functions and helpers
"crates/zip", # ZIP file handling and compression
"crypto", # Cryptography and security features
@@ -20,9 +21,8 @@ members = [
"rustfs", # Core file system implementation
"s3select/api", # S3 Select API interface
"s3select/query", # S3 Select query engine
"crates/zip",
"crates/filemeta",
"crates/rio",
]
resolver = "2"
@@ -121,12 +121,14 @@ keyring = { version = "3.6.2", features = [
"sync-secret-service",
] }
lazy_static = "1.5.0"
libc = "0.2.174"
libsystemd = { version = "0.7.2" }
local-ip-address = "0.6.5"
matchit = "0.8.4"
md-5 = "0.10.6"
mime = "0.3.17"
mime_guess = "2.0.5"
mockito = "1.7.0"
netif = "0.1.6"
nix = { version = "0.30.1", features = ["fs"] }
nu-ansi-term = "0.50.1"
@@ -159,6 +161,7 @@ pin-project-lite = "0.2.16"
prost = "0.13.5"
prost-build = "0.13.5"
protobuf = "3.7"
quick-xml = "0.37.5"
rand = "0.9.1"
brotli = "8.0.1"
flate2 = "1.1.1"
@@ -241,6 +244,7 @@ uuid = { version = "1.17.0", features = [
"fast-rng",
"macro-diagnostics",
] }
wildmatch = { version = "2.4.0", features = ["serde"] }
winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
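Of the new workspace dependencies, wildmatch (pulled in with its serde feature) is the most telling: the rebuilt notify crate presumably uses it to match notification rules against object keys. A minimal sketch of the matching it provides:

use wildmatch::WildMatch;

fn main() {
    // Wildcard patterns of the kind used when matching event rules to keys.
    let pattern = WildMatch::new("images/*");
    assert!(pattern.matches("images/cat.png"));
    assert!(!pattern.matches("docs/readme.md"));
}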

View File

@@ -1,32 +0,0 @@
[package]
name = "rustfs-event"
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
[dependencies]
rustfs-config = { workspace = true, features = ["constants", "notify"] }
rustfs-notify = { workspace = true }
async-trait = { workspace = true }
common = { workspace = true }
ecstore = { workspace = true }
once_cell = { workspace = true }
reqwest = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_with = { workspace = true }
smallvec = { workspace = true, features = ["serde"] }
strum = { workspace = true, features = ["derive"] }
tracing = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["sync", "net", "macros", "signal", "rt-multi-thread"] }
tokio-util = { workspace = true }
uuid = { workspace = true, features = ["v4", "serde"] }
url = { workspace = true }
urlencoding = { workspace = true }
rumqttc = { workspace = true }
[lints]
workspace = true

View File

@@ -1,37 +0,0 @@
use thiserror::Error;
use tokio::sync::mpsc::error;
use tokio::task::JoinError;
/// The `Error` enum represents all possible errors that can occur in the application.
/// It implements the `std::error::Error` trait and provides a way to convert various error types into a single error type.
#[derive(Error, Debug)]
pub enum Error {
#[error("Join error: {0}")]
JoinError(#[from] JoinError),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Serialization error: {0}")]
Serde(#[from] serde_json::Error),
#[error("Channel send error: {0}")]
ChannelSend(#[from] Box<error::SendError<crate::event::Event>>),
#[error("Feature disabled: {0}")]
FeatureDisabled(&'static str),
#[error("Event bus already started")]
EventBusStarted,
#[error("necessary fields are missing:{0}")]
MissingField(&'static str),
#[error("field verification failed:{0}")]
ValidationError(&'static str),
#[error("Custom error: {0}")]
Custom(String),
#[error("Configuration error: {0}")]
ConfigError(String),
#[error("create adapter failed error: {0}")]
AdapterCreationFailed(String),
}
impl Error {
pub fn custom(msg: &str) -> Error {
Self::Custom(msg.to_string())
}
}
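For context on the error type removed above: the #[from] attributes are what let callers convert underlying errors with ?. A self-contained sketch of the same pattern (the names here are illustrative, not this crate's API):

use thiserror::Error;

#[derive(Error, Debug)]
enum DemoError {
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
    #[error("Serialization error: {0}")]
    Serde(#[from] serde_json::Error),
}

// `#[from]` generates the From impls that make the `?` conversions work.
fn read_config(path: &str) -> Result<serde_json::Value, DemoError> {
    let text = std::fs::read_to_string(path)?; // std::io::Error -> DemoError::Io
    Ok(serde_json::from_str(&text)?) // serde_json::Error -> DemoError::Serde
}

fn main() {
    if let Err(e) = read_config("missing.json") {
        eprintln!("{e}");
    }
}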

View File

@@ -1,616 +0,0 @@
use crate::error::Error;
use serde::{Deserialize, Serialize};
use serde_with::{DeserializeFromStr, SerializeDisplay};
use smallvec::{smallvec, SmallVec};
use std::collections::HashMap;
use std::time::{SystemTime, UNIX_EPOCH};
use strum::{Display, EnumString};
use uuid::Uuid;
/// A struct representing the identity of the user
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Identity {
#[serde(rename = "principalId")]
pub principal_id: String,
}
impl Identity {
/// Create a new Identity instance
pub fn new(principal_id: String) -> Self {
Self { principal_id }
}
/// Set the principal ID
pub fn set_principal_id(&mut self, principal_id: String) {
self.principal_id = principal_id;
}
}
/// A struct representing the bucket information
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Bucket {
pub name: String,
#[serde(rename = "ownerIdentity")]
pub owner_identity: Identity,
pub arn: String,
}
impl Bucket {
/// Create a new Bucket instance
pub fn new(name: String, owner_identity: Identity, arn: String) -> Self {
Self {
name,
owner_identity,
arn,
}
}
/// Set the name of the bucket
pub fn set_name(&mut self, name: String) {
self.name = name;
}
/// Set the ARN of the bucket
pub fn set_arn(&mut self, arn: String) {
self.arn = arn;
}
/// Set the owner identity of the bucket
pub fn set_owner_identity(&mut self, owner_identity: Identity) {
self.owner_identity = owner_identity;
}
}
/// A struct representing the object information
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Object {
pub key: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub size: Option<i64>,
#[serde(default, skip_serializing_if = "Option::is_none", rename = "eTag")]
pub etag: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none", rename = "contentType")]
pub content_type: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none", rename = "userMetadata")]
pub user_metadata: Option<HashMap<String, String>>,
#[serde(default, skip_serializing_if = "Option::is_none", rename = "versionId")]
pub version_id: Option<String>,
pub sequencer: String,
}
impl Object {
/// Create a new Object instance
pub fn new(
key: String,
size: Option<i64>,
etag: Option<String>,
content_type: Option<String>,
user_metadata: Option<HashMap<String, String>>,
version_id: Option<String>,
sequencer: String,
) -> Self {
Self {
key,
size,
etag,
content_type,
user_metadata,
version_id,
sequencer,
}
}
/// Set the key
pub fn set_key(&mut self, key: String) {
self.key = key;
}
/// Set the size
pub fn set_size(&mut self, size: Option<i64>) {
self.size = size;
}
/// Set the etag
pub fn set_etag(&mut self, etag: Option<String>) {
self.etag = etag;
}
/// Set the content type
pub fn set_content_type(&mut self, content_type: Option<String>) {
self.content_type = content_type;
}
/// Set the user metadata
pub fn set_user_metadata(&mut self, user_metadata: Option<HashMap<String, String>>) {
self.user_metadata = user_metadata;
}
/// Set the version ID
pub fn set_version_id(&mut self, version_id: Option<String>) {
self.version_id = version_id;
}
/// Set the sequencer
pub fn set_sequencer(&mut self, sequencer: String) {
self.sequencer = sequencer;
}
}
/// A struct representing the metadata of the event
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Metadata {
#[serde(rename = "s3SchemaVersion")]
pub schema_version: String,
#[serde(rename = "configurationId")]
pub configuration_id: String,
pub bucket: Bucket,
pub object: Object,
}
impl Default for Metadata {
fn default() -> Self {
Self::new()
}
}
impl Metadata {
/// Create a new Metadata instance with default values
pub fn new() -> Self {
Self {
schema_version: "1.0".to_string(),
configuration_id: "default".to_string(),
bucket: Bucket::new(
"default".to_string(),
Identity::new("default".to_string()),
"arn:aws:s3:::default".to_string(),
),
object: Object::new("default".to_string(), None, None, None, None, None, "default".to_string()),
}
}
/// Create a new Metadata instance
pub fn create(schema_version: String, configuration_id: String, bucket: Bucket, object: Object) -> Self {
Self {
schema_version,
configuration_id,
bucket,
object,
}
}
/// Set the schema version
pub fn set_schema_version(&mut self, schema_version: String) {
self.schema_version = schema_version;
}
/// Set the configuration ID
pub fn set_configuration_id(&mut self, configuration_id: String) {
self.configuration_id = configuration_id;
}
/// Set the bucket
pub fn set_bucket(&mut self, bucket: Bucket) {
self.bucket = bucket;
}
/// Set the object
pub fn set_object(&mut self, object: Object) {
self.object = object;
}
}
/// A struct representing the source of the event
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Source {
pub host: String,
pub port: String,
#[serde(rename = "userAgent")]
pub user_agent: String,
}
impl Source {
/// Create a new Source instance
pub fn new(host: String, port: String, user_agent: String) -> Self {
Self { host, port, user_agent }
}
/// Set the host
pub fn set_host(&mut self, host: String) {
self.host = host;
}
/// Set the port
pub fn set_port(&mut self, port: String) {
self.port = port;
}
/// Set the user agent
pub fn set_user_agent(&mut self, user_agent: String) {
self.user_agent = user_agent;
}
}
/// Builder for creating an Event.
///
/// This struct is used to build an Event object with various parameters.
/// It provides methods to set each parameter and a build method to create the Event.
#[derive(Default, Clone)]
pub struct EventBuilder {
event_version: Option<String>,
event_source: Option<String>,
aws_region: Option<String>,
event_time: Option<String>,
event_name: Option<Name>,
user_identity: Option<Identity>,
request_parameters: Option<HashMap<String, String>>,
response_elements: Option<HashMap<String, String>>,
s3: Option<Metadata>,
source: Option<Source>,
channels: Option<SmallVec<[String; 2]>>,
}
impl EventBuilder {
/// Create a builder pre-filled with default values
pub fn new() -> Self {
Self {
event_version: Some("2.0".to_string()),
event_source: Some("aws:s3".to_string()),
aws_region: Some("us-east-1".to_string()),
event_time: Some(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs().to_string()),
event_name: None,
user_identity: Some(Identity {
principal_id: "anonymous".to_string(),
}),
request_parameters: Some(HashMap::new()),
response_elements: Some(HashMap::new()),
s3: None,
source: None,
channels: Some(Vec::new().into()),
}
}
/// verify and set the event version
pub fn event_version(mut self, event_version: impl Into<String>) -> Self {
let event_version = event_version.into();
if !event_version.is_empty() {
self.event_version = Some(event_version);
}
self
}
/// verify and set the event source
pub fn event_source(mut self, event_source: impl Into<String>) -> Self {
let event_source = event_source.into();
if !event_source.is_empty() {
self.event_source = Some(event_source);
}
self
}
/// set up aws regions
pub fn aws_region(mut self, aws_region: impl Into<String>) -> Self {
self.aws_region = Some(aws_region.into());
self
}
/// set event time
pub fn event_time(mut self, event_time: impl Into<String>) -> Self {
self.event_time = Some(event_time.into());
self
}
/// set event name
pub fn event_name(mut self, event_name: Name) -> Self {
self.event_name = Some(event_name);
self
}
/// set user identity
pub fn user_identity(mut self, user_identity: Identity) -> Self {
self.user_identity = Some(user_identity);
self
}
/// set request parameters
pub fn request_parameters(mut self, request_parameters: HashMap<String, String>) -> Self {
self.request_parameters = Some(request_parameters);
self
}
/// set response elements
pub fn response_elements(mut self, response_elements: HashMap<String, String>) -> Self {
self.response_elements = Some(response_elements);
self
}
/// setting up s3 metadata
pub fn s3(mut self, s3: Metadata) -> Self {
self.s3 = Some(s3);
self
}
/// set event source information
pub fn source(mut self, source: Source) -> Self {
self.source = Some(source);
self
}
/// set up the sending channel
pub fn channels(mut self, channels: Vec<String>) -> Self {
self.channels = Some(channels.into());
self
}
/// Create a preconfigured builder for common object event scenarios
pub fn for_object_creation(s3: Metadata, source: Source) -> Self {
Self::new().event_name(Name::ObjectCreatedPut).s3(s3).source(source)
}
/// Create a preconfigured builder for object deletion events
pub fn for_object_removal(s3: Metadata, source: Source) -> Self {
Self::new().event_name(Name::ObjectRemovedDelete).s3(s3).source(source)
}
/// build event instance
///
/// Verify the required fields and create a complete Event object
pub fn build(self) -> Result<Event, Error> {
let event_version = self.event_version.ok_or(Error::MissingField("event_version"))?;
let event_source = self.event_source.ok_or(Error::MissingField("event_source"))?;
let aws_region = self.aws_region.ok_or(Error::MissingField("aws_region"))?;
let event_time = self.event_time.ok_or(Error::MissingField("event_time"))?;
let event_name = self.event_name.ok_or(Error::MissingField("event_name"))?;
let user_identity = self.user_identity.ok_or(Error::MissingField("user_identity"))?;
let request_parameters = self.request_parameters.unwrap_or_default();
let response_elements = self.response_elements.unwrap_or_default();
let s3 = self.s3.ok_or(Error::MissingField("s3"))?;
let source = self.source.ok_or(Error::MissingField("source"))?;
let channels = self.channels.unwrap_or_else(|| smallvec![]);
Ok(Event {
event_version,
event_source,
aws_region,
event_time,
event_name,
user_identity,
request_parameters,
response_elements,
s3,
source,
id: Uuid::new_v4(),
timestamp: SystemTime::now(),
channels,
})
}
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Event {
#[serde(rename = "eventVersion")]
pub event_version: String,
#[serde(rename = "eventSource")]
pub event_source: String,
#[serde(rename = "awsRegion")]
pub aws_region: String,
#[serde(rename = "eventTime")]
pub event_time: String,
#[serde(rename = "eventName")]
pub event_name: Name,
#[serde(rename = "userIdentity")]
pub user_identity: Identity,
#[serde(rename = "requestParameters")]
pub request_parameters: HashMap<String, String>,
#[serde(rename = "responseElements")]
pub response_elements: HashMap<String, String>,
pub s3: Metadata,
pub source: Source,
pub id: Uuid,
pub timestamp: SystemTime,
pub channels: SmallVec<[String; 2]>,
}
impl Event {
/// create a new event builder
///
/// Returns an EventBuilder instance pre-filled with default values
pub fn builder() -> EventBuilder {
EventBuilder::new()
}
/// Quickly create Event instances with necessary fields
///
/// suitable for common s3 event scenarios
pub fn create(event_name: Name, s3: Metadata, source: Source, channels: Vec<String>) -> Self {
Self::builder()
.event_name(event_name)
.s3(s3)
.source(source)
.channels(channels)
.build()
.expect("Failed to create event, missing necessary parameters")
}
/// a convenient way to create a preconfigured builder
pub fn for_object_creation(s3: Metadata, source: Source) -> EventBuilder {
EventBuilder::for_object_creation(s3, source)
}
/// a convenient way to create a preconfigured builder
pub fn for_object_removal(s3: Metadata, source: Source) -> EventBuilder {
EventBuilder::for_object_removal(s3, source)
}
/// Determine whether an event belongs to a specific type
pub fn is_type(&self, event_type: Name) -> bool {
let mask = event_type.mask();
(self.event_name.mask() & mask) != 0
}
/// Determine whether an event needs to be sent to a specific channel
pub fn is_for_channel(&self, channel: &str) -> bool {
self.channels.iter().any(|c| c == channel)
}
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Log {
#[serde(rename = "eventName")]
pub event_name: Name,
pub key: String,
pub records: Vec<Event>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, SerializeDisplay, DeserializeFromStr, Display, EnumString)]
#[strum(serialize_all = "SCREAMING_SNAKE_CASE")]
pub enum Name {
ObjectAccessedGet = 1, // explicit 1-based discriminants keep mask() and from_repr() consistent
ObjectAccessedGetRetention,
ObjectAccessedGetLegalHold,
ObjectAccessedHead,
ObjectAccessedAttributes,
ObjectCreatedCompleteMultipartUpload,
ObjectCreatedCopy,
ObjectCreatedPost,
ObjectCreatedPut,
ObjectCreatedPutRetention,
ObjectCreatedPutLegalHold,
ObjectCreatedPutTagging,
ObjectCreatedDeleteTagging,
ObjectRemovedDelete,
ObjectRemovedDeleteMarkerCreated,
ObjectRemovedDeleteAllVersions,
ObjectRemovedNoOp,
BucketCreated,
BucketRemoved,
ObjectReplicationFailed,
ObjectReplicationComplete,
ObjectReplicationMissedThreshold,
ObjectReplicationReplicatedAfterThreshold,
ObjectReplicationNotTracked,
ObjectRestorePost,
ObjectRestoreCompleted,
ObjectTransitionFailed,
ObjectTransitionComplete,
ObjectManyVersions,
ObjectLargeVersions,
PrefixManyFolders,
IlmDelMarkerExpirationDelete,
ObjectAccessedAll,
ObjectCreatedAll,
ObjectRemovedAll,
ObjectReplicationAll,
ObjectRestoreAll,
ObjectTransitionAll,
ObjectScannerAll,
Everything,
}
impl Name {
pub fn expand(&self) -> Vec<Name> {
match self {
Name::ObjectAccessedAll => vec![
Name::ObjectAccessedGet,
Name::ObjectAccessedHead,
Name::ObjectAccessedGetRetention,
Name::ObjectAccessedGetLegalHold,
Name::ObjectAccessedAttributes,
],
Name::ObjectCreatedAll => vec![
Name::ObjectCreatedCompleteMultipartUpload,
Name::ObjectCreatedCopy,
Name::ObjectCreatedPost,
Name::ObjectCreatedPut,
Name::ObjectCreatedPutRetention,
Name::ObjectCreatedPutLegalHold,
Name::ObjectCreatedPutTagging,
Name::ObjectCreatedDeleteTagging,
],
Name::ObjectRemovedAll => vec![
Name::ObjectRemovedDelete,
Name::ObjectRemovedDeleteMarkerCreated,
Name::ObjectRemovedNoOp,
Name::ObjectRemovedDeleteAllVersions,
],
Name::ObjectReplicationAll => vec![
Name::ObjectReplicationFailed,
Name::ObjectReplicationComplete,
Name::ObjectReplicationNotTracked,
Name::ObjectReplicationMissedThreshold,
Name::ObjectReplicationReplicatedAfterThreshold,
],
Name::ObjectRestoreAll => vec![Name::ObjectRestorePost, Name::ObjectRestoreCompleted],
Name::ObjectTransitionAll => {
vec![Name::ObjectTransitionFailed, Name::ObjectTransitionComplete]
}
Name::ObjectScannerAll => vec![Name::ObjectManyVersions, Name::ObjectLargeVersions, Name::PrefixManyFolders],
Name::Everything => (1..=Name::IlmDelMarkerExpirationDelete as u32)
.map(|i| Name::from_repr(i).unwrap())
.collect(),
_ => vec![*self],
}
}
pub fn mask(&self) -> u64 {
if (*self as u32) < Name::ObjectAccessedAll as u32 {
1 << (*self as u32 - 1)
} else {
self.expand().iter().fold(0, |acc, n| acc | (1 << (*n as u32 - 1)))
}
}
fn from_repr(discriminant: u32) -> Option<Self> {
match discriminant {
1 => Some(Name::ObjectAccessedGet),
2 => Some(Name::ObjectAccessedGetRetention),
3 => Some(Name::ObjectAccessedGetLegalHold),
4 => Some(Name::ObjectAccessedHead),
5 => Some(Name::ObjectAccessedAttributes),
6 => Some(Name::ObjectCreatedCompleteMultipartUpload),
7 => Some(Name::ObjectCreatedCopy),
8 => Some(Name::ObjectCreatedPost),
9 => Some(Name::ObjectCreatedPut),
10 => Some(Name::ObjectCreatedPutRetention),
11 => Some(Name::ObjectCreatedPutLegalHold),
12 => Some(Name::ObjectCreatedPutTagging),
13 => Some(Name::ObjectCreatedDeleteTagging),
14 => Some(Name::ObjectRemovedDelete),
15 => Some(Name::ObjectRemovedDeleteMarkerCreated),
16 => Some(Name::ObjectRemovedDeleteAllVersions),
17 => Some(Name::ObjectRemovedNoOp),
18 => Some(Name::BucketCreated),
19 => Some(Name::BucketRemoved),
20 => Some(Name::ObjectReplicationFailed),
21 => Some(Name::ObjectReplicationComplete),
22 => Some(Name::ObjectReplicationMissedThreshold),
23 => Some(Name::ObjectReplicationReplicatedAfterThreshold),
24 => Some(Name::ObjectReplicationNotTracked),
25 => Some(Name::ObjectRestorePost),
26 => Some(Name::ObjectRestoreCompleted),
27 => Some(Name::ObjectTransitionFailed),
28 => Some(Name::ObjectTransitionComplete),
29 => Some(Name::ObjectManyVersions),
30 => Some(Name::ObjectLargeVersions),
31 => Some(Name::PrefixManyFolders),
32 => Some(Name::IlmDelMarkerExpirationDelete),
33 => Some(Name::ObjectAccessedAll),
34 => Some(Name::ObjectCreatedAll),
35 => Some(Name::ObjectRemovedAll),
36 => Some(Name::ObjectReplicationAll),
37 => Some(Name::ObjectRestoreAll),
38 => Some(Name::ObjectTransitionAll),
39 => Some(Name::ObjectScannerAll),
40 => Some(Name::Everything),
_ => None,
}
}
}
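The mask/expand scheme above is easiest to see on a reduced model: with 1-based discriminants, a concrete variant contributes a single bit, a compound variant folds the bits of everything it expands to, and Event::is_type reduces to a bitwise AND. A sketch, not the removed crate's code:

#[derive(Clone, Copy)]
enum Kind {
    Put = 1,
    Copy = 2,
    CreatedAll = 3, // compound variant
}

impl Kind {
    fn expand(self) -> Vec<Kind> {
        match self {
            Kind::CreatedAll => vec![Kind::Put, Kind::Copy],
            k => vec![k],
        }
    }
    fn mask(self) -> u64 {
        if (self as u32) < Kind::CreatedAll as u32 {
            1 << (self as u32 - 1)
        } else {
            self.expand().iter().fold(0, |acc, k| acc | (1 << (*k as u32 - 1)))
        }
    }
}

fn main() {
    // Mirrors Event::is_type: Put's bit is contained in CreatedAll's folded mask.
    assert_eq!(Kind::CreatedAll.mask(), 0b11);
    assert_ne!(Kind::Put.mask() & Kind::CreatedAll.mask(), 0);
    println!("masks consistent");
}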

View File

@@ -1,5 +0,0 @@
mod error;
mod event;
mod notifier;
mod system;
mod target;

View File

@@ -1,143 +0,0 @@
use common::error::{Error, Result};
use ecstore::store::ECStore;
use rustfs_notify::Event;
use rustfs_notify::EventNotifierConfig;
use std::sync::Arc;
use tokio::sync::{broadcast, mpsc};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, instrument, warn};
/// Event Notifier
pub struct EventNotifier {
/// The event sending channel
sender: mpsc::Sender<Event>,
/// Receiver task handle
task_handle: Option<tokio::task::JoinHandle<()>>,
/// Configuration information
config: EventNotifierConfig,
/// Shutdown token
shutdown: CancellationToken,
/// Channel signalling shutdown completion
shutdown_complete_tx: Option<broadcast::Sender<()>>,
}
impl EventNotifier {
/// Create a new event notifier
#[instrument(skip_all)]
pub async fn new(store: Arc<ECStore>) -> Result<Self> {
let manager = rustfs_notify::manager::EventManager::new(store);
let manager = Arc::new(manager.await);
// Initialize the configuration
let config = manager.clone().init().await?;
// Create adapters
let adapters = manager.clone().create_adapters().await?;
info!("Created {} adapters", adapters.len());
// Create the shutdown token
let shutdown = CancellationToken::new();
let (shutdown_complete_tx, _) = broadcast::channel(1);
// Create the event channel with default capacity, since each adapter has its own queue.
// A small channel capacity suffices here because events are dispatched to the adapters quickly.
let (sender, mut receiver) = mpsc::channel::<Event>(100);
let shutdown_clone = shutdown.clone();
let shutdown_complete_tx_clone = shutdown_complete_tx.clone();
let adapters_clone = adapters.clone();
// Start the event processing task
let task_handle = tokio::spawn(async move {
debug!("The event processing task starts");
loop {
tokio::select! {
Some(event) = receiver.recv() => {
debug!("The event is received:{}", event.id);
// Distribute to all adapters
for adapter in &adapters_clone {
let adapter_name = adapter.name();
match adapter.send(&event).await {
Ok(_) => {
debug!("Event {} Successfully sent to the adapter {}", event.id, adapter_name);
}
Err(e) => {
error!("Event {} send to adapter {} failed:{}", event.id, adapter_name, e);
}
}
}
}
_ = shutdown_clone.cancelled() => {
info!("A shutdown signal is received, and the event processing task is stopped");
let _ = shutdown_complete_tx_clone.send(());
break;
}
}
}
debug!("The event processing task has been stopped");
});
Ok(Self {
sender,
task_handle: Some(task_handle),
config,
shutdown,
shutdown_complete_tx: Some(shutdown_complete_tx),
})
}
/// Shut down the event notifier
pub async fn shutdown(&mut self) -> Result<()> {
info!("Turn off the event notifier");
self.shutdown.cancel();
if let Some(shutdown_tx) = self.shutdown_complete_tx.take() {
let mut rx = shutdown_tx.subscribe();
// Wait for the shutdown to complete the signal or time out
tokio::select! {
_ = rx.recv() => {
debug!("A shutdown completion signal is received");
}
_ = tokio::time::sleep(std::time::Duration::from_secs(10)) => {
warn!("Shutdown timeout and forced termination");
}
}
}
if let Some(handle) = self.task_handle.take() {
handle.abort();
match handle.await {
Ok(_) => debug!("The event processing task has been terminated gracefully"),
Err(e) => {
if e.is_cancelled() {
debug!("The event processing task has been canceled");
} else {
error!("An error occurred while waiting for the event processing task to terminate:{}", e);
}
}
}
}
info!("The event notifier is completely turned off");
Ok(())
}
/// Send events
pub async fn send(&self, event: Event) -> Result<()> {
self.sender
.send(event)
.await
.map_err(|e| Error::msg(format!("Failed to send event to channel: {}", e)))
}
/// Get the current configuration
pub fn config(&self) -> &EventNotifierConfig {
&self.config
}
}
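The dispatcher removed above follows a common tokio shape: an mpsc channel feeds a spawned task that select!s between incoming events and a CancellationToken. A minimal self-contained sketch of that shape (not the removed code itself):

use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<String>(100);
    let shutdown = CancellationToken::new();
    let child = shutdown.clone();

    // Dispatcher loop: drain events until the token is cancelled.
    let worker = tokio::spawn(async move {
        loop {
            tokio::select! {
                Some(event) = rx.recv() => println!("dispatching {event}"),
                _ = child.cancelled() => break,
            }
        }
    });

    tx.send("object-created".into()).await.unwrap();
    shutdown.cancel();
    worker.await.unwrap();
}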

View File

@@ -1,82 +0,0 @@
use crate::notifier::EventNotifier;
use common::error::Result;
use ecstore::store::ECStore;
use once_cell::sync::OnceCell;
use rustfs_notify::Event;
use rustfs_notify::EventNotifierConfig;
use std::sync::{Arc, Mutex};
use tracing::{debug, error, info};
/// Global event system
pub struct EventSystem {
/// Event Notifier
notifier: Mutex<Option<EventNotifier>>,
}
impl EventSystem {
/// Create a new event system
pub fn new() -> Self {
Self {
notifier: Mutex::new(None),
}
}
/// Initialize the event system
pub async fn init(&self, store: Arc<ECStore>) -> Result<EventNotifierConfig> {
info!("Initialize the event system");
let notifier = EventNotifier::new(store).await?;
let config = notifier.config().clone();
let mut guard = self
.notifier
.lock()
.map_err(|e| common::error::Error::msg(format!("Failed to acquire lock: {}", e)))?;
*guard = Some(notifier);
debug!("The event system initialization is complete");
Ok(config)
}
/// Send events
pub async fn send_event(&self, event: Event) -> Result<()> {
let guard = self
.notifier
.lock()
.map_err(|e| common::error::Error::msg(format!("Failed to acquire lock: {}", e)))?;
if let Some(notifier) = &*guard {
notifier.send(event).await
} else {
error!("The event system is not initialized");
Err(common::error::Error::msg("The event system is not initialized"))
}
}
/// Shut down the event system
pub async fn shutdown(&self) -> Result<()> {
info!("Shut down the event system");
let mut guard = self
.notifier
.lock()
.map_err(|e| common::error::Error::msg(format!("Failed to acquire lock: {}", e)))?;
if let Some(ref mut notifier) = *guard {
notifier.shutdown().await?;
*guard = None;
info!("The event system is down");
Ok(())
} else {
debug!("The event system has been shut down");
Ok(())
}
}
}
/// A global event system instance
pub static GLOBAL_EVENT_SYS: OnceCell<EventSystem> = OnceCell::new();
/// Initialize the global event system
pub fn init_global_event_system() -> &'static EventSystem {
GLOBAL_EVENT_SYS.get_or_init(EventSystem::new)
}
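The OnceCell pattern above reduces to a few lines: the first caller constructs the system, and every later caller sees the same instance. A sketch:

use once_cell::sync::OnceCell;

struct EventSystem;

static GLOBAL_EVENT_SYS: OnceCell<EventSystem> = OnceCell::new();

fn init_global_event_system() -> &'static EventSystem {
    // get_or_init runs the closure at most once, even under contention.
    GLOBAL_EVENT_SYS.get_or_init(|| EventSystem)
}

fn main() {
    let a = init_global_event_system();
    let b = init_global_event_system();
    assert!(std::ptr::eq(a, b)); // one shared instance
}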

View File

@@ -1,49 +0,0 @@
use async_trait::async_trait;
use rustfs_notify::store::{Key, Store, StoreError, StoreResult};
use serde::{de::DeserializeOwned, Serialize};
use std::sync::Arc;
pub mod mqtt;
pub mod webhook;
pub const STORE_PREFIX: &str = "rustfs";
// Common Target trait, corresponding to the Go Target interface
#[async_trait]
pub trait Target: Send + Sync {
fn name(&self) -> String;
async fn send_from_store(&self, key: Key) -> StoreResult<()>;
async fn is_active(&self) -> StoreResult<bool>;
async fn close(&self) -> StoreResult<()>;
}
// TargetID uniquely identifies a target
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct TargetID {
pub id: String,
pub name: String,
}
impl TargetID {
pub fn new(id: &str, name: &str) -> Self {
Self {
id: id.to_owned(),
name: name.to_owned(),
}
}
}
impl std::fmt::Display for TargetID {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}:{}", self.name, self.id)
}
}
// TargetStore interface
pub trait TargetStore {
fn store<T>(&self) -> Option<Arc<dyn Store<T>>>
where
T: Serialize + DeserializeOwned + Send + Sync + 'static;
}
pub type Logger = fn(ctx: Option<&str>, err: StoreError, id: &str, err_kind: &[&dyn std::fmt::Display]);
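The #[async_trait] attribute is what makes dyn Target usable here: it rewrites the async methods to return boxed futures. A reduced analogue with one implementation (illustrative names, not this crate's types):

use async_trait::async_trait;

#[async_trait]
trait Target: Send + Sync {
    fn name(&self) -> String;
    async fn is_active(&self) -> bool;
}

struct LogTarget;

#[async_trait]
impl Target for LogTarget {
    fn name(&self) -> String {
        "log:1".to_string()
    }
    async fn is_active(&self) -> bool {
        true
    }
}

#[tokio::main]
async fn main() {
    // Trait objects work because async_trait boxes the returned futures.
    let target: Box<dyn Target> = Box::new(LogTarget);
    println!("{} active: {}", target.name(), target.is_active().await);
}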

View File

@@ -1,426 +0,0 @@
use super::{Logger, Target, TargetID, TargetStore, STORE_PREFIX};
use async_trait::async_trait;
use once_cell::sync::OnceCell;
use rumqttc::{AsyncClient, ConnectionError, Event as MqttEvent, MqttOptions, QoS, Transport};
use rustfs_config::notify::mqtt::MQTTArgs;
use rustfs_notify::store;
use rustfs_notify::{
store::{Key, Store, StoreError, StoreResult},
Event, QueueStore,
};
use serde::{de::DeserializeOwned, Serialize};
use serde_json::json;
use std::{path::PathBuf, sync::Arc, time::Duration};
use tokio::{
sync::{mpsc, Mutex},
task::JoinHandle,
};
use url::Url;
pub struct MQTTTarget {
init: OnceCell<()>,
id: TargetID,
args: MQTTArgs,
client: Option<Arc<Mutex<AsyncClient>>>,
eventloop_handle: Option<JoinHandle<()>>,
store: Option<Arc<dyn Store<Event>>>,
logger: Logger,
cancel_tx: mpsc::Sender<()>,
connection_status: Arc<Mutex<bool>>,
}
impl MQTTTarget {
pub async fn new(id: &str, args: MQTTArgs, logger: Logger) -> Result<Self, StoreError> {
// Create the cancellation channel
let (cancel_tx, mut cancel_rx) = mpsc::channel(1);
let connection_status = Arc::new(Mutex::new(false));
// Create the queue store (if configured)
let mut store = None;
if !args.queue_dir.is_empty() {
if args.qos == 0 {
return Err(StoreError::Other("QoS should be set to 1 or 2 if queueDir is set".to_string()));
}
let queue_dir = PathBuf::from(&args.queue_dir).join(format!("{}-mqtt-{}", STORE_PREFIX, id));
let queue_store = Arc::new(QueueStore::<Event>::new(queue_dir, args.queue_limit, Some(".event")));
queue_store.open().await?;
store = Some(queue_store.clone() as Arc<dyn Store<Event>>);
// Set up the event stream
let status_clone = connection_status.clone();
let logger_clone = logger;
let target_store = queue_store;
let args_clone = args.clone();
let id_clone = id.to_string();
let cancel_tx_clone = cancel_tx.clone();
tokio::spawn(async move {
let target = Arc::new(MQTTTargetWrapper {
id: TargetID::new(&id_clone, "mqtt"),
args: args_clone,
client: None,
logger: logger_clone,
cancel_tx: cancel_tx_clone,
connection_status: status_clone,
});
store::stream_items(target_store, target, cancel_rx, logger_clone).await;
});
}
Ok(Self {
init: OnceCell::new(),
id: TargetID::new(id, "mqtt"),
args,
client: None,
eventloop_handle: None,
store,
logger,
cancel_tx,
connection_status,
})
}
async fn initialize(&self) -> StoreResult<()> {
if self.init.get().is_some() {
return Ok(());
}
// Parse the MQTT broker address
let broker_url = Url::parse(&self.args.broker).map_err(|e| StoreError::Other(format!("Invalid broker URL: {}", e)))?;
let host = broker_url
.host_str()
.ok_or_else(|| StoreError::Other("Missing host in broker URL".into()))?
.to_string();
let port = broker_url.port().unwrap_or_else(|| {
match broker_url.scheme() {
"mqtt" => 1883,
"mqtts" | "ssl" | "tls" => 8883,
"ws" => 80,
"wss" => 443,
_ => 1883, // default
}
});
// Generate the client ID
let client_id = format!(
"{:x}",
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map_err(|e| StoreError::Other(e.to_string()))?
.as_nanos()
);
// Build the MQTT options
let mut mqtt_options = MqttOptions::new(client_id, host, port);
mqtt_options.set_clean_session(true);
mqtt_options.set_keep_alive(self.args.keep_alive);
mqtt_options.set_max_packet_size(100 * 1024); // 100KB
// Configure the reconnect interval
mqtt_options.set_connection_timeout(self.args.keep_alive.as_secs() as u16);
mqtt_options.set_max_reconnect_retry(10); // maximum number of retries
mqtt_options.set_retry_interval(Duration::from_millis(100));
// Apply credentials if a username was set
if !self.args.username.is_empty() {
mqtt_options.set_credentials(&self.args.username, &self.args.password);
}
// TLS configuration
if self.args.root_cas.is_some()
|| broker_url.scheme() == "mqtts"
|| broker_url.scheme() == "ssl"
|| broker_url.scheme() == "tls"
|| broker_url.scheme() == "wss"
{
let mut transport = if broker_url.scheme() == "ws" || broker_url.scheme() == "wss" {
let path = broker_url.path();
Transport::Ws {
path: if path == "/" { "/mqtt".to_string() } else { path.to_string() },
}
} else {
Transport::Tls
};
// If root certificates were provided
if let Some(root_cas) = &self.args.root_cas {
if let Transport::Tls = transport {
transport = Transport::Tls;
}
// In a real implementation the TLS certificates would be configured here.
// The rumqttc interface may change between versions; consult the latest documentation.
}
mqtt_options.set_transport(transport);
} else if broker_url.scheme() == "ws" {
let path = broker_url.path();
mqtt_options.set_transport(Transport::Ws {
path: if path == "/" { "/mqtt".to_string() } else { path.to_string() },
});
}
// Create the MQTT client
let (client, mut eventloop) = AsyncClient::new(mqtt_options, 10);
let client = Arc::new(Mutex::new(client));
// Clone handles for the event loop
let connection_status = self.connection_status.clone();
let client_clone = client.clone();
let logger = self.logger;
let target_id = self.id.to_string();
// Start the event loop
let eventloop_handle = tokio::spawn(async move {
loop {
match eventloop.poll().await {
Ok(event) => match event {
MqttEvent::Incoming(incoming) => match incoming {
rumqttc::Packet::ConnAck(connack) => {
if connack.code == rumqttc::ConnectReturnCode::Success {
*connection_status.lock().await = true;
} else {
logger(
None,
StoreError::Other(format!("MQTT connection failed: {:?}", connack.code)),
&target_id,
&[],
);
*connection_status.lock().await = false;
}
}
_ => {}
},
MqttEvent::Outgoing(_) => {}
},
Err(ConnectionError::ConnectionRefused(_)) => {
*connection_status.lock().await = false;
logger(None, StoreError::NotConnected, &target_id, &["MQTT connection refused"]);
tokio::time::sleep(Duration::from_secs(5)).await;
}
Err(e) => {
*connection_status.lock().await = false;
logger(None, StoreError::Other(format!("MQTT error: {}", e)), &target_id, &[]);
tokio::time::sleep(Duration::from_secs(5)).await;
}
}
}
});
// Update the target state
self.client = Some(client_clone);
self.eventloop_handle = Some(eventloop_handle);
// Wait for the connection to be established
for _ in 0..5 {
if *self.connection_status.lock().await {
self.init
.set(())
.map_err(|_| StoreError::Other("Failed to initialize MQTT target".into()))?;
return Ok(());
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
Err(StoreError::NotConnected)
}
async fn send(&self, event_data: &Event) -> StoreResult<()> {
let client = match &self.client {
Some(client) => client,
None => return Err(StoreError::NotConnected),
};
if !*self.connection_status.lock().await {
return Err(StoreError::NotConnected);
}
// Build the message payload
let object_key = urlencoding::decode(&event_data.s3.object.key)
.map_err(|e| StoreError::Other(format!("Failed to decode object key: {}", e)))?;
let key = format!("{}/{}", event_data.s3.bucket.name, object_key);
let log_data = json!({
"EventName": event_data.event_name,
"Key": key,
"Records": [event_data]
});
let payload = serde_json::to_string(&log_data).map_err(StoreError::SerdeError)?;
// Determine the QoS level
let qos = match self.args.qos {
0 => QoS::AtMostOnce,
1 => QoS::AtLeastOnce,
2 => QoS::ExactlyOnce,
_ => QoS::AtMostOnce, // default
};
// Publish the message
let mut client_guard = client.lock().await;
client_guard
.publish(&self.args.topic, qos, false, payload)
.await
.map_err(|e| {
if matches!(e, rumqttc::ClientError::ConnectionLost(_)) {
StoreError::NotConnected
} else {
StoreError::Other(format!("MQTT publish error: {}", e))
}
})?;
Ok(())
}
}
// MQTT target wrapper used for streaming stored events
struct MQTTTargetWrapper {
id: TargetID,
args: MQTTArgs,
client: Option<Arc<Mutex<AsyncClient>>>,
logger: Logger,
cancel_tx: mpsc::Sender<()>,
connection_status: Arc<Mutex<bool>>,
}
#[async_trait]
impl Target for MQTTTargetWrapper {
fn name(&self) -> String {
self.id.to_string()
}
async fn send_from_store(&self, _key: Key) -> StoreResult<()> {
// This method is implemented on the actual MQTTTarget
Ok(())
}
async fn is_active(&self) -> StoreResult<bool> {
Ok(*self.connection_status.lock().await)
}
async fn close(&self) -> StoreResult<()> {
// Send the cancellation signal
let _ = self.cancel_tx.send(()).await;
Ok(())
}
}
#[async_trait]
impl Target for MQTTTarget {
fn name(&self) -> String {
self.id.to_string()
}
async fn send_from_store(&self, key: Key) -> StoreResult<()> {
self.initialize().await?;
// Return an error if there is no connection
if !*self.connection_status.lock().await {
return Err(StoreError::NotConnected);
}
// If a store is configured, fetch the event and send it
if let Some(store) = &self.store {
match store.get(key.clone()).await {
Ok(event_data) => {
match self.send(&event_data).await {
Ok(_) => {
// Delete the event once it has been sent successfully
return store.del(key).await.map(|_| ());
}
Err(e) => {
(self.logger)(None, e.clone(), &self.id.to_string(), &["Failed to send event"]);
return Err(e);
}
}
}
Err(e) => {
// If the file does not exist, ignore the error (it may already have been processed)
if let StoreError::IOError(ref io_err) = e {
if io_err.kind() == std::io::ErrorKind::NotFound {
return Ok(());
}
}
return Err(e);
}
}
}
Ok(())
}
async fn is_active(&self) -> StoreResult<bool> {
if self.init.get().is_none() {
return Ok(false);
}
Ok(*self.connection_status.lock().await)
}
async fn close(&self) -> StoreResult<()> {
// Send the cancellation signal
let _ = self.cancel_tx.send(()).await;
// Abort the event loop
if let Some(handle) = &self.eventloop_handle {
handle.abort();
}
// Disconnect the MQTT client
if let Some(client) = &self.client {
if let Ok(mut client) = client.try_lock() {
// Try to disconnect (ignoring errors)
let _ = client.disconnect().await;
}
}
Ok(())
}
}
impl TargetStore for MQTTTarget {
fn store<T>(&self) -> Option<Arc<dyn Store<T>>>
where
T: Serialize + DeserializeOwned + Send + Sync + 'static,
{
if let Some(store) = &self.store {
// The type check ensures T is the Event type
if std::any::TypeId::of::<T>() == std::any::TypeId::of::<Event>() {
// Safety: the type IDs have been checked to match
let store_ptr = Arc::as_ptr(store);
let store_t = unsafe { Arc::from_raw(store_ptr as *const dyn Store<T>) };
// Bump the reference count so the original pointer is not freed
std::mem::forget(store_t.clone());
return Some(store_t);
}
}
None
}
}
impl MQTTTarget {
pub async fn save(&self, event_data: Event) -> StoreResult<()> {
// If a store is configured, persist the event
if let Some(store) = &self.store {
return store.put(event_data).await.map(|_| ());
}
// Otherwise, initialize and send directly
self.initialize().await?;
// Check the connection
if !*self.connection_status.lock().await {
return Err(StoreError::NotConnected);
}
self.send(&event_data).await
}
pub fn id(&self) -> &TargetID {
&self.id
}
}
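For reference, the essentials of publishing with rumqttc, stripped of the queue store and reconnection logic above. A sketch assuming a broker on localhost:1883, not the removed target's code:

use rumqttc::{AsyncClient, MqttOptions, QoS};
use std::time::Duration;

#[tokio::main]
async fn main() {
    let mut options = MqttOptions::new("demo-client", "localhost", 1883);
    options.set_keep_alive(Duration::from_secs(5));

    let (client, mut eventloop) = AsyncClient::new(options, 10);
    client
        .publish("rustfs/events", QoS::AtLeastOnce, false, r#"{"hello":"world"}"#)
        .await
        .unwrap();

    // The connection only makes progress while the event loop is polled.
    for _ in 0..5 {
        match eventloop.poll().await {
            Ok(notification) => println!("{notification:?}"),
            Err(e) => {
                eprintln!("connection error: {e}");
                break;
            }
        }
    }
}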

View File

@@ -1,328 +0,0 @@
use super::{Logger, Target, TargetID, TargetStore, STORE_PREFIX};
use async_trait::async_trait;
use once_cell::sync::OnceCell;
use reqwest::{header, Client, StatusCode};
use rustfs_config::notify::webhook::WebhookArgs;
use rustfs_notify::{
store::{self, Key, Store, StoreError, StoreResult},
Event,
};
use serde::de::DeserializeOwned;
use serde::Serialize;
use serde_json::json;
use std::{path::PathBuf, sync::Arc, time::Duration};
use tokio::{net::TcpStream, sync::mpsc};
use url::Url;
pub struct WebhookTarget {
init: OnceCell<()>,
id: TargetID,
args: WebhookArgs,
client: Client,
store: Option<Arc<dyn Store<Event>>>,
logger: Logger,
cancel_tx: mpsc::Sender<()>,
addr: String, // full address, including IP/DNS and port
}
impl WebhookTarget {
pub async fn new(id: &str, args: WebhookArgs, logger: Logger) -> Result<Self, StoreError> {
// Create the cancellation channel
let (cancel_tx, cancel_rx) = mpsc::channel(1);
// Configure the HTTP client
let mut client_builder = Client::builder().timeout(Duration::from_secs(10));
// Add a client certificate if one is configured
if !args.client_cert.is_empty() && !args.client_key.is_empty() {
let cert =
std::fs::read(&args.client_cert).map_err(|e| StoreError::Other(format!("Failed to read client cert: {}", e)))?;
let key =
std::fs::read(&args.client_key).map_err(|e| StoreError::Other(format!("Failed to read client key: {}", e)))?;
let identity = reqwest::Identity::from_pem(&[cert, key].concat())
.map_err(|e| StoreError::Other(format!("Failed to create identity: {}", e)))?;
client_builder = client_builder.identity(identity);
}
let client = client_builder
.build()
.map_err(|e| StoreError::Other(format!("Failed to create HTTP client: {}", e)))?;
// Compute the target address
let endpoint = Url::parse(&args.endpoint).map_err(|e| StoreError::Other(format!("Invalid URL: {}", e)))?;
let mut addr = endpoint
.host_str()
.ok_or_else(|| StoreError::Other("Missing host in endpoint".into()))?
.to_string();
// If no port is given, append the scheme's default port
if endpoint.port().is_none() {
match endpoint.scheme() {
"http" => addr.push_str(":80"),
"https" => addr.push_str(":443"),
_ => return Err(StoreError::Other("Unsupported scheme".into())),
}
} else if let Some(port) = endpoint.port() {
addr = format!("{}:{}", addr, port);
}
// Create the queue store (if configured)
let mut store = None;
if !args.queue_dir.is_empty() {
let queue_dir = PathBuf::from(&args.queue_dir).join(format!("{}-webhook-{}", STORE_PREFIX, id));
let queue_store = Arc::new(store::queue::QueueStore::<Event>::new(queue_dir, args.queue_limit, Some(".event")));
queue_store.open().await?;
store = Some(queue_store.clone() as Arc<dyn Store<Event>>);
// Set up the event stream
let target_store = Arc::new(queue_store);
let target = Arc::new(WebhookTargetWrapper::new(
id,
args.clone(),
client.clone(),
addr.clone(),
logger,
cancel_tx.clone(),
));
tokio::spawn(async move {
store::stream_items(target_store.clone(), target.clone(), cancel_rx, logger).await;
});
}
Ok(Self {
init: OnceCell::new(),
id: TargetID::new(id, "webhook"),
args,
client,
store,
logger,
cancel_tx,
addr,
})
}
async fn initialize(&self) -> StoreResult<()> {
if self.init.get().is_some() {
return Ok(());
}
let is_active = self.is_active().await?;
if !is_active {
return Err(StoreError::NotConnected);
}
self.init
.set(())
.map_err(|_| StoreError::Other("Failed to initialize".into()))?;
Ok(())
}
async fn send(&self, event_data: &Event) -> StoreResult<()> {
// Build the request payload
let object_key = match urlencoding::decode(&event_data.s3.object.key) {
Ok(key) => key.to_string(),
Err(e) => return Err(StoreError::Other(format!("Failed to decode object key: {}", e))),
};
let key = format!("{}/{}", event_data.s3.bucket.name, object_key);
let log_data = json!({
"EventName": event_data.event_name,
"Key": key,
"Records": [event_data]
});
// Create the request
let mut request_builder = self
.client
.post(&self.args.endpoint)
.header(header::CONTENT_TYPE, "application/json");
// Add the authorization header
if !self.args.auth_token.is_empty() {
let tokens: Vec<&str> = self.args.auth_token.split_whitespace().collect();
match tokens.len() {
2 => request_builder = request_builder.header(header::AUTHORIZATION, &self.args.auth_token),
1 => request_builder = request_builder.header(header::AUTHORIZATION, format!("Bearer {}", &self.args.auth_token)),
_ => {}
}
}
// Send the request
let response = request_builder.json(&log_data).send().await.map_err(|e| {
if e.is_timeout() || e.is_connect() {
StoreError::NotConnected
} else {
StoreError::Other(format!("Request failed: {}", e))
}
})?;
// Check the response status
let status = response.status();
if status.is_success() {
Ok(())
} else if status == StatusCode::FORBIDDEN {
Err(StoreError::Other(format!(
"{} returned '{}', please check if your auth token is correctly set",
self.args.endpoint, status
)))
} else {
Err(StoreError::Other(format!(
"{} returned '{}', please check your endpoint configuration",
self.args.endpoint, status
)))
}
}
}
struct WebhookTargetWrapper {
id: TargetID,
args: WebhookArgs,
client: Client,
addr: String,
logger: Logger,
cancel_tx: mpsc::Sender<()>,
}
impl WebhookTargetWrapper {
fn new(id: &str, args: WebhookArgs, client: Client, addr: String, logger: Logger, cancel_tx: mpsc::Sender<()>) -> Self {
Self {
id: TargetID::new(id, "webhook"),
args,
client,
addr,
logger,
cancel_tx,
}
}
}
#[async_trait]
impl Target for WebhookTargetWrapper {
fn name(&self) -> String {
self.id.to_string()
}
async fn send_from_store(&self, key: Key) -> StoreResult<()> {
// 这个方法在 Target trait 实现中需要,但我们不会直接使用它
// 实际上,它将由上面创建的 WebhookTarget 的 SendFromStore 方法处理
Ok(())
}
async fn is_active(&self) -> StoreResult<bool> {
// Try to connect to the target address
match tokio::time::timeout(Duration::from_secs(5), TcpStream::connect(&self.addr)).await {
Ok(Ok(_)) => Ok(true),
Ok(Err(e)) => {
if e.kind() == std::io::ErrorKind::ConnectionRefused
|| e.kind() == std::io::ErrorKind::ConnectionAborted
|| e.kind() == std::io::ErrorKind::ConnectionReset
{
Err(StoreError::NotConnected)
} else {
Err(StoreError::Other(format!("Connection error: {}", e)))
}
}
Err(_) => Err(StoreError::NotConnected),
}
}
async fn close(&self) -> StoreResult<()> {
// Send the cancellation signal
let _ = self.cancel_tx.send(()).await;
Ok(())
}
}
#[async_trait]
impl Target for WebhookTarget {
fn name(&self) -> String {
self.id.to_string()
}
async fn send_from_store(&self, key: Key) -> StoreResult<()> {
self.initialize().await?;
// If a store is configured, fetch the event and send it
if let Some(store) = &self.store {
match store.get(key.clone()).await {
Ok(event_data) => match self.send(&event_data).await {
Ok(_) => store.del(key).await?,
Err(e) => {
if matches!(e, StoreError::NotConnected) {
return Err(StoreError::NotConnected);
}
return Err(e);
}
},
Err(e) => {
// If the key does not exist the event may already have been sent; ignore the error
if let StoreError::IoError(io_err) = &e {
if io_err.kind() == std::io::ErrorKind::NotFound {
return Ok(());
}
}
return Err(e);
}
}
}
Ok(())
}
async fn is_active(&self) -> StoreResult<bool> {
// Try to connect to the target address
match tokio::time::timeout(Duration::from_secs(5), TcpStream::connect(&self.addr)).await {
Ok(Ok(_)) => Ok(true),
Ok(Err(_)) => Err(StoreError::NotConnected),
Err(_) => Err(StoreError::NotConnected),
}
}
async fn close(&self) -> StoreResult<()> {
// Send the cancellation signal
let _ = self.cancel_tx.send(()).await;
Ok(())
}
}
impl TargetStore for WebhookTarget {
fn store<T>(&self) -> Option<Arc<dyn Store<T>>>
where
T: Serialize + DeserializeOwned + Send + Sync + 'static,
{
if let Some(store) = &self.store {
// Note: this assumes T is the Event type and needs a cast (returns None otherwise)
if std::any::TypeId::of::<T>() == std::any::TypeId::of::<Event>() {
// Safety: the type IDs have been checked
let store_ptr = Arc::as_ptr(store);
let store_t = unsafe { Arc::from_raw(store_ptr as *const dyn Store<T>) };
// Bump the reference count so the original pointer is not freed
std::mem::forget(store_t.clone());
return Some(store_t);
}
}
None
}
}
impl WebhookTarget {
pub async fn save(&self, event_data: Event) -> StoreResult<()> {
// If a store is configured, persist the event
if let Some(store) = &self.store {
return store.put(event_data).await.map(|_| ());
}
// Otherwise, initialize and send directly
self.initialize().await?;
self.send(&event_data).await
}
pub fn id(&self) -> &TargetID {
&self.id
}
}
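Both removed targets implement store&lt;T&gt; with an unsafe pointer cast guarded by a TypeId check. A safer shape for the same idea is to hold the store as Arc&lt;dyn Any&gt; and downcast; a sketch under that assumption (EventStore stands in for the concrete store type):

use std::any::Any;
use std::sync::Arc;

struct EventStore; // stand-in for the concrete QueueStore<Event>

// Hold the erased store and downcast on demand; no unsafe needed.
fn typed_store(store: &Arc<dyn Any + Send + Sync>) -> Option<Arc<EventStore>> {
    store.clone().downcast::<EventStore>().ok()
}

fn main() {
    let store: Arc<dyn Any + Send + Sync> = Arc::new(EventStore);
    assert!(typed_store(&store).is_some());
    println!("downcast ok");
}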

View File

@@ -6,36 +6,34 @@ repository.workspace = true
rust-version.workspace = true
version.workspace = true
[features]
default = ["webhook"]
webhook = ["dep:reqwest"]
mqtt = ["rumqttc"]
[dependencies]
rustfs-config = { workspace = true, features = ["constants", "notify"] }
async-trait = { workspace = true }
common = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
const-str = { workspace = true }
ecstore = { workspace = true }
libc = { workspace = true }
once_cell = { workspace = true }
reqwest = { workspace = true, optional = true }
rumqttc = { workspace = true, optional = true }
quick-xml = { workspace = true, features = ["serialize", "async-tokio"] }
reqwest = { workspace = true }
rumqttc = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_with = { workspace = true }
smallvec = { workspace = true, features = ["serde"] }
strum = { workspace = true, features = ["derive"] }
tracing = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["sync", "net", "macros", "signal", "rt-multi-thread"] }
tokio-util = { workspace = true }
uuid = { workspace = true, features = ["v4", "serde"] }
snap = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["rt-multi-thread", "sync", "time"] }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
uuid = { workspace = true, features = ["v4", "serde"] }
url = { workspace = true }
urlencoding = { workspace = true }
wildmatch = { workspace = true, features = ["serde"] }
[dev-dependencies]
tokio = { workspace = true, features = ["test-util"] }
tracing-subscriber = { workspace = true }
mockito = "1.7"
reqwest = { workspace = true, default-features = false, features = ["rustls-tls", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
axum = { workspace = true }
dotenvy = { workspace = true }
[lints]
workspace = true
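The new mockito dev-dependency (plus the fuller-featured reqwest and axum in dev-dependencies) suggests webhook delivery is now tested against a local mock server. A sketch of such a test; the endpoint path, payload, and test name are illustrative, not taken from this diff:

#[tokio::test]
async fn webhook_delivers_event() {
    // Spin up a local mock HTTP server in place of a real webhook endpoint.
    let mut server = mockito::Server::new_async().await;
    let mock = server
        .mock("POST", "/webhook")
        .match_header("content-type", "application/json")
        .with_status(200)
        .create_async()
        .await;

    let client = reqwest::Client::new();
    let response = client
        .post(format!("{}/webhook", server.url()))
        .json(&serde_json::json!({ "EventName": "s3:ObjectCreated:Put" }))
        .send()
        .await
        .unwrap();

    assert!(response.status().is_success());
    mock.assert_async().await; // the endpoint was hit exactly once
}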

View File

@@ -1,28 +0,0 @@
## ===== global configuration =====
#NOTIFIER__STORE_PATH=/var/log/event-notification
#NOTIFIER__CHANNEL_CAPACITY=5000
#
## ===== adapter configuration (array format) =====
## webhook adapter (index 0)
#NOTIFIER__ADAPTERS_0__type=Webhook
#NOTIFIER__ADAPTERS_0__endpoint=http://127.0.0.1:3020/webhook
#NOTIFIER__ADAPTERS_0__auth_token=your-auth-token
#NOTIFIER__ADAPTERS_0__max_retries=3
#NOTIFIER__ADAPTERS_0__timeout=50
#NOTIFIER__ADAPTERS_0__custom_headers__x_custom_server=server-value
#NOTIFIER__ADAPTERS_0__custom_headers__x_custom_client=client-value
#
## kafka adapter (index 1)
#NOTIFIER__ADAPTERS_1__type=Kafka
#NOTIFIER__ADAPTERS_1__brokers=localhost:9092
#NOTIFIER__ADAPTERS_1__topic=notifications
#NOTIFIER__ADAPTERS_1__max_retries=3
#NOTIFIER__ADAPTERS_1__timeout=60
#
## mqtt adapter (index 2)
#NOTIFIER__ADAPTERS_2__type=Mqtt
#NOTIFIER__ADAPTERS_2__broker=mqtt.example.com
#NOTIFIER__ADAPTERS_2__port=1883
#NOTIFIER__ADAPTERS_2__client_id=event-notifier
#NOTIFIER__ADAPTERS_2__topic=events
#NOTIFIER__ADAPTERS_2__max_retries=3

View File

@@ -1,28 +0,0 @@
## ===== global configuration =====
#NOTIFIER__STORE_PATH=/var/log/event-notification
#NOTIFIER__CHANNEL_CAPACITY=5000
#
## ===== adapter configuration (array format) =====
## Webhook adapter (index 0)
#NOTIFIER__ADAPTERS_0__type=Webhook
#NOTIFIER__ADAPTERS_0__endpoint=http://127.0.0.1:3020/webhook
#NOTIFIER__ADAPTERS_0__auth_token=your-auth-token
#NOTIFIER__ADAPTERS_0__max_retries=3
#NOTIFIER__ADAPTERS_0__timeout=50
#NOTIFIER__ADAPTERS_0__custom_headers__x_custom_server=value
#NOTIFIER__ADAPTERS_0__custom_headers__x_custom_client=value
#
## Kafka adapter (index 1)
#NOTIFIER__ADAPTERS_1__type=Kafka
#NOTIFIER__ADAPTERS_1__brokers=localhost:9092
#NOTIFIER__ADAPTERS_1__topic=notifications
#NOTIFIER__ADAPTERS_1__max_retries=3
#NOTIFIER__ADAPTERS_1__timeout=60
#
## MQTT adapter (index 2)
#NOTIFIER__ADAPTERS_2__type=Mqtt
#NOTIFIER__ADAPTERS_2__broker=mqtt.example.com
#NOTIFIER__ADAPTERS_2__port=1883
#NOTIFIER__ADAPTERS_2__client_id=event-notifier
#NOTIFIER__ADAPTERS_2__topic=events
#NOTIFIER__ADAPTERS_2__max_retries=3

View File

@@ -1,29 +0,0 @@
# config.toml
store_path = "/var/log/event-notifier"
channel_capacity = 5000
[[adapters]]
type = "Webhook"
endpoint = "http://127.0.0.1:3020/webhook"
auth_token = "your-auth-token"
max_retries = 3
timeout = 50
[adapters.custom_headers]
custom_server = "value_server"
custom_client = "value_client"
[[adapters]]
type = "Kafka"
brokers = "localhost:9092"
topic = "notifications"
max_retries = 3
timeout = 60
[[adapters]]
type = "Mqtt"
broker = "mqtt.example.com"
port = 1883
client_id = "event-notifier"
topic = "events"
max_retries = 3

View File

@@ -0,0 +1,109 @@
use notify::arn::TargetID;
use notify::global::notification_system;
use notify::{
init_logger, BucketNotificationConfig, Event, EventName, LogLevel, NotificationError,
};
use std::time::Duration;
use tracing::info;
#[tokio::main]
async fn main() -> Result<(), NotificationError> {
init_logger(LogLevel::Debug);
let system = notification_system();
// --- Initial configuration (Webhook and MQTT) ---
let mut config = notify::Config::new();
// Webhook target configuration
let mut webhook_kvs = notify::KVS::new();
webhook_kvs.set("enable", "on");
webhook_kvs.set("endpoint", "http://127.0.0.1:3020/webhook");
webhook_kvs.set("auth_token", "secret-token");
// webhook_kvs.set("queue_dir", "/tmp/data/webhook");
webhook_kvs.set(
"queue_dir",
"/Users/qun/Documents/rust/rustfs/notify/logs/webhook",
);
webhook_kvs.set("queue_limit", "10000");
let mut webhook_targets = std::collections::HashMap::new();
webhook_targets.insert("1".to_string(), webhook_kvs);
config.insert("notify_webhook".to_string(), webhook_targets);
// MQTT target configuration
let mut mqtt_kvs = notify::KVS::new();
mqtt_kvs.set("enable", "on");
mqtt_kvs.set("broker", "mqtt://localhost:1883");
mqtt_kvs.set("topic", "rustfs/events");
mqtt_kvs.set("qos", "1"); // AtLeastOnce
mqtt_kvs.set("username", "test");
mqtt_kvs.set("password", "123456");
// webhook_kvs.set("queue_dir", "/tmp/data/mqtt");
mqtt_kvs.set(
"queue_dir",
"/Users/qun/Documents/rust/rustfs/notify/logs/mqtt",
);
mqtt_kvs.set("queue_limit", "10000");
let mut mqtt_targets = std::collections::HashMap::new();
mqtt_targets.insert("1".to_string(), mqtt_kvs);
config.insert("notify_mqtt".to_string(), mqtt_targets);
// Load the configuration and initialize the system
*system.config.write().await = config;
system.init().await?;
info!("✅ System initialized with Webhook and MQTT targets.");
// --- 1. Query the currently active targets ---
let active_targets = system.get_active_targets().await;
info!("\n---> Currently active targets: {:?}", active_targets);
assert_eq!(active_targets.len(), 2);
tokio::time::sleep(Duration::from_secs(1)).await;
// --- 2. Remove a single target precisely (e.g. MQTT) ---
info!("\n---> Removing MQTT target...");
let mqtt_target_id = TargetID::new("1".to_string(), "mqtt".to_string());
system.remove_target(&mqtt_target_id, "notify_mqtt").await?;
info!("✅ MQTT target removed.");
// --- 3. Query the active targets again ---
let active_targets_after_removal = system.get_active_targets().await;
info!(
"\n---> Active targets after removal: {:?}",
active_targets_after_removal
);
assert_eq!(active_targets_after_removal.len(), 1);
assert_eq!(active_targets_after_removal[0].id, "1".to_string());
// --- 4. Send an event for verification ---
// Configure rules pointing to the Webhook and the already-removed MQTT target
let mut bucket_config = BucketNotificationConfig::new("us-east-1");
bucket_config.add_rule(
&[EventName::ObjectCreatedPut],
"*".to_string(),
TargetID::new("1".to_string(), "webhook".to_string()),
);
bucket_config.add_rule(
&[EventName::ObjectCreatedPut],
"*".to_string(),
TargetID::new("1".to_string(), "mqtt".to_string()), // 这个规则会匹配,但找不到 Target
);
system
.load_bucket_notification_config("my-bucket", &bucket_config)
.await?;
info!("\n---> Sending an event...");
let event = Event::new_test_event("my-bucket", "document.pdf", EventName::ObjectCreatedPut);
system
.send_event("my-bucket", "s3:ObjectCreated:Put", "document.pdf", event)
.await;
info!(
"✅ Event sent. Only the Webhook target should receive it. Check logs for warnings about the missing MQTT target."
);
tokio::time::sleep(Duration::from_secs(2)).await;
info!("\nDemo completed successfully");
Ok(())
}

View File

@@ -0,0 +1,100 @@
use notify::arn::TargetID;
use notify::global::notification_system;
// 1. Use the global accessor
use notify::{
init_logger, BucketNotificationConfig, Event, EventName, LogLevel, NotificationError, KVS,
};
use std::time::Duration;
use tracing::info;
#[tokio::main]
async fn main() -> Result<(), NotificationError> {
init_logger(LogLevel::Debug);
// Obtain the global NotificationSystem instance
let system = notification_system();
// --- Initial configuration ---
let mut config = notify::Config::new();
// Webhook target
let mut webhook_kvs = KVS::new();
webhook_kvs.set("enable", "on");
webhook_kvs.set("endpoint", "http://127.0.0.1:3020/webhook");
// webhook_kvs.set("queue_dir", "./logs/webhook");
webhook_kvs.set(
"queue_dir",
"/Users/qun/Documents/rust/rustfs/notify/logs/webhook",
);
let mut webhook_targets = std::collections::HashMap::new();
webhook_targets.insert("1".to_string(), webhook_kvs);
config.insert("notify_webhook".to_string(), webhook_targets);
// Load the initial configuration and initialize the system
*system.config.write().await = config;
system.init().await?;
info!("✅ System initialized with Webhook target.");
tokio::time::sleep(Duration::from_secs(1)).await;
// --- 2. Dynamically update the system configuration: add an MQTT target ---
info!("\n---> Dynamically adding MQTT target...");
let mut mqtt_kvs = KVS::new();
mqtt_kvs.set("enable", "on");
mqtt_kvs.set("broker", "mqtt://localhost:1883");
mqtt_kvs.set("topic", "rustfs/events");
mqtt_kvs.set("qos", "1");
mqtt_kvs.set("username", "test");
mqtt_kvs.set("password", "123456");
mqtt_kvs.set("queue_limit", "10000");
// mqtt_kvs.set("queue_dir", "./logs/mqtt");
mqtt_kvs.set(
"queue_dir",
"/Users/qun/Documents/rust/rustfs/notify/logs/mqtt",
);
system
.set_target_config("notify_mqtt", "1", mqtt_kvs)
.await?;
info!("✅ MQTT target added and system reloaded.");
tokio::time::sleep(Duration::from_secs(1)).await;
// --- 3. Load and manage the bucket configuration ---
info!("\n---> Loading bucket notification config...");
let mut bucket_config = BucketNotificationConfig::new("us-east-1");
bucket_config.add_rule(
&[EventName::ObjectCreatedPut],
"*".to_string(),
TargetID::new("1".to_string(), "webhook".to_string()),
);
bucket_config.add_rule(
&[EventName::ObjectCreatedPut],
"*".to_string(),
TargetID::new("1".to_string(), "mqtt".to_string()),
);
system
.load_bucket_notification_config("my-bucket", &bucket_config)
.await?;
info!("✅ Bucket 'my-bucket' config loaded.");
// --- Send an event ---
info!("\n---> Sending an event...");
let event = Event::new_test_event("my-bucket", "document.pdf", EventName::ObjectCreatedPut);
system
.send_event("my-bucket", "s3:ObjectCreated:Put", "document.pdf", event)
.await;
info!("✅ Event sent. Both Webhook and MQTT targets should receive it.");
tokio::time::sleep(Duration::from_secs(2)).await;
// --- Dynamically remove configuration ---
info!("\n---> Dynamically removing Webhook target...");
system.remove_target_config("notify_webhook", "1").await?;
info!("✅ Webhook target removed and system reloaded.");
info!("\n---> Removing bucket notification config...");
system.remove_bucket_notification_config("my-bucket").await;
info!("✅ Bucket 'my-bucket' config removed.");
info!("\nDemo completed successfully");
Ok(())
}

View File

@@ -1,17 +1,53 @@
use axum::routing::get;
use axum::{extract::Json, http::StatusCode, routing::post, Router};
use axum::{
extract::Json,
http::{HeaderMap, Response, StatusCode},
routing::post,
Router,
};
use serde_json::Value;
use std::time::{SystemTime, UNIX_EPOCH};
use axum::extract::Query;
use serde::Deserialize;
#[derive(Deserialize)]
struct ResetParams {
reason: Option<String>,
}
// A global counter for the number of received requests
use std::sync::atomic::{AtomicU64, Ordering};
static WEBHOOK_COUNT: AtomicU64 = AtomicU64::new(0);
#[tokio::main]
async fn main() {
// Build the application
let app = Router::new()
.route("/webhook", post(receive_webhook))
.route(
"/webhook/reset/{reason}",
get(reset_webhook_count_with_path),
)
.route("/webhook/reset", get(reset_webhook_count))
.route("/webhook", get(receive_webhook));
// Start the server
let listener = tokio::net::TcpListener::bind("0.0.0.0:3020").await.unwrap();
println!("Server running on http://0.0.0.0:3020");
let addr = "0.0.0.0:3020";
let listener = tokio::net::TcpListener::bind(addr).await.unwrap();
println!("Server running on {}", addr);
// Run a health self-check once the service is up
tokio::spawn(async move {
// Give the server a moment to start
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
match is_service_active(addr).await {
Ok(true) => println!("服务健康检查:成功 - 服务正常运行"),
Ok(false) => eprintln!("服务健康检查:失败 - 服务未响应"),
Err(e) => eprintln!("服务健康检查错误:{}", e),
}
});
// Set up shutdown signal handling
tokio::select! {
@@ -26,9 +62,93 @@ async fn main() {
}
}
/// Resets the WEBHOOK_COUNT value, taking the reason from the path.
async fn reset_webhook_count_with_path(
axum::extract::Path(reason): axum::extract::Path<String>,
) -> Response<String> {
// Print the current counter value
let current_count = WEBHOOK_COUNT.load(Ordering::SeqCst);
println!("Current webhook count: {}", current_count);
println!("Reset webhook count, reason: {}", reason);
// Reset the counter to 0
WEBHOOK_COUNT.store(0, Ordering::SeqCst);
println!("Webhook count has been reset to 0.");
Response::builder()
.header("Foo", "Bar")
.status(StatusCode::OK)
.body(format!(
"Webhook count reset successfully. Previous count: {}. Reason: {}",
current_count, reason
))
.unwrap()
}
/// Resets the WEBHOOK_COUNT value.
/// The counter can be reset by calling this endpoint.
async fn reset_webhook_count(
Query(params): Query<ResetParams>,
headers: HeaderMap,
) -> Response<String> {
// Print the current counter value
let current_count = WEBHOOK_COUNT.load(Ordering::SeqCst);
println!("Current webhook count: {}", current_count);
let reason = params.reason.unwrap_or_else(|| "no reason provided".to_string());
println!("Reset webhook count, reason: {}", reason);
for header in headers {
let (key, value) = header;
println!("Header: {:?}: {:?}", key, value);
}
println!("Reset webhook count printed headers");
// Reset the counter to 0
WEBHOOK_COUNT.store(0, Ordering::SeqCst);
println!("Webhook count has been reset to 0.");
Response::builder()
.header("Foo", "Bar")
.status(StatusCode::OK)
.body(format!(
"Webhook count reset successfully current_count:{}",
current_count
))
.unwrap()
}
async fn is_service_active(addr: &str) -> Result<bool, String> {
let socket_addr = tokio::net::lookup_host(addr)
.await
.map_err(|e| format!("无法解析主机:{}", e))?
.next()
.ok_or_else(|| "未找到地址".to_string())?;
println!("正在检查服务状态:{}", socket_addr);
match tokio::time::timeout(
std::time::Duration::from_secs(5),
tokio::net::TcpStream::connect(socket_addr),
)
.await
{
Ok(Ok(_)) => Ok(true),
Ok(Err(e)) => {
if e.kind() == std::io::ErrorKind::ConnectionRefused {
Ok(false)
} else {
Err(format!("连接失败:{}", e))
}
}
Err(_) => Err("连接超时".to_string()),
}
}
async fn receive_webhook(Json(payload): Json<Value>) -> StatusCode {
let start = SystemTime::now();
let since_the_epoch = start.duration_since(UNIX_EPOCH).expect("Time went backwards");
let since_the_epoch = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
// get the number of seconds since the unix era
let seconds = since_the_epoch.as_secs();
@@ -37,12 +157,20 @@ async fn receive_webhook(Json(payload): Json<Value>) -> StatusCode {
let (year, month, day, hour, minute, second) = convert_seconds_to_date(seconds);
// output result
println!("current time:{:04}-{:02}-{:02} {:02}:{:02}:{:02}", year, month, day, hour, minute, second);
println!(
"current time:{:04}-{:02}-{:02} {:02}:{:02}:{:02}",
year, month, day, hour, minute, second
);
println!(
"received a webhook request time:{} content:\n {}",
seconds,
serde_json::to_string_pretty(&payload).unwrap()
);
WEBHOOK_COUNT.fetch_add(1, Ordering::SeqCst);
println!(
"Total webhook requests received: {}",
WEBHOOK_COUNT.load(Ordering::SeqCst)
);
StatusCode::OK
}
@@ -93,5 +221,12 @@ fn convert_seconds_to_date(seconds: u64) -> (u32, u32, u32, u32, u32, u32) {
// calculate the number of seconds
second += total_seconds;
(year as u32, month as u32, day as u32, hour as u32, minute as u32, second as u32)
(
year as u32,
month as u32,
day as u32,
hour as u32,
minute as u32,
second as u32,
)
}

View File

@@ -1,112 +0,0 @@
use crate::config::AdapterConfig;
use crate::{Error, Event};
use async_trait::async_trait;
use std::sync::Arc;
#[cfg(feature = "mqtt")]
pub(crate) mod mqtt;
#[cfg(feature = "webhook")]
pub(crate) mod webhook;
#[allow(dead_code)]
const NOTIFY_KAFKA_SUB_SYS: &str = "notify_kafka";
#[allow(dead_code)]
const NOTIFY_MQTT_SUB_SYS: &str = "notify_mqtt";
#[allow(dead_code)]
const NOTIFY_MY_SQL_SUB_SYS: &str = "notify_mysql";
#[allow(dead_code)]
const NOTIFY_NATS_SUB_SYS: &str = "notify_nats";
#[allow(dead_code)]
const NOTIFY_NSQ_SUB_SYS: &str = "notify_nsq";
#[allow(dead_code)]
const NOTIFY_ES_SUB_SYS: &str = "notify_elasticsearch";
#[allow(dead_code)]
const NOTIFY_AMQP_SUB_SYS: &str = "notify_amqp";
#[allow(dead_code)]
const NOTIFY_POSTGRES_SUB_SYS: &str = "notify_postgres";
#[allow(dead_code)]
const NOTIFY_REDIS_SUB_SYS: &str = "notify_redis";
const NOTIFY_WEBHOOK_SUB_SYS: &str = "notify_webhook";
/// The `ChannelAdapterType` enum represents the different types of channel adapters.
///
/// It is used to identify the type of adapter being used in the system.
///
/// # Variants
///
/// - `Webhook`: Represents a webhook adapter.
/// - `Kafka`: Represents a Kafka adapter.
/// - `Mqtt`: Represents an MQTT adapter.
///
/// # Example
///
/// ```
/// use rustfs_notify::ChannelAdapterType;
///
/// let adapter_type = ChannelAdapterType::Webhook;
/// match adapter_type {
/// ChannelAdapterType::Webhook => println!("Using webhook adapter"),
/// ChannelAdapterType::Kafka => println!("Using Kafka adapter"),
/// ChannelAdapterType::Mqtt => println!("Using MQTT adapter"),
/// }
/// ```
pub enum ChannelAdapterType {
Webhook,
Kafka,
Mqtt,
}
impl ChannelAdapterType {
pub fn as_str(&self) -> &'static str {
match self {
ChannelAdapterType::Webhook => "webhook",
ChannelAdapterType::Kafka => "kafka",
ChannelAdapterType::Mqtt => "mqtt",
}
}
}
impl std::fmt::Display for ChannelAdapterType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ChannelAdapterType::Webhook => write!(f, "webhook"),
ChannelAdapterType::Kafka => write!(f, "kafka"),
ChannelAdapterType::Mqtt => write!(f, "mqtt"),
}
}
}
/// The `ChannelAdapter` trait defines the interface for all channel adapters.
#[async_trait]
pub trait ChannelAdapter: Send + Sync + 'static {
/// Returns the adapter's name.
fn name(&self) -> String;
/// Sends an event to the channel.
async fn send(&self, event: &Event) -> Result<(), Error>;
}
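// A minimal, hypothetical implementation of the `ChannelAdapter` trait above:
// an adapter that only logs events, showing the shape an implementation takes
// without any network I/O.
struct LogAdapter;

#[async_trait]
impl ChannelAdapter for LogAdapter {
    fn name(&self) -> String {
        "log".to_string()
    }

    async fn send(&self, event: &Event) -> Result<(), Error> {
        tracing::info!("event: {}", serde_json::to_string(event).map_err(Error::Serde)?);
        Ok(())
    }
}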
/// Creates channel adapters based on the provided configuration.
pub async fn create_adapters(configs: Vec<AdapterConfig>) -> Result<Vec<Arc<dyn ChannelAdapter>>, Error> {
let mut adapters: Vec<Arc<dyn ChannelAdapter>> = Vec::new();
for config in configs {
match config {
#[cfg(feature = "webhook")]
AdapterConfig::Webhook(webhook_config) => {
webhook_config.validate().map_err(Error::ConfigError)?;
adapters.push(Arc::new(webhook::WebhookAdapter::new(webhook_config.clone()).await));
}
#[cfg(feature = "mqtt")]
AdapterConfig::Mqtt(mqtt_config) => {
let (mqtt, mut event_loop) = mqtt::MqttAdapter::new(mqtt_config);
tokio::spawn(async move { while event_loop.poll().await.is_ok() {} });
adapters.push(Arc::new(mqtt));
}
#[cfg(not(feature = "webhook"))]
AdapterConfig::Webhook(_) => return Err(Error::FeatureDisabled("webhook")),
#[cfg(not(feature = "mqtt"))]
AdapterConfig::Mqtt(_) => return Err(Error::FeatureDisabled("mqtt")),
}
}
Ok(adapters)
}

View File

@@ -1,57 +0,0 @@
use crate::config::mqtt::MqttConfig;
use crate::{ChannelAdapter, ChannelAdapterType};
use crate::{Error, Event};
use async_trait::async_trait;
use rumqttc::{AsyncClient, MqttOptions, QoS};
use std::time::Duration;
use tokio::time::sleep;
/// MQTT adapter for sending events to an MQTT broker.
pub struct MqttAdapter {
client: AsyncClient,
topic: String,
max_retries: u32,
}
impl MqttAdapter {
/// Creates a new MQTT adapter.
pub fn new(config: &MqttConfig) -> (Self, rumqttc::EventLoop) {
let mqtt_options = MqttOptions::new(&config.client_id, &config.broker, config.port);
let (client, event_loop) = rumqttc::AsyncClient::new(mqtt_options, 10);
(
Self {
client,
topic: config.topic.clone(),
max_retries: config.max_retries,
},
event_loop,
)
}
}
#[async_trait]
impl ChannelAdapter for MqttAdapter {
fn name(&self) -> String {
ChannelAdapterType::Mqtt.to_string()
}
async fn send(&self, event: &Event) -> Result<(), Error> {
let payload = serde_json::to_string(event).map_err(Error::Serde)?;
let mut attempt = 0;
loop {
match self
.client
.publish(&self.topic, QoS::AtLeastOnce, false, payload.clone())
.await
{
Ok(()) => return Ok(()),
Err(e) if attempt < self.max_retries => {
attempt += 1;
tracing::warn!("MQTT attempt {} failed: {}. Retrying...", attempt, e);
sleep(Duration::from_secs(2u64.pow(attempt))).await;
}
Err(e) => return Err(Error::Mqtt(e)),
}
}
}
}
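// Usage sketch (the `demo` helper is hypothetical): rumqttc only makes
// progress while its EventLoop is polled, so the loop must be driven on a
// background task before `send` can complete, mirroring `create_adapters`.
async fn demo(config: &MqttConfig, event: &Event) -> Result<(), Error> {
    let (adapter, mut event_loop) = MqttAdapter::new(config);
    tokio::spawn(async move { while event_loop.poll().await.is_ok() {} });
    adapter.send(event).await
}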

View File

@@ -1,260 +0,0 @@
use crate::config::STORE_PREFIX;
use crate::error::Error;
use crate::store::Store;
use crate::{ChannelAdapter, ChannelAdapterType, QueueStore};
use crate::{Event, DEFAULT_RETRY_INTERVAL};
use async_trait::async_trait;
use reqwest::header::{HeaderMap, HeaderName, HeaderValue};
use reqwest::{self, Client, Identity, RequestBuilder};
use rustfs_config::notify::webhook::WebhookArgs;
use std::fs;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tokio::time::sleep;
use ChannelAdapterType::Webhook;
// Webhook constants
pub const WEBHOOK_ENDPOINT: &str = "endpoint";
pub const WEBHOOK_AUTH_TOKEN: &str = "auth_token";
pub const WEBHOOK_QUEUE_DIR: &str = "queue_dir";
pub const WEBHOOK_QUEUE_LIMIT: &str = "queue_limit";
pub const WEBHOOK_CLIENT_CERT: &str = "client_cert";
pub const WEBHOOK_CLIENT_KEY: &str = "client_key";
pub const ENV_WEBHOOK_ENABLE: &str = "RUSTFS_NOTIFY_WEBHOOK_ENABLE";
pub const ENV_WEBHOOK_ENDPOINT: &str = "RUSTFS_NOTIFY_WEBHOOK_ENDPOINT";
pub const ENV_WEBHOOK_AUTH_TOKEN: &str = "RUSTFS_NOTIFY_WEBHOOK_AUTH_TOKEN";
pub const ENV_WEBHOOK_QUEUE_DIR: &str = "RUSTFS_NOTIFY_WEBHOOK_QUEUE_DIR";
pub const ENV_WEBHOOK_QUEUE_LIMIT: &str = "RUSTFS_NOTIFY_WEBHOOK_QUEUE_LIMIT";
pub const ENV_WEBHOOK_CLIENT_CERT: &str = "RUSTFS_NOTIFY_WEBHOOK_CLIENT_CERT";
pub const ENV_WEBHOOK_CLIENT_KEY: &str = "RUSTFS_NOTIFY_WEBHOOK_CLIENT_KEY";
/// Webhook adapter for sending events to a webhook endpoint.
pub struct WebhookAdapter {
/// Configuration information
config: WebhookArgs,
/// Event storage queues
store: Option<Arc<QueueStore<Event>>>,
/// HTTP client
client: Client,
}
impl WebhookAdapter {
/// Creates a new Webhook adapter.
pub async fn new(config: WebhookArgs) -> Self {
let mut builder = Client::builder();
let client = if !config.client_cert.is_empty() && !config.client_key.is_empty() {
let cert_path = PathBuf::from(&config.client_cert);
let key_path = PathBuf::from(&config.client_key);
// Check if the certificate file exists
if !cert_path.exists() || !key_path.exists() {
tracing::warn!("Certificate files not found, falling back to default client");
builder.build()
} else {
// Try to read and load the certificate
match (fs::read(&cert_path), fs::read(&key_path)) {
(Ok(cert_data), Ok(key_data)) => {
// Create an identity
let mut pem_data = cert_data;
pem_data.extend_from_slice(&key_data);
match Identity::from_pem(&pem_data) {
Ok(identity) => {
tracing::info!("Successfully loaded client certificate");
builder.identity(identity).build()
}
Err(e) => {
tracing::warn!("Failed to create identity from PEM: {}, falling back to default client", e);
builder.build()
}
}
}
_ => {
tracing::warn!("Failed to read certificate files, falling back to default client");
builder.build()
}
}
}
} else {
builder.build()
}
.unwrap_or_else(|e| {
tracing::error!("Failed to create HTTP client: {}", e);
reqwest::Client::new()
});
// create a queue store if enabled
let store = if !config.queue_dir.is_empty() {
let store_path = PathBuf::from(&config.queue_dir).join(format!(
"{}-{}-{}",
STORE_PREFIX,
Webhook.as_str(),
"identifier".to_string()
));
let queue_limit = if config.queue_limit > 0 {
config.queue_limit
} else {
crate::config::default_queue_limit()
};
let store = QueueStore::new(store_path, queue_limit, Some(".event"));
if let Err(e) = store.open().await {
tracing::error!("Unable to open queue storage: {}", e);
None
} else {
Some(Arc::new(store))
}
} else {
None
};
Self { config, store, client }
}
/// Handle backlog events in storage
pub async fn process_backlog(&self) -> Result<(), Error> {
if let Some(store) = &self.store {
let keys = store.list().await;
for key in keys {
let key_clone = key.clone();
match store.get_multiple(key).await {
Ok(events) => {
for event in events {
if let Err(e) = self.send_with_retry(&event).await {
tracing::error!("Processing of backlog events failed: {}", e);
// If it still fails, the event stays in the queue
break;
}
}
// Delete after successful processing
if let Err(e) = store.del(key_clone).await {
tracing::error!("Failed to delete a handled event: {}", e);
}
}
Err(e) => {
tracing::error!("Failed to read events from storage: {}", e);
// delete the broken entries
// If the event cannot be read, it may be corrupted, delete it
if let Err(del_err) = store.del(key_clone).await {
tracing::error!("Failed to delete a corrupted event: {}", del_err);
}
}
}
}
}
Ok(())
}
/// Sends events to the webhook endpoint with retry logic
async fn send_with_retry(&self, event: &Event) -> Result<(), Error> {
let retry_interval = Duration::from_secs(DEFAULT_RETRY_INTERVAL);
let mut attempts = 0;
loop {
attempts += 1;
match self.send_request(event).await {
Ok(_) => return Ok(()),
Err(e) => {
tracing::warn!("Send to webhook fails and will be retried after 3 seconds:{}", e);
sleep(retry_interval).await;
if let Some(store) = &self.store {
// store in a queue for later processing
tracing::warn!("The maximum number of retries is reached, and the event is stored in a queue:{}", e);
if let Err(store_err) = store.put(event.clone()).await {
tracing::error!("Events cannot be stored to a queue:{}", store_err);
}
return Err(e);
}
}
}
}
}
/// Send a single HTTP request
async fn send_request(&self, event: &Event) -> Result<(), Error> {
// Send a request
let response = self
.build_request(event)
.send()
.await
.map_err(|e| Error::Custom(format!("Sending a webhook request failed:{}", e)))?;
// Check the response status
if !response.status().is_success() {
let status = response.status();
let body = response
.text()
.await
.unwrap_or_else(|_| "Unable to read response body".to_string());
return Err(Error::Custom(format!("Webhook request failed, status code:{},response:{}", status, body)));
}
Ok(())
}
/// Builds the request to send the event.
fn build_request(&self, event: &Event) -> RequestBuilder {
let mut request = self
.client
.post(&self.config.endpoint)
.json(event)
.header("Content-Type", "application/json");
if let token = &self.config.auth_token {
let tokens: Vec<&str> = token.split_whitespace().collect();
match tokens.len() {
2 => request = request.header("Authorization", token),
1 => request = request.header("Authorization", format!("Bearer {}", token)),
_ => tracing::warn!("Invalid auth token format, skipping Authorization header"),
}
}
if let Some(headers) = &self.config.custom_headers {
let mut header_map = HeaderMap::new();
for (key, value) in headers {
if let (Ok(name), Ok(val)) = (HeaderName::from_bytes(key.as_bytes()), HeaderValue::from_str(value)) {
header_map.insert(name, val);
}
}
request = request.headers(header_map);
}
request
}
/// Save the event to the queue
async fn save_to_queue(&self, event: &Event) -> Result<(), Error> {
if let Some(store) = &self.store {
store.put(event.clone()).await.map_err(|e| {
tracing::error!("Failed to save event to queue: {}", e);
Error::Custom(format!("Failed to save event to queue: {}", e))
})?;
}
Ok(())
}
}
#[async_trait]
impl ChannelAdapter for WebhookAdapter {
fn name(&self) -> String {
Webhook.to_string()
}
async fn send(&self, event: &Event) -> Result<(), Error> {
// Deal with the backlog of events first
let _ = self.process_backlog().await;
// Send the current event
match self.send_with_retry(event).await {
Ok(_) => Ok(()),
Err(e) => {
// If the send fails and the queue is enabled, save to the queue
if let Some(_) = &self.store {
tracing::warn!("Failed to send the event and saved to the queue: {}", e);
self.save_to_queue(event).await?;
return Ok(());
}
Err(e)
}
}
}
}
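// Flow sketch for the adapter above (the `demo_send` helper is hypothetical):
// send() drains the backlog first, then delivers the current event; when a
// queue is configured, a delivery failure is absorbed by persisting the event
// and reporting success to the caller.
async fn demo_send(adapter: &WebhookAdapter, event: &Event) -> Result<(), Error> {
    adapter.send(event).await
}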

110
crates/notify/src/args.rs Normal file
View File

@@ -0,0 +1,110 @@
use crate::{Event, EventName};
use std::collections::HashMap;
/// Event arguments
#[derive(Debug, Clone)]
pub struct EventArgs {
pub event_name: EventName,
pub bucket_name: String,
pub object_name: String,
pub object_size: Option<i64>,
pub object_etag: Option<String>,
pub object_version_id: Option<String>,
pub object_content_type: Option<String>,
pub object_user_metadata: Option<HashMap<String, String>>,
pub req_params: HashMap<String, String>,
pub resp_elements: HashMap<String, String>,
pub host: String,
pub user_agent: String,
}
impl EventArgs {
/// Converts to a notification event
pub fn to_event(&self) -> Event {
let event_time = chrono::Utc::now();
let unique_id = format!("{:X}", event_time.timestamp_nanos_opt().unwrap_or(0));
let mut resp_elements = HashMap::new();
if let Some(request_id) = self.resp_elements.get("requestId") {
resp_elements.insert("x-amz-request-id".to_string(), request_id.clone());
}
if let Some(node_id) = self.resp_elements.get("nodeId") {
resp_elements.insert("x-amz-id-2".to_string(), node_id.clone());
}
// RustFS-specific custom elements
// Note: obtaining the endpoint here may need a separate implementation in Rust
resp_elements.insert("x-rustfs-origin-endpoint".to_string(), "".to_string());
// Add the deployment ID
resp_elements.insert("x-rustfs-deployment-id".to_string(), "".to_string());
if let Some(content_length) = self.resp_elements.get("content-length") {
resp_elements.insert("content-length".to_string(), content_length.clone());
}
let key_name = &self.object_name;
// Note: URL encoding may be needed here depending on the escape parameter
let mut event = Event {
event_version: "2.0".to_string(),
event_source: "rustfs:s3".to_string(),
aws_region: self.req_params.get("region").cloned().unwrap_or_default(),
event_time,
event_name: self.event_name,
user_identity: crate::event::Identity {
principal_id: self
.req_params
.get("principalId")
.cloned()
.unwrap_or_default(),
},
request_parameters: self.req_params.clone(),
response_elements: resp_elements,
s3: crate::event::Metadata {
schema_version: "1.0".to_string(),
configuration_id: "Config".to_string(),
bucket: crate::event::Bucket {
name: self.bucket_name.clone(),
owner_identity: crate::event::Identity {
principal_id: self
.req_params
.get("principalId")
.cloned()
.unwrap_or_default(),
},
arn: format!("arn:aws:s3:::{}", self.bucket_name),
},
object: crate::event::Object {
key: key_name.clone(),
version_id: self.object_version_id.clone(),
sequencer: unique_id,
size: self.object_size,
etag: self.object_etag.clone(),
content_type: self.object_content_type.clone(),
user_metadata: Some(self.object_user_metadata.clone().unwrap_or_default()),
},
},
source: crate::event::Source {
host: self.host.clone(),
port: "".to_string(),
user_agent: self.user_agent.clone(),
},
};
// Check whether this is a removal event; if so, certain fields should be empty
let is_removed_event = matches!(
self.event_name,
EventName::ObjectRemovedDelete | EventName::ObjectRemovedDeleteMarkerCreated
);
if is_removed_event {
event.s3.object.etag = None;
event.s3.object.size = None;
event.s3.object.content_type = None;
event.s3.object.user_metadata = None;
}
event
}
}
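// A hypothetical call site: building EventArgs for a PUT and converting it
// into the wire-format Event that targets receive; field values are
// illustrative only.
fn demo_event() -> Event {
    let args = EventArgs {
        event_name: EventName::ObjectCreatedPut,
        bucket_name: "my-bucket".to_string(),
        object_name: "document.pdf".to_string(),
        object_size: Some(1024),
        object_etag: Some("etag123".to_string()),
        object_version_id: None,
        object_content_type: Some("application/pdf".to_string()),
        object_user_metadata: None,
        req_params: HashMap::from([("region".to_string(), "us-east-1".to_string())]),
        resp_elements: HashMap::new(),
        host: "localhost".to_string(),
        user_agent: "rustfs-test".to_string(),
    };
    args.to_event()
}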

243
crates/notify/src/arn.rs Normal file
View File

@@ -0,0 +1,243 @@
use crate::TargetError;
use const_str::concat;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::fmt;
use std::str::FromStr;
use thiserror::Error;
pub(crate) const DEFAULT_ARN_PARTITION: &str = "rustfs";
pub(crate) const DEFAULT_ARN_SERVICE: &str = "sqs";
/// Default ARN prefix for SQS
/// "arn:rustfs:sqs:"
const ARN_PREFIX: &str = concat!("arn:", DEFAULT_ARN_PARTITION, ":", DEFAULT_ARN_SERVICE, ":");
#[derive(Debug, Error)]
pub enum TargetIDError {
#[error("Invalid TargetID format '{0}', expect 'ID:Name'")]
InvalidFormat(String),
}
/// Target ID, used to identify notification targets
#[derive(Debug, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct TargetID {
pub id: String,
pub name: String,
}
impl TargetID {
pub fn new(id: String, name: String) -> Self {
Self { id, name }
}
/// Convert to string representation
pub fn to_id_string(&self) -> String {
format!("{}:{}", self.id, self.name)
}
/// Create an ARN
pub fn to_arn(&self, region: &str) -> ARN {
ARN {
target_id: self.clone(),
region: region.to_string(),
service: DEFAULT_ARN_SERVICE.to_string(), // Default Service
partition: DEFAULT_ARN_PARTITION.to_string(), // Default partition
}
}
}
impl fmt::Display for TargetID {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}:{}", self.id, self.name)
}
}
impl FromStr for TargetID {
type Err = TargetIDError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.splitn(2, ':').collect();
if parts.len() == 2 {
Ok(TargetID {
id: parts[0].to_string(),
name: parts[1].to_string(),
})
} else {
Err(TargetIDError::InvalidFormat(s.to_string()))
}
}
}
impl Serialize for TargetID {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.to_id_string())
}
}
impl<'de> Deserialize<'de> for TargetID {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
TargetID::from_str(&s).map_err(serde::de::Error::custom)
}
}
#[derive(Debug, Error)]
pub enum ArnError {
#[error("Invalid ARN format '{0}'")]
InvalidFormat(String),
#[error("ARN component missing")]
MissingComponents,
}
/// ARN - AWS resource name representation
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ARN {
pub target_id: TargetID,
pub region: String,
// Service types, such as "sqs", "sns", "lambda", etc. This defaults to "sqs" to match the Go example.
pub service: String,
// Partitions such as "aws", "aws-cn", or custom ones such as "rustfs", etc.
pub partition: String,
}
impl ARN {
pub fn new(target_id: TargetID, region: String) -> Self {
ARN {
target_id,
region,
service: DEFAULT_ARN_SERVICE.to_string(), // Default is sqs
partition: DEFAULT_ARN_PARTITION.to_string(), // Default is rustfs partition
}
}
/// Returns the string representation of ARN
/// Returns the ARN string in the format "{ARN_PREFIX}{region}:{target_id}"
#[allow(clippy::inherent_to_string)]
pub fn to_arn_string(&self) -> String {
if self.target_id.id.is_empty() && self.target_id.name.is_empty() && self.region.is_empty()
{
return String::new();
}
// ARN_PREFIX already ends with ':', so no extra separator is needed after it
format!(
"{}{}:{}",
ARN_PREFIX,
self.region,
self.target_id.to_id_string()
)
}
/// Parsing ARN from string
pub fn parse(s: &str) -> Result<Self, TargetError> {
if !s.starts_with(ARN_PREFIX) {
return Err(TargetError::InvalidARN(s.to_string()));
}
let tokens: Vec<&str> = s.split(':').collect();
if tokens.len() != 6 {
return Err(TargetError::InvalidARN(s.to_string()));
}
if tokens[4].is_empty() || tokens[5].is_empty() {
return Err(TargetError::InvalidARN(s.to_string()));
}
Ok(ARN {
region: tokens[3].to_string(),
target_id: TargetID {
id: tokens[4].to_string(),
name: tokens[5].to_string(),
},
service: tokens[2].to_string(), // Service Type
partition: tokens[1].to_string(), // Partition
})
}
}
impl fmt::Display for ARN {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.target_id.id.is_empty() && self.target_id.name.is_empty() && self.region.is_empty()
{
// Returns an empty string if all parts are empty
return Ok(());
}
write!(
f,
"arn:{}:{}:{}:{}:{}",
self.partition, self.service, self.region, self.target_id.id, self.target_id.name
)
}
}
impl FromStr for ARN {
type Err = ArnError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.split(':').collect();
if parts.len() < 6 {
return Err(ArnError::InvalidFormat(s.to_string()));
}
if parts[0] != "arn" {
return Err(ArnError::InvalidFormat(s.to_string()));
}
let partition = parts[1].to_string();
let service = parts[2].to_string();
let region = parts[3].to_string();
let id = parts[4].to_string();
let name = parts[5..].join(":"); // The name section may contain colons, although this is not usually the case in SQS ARN
if id.is_empty() || name.is_empty() {
return Err(ArnError::MissingComponents);
}
Ok(ARN {
target_id: TargetID { id, name },
region,
service,
partition,
})
}
}
// Serialization implementation
impl Serialize for ARN {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.to_arn_string())
}
}
impl<'de> Deserialize<'de> for ARN {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
// deserializer.deserialize_str(ARNVisitor)
let s = String::deserialize(deserializer)?;
if s.is_empty() {
// Handle an empty ARN string by constructing a default instance
// (alternatively this could return an error, depending on business logic);
// here we build an ARN with an empty TargetID and region
return Ok(ARN {
target_id: TargetID {
id: String::new(),
name: String::new(),
},
region: String::new(),
service: DEFAULT_ARN_SERVICE.to_string(),
partition: DEFAULT_ARN_PARTITION.to_string(),
});
}
ARN::from_str(&s).map_err(serde::de::Error::custom)
}
}
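// Round-trip sketch: with the layout "arn:<partition>:<service>:<region>:<id>:<name>",
// construction and parsing agree (types as defined above).
fn demo_arn() {
    let arn = ARN::new(
        TargetID::new("1".to_string(), "webhook".to_string()),
        "us-east-1".to_string(),
    );
    assert_eq!(arn.to_arn_string(), "arn:rustfs:sqs:us-east-1:1:webhook");
    let parsed = ARN::parse("arn:rustfs:sqs:us-east-1:1:webhook").unwrap();
    assert_eq!(parsed.target_id.to_id_string(), "1:webhook");
}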

View File

@@ -1,105 +1,163 @@
use rustfs_config::notify::mqtt::MQTTArgs;
use rustfs_config::notify::webhook::WebhookArgs;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::env;
/// The default configuration file name
const DEFAULT_CONFIG_FILE: &str = "notify";
/// The prefix for the configuration file
pub const STORE_PREFIX: &str = "rustfs";
/// The default retry interval for the webhook adapter
pub const DEFAULT_RETRY_INTERVAL: u64 = 3;
/// The default maximum retry count for the webhook adapter
pub const DEFAULT_MAX_RETRIES: u32 = 3;
/// The default notification queue limit
pub const DEFAULT_NOTIFY_QUEUE_LIMIT: u64 = 10000;
/// Provide temporary directories as default storage paths
pub(crate) fn default_queue_dir() -> String {
env::var("EVENT_QUEUE_DIR").unwrap_or_else(|e| {
tracing::info!("Failed to get `EVENT_QUEUE_DIR` failed err: {}", e.to_string());
env::temp_dir().join(DEFAULT_CONFIG_FILE).to_string_lossy().to_string()
})
/// Represents a key-value pair in configuration
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct KV {
pub key: String,
pub value: String,
}
/// Provides the recommended default channel capacity for high concurrency systems
pub(crate) fn default_queue_limit() -> u64 {
env::var("EVENT_CHANNEL_CAPACITY")
.unwrap_or_else(|_| DEFAULT_NOTIFY_QUEUE_LIMIT.to_string())
.parse()
.unwrap_or(DEFAULT_NOTIFY_QUEUE_LIMIT) // Default to 10000 if parsing fails
/// Represents a collection of key-value pairs
#[derive(Debug, Clone, Default)]
pub struct KVS {
kvs: Vec<KV>,
}
/// Configuration for the adapter.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum AdapterConfig {
Webhook(WebhookArgs),
Mqtt(MQTTArgs),
}
/// Event Notifier Configuration
/// This struct contains the configuration for the event notifier system,
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct EventNotifierConfig {
/// A collection of webhook configurations, with the key being a unique identifier
#[serde(default)]
pub webhook: HashMap<String, WebhookArgs>,
///MQTT configuration collection, with the key being a unique identifier
#[serde(default)]
pub mqtt: HashMap<String, MQTTArgs>,
}
impl EventNotifierConfig {
/// Create a new default configuration
impl KVS {
/// Creates a new empty KVS
pub fn new() -> Self {
Self::default()
KVS { kvs: Vec::new() }
}
/// Load the configuration from the file
pub fn event_load_config(_config_dir: Option<String>) -> EventNotifierConfig {
// The existing implementation remains the same, but returns EventNotifierConfig
// ...
/// Sets a key-value pair
pub fn set(&mut self, key: impl Into<String>, value: impl Into<String>) {
let key = key.into();
let value = value.into();
Self::default()
}
/// Deserialization configuration
pub fn unmarshal(data: &[u8]) -> common::error::Result<EventNotifierConfig> {
let m: EventNotifierConfig = serde_json::from_slice(data)?;
Ok(m)
}
/// Serialization configuration
pub fn marshal(&self) -> common::error::Result<Vec<u8>> {
let data = serde_json::to_vec(&self)?;
Ok(data)
}
/// Convert this configuration to a list of adapter configurations
pub fn to_adapter_configs(&self) -> Vec<AdapterConfig> {
let mut adapters = Vec::new();
// Add all enabled webhook configurations
for webhook in self.webhook.values() {
if webhook.enable {
adapters.push(AdapterConfig::Webhook(webhook.clone()));
// Update existing value or add new
for kv in &mut self.kvs {
if kv.key == key {
kv.value = value;
return;
}
}
// Add all enabled MQTT configurations
for mqtt in self.mqtt.values() {
if mqtt.enable {
adapters.push(AdapterConfig::Mqtt(mqtt.clone()));
}
}
self.kvs.push(KV { key, value });
}
adapters
/// Looks up a value by key
pub fn lookup(&self, key: &str) -> Option<&str> {
self.kvs
.iter()
.find(|kv| kv.key == key)
.map(|kv| kv.value.as_str())
}
/// Deletes a key-value pair
pub fn delete(&mut self, key: &str) {
self.kvs.retain(|kv| kv.key != key);
}
/// Checks if the KVS is empty
pub fn is_empty(&self) -> bool {
self.kvs.is_empty()
}
/// Returns all keys
pub fn keys(&self) -> Vec<String> {
self.kvs.iter().map(|kv| kv.key.clone()).collect()
}
}
/// Represents the entire configuration
pub type Config = HashMap<String, HashMap<String, KVS>>;
/// Parses configuration from a string
pub fn parse_config(config_str: &str) -> Result<Config, String> {
let mut config = Config::new();
let mut current_section = String::new();
let mut current_subsection = String::new();
for line in config_str.lines() {
let line = line.trim();
if line.is_empty() || line.starts_with('#') {
continue;
}
// Parse sections
if line.starts_with('[') && line.ends_with(']') {
let section = line[1..line.len() - 1].trim();
if let Some((section_name, subsection)) = section.split_once(' ') {
current_section = section_name.to_string();
current_subsection = subsection.trim_matches('"').to_string();
} else {
current_section = section.to_string();
current_subsection = String::new();
}
continue;
}
// Parse key-value pairs
if let Some((key, value)) = line.split_once('=') {
let key = key.trim();
let value = value.trim();
let section = config.entry(current_section.clone()).or_default();
let kvs = section.entry(current_subsection.clone()).or_default();
kvs.set(key, value);
}
}
Ok(config)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_kvs() {
let mut kvs = KVS::new();
assert!(kvs.is_empty());
kvs.set("key1", "value1");
kvs.set("key2", "value2");
assert!(!kvs.is_empty());
assert_eq!(kvs.lookup("key1"), Some("value1"));
assert_eq!(kvs.lookup("key2"), Some("value2"));
assert_eq!(kvs.lookup("key3"), None);
kvs.set("key1", "new_value");
assert_eq!(kvs.lookup("key1"), Some("new_value"));
kvs.delete("key2");
assert_eq!(kvs.lookup("key2"), None);
}
#[test]
fn test_parse_config() {
let config_str = r#"
# Comment line
[notify_webhook "webhook1"]
enable = on
endpoint = http://example.com/webhook
auth_token = secret
[notify_mqtt "mqtt1"]
enable = on
broker = mqtt://localhost:1883
topic = rustfs/events
"#;
let config = parse_config(config_str).unwrap();
assert!(config.contains_key("notify_webhook"));
assert!(config.contains_key("notify_mqtt"));
let webhook = &config["notify_webhook"]["webhook1"];
assert_eq!(webhook.lookup("enable"), Some("on"));
assert_eq!(
webhook.lookup("endpoint"),
Some("http://example.com/webhook")
);
assert_eq!(webhook.lookup("auth_token"), Some("secret"));
let mqtt = &config["notify_mqtt"]["mqtt1"];
assert_eq!(mqtt.lookup("enable"), Some("on"));
assert_eq!(mqtt.lookup("broker"), Some("mqtt://localhost:1883"));
assert_eq!(mqtt.lookup("topic"), Some("rustfs/events"));
}
}

View File

@@ -1,403 +1,101 @@
use std::io;
use thiserror::Error;
use tokio::sync::mpsc::error;
use tokio::task::JoinError;
/// The `Error` enum represents all possible errors that can occur in the application.
/// It implements the `std::error::Error` trait and provides a way to convert various error types into a single error type.
#[derive(Error, Debug)]
pub enum Error {
#[error("Join error: {0}")]
JoinError(#[from] JoinError),
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
/// Error types for the store
#[derive(Debug, Error)]
pub enum StoreError {
#[error("I/O error: {0}")]
Io(#[from] io::Error),
#[error("Serialization error: {0}")]
Serde(#[from] serde_json::Error),
#[error("HTTP error: {0}")]
Http(#[from] reqwest::Error),
#[cfg(all(feature = "kafka", target_os = "linux"))]
#[error("Kafka error: {0}")]
Kafka(#[from] rdkafka::error::KafkaError),
#[cfg(feature = "mqtt")]
#[error("MQTT error: {0}")]
Mqtt(#[from] rumqttc::ClientError),
#[error("Channel send error: {0}")]
ChannelSend(#[from] Box<error::SendError<crate::event::Event>>),
#[error("Feature disabled: {0}")]
FeatureDisabled(&'static str),
#[error("Event bus already started")]
EventBusStarted,
#[error("necessary fields are missing:{0}")]
MissingField(&'static str),
#[error("field verification failed:{0}")]
ValidationError(&'static str),
#[error("Custom error: {0}")]
Custom(String),
Serialization(String),
#[error("Deserialization error: {0}")]
Deserialization(String),
#[error("Compression error: {0}")]
Compression(String),
#[error("Entry limit exceeded")]
LimitExceeded,
#[error("Entry not found")]
NotFound,
#[error("Invalid entry: {0}")]
Internal(String), // Internal error type
}
/// Error types for targets
#[derive(Debug, Error)]
pub enum TargetError {
#[error("Storage error: {0}")]
Storage(String),
#[error("Network error: {0}")]
Network(String),
#[error("Request error: {0}")]
Request(String),
#[error("Timeout error: {0}")]
Timeout(String),
#[error("Authentication error: {0}")]
Authentication(String),
#[error("Configuration error: {0}")]
ConfigError(String),
#[error("create adapter failed error: {0}")]
AdapterCreationFailed(String),
Configuration(String),
#[error("Encoding error: {0}")]
Encoding(String),
#[error("Serialization error: {0}")]
Serialization(String),
#[error("Target not connected")]
NotConnected,
#[error("Target initialization failed: {0}")]
Initialization(String),
#[error("Invalid ARN: {0}")]
InvalidARN(String),
#[error("Unknown error: {0}")]
Unknown(String),
#[error("Target is disabled")]
Disabled,
}
impl Error {
pub fn custom(msg: &str) -> Error {
Self::Custom(msg.to_string())
}
/// Error types for the notification system
#[derive(Debug, Error)]
pub enum NotificationError {
#[error("Target error: {0}")]
Target(#[from] TargetError),
#[error("Configuration error: {0}")]
Configuration(String),
#[error("ARN not found: {0}")]
ARNNotFound(String),
#[error("Invalid ARN: {0}")]
InvalidARN(String),
#[error("Bucket notification error: {0}")]
BucketNotification(String),
#[error("Rule configuration error: {0}")]
RuleConfiguration(String),
#[error("System initialization error: {0}")]
Initialization(String),
}
#[cfg(test)]
mod tests {
use super::*;
use std::error::Error as StdError;
use std::io;
use tokio::sync::mpsc;
#[test]
fn test_error_display() {
// Test error message display
let custom_error = Error::custom("test message");
assert_eq!(custom_error.to_string(), "Custom error: test message");
let feature_error = Error::FeatureDisabled("test feature");
assert_eq!(feature_error.to_string(), "Feature disabled: test feature");
let event_bus_error = Error::EventBusStarted;
assert_eq!(event_bus_error.to_string(), "Event bus already started");
let missing_field_error = Error::MissingField("required_field");
assert_eq!(missing_field_error.to_string(), "necessary fields are missing:required_field");
let validation_error = Error::ValidationError("invalid format");
assert_eq!(validation_error.to_string(), "field verification failed:invalid format");
let config_error = Error::ConfigError("invalid config".to_string());
assert_eq!(config_error.to_string(), "Configuration error: invalid config");
}
#[test]
fn test_error_debug() {
// Test Debug trait implementation
let custom_error = Error::custom("debug test");
let debug_str = format!("{:?}", custom_error);
assert!(debug_str.contains("Custom"));
assert!(debug_str.contains("debug test"));
let feature_error = Error::FeatureDisabled("debug feature");
let debug_str = format!("{:?}", feature_error);
assert!(debug_str.contains("FeatureDisabled"));
assert!(debug_str.contains("debug feature"));
}
#[test]
fn test_custom_error_creation() {
// Test custom error creation
let error = Error::custom("test custom error");
match error {
Error::Custom(msg) => assert_eq!(msg, "test custom error"),
_ => panic!("Expected Custom error variant"),
}
// Test empty string
let empty_error = Error::custom("");
match empty_error {
Error::Custom(msg) => assert_eq!(msg, ""),
_ => panic!("Expected Custom error variant"),
}
// Test special characters
let special_error = Error::custom("Test Chinese 中文 & special chars: !@#$%");
match special_error {
Error::Custom(msg) => assert_eq!(msg, "Test Chinese 中文 & special chars: !@#$%"),
_ => panic!("Expected Custom error variant"),
}
}
#[test]
fn test_io_error_conversion() {
// Test IO error conversion
let io_error = io::Error::new(io::ErrorKind::NotFound, "file not found");
let converted_error: Error = io_error.into();
match converted_error {
Error::Io(err) => {
assert_eq!(err.kind(), io::ErrorKind::NotFound);
assert_eq!(err.to_string(), "file not found");
}
_ => panic!("Expected Io error variant"),
}
// Test different types of IO errors
let permission_error = io::Error::new(io::ErrorKind::PermissionDenied, "access denied");
let converted: Error = permission_error.into();
assert!(matches!(converted, Error::Io(_)));
}
#[test]
fn test_serde_error_conversion() {
// Test serialization error conversion
let invalid_json = r#"{"invalid": json}"#;
let serde_error = serde_json::from_str::<serde_json::Value>(invalid_json).unwrap_err();
let converted_error: Error = serde_error.into();
match converted_error {
Error::Serde(_) => {
// Verify error type is correct
assert!(converted_error.to_string().contains("Serialization error"));
}
_ => panic!("Expected Serde error variant"),
}
}
#[tokio::test]
async fn test_channel_send_error_conversion() {
// Test channel send error conversion
let (tx, rx) = mpsc::channel::<crate::event::Event>(1);
drop(rx); // Close receiver
// Create a test event
use crate::event::{Bucket, Identity, Metadata, Name, Object, Source};
use std::collections::HashMap;
let identity = Identity::new("test-user".to_string());
let bucket = Bucket::new("test-bucket".to_string(), identity.clone(), "arn:aws:s3:::test-bucket".to_string());
let object = Object::new(
"test-key".to_string(),
Some(1024),
Some("etag123".to_string()),
Some("text/plain".to_string()),
Some(HashMap::new()),
None,
"sequencer123".to_string(),
);
let metadata = Metadata::create("1.0".to_string(), "config1".to_string(), bucket, object);
let source = Source::new("localhost".to_string(), "8080".to_string(), "test-agent".to_string());
let test_event = crate::event::Event::builder()
.event_name(Name::ObjectCreatedPut)
.s3(metadata)
.source(source)
.build()
.unwrap();
let send_result = tx.send(test_event).await;
assert!(send_result.is_err());
let send_error = send_result.unwrap_err();
let boxed_error = Box::new(send_error);
let converted_error: Error = boxed_error.into();
match converted_error {
Error::ChannelSend(_) => {
assert!(converted_error.to_string().contains("Channel send error"));
}
_ => panic!("Expected ChannelSend error variant"),
}
}
#[test]
fn test_error_source_chain() {
// Test the error source chain
let io_error = io::Error::new(io::ErrorKind::InvalidData, "invalid data");
let converted_error: Error = io_error.into();
// Verify the error source
assert!(converted_error.source().is_some());
let source = converted_error.source().unwrap();
assert_eq!(source.to_string(), "invalid data");
}
#[test]
fn test_error_variants_exhaustive() {
// Test construction of every error variant
let errors = vec![
Error::FeatureDisabled("test"),
Error::EventBusStarted,
Error::MissingField("field"),
Error::ValidationError("validation"),
Error::Custom("custom".to_string()),
Error::ConfigError("config".to_string()),
];
for error in errors {
// Verify each error displays correctly
let error_str = error.to_string();
assert!(!error_str.is_empty());
// Verify each error debug-formats correctly
let debug_str = format!("{:?}", error);
assert!(!debug_str.is_empty());
}
}
#[test]
fn test_error_equality_and_matching() {
// Test pattern matching on errors
let custom_error = Error::custom("test");
match custom_error {
Error::Custom(msg) => assert_eq!(msg, "test"),
_ => panic!("Pattern matching failed"),
}
let feature_error = Error::FeatureDisabled("feature");
match feature_error {
Error::FeatureDisabled(feature) => assert_eq!(feature, "feature"),
_ => panic!("Pattern matching failed"),
}
let event_bus_error = Error::EventBusStarted;
match event_bus_error {
Error::EventBusStarted => {} // matches correctly
_ => panic!("Pattern matching failed"),
}
}
#[test]
fn test_error_message_formatting() {
// Test error message formatting
let test_cases = vec![
(Error::FeatureDisabled("kafka"), "Feature disabled: kafka"),
(Error::MissingField("bucket_name"), "necessary fields are missing:bucket_name"),
(Error::ValidationError("invalid email"), "field verification failed:invalid email"),
(Error::ConfigError("missing file".to_string()), "Configuration error: missing file"),
];
for (error, expected_message) in test_cases {
assert_eq!(error.to_string(), expected_message);
}
}
#[test]
fn test_error_memory_efficiency() {
// Test the memory footprint of the error type
use std::mem;
let size = mem::size_of::<Error>();
// The error type should stay reasonably compact; 96 bytes is acceptable given the many variants
assert!(size <= 128, "Error size should be reasonable, got {} bytes", size);
// 测试 Option<Error>的大小
let option_size = mem::size_of::<Option<Error>>();
assert!(option_size <= 136, "Option<Error> should be efficient, got {} bytes", option_size);
}
#[test]
fn test_error_thread_safety() {
// Test that the error type is thread-safe
fn assert_send<T: Send>() {}
fn assert_sync<T: Sync>() {}
assert_send::<Error>();
assert_sync::<Error>();
}
#[test]
fn test_custom_error_edge_cases() {
// Test edge cases for custom errors
let long_message = "a".repeat(1000);
let long_error = Error::custom(&long_message);
match long_error {
Error::Custom(msg) => assert_eq!(msg.len(), 1000),
_ => panic!("Expected Custom error variant"),
}
// Test a message containing newlines
let multiline_error = Error::custom("line1\nline2\nline3");
match multiline_error {
Error::Custom(msg) => assert!(msg.contains('\n')),
_ => panic!("Expected Custom error variant"),
}
// Test a message containing Unicode characters
let unicode_error = Error::custom("🚀 Unicode test 测试 🎉");
match unicode_error {
Error::Custom(msg) => assert!(msg.contains('🚀')),
_ => panic!("Expected Custom error variant"),
}
}
#[test]
fn test_error_conversion_consistency() {
// Test consistency of error conversion
let original_io_error = io::Error::new(io::ErrorKind::TimedOut, "timeout");
let error_message = original_io_error.to_string();
let converted: Error = original_io_error.into();
// Verify the converted error contains the original error message
assert!(converted.to_string().contains(&error_message));
}
#[test]
fn test_error_downcast() {
// Test downcasting of errors
let io_error = io::Error::other("test error");
let converted: Error = io_error.into();
// Verify the source error is accessible
if let Error::Io(ref inner) = converted {
assert_eq!(inner.to_string(), "test error");
assert_eq!(inner.kind(), io::ErrorKind::Other);
} else {
panic!("Expected Io error variant");
}
}
#[test]
fn test_error_chain_depth() {
// Test the depth of the error chain
let root_cause = io::Error::other("root cause");
let converted: Error = root_cause.into();
let mut depth = 0;
let mut current_error: &dyn StdError = &converted;
while let Some(source) = current_error.source() {
depth += 1;
current_error = source;
// Guard against infinite loops
if depth > 10 {
break;
}
}
assert!(depth > 0, "Error should have at least one source");
assert!(depth <= 3, "Error chain should not be too deep");
}
#[test]
fn test_static_str_lifetime() {
// Test static string lifetimes
fn create_feature_error() -> Error {
Error::FeatureDisabled("static_feature")
}
let error = create_feature_error();
match error {
Error::FeatureDisabled(feature) => assert_eq!(feature, "static_feature"),
_ => panic!("Expected FeatureDisabled error variant"),
}
}
#[test]
fn test_error_formatting_consistency() {
// Test consistency of error formatting
let errors = vec![
Error::FeatureDisabled("test"),
Error::MissingField("field"),
Error::ValidationError("validation"),
Error::Custom("custom".to_string()),
];
for error in errors {
let display_str = error.to_string();
let debug_str = format!("{:?}", error);
// Neither Display nor Debug output should be empty
assert!(!display_str.is_empty());
assert!(!debug_str.is_empty());
// Debug output usually carries more information, but that is not guaranteed;
// here we only verify that both have content
assert!(!debug_str.is_empty());
assert!(!display_str.is_empty());
}
impl From<url::ParseError> for TargetError {
fn from(err: url::ParseError) -> Self {
TargetError::Configuration(format!("URL parse error: {}", err))
}
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,247 @@
use crate::store::DEFAULT_LIMIT;
use crate::{
config::KVS,
error::TargetError,
target::{mqtt::MQTTArgs, webhook::WebhookArgs, Target},
};
use async_trait::async_trait;
use rumqttc::QoS;
use std::time::Duration;
use tracing::warn;
use url::Url;
/// Trait for creating targets from configuration
#[async_trait]
pub trait TargetFactory: Send + Sync {
/// Creates a target from configuration
async fn create_target(
&self,
id: String,
config: &KVS,
) -> Result<Box<dyn Target + Send + Sync>, TargetError>;
/// Validates target configuration
fn validate_config(&self, config: &KVS) -> Result<(), TargetError>;
}
/// Factory for creating Webhook targets
pub struct WebhookTargetFactory;
#[async_trait]
impl TargetFactory for WebhookTargetFactory {
async fn create_target(
&self,
id: String,
config: &KVS,
) -> Result<Box<dyn Target + Send + Sync>, TargetError> {
// Parse configuration values
let enable = config.lookup("enable").unwrap_or("off") == "on";
if !enable {
return Err(TargetError::Configuration("Target is disabled".to_string()));
}
let endpoint = config
.lookup("endpoint")
.ok_or_else(|| TargetError::Configuration("Missing endpoint".to_string()))?;
let endpoint_url = Url::parse(endpoint)
.map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {}", e)))?;
let auth_token = config.lookup("auth_token").unwrap_or("").to_string();
let queue_dir = config.lookup("queue_dir").unwrap_or("").to_string();
let queue_limit = config
.lookup("queue_limit")
.and_then(|v| v.parse::<u64>().ok())
.unwrap_or(DEFAULT_LIMIT);
let client_cert = config.lookup("client_cert").unwrap_or("").to_string();
let client_key = config.lookup("client_key").unwrap_or("").to_string();
// Create and return Webhook target
let args = WebhookArgs {
enable,
endpoint: endpoint_url,
auth_token,
queue_dir,
queue_limit,
client_cert,
client_key,
};
let target = crate::target::webhook::WebhookTarget::new(id, args)?;
Ok(Box::new(target))
}
fn validate_config(&self, config: &KVS) -> Result<(), TargetError> {
let enable = config.lookup("enable").unwrap_or("off") == "on";
if !enable {
return Ok(());
}
// Validate endpoint
let endpoint = config
.lookup("endpoint")
.ok_or_else(|| TargetError::Configuration("Missing endpoint".to_string()))?;
Url::parse(endpoint)
.map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {}", e)))?;
// Validate TLS certificates
let client_cert = config.lookup("client_cert").unwrap_or("");
let client_key = config.lookup("client_key").unwrap_or("");
if (!client_cert.is_empty() && client_key.is_empty())
|| (client_cert.is_empty() && !client_key.is_empty())
{
return Err(TargetError::Configuration(
"Both client_cert and client_key must be specified if using client certificates"
.to_string(),
));
}
// Validate queue directory
let queue_dir = config.lookup("queue_dir").unwrap_or("");
if !queue_dir.is_empty() && !std::path::Path::new(queue_dir).is_absolute() {
return Err(TargetError::Configuration(
"Webhook Queue directory must be an absolute path".to_string(),
));
}
Ok(())
}
}
/// Factory for creating MQTT targets
pub struct MQTTTargetFactory;
#[async_trait]
impl TargetFactory for MQTTTargetFactory {
async fn create_target(
&self,
id: String,
config: &KVS,
) -> Result<Box<dyn Target + Send + Sync>, TargetError> {
// Parse configuration values
let enable = config.lookup("enable").unwrap_or("off") == "on";
if !enable {
return Err(TargetError::Configuration("Target is disabled".to_string()));
}
let broker = config
.lookup("broker")
.ok_or_else(|| TargetError::Configuration("Missing broker".to_string()))?;
let broker_url = Url::parse(broker)
.map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {}", e)))?;
let topic = config
.lookup("topic")
.ok_or_else(|| TargetError::Configuration("Missing topic".to_string()))?;
let qos = config
.lookup("qos")
.and_then(|v| v.parse::<u8>().ok())
.map(|q| match q {
0 => QoS::AtMostOnce,
1 => QoS::AtLeastOnce,
2 => QoS::ExactlyOnce,
_ => QoS::AtMostOnce,
})
.unwrap_or(QoS::AtLeastOnce);
let username = config.lookup("username").unwrap_or("").to_string();
let password = config.lookup("password").unwrap_or("").to_string();
let reconnect_interval = config
.lookup("reconnect_interval")
.and_then(|v| v.parse::<u64>().ok())
.map(Duration::from_secs)
.unwrap_or(Duration::from_secs(5));
let keep_alive = config
.lookup("keep_alive_interval")
.and_then(|v| v.parse::<u64>().ok())
.map(Duration::from_secs)
.unwrap_or(Duration::from_secs(30));
let queue_dir = config.lookup("queue_dir").unwrap_or("").to_string();
let queue_limit = config
.lookup("queue_limit")
.and_then(|v| v.parse::<u64>().ok())
.unwrap_or(DEFAULT_LIMIT);
// Create and return MQTT target
let args = MQTTArgs {
enable,
broker: broker_url,
topic: topic.to_string(),
qos,
username,
password,
max_reconnect_interval: reconnect_interval,
keep_alive,
queue_dir,
queue_limit,
};
let target = crate::target::mqtt::MQTTTarget::new(id, args)?;
Ok(Box::new(target))
}
fn validate_config(&self, config: &KVS) -> Result<(), TargetError> {
let enable = config.lookup("enable").unwrap_or("off") == "on";
if !enable {
return Ok(());
}
// Validate broker URL
let broker = config
.lookup("broker")
.ok_or_else(|| TargetError::Configuration("Missing broker".to_string()))?;
let url = Url::parse(broker)
.map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {}", e)))?;
// Validate supported schemes
match url.scheme() {
"tcp" | "ssl" | "ws" | "wss" | "mqtt" | "mqtts" => {}
_ => {
return Err(TargetError::Configuration(
"Unsupported broker URL scheme".to_string(),
));
}
}
// Validate topic
if config.lookup("topic").is_none() {
return Err(TargetError::Configuration("Missing topic".to_string()));
}
// Validate QoS
if let Some(qos_str) = config.lookup("qos") {
let qos = qos_str
.parse::<u8>()
.map_err(|_| TargetError::Configuration("Invalid QoS value".to_string()))?;
if qos > 2 {
return Err(TargetError::Configuration(
"QoS must be 0, 1, or 2".to_string(),
));
}
}
// Validate queue directory
let queue_dir = config.lookup("queue_dir").unwrap_or("");
if !queue_dir.is_empty() {
if !std::path::Path::new(queue_dir).is_absolute() {
return Err(TargetError::Configuration(
"mqtt Queue directory must be an absolute path".to_string(),
));
}
if let Some(qos_str) = config.lookup("qos") {
if qos_str == "0" {
warn!("Using queue_dir with QoS 0 may result in event loss");
}
}
}
Ok(())
}
}
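// A hypothetical driver for the factories above: validate a KVS block and,
// if it passes, build the boxed target.
async fn demo_factory() -> Result<(), TargetError> {
    let factory = WebhookTargetFactory;
    let mut kvs = KVS::new();
    kvs.set("enable", "on");
    kvs.set("endpoint", "http://127.0.0.1:3020/webhook");
    factory.validate_config(&kvs)?;
    let _target = factory.create_target("1".to_string(), &kvs).await?;
    Ok(())
}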

View File

@@ -0,0 +1,12 @@
use crate::NotificationSystem;
use once_cell::sync::Lazy;
use std::sync::Arc;
static NOTIFICATION_SYSTEM: Lazy<Arc<NotificationSystem>> =
Lazy::new(|| Arc::new(NotificationSystem::new()));
/// Returns a handle to the global NotificationSystem instance.
/// Call this anywhere you need to interact with the notification system.
pub fn notification_system() -> Arc<NotificationSystem> {
NOTIFICATION_SYSTEM.clone()
}
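// A hedged startup sketch: obtain the shared handle once and initialize it;
// `init` and `NotificationError` come from the integration module below.
#[allow(dead_code)]
async fn startup_example() -> Result<(), crate::NotificationError> {
let system = notification_system();
system.init().await
}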

View File

@@ -0,0 +1,594 @@
use crate::arn::TargetID;
use crate::store::{Key, Store};
use crate::{
config::{parse_config, Config}, error::NotificationError, notifier::EventNotifier, registry::TargetRegistry,
rules::BucketNotificationConfig,
stream,
Event,
StoreError,
Target,
KVS,
};
use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::{mpsc, RwLock, Semaphore};
use tracing::{debug, error, info, warn};
/// Monitoring metrics for the notification system
pub struct NotificationMetrics {
/// Number of events currently being processed
processing_events: AtomicUsize,
/// Number of events processed successfully
processed_events: AtomicUsize,
/// Number of events that failed processing
failed_events: AtomicUsize,
/// System startup time
start_time: Instant,
}
impl Default for NotificationMetrics {
fn default() -> Self {
Self::new()
}
}
impl NotificationMetrics {
pub fn new() -> Self {
NotificationMetrics {
processing_events: AtomicUsize::new(0),
processed_events: AtomicUsize::new(0),
failed_events: AtomicUsize::new(0),
start_time: Instant::now(),
}
}
// Public methods for incrementing the counters
pub fn increment_processing(&self) {
self.processing_events.fetch_add(1, Ordering::Relaxed);
}
pub fn increment_processed(&self) {
self.processing_events.fetch_sub(1, Ordering::Relaxed);
self.processed_events.fetch_add(1, Ordering::Relaxed);
}
pub fn increment_failed(&self) {
self.processing_events.fetch_sub(1, Ordering::Relaxed);
self.failed_events.fetch_add(1, Ordering::Relaxed);
}
// Public methods for reading the counters
pub fn processing_count(&self) -> usize {
self.processing_events.load(Ordering::Relaxed)
}
pub fn processed_count(&self) -> usize {
self.processed_events.load(Ordering::Relaxed)
}
pub fn failed_count(&self) -> usize {
self.failed_events.load(Ordering::Relaxed)
}
pub fn uptime(&self) -> Duration {
self.start_time.elapsed()
}
}
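// A small sketch of the intended counter protocol for stream-processing code:
// mark an event in-flight, then settle it as processed (or failed), which also
// decrements the in-flight count.
#[allow(dead_code)]
fn metrics_example() {
let metrics = NotificationMetrics::new();
metrics.increment_processing();
metrics.increment_processed();
assert_eq!(metrics.processing_count(), 0);
assert_eq!(metrics.processed_count(), 1);
assert_eq!(metrics.failed_count(), 0);
}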
/// The notification system that integrates all components
pub struct NotificationSystem {
/// The event notifier
pub notifier: Arc<EventNotifier>,
/// The target registry
pub registry: Arc<TargetRegistry>,
/// The current configuration
pub config: Arc<RwLock<Config>>,
/// Cancel sender for managing stream processing tasks
stream_cancellers: Arc<RwLock<HashMap<TargetID, mpsc::Sender<()>>>>,
/// Semaphore limiting concurrent event processing
concurrency_limiter: Arc<Semaphore>,
/// Monitoring metrics
metrics: Arc<NotificationMetrics>,
}
impl Default for NotificationSystem {
fn default() -> Self {
Self::new()
}
}
impl NotificationSystem {
/// Creates a new NotificationSystem
pub fn new() -> Self {
NotificationSystem {
notifier: Arc::new(EventNotifier::new()),
registry: Arc::new(TargetRegistry::new()),
config: Arc::new(RwLock::new(Config::new())),
stream_cancellers: Arc::new(RwLock::new(HashMap::new())),
concurrency_limiter: Arc::new(Semaphore::new(
std::env::var("RUSTFS_TARGET_STREAM_CONCURRENCY")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(20),
)), // Default cap of 20 concurrent event-processing tasks, overridable via RUSTFS_TARGET_STREAM_CONCURRENCY
metrics: Arc::new(NotificationMetrics::new()),
}
}
/// Initializes the notification system
pub async fn init(&self) -> Result<(), NotificationError> {
info!("Initialize notification system...");
let config = self.config.read().await;
debug!(
"Initializing notification system with config: {:?}",
*config
);
let targets: Vec<Box<dyn Target + Send + Sync>> =
self.registry.create_targets_from_config(&config).await?;
info!("{} notification targets were created", targets.len());
// Initiate event stream processing for each storage-enabled target
let mut cancellers = HashMap::new();
for target in &targets {
let target_id = target.id();
info!("Initializing target: {}", target.id());
// Initialize the target
if let Err(e) = target.init().await {
error!("Target {} Initialization failed:{}", target.id(), e);
continue;
}
debug!(
"Target {} initialized successfully,enabled:{}",
target_id,
target.is_enabled()
);
// Check if the target is enabled and has storage
if target.is_enabled() {
if let Some(store) = target.store() {
info!("Start event stream processing for target {}", target.id());
// Clone the target's store and the target itself
let store_clone = store.boxed_clone();
let target_box = target.clone_dyn();
let target_arc = Arc::from(target_box);
// Add a reference to the monitoring metrics
let metrics = self.metrics.clone();
let semaphore = self.concurrency_limiter.clone();
// Use the enhanced wrapper around start_event_stream
let cancel_tx = self.enhanced_start_event_stream(
store_clone,
target_arc,
metrics,
semaphore,
);
// Start event stream processing and save cancel sender
let target_id_clone = target_id.clone();
cancellers.insert(target_id, cancel_tx);
info!(
"Event stream processing for target {} is started successfully",
target_id_clone
);
} else {
info!(
"Target {} No storage is configured, event stream processing is skipped",
target_id
);
}
} else {
info!(
"Target {} is not enabled, event stream processing is skipped",
target_id
);
}
}
// Update the canceller collection
*self.stream_cancellers.write().await = cancellers;
// Initialize the bucket target
self.notifier.init_bucket_targets(targets).await?;
info!("Notification system initialized");
Ok(())
}
/// Returns the `TargetID`s of all currently active (initialized) targets.
///
/// # Returns
/// A `Vec` containing the `TargetID` of every active target.
pub async fn get_active_targets(&self) -> Vec<TargetID> {
self.notifier.target_list().read().await.keys()
}
/// Removes a Target and its associated resources, addressed precisely by its TargetID.
///
/// The process involves:
/// 1. Stopping the event stream associated with the target (if one exists).
/// 2. Removing the target instance from the notifier's active list.
/// 3. Removing the target's entry from the system configuration.
///
/// # Arguments
/// * `target_id` - The unique identifier of the target to remove.
/// * `target_type` - The configuration section the target belongs to, e.g. "notify_mqtt".
///
/// # Returns
/// Returns `Ok(())` on success.
pub async fn remove_target(
&self,
target_id: &TargetID,
target_type: &str,
) -> Result<(), NotificationError> {
info!("Attempting to remove target: {}", target_id);
// Step 1: Stop the event stream (if one exists)
let mut cancellers_guard = self.stream_cancellers.write().await;
if let Some(cancel_tx) = cancellers_guard.remove(target_id) {
info!("Stopping event stream for target {}", target_id);
// Send the stop signal; continue even if it fails, since the receiver may already be closed
if let Err(e) = cancel_tx.send(()).await {
error!(
"Failed to send stop signal to target {} stream: {}",
target_id, e
);
}
} else {
info!(
"No active event stream found for target {}, skipping stop.",
target_id
);
}
drop(cancellers_guard);
// Step 2: Remove the target instance from the notifier's active list.
// TargetList::remove_target_only calls target.close().
let target_list = self.notifier.target_list();
let mut target_list_guard = target_list.write().await;
if target_list_guard
.remove_target_only(target_id)
.await
.is_some()
{
info!("Removed target {} from the active list.", target_id);
} else {
warn!("Target {} was not found in the active list.", target_id);
}
drop(target_list_guard);
// Step 3: Remove the target from the persisted configuration
let mut config_guard = self.config.write().await;
let mut changed = false;
if let Some(targets_of_type) = config_guard.get_mut(target_type) {
if targets_of_type.remove(&target_id.name).is_some() {
info!("Removed target {} from the configuration.", target_id);
changed = true;
}
// If no targets of this type remain, remove the type entry
if targets_of_type.is_empty() {
config_guard.remove(target_type);
}
}
if !changed {
warn!("Target {} was not found in the configuration.", target_id);
}
Ok(())
}
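// A hedged sketch of draining every active target of one configuration
// section: each TargetID comes from get_active_targets, and the target_type
// (e.g. "notify_mqtt") follows the section-name convention documented below.
#[allow(dead_code)]
async fn remove_all_of_type_example(&self, target_type: &str) -> Result<(), NotificationError> {
for target_id in self.get_active_targets().await {
self.remove_target(&target_id, target_type).await?;
}
Ok(())
}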
/// Set or update a Target configuration.
/// If the configuration is changed, the entire notification system will be automatically reloaded to apply the changes.
///
/// # Arguments
/// * `target_type` - Target type, such as "notify_webhook" or "notify_mqtt".
/// * `target_name` - A unique name for a Target, such as "1".
/// * `kvs` - The full configuration of the Target.
///
/// # Returns
/// Result<(), NotificationError>
/// If the target configuration is successfully set, it returns Ok(()).
/// If the target configuration is invalid, it returns Err(NotificationError::Configuration).
pub async fn set_target_config(
&self,
target_type: &str,
target_name: &str,
kvs: KVS,
) -> Result<(), NotificationError> {
info!(
"Setting config for target {} of type {}",
target_name, target_type
);
let mut config_guard = self.config.write().await;
config_guard
.entry(target_type.to_string())
.or_default()
.insert(target_name.to_string(), kvs);
let new_config = config_guard.clone();
// Release the lock before calling reload_config
drop(config_guard);
self.reload_config(new_config).await
}
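// A hedged sketch of registering a webhook target at runtime; the
// "notify_webhook" section and the `enable`/`endpoint` keys mirror the factory
// and registry code elsewhere in this commit, and the endpoint URL is
// illustrative only.
#[allow(dead_code)]
async fn add_webhook_example(&self) -> Result<(), NotificationError> {
let mut kvs = KVS::new();
kvs.set("enable", "on");
kvs.set("endpoint", "http://example.com/webhook");
self.set_target_config("notify_webhook", "1", kvs).await
}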
/// Removes all notification configurations for a bucket.
pub async fn remove_bucket_notification_config(&self, bucket_name: &str) {
self.notifier.remove_rules_map(bucket_name).await;
}
/// Removes a Target configuration.
/// If the configuration is successfully removed, the entire notification system will be automatically reloaded.
///
/// # Arguments
/// * `target_type` - Target type, such as "notify_webhook" or "notify_mqtt".
/// * `target_name` - A unique name for a Target, such as "1".
///
/// # Returns
/// Result<(), NotificationError>
///
/// If the target configuration is successfully removed, it returns Ok(()).
/// If the target configuration does not exist, it returns Ok(()) without making any changes.
pub async fn remove_target_config(
&self,
target_type: &str,
target_name: &str,
) -> Result<(), NotificationError> {
info!(
"Removing config for target {} of type {}",
target_name, target_type
);
let mut config_guard = self.config.write().await;
let mut changed = false;
if let Some(targets) = config_guard.get_mut(target_type) {
if targets.remove(target_name).is_some() {
changed = true;
}
if targets.is_empty() {
config_guard.remove(target_type);
}
}
if changed {
let new_config = config_guard.clone();
// Release the lock before calling reload_config
drop(config_guard);
self.reload_config(new_config).await
} else {
info!(
"Target {} of type {} not found, no changes made.",
target_name, target_type
);
Ok(())
}
}
/// Enhanced event stream startup function, including monitoring and concurrency control
fn enhanced_start_event_stream(
&self,
store: Box<dyn Store<Event, Error = StoreError, Key = Key> + Send>,
target: Arc<dyn Target + Send + Sync>,
metrics: Arc<NotificationMetrics>,
semaphore: Arc<Semaphore>,
) -> mpsc::Sender<()> {
// Process the event stream using the batching version
stream::start_event_stream_with_batching(store, target, metrics, semaphore)
}
/// Reloads the configuration
pub async fn reload_config(&self, new_config: Config) -> Result<(), NotificationError> {
info!("Reload notification configuration starts");
// Stop all existing streaming services
let mut cancellers = self.stream_cancellers.write().await;
for (target_id, cancel_tx) in cancellers.drain() {
info!("Stop event stream processing for target {}", target_id);
let _ = cancel_tx.send(()).await;
}
// Update the config
{
let mut config = self.config.write().await;
*config = new_config.clone();
}
// Create a new target from configuration
let targets: Vec<Box<dyn Target + Send + Sync>> = self
.registry
.create_targets_from_config(&new_config)
.await
.map_err(NotificationError::Target)?;
info!(
"{} notification targets were created from the new configuration",
targets.len()
);
// Start new event stream processing for each storage-enabled target
let mut new_cancellers = HashMap::new();
for target in &targets {
let target_id = target.id();
// Initialize the target
if let Err(e) = target.init().await {
error!("Target {} Initialization failed:{}", target_id, e);
continue;
}
// Check if the target is enabled and has storage
if target.is_enabled() {
if let Some(store) = target.store() {
info!("Start new event stream processing for target {}", target_id);
// Clone the target's store and the target itself
let store_clone = store.boxed_clone();
let target_box = target.clone_dyn();
let target_arc = Arc::from(target_box);
// Add a reference to the monitoring metrics
let metrics = self.metrics.clone();
let semaphore = self.concurrency_limiter.clone();
// Use the enhanced wrapper around start_event_stream
let cancel_tx = self.enhanced_start_event_stream(
store_clone,
target_arc,
metrics,
semaphore,
);
// Start event stream processing and save cancel sender
// let cancel_tx = start_event_stream(store_clone, target_clone);
let target_id_clone = target_id.clone();
new_cancellers.insert(target_id, cancel_tx);
info!(
"Event stream processing of target {} is restarted successfully",
target_id_clone
);
} else {
info!(
"Target {} No storage is configured, event stream processing is skipped",
target_id
);
}
} else {
info!(
"Target {} disabled, event stream processing is skipped",
target_id
);
}
}
// Update the canceller collection
*cancellers = new_cancellers;
// Initialize the bucket target
self.notifier.init_bucket_targets(targets).await?;
info!("Configuration reloaded end");
Ok(())
}
/// Loads the bucket notification configuration
pub async fn load_bucket_notification_config(
&self,
bucket_name: &str,
config: &BucketNotificationConfig,
) -> Result<(), NotificationError> {
let arn_list = self.notifier.get_arn_list(&config.region).await;
if arn_list.is_empty() {
return Err(NotificationError::Configuration(
"No targets configured".to_string(),
));
}
info!("Available ARNs: {:?}", arn_list);
// Validate the configuration against the available ARNs
if let Err(e) = config.validate(&config.region, &arn_list) {
debug!(
"Bucket notification config validation region:{} failed: {}",
&config.region, e
);
if !e.to_string().contains("ARN not found") {
return Err(NotificationError::BucketNotification(e.to_string()));
} else {
error!("{}", e);
}
}
// let rules_map = config.to_rules_map();
let rules_map = config.get_rules_map();
self.notifier
.add_rules_map(bucket_name, rules_map.clone())
.await;
info!("Loaded notification config for bucket: {}", bucket_name);
Ok(())
}
/// Sends an event
pub async fn send_event(
&self,
bucket_name: &str,
event_name: &str,
object_key: &str,
event: Event,
) {
self.notifier
.send(bucket_name, event_name, object_key, event)
.await;
}
/// Returns system status information
pub fn get_status(&self) -> HashMap<String, String> {
let mut status = HashMap::new();
status.insert(
"uptime_seconds".to_string(),
self.metrics.uptime().as_secs().to_string(),
);
status.insert(
"processing_events".to_string(),
self.metrics.processing_count().to_string(),
);
status.insert(
"processed_events".to_string(),
self.metrics.processed_count().to_string(),
);
status.insert(
"failed_events".to_string(),
self.metrics.failed_count().to_string(),
);
status
}
/// Shuts down the notification system, stopping all event stream tasks
pub async fn shutdown(&self) {
info!("Turn off the notification system");
// Get the number of active targets
let active_targets = self.stream_cancellers.read().await.len();
info!(
"Stops {} active event stream processing tasks",
active_targets
);
let mut cancellers = self.stream_cancellers.write().await;
for (target_id, cancel_tx) in cancellers.drain() {
info!("Stop event stream processing for target {}", target_id);
let _ = cancel_tx.send(()).await;
}
// Wait briefly so tasks have a chance to complete
tokio::time::sleep(Duration::from_millis(500)).await;
info!("Notify the system to be shut down completed");
}
}
impl Drop for NotificationSystem {
fn drop(&mut self) {
// Async operations are not possible in Drop, but we can still log.
info!("Notification system instance is being dropped");
info!("Notification system status at shutdown:");
for (key, value) in self.get_status() {
info!("{}: {}", key, value);
}
}
}
/// Loads configuration from a file
pub async fn load_config_from_file(
path: &str,
system: &NotificationSystem,
) -> Result<(), NotificationError> {
let config_str = tokio::fs::read_to_string(path).await.map_err(|e| {
NotificationError::Configuration(format!("Failed to read config file: {}", e))
})?;
let config = parse_config(&config_str)
.map_err(|e| NotificationError::Configuration(format!("Failed to parse config: {}", e)))?;
system.reload_config(config).await
}

View File

@@ -1,22 +1,74 @@
mod adapter;
mod config;
mod error;
mod event;
mod notifier;
//! RustFS Notify - A flexible and extensible event notification system for object storage.
//!
//! This library provides a Rust implementation of a bucket notification system for
//! object storage. It supports sending events to various targets (such as Webhook and
//! MQTT) and includes features like event persistence and retry on failure.
pub mod args;
pub mod arn;
pub mod config;
pub mod error;
pub mod event;
pub mod factory;
pub mod global;
pub mod integration;
pub mod notifier;
pub mod registry;
pub mod rules;
pub mod store;
mod system;
pub mod stream;
pub mod target;
pub mod utils;
pub use adapter::create_adapters;
#[cfg(feature = "mqtt")]
pub use adapter::mqtt::MqttAdapter;
#[cfg(feature = "webhook")]
pub use adapter::webhook::WebhookAdapter;
// Re-exports
pub use config::{parse_config, Config, KV, KVS};
pub use error::{NotificationError, StoreError, TargetError};
pub use event::{Event, EventLog, EventName};
pub use integration::NotificationSystem;
pub use rules::BucketNotificationConfig;
use std::io::IsTerminal;
pub use target::Target;
pub use adapter::ChannelAdapter;
pub use adapter::ChannelAdapterType;
pub use config::{AdapterConfig, EventNotifierConfig, DEFAULT_MAX_RETRIES, DEFAULT_RETRY_INTERVAL};
pub use error::Error;
pub use event::{Bucket, Event, EventBuilder, Identity, Log, Metadata, Name, Object, Source};
pub use store::manager;
pub use store::queue;
pub use store::queue::QueueStore;
use tracing_subscriber::{fmt, prelude::*, util::SubscriberInitExt, EnvFilter};
/// Initialize the tracing log system
///
/// # Example
/// ```
/// notify::init_logger(notify::LogLevel::Info);
/// ```
pub fn init_logger(level: LogLevel) {
let filter = EnvFilter::default().add_directive(level.into());
tracing_subscriber::registry()
.with(filter)
.with(
fmt::layer()
.with_target(true)
.with_ansi(std::io::stdout().is_terminal())
.with_thread_names(true)
.with_thread_ids(true)
.with_file(true)
.with_line_number(true),
)
.init();
}
/// Log level definition
pub enum LogLevel {
Debug,
Info,
Warn,
Error,
}
impl From<LogLevel> for tracing_subscriber::filter::Directive {
fn from(level: LogLevel) -> Self {
match level {
LogLevel::Debug => "debug".parse().unwrap(),
LogLevel::Info => "info".parse().unwrap(),
LogLevel::Warn => "warn".parse().unwrap(),
LogLevel::Error => "error".parse().unwrap(),
}
}
}

View File

@@ -1,143 +1,263 @@
use crate::config::EventNotifierConfig;
use crate::Event;
use common::error::{Error, Result};
use ecstore::store::ECStore;
use std::sync::Arc;
use tokio::sync::{broadcast, mpsc};
use tokio_util::sync::CancellationToken;
use crate::arn::TargetID;
use crate::{error::NotificationError, event::Event, rules::RulesMap, target::Target, EventName};
use std::{collections::HashMap, sync::Arc};
use tokio::sync::RwLock;
use tracing::{debug, error, info, instrument, warn};
/// Event Notifier
/// Manages event notification to targets based on rules
pub struct EventNotifier {
/// The event sending channel
sender: mpsc::Sender<Event>,
/// Receiver task handle
task_handle: Option<tokio::task::JoinHandle<()>>,
/// Configuration information
config: EventNotifierConfig,
/// Cancellation token for shutdown
shutdown: CancellationToken,
/// Channel for signalling shutdown completion
shutdown_complete_tx: Option<broadcast::Sender<()>>,
target_list: Arc<RwLock<TargetList>>,
bucket_rules_map: Arc<RwLock<HashMap<String, RulesMap>>>,
}
impl Default for EventNotifier {
fn default() -> Self {
Self::new()
}
}
impl EventNotifier {
/// Create a new event notifier
#[instrument(skip_all)]
pub async fn new(store: Arc<ECStore>) -> Result<Self> {
let manager = crate::store::manager::EventManager::new(store);
let manager = Arc::new(manager.await);
// Initialize the configuration
let config = manager.clone().init().await?;
// Create adapters
let adapters = manager.clone().create_adapters().await?;
info!("Created {} adapters", adapters.len());
// Create a shutdown token
let shutdown = CancellationToken::new();
let (shutdown_complete_tx, _) = broadcast::channel(1);
// Create the event channel - a default capacity is fine, since each adapter has its own queue.
// A small channel capacity suffices here because events are dispatched to adapters quickly.
let (sender, mut receiver) = mpsc::channel::<Event>(100);
let shutdown_clone = shutdown.clone();
let shutdown_complete_tx_clone = shutdown_complete_tx.clone();
let adapters_clone = adapters.clone();
// Start the event processing task
let task_handle = tokio::spawn(async move {
debug!("The event processing task starts");
loop {
tokio::select! {
Some(event) = receiver.recv() => {
debug!("The event is received:{}", event.id);
// Distribute to all adapters
for adapter in &adapters_clone {
let adapter_name = adapter.name();
match adapter.send(&event).await {
Ok(_) => {
debug!("Event {} Successfully sent to the adapter {}", event.id, adapter_name);
}
Err(e) => {
error!("Event {} send to adapter {} failed:{}", event.id, adapter_name, e);
}
}
}
}
_ = shutdown_clone.cancelled() => {
info!("A shutdown signal is received, and the event processing task is stopped");
let _ = shutdown_complete_tx_clone.send(());
break;
}
}
}
debug!("The event processing task has been stopped");
});
Ok(Self {
sender,
task_handle: Some(task_handle),
config,
shutdown,
shutdown_complete_tx: Some(shutdown_complete_tx),
})
/// Creates a new EventNotifier
pub fn new() -> Self {
EventNotifier {
target_list: Arc::new(RwLock::new(TargetList::new())),
bucket_rules_map: Arc::new(RwLock::new(HashMap::new())),
}
}
/// Shuts down the event notifier
pub async fn shutdown(&mut self) -> Result<()> {
info!("Shutting down the event notifier");
self.shutdown.cancel();
/// Returns a reference to the target list
/// This method provides access to the target list for external use.
///
pub fn target_list(&self) -> Arc<RwLock<TargetList>> {
Arc::clone(&self.target_list)
}
if let Some(shutdown_tx) = self.shutdown_complete_tx.take() {
let mut rx = shutdown_tx.subscribe();
// Wait for the shutdown-complete signal, or time out
tokio::select! {
_ = rx.recv() => {
debug!("A shutdown completion signal is received");
}
_ = tokio::time::sleep(std::time::Duration::from_secs(10)) => {
warn!("Shutdown timeout and forced termination");
}
}
/// Removes all notification rules for a bucket
///
/// # Arguments
/// * `bucket_name` - The name of the bucket for which to remove rules
///
/// This method removes all rules associated with the specified bucket name.
/// It will log a message indicating the removal of rules.
pub async fn remove_rules_map(&self, bucket_name: &str) {
let mut rules_map = self.bucket_rules_map.write().await;
if rules_map.remove(bucket_name).is_some() {
info!("Removed all notification rules for bucket: {}", bucket_name);
}
}
if let Some(handle) = self.task_handle.take() {
handle.abort();
match handle.await {
Ok(_) => debug!("The event processing task has been terminated gracefully"),
Err(e) => {
if e.is_cancelled() {
debug!("The event processing task has been canceled");
/// Returns a list of ARNs for the registered targets
pub async fn get_arn_list(&self, region: &str) -> Vec<String> {
let target_list_guard = self.target_list.read().await;
target_list_guard
.keys()
.iter()
.map(|target_id| target_id.to_arn(region).to_arn_string())
.collect()
}
/// Adds a rules map for a bucket
pub async fn add_rules_map(&self, bucket_name: &str, rules_map: RulesMap) {
let mut bucket_rules_guard = self.bucket_rules_map.write().await;
if rules_map.is_empty() {
bucket_rules_guard.remove(bucket_name);
} else {
bucket_rules_guard.insert(bucket_name.to_string(), rules_map);
}
info!("Added rules for bucket: {}", bucket_name);
}
/// Removes notification rules for a bucket
pub async fn remove_notification(&self, bucket_name: &str) {
let mut bucket_rules_guard = self.bucket_rules_map.write().await;
bucket_rules_guard.remove(bucket_name);
info!("Removed notification rules for bucket: {}", bucket_name);
}
/// Removes all targets
pub async fn remove_all_bucket_targets(&self) {
let mut target_list_guard = self.target_list.write().await;
// The logic for sending cancel signals via stream_cancel_senders would be removed.
// TargetList::clear_targets_only already handles calling target.close().
target_list_guard.clear_targets_only().await; // Modified clear to not re-cancel
info!("Removed all targets and their streams");
}
/// Sends an event to the appropriate targets based on the bucket rules
#[instrument(skip(self, event))]
pub async fn send(&self, bucket_name: &str, event_name: &str, object_key: &str, event: Event) {
let bucket_rules_guard = self.bucket_rules_map.read().await;
if let Some(rules) = bucket_rules_guard.get(bucket_name) {
let target_ids = rules.match_rules(EventName::from(event_name), object_key);
if target_ids.is_empty() {
debug!("No matching targets for event in bucket: {}", bucket_name);
return;
}
let target_ids_len = target_ids.len();
let mut handles = vec![];
// Use a scope to limit how long target_list is borrowed
{
let target_list_guard = self.target_list.read().await;
info!("Sending event to targets: {:?}", target_ids);
for target_id in target_ids {
// `get` now returns Option<Arc<dyn Target + Send + Sync>>
if let Some(target_arc) = target_list_guard.get(&target_id) {
// Clone the Arc<dyn Target> (the type stored in target_list) so it can move into the async task
let cloned_target_for_task = target_arc.clone();
let event_clone = event.clone();
let target_name_for_task = cloned_target_for_task.name(); // Capture the name before spawning the task
debug!(
"Preparing to send event to target: {}",
target_name_for_task
);
// Use the cloned data inside the closure to avoid borrow conflicts
let handle = tokio::spawn(async move {
if let Err(e) = cloned_target_for_task.save(event_clone).await {
error!(
"Failed to send event to target {}: {}",
target_name_for_task, e
);
} else {
debug!(
"Successfully saved event to target {}",
target_name_for_task
);
}
});
handles.push(handle);
} else {
error!("An error occurred while waiting for the event processing task to terminate:{}", e);
warn!(
"Target ID {:?} found in rules but not in target list.",
target_id
);
}
}
// The target_list guard is dropped automatically here
}
}
info!("The event notifier is completely turned off");
// Wait for all tasks to complete
for handle in handles {
if let Err(e) = handle.await {
error!("Task for sending/saving event failed: {}", e);
}
}
info!(
"Event processing initiated for {} targets for bucket: {}",
target_ids_len, bucket_name
);
} else {
debug!("No rules found for bucket: {}", bucket_name);
}
}
/// Initializes the targets for buckets
#[instrument(skip(self, targets_to_init))]
pub async fn init_bucket_targets(
&self,
targets_to_init: Vec<Box<dyn Target + Send + Sync>>,
) -> Result<(), NotificationError> {
// The currently active, simpler logic:
let mut target_list_guard = self.target_list.write().await; // Acquire the write lock on the TargetList
for target_boxed in targets_to_init {
// Iterate over the supplied Box<dyn Target> values
debug!("init bucket target: {}", target_boxed.name());
// TargetList::add expects Arc<dyn Target + Send + Sync>,
// so the Box<dyn Target + Send + Sync> must be converted into an Arc
let target_arc: Arc<dyn Target + Send + Sync> = Arc::from(target_boxed);
target_list_guard.add(target_arc)?; // Add the Arc<dyn Target> to the list
}
info!(
"Initialized {} targets, list size: {}", // 更清晰的日志
target_list_guard.len(),
target_list_guard.len()
);
Ok(()) // Ensure a Result is returned
}
}
/// A thread-safe list of targets
pub struct TargetList {
targets: HashMap<TargetID, Arc<dyn Target + Send + Sync>>,
}
impl Default for TargetList {
fn default() -> Self {
Self::new()
}
}
impl TargetList {
/// Creates a new TargetList
pub fn new() -> Self {
TargetList {
targets: HashMap::new(),
}
}
/// Adds a target to the list
pub fn add(&mut self, target: Arc<dyn Target + Send + Sync>) -> Result<(), NotificationError> {
let id = target.id();
if self.targets.contains_key(&id) {
// Potentially update or log a warning/error if replacing an existing target.
warn!(
"Target with ID {} already exists in TargetList. It will be overwritten.",
id
);
}
self.targets.insert(id, target);
Ok(())
}
/// Send events
pub async fn send(&self, event: Event) -> Result<()> {
self.sender
.send(event)
.await
.map_err(|e| Error::msg(format!("Failed to send events to channel: {}", e)))
/// Removes a target by ID. Note: This does not stop its associated event stream.
/// Stream cancellation should be handled by EventNotifier.
pub async fn remove_target_only(
&mut self,
id: &TargetID,
) -> Option<Arc<dyn Target + Send + Sync>> {
if let Some(target_arc) = self.targets.remove(id) {
if let Err(e) = target_arc.close().await {
// Target's own close logic
error!("Failed to close target {} during removal: {}", id, e);
}
Some(target_arc)
} else {
None
}
}
/// Get the current configuration
pub fn config(&self) -> &EventNotifierConfig {
&self.config
/// Clears all targets from the list. Note: This does not stop their associated event streams.
/// Stream cancellation should be handled by EventNotifier.
pub async fn clear_targets_only(&mut self) {
let target_ids_to_clear: Vec<TargetID> = self.targets.keys().cloned().collect();
for id in target_ids_to_clear {
if let Some(target_arc) = self.targets.remove(&id) {
if let Err(e) = target_arc.close().await {
error!("Failed to close target {} during clear: {}", id, e);
}
}
}
self.targets.clear();
}
/// Returns a target by ID
pub fn get(&self, id: &TargetID) -> Option<Arc<dyn Target + Send + Sync>> {
self.targets.get(id).cloned()
}
/// Returns all target IDs
pub fn keys(&self) -> Vec<TargetID> {
self.targets.keys().cloned().collect()
}
/// Returns the number of targets
pub fn len(&self) -> usize {
self.targets.len()
}
// is_empty can be derived from len()
pub fn is_empty(&self) -> bool {
self.targets.is_empty()
}
}
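// A hedged sketch of the overall wiring: register targets, attach bucket
// rules, then dispatch. The targets, rules_map, and event values are assumed
// to be built with the factory, rules, and event modules elsewhere in this
// commit.
#[allow(dead_code)]
async fn dispatch_example(
targets: Vec<Box<dyn Target + Send + Sync>>,
rules_map: RulesMap,
event: Event,
) -> Result<(), NotificationError> {
let notifier = EventNotifier::new();
notifier.init_bucket_targets(targets).await?;
notifier.add_rules_map("my-bucket", rules_map).await;
notifier
.send("my-bucket", "s3:ObjectCreated:Put", "photos/cat.jpg", event)
.await;
Ok(())
}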

View File

@@ -0,0 +1,147 @@
use crate::target::ChannelTargetType;
use crate::{
config::Config,
error::TargetError,
factory::{MQTTTargetFactory, TargetFactory, WebhookTargetFactory},
target::Target,
};
use std::collections::HashMap;
use tracing::{error, info};
/// Registry for managing target factories
pub struct TargetRegistry {
factories: HashMap<String, Box<dyn TargetFactory>>,
}
impl Default for TargetRegistry {
fn default() -> Self {
Self::new()
}
}
impl TargetRegistry {
/// Creates a new TargetRegistry with built-in factories
pub fn new() -> Self {
let mut registry = TargetRegistry {
factories: HashMap::new(),
};
// Register built-in factories
registry.register(
ChannelTargetType::Webhook.as_str(),
Box::new(WebhookTargetFactory),
);
registry.register(
ChannelTargetType::Mqtt.as_str(),
Box::new(MQTTTargetFactory),
);
registry
}
/// Registers a new factory for a target type
pub fn register(&mut self, target_type: &str, factory: Box<dyn TargetFactory>) {
self.factories.insert(target_type.to_string(), factory);
}
/// Creates a target from configuration
pub async fn create_target(
&self,
target_type: &str,
id: String,
config: &crate::config::KVS,
) -> Result<Box<dyn Target + Send + Sync>, TargetError> {
let factory = self.factories.get(target_type).ok_or_else(|| {
TargetError::Configuration(format!("Unknown target type: {}", target_type))
})?;
// Validate configuration before creating target
factory.validate_config(config)?;
// Create target
factory.create_target(id, config).await
}
/// Creates all targets from a configuration
pub async fn create_targets_from_config(
&self,
config: &Config,
) -> Result<Vec<Box<dyn Target + Send + Sync>>, TargetError> {
let mut targets: Vec<Box<dyn Target + Send + Sync>> = Vec::new();
// Iterate through configuration sections
for (section, subsections) in config {
// Only process notification sections
if !section.starts_with("notify_") {
continue;
}
// Extract target type from section name
let target_type = section.trim_start_matches("notify_");
// Iterate through subsections (each representing a target instance)
for (target_id, target_config) in subsections {
// Skip disabled targets
if target_config.lookup("enable").unwrap_or("off") != "on" {
continue;
}
// Create target
match self
.create_target(target_type, target_id.clone(), target_config)
.await
{
Ok(target) => {
info!("Created target: {}/{}", target_type, target_id);
targets.push(target);
}
Err(e) => {
error!(
"Failed to create target {}/{}: {}",
target_type, target_id, e
);
}
}
}
}
Ok(targets)
}
}
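// A hedged sketch of plugging in a custom target type. `NoopFactory` is
// hypothetical, and this assumes the two methods shown in the factory module
// are the whole `TargetFactory` trait; any such implementation can be
// registered the same way and then picked up by `create_targets_from_config`
// for a matching "notify_noop" section.
#[allow(dead_code)]
struct NoopFactory;

#[async_trait::async_trait]
impl TargetFactory for NoopFactory {
async fn create_target(
&self,
_id: String,
_config: &crate::config::KVS,
) -> Result<Box<dyn Target + Send + Sync>, TargetError> {
Err(TargetError::Configuration("noop targets cannot be created".to_string()))
}

fn validate_config(&self, _config: &crate::config::KVS) -> Result<(), TargetError> {
Ok(())
}
}

#[allow(dead_code)]
fn register_custom_factory_example() {
let mut registry = TargetRegistry::new();
registry.register("noop", Box::new(NoopFactory));
}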
#[cfg(test)]
mod tests {
use super::*;
use crate::config::KVS;
#[tokio::test]
async fn test_target_registry() {
let registry = TargetRegistry::new();
// Test valid webhook config
let mut webhook_config = KVS::new();
webhook_config.set("enable", "on");
webhook_config.set("endpoint", "http://example.com/webhook");
let target = registry
.create_target("webhook", "webhook1".to_string(), &webhook_config)
.await;
assert!(target.is_ok());
// Test invalid target type
let target = registry
.create_target("invalid", "invalid1".to_string(), &webhook_config)
.await;
assert!(target.is_err());
// Test disabled target
let mut disabled_config = KVS::new();
disabled_config.set("enable", "off");
disabled_config.set("endpoint", "http://example.com/webhook");
let target = registry
.create_target("webhook", "disabled".to_string(), &disabled_config)
.await;
assert!(target.is_err());
}
}

View File

@@ -0,0 +1,126 @@
use super::rules_map::RulesMap;
// Keep for existing structure if any, or remove if not used
use super::xml_config::ParseConfigError as BucketNotificationConfigError;
use crate::arn::TargetID;
use crate::rules::pattern_rules;
use crate::rules::target_id_set;
use crate::rules::NotificationConfiguration;
use crate::EventName;
use std::collections::HashMap;
use std::io::Read;
// Assuming this is the XML config structure
/// Configuration for bucket notifications.
/// This struct now holds the parsed and validated rules in the new RulesMap format.
#[derive(Debug, Clone, Default)]
pub struct BucketNotificationConfig {
pub region: String, // Region where this config is applicable
pub rules: RulesMap, // The new, more detailed RulesMap
}
impl BucketNotificationConfig {
pub fn new(region: &str) -> Self {
BucketNotificationConfig {
region: region.to_string(),
rules: RulesMap::new(),
}
}
/// Adds a rule to the configuration.
/// This method allows adding a rule with a specific event and target ID.
pub fn add_rule(
&mut self,
event_names: &[EventName], // Assuming event_names is a list of event names
pattern: String, // The object key pattern for the rule
target_id: TargetID, // The target ID for the notification
) {
self.rules.add_rule_config(event_names, pattern, target_id);
}
/// Parses notification configuration from XML.
/// `arn_list` is a list of valid ARN strings for validation.
pub fn from_xml<R: Read>(
reader: R,
current_region: &str,
arn_list: &[String],
) -> Result<Self, BucketNotificationConfigError> {
let mut parsed_config = NotificationConfiguration::from_reader(reader)?;
// Set defaults (region in ARNs if empty, xmlns) before validation
parsed_config.set_defaults(current_region);
// Validate the parsed configuration
parsed_config.validate(current_region, arn_list)?;
let mut rules_map = RulesMap::new();
for queue_conf in parsed_config.queue_list {
// The ARN in queue_conf should now have its region set if it was originally empty.
// Ensure TargetID can be cloned or extracted correctly.
let target_id = queue_conf.arn.target_id.clone();
let pattern_str = queue_conf.filter.filter_rule_list.pattern();
rules_map.add_rule_config(&queue_conf.events, pattern_str, target_id);
}
Ok(BucketNotificationConfig {
region: current_region.to_string(), // Config is for the current_region
rules: rules_map,
})
}
/// Validates the *current* BucketNotificationConfig.
/// This might be redundant if construction always implies validation.
/// However, Go's Config has a Validate method.
/// The primary validation now happens during `from_xml` via `NotificationConfiguration::validate`.
/// This method could re-check against an updated arn_list or region if needed.
pub fn validate(
&self,
current_region: &str,
arn_list: &[String],
) -> Result<(), BucketNotificationConfigError> {
if self.region != current_region {
return Err(BucketNotificationConfigError::RegionMismatch {
config_region: self.region.clone(),
current_region: current_region.to_string(),
});
}
// Iterate through the rules in self.rules and validate their TargetIDs against arn_list
// This requires RulesMap to expose its internal structure or provide an iterator
for (_event_name, pattern_rules) in self.rules.inner().iter() {
for (_pattern, target_id_set) in pattern_rules.inner().iter() {
// Assuming PatternRules has inner()
for target_id in target_id_set {
// Construct the ARN string for this target_id and self.region
let arn_to_check = target_id.to_arn(&self.region); // Assuming TargetID has to_arn
if !arn_list.contains(&arn_to_check.to_arn_string()) {
return Err(BucketNotificationConfigError::ArnNotFound(
arn_to_check.to_arn_string(),
));
}
}
}
}
Ok(())
}
// Expose the RulesMap for the notifier
pub fn get_rules_map(&self) -> &RulesMap {
&self.rules
}
pub fn to_rules_map(&self) -> RulesMap {
self.rules.clone()
}
/// Sets the region for the configuration
pub fn set_region(&mut self, region: &str) {
self.region = region.to_string();
}
}
// Add a helper to PatternRules if not already present
impl pattern_rules::PatternRules {
pub fn inner(&self) -> &HashMap<String, target_id_set::TargetIdSet> {
&self.rules
}
}
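// A hedged sketch of building a config programmatically with `add_rule`; the
// TargetID is assumed to be obtained elsewhere (for example from an ARN), and
// `EventName::ObjectCreatedAll` follows the composite event names referenced
// in the rules module.
#[allow(dead_code)]
fn build_config_example(target_id: TargetID) -> BucketNotificationConfig {
let mut config = BucketNotificationConfig::new("us-east-1");
config.add_rule(&[EventName::ObjectCreatedAll], "images/*".to_string(), target_id);
config
}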

View File

@@ -0,0 +1,19 @@
pub mod pattern;
pub mod pattern_rules;
pub mod rules_map;
pub mod target_id_set;
pub mod xml_config; // For XML structure definition and parsing
pub mod config; // Definition and parsing for BucketNotificationConfig
// Re-export key types from submodules so callers can use `crate::rules::TypeName`
pub use config::BucketNotificationConfig;
// BucketNotificationConfigError is currently an alias for xml_config::ParseConfigError
pub use xml_config::ParseConfigError as BucketNotificationConfigError;
pub use pattern_rules::PatternRules;
pub use rules_map::RulesMap;
pub use target_id_set::TargetIdSet;
pub use xml_config::{NotificationConfiguration, ParseConfigError};

View File

@@ -0,0 +1,99 @@
use wildmatch::WildMatch;
/// Creates a new pattern string from a prefix and a suffix.
///
/// The rules mirror event.NewPattern in the Go version:
/// - If a prefix is provided and does not end with '*', a '*' is appended.
/// - If a suffix is provided and does not start with '*', a '*' is prepended.
/// - Any "**" is collapsed to "*".
pub fn new_pattern(prefix: Option<&str>, suffix: Option<&str>) -> String {
let mut pattern = String::new();
// Process the prefix part
if let Some(p) = prefix {
if !p.is_empty() {
pattern.push_str(p);
if !p.ends_with('*') {
pattern.push('*');
}
}
}
// Process the suffix part
if let Some(s) = suffix {
if !s.is_empty() {
let mut s_to_append = s.to_string();
if !s.starts_with('*') {
s_to_append.insert(0, '*');
}
// If the pattern is empty (only suffixes are provided), then the pattern is the suffix
// Otherwise, append the suffix to the pattern
if pattern.is_empty() {
pattern = s_to_append;
} else {
pattern.push_str(&s_to_append);
}
}
}
// Replace "**" with "*"
pattern = pattern.replace("**", "*");
pattern
}
/// Performs simple wildcard matching of an object name against a pattern.
pub fn match_simple(pattern_str: &str, object_name: &str) -> bool {
if pattern_str == "*" {
// AWS S3 docs: A single asterisk (*) in the rule matches all objects.
return true;
}
// WildMatch considers an empty pattern to not match anything, which is usually desired.
// If pattern_str is empty, it means no specific filter, so it depends on interpretation.
// Go's wildcard.MatchSimple might treat empty pattern differently.
// For now, assume empty pattern means no match unless it's explicitly "*".
if pattern_str.is_empty() {
return false; // Or true if an empty pattern means "match all" in some contexts.
// Given Go's NewRulesMap defaults to "*", an empty pattern from Filter is unlikely to mean "match all".
}
WildMatch::new(pattern_str).matches(object_name)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_new_pattern() {
assert_eq!(new_pattern(Some("images/"), Some(".jpg")), "images/*.jpg");
assert_eq!(new_pattern(Some("images/"), None), "images/*");
assert_eq!(new_pattern(None, Some(".jpg")), "*.jpg");
assert_eq!(new_pattern(Some("foo"), Some("bar")), "foo*bar"); // foo* + *bar -> foo**bar -> foo*bar
assert_eq!(new_pattern(Some("foo*"), Some("bar")), "foo*bar"); // foo* + *bar -> foo**bar -> foo*bar
assert_eq!(new_pattern(Some("foo"), Some("*bar")), "foo*bar"); // foo* + *bar -> foo**bar -> foo*bar
assert_eq!(new_pattern(Some("foo*"), Some("*bar")), "foo*bar"); // foo* + *bar -> foo**bar -> foo*bar
assert_eq!(new_pattern(Some("*"), Some("*")), "*"); // * + * -> ** -> *
assert_eq!(new_pattern(Some("a"), Some("")), "a*");
assert_eq!(new_pattern(Some(""), Some("b")), "*b");
assert_eq!(new_pattern(None, None), "");
assert_eq!(new_pattern(Some("prefix"), Some("suffix")), "prefix*suffix");
assert_eq!(
new_pattern(Some("prefix/"), Some("/suffix")),
"prefix/*/suffix"
); // prefix/* + */suffix -> prefix/**/suffix -> prefix/*/suffix
}
#[test]
fn test_match_simple() {
assert!(match_simple("foo*", "foobar"));
assert!(!match_simple("foo*", "barfoo"));
assert!(match_simple("*.jpg", "photo.jpg"));
assert!(!match_simple("*.jpg", "photo.png"));
assert!(match_simple("*", "anything.anything"));
assert!(match_simple("foo*bar", "foobazbar"));
assert!(!match_simple("foo*bar", "foobar_baz"));
assert!(match_simple("a*b*c", "axbyc"));
assert!(!match_simple("a*b*c", "axbc"));
}
}

View File

@@ -0,0 +1,80 @@
use super::pattern;
use super::target_id_set::TargetIdSet;
use crate::arn::TargetID;
use std::collections::HashMap;
/// PatternRules - Event rule that maps object name patterns to TargetID collections.
/// Corresponds to `event.Rules` (map[string]TargetIDSet) in the Go code.
#[derive(Debug, Clone, Default)]
pub struct PatternRules {
pub(crate) rules: HashMap<String, TargetIdSet>,
}
impl PatternRules {
pub fn new() -> Self {
Default::default()
}
/// Adds a rule mapping a pattern to a target ID.
/// If the pattern already exists, target_id is added to the existing TargetIdSet.
pub fn add(&mut self, pattern: String, target_id: TargetID) {
self.rules.entry(pattern).or_default().insert(target_id);
}
/// Checks if there are any rules that match the given object name.
pub fn match_simple(&self, object_name: &str) -> bool {
self.rules
.keys()
.any(|p| pattern::match_simple(p, object_name))
}
/// Returns all TargetIDs that match the object name.
pub fn match_targets(&self, object_name: &str) -> TargetIdSet {
let mut matched_targets = TargetIdSet::new();
for (pattern_str, target_set) in &self.rules {
if pattern::match_simple(pattern_str, object_name) {
matched_targets.extend(target_set.iter().cloned());
}
}
matched_targets
}
pub fn is_empty(&self) -> bool {
self.rules.is_empty()
}
/// Merges with another PatternRules.
/// Corresponds to Go's `Rules.Union`.
pub fn union(&self, other: &Self) -> Self {
let mut new_rules = self.clone();
for (pattern, their_targets) in &other.rules {
let our_targets = new_rules.rules.entry(pattern.clone()).or_default();
our_targets.extend(their_targets.iter().cloned());
}
new_rules
}
/// Computes the difference from another PatternRules.
/// Corresponds to Go's `Rules.Difference`.
pub fn difference(&self, other: &Self) -> Self {
let mut result_rules = HashMap::new();
for (pattern, self_targets) in &self.rules {
match other.rules.get(pattern) {
Some(other_targets) => {
let diff_targets: TargetIdSet =
self_targets.difference(other_targets).cloned().collect();
if !diff_targets.is_empty() {
result_rules.insert(pattern.clone(), diff_targets);
}
}
None => {
// If there is no pattern in other, self_targets are all retained
result_rules.insert(pattern.clone(), self_targets.clone());
}
}
}
PatternRules {
rules: result_rules,
}
}
}

View File

@@ -0,0 +1,106 @@
use super::pattern_rules::PatternRules;
use super::target_id_set::TargetIdSet;
use crate::arn::TargetID;
use crate::event::EventName;
use std::collections::HashMap;
/// RulesMap - Rule mappings organized by event name.
/// Corresponds to `event.RulesMap` (map[Name]Rules) in the Go code
#[derive(Debug, Clone, Default)]
pub struct RulesMap {
map: HashMap<EventName, PatternRules>,
}
impl RulesMap {
pub fn new() -> Self {
Default::default()
}
/// Adds a rule configuration.
/// event_names: the set of event names.
/// pattern: the object key pattern.
/// target_id: the notification target.
///
/// This method expands composite event names.
pub fn add_rule_config(
&mut self,
event_names: &[EventName],
pattern: String,
target_id: TargetID,
) {
let mut effective_pattern = pattern;
if effective_pattern.is_empty() {
effective_pattern = "*".to_string(); // Match all by default
}
for event_name_spec in event_names {
for expanded_event_name in event_name_spec.expand() {
// Make sure EventName::expand() returns Vec<EventName>
self.map
.entry(expanded_event_name)
.or_default()
.add(effective_pattern.clone(), target_id.clone());
}
}
}
/// Merges another RulesMap into this one.
/// Corresponds to Go's `RulesMap.Add(rulesMap2 RulesMap)`
pub fn add_map(&mut self, other_map: &Self) {
for (event_name, other_pattern_rules) in &other_map.map {
let self_pattern_rules = self.map.entry(*event_name).or_default();
// PatternRules::union returns a new PatternRules; we need to modify the existing one
let merged_rules = self_pattern_rules.union(other_pattern_rules);
*self_pattern_rules = merged_rules;
}
}
/// Removes the rules defined in another RulesMap from this one.
/// Corresponds to Go's `RulesMap.Remove(rulesMap2 RulesMap)`
pub fn remove_map(&mut self, other_map: &Self) {
let mut events_to_remove = Vec::new();
for (event_name, self_pattern_rules) in &mut self.map {
if let Some(other_pattern_rules) = other_map.map.get(event_name) {
*self_pattern_rules = self_pattern_rules.difference(other_pattern_rules);
if self_pattern_rules.is_empty() {
events_to_remove.push(*event_name);
}
}
}
for event_name in events_to_remove {
self.map.remove(&event_name);
}
}
/// Matches rules for the given event name and object key, returning all matching TargetIDs.
pub fn match_rules(&self, event_name: EventName, object_key: &str) -> TargetIdSet {
// First try to match the event name directly
if let Some(pattern_rules) = self.map.get(&event_name) {
let targets = pattern_rules.match_targets(object_key);
if !targets.is_empty() {
return targets;
}
}
// Go's RulesMap[eventName] indexes directly, yielding empty Rules when the key is absent.
// Rust's HashMap::get returns an Option, so a missing event name simply has no rules.
// Composite events (such as ObjectCreatedAll) were expanded into single events in
// add_rule_config, so lookups should use single event names. If the caller passes a
// composite event, it should expand it first (or this function would have to).
// Here we assume event_name is already a concrete, lookup-ready event.
self.map
.get(&event_name)
.map_or_else(TargetIdSet::new, |pr| pr.match_targets(object_key))
}
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
/// Returns a reference to the inner rules, for scenarios such as BucketNotificationConfig::validate.
pub fn inner(&self) -> &HashMap<EventName, PatternRules> {
&self.map
}
}
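// A hedged sketch of the add/match round trip. The TargetID is assumed to be
// built elsewhere, and `EventName::ObjectCreatedPut` stands in for one of the
// single (non-composite) variants that `ObjectCreatedAll` expands into.
#[allow(dead_code)]
fn rules_map_example(target_id: TargetID) -> TargetIdSet {
let mut map = RulesMap::new();
map.add_rule_config(&[EventName::ObjectCreatedAll], "logs/*".to_string(), target_id);
// The composite event was expanded at insertion time, so we query with a
// concrete event name and a matching object key.
map.match_rules(EventName::ObjectCreatedPut, "logs/2025-06-19.log")
}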

View File

@@ -0,0 +1,15 @@
use crate::arn::TargetID;
use std::collections::HashSet;
/// TargetIDSet - A collection representation of TargetID.
pub type TargetIdSet = HashSet<TargetID>;
/// Provides a Go-like method for TargetIdSet (can be implemented as trait if needed)
#[allow(dead_code)]
pub(crate) fn new_target_id_set(target_ids: Vec<TargetID>) -> TargetIdSet {
target_ids.into_iter().collect()
}
// HashSet already provides clone, union, difference, and similar operations.
// The Go version's methods return a new set, whereas HashSet's methods usually
// return iterators or mutate in place. Wrapper functions can be added if Go's
// API style must be matched exactly.

View File

@@ -0,0 +1,274 @@
use super::pattern;
use crate::arn::{ArnError, TargetIDError, ARN};
use crate::event::EventName;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::io::Read;
use thiserror::Error;
#[derive(Debug, Error)]
pub enum ParseConfigError {
#[error("XML parsing error:{0}")]
XmlError(#[from] quick_xml::errors::Error),
#[error("Invalid filter value:{0}")]
InvalidFilterValue(String),
#[error("Invalid filter name: {0}, only 'prefix' or 'suffix' is allowed")]
InvalidFilterName(String),
#[error("There can only be one 'prefix' in the filter rule")]
DuplicatePrefixFilter,
#[error("There can only be one 'suffix' in the filter rule")]
DuplicateSuffixFilter,
#[error("Missing event name")]
MissingEventName,
#[error("Duplicate event name:{0}")]
DuplicateEventName(String), // EventName is usually an enum, and here String is used to represent its text
#[error("Repeated queue configuration: ID={0:?}, ARN={1}")]
DuplicateQueueConfiguration(Option<String>, String),
#[error("Unsupported configuration types (e.g. Lambda, Topic)")]
UnsupportedConfiguration,
#[error("ARN not found:{0}")]
ArnNotFound(String),
#[error("Unknown area:{0}")]
UnknownRegion(String),
#[error("ARN parsing error:{0}")]
ArnParseError(#[from] ArnError),
#[error("TargetID parsing error:{0}")]
TargetIDParseError(#[from] TargetIDError),
#[error("IO Error:{0}")]
IoError(#[from] std::io::Error),
#[error("Region mismatch: Configure region {config_region}, current region {current_region}")]
RegionMismatch { config_region: String, current_region: String },
#[error("ARN {0} Not found in the provided list")]
ArnValidation(String),
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct FilterRule {
#[serde(rename = "Name")]
pub name: String,
#[serde(rename = "Value")]
pub value: String,
}
impl FilterRule {
fn validate(&self) -> Result<(), ParseConfigError> {
if self.name != "prefix" && self.name != "suffix" {
return Err(ParseConfigError::InvalidFilterName(self.name.clone()));
}
// ValidateFilterRuleValue from Go:
// no "." or ".." path segments, <= 1024 chars, valid UTF-8, no '\'.
for segment in self.value.split('/') {
if segment == "." || segment == ".." {
return Err(ParseConfigError::InvalidFilterValue(self.value.clone()));
}
}
if self.value.len() > 1024 || self.value.contains('\\') || std::str::from_utf8(self.value.as_bytes()).is_err() {
return Err(ParseConfigError::InvalidFilterValue(self.value.clone()));
}
Ok(())
}
}
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)]
pub struct FilterRuleList {
#[serde(rename = "FilterRule", default, skip_serializing_if = "Vec::is_empty")]
pub rules: Vec<FilterRule>,
}
impl FilterRuleList {
pub fn validate(&self) -> Result<(), ParseConfigError> {
let mut has_prefix = false;
let mut has_suffix = false;
for rule in &self.rules {
rule.validate()?;
if rule.name == "prefix" {
if has_prefix {
return Err(ParseConfigError::DuplicatePrefixFilter);
}
has_prefix = true;
} else if rule.name == "suffix" {
if has_suffix {
return Err(ParseConfigError::DuplicateSuffixFilter);
}
has_suffix = true;
}
}
Ok(())
}
pub fn pattern(&self) -> String {
let mut prefix_val: Option<&str> = None;
let mut suffix_val: Option<&str> = None;
for rule in &self.rules {
if rule.name == "prefix" {
prefix_val = Some(&rule.value);
} else if rule.name == "suffix" {
suffix_val = Some(&rule.value);
}
}
pattern::new_pattern(prefix_val, suffix_val)
}
pub fn is_empty(&self) -> bool {
self.rules.is_empty()
}
}
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)]
pub struct S3KeyFilter {
#[serde(rename = "FilterRuleList", default, skip_serializing_if = "FilterRuleList::is_empty")]
pub filter_rule_list: FilterRuleList,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct QueueConfig {
#[serde(rename = "Id", skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "Queue")] // This is ARN in XML
pub arn: ARN,
#[serde(rename = "Event", default)] // XML has multiple <Event> tags
pub events: Vec<EventName>, // EventName needs to handle XML (de)serialization if not string
#[serde(rename = "Filter", default, skip_serializing_if = "s3key_filter_is_empty")]
pub filter: S3KeyFilter,
}
fn s3key_filter_is_empty(f: &S3KeyFilter) -> bool {
f.filter_rule_list.is_empty()
}
impl QueueConfig {
pub fn validate(&self, region: &str, arn_list: &[String]) -> Result<(), ParseConfigError> {
if self.events.is_empty() {
return Err(ParseConfigError::MissingEventName);
}
let mut event_set = HashSet::new();
for event in &self.events {
// EventName::to_string() or similar for uniqueness check
if !event_set.insert(event.to_string()) {
return Err(ParseConfigError::DuplicateEventName(event.to_string()));
}
}
self.filter.filter_rule_list.validate()?;
// Validate ARN (similar to Go's Queue.Validate)
// The Go code checks targetList.Exists(q.ARN.TargetID)
// Here we check against a provided arn_list
let _config_arn_str = self.arn.to_arn_string();
if !self.arn.region.is_empty() && self.arn.region != region {
return Err(ParseConfigError::UnknownRegion(self.arn.region.clone()));
}
// Construct the ARN string that would be in arn_list
// The arn_list contains ARNs like "arn:rustfs:sqs:REGION:ID:NAME"
// We need to ensure self.arn (potentially with region adjusted) is in arn_list
let effective_arn = ARN {
target_id: self.arn.target_id.clone(),
region: if self.arn.region.is_empty() {
region.to_string()
} else {
self.arn.region.clone()
},
service: self.arn.service.clone(), // or default "sqs"
partition: self.arn.partition.clone(), // or default "rustfs"
};
if !arn_list.contains(&effective_arn.to_arn_string()) {
return Err(ParseConfigError::ArnNotFound(effective_arn.to_arn_string()));
}
Ok(())
}
/// Sets the region if it's not already set in the ARN.
pub fn set_region_if_empty(&mut self, region: &str) {
if self.arn.region.is_empty() {
self.arn.region = region.to_string();
}
}
}
/// Corresponding to the `lambda` structure in the Go code.
/// Used to parse <CloudFunction> ARN from inside the <CloudFunctionConfiguration> tag.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default)]
pub struct LambdaConfigDetail {
#[serde(rename = "CloudFunction")]
pub arn: String,
// According to the AWS S3 docs, <CloudFunctionConfiguration> usually also contains
// Id, Event, and Filter, but to match the provided Go `lambda` struct exactly, only
// the ARN is included here. Additional fields can be added if full support is needed,
// for example:
// #[serde(rename = "Id", skip_serializing_if = "Option::is_none")]
// pub id: Option<String>,
// #[serde(rename = "Event", default, skip_serializing_if = "Vec::is_empty")]
// pub events: Vec<EventName>,
// #[serde(rename = "Filter", default, skip_serializing_if = "s3key_filter_is_empty")]
// pub filter: S3KeyFilter,
}
/// Corresponding to the `topic` structure in the Go code.
/// Used to parse <Topic> ARN from inside the <TopicConfiguration> tag.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default)]
pub struct TopicConfigDetail {
#[serde(rename = "Topic")]
pub arn: String,
// Like LambdaConfigDetail, this can be extended with Id, Event, Filter, and other fields as needed.
}
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)]
#[serde(rename = "NotificationConfiguration")]
pub struct NotificationConfiguration {
#[serde(rename = "xmlns", skip_serializing_if = "Option::is_none")]
pub xmlns: Option<String>,
#[serde(rename = "QueueConfiguration", default, skip_serializing_if = "Vec::is_empty")]
pub queue_list: Vec<QueueConfig>,
#[serde(
rename = "CloudFunctionConfiguration", // Tags for each lambda configuration item in XML
default,
skip_serializing_if = "Vec::is_empty"
)]
pub lambda_list: Vec<LambdaConfigDetail>, // Modify: Use a new structure
#[serde(
rename = "TopicConfiguration", // Tags for each topic configuration item in XML
default,
skip_serializing_if = "Vec::is_empty"
)]
pub topic_list: Vec<TopicConfigDetail>, // Modify: Use a new structure
}
impl NotificationConfiguration {
pub fn from_reader<R: Read>(reader: R) -> Result<Self, ParseConfigError> {
let config: NotificationConfiguration = quick_xml::de::from_reader(std::io::BufReader::new(reader))?;
Ok(config)
}
pub fn validate(&self, current_region: &str, arn_list: &[String]) -> Result<(), ParseConfigError> {
// Verification logic remains the same: if lambda_list or topic_list is not empty, it is considered an unsupported configuration
if !self.lambda_list.is_empty() || !self.topic_list.is_empty() {
return Err(ParseConfigError::UnsupportedConfiguration);
}
let mut unique_queues = HashSet::new();
for queue_config in &self.queue_list {
queue_config.validate(current_region, arn_list)?;
let queue_key = (
queue_config.id.clone(),
queue_config.arn.to_arn_string(), // Assuming that the ARN structure implements Display or ToString
);
if !unique_queues.insert(queue_key.clone()) {
return Err(ParseConfigError::DuplicateQueueConfiguration(queue_key.0, queue_key.1));
}
}
Ok(())
}
pub fn set_defaults(&mut self, region: &str) {
for queue_config in &mut self.queue_list {
queue_config.set_region_if_empty(region);
}
if self.xmlns.is_none() {
self.xmlns = Some("http://s3.amazonaws.com/doc/2006-03-01/".to_string());
}
// Note: if LambdaConfigDetail and TopicConfigDetail gain fields such as a region in
// the future, defaults may need to be set here too. For now they only hold ARN strings.
}
}
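// For reference, a hedged sketch of the XML these structures are meant to
// accept. The ARN follows the "arn:rustfs:sqs:REGION:ID:NAME" convention noted
// in QueueConfig::validate; the string form of the event name is assumed.
//
// <NotificationConfiguration>
//   <QueueConfiguration>
//     <Id>1</Id>
//     <Queue>arn:rustfs:sqs:us-east-1:1:webhook</Queue>
//     <Event>s3:ObjectCreated:*</Event>
//     <Filter>
//       <FilterRuleList>
//         <FilterRule>
//           <Name>prefix</Name>
//           <Value>images/</Value>
//         </FilterRule>
//       </FilterRuleList>
//     </Filter>
//   </QueueConfiguration>
// </NotificationConfiguration>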

498
crates/notify/src/store.rs Normal file
View File

@@ -0,0 +1,498 @@
use crate::error::StoreError;
use serde::{de::DeserializeOwned, Serialize};
use snap::raw::{Decoder, Encoder};
use std::sync::{Arc, RwLock};
use std::{
collections::HashMap,
marker::PhantomData,
path::PathBuf,
time::{SystemTime, UNIX_EPOCH},
};
use tracing::{debug, warn};
use uuid::Uuid;
pub const DEFAULT_LIMIT: u64 = 100000; // Default store limit
pub const DEFAULT_EXT: &str = ".unknown"; // Default file extension
pub const COMPRESS_EXT: &str = ".snappy"; // Extension for compressed files
/// STORE_EXTENSION - file extension of an event file in store
pub const STORE_EXTENSION: &str = ".event";
/// Represents a key for an entry in the store
#[derive(Debug, Clone)]
pub struct Key {
/// The name of the key (UUID)
pub name: String,
/// The file extension for the entry
pub extension: String,
/// The number of items in the entry (for batch storage)
pub item_count: usize,
/// Whether the entry is compressed
pub compress: bool,
}
impl Key {
/// Converts the key to a string (filename)
pub fn to_key_string(&self) -> String {
let name_part = if self.item_count > 1 {
format!("{}:{}", self.item_count, self.name)
} else {
self.name.clone()
};
let mut file_name = name_part;
if !self.extension.is_empty() {
file_name.push_str(&self.extension);
}
if self.compress {
file_name.push_str(COMPRESS_EXT);
}
file_name
}
}
impl std::fmt::Display for Key {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Delegate to to_key_string so the two representations cannot drift apart.
write!(f, "{}", self.to_key_string())
}
}
/// Parses a string into a Key
pub fn parse_key(s: &str) -> Key {
debug!("Parsing key: {}", s);
let mut name = s.to_string();
let mut extension = String::new();
let mut item_count = 1;
let mut compress = false;
// Check for compressed suffixes
if name.ends_with(COMPRESS_EXT) {
compress = true;
name = name[..name.len() - COMPRESS_EXT.len()].to_string();
}
// Number of batch items parsed
if let Some(colon_pos) = name.find(':') {
if let Ok(count) = name[..colon_pos].parse::<usize>() {
item_count = count;
name = name[colon_pos + 1..].to_string();
}
}
// Resolve extension
if let Some(dot_pos) = name.rfind('.') {
extension = name[dot_pos..].to_string();
name = name[..dot_pos].to_string();
}
debug!(
"Parsed key - name: {}, extension: {}, item_count: {}, compress: {}",
name, extension, item_count, compress
);
Key {
name,
extension,
item_count,
compress,
}
}
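// Example (a sketch for illustration): the file name format produced by
// to_key_string is "<count>:<name><ext>[.snappy]", and parse_key reverses it.
#[cfg(test)]
mod key_format_example {
use super::*;
#[test]
fn key_round_trip() {
let key = Key {
name: "0f8fad5b-d9cb-469f-a165-70867728950e".to_string(),
extension: ".event".to_string(),
item_count: 5,
compress: true,
};
let rendered = key.to_key_string();
assert_eq!(rendered, "5:0f8fad5b-d9cb-469f-a165-70867728950e.event.snappy");
let parsed = parse_key(&rendered);
assert_eq!(parsed.name, key.name);
assert_eq!(parsed.extension, key.extension);
assert_eq!(parsed.item_count, 5);
assert!(parsed.compress);
}
}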
/// Trait for a store that can store and retrieve items of type T
pub trait Store<T>: Send + Sync {
/// The error type for the store
type Error;
/// The key type for the store
type Key;
/// Opens the store
fn open(&self) -> Result<(), Self::Error>;
/// Stores a single item
fn put(&self, item: T) -> Result<Self::Key, Self::Error>;
/// Stores multiple items in a single batch
fn put_multiple(&self, items: Vec<T>) -> Result<Self::Key, Self::Error>;
/// Retrieves a single item by key
fn get(&self, key: &Self::Key) -> Result<T, Self::Error>;
/// Retrieves multiple items by key
fn get_multiple(&self, key: &Self::Key) -> Result<Vec<T>, Self::Error>;
/// Deletes an item by key
fn del(&self, key: &Self::Key) -> Result<(), Self::Error>;
/// Lists all keys in the store
fn list(&self) -> Vec<Self::Key>;
/// Returns the number of items in the store
fn len(&self) -> usize;
/// Returns true if the store is empty
fn is_empty(&self) -> bool;
/// Clones the store into a boxed trait object
fn boxed_clone(&self) -> Box<dyn Store<T, Error = Self::Error, Key = Self::Key> + Send + Sync>;
}
/// A store that uses the filesystem to persist events in a queue
pub struct QueueStore<T> {
entry_limit: u64,
directory: PathBuf,
file_ext: String,
entries: Arc<RwLock<HashMap<String, i64>>>, // key -> modtime as unix nano
_phantom: PhantomData<T>,
}
impl<T> Clone for QueueStore<T> {
fn clone(&self) -> Self {
QueueStore {
entry_limit: self.entry_limit,
directory: self.directory.clone(),
file_ext: self.file_ext.clone(),
entries: Arc::clone(&self.entries),
_phantom: PhantomData,
}
}
}
impl<T: Serialize + DeserializeOwned + Send + Sync> QueueStore<T> {
/// Creates a new QueueStore
pub fn new(directory: impl Into<PathBuf>, limit: u64, ext: &str) -> Self {
let file_ext = if ext.is_empty() { DEFAULT_EXT } else { ext };
QueueStore {
directory: directory.into(),
entry_limit: if limit == 0 { DEFAULT_LIMIT } else { limit },
file_ext: file_ext.to_string(),
entries: Arc::new(RwLock::new(HashMap::with_capacity(limit as usize))),
_phantom: PhantomData,
}
}
/// Returns the full path for a key
fn file_path(&self, key: &Key) -> PathBuf {
self.directory.join(key.to_string())
}
/// Reads a file for the given key
fn read_file(&self, key: &Key) -> Result<Vec<u8>, StoreError> {
let path = self.file_path(key);
debug!(
"Reading file for key: {},path: {}",
key.to_string(),
path.display()
);
let data = std::fs::read(&path).map_err(|e| {
if e.kind() == std::io::ErrorKind::NotFound {
StoreError::NotFound
} else {
StoreError::Io(e)
}
})?;
if data.is_empty() {
return Err(StoreError::NotFound);
}
if key.compress {
let mut decoder = Decoder::new();
decoder
.decompress_vec(&data)
.map_err(|e| StoreError::Compression(e.to_string()))
} else {
Ok(data)
}
}
/// Writes data to a file for the given key
fn write_file(&self, key: &Key, data: &[u8]) -> Result<(), StoreError> {
let path = self.file_path(key);
// Create directory if it doesn't exist
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent).map_err(StoreError::Io)?;
}
let data = if key.compress {
let mut encoder = Encoder::new();
encoder
.compress_vec(data)
.map_err(|e| StoreError::Compression(e.to_string()))?
} else {
data.to_vec()
};
std::fs::write(&path, &data).map_err(StoreError::Io)?;
let modified = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_nanos() as i64;
let mut entries = self.entries.write().map_err(|_| {
StoreError::Internal("Failed to acquire write lock on entries".to_string())
})?;
entries.insert(key.to_string(), modified);
debug!("Wrote event to store: {}", key.to_string());
Ok(())
}
}
impl<T> Store<T> for QueueStore<T>
where
T: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
{
type Error = StoreError;
type Key = Key;
fn open(&self) -> Result<(), Self::Error> {
std::fs::create_dir_all(&self.directory).map_err(StoreError::Io)?;
let entries = std::fs::read_dir(&self.directory).map_err(StoreError::Io)?;
// Get the write lock to update the internal state
let mut entries_map = self.entries.write().map_err(|_| {
StoreError::Internal("Failed to acquire write lock on entries".to_string())
})?;
for entry in entries {
let entry = entry.map_err(StoreError::Io)?;
let metadata = entry.metadata().map_err(StoreError::Io)?;
if metadata.is_file() {
let modified = metadata.modified().map_err(StoreError::Io)?;
let unix_nano = modified
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_nanos() as i64;
let file_name = entry.file_name().to_string_lossy().to_string();
entries_map.insert(file_name, unix_nano);
}
}
debug!("Opened store at: {:?}", self.directory);
Ok(())
}
fn put(&self, item: T) -> Result<Self::Key, Self::Error> {
// Check storage limits
{
let entries = self.entries.read().map_err(|_| {
StoreError::Internal("Failed to acquire read lock on entries".to_string())
})?;
if entries.len() as u64 >= self.entry_limit {
return Err(StoreError::LimitExceeded);
}
}
let uuid = Uuid::new_v4();
let key = Key {
name: uuid.to_string(),
extension: self.file_ext.clone(),
item_count: 1,
compress: true,
};
let data =
serde_json::to_vec(&item).map_err(|e| StoreError::Serialization(e.to_string()))?;
self.write_file(&key, &data)?;
Ok(key)
}
fn put_multiple(&self, items: Vec<T>) -> Result<Self::Key, Self::Error> {
// Check storage limits
{
let entries = self.entries.read().map_err(|_| {
StoreError::Internal("Failed to acquire read lock on entries".to_string())
})?;
if entries.len() as u64 >= self.entry_limit {
return Err(StoreError::LimitExceeded);
}
}
if items.is_empty() {
// An empty batch has no meaningful key, so treat it as an error.
return Err(StoreError::Internal(
"Cannot put_multiple with empty items list".to_string(),
));
}
let uuid = Uuid::new_v4();
let key = Key {
name: uuid.to_string(),
extension: self.file_ext.clone(),
item_count: items.len(),
compress: true,
};
// Serialize all items into a single Vec<u8>.
// The current put_multiple/get_multiple format is concatenated JSON objects,
// which is fragile to deserialize unless handled carefully; storing a JSON
// array (Vec<T>) would be more robust. For now we keep the concatenation.
let mut buffer = Vec::new();
for item in items {
// If items are Vec<Event>, and Event is large, this could be inefficient.
// The current get_multiple deserializes one by one.
let item_data =
serde_json::to_vec(&item).map_err(|e| StoreError::Serialization(e.to_string()))?;
buffer.extend_from_slice(&item_data);
// If using JSON array: buffer = serde_json::to_vec(&items)?
}
self.write_file(&key, &buffer)?;
Ok(key)
}
fn get(&self, key: &Self::Key) -> Result<T, Self::Error> {
if key.item_count != 1 {
return Err(StoreError::Internal(format!(
"get() called on a batch key ({} items), use get_multiple()",
key.item_count
)));
}
let items = self.get_multiple(key)?;
items.into_iter().next().ok_or(StoreError::NotFound)
}
fn get_multiple(&self, key: &Self::Key) -> Result<Vec<T>, Self::Error> {
debug!("Reading items from store for key: {}", key.to_string());
let data = self.read_file(key)?;
if data.is_empty() {
return Err(StoreError::Deserialization(
"Cannot deserialize empty data".to_string(),
));
}
let mut items = Vec::with_capacity(key.item_count);
// let mut deserializer = serde_json::Deserializer::from_slice(&data);
// while let Ok(item) = serde::Deserialize::deserialize(&mut deserializer) {
// items.push(item);
// }
// This deserialization logic assumes multiple JSON objects are simply concatenated in the file.
// This is fragile. It's better to store a JSON array `[item1, item2, ...]`
// or use a streaming deserializer that can handle multiple top-level objects if that's the format.
// For now, assuming serde_json::Deserializer::from_slice can handle this if input is well-formed.
let mut deserializer = serde_json::Deserializer::from_slice(&data).into_iter::<T>();
for _ in 0..key.item_count {
match deserializer.next() {
Some(Ok(item)) => items.push(item),
Some(Err(e)) => {
return Err(StoreError::Deserialization(format!(
"Failed to deserialize item in batch: {}",
e
)));
}
None => {
// Reached end of stream sooner than item_count
if items.len() < key.item_count && !items.is_empty() {
// Partial read
warn!(
"Expected {} items for key {}, but only found {}. Possible data corruption or incorrect item_count.",
key.item_count,
key.to_string(),
items.len()
);
// Depending on strictness, this could be an error.
} else if items.is_empty() {
// No items at all, but file existed
return Err(StoreError::Deserialization(format!(
"No items deserialized for key {} though file existed.",
key
)));
}
break;
}
}
}
if items.is_empty() && key.item_count > 0 {
return Err(StoreError::Deserialization("No items found".to_string()));
}
Ok(items)
}
fn del(&self, key: &Self::Key) -> Result<(), Self::Error> {
let path = self.file_path(key);
std::fs::remove_file(&path).map_err(|e| {
if e.kind() == std::io::ErrorKind::NotFound {
// If file not found, still try to remove from entries map in case of inconsistency
warn!("File not found for key {} during del, but proceeding to remove from entries map.", key.to_string());
StoreError::NotFound
} else {
StoreError::Io(e)
}
})?;
// Get the write lock to update the internal state
let mut entries = self.entries.write().map_err(|_| {
StoreError::Internal("Failed to acquire write lock on entries".to_string())
})?;
if entries.remove(&key.to_string()).is_none() {
// Key was not in the map, could be an inconsistency or already deleted.
// This is not necessarily an error if the file deletion succeeded or was NotFound.
debug!(
"Key {} not found in entries map during del, might have been already removed.",
key
);
}
debug!("Deleted event from store: {}", key.to_string());
Ok(())
}
fn list(&self) -> Vec<Self::Key> {
// Get the read lock to read the internal state
let entries = match self.entries.read() {
Ok(entries) => entries,
Err(_) => {
debug!("Failed to acquire read lock on entries for listing");
return Vec::new();
}
};
let mut entries_vec: Vec<_> = entries.iter().collect();
// Sort by modtime (value in HashMap) to process oldest first
entries_vec.sort_by(|a, b| a.1.cmp(b.1)); // Oldest first
entries_vec.into_iter().map(|(k, _)| parse_key(k)).collect()
}
fn len(&self) -> usize {
// Get the read lock to read the internal state
match self.entries.read() {
Ok(entries) => entries.len(),
Err(_) => {
debug!("Failed to acquire read lock on entries for len");
0
}
}
}
fn is_empty(&self) -> bool {
self.len() == 0
}
fn boxed_clone(&self) -> Box<dyn Store<T, Error = Self::Error, Key = Self::Key> + Send + Sync> {
Box::new(self.clone())
as Box<dyn Store<T, Error = Self::Error, Key = Self::Key> + Send + Sync>
}
}
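// Example (a sketch for illustration): end-to-end use of QueueStore with a
// unique temporary directory; the directory name here is arbitrary.
#[cfg(test)]
mod queue_store_example {
use super::*;
#[test]
fn put_get_del_round_trip() {
let dir = std::env::temp_dir().join(format!("queue-store-example-{}", Uuid::new_v4()));
let store: QueueStore<String> = QueueStore::new(dir, 10, STORE_EXTENSION);
store.open().expect("open store");
let key = store.put("hello".to_string()).expect("put item");
assert_eq!(store.get(&key).expect("get item"), "hello");
// Batched items are stored as concatenated JSON values and read back one by one.
let batch_key = store.put_multiple(vec!["a".to_string(), "b".to_string()]).expect("put batch");
assert_eq!(store.get_multiple(&batch_key).expect("get batch").len(), 2);
store.del(&key).expect("del item");
store.del(&batch_key).expect("del batch");
assert!(store.is_empty());
}
}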

View File

@@ -1,232 +0,0 @@
use crate::{adapter, ChannelAdapter, EventNotifierConfig};
use common::error::{Error, Result};
use ecstore::config::com::{read_config, save_config, CONFIG_PREFIX};
use ecstore::disk::RUSTFS_META_BUCKET;
use ecstore::store::ECStore;
use ecstore::store_api::ObjectOptions;
use ecstore::utils::path::SLASH_SEPARATOR;
use ecstore::StorageAPI;
use once_cell::sync::Lazy;
use std::sync::Arc;
use tokio::sync::Mutex;
use tracing::instrument;
/// Config file name
const CONFIG_FILE: &str = "event.json";
/// Event system config path segment
const EVENT: &str = "event";
/// Global storage API access point
pub static GLOBAL_STORE_API: Lazy<Mutex<Option<Arc<ECStore>>>> = Lazy::new(|| Mutex::new(None));
/// Global event system configuration
pub static GLOBAL_EVENT_CONFIG: Lazy<Mutex<Option<EventNotifierConfig>>> = Lazy::new(|| Mutex::new(None));
/// EventManager Responsible for managing all operations of the event system
#[derive(Debug)]
pub struct EventManager {
api: Arc<ECStore>,
}
impl EventManager {
/// Create a new Event Manager
pub async fn new(api: Arc<ECStore>) -> Self {
// Set the global storage API
{
let mut global_api = GLOBAL_STORE_API.lock().await;
*global_api = Some(api.clone());
}
Self { api }
}
/// Initialize the Event Manager
///
/// # Returns
/// If it succeeds, it returns configuration information, and if it fails, it returns an error
#[instrument(skip_all)]
pub async fn init(&self) -> Result<EventNotifierConfig> {
tracing::info!("Event system configuration initialization begins");
let cfg = match read_config_without_migrate(self.api.clone()).await {
Ok(cfg) => {
tracing::info!("The event system configuration was successfully read");
cfg
}
Err(err) => {
tracing::error!("Failed to initialize the event system configuration:{:?}", err);
return Err(err);
}
};
*GLOBAL_EVENT_CONFIG.lock().await = Some(cfg.clone());
tracing::info!("The initialization of the event system configuration is complete");
Ok(cfg)
}
/// Create a new configuration
///
/// # Parameters
/// - `cfg`: The configuration to be created
///
/// # Returns
/// The result of the operation
pub async fn create_config(&self, cfg: &EventNotifierConfig) -> Result<()> {
// Check whether the configuration already exists
if read_event_config(self.api.clone()).await.is_ok() {
return Err(Error::msg("The configuration already exists, use the update action"));
}
save_event_config(self.api.clone(), cfg).await?;
*GLOBAL_EVENT_CONFIG.lock().await = Some(cfg.clone());
Ok(())
}
/// Update the configuration
///
/// # Parameters
/// - `cfg`: The configuration to be updated
///
/// # Returns
/// The result of the operation
pub async fn update_config(&self, cfg: &EventNotifierConfig) -> Result<()> {
// Read the existing configuration first to merge
let current_cfg = read_event_config(self.api.clone()).await.unwrap_or_default();
// This is where the merge logic can be implemented
let merged_cfg = self.merge_configs(current_cfg, cfg.clone());
save_event_config(self.api.clone(), &merged_cfg).await?;
*GLOBAL_EVENT_CONFIG.lock().await = Some(merged_cfg);
Ok(())
}
/// Merge the two configurations
fn merge_configs(&self, current: EventNotifierConfig, new: EventNotifierConfig) -> EventNotifierConfig {
let mut merged = current;
// Merge webhook configurations
for (id, config) in new.webhook {
merged.webhook.insert(id, config);
}
// Merge MQTT configurations
for (id, config) in new.mqtt {
merged.mqtt.insert(id, config);
}
merged
}
/// Delete the configuration
pub async fn delete_config(&self) -> Result<()> {
let config_file = get_event_config_file();
self.api
.delete_object(
RUSTFS_META_BUCKET,
&config_file,
ObjectOptions {
delete_prefix: true,
delete_prefix_object: true,
..Default::default()
},
)
.await?;
// Reset the global configuration to default
// let _ = GLOBAL_EventSysConfig.set(self.read_config().await?);
Ok(())
}
/// Read the configuration
pub async fn read_config(&self) -> Result<EventNotifierConfig> {
read_event_config(self.api.clone()).await
}
/// Create all enabled adapters
pub async fn create_adapters(&self) -> Result<Vec<Arc<dyn ChannelAdapter>>> {
let config = match GLOBAL_EVENT_CONFIG.lock().await.clone() {
Some(cfg) => cfg,
None => return Err(Error::msg("The global configuration is not initialized")),
};
let adapter_configs = config.to_adapter_configs();
match adapter::create_adapters(adapter_configs).await {
Ok(adapters) => Ok(adapters),
Err(err) => {
tracing::error!("Failed to create adapters: {:?}", err);
Err(Error::from(err))
}
}
}
}
/// Get the Global Storage API
pub async fn get_global_store_api() -> Option<Arc<ECStore>> {
GLOBAL_STORE_API.lock().await.clone()
}
/// Get the global event configuration
pub async fn get_global_event_config() -> Option<EventNotifierConfig> {
GLOBAL_EVENT_CONFIG.lock().await.clone()
}
/// Read event configuration
async fn read_event_config<S: StorageAPI>(api: Arc<S>) -> Result<EventNotifierConfig> {
let config_file = get_event_config_file();
let data = read_config(api, &config_file).await?;
EventNotifierConfig::unmarshal(&data)
}
/// Save the event configuration
async fn save_event_config<S: StorageAPI>(api: Arc<S>, config: &EventNotifierConfig) -> Result<()> {
let config_file = get_event_config_file();
let data = config.marshal()?;
save_config(api, &config_file, data).await
}
/// Get the event profile path
fn get_event_config_file() -> String {
format!("{}{}{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR, EVENT, SLASH_SEPARATOR, CONFIG_FILE)
}
/// Read the configuration file and create a default configuration if it doesn't exist
pub async fn read_config_without_migrate<S: StorageAPI>(api: Arc<S>) -> Result<EventNotifierConfig> {
let config_file = get_event_config_file();
let data = match read_config(api.clone(), &config_file).await {
Ok(data) => {
if data.is_empty() {
return new_and_save_event_config(api).await;
}
data
}
Err(err) if ecstore::config::error::is_err_config_not_found(&err) => {
tracing::warn!("If the configuration file does not exist, start initializing the default configuration");
return new_and_save_event_config(api).await;
}
Err(err) => {
tracing::error!("Read configuration file error: {:?}", err);
return Err(err);
}
};
// Parse configuration
let cfg = EventNotifierConfig::unmarshal(&data)?;
Ok(cfg)
}
/// Create and save a new configuration
async fn new_and_save_event_config<S: StorageAPI>(api: Arc<S>) -> Result<EventNotifierConfig> {
let cfg = EventNotifierConfig::default();
save_event_config(api, &cfg).await?;
Ok(cfg)
}

View File

@@ -1,319 +0,0 @@
use async_trait::async_trait;
use serde::{de::DeserializeOwned, Serialize};
use std::error::Error;
use std::fmt;
use std::fmt::Display;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;
use tokio::time;
pub mod manager;
pub mod queue;
// Constant definitions
pub const RETRY_INTERVAL: Duration = Duration::from_secs(3);
pub const DEFAULT_LIMIT: u64 = 100000; // Default store limit
pub const DEFAULT_EXT: &str = ".unknown";
pub const COMPRESS_EXT: &str = ".snappy";
// Error types
#[derive(Debug)]
pub enum StoreError {
NotConnected,
LimitExceeded,
IoError(std::io::Error),
Utf8(std::str::Utf8Error),
SerdeError(serde_json::Error),
Deserialize(serde_json::Error),
UuidError(uuid::Error),
Other(String),
}
impl Display for StoreError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
StoreError::NotConnected => write!(f, "not connected to target server/service"),
StoreError::LimitExceeded => write!(f, "the maximum store limit reached"),
StoreError::IoError(e) => write!(f, "IO error: {}", e),
StoreError::Utf8(e) => write!(f, "UTF-8 conversion error: {}", e),
StoreError::SerdeError(e) => write!(f, "serialization error: {}", e),
StoreError::Deserialize(e) => write!(f, "deserialization error: {}", e),
StoreError::UuidError(e) => write!(f, "UUID generation error: {}", e),
StoreError::Other(s) => write!(f, "{}", s),
}
}
}
impl Error for StoreError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
StoreError::IoError(e) => Some(e),
StoreError::SerdeError(e) => Some(e),
StoreError::UuidError(e) => Some(e),
_ => None,
}
}
}
impl From<std::io::Error> for StoreError {
fn from(e: std::io::Error) -> Self {
StoreError::IoError(e)
}
}
impl From<serde_json::Error> for StoreError {
fn from(e: serde_json::Error) -> Self {
StoreError::SerdeError(e)
}
}
impl From<uuid::Error> for StoreError {
fn from(e: uuid::Error) -> Self {
StoreError::UuidError(e)
}
}
pub type StoreResult<T> = Result<T, StoreError>;
// Logger type
pub type Logger = fn(ctx: Option<&str>, err: StoreError, id: &str, err_kind: &[&dyn Display]);
// Key struct definition
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Key {
pub name: String,
pub compress: bool,
pub extension: String,
pub item_count: usize,
}
impl Key {
pub fn new(name: String, extension: String) -> Self {
Self {
name,
extension,
compress: false,
item_count: 1,
}
}
pub fn with_compression(mut self, compress: bool) -> Self {
self.compress = compress;
self
}
pub fn with_item_count(mut self, count: usize) -> Self {
self.item_count = count;
self
}
pub fn to_string(&self) -> String {
let mut key_str = self.name.clone();
if self.item_count > 1 {
key_str = format!("{}:{}", self.item_count, self.name);
}
let ext = if self.compress {
format!("{}{}", self.extension, COMPRESS_EXT)
} else {
self.extension.clone()
};
format!("{}{}", key_str, ext)
}
}
impl Display for Key {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.to_string())
}
}
pub fn parse_key(k: &str) -> Key {
let mut key = Key {
name: k.to_string(),
compress: false,
extension: String::new(),
item_count: 1,
};
// Check for the compression extension
if k.ends_with(COMPRESS_EXT) {
key.compress = true;
key.name = key.name[..key.name.len() - COMPRESS_EXT.len()].to_string();
}
// Parse the item count
if let Some(colon_pos) = key.name.find(':') {
if let Ok(count) = key.name[..colon_pos].parse::<usize>() {
key.item_count = count;
key.name = key.name[colon_pos + 1..].to_string();
}
}
// Parse the extension
if let Some(dot_pos) = key.name.rfind('.') {
key.extension = key.name[dot_pos..].to_string();
key.name = key.name[..dot_pos].to_string();
}
key
}
// Target trait definition
#[async_trait]
pub trait Target: Send + Sync {
fn name(&self) -> String;
async fn send_from_store(&self, key: Key) -> StoreResult<()>;
}
// Store trait definition
#[async_trait]
pub trait Store<T>: Send + Sync
where
T: Serialize + DeserializeOwned + Send + Sync + 'static,
{
async fn put(&self, item: T) -> StoreResult<Key>;
async fn put_multiple(&self, items: Vec<T>) -> StoreResult<Key>;
async fn get(&self, key: Key) -> StoreResult<T>;
async fn get_multiple(&self, key: Key) -> StoreResult<Vec<T>>;
async fn get_raw(&self, key: Key) -> StoreResult<Vec<u8>>;
async fn put_raw(&self, b: Vec<u8>) -> StoreResult<Key>;
async fn len(&self) -> usize;
async fn list(&self) -> Vec<Key>;
async fn del(&self, key: Key) -> StoreResult<()>;
async fn open(&self) -> StoreResult<()>;
async fn delete(&self) -> StoreResult<()>;
}
// Helper function for replaying stored items
pub async fn replay_items<T>(store: Arc<dyn Store<T>>, done_ch: mpsc::Receiver<()>, log: Logger, id: &str) -> mpsc::Receiver<Key>
where
T: Serialize + DeserializeOwned + Send + Sync + 'static,
{
let (tx, rx) = mpsc::channel(100); // A reasonable buffer size
let id = id.to_string();
tokio::spawn(async move {
let mut done_ch = done_ch;
let mut retry_interval = time::interval(RETRY_INTERVAL);
let mut retry_interval = time::interval_at(retry_interval.tick().await, RETRY_INTERVAL);
loop {
let keys = store.list().await;
for key in keys {
let tx = tx.clone();
tokio::select! {
_ = tx.send(key) => {
// Successfully sent the next key
}
_ = done_ch.recv() => {
return;
}
}
}
tokio::select! {
_ = retry_interval.tick() => {
// Retry timer fired; continue the loop
}
_ = done_ch.recv() => {
return;
}
}
}
});
rx
}
// Helper function for sending items
pub async fn send_items(
target: Arc<dyn Target>,
mut key_ch: mpsc::Receiver<Key>,
mut done_ch: mpsc::Receiver<()>,
logger: Logger,
) {
let mut retry_interval = time::interval(RETRY_INTERVAL);
let target_clone = target.clone();
async fn try_send(
target: Arc<dyn Target>,
key: Key,
retry_interval: &mut time::Interval,
done_ch: &mut mpsc::Receiver<()>,
logger: Logger,
) -> bool {
loop {
match target.send_from_store(key.clone()).await {
Ok(_) => return true,
Err(err) => {
logger(None, err, &target.name(), &[&format!("unable to send log entry to '{}'", target.name())]);
tokio::select! {
_ = retry_interval.tick() => {
// Retry
}
_ = done_ch.recv() => {
return false;
}
}
}
}
}
}
loop {
tokio::select! {
maybe_key = key_ch.recv() => {
match maybe_key {
Some(key) => {
if !try_send(target_clone.clone(), key, &mut retry_interval, &mut done_ch, logger).await {
return;
}
}
None => return,
}
}
_ = done_ch.recv() => {
return;
}
}
}
}
// Stream items from the store to the target
pub async fn stream_items<T>(store: Arc<dyn Store<T>>, target: Arc<dyn Target>, done_ch: mpsc::Receiver<()>, logger: Logger)
where
T: Serialize + DeserializeOwned + Send + Sync + 'static,
{
// Create a clone of done_ch so it can be passed to replay_items
// let (tx, rx) = mpsc::channel::<()>(1);
let (tx_replay, rx_replay) = mpsc::channel::<()>(1);
let (tx_send, rx_send) = mpsc::channel::<()>(1);
let mut done_ch = done_ch;
let key_ch = replay_items(store, rx_replay, logger, &target.name()).await;
// let key_ch = replay_items(store, rx, logger, &target.name()).await;
let tx_replay_clone = tx_replay.clone();
let tx_send_clone = tx_send.clone();
// Listen on the original done_ch; if a signal arrives, close the channels we created
tokio::spawn(async move {
// if done_ch.recv().await.is_some() {
// let _ = tx.send(()).await;
// }
if done_ch.recv().await.is_some() {
let _ = tx_replay_clone.send(()).await;
let _ = tx_send_clone.send(()).await;
}
});
// send_items(target, key_ch, rx, logger).await;
send_items(target, key_ch, rx_send, logger).await;
}

View File

@@ -1,252 +0,0 @@
use crate::store::{parse_key, Key, Store, StoreError, StoreResult, DEFAULT_EXT, DEFAULT_LIMIT};
use async_trait::async_trait;
use serde::{de::DeserializeOwned, Serialize};
use snap::raw::{Decoder, Encoder};
use std::collections::BTreeMap;
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::fs;
use tokio::sync::RwLock;
use uuid::Uuid;
pub struct QueueStore<T> {
entry_limit: u64,
directory: PathBuf,
file_ext: String,
entries: RwLock<BTreeMap<String, i64>>,
_phantom: std::marker::PhantomData<T>,
}
impl<T> QueueStore<T>
where
T: Serialize + DeserializeOwned + Send + Sync + 'static,
{
pub fn new<P: AsRef<Path>>(directory: P, limit: u64, ext: Option<&str>) -> Self {
let entry_limit = if limit == 0 { DEFAULT_LIMIT } else { limit };
let ext = ext.unwrap_or(DEFAULT_EXT).to_string();
Self {
directory: directory.as_ref().to_path_buf(),
entry_limit,
file_ext: ext,
entries: RwLock::new(BTreeMap::new()),
_phantom: std::marker::PhantomData,
}
}
async fn write_bytes(&self, key: Key, data: Vec<u8>) -> StoreResult<()> {
let path = self.directory.join(key.to_string());
let data = if key.compress {
let mut encoder = Encoder::new();
encoder.compress_vec(&data).map_err(|e| StoreError::Other(e.to_string()))?
} else {
data
};
fs::write(&path, &data).await?;
// Update the entries map
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.map_err(|e| StoreError::Other(e.to_string()))?
.as_nanos() as i64;
self.entries.write().await.insert(key.to_string(), now);
Ok(())
}
async fn write(&self, key: Key, item: T) -> StoreResult<()> {
let data = serde_json::to_vec(&item)?;
self.write_bytes(key, data).await
}
async fn multi_write(&self, key: Key, items: Vec<T>) -> StoreResult<()> {
let mut buffer = Vec::new();
for item in items {
let item_data = serde_json::to_vec(&item)?;
buffer.extend_from_slice(&item_data);
buffer.push(b'\n'); // Separate items with newlines
}
self.write_bytes(key, buffer).await
}
async fn del_internal(&self, key: &Key) -> StoreResult<()> {
let path = self.directory.join(key.to_string());
if let Err(e) = fs::remove_file(&path).await {
if e.kind() != std::io::ErrorKind::NotFound {
return Err(e.into());
}
}
self.entries.write().await.remove(&key.to_string());
Ok(())
}
}
#[async_trait]
impl<T> Store<T> for QueueStore<T>
where
T: Serialize + DeserializeOwned + Send + Sync + 'static,
{
async fn put(&self, item: T) -> StoreResult<Key> {
let entries_len = self.entries.read().await.len() as u64;
if entries_len >= self.entry_limit {
return Err(StoreError::LimitExceeded);
}
// Generate a UUID to use as the key
let uuid = Uuid::new_v4();
let key = Key::new(uuid.to_string(), self.file_ext.clone());
self.write(key.clone(), item).await?;
Ok(key)
}
async fn put_multiple(&self, items: Vec<T>) -> StoreResult<Key> {
let entries_len = self.entries.read().await.len() as u64;
if entries_len >= self.entry_limit {
return Err(StoreError::LimitExceeded);
}
if items.is_empty() {
return Err(StoreError::Other("Cannot store empty item list".into()));
}
// Generate a UUID to use as the key
let uuid = Uuid::new_v4();
let key = Key::new(uuid.to_string(), self.file_ext.clone())
.with_item_count(items.len())
.with_compression(true);
self.multi_write(key.clone(), items).await?;
Ok(key)
}
async fn get(&self, key: Key) -> StoreResult<T> {
let items = self.get_multiple(key).await?;
items
.into_iter()
.next()
.ok_or_else(|| StoreError::Other("No items found".into()))
}
async fn get_multiple(&self, key: Key) -> StoreResult<Vec<T>> {
let data = self.get_raw(key).await?;
// Try to parse as a JSON array
match serde_json::from_slice::<Vec<T>>(&data) {
Ok(items) if !items.is_empty() => return Ok(items),
Ok(_) => return Err(StoreError::Other("No items deserialized".into())),
Err(_) => {} // On failure, fall back to line-based parsing
}
// If parsing directly as Vec<T> fails, try line-based parsing:
// convert to a string and parse line by line
let data_str = std::str::from_utf8(&data).map_err(StoreError::Utf8)?;
// Parse line by line (JSON Lines)
let mut items = Vec::new();
for line in data_str.lines() {
let line = line.trim();
if line.is_empty() {
continue;
}
let item = serde_json::from_str::<T>(line).map_err(StoreError::Deserialize)?;
items.push(item);
}
if items.is_empty() {
return Err(StoreError::Other("Failed to deserialize items".into()));
}
Ok(items)
}
async fn get_raw(&self, key: Key) -> StoreResult<Vec<u8>> {
let path = self.directory.join(key.to_string());
let data = fs::read(&path).await?;
if data.is_empty() {
return Err(StoreError::Other("Empty file".into()));
}
if key.compress {
let mut decoder = Decoder::new();
decoder.decompress_vec(&data).map_err(|e| StoreError::Other(e.to_string()))
} else {
Ok(data)
}
}
async fn put_raw(&self, data: Vec<u8>) -> StoreResult<Key> {
let entries_len = self.entries.read().await.len() as u64;
if entries_len >= self.entry_limit {
return Err(StoreError::LimitExceeded);
}
// Generate a UUID to use as the key
let uuid = Uuid::new_v4();
let key = Key::new(uuid.to_string(), self.file_ext.clone());
self.write_bytes(key.clone(), data).await?;
Ok(key)
}
async fn len(&self) -> usize {
self.entries.read().await.len()
}
async fn list(&self) -> Vec<Key> {
let entries = self.entries.read().await;
// Convert entries to (key, timestamp) tuples and sort
let mut entries_vec: Vec<(&String, &i64)> = entries.iter().collect();
entries_vec.sort_by_key(|(_k, &v)| v);
// Parse the sorted keys into Key structs
entries_vec.into_iter().map(|(k, _)| parse_key(k)).collect()
}
async fn del(&self, key: Key) -> StoreResult<()> {
self.del_internal(&key).await
}
async fn open(&self) -> StoreResult<()> {
// Create the directory if it doesn't exist
fs::create_dir_all(&self.directory).await?;
// Load files that already exist
let mut entries = self.entries.write().await;
entries.clear();
let mut dir_entries = fs::read_dir(&self.directory).await?;
while let Some(entry) = dir_entries.next_entry().await? {
if let Ok(metadata) = entry.metadata().await {
if metadata.is_file() {
let modified = metadata
.modified()?
.duration_since(UNIX_EPOCH)
.map_err(|e| StoreError::Other(e.to_string()))?
.as_nanos() as i64;
entries.insert(entry.file_name().to_string_lossy().to_string(), modified);
}
}
}
Ok(())
}
async fn delete(&self) -> StoreResult<()> {
fs::remove_dir_all(&self.directory).await?;
Ok(())
}
}

362
crates/notify/src/stream.rs Normal file
View File

@@ -0,0 +1,362 @@
use crate::{
error::TargetError, integration::NotificationMetrics,
store::{Key, Store},
target::Target,
Event,
StoreError,
};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::{mpsc, Semaphore};
use tokio::time::sleep;
use tracing::{debug, error, info, warn};
/// Streams events from the store to the target
pub async fn stream_events(
store: &mut (dyn Store<crate::event::Event, Error = StoreError, Key = Key> + Send),
target: &dyn Target,
mut cancel_rx: mpsc::Receiver<()>,
) {
info!("Starting event stream for target: {}", target.name());
// Retry configuration
const MAX_RETRIES: usize = 5;
const RETRY_DELAY: Duration = Duration::from_secs(5);
loop {
// Check for cancellation signal
if cancel_rx.try_recv().is_ok() {
info!("Cancellation received for target: {}", target.name());
return;
}
// Get list of events in the store
let keys = store.list();
if keys.is_empty() {
// No events, wait before checking again
sleep(Duration::from_secs(1)).await;
continue;
}
// Process each event
for key in keys {
// Check for cancellation before processing each event
if cancel_rx.try_recv().is_ok() {
info!(
"Cancellation received during processing for target: {}",
target.name()
);
return;
}
let mut retry_count = 0;
let mut success = false;
// Retry logic
while retry_count < MAX_RETRIES && !success {
match target.send_from_store(key.clone()).await {
Ok(_) => {
info!("Successfully sent event for target: {}", target.name());
success = true;
}
Err(e) => {
// Handle specific errors
match &e {
TargetError::NotConnected => {
warn!("Target {} not connected, retrying...", target.name());
retry_count += 1;
sleep(RETRY_DELAY).await;
}
TargetError::Timeout(_) => {
warn!("Timeout for target {}, retrying...", target.name());
retry_count += 1;
sleep(Duration::from_secs((retry_count * 5) as u64)).await; // Linearly increasing backoff
}
_ => {
// Permanent error, skip this event
error!("Permanent error for target {}: {}", target.name(), e);
break;
}
}
}
}
}
// Warn when retries were exhausted without success; the event remains in the store
if retry_count >= MAX_RETRIES && !success {
warn!(
"Max retries exceeded for event {}, target: {}, skipping",
key.to_string(),
target.name()
);
}
}
// Small delay before next iteration
sleep(Duration::from_millis(100)).await;
}
}
/// Starts the event streaming process for a target
pub fn start_event_stream(
mut store: Box<dyn Store<Event, Error = StoreError, Key = Key> + Send>,
target: Arc<dyn Target + Send + Sync>,
) -> mpsc::Sender<()> {
let (cancel_tx, cancel_rx) = mpsc::channel(1);
tokio::spawn(async move {
stream_events(&mut *store, &*target, cancel_rx).await;
info!("Event stream stopped for target: {}", target.name());
});
cancel_tx
}
/// Start event stream with batch processing
pub fn start_event_stream_with_batching(
mut store: Box<dyn Store<Event, Error = StoreError, Key = Key> + Send>,
target: Arc<dyn Target + Send + Sync>,
metrics: Arc<NotificationMetrics>,
semaphore: Arc<Semaphore>,
) -> mpsc::Sender<()> {
let (cancel_tx, cancel_rx) = mpsc::channel(1);
debug!(
"Starting event stream with batching for target: {}",
target.name()
);
tokio::spawn(async move {
stream_events_with_batching(&mut *store, &*target, cancel_rx, metrics, semaphore).await;
info!("Event stream stopped for target: {}", target.name());
});
cancel_tx
}
/// Streams events from the store to the target with batch processing
pub async fn stream_events_with_batching(
store: &mut (dyn Store<Event, Error = StoreError, Key = Key> + Send),
target: &dyn Target,
mut cancel_rx: mpsc::Receiver<()>,
metrics: Arc<NotificationMetrics>,
semaphore: Arc<Semaphore>,
) {
info!(
"Starting event stream with batching for target: {}",
target.name()
);
// Configuration parameters
const DEFAULT_BATCH_SIZE: usize = 1;
let batch_size = std::env::var("RUSTFS_EVENT_BATCH_SIZE")
.ok()
.and_then(|s| s.parse::<usize>().ok())
.unwrap_or(DEFAULT_BATCH_SIZE);
const BATCH_TIMEOUT: Duration = Duration::from_secs(5);
const MAX_RETRIES: usize = 5;
const BASE_RETRY_DELAY: Duration = Duration::from_secs(2);
let mut batch = Vec::with_capacity(batch_size);
let mut batch_keys = Vec::with_capacity(batch_size);
let mut last_flush = Instant::now();
loop {
// Check for a cancellation signal
if cancel_rx.try_recv().is_ok() {
info!("Cancellation received for target: {}", target.name());
return;
}
// Get the list of events in the store
let keys = store.list();
debug!(
"Found {} keys in store for target: {}",
keys.len(),
target.name()
);
if keys.is_empty() {
// If the batch has pending data and the timeout has elapsed, flush it
if !batch.is_empty() && last_flush.elapsed() >= BATCH_TIMEOUT {
process_batch(
&mut batch,
&mut batch_keys,
target,
MAX_RETRIES,
BASE_RETRY_DELAY,
&metrics,
&semaphore,
)
.await;
last_flush = Instant::now();
}
// No events; wait before checking again
tokio::time::sleep(Duration::from_millis(500)).await;
continue;
}
// Process each event
for key in keys {
// Check the cancellation signal again
if cancel_rx.try_recv().is_ok() {
info!(
"Cancellation received during processing for target: {}",
target.name()
);
// Process the collected batch before exiting
if !batch.is_empty() {
process_batch(
&mut batch,
&mut batch_keys,
target,
MAX_RETRIES,
BASE_RETRY_DELAY,
&metrics,
&semaphore,
)
.await;
}
return;
}
// Try to fetch the event from the store
match store.get(&key) {
Ok(event) => {
// Add to the batch
batch.push(event);
batch_keys.push(key);
metrics.increment_processing();
// Process the batch when it is full or enough time has passed since the last flush
if batch.len() >= batch_size || last_flush.elapsed() >= BATCH_TIMEOUT {
process_batch(
&mut batch,
&mut batch_keys,
target,
MAX_RETRIES,
BASE_RETRY_DELAY,
&metrics,
&semaphore,
)
.await;
last_flush = Instant::now();
}
}
Err(e) => {
error!(
"Failed to get event {} from store for target {}: {}",
key.to_string(),
target.name(),
e
);
// Delete events that cannot be read, to avoid retrying them in an endless loop
match store.del(&key) {
Ok(_) => {
info!("Deleted corrupted event {} from store", key.to_string());
}
Err(del_err) => {
error!(
"Failed to delete corrupted event {}: {}",
key.to_string(),
del_err
);
}
}
metrics.increment_failed();
}
}
}
// Small delay before the next round of checks
tokio::time::sleep(Duration::from_millis(100)).await;
}
}
/// Processes a batch of events
async fn process_batch(
batch: &mut Vec<Event>,
batch_keys: &mut Vec<Key>,
target: &dyn Target,
max_retries: usize,
base_delay: Duration,
metrics: &Arc<NotificationMetrics>,
semaphore: &Arc<Semaphore>,
) {
debug!(
"Processing batch of {} events for target: {}",
batch.len(),
target.name()
);
if batch.is_empty() {
return;
}
// Acquire a semaphore permit to limit concurrency
let permit = match semaphore.clone().acquire_owned().await {
Ok(permit) => permit,
Err(e) => {
error!("Failed to acquire semaphore permit: {}", e);
return;
}
};
// Process each event in the batch
for (_event, key) in batch.iter().zip(batch_keys.iter()) {
let mut retry_count = 0;
let mut success = false;
// Retry logic
while retry_count < max_retries && !success {
match target.send_from_store(key.clone()).await {
Ok(_) => {
info!(
"Successfully sent event for target: {}, Key: {}",
target.name(),
key.to_string()
);
success = true;
metrics.increment_processed();
}
Err(e) => {
// Apply a retry strategy based on the error type
match &e {
TargetError::NotConnected => {
warn!("Target {} not connected, retrying...", target.name());
retry_count += 1;
tokio::time::sleep(base_delay * (1 << retry_count)).await; // Exponential backoff
}
TargetError::Timeout(_) => {
warn!("Timeout for target {}, retrying...", target.name());
retry_count += 1;
tokio::time::sleep(base_delay * (1 << retry_count)).await;
}
_ => {
// Permanent error; skip this event
error!("Permanent error for target {}: {}", target.name(), e);
metrics.increment_failed();
break;
}
}
}
}
}
// Handle exhaustion of the maximum retry count
if retry_count >= max_retries && !success {
warn!(
"Max retries exceeded for event {}, target: {}, skipping",
key.to_string(),
target.name()
);
metrics.increment_failed();
}
}
// Clear the processed batch
batch.clear();
batch_keys.clear();
// Release the semaphore permit (via drop)
drop(permit);
}
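// Example (a sketch for illustration): wiring a store and target into the
// batching stream and shutting it down later. The store, target, and metrics
// values are assumed to be constructed elsewhere in the crate.
#[allow(dead_code)]
async fn run_stream_example(
store: Box<dyn Store<Event, Error = StoreError, Key = Key> + Send>,
target: Arc<dyn Target + Send + Sync>,
metrics: Arc<NotificationMetrics>,
) {
// Allow at most four batches to be processed concurrently.
let semaphore = Arc::new(Semaphore::new(4));
let cancel_tx = start_event_stream_with_batching(store, target, metrics, semaphore);
// ... later, signal the stream to stop:
let _ = cancel_tx.send(()).await;
}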

View File

@@ -1,81 +0,0 @@
use crate::config::EventNotifierConfig;
use crate::notifier::EventNotifier;
use common::error::Result;
use ecstore::store::ECStore;
use once_cell::sync::OnceCell;
use std::sync::{Arc, Mutex};
use tracing::{debug, error, info};
/// Global event system
pub struct EventSystem {
/// Event Notifier
notifier: Mutex<Option<EventNotifier>>,
}
impl EventSystem {
/// Create a new event system
pub fn new() -> Self {
Self {
notifier: Mutex::new(None),
}
}
/// Initialize the event system
pub async fn init(&self, store: Arc<ECStore>) -> Result<EventNotifierConfig> {
info!("Initialize the event system");
let notifier = EventNotifier::new(store).await?;
let config = notifier.config().clone();
let mut guard = self
.notifier
.lock()
.map_err(|e| common::error::Error::msg(format!("Failed to acquire lock: {}", e)))?;
*guard = Some(notifier);
debug!("The event system initialization is complete");
Ok(config)
}
/// Send events
pub async fn send_event(&self, event: crate::Event) -> Result<()> {
let guard = self
.notifier
.lock()
.map_err(|e| common::error::Error::msg(format!("Failed to acquire lock: {}", e)))?;
if let Some(notifier) = &*guard {
notifier.send(event).await
} else {
error!("The event system is not initialized");
Err(common::error::Error::msg("The event system is not initialized"))
}
}
/// Shut down the event system
pub async fn shutdown(&self) -> Result<()> {
info!("Shut down the event system");
let mut guard = self
.notifier
.lock()
.map_err(|e| common::error::Error::msg(format!("Failed to acquire lock: {}", e)))?;
if let Some(ref mut notifier) = *guard {
notifier.shutdown().await?;
*guard = None;
info!("The event system is down");
Ok(())
} else {
debug!("The event system has been shut down");
Ok(())
}
}
}
/// A global event system instance
pub static GLOBAL_EVENT_SYS: OnceCell<EventSystem> = OnceCell::new();
/// Initialize the global event system
pub fn init_global_event_system() -> &'static EventSystem {
GLOBAL_EVENT_SYS.get_or_init(EventSystem::new)
}

View File

@@ -0,0 +1,35 @@
#[allow(dead_code)]
const NOTIFY_KAFKA_SUB_SYS: &str = "notify_kafka";
#[allow(dead_code)]
const NOTIFY_MQTT_SUB_SYS: &str = "notify_mqtt";
#[allow(dead_code)]
const NOTIFY_MY_SQL_SUB_SYS: &str = "notify_mysql";
#[allow(dead_code)]
const NOTIFY_NATS_SUB_SYS: &str = "notify_nats";
#[allow(dead_code)]
const NOTIFY_NSQ_SUB_SYS: &str = "notify_nsq";
#[allow(dead_code)]
const NOTIFY_ES_SUB_SYS: &str = "notify_elasticsearch";
#[allow(dead_code)]
const NOTIFY_AMQP_SUB_SYS: &str = "notify_amqp";
#[allow(dead_code)]
const NOTIFY_POSTGRES_SUB_SYS: &str = "notify_postgres";
#[allow(dead_code)]
const NOTIFY_REDIS_SUB_SYS: &str = "notify_redis";
const NOTIFY_WEBHOOK_SUB_SYS: &str = "notify_webhook";
// Webhook constants
pub const WEBHOOK_ENDPOINT: &str = "endpoint";
pub const WEBHOOK_AUTH_TOKEN: &str = "auth_token";
pub const WEBHOOK_QUEUE_DIR: &str = "queue_dir";
pub const WEBHOOK_QUEUE_LIMIT: &str = "queue_limit";
pub const WEBHOOK_CLIENT_CERT: &str = "client_cert";
pub const WEBHOOK_CLIENT_KEY: &str = "client_key";
pub const ENV_WEBHOOK_ENABLE: &str = "RUSTFS_NOTIFY_WEBHOOK_ENABLE";
pub const ENV_WEBHOOK_ENDPOINT: &str = "RUSTFS_NOTIFY_WEBHOOK_ENDPOINT";
pub const ENV_WEBHOOK_AUTH_TOKEN: &str = "RUSTFS_NOTIFY_WEBHOOK_AUTH_TOKEN";
pub const ENV_WEBHOOK_QUEUE_DIR: &str = "RUSTFS_NOTIFY_WEBHOOK_QUEUE_DIR";
pub const ENV_WEBHOOK_QUEUE_LIMIT: &str = "RUSTFS_NOTIFY_WEBHOOK_QUEUE_LIMIT";
pub const ENV_WEBHOOK_CLIENT_CERT: &str = "RUSTFS_NOTIFY_WEBHOOK_CLIENT_CERT";
pub const ENV_WEBHOOK_CLIENT_KEY: &str = "RUSTFS_NOTIFY_WEBHOOK_CLIENT_KEY";
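// Example (a sketch for illustration): reading the webhook settings from the
// environment variables above. The truthy values accepted here ("on", "1",
// "true") are an assumption, not part of the crate's parsing rules.
#[allow(dead_code)]
fn webhook_env_example() {
let enabled = std::env::var(ENV_WEBHOOK_ENABLE)
.map(|v| matches!(v.to_ascii_lowercase().as_str(), "on" | "1" | "true"))
.unwrap_or(false);
let endpoint = std::env::var(ENV_WEBHOOK_ENDPOINT).unwrap_or_default();
let queue_limit: u64 = std::env::var(ENV_WEBHOOK_QUEUE_LIMIT)
.ok()
.and_then(|v| v.parse().ok())
.unwrap_or(0); // 0 lets the store fall back to its default limit
let _ = (enabled, endpoint, queue_limit);
}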

View File

@@ -0,0 +1,97 @@
use crate::arn::TargetID;
use crate::store::{Key, Store};
use crate::{Event, StoreError, TargetError};
use async_trait::async_trait;
pub mod constants;
pub mod mqtt;
pub mod webhook;
/// Trait for notification targets
#[async_trait]
pub trait Target: Send + Sync + 'static {
/// Returns the ID of the target
fn id(&self) -> TargetID;
/// Returns the name of the target
fn name(&self) -> String {
self.id().to_string()
}
/// Checks if the target is active and reachable
async fn is_active(&self) -> Result<bool, TargetError>;
/// Saves an event (either sends it immediately or stores it for later)
async fn save(&self, event: Event) -> Result<(), TargetError>;
/// Sends an event from the store
async fn send_from_store(&self, key: Key) -> Result<(), TargetError>;
/// Closes the target and releases resources
async fn close(&self) -> Result<(), TargetError>;
/// Returns the store associated with the target (if any)
fn store(&self) -> Option<&(dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync)>;
/// Returns the type of the target
fn clone_dyn(&self) -> Box<dyn Target + Send + Sync>;
/// Initialize the target, such as establishing a connection, etc.
async fn init(&self) -> Result<(), TargetError> {
// The default implementation is empty
Ok(())
}
/// Check if the target is enabled
fn is_enabled(&self) -> bool;
}
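// Example (a sketch for illustration): the smallest possible Target, a no-op
// that accepts every event. Assumes TargetID implements Clone, as it is
// cloned elsewhere in the crate.
#[allow(dead_code)]
#[derive(Clone)]
struct NoopTarget {
id: TargetID,
}
#[async_trait]
impl Target for NoopTarget {
fn id(&self) -> TargetID {
self.id.clone()
}
async fn is_active(&self) -> Result<bool, TargetError> {
Ok(true)
}
async fn save(&self, _event: Event) -> Result<(), TargetError> {
// A real target would send or persist the event here.
Ok(())
}
async fn send_from_store(&self, _key: Key) -> Result<(), TargetError> {
Ok(())
}
async fn close(&self) -> Result<(), TargetError> {
Ok(())
}
fn store(&self) -> Option<&(dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync)> {
None
}
fn clone_dyn(&self) -> Box<dyn Target + Send + Sync> {
Box::new(self.clone())
}
fn is_enabled(&self) -> bool {
true
}
}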
/// The `ChannelTargetType` enum represents the different types of channel Target
/// used in the notification system.
///
/// It includes:
/// - `Webhook`: Represents a webhook target for sending notifications via HTTP requests.
/// - `Kafka`: Represents a Kafka target for sending notifications to a Kafka topic.
/// - `Mqtt`: Represents an MQTT target for sending notifications via MQTT protocol.
///
/// Each variant has an associated string representation that can be used for serialization
/// or logging purposes.
/// The `as_str` method returns the string representation of the target type,
/// and the `Display` implementation allows for easy formatting of the target type as a string.
///
/// example usage:
/// ```rust
/// use rustfs_notify::target::ChannelTargetType;
///
/// let target_type = ChannelTargetType::Webhook;
/// assert_eq!(target_type.as_str(), "webhook");
/// println!("Target type: {}", target_type);
/// ```
///
/// example output:
/// Target type: webhook
pub enum ChannelTargetType {
Webhook,
Kafka,
Mqtt,
}
impl ChannelTargetType {
pub fn as_str(&self) -> &'static str {
match self {
ChannelTargetType::Webhook => "webhook",
ChannelTargetType::Kafka => "kafka",
ChannelTargetType::Mqtt => "mqtt",
}
}
}
impl std::fmt::Display for ChannelTargetType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Delegate to as_str so the two representations stay in sync.
write!(f, "{}", self.as_str())
}
}

View File

@@ -0,0 +1,671 @@
use crate::store::{Key, STORE_EXTENSION};
use crate::target::ChannelTargetType;
use crate::{
arn::TargetID, error::TargetError,
event::{Event, EventLog},
store::Store,
StoreError,
Target,
};
use async_trait::async_trait;
use rumqttc::{mqttbytes::Error as MqttBytesError, ConnectionError};
use rumqttc::{AsyncClient, EventLoop, MqttOptions, Outgoing, Packet, QoS};
use std::sync::Arc;
use std::{
path::PathBuf,
sync::atomic::{AtomicBool, Ordering},
time::Duration,
};
use tokio::sync::{mpsc, Mutex, OnceCell};
use tracing::{debug, error, info, instrument, trace, warn};
use url::Url;
use urlencoding;
const DEFAULT_CONNECTION_TIMEOUT: Duration = Duration::from_secs(15);
const EVENT_LOOP_POLL_TIMEOUT: Duration = Duration::from_secs(10); // For initial connection check in task
/// Arguments for configuring an MQTT target
#[derive(Debug, Clone)]
pub struct MQTTArgs {
/// Whether the target is enabled
pub enable: bool,
/// The broker URL
pub broker: Url,
/// The topic to publish to
pub topic: String,
/// The quality of service level
pub qos: QoS,
/// The username for the broker
pub username: String,
/// The password for the broker
pub password: String,
/// The maximum interval for reconnection attempts (Note: rumqttc has internal strategy)
pub max_reconnect_interval: Duration,
/// The keep alive interval
pub keep_alive: Duration,
/// The directory to store events in case of failure
pub queue_dir: String,
/// The maximum number of events to store
pub queue_limit: u64,
}
impl MQTTArgs {
pub fn validate(&self) -> Result<(), TargetError> {
if !self.enable {
return Ok(());
}
match self.broker.scheme() {
"ws" | "wss" | "tcp" | "ssl" | "tls" | "tcps" | "mqtt" | "mqtts" => {}
_ => {
return Err(TargetError::Configuration(
"unknown protocol in broker address".to_string(),
));
}
}
if !self.queue_dir.is_empty() {
let path = std::path::Path::new(&self.queue_dir);
if !path.is_absolute() {
return Err(TargetError::Configuration(
"mqtt queueDir path should be absolute".to_string(),
));
}
if self.qos == QoS::AtMostOnce {
return Err(TargetError::Configuration(
"QoS should be AtLeastOnce (1) or ExactlyOnce (2) if queueDir is set"
.to_string(),
));
}
}
Ok(())
}
}
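// Example (a sketch for illustration): arguments for a local broker; the
// broker URL, topic, and intervals are placeholder values.
#[allow(dead_code)]
fn example_mqtt_args() -> Result<MQTTArgs, TargetError> {
let args = MQTTArgs {
enable: true,
broker: Url::parse("tcp://localhost:1883").map_err(|e| TargetError::Configuration(e.to_string()))?,
topic: "rustfs/events".to_string(),
qos: QoS::AtLeastOnce,
username: String::new(),
password: String::new(),
max_reconnect_interval: Duration::from_secs(5),
keep_alive: Duration::from_secs(30),
queue_dir: String::new(), // empty: events are not queued on disk
queue_limit: 0,
};
args.validate()?;
Ok(args)
}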
struct BgTaskManager {
init_cell: OnceCell<tokio::task::JoinHandle<()>>,
cancel_tx: mpsc::Sender<()>,
initial_cancel_rx: Mutex<Option<mpsc::Receiver<()>>>,
}
/// A target that sends events to an MQTT broker
pub struct MQTTTarget {
id: TargetID,
args: MQTTArgs,
client: Arc<Mutex<Option<AsyncClient>>>,
store: Option<Box<dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync>>,
connected: Arc<AtomicBool>,
bg_task_manager: Arc<BgTaskManager>,
}
impl MQTTTarget {
/// Creates a new MQTTTarget
#[instrument(skip(args), fields(target_id_as_string = %id))]
pub fn new(id: String, args: MQTTArgs) -> Result<Self, TargetError> {
args.validate()?;
let target_id = TargetID::new(id.clone(), ChannelTargetType::Mqtt.as_str().to_string());
let queue_store = if !args.queue_dir.is_empty() {
let base_path = PathBuf::from(&args.queue_dir);
let unique_dir_name = format!(
"rustfs-{}-{}-{}",
ChannelTargetType::Mqtt.as_str(),
target_id.name,
target_id.id
)
.replace(":", "_");
// Ensure the directory name is valid for filesystem
let specific_queue_path = base_path.join(unique_dir_name);
debug!(target_id = %target_id, path = %specific_queue_path.display(), "Initializing queue store for MQTT target");
let store = crate::store::QueueStore::<Event>::new(
specific_queue_path,
args.queue_limit,
STORE_EXTENSION,
);
if let Err(e) = store.open() {
error!(
target_id = %target_id,
error = %e,
"Failed to open store for MQTT target"
);
return Err(TargetError::Storage(format!("{}", e)));
}
Some(Box::new(store)
as Box<
dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync,
>)
} else {
None
};
let (cancel_tx, cancel_rx) = mpsc::channel(1);
let bg_task_manager = Arc::new(BgTaskManager {
init_cell: OnceCell::new(),
cancel_tx,
initial_cancel_rx: Mutex::new(Some(cancel_rx)),
});
info!(target_id = %target_id, "MQTT target created");
Ok(MQTTTarget {
id: target_id,
args,
client: Arc::new(Mutex::new(None)),
store: queue_store,
connected: Arc::new(AtomicBool::new(false)),
bg_task_manager,
})
}
#[instrument(skip(self), fields(target_id = %self.id))]
async fn init(&self) -> Result<(), TargetError> {
if self.connected.load(Ordering::SeqCst) {
debug!(target_id = %self.id, "Already connected.");
return Ok(());
}
let bg_task_manager = Arc::clone(&self.bg_task_manager);
let client_arc = Arc::clone(&self.client);
let connected_arc = Arc::clone(&self.connected);
let target_id_clone = self.id.clone();
let args_clone = self.args.clone();
let _ = bg_task_manager
.init_cell
.get_or_try_init(|| async {
debug!(target_id = %target_id_clone, "Initializing MQTT background task.");
let host = args_clone.broker.host_str().unwrap_or("localhost");
let port = args_clone.broker.port().unwrap_or(1883);
let mut mqtt_options = MqttOptions::new(
format!("rustfs_notify_{}", uuid::Uuid::new_v4()),
host,
port,
);
mqtt_options
.set_keep_alive(args_clone.keep_alive)
.set_max_packet_size(100 * 1024 * 1024, 100 * 1024 * 1024); // 100MB
if !args_clone.username.is_empty() {
mqtt_options
.set_credentials(args_clone.username.clone(), args_clone.password.clone());
}
let (new_client, eventloop) = AsyncClient::new(mqtt_options, 10);
if let Err(e) = new_client.subscribe(&args_clone.topic, args_clone.qos).await {
error!(target_id = %target_id_clone, error = %e, "Failed to subscribe to MQTT topic during init");
return Err(TargetError::Network(format!("MQTT subscribe failed: {}", e)));
}
let mut rx_guard = bg_task_manager.initial_cancel_rx.lock().await;
let cancel_rx = rx_guard.take().ok_or_else(|| {
error!(target_id = %target_id_clone, "MQTT cancel receiver already taken for task.");
TargetError::Configuration("MQTT cancel receiver already taken for task".to_string())
})?;
drop(rx_guard);
*client_arc.lock().await = Some(new_client.clone());
info!(target_id = %target_id_clone, "Spawning MQTT event loop task.");
let task_handle = tokio::spawn(run_mqtt_event_loop(
eventloop,
connected_arc.clone(),
target_id_clone.clone(),
cancel_rx,
));
Ok(task_handle)
})
.await
.map_err(|e: TargetError| {
error!(target_id = %self.id, error = %e, "Failed to initialize MQTT background task");
e
})?;
debug!(target_id = %self.id, "MQTT background task initialized successfully.");
match tokio::time::timeout(DEFAULT_CONNECTION_TIMEOUT, async {
while !self.connected.load(Ordering::SeqCst) {
if let Some(handle) = self.bg_task_manager.init_cell.get() {
if handle.is_finished() && !self.connected.load(Ordering::SeqCst) {
error!(target_id = %self.id, "MQTT background task exited prematurely before connection was established.");
return Err(TargetError::Network("MQTT background task exited prematurely".to_string()));
}
}
tokio::time::sleep(Duration::from_millis(100)).await;
}
debug!(target_id = %self.id, "MQTT target connected successfully.");
Ok(())
}).await {
Ok(Ok(_)) => {
info!(target_id = %self.id, "MQTT target initialized and connected.");
Ok(())
}
Ok(Err(e)) => Err(e),
Err(_) => {
error!(target_id = %self.id, "Timeout waiting for MQTT connection after task spawn.");
Err(TargetError::Network(
"Timeout waiting for MQTT connection".to_string(),
))
}
}
}
#[instrument(skip(self, event), fields(target_id = %self.id))]
async fn send(&self, event: &Event) -> Result<(), TargetError> {
let client_guard = self.client.lock().await;
let client = client_guard
.as_ref()
.ok_or_else(|| TargetError::Configuration("MQTT client not initialized".to_string()))?;
let object_name = urlencoding::decode(&event.s3.object.key)
.map_err(|e| TargetError::Encoding(format!("Failed to decode object key: {}", e)))?;
let key = format!("{}/{}", event.s3.bucket.name, object_name);
let log = EventLog {
event_name: event.event_name,
key,
records: vec![event.clone()],
};
let data = serde_json::to_vec(&log)
.map_err(|e| TargetError::Serialization(format!("Failed to serialize event: {}", e)))?;
// Convert the Vec<u8> to a String, only for logging
let data_string = String::from_utf8(data.clone()).map_err(|e| {
TargetError::Encoding(format!("Failed to convert event data to UTF-8: {}", e))
})?;
debug!(
"Sending event to mqtt target: {}, event log: {}",
self.id, data_string
);
client
.publish(&self.args.topic, self.args.qos, false, data)
.await
.map_err(|e| {
if e.to_string().contains("Connection") || e.to_string().contains("Timeout") {
self.connected.store(false, Ordering::SeqCst);
warn!(target_id = %self.id, error = %e, "Publish failed due to connection issue, marking as not connected.");
TargetError::NotConnected
} else {
TargetError::Request(format!("Failed to publish message: {}", e))
}
})?;
debug!(target_id = %self.id, topic = %self.args.topic, "Event published to MQTT topic");
Ok(())
}
pub fn clone_target(&self) -> Box<dyn Target + Send + Sync> {
Box::new(MQTTTarget {
id: self.id.clone(),
args: self.args.clone(),
client: self.client.clone(),
store: self.store.as_ref().map(|s| s.boxed_clone()),
connected: self.connected.clone(),
bg_task_manager: self.bg_task_manager.clone(),
})
}
}
async fn run_mqtt_event_loop(
mut eventloop: EventLoop,
connected_status: Arc<AtomicBool>,
target_id: TargetID,
mut cancel_rx: mpsc::Receiver<()>,
) {
info!(target_id = %target_id, "MQTT event loop task started.");
let mut initial_connection_established = false;
loop {
tokio::select! {
biased;
_ = cancel_rx.recv() => {
info!(target_id = %target_id, "MQTT event loop task received cancellation signal. Shutting down.");
break;
}
polled_event_result = async {
if !initial_connection_established || !connected_status.load(Ordering::SeqCst) {
match tokio::time::timeout(EVENT_LOOP_POLL_TIMEOUT, eventloop.poll()).await {
Ok(Ok(event)) => Ok(event),
Ok(Err(e)) => Err(e),
Err(_) => {
debug!(target_id = %target_id, "MQTT poll timed out (EVENT_LOOP_POLL_TIMEOUT) while not connected or status pending.");
Err(rumqttc::ConnectionError::NetworkTimeout)
}
}
} else {
eventloop.poll().await
}
} => {
match polled_event_result {
Ok(notification) => {
trace!(target_id = %target_id, event = ?notification, "Received MQTT event");
match notification {
rumqttc::Event::Incoming(Packet::ConnAck(_conn_ack)) => {
info!(target_id = %target_id, "MQTT connected (ConnAck).");
connected_status.store(true, Ordering::SeqCst);
initial_connection_established = true;
}
rumqttc::Event::Incoming(Packet::Publish(publish)) => {
debug!(target_id = %target_id, topic = %publish.topic, payload_len = publish.payload.len(), "Received message on subscribed topic.");
}
rumqttc::Event::Incoming(Packet::Disconnect) => {
info!(target_id = %target_id, "Received Disconnect packet from broker. MQTT connection lost.");
connected_status.store(false, Ordering::SeqCst);
}
rumqttc::Event::Incoming(Packet::PingResp) => {
trace!(target_id = %target_id, "Received PingResp from broker. Connection is alive.");
}
rumqttc::Event::Incoming(Packet::SubAck(suback)) => {
trace!(target_id = %target_id, "Received SubAck for pkid: {}", suback.pkid);
}
rumqttc::Event::Incoming(Packet::PubAck(puback)) => {
trace!(target_id = %target_id, "Received PubAck for pkid: {}", puback.pkid);
}
// Process other incoming packet types as needed (PubRec, PubRel, PubComp, UnsubAck)
rumqttc::Event::Outgoing(Outgoing::Disconnect) => {
info!(target_id = %target_id, "MQTT outgoing disconnect initiated by client.");
connected_status.store(false, Ordering::SeqCst);
}
rumqttc::Event::Outgoing(Outgoing::PingReq) => {
trace!(target_id = %target_id, "Client sent PingReq to broker.");
}
// Other outgoing events (Subscribe, Unsubscribe, Publish) usually need no
// connection-status handling here, because they are actions initiated by the client.
_ => {
// Log any other unhandled MQTT events to aid debugging.
trace!(target_id = %target_id, "Unhandled or generic MQTT event: {:?}", notification);
}
}
}
Err(e) => {
connected_status.store(false, Ordering::SeqCst);
error!(target_id = %target_id, error = %e, "Error from MQTT event loop poll");
if matches!(e, rumqttc::ConnectionError::NetworkTimeout) && (!initial_connection_established || !connected_status.load(Ordering::SeqCst)) {
warn!(target_id = %target_id, "Timeout during initial poll or pending state, will retry.");
continue;
}
if matches!(e,
ConnectionError::Io(_) |
ConnectionError::NetworkTimeout |
ConnectionError::ConnectionRefused(_) |
ConnectionError::Tls(_)
) {
warn!(target_id = %target_id, error = %e, "MQTT connection error. Relying on rumqttc for reconnection if applicable.");
}
// Decide here whether to break out of the loop based on the error type,
// e.g. for unrecoverable errors.
if is_fatal_mqtt_error(&e) {
error!(target_id = %target_id, error = %e, "Fatal MQTT error, terminating event loop.");
break;
}
// rumqttc's eventloop.poll() may return Err and terminate after some errors,
// or it may handle reconnection internally; the continue here makes select! wait again.
// If the error is temporary and rumqttc is reconnecting, poll() should eventually succeed or return a different error.
// Sleep briefly to avoid a busy loop on rapid failures.
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
}
}
connected_status.store(false, Ordering::SeqCst);
info!(target_id = %target_id, "MQTT event loop task finished.");
}
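// Illustrative sketch (not part of the module's public flow) of how
// run_mqtt_event_loop is expected to be driven: spawn it with a cancellation
// channel, then signal and await it on shutdown. The broker address, client
// id, and target id below are placeholders, not real configuration.
#[allow(dead_code)]
async fn event_loop_usage_sketch() {
    let options = rumqttc::MqttOptions::new("example-client", "localhost", 1883);
    let (_client, eventloop) = rumqttc::AsyncClient::new(options, 10);
    let connected = Arc::new(AtomicBool::new(false));
    let (cancel_tx, cancel_rx) = mpsc::channel(1);
    let target_id = TargetID::new("example".to_string(), "mqtt".to_string());
    let handle = tokio::spawn(run_mqtt_event_loop(eventloop, connected.clone(), target_id, cancel_rx));
    // Later, e.g. from close(): signal cancellation and wait for the task to finish.
    let _ = cancel_tx.send(()).await;
    let _ = handle.await;
}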
/// Checks whether the given MQTT connection error should be considered fatal;
/// for fatal errors, the event loop should terminate.
fn is_fatal_mqtt_error(err: &ConnectionError) -> bool {
match err {
// If all client requests have been processed (for example, the AsyncClient was dropped), the event loop can end.
ConnectionError::RequestsDone => true,
// Inspect the underlying MQTT state error
ConnectionError::MqttState(state_err) => {
// The type of state_err is &rumqttc::StateError
match state_err {
// If StateError is caused by deserialization issues, check the underlying MqttBytesError
rumqttc::StateError::Deserialization(mqtt_bytes_err) => { // The type of mqtt_bytes_err is &rumqttc::mqttbytes::Error
matches!(
mqtt_bytes_err,
MqttBytesError::InvalidProtocol // Invalid protocol
| MqttBytesError::InvalidProtocolLevel(_) // Invalid protocol level
| MqttBytesError::IncorrectPacketFormat // Incorrect packet format
| MqttBytesError::InvalidPacketType(_) // Invalid packet type
| MqttBytesError::MalformedPacket // Malformed packet
| MqttBytesError::PayloadTooLong // Payload too long
| MqttBytesError::PayloadSizeLimitExceeded(_) // Payload size limit exceeded
| MqttBytesError::TopicNotUtf8 // Topic not UTF-8 (a serious protocol violation)
)
}
// Other StateError variants that are fatal
rumqttc::StateError::InvalidState // Internal state machine is in an invalid state
| rumqttc::StateError::WrongPacket // Protocol violation: unexpected packet received
| rumqttc::StateError::Unsolicited(_) // Protocol violation: unsolicited ack received
| rumqttc::StateError::OutgoingPacketTooLarge { .. } // Attempted to send an oversized packet
| rumqttc::StateError::EmptySubscription // Protocol violation (if it occurs at this stage)
=> true,
// Other StateErrors (such as Io, AwaitPingResp, CollisionTimeout) are not considered fatal here;
// they may be handled internally by rumqttc or surface as other ConnectionError types.
_ => false,
}
}
// Other ConnectionError types (such as Io, Tls, NetworkTimeout, ConnectionRefused, NotConnAck)
// are usually transient, or are handled by rumqttc's internal reconnect logic.
_ => false,
}
}
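// A small, illustrative sanity check for the classification above:
// RequestsDone (the AsyncClient was dropped) is fatal, while a plain
// NetworkTimeout is treated as transient and left to reconnection logic.
#[cfg(test)]
mod fatal_error_tests {
    use super::*;

    #[test]
    fn requests_done_is_fatal() {
        assert!(is_fatal_mqtt_error(&ConnectionError::RequestsDone));
    }

    #[test]
    fn network_timeout_is_not_fatal() {
        assert!(!is_fatal_mqtt_error(&ConnectionError::NetworkTimeout));
    }
}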
#[async_trait]
impl Target for MQTTTarget {
fn id(&self) -> TargetID {
self.id.clone()
}
#[instrument(skip(self), fields(target_id = %self.id))]
async fn is_active(&self) -> Result<bool, TargetError> {
debug!(target_id = %self.id, "Checking if MQTT target is active.");
if self.client.lock().await.is_none() && !self.connected.load(Ordering::SeqCst) {
// Check if the background task is running and has not panicked
if let Some(handle) = self.bg_task_manager.init_cell.get() {
if handle.is_finished() {
error!(target_id = %self.id, "MQTT background task has finished, possibly due to an error. Target is not active.");
return Err(TargetError::Network(
"MQTT background task terminated".to_string(),
));
}
}
debug!(target_id = %self.id, "MQTT client not yet initialized or task not running/connected.");
return Err(TargetError::Configuration(
"MQTT client not available or not initialized/connected".to_string(),
));
}
if self.connected.load(Ordering::SeqCst) {
debug!(target_id = %self.id, "MQTT target is active (connected flag is true).");
Ok(true)
} else {
debug!(target_id = %self.id, "MQTT target is not connected (connected flag is false).");
Err(TargetError::NotConnected)
}
}
#[instrument(skip(self, event), fields(target_id = %self.id))]
async fn save(&self, event: Event) -> Result<(), TargetError> {
if let Some(store) = &self.store {
debug!(target_id = %self.id, "Event saved to store start");
// If store is configured, ONLY put the event into the store.
// Do NOT send it directly here.
match store.put(event.clone()) {
Ok(_) => {
debug!(target_id = %self.id, "Event saved to store for MQTT target successfully.");
Ok(())
}
Err(e) => {
error!(target_id = %self.id, error = %e, "Failed to save event to store");
return Err(TargetError::Storage(format!(
"Failed to save event to store: {}",
e
)));
}
}
} else {
if !self.is_enabled() {
return Err(TargetError::Disabled);
}
if !self.connected.load(Ordering::SeqCst) {
warn!(target_id = %self.id, "Attempting to send directly but not connected; trying to init.");
// Call the struct's init method, not the trait's default
match MQTTTarget::init(self).await {
Ok(_) => debug!(target_id = %self.id, "MQTT target initialized successfully."),
Err(e) => {
error!(target_id = %self.id, error = %e, "Failed to initialize MQTT target.");
return Err(TargetError::NotConnected);
}
}
if !self.connected.load(Ordering::SeqCst) {
error!(target_id = %self.id, "Cannot save (send directly) as target is not active after init attempt.");
return Err(TargetError::NotConnected);
}
}
self.send(&event).await
}
}
#[instrument(skip(self), fields(target_id = %self.id))]
async fn send_from_store(&self, key: Key) -> Result<(), TargetError> {
debug!(target_id = %self.id, ?key, "Attempting to send event from store with key.");
if !self.is_enabled() {
return Err(TargetError::Disabled);
}
if !self.connected.load(Ordering::SeqCst) {
warn!(target_id = %self.id, "Not connected; trying to init before sending from store.");
match MQTTTarget::init(self).await {
Ok(_) => debug!(target_id = %self.id, "MQTT target initialized successfully."),
Err(e) => {
error!(target_id = %self.id, error = %e, "Failed to initialize MQTT target.");
return Err(TargetError::NotConnected);
}
}
if !self.connected.load(Ordering::SeqCst) {
error!(target_id = %self.id, "Cannot send from store as target is not active after init attempt.");
return Err(TargetError::NotConnected);
}
}
let store = self
.store
.as_ref()
.ok_or_else(|| TargetError::Configuration("No store configured".to_string()))?;
let event = match store.get(&key) {
Ok(event) => {
debug!(target_id = %self.id, ?key, "Retrieved event from store for sending.");
event
}
Err(StoreError::NotFound) => {
// The event is already gone from the store; nothing to send.
debug!(target_id = %self.id, ?key, "Event not found in store for sending.");
return Ok(());
}
Err(e) => {
error!(
target_id = %self.id,
error = %e,
"Failed to get event from store"
);
return Err(TargetError::Storage(format!(
"Failed to get event from store: {}",
e
)));
}
};
debug!(target_id = %self.id, ?key, "Sending event from store.");
if let Err(e) = self.send(&event).await {
if matches!(e, TargetError::NotConnected) {
warn!(target_id = %self.id, "Failed to send event from store: Not connected. Event remains in store.");
return Err(TargetError::NotConnected);
}
error!(target_id = %self.id, error = %e, "Failed to send event from store with an unexpected error.");
return Err(e);
}
debug!(target_id = %self.id, ?key, "Event sent from store successfully. deleting from store. ");
match store.del(&key) {
Ok(_) => {
debug!(target_id = %self.id, ?key, "Event deleted from store after successful send.")
}
Err(StoreError::NotFound) => {
debug!(target_id = %self.id, ?key, "Event already deleted from store.");
}
Err(e) => {
error!(target_id = %self.id, error = %e, "Failed to delete event from store after send.");
return Err(TargetError::Storage(format!(
"Failed to delete event from store: {}",
e
)));
}
}
debug!(target_id = %self.id, ?key, "Event deleted from store.");
Ok(())
}
async fn close(&self) -> Result<(), TargetError> {
info!(target_id = %self.id, "Attempting to close MQTT target.");
if let Err(e) = self.bg_task_manager.cancel_tx.send(()).await {
warn!(target_id = %self.id, error = %e, "Failed to send cancel signal to MQTT background task. It might have already exited.");
}
// Wait for the task to finish if it was initialized
if let Some(_task_handle) = self.bg_task_manager.init_cell.get() {
debug!(target_id = %self.id, "Waiting for MQTT background task to complete...");
// It's tricky to await here if close is called from a sync context or Drop
// For async close, this is fine. Consider a timeout.
// let _ = tokio::time::timeout(Duration::from_secs(5), task_handle.await).await;
// If task_handle.await is directly used, ensure it's not awaited multiple times if close can be called multiple times.
// For now, we rely on the signal and the task's self-termination.
}
if let Some(client_instance) = self.client.lock().await.take() {
info!(target_id = %self.id, "Disconnecting MQTT client.");
if let Err(e) = client_instance.disconnect().await {
warn!(target_id = %self.id, error = %e, "Error during MQTT client disconnect.");
}
}
self.connected.store(false, Ordering::SeqCst);
info!(target_id = %self.id, "MQTT target close method finished.");
Ok(())
}
fn store(&self) -> Option<&(dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync)> {
self.store.as_deref()
}
fn clone_dyn(&self) -> Box<dyn Target + Send + Sync> {
self.clone_target()
}
async fn init(&self) -> Result<(), TargetError> {
if !self.is_enabled() {
debug!(target_id = %self.id, "Target is disabled, skipping init.");
return Ok(());
}
// Call the internal init logic
MQTTTarget::init(self).await
}
fn is_enabled(&self) -> bool {
self.args.enable
}
}


@@ -0,0 +1,450 @@
use crate::store::STORE_EXTENSION;
use crate::target::ChannelTargetType;
use crate::{
arn::TargetID, error::TargetError,
event::{Event, EventLog},
store::{Key, Store},
utils,
StoreError,
Target,
};
use async_trait::async_trait;
use reqwest::{Client, StatusCode, Url};
use std::{
path::PathBuf,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::Duration,
};
use tokio::net::lookup_host;
use tokio::sync::mpsc;
use tracing::{debug, error, info, instrument};
use urlencoding;
/// Arguments for configuring a Webhook target
#[derive(Debug, Clone)]
pub struct WebhookArgs {
/// Whether the target is enabled
pub enable: bool,
/// The endpoint URL to send events to
pub endpoint: Url,
/// The authorization token for the endpoint
pub auth_token: String,
/// The directory to store events in case of failure
pub queue_dir: String,
/// The maximum number of events to store
pub queue_limit: u64,
/// The client certificate for TLS (PEM format)
pub client_cert: String,
/// The client key for TLS (PEM format)
pub client_key: String,
}
// Validation for WebhookArgs
impl WebhookArgs {
pub fn validate(&self) -> Result<(), TargetError> {
if !self.enable {
return Ok(());
}
if self.endpoint.as_str().is_empty() {
return Err(TargetError::Configuration("endpoint empty".to_string()));
}
if !self.queue_dir.is_empty() {
let path = std::path::Path::new(&self.queue_dir);
if !path.is_absolute() {
return Err(TargetError::Configuration(
"webhook queueDir path should be absolute".to_string(),
));
}
}
if !self.client_cert.is_empty() && self.client_key.is_empty()
|| self.client_cert.is_empty() && !self.client_key.is_empty()
{
return Err(TargetError::Configuration(
"cert and key must be specified as a pair".to_string(),
));
}
Ok(())
}
}
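// Illustrative checks for the validation rules above; the URL and file names
// are placeholder values, not real configuration.
#[cfg(test)]
mod webhook_args_tests {
    use super::*;

    #[test]
    fn cert_without_key_is_rejected() {
        let args = WebhookArgs {
            enable: true,
            endpoint: Url::parse("https://example.com/hook").expect("valid url"),
            auth_token: String::new(),
            queue_dir: String::new(),
            queue_limit: 0,
            client_cert: "client.pem".to_string(), // cert supplied ...
            client_key: String::new(),             // ... without its key
        };
        assert!(args.validate().is_err());
    }

    #[test]
    fn disabled_target_skips_validation() {
        let args = WebhookArgs {
            enable: false,
            endpoint: Url::parse("https://example.com/hook").expect("valid url"),
            auth_token: String::new(),
            queue_dir: "relative/dir".to_string(), // would be rejected if enabled
            queue_limit: 0,
            client_cert: String::new(),
            client_key: String::new(),
        };
        assert!(args.validate().is_ok());
    }
}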
/// A target that sends events to a webhook
pub struct WebhookTarget {
id: TargetID,
args: WebhookArgs,
http_client: Arc<Client>,
// Send + Sync bounds ensure the stored trait object is thread-safe
store: Option<Box<dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync>>,
initialized: AtomicBool,
addr: String,
cancel_sender: mpsc::Sender<()>,
}
impl WebhookTarget {
/// Clones the WebhookTarget, creating a new instance with the same configuration
pub fn clone_box(&self) -> Box<dyn Target + Send + Sync> {
Box::new(WebhookTarget {
id: self.id.clone(),
args: self.args.clone(),
http_client: Arc::clone(&self.http_client),
store: self.store.as_ref().map(|s| s.boxed_clone()),
initialized: AtomicBool::new(self.initialized.load(Ordering::SeqCst)),
addr: self.addr.clone(),
cancel_sender: self.cancel_sender.clone(),
})
}
/// Creates a new WebhookTarget
#[instrument(skip(args), fields(target_id = %id))]
pub fn new(id: String, args: WebhookArgs) -> Result<Self, TargetError> {
// Validate the arguments first
args.validate()?;
// Create the TargetID
let target_id = TargetID::new(id, ChannelTargetType::Webhook.as_str().to_string());
// Build the HTTP client
let mut client_builder = Client::builder()
.timeout(Duration::from_secs(30))
.user_agent(utils::get_user_agent(utils::ServiceType::Basis));
// Client certificate handling
if !args.client_cert.is_empty() && !args.client_key.is_empty() {
// Add the client certificate
let cert = std::fs::read(&args.client_cert).map_err(|e| {
TargetError::Configuration(format!("Failed to read client cert: {}", e))
})?;
let key = std::fs::read(&args.client_key).map_err(|e| {
TargetError::Configuration(format!("Failed to read client key: {}", e))
})?;
let identity = reqwest::Identity::from_pem(&[cert, key].concat()).map_err(|e| {
TargetError::Configuration(format!("Failed to create identity: {}", e))
})?;
client_builder = client_builder.identity(identity);
}
let http_client = Arc::new(client_builder.build().map_err(|e| {
TargetError::Configuration(format!("Failed to build HTTP client: {}", e))
})?);
// Build the queue store
let queue_store = if !args.queue_dir.is_empty() {
let queue_dir = PathBuf::from(&args.queue_dir).join(format!(
"rustfs-{}-{}-{}",
ChannelTargetType::Webhook.as_str(),
target_id.name,
target_id.id
));
let store = super::super::store::QueueStore::<Event>::new(
queue_dir,
args.queue_limit,
STORE_EXTENSION,
);
if let Err(e) = store.open() {
error!(
"Failed to open store for Webhook target {}: {}",
target_id.id, e
);
return Err(TargetError::Storage(format!("{}", e)));
}
// Ensure the QueueStore's Store implementation matches the expected error type
Some(Box::new(store)
as Box<
dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync,
>)
} else {
None
};
// Resolve the address
let addr = {
let host = args.endpoint.host_str().unwrap_or("localhost");
let port = args.endpoint.port().unwrap_or_else(|| {
if args.endpoint.scheme() == "https" {
443
} else {
80
}
});
format!("{}:{}", host, port)
};
// Create the cancellation channel (the receiver end is currently unused)
let (cancel_sender, _) = mpsc::channel(1);
info!(target_id = %target_id.id, "Webhook target created");
Ok(WebhookTarget {
id: target_id,
args,
http_client,
store: queue_store,
initialized: AtomicBool::new(false),
addr,
cancel_sender,
})
}
async fn init(&self) -> Result<(), TargetError> {
// Guard initialization with an atomic flag (a plain load/store, not a true CAS)
if !self.initialized.load(Ordering::SeqCst) {
// Check connectivity
match self.is_active().await {
Ok(true) => {
info!("Webhook target {} is active", self.id);
}
Ok(false) => {
return Err(TargetError::NotConnected);
}
Err(e) => {
error!(
"Failed to check if Webhook target {} is active: {}",
self.id, e
);
return Err(e);
}
}
self.initialized.store(true, Ordering::SeqCst);
info!("Webhook target {} initialized", self.id);
}
Ok(())
}
async fn send(&self, event: &Event) -> Result<(), TargetError> {
info!("Webhook Sending event to webhook target: {}", self.id);
let object_name = urlencoding::decode(&event.s3.object.key)
.map_err(|e| TargetError::Encoding(format!("Failed to decode object key: {}", e)))?;
let key = format!("{}/{}", event.s3.bucket.name, object_name);
let log = EventLog {
event_name: event.event_name,
key,
records: vec![event.clone()],
};
let data = serde_json::to_vec(&log)
.map_err(|e| TargetError::Serialization(format!("Failed to serialize event: {}", e)))?;
// Convert Vec<u8> to String, used only for logging
let data_string = String::from_utf8(data.clone()).map_err(|e| {
TargetError::Encoding(format!("Failed to convert event data to UTF-8: {}", e))
})?;
debug!(
"Sending event to webhook target: {}, event log: {}",
self.id, data_string
);
// Build the request
let mut req_builder = self
.http_client
.post(self.args.endpoint.as_str())
.header("Content-Type", "application/json");
if !self.args.auth_token.is_empty() {
// Split the auth_token to check whether it already includes an auth scheme
let tokens: Vec<&str> = self.args.auth_token.split_whitespace().collect();
match tokens.len() {
2 => {
// Already contains a scheme and token, e.g. "Bearer token123"
req_builder = req_builder.header("Authorization", &self.args.auth_token);
}
1 => {
// Token only; add the "Bearer" prefix
req_builder = req_builder
.header("Authorization", format!("Bearer {}", self.args.auth_token));
}
_ => {
// Empty string or any other shape: do not add an Authorization header
}
}
}
// Send the request
let resp = req_builder.body(data).send().await.map_err(|e| {
if e.is_timeout() || e.is_connect() {
TargetError::NotConnected
} else {
TargetError::Request(format!("Failed to send request: {}", e))
}
})?;
let status = resp.status();
if status.is_success() {
debug!("Event sent to webhook target: {}", self.id);
Ok(())
} else if status == StatusCode::FORBIDDEN {
Err(TargetError::Authentication(format!(
"{} returned '{}', please check if your auth token is correctly set",
self.args.endpoint, status
)))
} else {
Err(TargetError::Request(format!(
"{} returned '{}', please check your endpoint configuration",
self.args.endpoint, status
)))
}
}
}
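// Sketch of the Authorization-header convention implemented in send() above:
// a bare token gets a "Bearer " prefix, a token that already carries a scheme
// is passed through unchanged, and anything else adds no header. `format_auth`
// is a hypothetical helper written only for this illustration.
#[cfg(test)]
mod auth_header_tests {
    fn format_auth(token: &str) -> Option<String> {
        match token.split_whitespace().count() {
            2 => Some(token.to_string()),
            1 => Some(format!("Bearer {}", token)),
            _ => None,
        }
    }

    #[test]
    fn token_formatting_matches_send_logic() {
        assert_eq!(format_auth("token123").as_deref(), Some("Bearer token123"));
        assert_eq!(format_auth("Basic abc").as_deref(), Some("Basic abc"));
        assert_eq!(format_auth(""), None);
    }
}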
#[async_trait]
impl Target for WebhookTarget {
fn id(&self) -> TargetID {
self.id.clone()
}
// Ensure the returned future is Send
async fn is_active(&self) -> Result<bool, TargetError> {
let socket_addr = lookup_host(&self.addr)
.await
.map_err(|e| TargetError::Network(format!("Failed to resolve host: {}", e)))?
.next()
.ok_or_else(|| TargetError::Network("No address found".to_string()))?;
debug!(
"is_active socket addr: {},target id:{}",
socket_addr, self.id.id
);
match tokio::time::timeout(
Duration::from_secs(5),
tokio::net::TcpStream::connect(socket_addr),
)
.await
{
Ok(Ok(_)) => {
debug!("Connection to {} is active", self.addr);
Ok(true)
}
Ok(Err(e)) => {
debug!("Connection to {} failed: {}", self.addr, e);
if e.kind() == std::io::ErrorKind::ConnectionRefused {
Err(TargetError::NotConnected)
} else {
Err(TargetError::Network(format!("Connection failed: {}", e)))
}
}
Err(_) => Err(TargetError::Timeout("Connection timed out".to_string())),
}
}
async fn save(&self, event: Event) -> Result<(), TargetError> {
if let Some(store) = &self.store {
// Call the store method directly, no longer need to acquire the lock
store.put(event).map_err(|e| {
TargetError::Storage(format!("Failed to save event to store: {}", e))
})?;
debug!("Event saved to store for target: {}", self.id);
Ok(())
} else {
match self.init().await {
Ok(_) => (),
Err(e) => {
error!("Failed to initialize Webhook target {}: {}", self.id.id, e);
return Err(TargetError::NotConnected);
}
}
self.send(&event).await
}
}
async fn send_from_store(&self, key: Key) -> Result<(), TargetError> {
debug!("Sending event from store for target: {}", self.id);
match self.init().await {
Ok(_) => {
debug!("Event sent to store for target: {}", self.name());
}
Err(e) => {
error!("Failed to initialize Webhook target {}: {}", self.id.id, e);
return Err(TargetError::NotConnected);
}
}
let store = self
.store
.as_ref()
.ok_or_else(|| TargetError::Configuration("No store configured".to_string()))?;
// Get events directly from the store, no longer need to acquire locks
let event = match store.get(&key) {
Ok(event) => event,
Err(StoreError::NotFound) => return Ok(()),
Err(e) => {
return Err(TargetError::Storage(format!(
"Failed to get event from store: {}",
e
)));
}
};
if let Err(e) = self.send(&event).await {
if let TargetError::NotConnected = e {
return Err(TargetError::NotConnected);
}
return Err(e);
}
// Delete the event for this key through the store's shared reference
debug!(
"Deleting event from store for target: {}, key:{}, start",
self.id,
key.to_string()
);
match store.del(&key) {
Ok(_) => debug!(
"Event deleted from store for target: {}, key:{}, end",
self.id,
key.to_string()
),
Err(e) => {
error!("Failed to delete event from store: {}", e);
return Err(TargetError::Storage(format!(
"Failed to delete event from store: {}",
e
)));
}
}
debug!("Event sent from store and deleted for target: {}", self.id);
Ok(())
}
async fn close(&self) -> Result<(), TargetError> {
// Send cancel signal to background tasks
let _ = self.cancel_sender.try_send(());
info!("Webhook target closed: {}", self.id);
Ok(())
}
fn store(&self) -> Option<&(dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync)> {
// Returns the reference to the internal store
self.store.as_deref()
}
fn clone_dyn(&self) -> Box<dyn Target + Send + Sync> {
self.clone_box()
}
// The inherent init method already does the required work;
// this trait impl just delegates to it after checking is_enabled.
async fn init(&self) -> Result<(), TargetError> {
// If the target is disabled, return success immediately
if !self.is_enabled() {
debug!(
"Webhook target {} is disabled, skipping initialization",
self.id
);
return Ok(());
}
// Use existing initialization logic
WebhookTarget::init(self).await
}
fn is_enabled(&self) -> bool {
self.args.enable
}
}

213
crates/notify/src/utils.rs Normal file

@@ -0,0 +1,213 @@
use std::env;
use std::fmt;
#[cfg(unix)]
use libc::uname;
#[cfg(unix)]
use std::ffi::CStr;
#[cfg(windows)]
use std::process::Command;
// Rustfs version
const RUSTFS_VERSION: &str = "1.0.0";
// Service type enumeration
#[derive(Debug, Clone, PartialEq)]
pub enum ServiceType {
Basis,
Core,
Event,
Logger,
Custom(String),
}
impl ServiceType {
fn as_str(&self) -> &str {
match self {
ServiceType::Basis => "basis",
ServiceType::Core => "core",
ServiceType::Event => "event",
ServiceType::Logger => "logger",
ServiceType::Custom(s) => s.as_str(),
}
}
}
// UserAgent struct
struct UserAgent {
os_platform: String,
arch: String,
version: String,
service: ServiceType,
}
impl UserAgent {
// Create a new UserAgent instance for the given service type
fn new(service: ServiceType) -> Self {
let os_platform = Self::get_os_platform();
let arch = env::consts::ARCH.to_string();
let version = RUSTFS_VERSION.to_string();
UserAgent {
os_platform,
arch,
version,
service,
}
}
// Get OS platform information
fn get_os_platform() -> String {
if cfg!(target_os = "windows") {
Self::get_windows_platform()
} else if cfg!(target_os = "macos") {
Self::get_macos_platform()
} else if cfg!(target_os = "linux") {
Self::get_linux_platform()
} else {
"Unknown".to_string()
}
}
// Get Windows platform information
#[cfg(windows)]
fn get_windows_platform() -> String {
// Use `cmd /C ver` to obtain the version string
let output = Command::new("cmd")
.args(&["/C", "ver"])
.output()
.unwrap_or_default();
let version = String::from_utf8_lossy(&output.stdout);
let version = version
.lines()
.next()
.unwrap_or("Windows NT 10.0")
.replace("Microsoft Windows [Version ", "")
.replace("]", "");
format!("Windows NT {}", version.trim())
}
#[cfg(not(windows))]
fn get_windows_platform() -> String {
"N/A".to_string()
}
// Get macOS platform information
#[cfg(target_os = "macos")]
fn get_macos_platform() -> String {
unsafe {
let mut name = std::mem::zeroed();
if uname(&mut name) == 0 {
let release = CStr::from_ptr(name.release.as_ptr()).to_string_lossy();
// Map the Darwin kernel version (e.g. 23.5.0) to the User-Agent
// format (e.g. 14_5_0): from Darwin 20 onward, macOS major = Darwin major - 9.
let darwin_major = release
.split('.')
.next()
.unwrap_or("14")
.parse::<i32>()
.unwrap_or(14);
let macos_major = if darwin_major >= 20 { darwin_major - 9 } else { 14 };
let minor = release.split('.').nth(1).unwrap_or("0");
format!("Macintosh; Intel Mac OS X {}_{}_{}", macos_major, minor, 0)
} else {
"Macintosh; Intel Mac OS X 14_5_0".to_string()
}
}
}
#[cfg(not(target_os = "macos"))]
fn get_macos_platform() -> String {
"N/A".to_string()
}
// Get Linux platform information
#[cfg(target_os = "linux")]
fn get_linux_platform() -> String {
unsafe {
let mut name = std::mem::zeroed();
if uname(&mut name) == 0 {
let release = CStr::from_ptr(name.release.as_ptr()).to_string_lossy();
format!("X11; Linux {}", release)
} else {
"X11; Linux Unknown".to_string()
}
}
}
#[cfg(not(target_os = "linux"))]
fn get_linux_platform() -> String {
"N/A".to_string()
}
}
// Implement Display to format the User-Agent
impl fmt::Display for UserAgent {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.service == ServiceType::Basis {
return write!(
f,
"Mozilla/5.0 ({}; {}) Rustfs/{}",
self.os_platform, self.arch, self.version
);
}
write!(
f,
"Mozilla/5.0 ({}; {}) Rustfs/{} ({})",
self.os_platform,
self.arch,
self.version,
self.service.as_str()
)
}
}
// Get the User-Agent string for the given service type
pub fn get_user_agent(service: ServiceType) -> String {
UserAgent::new(service).to_string()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_user_agent_format_basis() {
let ua = get_user_agent(ServiceType::Basis);
assert!(ua.starts_with("Mozilla/5.0"));
assert!(ua.contains("Rustfs/1.0.0"));
println!("User-Agent: {}", ua);
}
#[test]
fn test_user_agent_format_core() {
let ua = get_user_agent(ServiceType::Core);
assert!(ua.starts_with("Mozilla/5.0"));
assert!(ua.contains("Rustfs/1.0.0 (core)"));
println!("User-Agent: {}", ua);
}
#[test]
fn test_user_agent_format_event() {
let ua = get_user_agent(ServiceType::Event);
assert!(ua.starts_with("Mozilla/5.0"));
assert!(ua.contains("Rustfs/1.0.0 (event)"));
println!("User-Agent: {}", ua);
}
#[test]
fn test_user_agent_format_logger() {
let ua = get_user_agent(ServiceType::Logger);
assert!(ua.starts_with("Mozilla/5.0"));
assert!(ua.contains("Rustfs/1.0.0 (logger)"));
println!("User-Agent: {}", ua);
}
#[test]
fn test_user_agent_format_custom() {
let ua = get_user_agent(ServiceType::Custom("monitor".to_string()));
assert!(ua.starts_with("Mozilla/5.0"));
assert!(ua.contains("Rustfs/1.0.0 (monitor)"));
println!("User-Agent: {}", ua);
}
}


@@ -486,13 +486,7 @@ pub struct FileAccessDeniedWithContext {
impl std::fmt::Display for FileAccessDeniedWithContext {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "access to file '{}' denied: {}", self.path.display(), self.source)
}
}