From 42d3645d6fdc55d6c898950b738a5f1df08066d6 Mon Sep 17 00:00:00 2001 From: houseme Date: Fri, 17 Oct 2025 15:34:53 +0800 Subject: [PATCH] fix(targets): make target removal and reload transactional; prevent reappearing entries (#662) * feat: improve code for notify * upgrade starshard version * upgrade version * Fix ETag format to comply with HTTP standards by wrapping with quotes (#592) * Initial plan * Fix ETag format to comply with HTTP standards by wrapping with quotes Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com> * bufigx --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com> Co-authored-by: overtrue * Improve lock (#596) * improve lock Signed-off-by: Mu junxiang <1948535941@qq.com> * feat(tests): add wait_for_object_absence helper and improve lifecycle test reliability Signed-off-by: Mu junxiang <1948535941@qq.com> * chore: remove dirty docs Signed-off-by: Mu junxiang <1948535941@qq.com> --------- Signed-off-by: Mu junxiang <1948535941@qq.com> * feat(append): implement object append operations with state tracking (#599) * feat(append): implement object append operations with state tracking Signed-off-by: junxiang Mu <1948535941@qq.com> * chore: rebase Signed-off-by: junxiang Mu <1948535941@qq.com> --------- Signed-off-by: junxiang Mu <1948535941@qq.com> * build(deps): upgrade s3s (#595) Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com> * fix: validate mqtt broker * improve code for `import` * fix * improve * remove logger from `rustfs-obs` crate * remove code for config Observability * fix * improve code * fix comment * up * up * upgrade version * fix * fmt * upgrade tokio version to 1.48.0 * upgrade `datafusion` and `reed-solomon-simd` version * fix * fmt * improve code for notify webhook example * improve code * fix * fix * fmt --------- Signed-off-by: Mu junxiang <1948535941@qq.com> Signed-off-by: junxiang Mu <1948535941@qq.com> Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com> Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com> Co-authored-by: overtrue Co-authored-by: guojidan <63799833+guojidan@users.noreply.github.com> Co-authored-by: Nugine Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com> --- Cargo.lock | 415 ++++++----------- Cargo.toml | 27 +- crates/audit/Cargo.toml | 1 - crates/audit/src/global.rs | 5 +- crates/audit/src/observability.rs | 4 +- crates/audit/src/registry.rs | 114 +++-- crates/audit/src/system.rs | 5 +- crates/config/src/observability/config.rs | 98 ----- crates/config/src/observability/file.rs | 28 -- crates/config/src/observability/kafka.rs | 27 -- crates/config/src/observability/mod.rs | 91 +++- crates/config/src/observability/webhook.rs | 28 -- crates/e2e_test/Cargo.toml | 2 +- crates/kms/src/backends/local.rs | 24 +- crates/notify/examples/full_demo.rs | 10 +- crates/notify/examples/full_demo_one.rs | 16 +- crates/notify/examples/webhook.rs | 14 +- crates/notify/src/integration.rs | 5 +- crates/notify/src/registry.rs | 67 ++- crates/obs/Cargo.toml | 22 +- crates/obs/examples/config.toml | 27 +- crates/obs/examples/server.rs | 37 +- crates/obs/src/config.rs | 178 +------- crates/obs/src/entry/args.rs | 88 ---- crates/obs/src/entry/audit.rs | 467 -------------------- crates/obs/src/entry/base.rs | 106 ----- crates/obs/src/entry/mod.rs | 158 ------- crates/obs/src/entry/unified.rs | 301 ------------- crates/obs/src/global.rs | 43 +- 
crates/obs/src/lib.rs | 19 +- crates/obs/src/logger.rs | 490 --------------------- crates/obs/src/sinks/file.rs | 178 -------- crates/obs/src/sinks/kafka.rs | 179 -------- crates/obs/src/sinks/mod.rs | 123 ------ crates/obs/src/sinks/webhook.rs | 84 ---- crates/obs/src/telemetry.rs | 5 +- crates/obs/src/worker.rs | 27 -- crates/utils/src/net.rs | 12 +- rustfs/Cargo.toml | 11 +- rustfs/src/main.rs | 2 +- rustfs/src/storage/ecfs.rs | 175 ++++---- scripts/run.sh | 20 - 42 files changed, 514 insertions(+), 3219 deletions(-) delete mode 100644 crates/config/src/observability/config.rs delete mode 100644 crates/config/src/observability/file.rs delete mode 100644 crates/config/src/observability/kafka.rs delete mode 100644 crates/config/src/observability/webhook.rs delete mode 100644 crates/obs/src/entry/args.rs delete mode 100644 crates/obs/src/entry/audit.rs delete mode 100644 crates/obs/src/entry/base.rs delete mode 100644 crates/obs/src/entry/mod.rs delete mode 100644 crates/obs/src/entry/unified.rs delete mode 100644 crates/obs/src/logger.rs delete mode 100644 crates/obs/src/sinks/file.rs delete mode 100644 crates/obs/src/sinks/kafka.rs delete mode 100644 crates/obs/src/sinks/mod.rs delete mode 100644 crates/obs/src/sinks/webhook.rs delete mode 100644 crates/obs/src/worker.rs diff --git a/Cargo.lock b/Cargo.lock index 142c9063..55602575 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -60,7 +60,7 @@ checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ "cfg-if", "const-random", - "getrandom 0.3.3", + "getrandom 0.3.4", "once_cell", "version_check", "zerocopy", @@ -601,16 +601,15 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.32.2" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2b715a6010afb9e457ca2b7c9d2b9c344baa8baed7b38dc476034c171b32575" +checksum = "107a4e9d9cab9963e04e84bb8dee0e25f2a987f9a8bad5ed054abd439caa8f8c" dependencies = [ "bindgen", "cc", "cmake", "dunce", "fs_extra", - "libloading", ] [[package]] @@ -852,7 +851,7 @@ dependencies = [ "pin-project-lite", "rustls 0.21.12", "rustls 0.23.32", - "rustls-native-certs 0.8.1", + "rustls-native-certs 0.8.2", "rustls-pki-types", "tokio", "tokio-rustls 0.26.4", @@ -1298,9 +1297,9 @@ dependencies = [ [[package]] name = "bzip2" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bea8dcd42434048e4f7a304411d9273a411f647446c1234a65ce0554923f4cff" +checksum = "f3a53fac24f34a81bc9954b5d6cfce0c21e18ec6959f44f56e8e90e4bb7c346c" dependencies = [ "libbz2-rs-sys", ] @@ -1393,9 +1392,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "cfg_aliases" @@ -1502,9 +1501,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.48" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2134bb3ea021b78629caa971416385309e0131b351b25e01dc16fb54e1b5fae" +checksum = "f4512b90fa68d3a9932cea5184017c5d200f5921df706d45e853537dea51508f" dependencies = [ "clap_builder", "clap_derive", @@ -1512,9 +1511,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.48" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c2ba64afa3c0a6df7fa517765e31314e983f51dda798ffba27b988194fb65dc9" +checksum = "0025e98baa12e766c67ba13ff4695a887a1eba19569aad00a472546795bd6730" dependencies = [ "anstream", "anstyle", @@ -1524,9 +1523,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.47" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ "heck", "proc-macro2", @@ -1536,9 +1535,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "cmake" @@ -2077,16 +2076,16 @@ checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" [[package]] name = "datafusion" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4016a135c11820d9c9884a1f7924d5456c563bd3657b7d691a6e7b937a452df7" +checksum = "fc6759cf9ef57c5c469e4027ac4b4cfa746e06a0f5472c2b922b6a403c2a64c4" dependencies = [ "arrow", "arrow-ipc", "arrow-schema", "async-trait", "bytes", - "bzip2 0.6.0", + "bzip2 0.6.1", "chrono", "datafusion-catalog", "datafusion-catalog-listing", @@ -2132,9 +2131,9 @@ dependencies = [ [[package]] name = "datafusion-catalog" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1721d3973afeb8a0c3f235a79101cc61e4a558dd3f02fdc9ae6c61e882e544d9" +checksum = "8a1c48fc7e6d62590d45f7be7c531980b8ff091d1ab113a9ddf465bef41e4093" dependencies = [ "arrow", "async-trait", @@ -2158,9 +2157,9 @@ dependencies = [ [[package]] name = "datafusion-catalog-listing" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44841d3efb0c89c6a5ac6fde5ac61d4f2474a2767f170db6d97300a8b4df8904" +checksum = "3db1266da115de3ab0b2669fc027d96cf0ff777deb3216d52c74b528446ccdd6" dependencies = [ "arrow", "async-trait", @@ -2181,9 +2180,9 @@ dependencies = [ [[package]] name = "datafusion-common" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb89b9d1ea8198d174b0838b91b40293b780261d694d6ac59bd20c38005115" +checksum = "ad4eb2a48ca10fa1e1a487a28a5bf080e31efac2d4bf12bb7e92c2d9ea4f35e5" dependencies = [ "ahash", "arrow", @@ -2206,9 +2205,9 @@ dependencies = [ [[package]] name = "datafusion-common-runtime" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f03fe3936f978fe8e76776d14ad8722e33843b01d81d11707ca72d54d2867787" +checksum = "a0422ee64d5791599c46b786063e695f7699fadd3a12ad25038cb3094d05886a" dependencies = [ "futures", "log", @@ -2217,15 +2216,15 @@ dependencies = [ [[package]] name = "datafusion-datasource" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4543216d2f4fc255780a46ae9e062e50c86ac23ecab6718cc1ba3fe4a8d5a8f2" +checksum = "904c2e1089b3ccf10786f2dae12bc560fda278e4194a8917c5844d2e8c212818" dependencies = [ "arrow", "async-compression", "async-trait", "bytes", - "bzip2 0.6.0", + "bzip2 0.6.1", "chrono", "datafusion-common", "datafusion-common-runtime", @@ -2254,9 +2253,9 
@@ dependencies = [ [[package]] name = "datafusion-datasource-csv" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ab662d4692ca5929ce32eb609c6c8a741772537d98363b3efb3bc68148cd530" +checksum = "8336a805c42ef4e359daaad142ddc53649f23c7e934c117d8516816afe6b7a3d" dependencies = [ "arrow", "async-trait", @@ -2279,9 +2278,9 @@ dependencies = [ [[package]] name = "datafusion-datasource-json" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dad4492ba9a2fca417cb211f8f05ffeb7f12a1f0f8e5bdcf548c353ff923779" +checksum = "c691b1565e245ea369bc8418b472a75ea84c2ad2deb61b1521cfa38319a9cd47" dependencies = [ "arrow", "async-trait", @@ -2304,9 +2303,9 @@ dependencies = [ [[package]] name = "datafusion-datasource-parquet" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2925432ce04847cc09b4789a53fc22b0fdf5f2e73289ad7432759d76c6026e9e" +checksum = "f9f7576ceb5974c5f6874d7f2a5ebfeb58960a920da64017def849e0352fe2d8" dependencies = [ "arrow", "async-trait", @@ -2337,15 +2336,15 @@ dependencies = [ [[package]] name = "datafusion-doc" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71f8c2c0d5c57620003c3bf1ee577b738404a7fd9642f6cf73d10e44ffaa70f" +checksum = "9dde7c10244f3657fc01eef8247c0b2b20eae4cf6439a0ebb27322f32026d6b8" [[package]] name = "datafusion-execution" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa51cf4d253927cb65690c05a18e7720cdda4c47c923b0dd7d641f7fcfe21b14" +checksum = "5143fc795cef959b6d5271b2e8f1120382fe929fc4bd027c7d7b993f5352ef7e" dependencies = [ "arrow", "async-trait", @@ -2363,9 +2362,9 @@ dependencies = [ [[package]] name = "datafusion-expr" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a347435cfcd1de0498c8410d32e0b1fc3920e198ce0378f8e259da717af9e0f" +checksum = "63e826296bc5f5d0af3e39c1af473d4091ac6a152a5be2f80c256f0182938428" dependencies = [ "arrow", "async-trait", @@ -2385,9 +2384,9 @@ dependencies = [ [[package]] name = "datafusion-expr-common" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e73951bdf1047d7af212bb11310407230b4067921df648781ae7f7f1241e87e" +checksum = "9096732d0d8862d1950ca70324fe91f9dee3799eeb0db53ef452bdb573484db6" dependencies = [ "arrow", "datafusion-common", @@ -2398,9 +2397,9 @@ dependencies = [ [[package]] name = "datafusion-functions" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3b181e79552d764a2589910d1e0420ef41b07ab97c3e3efdbce612b692141e7" +checksum = "3f362c78ac283e64fd3976e060c1a8a57d5f4dcf844a6b6bd2eb320640a1572e" dependencies = [ "arrow", "arrow-buffer", @@ -2427,9 +2426,9 @@ dependencies = [ [[package]] name = "datafusion-functions-aggregate" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e8cfb3b3f9e48e756939c85816b388264bed378d166a993fb265d800e1c83c" +checksum = "22e2a80a80145a796ae3f02eb724ac516178556aec864fe89f6ab3741a4cd249" dependencies = [ "ahash", "arrow", @@ -2448,9 +2447,9 @@ dependencies = [ [[package]] name = "datafusion-functions-aggregate-common" -version = "50.1.0" +version = "50.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9501537e235e4e86828bc8bf4e22968c1514c2cb4c860b7c7cf7dc99e172d43c" +checksum = "d7dcca2fe7c33409e9ab3f950366aa4cba5db6175a09599fdb658ad9f2cc4296" dependencies = [ "ahash", "arrow", @@ -2461,9 +2460,9 @@ dependencies = [ [[package]] name = "datafusion-functions-nested" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cbc3ecce122389530af091444e923f2f19153c38731893f5b798e19a46fbf86" +checksum = "d1b298733377f3ec8c2868c75b5555b15396d9c13e36c5fda28e80feee34e3ed" dependencies = [ "arrow", "arrow-ord", @@ -2483,9 +2482,9 @@ dependencies = [ [[package]] name = "datafusion-functions-table" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8ad370763644d6626b15900fe2268e7d55c618fadf5cff3a7f717bb6fb50ec1" +checksum = "2fa4a380ca362eb0fbd33093e8ca6b7a31057616c7e6ee999b87a4ad3c7c0b3f" dependencies = [ "arrow", "async-trait", @@ -2499,9 +2498,9 @@ dependencies = [ [[package]] name = "datafusion-functions-window" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44b14fc52c77461f359d1697826a4373c7887a6adfca94eedc81c35decd0df9f" +checksum = "9068fc85b8e187c706427794d79bb7ee91132b6b192cb7b18e650a5f7c5c1340" dependencies = [ "arrow", "datafusion-common", @@ -2517,9 +2516,9 @@ dependencies = [ [[package]] name = "datafusion-functions-window-common" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "851c80de71ff8bc9be7f8478f26e8060e25cab868a36190c4ebdaacc72ceade1" +checksum = "b2f80ec56e177d166269556649be817a382a374642872df4ca48cf9be3d09b3a" dependencies = [ "datafusion-common", "datafusion-physical-expr-common", @@ -2527,9 +2526,9 @@ dependencies = [ [[package]] name = "datafusion-macros" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "386208ac4f475a099920cdbe9599188062276a09cb4c3f02efdc54e0c015ab14" +checksum = "c4868fe261ba01e462033eff141e90453b7630722cad6420fddd81ebb786f6e2" dependencies = [ "datafusion-expr", "quote", @@ -2538,9 +2537,9 @@ dependencies = [ [[package]] name = "datafusion-optimizer" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b20ff1cec8c23fbab8523e2937790fb374b92d3b273306a64b7d8889ff3b8614" +checksum = "40ed8c51b5c37c057e5c7d5945ed807f1cecfba003bdb1a4c3036595dda287c7" dependencies = [ "arrow", "chrono", @@ -2558,9 +2557,9 @@ dependencies = [ [[package]] name = "datafusion-physical-expr" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945659046d27372e38e8a37927f0b887f50846202792063ad6b197c6eaf9fb5b" +checksum = "f678f5734147446e1adbee63be4b244c8f0e9cbd5c41525004ace3730190d03e" dependencies = [ "ahash", "arrow", @@ -2581,9 +2580,9 @@ dependencies = [ [[package]] name = "datafusion-physical-expr-adapter" -version = "50.0.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da3a7429a555dd5ff0bec4d24bd5532ec43876764088da635cad55b2f178dc2" +checksum = "086877d4eca538e9cd1f28b917db0036efe0ad8b4fb7c702f520510672032c8d" dependencies = [ "arrow", "datafusion-common", @@ -2596,9 +2595,9 @@ dependencies = [ [[package]] name = "datafusion-physical-expr-common" -version = "50.1.0" +version = "50.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "218d60e94d829d8a52bf50e694f2f567313508f0c684af4954def9f774ce3518" +checksum = "f5c5d17f6a4f28f9849ee3449bb9b83406a718e4275c218bf37ca247ee123779" dependencies = [ "ahash", "arrow", @@ -2610,9 +2609,9 @@ dependencies = [ [[package]] name = "datafusion-physical-optimizer" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f96a93ebfd35cc52595e85c3100730a5baa6def39ff5390d6f90d2f3f89ce53f" +checksum = "ab9fb8b3fba2634d444e0177862797dc1231e0e20bc4db291a15d39c0d4136c3" dependencies = [ "arrow", "datafusion-common", @@ -2630,9 +2629,9 @@ dependencies = [ [[package]] name = "datafusion-physical-plan" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6516a95911f763f05ec29bddd6fe987a0aa987409c213eac12faa5db7f3c9c" +checksum = "d5086cb2e579270173ff0eb38d60ba2a081f1d422a743fa673f6096920950eb5" dependencies = [ "ahash", "arrow", @@ -2661,9 +2660,9 @@ dependencies = [ [[package]] name = "datafusion-pruning" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40befe63ab3bd9f3b05d02d13466055aa81876ad580247b10bdde1ba3782cebb" +checksum = "1f84b866d906118c320459f30385048aeedbe36ac06973d3e4fa0cc5d60d722c" dependencies = [ "arrow", "arrow-schema", @@ -2679,9 +2678,9 @@ dependencies = [ [[package]] name = "datafusion-session" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26aa059f478e6fa31158e80e4685226490b39f67c2e357401e26da84914be8b2" +checksum = "3820062b9dd2846954eeb844ff9fe3662977b7d2d74947647c779fabfa502508" dependencies = [ "arrow", "async-trait", @@ -2703,9 +2702,9 @@ dependencies = [ [[package]] name = "datafusion-sql" -version = "50.1.0" +version = "50.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea3ce7cb3c31bfc6162026f6f4b11eb5a3a83c8a6b88d8b9c529ddbe97d53525" +checksum = "375232baa851b2e9d09fcbe8906141a0ec6e0e058addc5565e0d3d790bb9d51d" dependencies = [ "arrow", "bigdecimal", @@ -3212,12 +3211,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - [[package]] name = "fixedbitset" version = "0.5.7" @@ -3439,21 +3432,21 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.1+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "js-sys", "libc", "r-efi", - "wasi 0.14.7+wasi-0.2.4", + "wasip2", "wasm-bindgen", ] @@ -3541,9 +3534,9 @@ dependencies = [ [[package]] name = "half" -version = "2.7.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54c115d4f30f52c67202f079c5f9d8b49db4691f460fdb0b4c2e838261b2ba5" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", @@ -3863,7 +3856,7 @@ dependencies = [ "hyper-util", "log", "rustls 0.23.32", - "rustls-native-certs 0.8.1", + "rustls-native-certs 0.8.2", "rustls-pki-types", "tokio", 
"tokio-rustls 0.26.4", @@ -3902,7 +3895,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.0", + "socket2 0.6.1", "system-configuration", "tokio", "tower-service", @@ -4110,17 +4103,6 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" -[[package]] -name = "io-uring" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" -dependencies = [ - "bitflags 2.9.4", - "cfg-if", - "libc", -] - [[package]] name = "ipconfig" version = "0.3.2" @@ -4211,7 +4193,7 @@ version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "libc", ] @@ -4348,12 +4330,12 @@ checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "libloading" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-targets 0.53.5", + "windows-link 0.2.1", ] [[package]] @@ -4410,18 +4392,6 @@ dependencies = [ "zlib-rs", ] -[[package]] -name = "libz-sys" -version = "1.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "linux-raw-sys" version = "0.4.15" @@ -4639,7 +4609,7 @@ checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "log", - "wasi 0.11.1+wasi-snapshot-preview1", + "wasi", "windows-sys 0.59.0", ] @@ -4931,28 +4901,6 @@ dependencies = [ "libc", ] -[[package]] -name = "num_enum" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" -dependencies = [ - "num_enum_derive", - "rustversion", -] - -[[package]] -name = "num_enum_derive" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 2.0.106", -] - [[package]] name = "num_threads" version = "0.1.7" @@ -5386,7 +5334,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ - "fixedbitset 0.5.7", + "fixedbitset", "indexmap", ] @@ -5396,7 +5344,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" dependencies = [ - "fixedbitset 0.5.7", + "fixedbitset", "hashbrown 0.15.5", "indexmap", "serde", @@ -5629,15 +5577,6 @@ dependencies = [ "elliptic-curve 0.13.8", ] -[[package]] -name = "proc-macro-crate" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" -dependencies = [ - "toml_edit 0.23.7", -] - [[package]] name = "proc-macro2" version 
= "1.0.101" @@ -5824,7 +5763,7 @@ dependencies = [ "quinn-udp", "rustc-hash", "rustls 0.23.32", - "socket2 0.6.0", + "socket2 0.6.1", "thiserror 2.0.17", "tokio", "tracing", @@ -5838,7 +5777,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ "bytes", - "getrandom 0.3.3", + "getrandom 0.3.4", "lru-slab", "rand 0.9.2", "ring", @@ -5861,7 +5800,7 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.0", + "socket2 0.6.1", "tracing", "windows-sys 0.60.2", ] @@ -5937,7 +5876,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", ] [[package]] @@ -5960,36 +5899,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "rdkafka" -version = "0.38.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f1856d72dbbbea0d2a5b2eaf6af7fb3847ef2746e883b11781446a51dbc85c0" -dependencies = [ - "futures-channel", - "futures-util", - "libc", - "log", - "rdkafka-sys", - "serde", - "serde_derive", - "serde_json", - "slab", - "tokio", -] - -[[package]] -name = "rdkafka-sys" -version = "4.9.0+2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5230dca48bc354d718269f3e4353280e188b610f7af7e2fcf54b7a79d5802872" -dependencies = [ - "libc", - "libz-sys", - "num_enum", - "pkg-config", -] - [[package]] name = "readme-rustdocifier" version = "0.1.1" @@ -6047,11 +5956,13 @@ dependencies = [ [[package]] name = "reed-solomon-simd" -version = "3.0.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab6badd4f4b9c93832eb3707431e8e7bea282fae96801312f0990d48b030f8c5" +checksum = "cffef0520d30fbd4151fb20e262947ae47fb0ab276a744a19b6398438105a072" dependencies = [ - "fixedbitset 0.4.2", + "cpufeatures", + "fixedbitset", + "once_cell", "readme-rustdocifier", ] @@ -6077,9 +5988,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.12.1" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a52d8d02cacdb176ef4678de6c052efb4b3da14b78e4db683a4252762be5433" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", @@ -6089,9 +6000,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "722166aa0d7438abbaa4d5cc2c649dac844e8c56d82fb3d33e9c34b5cd268fc6" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", @@ -6106,15 +6017,15 @@ checksum = "8d942b98df5e658f56f20d592c7f868833fe38115e65c33003d8cd224b0155da" [[package]] name = "regex-syntax" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3160422bbd54dd5ecfdca71e5fd59b7b8fe2b1697ab2baf64f6d05dcc66d298" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "reqwest" -version = "0.12.23" +version = "0.12.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ "base64 0.22.1", "bytes", @@ 
-6288,11 +6199,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "128f632072dc89ced3359668399026d90eadc06c65c807c298d15ff3d1eacf63" dependencies = [ "bytes", - "fixedbitset 0.5.7", + "fixedbitset", "flume", "futures-util", "log", - "rustls-native-certs 0.8.1", + "rustls-native-certs 0.8.2", "rustls-pemfile 2.2.0", "rustls-webpki 0.102.8", "thiserror 2.0.17", @@ -6419,7 +6330,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "shadow-rs", - "socket2 0.6.0", + "socket2 0.6.1", "sysctl", "thiserror 2.0.17", "tikv-jemallocator", @@ -6484,7 +6395,6 @@ version = "0.0.5" dependencies = [ "chrono", "futures", - "once_cell", "rumqttc", "rustfs-config", "rustfs-ecstore", @@ -6791,8 +6701,6 @@ dependencies = [ name = "rustfs-obs" version = "0.0.5" dependencies = [ - "async-trait", - "chrono", "flexi_logger", "nu-ansi-term", "nvml-wrapper", @@ -6802,12 +6710,9 @@ dependencies = [ "opentelemetry-semantic-conventions", "opentelemetry-stdout", "opentelemetry_sdk", - "rdkafka", - "reqwest", "rustfs-config", "rustfs-utils", "serde", - "serde_json", "smallvec", "sysinfo", "thiserror 2.0.17", @@ -7120,9 +7025,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" dependencies = [ "openssl-probe", "rustls-pki-types", @@ -7758,12 +7663,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -8164,7 +8069,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ "fastrand", - "getrandom 0.3.3", + "getrandom 0.3.4", "once_cell", "rustix 1.1.2", "windows-sys 0.61.2", @@ -8265,9 +8170,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" +version = "0.6.1+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d" +checksum = "cd8aa5b2ab86a2cefa406d889139c162cbb230092f7d1d7cbc1716405d852a3b" dependencies = [ "cc", "libc", @@ -8275,9 +8180,9 @@ dependencies = [ [[package]] name = "tikv-jemallocator" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cec5ff18518d81584f477e9bfdf957f5bb0979b0bac3af4ca30b5b3ae2d2865" +checksum = "0359b4327f954e0567e69fb191cf1436617748813819c94b8cd4a431422d053a" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -8362,29 +8267,26 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.47.1" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ - "backtrace", "bytes", - "io-uring", "libc", "mio", 
"parking_lot", "pin-project-lite", "signal-hook-registry", - "slab", - "socket2 0.6.0", + "socket2 0.6.1", "tokio-macros", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", @@ -8472,8 +8374,8 @@ checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", "serde_spanned", - "toml_datetime 0.6.11", - "toml_edit 0.22.27", + "toml_datetime", + "toml_edit", ] [[package]] @@ -8485,15 +8387,6 @@ dependencies = [ "serde", ] -[[package]] -name = "toml_datetime" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" -dependencies = [ - "serde_core", -] - [[package]] name = "toml_edit" version = "0.22.27" @@ -8503,32 +8396,11 @@ dependencies = [ "indexmap", "serde", "serde_spanned", - "toml_datetime 0.6.11", + "toml_datetime", "toml_write", "winnow", ] -[[package]] -name = "toml_edit" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" -dependencies = [ - "indexmap", - "toml_datetime 0.7.3", - "toml_parser", - "winnow", -] - -[[package]] -name = "toml_parser" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" -dependencies = [ - "winnow", -] - [[package]] name = "toml_write" version = "0.1.2" @@ -8555,7 +8427,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "socket2 0.6.0", + "socket2 0.6.1", "sync_wrapper", "tokio", "tokio-stream", @@ -8905,7 +8777,7 @@ version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "js-sys", "rand 0.9.2", "serde", @@ -8986,12 +8858,6 @@ dependencies = [ "url", ] -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - [[package]] name = "version_check" version = "0.9.5" @@ -9029,15 +8895,6 @@ version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" -[[package]] -name = "wasi" -version = "0.14.7+wasi-0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c" -dependencies = [ - "wasip2", -] - [[package]] name = "wasip2" version = "1.0.1+wasi-0.2.4" @@ -9815,12 +9672,12 @@ checksum = "eb2a05c7c36fde6c09b08576c9f7fb4cda705990f73b58fe011abf7dfb24168b" dependencies = [ "aes", "arbitrary", - "bzip2 0.6.0", + "bzip2 0.6.1", "constant_time_eq", "crc32fast", "deflate64", "flate2", - "getrandom 0.3.3", + "getrandom 0.3.4", "hmac 0.12.1", "indexmap", "lzma-rust2", diff --git a/Cargo.toml b/Cargo.toml index 38367992..4db7052c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ members = [ "crates/madmin", # Management dashboard and admin 
API interface "crates/notify", # Notification system for events "crates/obs", # Observability utilities - "crates/policy",# Policy management + "crates/policy", # Policy management "crates/protos", # Protocol buffer definitions "crates/rio", # Rust I/O utilities and abstractions "crates/targets", # Target-specific configurations and utilities @@ -113,17 +113,17 @@ brotli = "8.0.2" bytes = { version = "1.10.1", features = ["serde"] } bytesize = "2.1.0" byteorder = "1.5.0" -cfg-if = "1.0.3" +cfg-if = "1.0.4" convert_case = "0.8.0" crc-fast = "1.3.0" chacha20poly1305 = { version = "0.10.1" } chrono = { version = "0.4.42", features = ["serde"] } -clap = { version = "4.5.48", features = ["derive", "env"] } +clap = { version = "4.5.49", features = ["derive", "env"] } const-str = { version = "0.7.0", features = ["std", "proc"] } crc32fast = "1.5.0" criterion = { version = "0.7", features = ["html_reports"] } crossbeam-queue = "0.3.12" -datafusion = "50.1.0" +datafusion = "50.2.0" derive_builder = "0.20.2" enumset = "1.1.10" flatbuffers = "25.9.23" @@ -193,10 +193,9 @@ pretty_assertions = "1.4.1" quick-xml = "0.38.3" rand = "0.9.2" rayon = "1.11.0" -rdkafka = { version = "0.38.0", features = ["tokio"] } -reed-solomon-simd = { version = "3.0.1" } -regex = { version = "1.12.1" } -reqwest = { version = "0.12.23", default-features = false, features = [ +reed-solomon-simd = { version = "3.1.0" } +regex = { version = "1.12.2" } +reqwest = { version = "0.12.24", default-features = false, features = [ "rustls-tls-webpki-roots", "charset", "http2", @@ -206,9 +205,9 @@ reqwest = { version = "0.12.23", default-features = false, features = [ "blocking", ] } rmcp = { version = "0.8.1" } -rmp = "0.8.14" -rmp-serde = "1.3.0" -rsa = "0.9.8" +rmp = { version = "0.8.14" } +rmp-serde = { version = "1.3.0" } +rsa = { version = "0.9.8" } rumqttc = { version = "0.25.0" } rust-embed = { version = "8.7.2" } rustc-hash = { version = "2.1.1" } @@ -229,7 +228,7 @@ smallvec = { version = "1.15.1", features = ["serde"] } smartstring = "1.0.1" snafu = "0.8.9" snap = "1.1.1" -socket2 = "0.6.0" +socket2 = "0.6.1" starshard = { version = "0.5.0", features = ["rayon", "async", "serde"] } strum = { version = "0.27.2", features = ["derive"] } sysinfo = "0.37.1" @@ -245,7 +244,7 @@ time = { version = "0.3.44", features = [ "macros", "serde", ] } -tokio = { version = "1.47.1", features = ["fs", "rt-multi-thread"] } +tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] } tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] } tokio-stream = { version = "0.1.17" } tokio-tar = "0.3.1" @@ -256,7 +255,7 @@ tonic-prost = { version = "0.14.2" } tonic-prost-build = { version = "0.14.2" } tower = { version = "0.5.2", features = ["timeout"] } tower-http = { version = "0.6.6", features = ["cors"] } -tracing = "0.1.41" +tracing = { version = "0.1.41" } tracing-core = "0.1.34" tracing-error = "0.2.1" tracing-opentelemetry = "0.32.0" diff --git a/crates/audit/Cargo.toml b/crates/audit/Cargo.toml index c338babe..e847bb2b 100644 --- a/crates/audit/Cargo.toml +++ b/crates/audit/Cargo.toml @@ -37,7 +37,6 @@ thiserror = { workspace = true } tokio = { workspace = true, features = ["sync", "fs", "rt-multi-thread", "rt", "time", "macros"] } tracing = { workspace = true, features = ["std", "attributes"] } url = { workspace = true } -once_cell = { workspace = true } rumqttc = { workspace = true } [lints] diff --git a/crates/audit/src/global.rs b/crates/audit/src/global.rs index 4c458ca2..e9b3176d 
100644 --- a/crates/audit/src/global.rs +++ b/crates/audit/src/global.rs @@ -13,13 +13,12 @@ // limitations under the License. use crate::{AuditEntry, AuditResult, AuditSystem}; -use once_cell::sync::OnceCell; use rustfs_ecstore::config::Config; -use std::sync::Arc; +use std::sync::{Arc, OnceLock}; use tracing::{error, warn}; /// Global audit system instance -static AUDIT_SYSTEM: OnceCell<Arc<AuditSystem>> = OnceCell::new(); +static AUDIT_SYSTEM: OnceLock<Arc<AuditSystem>> = OnceLock::new(); /// Initialize the global audit system pub fn init_audit_system() -> Arc<AuditSystem> { diff --git a/crates/audit/src/observability.rs b/crates/audit/src/observability.rs index 7b43e9f2..abbcda21 100644 --- a/crates/audit/src/observability.rs +++ b/crates/audit/src/observability.rs @@ -21,8 +21,8 @@ //! - Error rate monitoring //! - Queue depth monitoring -use std::sync::Arc; use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, OnceLock}; use std::time::{Duration, Instant}; use tokio::sync::RwLock; use tracing::info; @@ -312,7 +312,7 @@ impl PerformanceValidation { } /// Global metrics instance -static GLOBAL_METRICS: once_cell::sync::OnceCell> = once_cell::sync::OnceCell::new(); +static GLOBAL_METRICS: OnceLock> = OnceLock::new(); /// Get or initialize the global metrics instance pub fn global_metrics() -> Arc { diff --git a/crates/audit/src/registry.rs b/crates/audit/src/registry.rs index 3a1a8abf..59126c67 100644 --- a/crates/audit/src/registry.rs +++ b/crates/audit/src/registry.rs @@ -12,20 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::AuditEntry; -use crate::{AuditError, AuditResult}; -use futures::StreamExt; -use futures::stream::FuturesUnordered; -use rustfs_config::audit::AUDIT_ROUTE_PREFIX; +use crate::{AuditEntry, AuditError, AuditResult}; +use futures::{StreamExt, stream::FuturesUnordered}; use rustfs_config::{ DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR, MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN, WEBHOOK_BATCH_SIZE, WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_HTTP_TIMEOUT, WEBHOOK_MAX_RETRY, WEBHOOK_QUEUE_DIR, - WEBHOOK_QUEUE_LIMIT, WEBHOOK_RETRY_INTERVAL, + WEBHOOK_QUEUE_LIMIT, WEBHOOK_RETRY_INTERVAL, audit::AUDIT_ROUTE_PREFIX, }; use rustfs_ecstore::config::{Config, KVS}; -use rustfs_targets::target::{ChannelTargetType, TargetType, mqtt::MQTTArgs, webhook::WebhookArgs}; -use rustfs_targets::{Target, TargetError}; +use rustfs_targets::{ + Target, TargetError, + target::{ChannelTargetType, TargetType, mqtt::MQTTArgs, webhook::WebhookArgs}, +}; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use std::time::Duration; @@ -68,7 +67,10 @@ impl AuditRegistry { // A collection of asynchronous tasks for concurrently executing target creation let mut tasks = FuturesUnordered::new(); - let mut final_config = config.clone(); + // let final_config = config.clone(); + + // Record the default entry for each section so the section can be rebuilt later + let mut section_defaults: HashMap<String, KVS> = HashMap::new(); // Supported target types for audit let target_types = vec![ChannelTargetType::Webhook.as_str(), ChannelTargetType::Mqtt.as_str()]; @@ -80,11 +82,14 @@ impl AuditRegistry { info!(target_type = %target_type, "Starting audit target type processing"); // 2.
Prepare the configuration source - let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}"); + let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase(); let file_configs = config.0.get(§ion_name).cloned().unwrap_or_default(); let default_cfg = file_configs.get(DEFAULT_DELIMITER).cloned().unwrap_or_default(); debug!(?default_cfg, "Retrieved default configuration"); + // Save defaults for eventual write back + section_defaults.insert(section_name.clone(), default_cfg.clone()); + // Get valid fields for the target type let valid_fields = match target_type { "webhook" => get_webhook_valid_fields(), @@ -101,7 +106,7 @@ impl AuditRegistry { let mut env_overrides: HashMap> = HashMap::new(); for (env_key, env_value) in &all_env { - let audit_prefix = format!("{ENV_PREFIX}AUDIT_{}", target_type.to_uppercase()); + let audit_prefix = format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}").to_uppercase(); if !env_key.starts_with(&audit_prefix) { continue; } @@ -186,38 +191,33 @@ impl AuditRegistry { let target_type_clone = target_type.to_string(); let id_clone = id.clone(); let merged_config_arc = Arc::new(merged_config.clone()); - let final_config_arc = Arc::new(final_config.clone()); - let task = tokio::spawn(async move { let result = create_audit_target(&target_type_clone, &id_clone, &merged_config_arc).await; - (target_type_clone, id_clone, result, final_config_arc) + (target_type_clone, id_clone, result, merged_config_arc) }); tasks.push(task); // Update final config with successful instance - final_config - .0 - .entry(section_name.clone()) - .or_default() - .insert(id, merged_config); + // final_config.0.entry(section_name.clone()).or_default().insert(id, merged_config); } else { info!(instance_id = %id, "Skipping disabled audit target, will be removed from final configuration"); // Remove disabled target from final configuration - final_config.0.entry(section_name.clone()).or_default().remove(&id); + // final_config.0.entry(section_name.clone()).or_default().remove(&id); } } } // 6. Concurrently execute all creation tasks and collect results let mut successful_targets = Vec::new(); - + let mut successful_configs = Vec::new(); while let Some(task_result) = tasks.next().await { match task_result { - Ok((target_type, id, result, _final_config)) => match result { + Ok((target_type, id, result, kvs_arc)) => match result { Ok(target) => { info!(target_type = %target_type, instance_id = %id, "Created audit target successfully"); successful_targets.push(target); + successful_configs.push((target_type, id, kvs_arc)); } Err(e) => { error!(target_type = %target_type, instance_id = %id, error = %e, "Failed to create audit target"); @@ -229,21 +229,67 @@ impl AuditRegistry { } } - // 7. 
Save the new configuration to the system - let Some(store) = rustfs_ecstore::new_object_layer_fn() else { - return Err(AuditError::ServerNotInitialized( - "Failed to save target configuration: server storage not initialized".to_string(), - )); - }; + // Rebuild each section from its default entry plus the successfully created instances, then overwrite the stored config so deleted or disabled instances cannot reappear + if !successful_configs.is_empty() || !section_defaults.is_empty() { + info!("Rebuilding target configurations and saving them to the system configuration..."); - match rustfs_ecstore::config::com::save_server_config(store, &final_config).await { - Ok(_) => info!("New audit configuration saved to system successfully"), - Err(e) => { - error!(error = %e, "Failed to save new audit configuration"); - return Err(AuditError::SaveConfig(e.to_string())); + // Group successfully created instances by section + let mut successes_by_section: HashMap<String, HashMap<String, KVS>> = HashMap::new(); + for (target_type, id, kvs) in successful_configs { + let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase(); + successes_by_section + .entry(section_name) + .or_default() + .insert(id.to_lowercase(), (*kvs).clone()); + } + + let mut new_config = config.clone(); + + // Collect every section that needs processing: any section with a default entry or with successful instances + let mut sections: HashSet<String> = HashSet::new(); + sections.extend(section_defaults.keys().cloned()); + sections.extend(successes_by_section.keys().cloned()); + + for section_name in sections { + let mut section_map: HashMap<String, KVS> = HashMap::new(); + + // Write the default entry (if present) back under `_` + if let Some(default_cfg) = section_defaults.get(&section_name) { + if !default_cfg.is_empty() { + section_map.insert(DEFAULT_DELIMITER.to_string(), default_cfg.clone()); + } + } + + // Write back the successfully created instances + if let Some(instances) = successes_by_section.get(&section_name) { + for (id, kvs) in instances { + section_map.insert(id.clone(), kvs.clone()); + } + } + + // Remove empty sections; replace non-empty sections wholesale. + if section_map.is_empty() { + new_config.0.remove(&section_name); + } else { + new_config.0.insert(section_name, section_map); + } + } + + // 7. Save the new configuration to the system + let Some(store) = rustfs_ecstore::new_object_layer_fn() else { + return Err(AuditError::ServerNotInitialized( + "Failed to save target configuration: server storage not initialized".to_string(), + )); + }; + + match rustfs_ecstore::config::com::save_server_config(store, &new_config).await { + Ok(_) => info!("New audit configuration saved to system successfully"), + Err(e) => { + error!(error = %e, "Failed to save new audit configuration"); + return Err(AuditError::SaveConfig(e.to_string())); + } } } - Ok(successful_targets) } diff --git a/crates/audit/src/system.rs b/crates/audit/src/system.rs index 4f0c4d61..a45b7eea 100644 --- a/crates/audit/src/system.rs +++ b/crates/audit/src/system.rs @@ -12,10 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License.
-use crate::AuditEntry; -use crate::AuditRegistry; -use crate::observability; -use crate::{AuditError, AuditResult}; +use crate::{AuditEntry, AuditError, AuditRegistry, AuditResult, observability}; use rustfs_ecstore::config::Config; use rustfs_targets::{ StoreError, Target, TargetError, diff --git a/crates/config/src/observability/config.rs b/crates/config/src/observability/config.rs deleted file mode 100644 index 0032c286..00000000 --- a/crates/config/src/observability/config.rs +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Observability Keys - -pub const ENV_OBS_ENDPOINT: &str = "RUSTFS_OBS_ENDPOINT"; -pub const ENV_OBS_USE_STDOUT: &str = "RUSTFS_OBS_USE_STDOUT"; -pub const ENV_OBS_SAMPLE_RATIO: &str = "RUSTFS_OBS_SAMPLE_RATIO"; -pub const ENV_OBS_METER_INTERVAL: &str = "RUSTFS_OBS_METER_INTERVAL"; -pub const ENV_OBS_SERVICE_NAME: &str = "RUSTFS_OBS_SERVICE_NAME"; -pub const ENV_OBS_SERVICE_VERSION: &str = "RUSTFS_OBS_SERVICE_VERSION"; -pub const ENV_OBS_ENVIRONMENT: &str = "RUSTFS_OBS_ENVIRONMENT"; -pub const ENV_OBS_LOGGER_LEVEL: &str = "RUSTFS_OBS_LOGGER_LEVEL"; -pub const ENV_OBS_LOCAL_LOGGING_ENABLED: &str = "RUSTFS_OBS_LOCAL_LOGGING_ENABLED"; -pub const ENV_OBS_LOG_DIRECTORY: &str = "RUSTFS_OBS_LOG_DIRECTORY"; -pub const ENV_OBS_LOG_FILENAME: &str = "RUSTFS_OBS_LOG_FILENAME"; -pub const ENV_OBS_LOG_ROTATION_SIZE_MB: &str = "RUSTFS_OBS_LOG_ROTATION_SIZE_MB"; -pub const ENV_OBS_LOG_ROTATION_TIME: &str = "RUSTFS_OBS_LOG_ROTATION_TIME"; -pub const ENV_OBS_LOG_KEEP_FILES: &str = "RUSTFS_OBS_LOG_KEEP_FILES"; - -/// Log pool capacity for async logging -pub const ENV_OBS_LOG_POOL_CAPA: &str = "RUSTFS_OBS_LOG_POOL_CAPA"; - -/// Log message capacity for async logging -pub const ENV_OBS_LOG_MESSAGE_CAPA: &str = "RUSTFS_OBS_LOG_MESSAGE_CAPA"; - -/// Log flush interval in milliseconds for async logging -pub const ENV_OBS_LOG_FLUSH_MS: &str = "RUSTFS_OBS_LOG_FLUSH_MS"; - -/// Default values for log pool -pub const DEFAULT_OBS_LOG_POOL_CAPA: usize = 10240; - -/// Default values for message capacity -pub const DEFAULT_OBS_LOG_MESSAGE_CAPA: usize = 32768; - -/// Default values for flush interval in milliseconds -pub const DEFAULT_OBS_LOG_FLUSH_MS: u64 = 200; - -/// Audit logger queue capacity environment variable key -pub const ENV_AUDIT_LOGGER_QUEUE_CAPACITY: &str = "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY"; - -/// Default values for observability configuration -pub const DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY: usize = 10000; - -/// Default values for observability configuration -// ### Supported Environment Values -// - `production` - Secure file-only logging -// - `development` - Full debugging with stdout -// - `test` - Test environment with stdout support -// - `staging` - Staging environment with stdout support -pub const DEFAULT_OBS_ENVIRONMENT_PRODUCTION: &str = "production"; -pub const DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT: &str = "development"; -pub const DEFAULT_OBS_ENVIRONMENT_TEST: &str = "test"; -pub const 
DEFAULT_OBS_ENVIRONMENT_STAGING: &str = "staging"; - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_env_keys() { - assert_eq!(ENV_OBS_ENDPOINT, "RUSTFS_OBS_ENDPOINT"); - assert_eq!(ENV_OBS_USE_STDOUT, "RUSTFS_OBS_USE_STDOUT"); - assert_eq!(ENV_OBS_SAMPLE_RATIO, "RUSTFS_OBS_SAMPLE_RATIO"); - assert_eq!(ENV_OBS_METER_INTERVAL, "RUSTFS_OBS_METER_INTERVAL"); - assert_eq!(ENV_OBS_SERVICE_NAME, "RUSTFS_OBS_SERVICE_NAME"); - assert_eq!(ENV_OBS_SERVICE_VERSION, "RUSTFS_OBS_SERVICE_VERSION"); - assert_eq!(ENV_OBS_ENVIRONMENT, "RUSTFS_OBS_ENVIRONMENT"); - assert_eq!(ENV_OBS_LOGGER_LEVEL, "RUSTFS_OBS_LOGGER_LEVEL"); - assert_eq!(ENV_OBS_LOCAL_LOGGING_ENABLED, "RUSTFS_OBS_LOCAL_LOGGING_ENABLED"); - assert_eq!(ENV_OBS_LOG_DIRECTORY, "RUSTFS_OBS_LOG_DIRECTORY"); - assert_eq!(ENV_OBS_LOG_FILENAME, "RUSTFS_OBS_LOG_FILENAME"); - assert_eq!(ENV_OBS_LOG_ROTATION_SIZE_MB, "RUSTFS_OBS_LOG_ROTATION_SIZE_MB"); - assert_eq!(ENV_OBS_LOG_ROTATION_TIME, "RUSTFS_OBS_LOG_ROTATION_TIME"); - assert_eq!(ENV_OBS_LOG_KEEP_FILES, "RUSTFS_OBS_LOG_KEEP_FILES"); - assert_eq!(ENV_AUDIT_LOGGER_QUEUE_CAPACITY, "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY"); - } - - #[test] - fn test_default_values() { - assert_eq!(DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY, 10000); - assert_eq!(DEFAULT_OBS_ENVIRONMENT_PRODUCTION, "production"); - assert_eq!(DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT, "development"); - assert_eq!(DEFAULT_OBS_ENVIRONMENT_TEST, "test"); - assert_eq!(DEFAULT_OBS_ENVIRONMENT_STAGING, "staging"); - } -} diff --git a/crates/config/src/observability/file.rs b/crates/config/src/observability/file.rs deleted file mode 100644 index 18f6942b..00000000 --- a/crates/config/src/observability/file.rs +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// RUSTFS_SINKS_FILE_PATH -pub const ENV_SINKS_FILE_PATH: &str = "RUSTFS_SINKS_FILE_PATH"; -// RUSTFS_SINKS_FILE_BUFFER_SIZE -pub const ENV_SINKS_FILE_BUFFER_SIZE: &str = "RUSTFS_SINKS_FILE_BUFFER_SIZE"; -// RUSTFS_SINKS_FILE_FLUSH_INTERVAL_MS -pub const ENV_SINKS_FILE_FLUSH_INTERVAL_MS: &str = "RUSTFS_SINKS_FILE_FLUSH_INTERVAL_MS"; -// RUSTFS_SINKS_FILE_FLUSH_THRESHOLD -pub const ENV_SINKS_FILE_FLUSH_THRESHOLD: &str = "RUSTFS_SINKS_FILE_FLUSH_THRESHOLD"; - -pub const DEFAULT_SINKS_FILE_BUFFER_SIZE: usize = 8192; - -pub const DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS: u64 = 1000; - -pub const DEFAULT_SINKS_FILE_FLUSH_THRESHOLD: usize = 100; diff --git a/crates/config/src/observability/kafka.rs b/crates/config/src/observability/kafka.rs deleted file mode 100644 index f5589d32..00000000 --- a/crates/config/src/observability/kafka.rs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// RUSTFS_SINKS_KAFKA_BROKERS -pub const ENV_SINKS_KAFKA_BROKERS: &str = "RUSTFS_SINKS_KAFKA_BROKERS"; -pub const ENV_SINKS_KAFKA_TOPIC: &str = "RUSTFS_SINKS_KAFKA_TOPIC"; -// batch_size -pub const ENV_SINKS_KAFKA_BATCH_SIZE: &str = "RUSTFS_SINKS_KAFKA_BATCH_SIZE"; -// batch_timeout_ms -pub const ENV_SINKS_KAFKA_BATCH_TIMEOUT_MS: &str = "RUSTFS_SINKS_KAFKA_BATCH_TIMEOUT_MS"; - -// brokers -pub const DEFAULT_SINKS_KAFKA_BROKERS: &str = "localhost:9092"; -pub const DEFAULT_SINKS_KAFKA_TOPIC: &str = "rustfs-sinks"; -pub const DEFAULT_SINKS_KAFKA_BATCH_SIZE: usize = 100; -pub const DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS: u64 = 1000; diff --git a/crates/config/src/observability/mod.rs b/crates/config/src/observability/mod.rs index 2b1392bc..0032c286 100644 --- a/crates/config/src/observability/mod.rs +++ b/crates/config/src/observability/mod.rs @@ -12,12 +12,87 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod config; -mod file; -mod kafka; -mod webhook; +// Observability Keys -pub use config::*; -pub use file::*; -pub use kafka::*; -pub use webhook::*; +pub const ENV_OBS_ENDPOINT: &str = "RUSTFS_OBS_ENDPOINT"; +pub const ENV_OBS_USE_STDOUT: &str = "RUSTFS_OBS_USE_STDOUT"; +pub const ENV_OBS_SAMPLE_RATIO: &str = "RUSTFS_OBS_SAMPLE_RATIO"; +pub const ENV_OBS_METER_INTERVAL: &str = "RUSTFS_OBS_METER_INTERVAL"; +pub const ENV_OBS_SERVICE_NAME: &str = "RUSTFS_OBS_SERVICE_NAME"; +pub const ENV_OBS_SERVICE_VERSION: &str = "RUSTFS_OBS_SERVICE_VERSION"; +pub const ENV_OBS_ENVIRONMENT: &str = "RUSTFS_OBS_ENVIRONMENT"; +pub const ENV_OBS_LOGGER_LEVEL: &str = "RUSTFS_OBS_LOGGER_LEVEL"; +pub const ENV_OBS_LOCAL_LOGGING_ENABLED: &str = "RUSTFS_OBS_LOCAL_LOGGING_ENABLED"; +pub const ENV_OBS_LOG_DIRECTORY: &str = "RUSTFS_OBS_LOG_DIRECTORY"; +pub const ENV_OBS_LOG_FILENAME: &str = "RUSTFS_OBS_LOG_FILENAME"; +pub const ENV_OBS_LOG_ROTATION_SIZE_MB: &str = "RUSTFS_OBS_LOG_ROTATION_SIZE_MB"; +pub const ENV_OBS_LOG_ROTATION_TIME: &str = "RUSTFS_OBS_LOG_ROTATION_TIME"; +pub const ENV_OBS_LOG_KEEP_FILES: &str = "RUSTFS_OBS_LOG_KEEP_FILES"; + +/// Log pool capacity for async logging +pub const ENV_OBS_LOG_POOL_CAPA: &str = "RUSTFS_OBS_LOG_POOL_CAPA"; + +/// Log message capacity for async logging +pub const ENV_OBS_LOG_MESSAGE_CAPA: &str = "RUSTFS_OBS_LOG_MESSAGE_CAPA"; + +/// Log flush interval in milliseconds for async logging +pub const ENV_OBS_LOG_FLUSH_MS: &str = "RUSTFS_OBS_LOG_FLUSH_MS"; + +/// Default values for log pool +pub const DEFAULT_OBS_LOG_POOL_CAPA: usize = 10240; + +/// Default values for message capacity +pub const DEFAULT_OBS_LOG_MESSAGE_CAPA: usize = 32768; + +/// Default values for flush interval in milliseconds +pub const DEFAULT_OBS_LOG_FLUSH_MS: u64 = 200; + +/// Audit logger queue capacity environment variable key +pub const ENV_AUDIT_LOGGER_QUEUE_CAPACITY: &str = "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY"; + +/// Default values for observability configuration +pub const DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY: usize = 10000; + +/// Default values for observability configuration +// ### Supported Environment Values +// - 
`production` - Secure file-only logging +// - `development` - Full debugging with stdout +// - `test` - Test environment with stdout support +// - `staging` - Staging environment with stdout support +pub const DEFAULT_OBS_ENVIRONMENT_PRODUCTION: &str = "production"; +pub const DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT: &str = "development"; +pub const DEFAULT_OBS_ENVIRONMENT_TEST: &str = "test"; +pub const DEFAULT_OBS_ENVIRONMENT_STAGING: &str = "staging"; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_env_keys() { + assert_eq!(ENV_OBS_ENDPOINT, "RUSTFS_OBS_ENDPOINT"); + assert_eq!(ENV_OBS_USE_STDOUT, "RUSTFS_OBS_USE_STDOUT"); + assert_eq!(ENV_OBS_SAMPLE_RATIO, "RUSTFS_OBS_SAMPLE_RATIO"); + assert_eq!(ENV_OBS_METER_INTERVAL, "RUSTFS_OBS_METER_INTERVAL"); + assert_eq!(ENV_OBS_SERVICE_NAME, "RUSTFS_OBS_SERVICE_NAME"); + assert_eq!(ENV_OBS_SERVICE_VERSION, "RUSTFS_OBS_SERVICE_VERSION"); + assert_eq!(ENV_OBS_ENVIRONMENT, "RUSTFS_OBS_ENVIRONMENT"); + assert_eq!(ENV_OBS_LOGGER_LEVEL, "RUSTFS_OBS_LOGGER_LEVEL"); + assert_eq!(ENV_OBS_LOCAL_LOGGING_ENABLED, "RUSTFS_OBS_LOCAL_LOGGING_ENABLED"); + assert_eq!(ENV_OBS_LOG_DIRECTORY, "RUSTFS_OBS_LOG_DIRECTORY"); + assert_eq!(ENV_OBS_LOG_FILENAME, "RUSTFS_OBS_LOG_FILENAME"); + assert_eq!(ENV_OBS_LOG_ROTATION_SIZE_MB, "RUSTFS_OBS_LOG_ROTATION_SIZE_MB"); + assert_eq!(ENV_OBS_LOG_ROTATION_TIME, "RUSTFS_OBS_LOG_ROTATION_TIME"); + assert_eq!(ENV_OBS_LOG_KEEP_FILES, "RUSTFS_OBS_LOG_KEEP_FILES"); + assert_eq!(ENV_AUDIT_LOGGER_QUEUE_CAPACITY, "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY"); + } + + #[test] + fn test_default_values() { + assert_eq!(DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY, 10000); + assert_eq!(DEFAULT_OBS_ENVIRONMENT_PRODUCTION, "production"); + assert_eq!(DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT, "development"); + assert_eq!(DEFAULT_OBS_ENVIRONMENT_TEST, "test"); + assert_eq!(DEFAULT_OBS_ENVIRONMENT_STAGING, "staging"); + } +} diff --git a/crates/config/src/observability/webhook.rs b/crates/config/src/observability/webhook.rs deleted file mode 100644 index b40da1da..00000000 --- a/crates/config/src/observability/webhook.rs +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// RUSTFS_SINKS_WEBHOOK_ENDPOINT -pub const ENV_SINKS_WEBHOOK_ENDPOINT: &str = "RUSTFS_SINKS_WEBHOOK_ENDPOINT"; -// RUSTFS_SINKS_WEBHOOK_AUTH_TOKEN -pub const ENV_SINKS_WEBHOOK_AUTH_TOKEN: &str = "RUSTFS_SINKS_WEBHOOK_AUTH_TOKEN"; -// max_retries -pub const ENV_SINKS_WEBHOOK_MAX_RETRIES: &str = "RUSTFS_SINKS_WEBHOOK_MAX_RETRIES"; -// retry_delay_ms -pub const ENV_SINKS_WEBHOOK_RETRY_DELAY_MS: &str = "RUSTFS_SINKS_WEBHOOK_RETRY_DELAY_MS"; - -// Default values for webhook sink configuration -pub const DEFAULT_SINKS_WEBHOOK_ENDPOINT: &str = "http://localhost:8080"; -pub const DEFAULT_SINKS_WEBHOOK_AUTH_TOKEN: &str = ""; -pub const DEFAULT_SINKS_WEBHOOK_MAX_RETRIES: usize = 3; -pub const DEFAULT_SINKS_WEBHOOK_RETRY_DELAY_MS: u64 = 100; diff --git a/crates/e2e_test/Cargo.toml b/crates/e2e_test/Cargo.toml index 9f5f9538..07e2b239 100644 --- a/crates/e2e_test/Cargo.toml +++ b/crates/e2e_test/Cargo.toml @@ -49,4 +49,4 @@ uuid = { workspace = true } base64 = { workspace = true } rand = { workspace = true } chrono = { workspace = true } -md5 = { workspace = true } +md5 = { workspace = true } \ No newline at end of file diff --git a/crates/kms/src/backends/local.rs b/crates/kms/src/backends/local.rs index 3ea03c33..f73abf8a 100644 --- a/crates/kms/src/backends/local.rs +++ b/crates/kms/src/backends/local.rs @@ -635,7 +635,7 @@ impl KmsBackend for LocalKmsBackend { } async fn encrypt(&self, request: EncryptRequest) -> Result { - let encrypt_request = crate::types::EncryptRequest { + let encrypt_request = EncryptRequest { key_id: request.key_id.clone(), plaintext: request.plaintext, encryption_context: request.encryption_context, @@ -719,14 +719,14 @@ impl KmsBackend for LocalKmsBackend { .client .load_master_key(key_id) .await - .map_err(|_| crate::error::KmsError::key_not_found(format!("Key {key_id} not found")))?; + .map_err(|_| KmsError::key_not_found(format!("Key {key_id} not found")))?; let (deletion_date_str, deletion_date_dt) = if request.force_immediate.unwrap_or(false) { // For immediate deletion, actually delete the key from filesystem let key_path = self.client.master_key_path(key_id); tokio::fs::remove_file(&key_path) .await - .map_err(|e| crate::error::KmsError::internal_error(format!("Failed to delete key file: {e}")))?; + .map_err(|e| KmsError::internal_error(format!("Failed to delete key file: {e}")))?; // Remove from cache let mut cache = self.client.key_cache.write().await; @@ -756,9 +756,7 @@ impl KmsBackend for LocalKmsBackend { // Schedule for deletion (default 30 days) let days = request.pending_window_in_days.unwrap_or(30); if !(7..=30).contains(&days) { - return Err(crate::error::KmsError::invalid_parameter( - "pending_window_in_days must be between 7 and 30".to_string(), - )); + return Err(KmsError::invalid_parameter("pending_window_in_days must be between 7 and 30".to_string())); } let deletion_date = chrono::Utc::now() + chrono::Duration::days(days as i64); @@ -772,16 +770,16 @@ impl KmsBackend for LocalKmsBackend { let key_path = self.client.master_key_path(key_id); let content = tokio::fs::read(&key_path) .await - .map_err(|e| crate::error::KmsError::internal_error(format!("Failed to read key file: {e}")))?; - let stored_key: crate::backends::local::StoredMasterKey = serde_json::from_slice(&content) - .map_err(|e| crate::error::KmsError::internal_error(format!("Failed to parse stored key: {e}")))?; + .map_err(|e| KmsError::internal_error(format!("Failed to read key file: {e}")))?; + let stored_key: StoredMasterKey = + serde_json::from_slice(&content).map_err(|e| 
KmsError::internal_error(format!("Failed to parse stored key: {e}")))?; // Decrypt the existing key material to preserve it let existing_key_material = if let Some(ref cipher) = self.client.master_cipher { - let nonce = aes_gcm::Nonce::from_slice(&stored_key.nonce); + let nonce = Nonce::from_slice(&stored_key.nonce); cipher .decrypt(nonce, stored_key.encrypted_key_material.as_ref()) - .map_err(|e| crate::error::KmsError::cryptographic_error("decrypt", e.to_string()))? + .map_err(|e| KmsError::cryptographic_error("decrypt", e.to_string()))? } else { stored_key.encrypted_key_material }; @@ -820,10 +818,10 @@ impl KmsBackend for LocalKmsBackend { .client .load_master_key(key_id) .await - .map_err(|_| crate::error::KmsError::key_not_found(format!("Key {key_id} not found")))?; + .map_err(|_| KmsError::key_not_found(format!("Key {key_id} not found")))?; if master_key.status != KeyStatus::PendingDeletion { - return Err(crate::error::KmsError::invalid_key_state(format!("Key {key_id} is not pending deletion"))); + return Err(KmsError::invalid_key_state(format!("Key {key_id} is not pending deletion"))); } // Cancel the deletion by resetting the state diff --git a/crates/notify/examples/full_demo.rs b/crates/notify/examples/full_demo.rs index c63208bf..0e7b4684 100644 --- a/crates/notify/examples/full_demo.rs +++ b/crates/notify/examples/full_demo.rs @@ -68,7 +68,7 @@ async fn main() -> Result<(), NotificationError> { key: WEBHOOK_QUEUE_DIR.to_string(), value: current_root .clone() - .join("../../deploy/logs/notify/webhook") + .join("../../deploy/logs/notify") .to_str() .unwrap() .to_string(), @@ -120,11 +120,7 @@ async fn main() -> Result<(), NotificationError> { }, KV { key: MQTT_QUEUE_DIR.to_string(), - value: current_root - .join("../../deploy/logs/notify/mqtt") - .to_str() - .unwrap() - .to_string(), + value: current_root.join("../../deploy/logs/notify").to_str().unwrap().to_string(), hidden_if_empty: false, }, KV { @@ -137,7 +133,7 @@ async fn main() -> Result<(), NotificationError> { let mqtt_kvs = KVS(mqtt_kvs_vec); let mut mqtt_targets = std::collections::HashMap::new(); mqtt_targets.insert(DEFAULT_TARGET.to_string(), mqtt_kvs); - config.0.insert(NOTIFY_MQTT_SUB_SYS.to_string(), mqtt_targets); + // config.0.insert(NOTIFY_MQTT_SUB_SYS.to_string(), mqtt_targets); // Load the configuration and initialize the system *system.config.write().await = config; diff --git a/crates/notify/examples/full_demo_one.rs b/crates/notify/examples/full_demo_one.rs index 962274db..476b4516 100644 --- a/crates/notify/examples/full_demo_one.rs +++ b/crates/notify/examples/full_demo_one.rs @@ -28,6 +28,7 @@ use rustfs_targets::EventName; use rustfs_targets::arn::TargetID; use std::sync::Arc; use std::time::Duration; +use tokio::time::sleep; use tracing::info; #[tokio::main] @@ -68,7 +69,7 @@ async fn main() -> Result<(), NotificationError> { key: WEBHOOK_QUEUE_DIR.to_string(), value: current_root .clone() - .join("../../deploy/logs/notify/webhook") + .join("../../deploy/logs/notify") .to_str() .unwrap() .to_string(), @@ -91,7 +92,7 @@ async fn main() -> Result<(), NotificationError> { system.init().await?; info!("✅ System initialized with Webhook target."); - tokio::time::sleep(Duration::from_secs(1)).await; + sleep(Duration::from_secs(1)).await; // --- Dynamically update system configuration: Add an MQTT Target --- info!("\n---> Dynamically adding MQTT target..."); @@ -129,11 +130,7 @@ async fn main() -> Result<(), NotificationError> { }, KV { key: MQTT_QUEUE_DIR.to_string(), - value: current_root - 
.join("../../deploy/logs/notify/mqtt") - .to_str() - .unwrap() - .to_string(), + value: current_root.join("../../deploy/logs/notify").to_str().unwrap().to_string(), hidden_if_empty: false, }, KV { @@ -152,7 +149,7 @@ async fn main() -> Result<(), NotificationError> { .await?; info!("✅ MQTT target added and system reloaded."); - tokio::time::sleep(Duration::from_secs(1)).await; + sleep(Duration::from_secs(1)).await; // --- Loading and managing Bucket configurations --- info!("\n---> Loading bucket notification config..."); @@ -176,7 +173,7 @@ async fn main() -> Result<(), NotificationError> { system.send_event(event).await; info!("✅ Event sent. Both Webhook and MQTT targets should receive it."); - tokio::time::sleep(Duration::from_secs(2)).await; + sleep(Duration::from_secs(2)).await; // --- Dynamically remove configuration --- info!("\n---> Dynamically removing Webhook target..."); @@ -188,5 +185,6 @@ async fn main() -> Result<(), NotificationError> { info!("✅ Bucket 'my-bucket' config removed."); info!("\nDemo completed successfully"); + sleep(Duration::from_secs(1)).await; Ok(()) } diff --git a/crates/notify/examples/webhook.rs b/crates/notify/examples/webhook.rs index 514f7576..b0f47dc9 100644 --- a/crates/notify/examples/webhook.rs +++ b/crates/notify/examples/webhook.rs @@ -12,19 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -use axum::routing::get; use axum::{ Router, extract::Json, + extract::Query, http::{HeaderMap, Response, StatusCode}, - routing::post, + routing::{get, post}, }; +use rustfs_utils::parse_and_resolve_address; +use serde::Deserialize; use serde_json::Value; use std::net::SocketAddr; +use std::sync::atomic::{AtomicU64, Ordering}; use std::time::{SystemTime, UNIX_EPOCH}; - -use axum::extract::Query; -use serde::Deserialize; +use tokio::net::TcpListener; #[derive(Deserialize)] struct ResetParams { @@ -32,9 +33,6 @@ struct ResetParams { } // Define a global variable and count the number of data received -use rustfs_utils::parse_and_resolve_address; -use std::sync::atomic::{AtomicU64, Ordering}; -use tokio::net::TcpListener; static WEBHOOK_COUNT: AtomicU64 = AtomicU64::new(0); diff --git a/crates/notify/src/integration.rs b/crates/notify/src/integration.rs index 97d2c41e..d4b82336 100644 --- a/crates/notify/src/integration.rs +++ b/crates/notify/src/integration.rs @@ -296,8 +296,8 @@ impl NotificationSystem { info!("Removing config for target {} of type {}", target_name, target_type); self.update_config_and_reload(|config| { let mut changed = false; - if let Some(targets) = config.0.get_mut(target_type) { - if targets.remove(target_name).is_some() { + if let Some(targets) = config.0.get_mut(&target_type.to_lowercase()) { + if targets.remove(&target_name.to_lowercase()).is_some() { changed = true; } if targets.is_empty() { @@ -307,6 +307,7 @@ impl NotificationSystem { if !changed { info!("Target {} of type {} not found, no changes made.", target_name, target_type); } + debug!("Config after remove: {:?}", config); changed }) .await diff --git a/crates/notify/src/registry.rs b/crates/notify/src/registry.rs index 9bb9cf03..76f15936 100644 --- a/crates/notify/src/registry.rs +++ b/crates/notify/src/registry.rs @@ -16,12 +16,9 @@ use crate::Event; use crate::factory::{MQTTTargetFactory, TargetFactory, WebhookTargetFactory}; use futures::stream::{FuturesUnordered, StreamExt}; use hashbrown::{HashMap, HashSet}; -use rustfs_config::notify::NOTIFY_ROUTE_PREFIX; -use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, 
ENV_PREFIX}; +use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, notify::NOTIFY_ROUTE_PREFIX}; use rustfs_ecstore::config::{Config, KVS}; -use rustfs_targets::Target; -use rustfs_targets::TargetError; -use rustfs_targets::target::ChannelTargetType; +use rustfs_targets::{Target, TargetError, target::ChannelTargetType}; use tracing::{debug, error, info, warn}; /// Registry for managing target factories @@ -90,7 +87,9 @@ impl TargetRegistry { let all_env: Vec<(String, String)> = std::env::vars().filter(|(key, _)| key.starts_with(ENV_PREFIX)).collect(); // A collection of asynchronous tasks for concurrently executing target creation let mut tasks = FuturesUnordered::new(); - let mut final_config = config.clone(); // Clone a configuration for aggregating the final result + // let final_config = config.clone(); // Clone a configuration for aggregating the final result + // Record the defaults for each segment so that the segment can eventually be rebuilt + let mut section_defaults: HashMap<String, KVS> = HashMap::new(); // 1. Traverse all registered plants and process them by target type for (target_type, factory) in &self.factories { tracing::Span::current().record("target_type", target_type.as_str()); @@ -98,12 +97,15 @@ impl TargetRegistry { // 2. Prepare the configuration source // 2.1. Get the configuration segment in the file, e.g. 'notify_webhook' - let section_name = format!("{NOTIFY_ROUTE_PREFIX}{target_type}"); + let section_name = format!("{NOTIFY_ROUTE_PREFIX}{target_type}").to_lowercase(); let file_configs = config.0.get(&section_name).cloned().unwrap_or_default(); // 2.2. Get the default configuration for that type let default_cfg = file_configs.get(DEFAULT_DELIMITER).cloned().unwrap_or_default(); debug!(?default_cfg, "Get the default configuration"); + // Save defaults for eventual write back + section_defaults.insert(section_name.clone(), default_cfg.clone()); + // *** Optimization point 1: Get all legitimate fields of the current target type *** let valid_fields = factory.get_valid_fields(); debug!(?valid_fields, "Get the legitimate configuration fields"); @@ -111,7 +113,9 @@ impl TargetRegistry { // 3. Resolve instance IDs and configuration overrides from environment variables let mut instance_ids_from_env = HashSet::new(); // 3.1. Instance discovery: Based on the '..._ENABLE_INSTANCEID' format - let enable_prefix = format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}_{ENABLE_KEY}_").to_uppercase(); + let enable_prefix = + format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}{ENABLE_KEY}{DEFAULT_DELIMITER}") + .to_uppercase(); for (key, value) in &all_env { if value.eq_ignore_ascii_case(rustfs_config::EnableState::One.as_str()) || value.eq_ignore_ascii_case(rustfs_config::EnableState::On.as_str()) @@ -128,14 +132,14 @@ impl TargetRegistry { // 3.2. Parse all relevant environment variable configurations // 3.2.1. Build environment variable prefixes such as 'RUSTFS_NOTIFY_WEBHOOK_' - let env_prefix = format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}_").to_uppercase(); + let env_prefix = format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}").to_uppercase(); // 3.2.2.
'env_overrides' is used to store configurations parsed from environment variables in the format: {instance id -> {field -> value}} let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new(); for (key, value) in &all_env { if let Some(rest) = key.strip_prefix(&env_prefix) { // Use rsplitn to split from the right side to properly extract the INSTANCE_ID at the end // Format: _ or - let mut parts = rest.rsplitn(2, '_'); + let mut parts = rest.rsplitn(2, DEFAULT_DELIMITER); // The first part from the right is INSTANCE_ID let instance_id_part = parts.next().unwrap_or(DEFAULT_DELIMITER); @@ -224,7 +228,7 @@ impl TargetRegistry { } else { info!(instance_id = %id, "Skip the disabled target and will be removed from the final configuration"); // Remove disabled target from final configuration - final_config.0.entry(section_name.clone()).or_default().remove(&id); + // final_config.0.entry(section_name.clone()).or_default().remove(&id); } } } @@ -246,15 +250,50 @@ impl TargetRegistry { } // 7. Aggregate new configuration and write back to system configuration - if !successful_configs.is_empty() { + if !successful_configs.is_empty() || !section_defaults.is_empty() { info!( "Prepare to update {} successfully created target configurations to the system configuration...", successful_configs.len() ); - let mut new_config = config.clone(); + + let mut successes_by_section: HashMap<String, HashMap<String, KVS>> = HashMap::new(); + for (target_type, id, kvs) in successful_configs { let section_name = format!("{NOTIFY_ROUTE_PREFIX}{target_type}").to_lowercase(); - new_config.0.entry(section_name).or_default().insert(id, (*kvs).clone()); + successes_by_section + .entry(section_name) + .or_default() + .insert(id.to_lowercase(), (*kvs).clone()); + } + + let mut new_config = config.clone(); + // Collection of segments that need to be processed: Collect all segments where default items exist or where successful instances exist + let mut sections: HashSet<String> = HashSet::new(); + sections.extend(section_defaults.keys().cloned()); + sections.extend(successes_by_section.keys().cloned()); + + for section in sections { + let mut section_map: std::collections::HashMap<String, KVS> = std::collections::HashMap::new(); + // Add default item + if let Some(default_kvs) = section_defaults.get(&section) { + if !default_kvs.is_empty() { + section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone()); + } + } + + // Add successful instance item + if let Some(instances) = successes_by_section.get(&section) { + for (id, kvs) in instances { + section_map.insert(id.clone(), kvs.clone()); + } + } + + // Empty sections are removed and non-empty sections are replaced entirely.
+ if section_map.is_empty() { + new_config.0.remove(&section); + } else { + new_config.0.insert(section, section_map); + } } let Some(store) = rustfs_ecstore::global::new_object_layer_fn() else { diff --git a/crates/obs/Cargo.toml b/crates/obs/Cargo.toml index 9838561a..5527bd93 100644 --- a/crates/obs/Cargo.toml +++ b/crates/obs/Cargo.toml @@ -29,17 +29,12 @@ documentation = "https://docs.rs/rustfs-obs/latest/rustfs_obs/" workspace = true [features] -default = ["file"] -file = [] +default = [] gpu = ["dep:nvml-wrapper"] -webhook = ["dep:reqwest"] -kafka = ["dep:rdkafka"] [dependencies] rustfs-config = { workspace = true, features = ["constants", "observability"] } rustfs-utils = { workspace = true, features = ["ip", "path"] } -async-trait = { workspace = true } -chrono = { workspace = true } flexi_logger = { workspace = true } nu-ansi-term = { workspace = true } nvml-wrapper = { workspace = true, optional = true } @@ -57,24 +52,9 @@ tracing-error = { workspace = true } tracing-opentelemetry = { workspace = true } tracing-subscriber = { workspace = true, features = ["registry", "std", "fmt", "env-filter", "tracing-log", "time", "local-time", "json"] } tokio = { workspace = true, features = ["sync", "fs", "rt-multi-thread", "rt", "time", "macros"] } -reqwest = { workspace = true, optional = true } -serde_json = { workspace = true } sysinfo = { workspace = true } thiserror = { workspace = true } -# Only enable kafka features and related dependencies on Linux -[target.'cfg(target_os = "linux")'.dependencies] -rdkafka = { workspace = true, features = ["tokio"], optional = true } - - [dev-dependencies] -chrono = { workspace = true } -opentelemetry = { workspace = true } -opentelemetry_sdk = { workspace = true, features = ["rt-tokio"] } -opentelemetry-stdout = { workspace = true } -opentelemetry-otlp = { workspace = true, features = ["grpc-tonic"] } -opentelemetry-semantic-conventions = { workspace = true, features = ["semconv_experimental"] } tokio = { workspace = true, features = ["full"] } -tracing = { workspace = true, features = ["std", "attributes"] } -tracing-subscriber = { workspace = true, features = ["registry", "std", "fmt"] } \ No newline at end of file diff --git a/crates/obs/examples/config.toml b/crates/obs/examples/config.toml index 74e79cab..07cc0dc5 100644 --- a/crates/obs/examples/config.toml +++ b/crates/obs/examples/config.toml @@ -21,29 +21,4 @@ service_name = "rustfs" service_version = "0.1.0" environments = "develop" logger_level = "debug" -local_logging_enabled = true # Default is false if not specified - - -#[[sinks]] -#type = "Kafka" -#bootstrap_servers = "localhost:9092" -#topic = "logs" -#batch_size = 100 # Default is 100 if not specified -#batch_timeout_ms = 100 # Default is 1000ms if not specified -# -#[[sinks]] -#type = "Webhook" -#endpoint = "http://localhost:8080/webhook" -#auth_token = "" -#batch_size = 100 # Default is 3 if not specified -#batch_timeout_ms = 100 # Default is 100ms if not specified - -[[sinks]] -type = "File" -path = "deploy/logs/rustfs.log" -buffer_size = 102 # Default is 8192 bytes if not specified -flush_interval_ms = 1000 -flush_threshold = 100 - -[logger] -queue_capacity = 10000 \ No newline at end of file +local_logging_enabled = true # Default is false if not specified \ No newline at end of file diff --git a/crates/obs/examples/server.rs b/crates/obs/examples/server.rs index fc413957..5741d404 100644 --- a/crates/obs/examples/server.rs +++ b/crates/obs/examples/server.rs @@ -13,33 +13,25 @@ // limitations under the License.
use opentelemetry::global; -use rustfs_obs::{BaseLogEntry, ServerLogEntry, SystemObserver, get_logger, init_obs, log_info}; -use std::collections::HashMap; +use rustfs_obs::{SystemObserver, init_obs}; use std::time::{Duration, SystemTime}; -use tracing::{error, info, instrument}; -use tracing_core::Level; +use tracing::{Level, error, info, instrument}; #[tokio::main] async fn main() { - let obs_conf = Some("crates/obs/examples/config.toml".to_string()); - let (_logger, _guard) = init_obs(obs_conf).await; + let obs_conf = Some("http://localhost:4317".to_string()); + let _guard = init_obs(obs_conf).await; let span = tracing::span!(Level::INFO, "main"); let _enter = span.enter(); info!("Program starts"); // Simulate the operation tokio::time::sleep(Duration::from_millis(100)).await; - run( - "service-demo".to_string(), - "object-demo".to_string(), - "user-demo".to_string(), - "service-demo".to_string(), - ) - .await; + run("service-demo".to_string()).await; info!("Program ends"); } #[instrument(fields(bucket, object, user))] -async fn run(bucket: String, object: String, user: String, service_name: String) { +async fn run(service_name: String) { let start_time = SystemTime::now(); info!("Log module initialization is completed service_name: {:?}", service_name); @@ -56,21 +48,6 @@ async fn run(bucket: String, object: String, user: String, service_name: String) Err(e) => error!("Failed to initialize process observer: {:?}", e), } - let base_entry = BaseLogEntry::new() - .message(Some("run logger api_handler info".to_string())) - .request_id(Some("request_id".to_string())) - .timestamp(chrono::DateTime::from(start_time)) - .tags(Some(HashMap::default())); - - let server_entry = ServerLogEntry::new(Level::INFO, "api_handler".to_string()) - .with_base(base_entry) - .user_id(Some(user.clone())) - .add_field("operation".to_string(), "login".to_string()) - .add_field("bucket".to_string(), bucket.clone()) - .add_field("object".to_string(), object.clone()); - - let result = get_logger().lock().await.log_server_entry(server_entry).await; - info!("Logging is completed {:?}", result); put_object("bucket".to_string(), "object".to_string(), "user".to_string()).await; info!("Logging is completed"); tokio::time::sleep(Duration::from_secs(2)).await; @@ -97,8 +74,6 @@ async fn put_object(bucket: String, object: String, user: String) { start_time.elapsed().unwrap().as_secs_f64() ); - let result = log_info("put_object logger info", "put_object").await; - info!("put_object is completed {:?}", result); // Simulate the operation tokio::time::sleep(Duration::from_millis(100)).await; diff --git a/crates/obs/src/config.rs b/crates/obs/src/config.rs index 999a9da1..d15fc677 100644 --- a/crates/obs/src/config.rs +++ b/crates/obs/src/config.rs @@ -13,16 +13,9 @@ // limitations under the License. 
use rustfs_config::observability::{ - DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY, DEFAULT_SINKS_FILE_BUFFER_SIZE, DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS, - DEFAULT_SINKS_FILE_FLUSH_THRESHOLD, DEFAULT_SINKS_KAFKA_BATCH_SIZE, DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS, - DEFAULT_SINKS_KAFKA_BROKERS, DEFAULT_SINKS_KAFKA_TOPIC, DEFAULT_SINKS_WEBHOOK_AUTH_TOKEN, DEFAULT_SINKS_WEBHOOK_ENDPOINT, - DEFAULT_SINKS_WEBHOOK_MAX_RETRIES, DEFAULT_SINKS_WEBHOOK_RETRY_DELAY_MS, ENV_AUDIT_LOGGER_QUEUE_CAPACITY, ENV_OBS_ENDPOINT, - ENV_OBS_ENVIRONMENT, ENV_OBS_LOCAL_LOGGING_ENABLED, ENV_OBS_LOG_DIRECTORY, ENV_OBS_LOG_FILENAME, ENV_OBS_LOG_KEEP_FILES, - ENV_OBS_LOG_ROTATION_SIZE_MB, ENV_OBS_LOG_ROTATION_TIME, ENV_OBS_LOGGER_LEVEL, ENV_OBS_METER_INTERVAL, ENV_OBS_SAMPLE_RATIO, - ENV_OBS_SERVICE_NAME, ENV_OBS_SERVICE_VERSION, ENV_OBS_USE_STDOUT, ENV_SINKS_FILE_BUFFER_SIZE, - ENV_SINKS_FILE_FLUSH_INTERVAL_MS, ENV_SINKS_FILE_FLUSH_THRESHOLD, ENV_SINKS_FILE_PATH, ENV_SINKS_KAFKA_BATCH_SIZE, - ENV_SINKS_KAFKA_BATCH_TIMEOUT_MS, ENV_SINKS_KAFKA_BROKERS, ENV_SINKS_KAFKA_TOPIC, ENV_SINKS_WEBHOOK_AUTH_TOKEN, - ENV_SINKS_WEBHOOK_ENDPOINT, ENV_SINKS_WEBHOOK_MAX_RETRIES, ENV_SINKS_WEBHOOK_RETRY_DELAY_MS, + ENV_OBS_ENDPOINT, ENV_OBS_ENVIRONMENT, ENV_OBS_LOCAL_LOGGING_ENABLED, ENV_OBS_LOG_DIRECTORY, ENV_OBS_LOG_FILENAME, + ENV_OBS_LOG_KEEP_FILES, ENV_OBS_LOG_ROTATION_SIZE_MB, ENV_OBS_LOG_ROTATION_TIME, ENV_OBS_LOGGER_LEVEL, + ENV_OBS_METER_INTERVAL, ENV_OBS_SAMPLE_RATIO, ENV_OBS_SERVICE_NAME, ENV_OBS_SERVICE_VERSION, ENV_OBS_USE_STDOUT, }; use rustfs_config::{ APP_NAME, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, DEFAULT_LOG_ROTATION_SIZE_MB, DEFAULT_LOG_ROTATION_TIME, @@ -145,167 +138,10 @@ impl Default for OtelConfig { } } -/// Kafka Sink Configuration - Add batch parameters -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct KafkaSinkConfig { - pub brokers: String, - pub topic: String, - pub batch_size: Option, // Batch size, default 100 - pub batch_timeout_ms: Option, // Batch timeout time, default 1000ms -} - -impl KafkaSinkConfig { - pub fn new() -> Self { - Self::default() - } -} - -impl Default for KafkaSinkConfig { - fn default() -> Self { - Self { - brokers: env::var(ENV_SINKS_KAFKA_BROKERS) - .ok() - .filter(|s| !s.trim().is_empty()) - .unwrap_or_else(|| DEFAULT_SINKS_KAFKA_BROKERS.to_string()), - topic: env::var(ENV_SINKS_KAFKA_TOPIC) - .ok() - .filter(|s| !s.trim().is_empty()) - .unwrap_or_else(|| DEFAULT_SINKS_KAFKA_TOPIC.to_string()), - batch_size: env::var(ENV_SINKS_KAFKA_BATCH_SIZE) - .ok() - .and_then(|v| v.parse().ok()) - .or(Some(DEFAULT_SINKS_KAFKA_BATCH_SIZE)), - batch_timeout_ms: env::var(ENV_SINKS_KAFKA_BATCH_TIMEOUT_MS) - .ok() - .and_then(|v| v.parse().ok()) - .or(Some(DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS)), - } - } -} - -/// Webhook Sink Configuration - Add Retry Parameters -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct WebhookSinkConfig { - pub endpoint: String, - pub auth_token: String, - pub max_retries: Option, // Maximum number of retry times, default 3 - pub retry_delay_ms: Option, // Retry the delay cardinality, default 100ms -} - -impl WebhookSinkConfig { - pub fn new() -> Self { - Self::default() - } -} - -impl Default for WebhookSinkConfig { - fn default() -> Self { - Self { - endpoint: env::var(ENV_SINKS_WEBHOOK_ENDPOINT) - .ok() - .filter(|s| !s.trim().is_empty()) - .unwrap_or_else(|| DEFAULT_SINKS_WEBHOOK_ENDPOINT.to_string()), - auth_token: env::var(ENV_SINKS_WEBHOOK_AUTH_TOKEN) - .ok() - .filter(|s| !s.trim().is_empty()) - .unwrap_or_else(|| 
DEFAULT_SINKS_WEBHOOK_AUTH_TOKEN.to_string()), - max_retries: env::var(ENV_SINKS_WEBHOOK_MAX_RETRIES) - .ok() - .and_then(|v| v.parse().ok()) - .or(Some(DEFAULT_SINKS_WEBHOOK_MAX_RETRIES)), - retry_delay_ms: env::var(ENV_SINKS_WEBHOOK_RETRY_DELAY_MS) - .ok() - .and_then(|v| v.parse().ok()) - .or(Some(DEFAULT_SINKS_WEBHOOK_RETRY_DELAY_MS)), - } - } -} - -/// File Sink Configuration - Add buffering parameters -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct FileSinkConfig { - pub path: String, - pub buffer_size: Option, // Write buffer size, default 8192 - pub flush_interval_ms: Option, // Refresh interval time, default 1000ms - pub flush_threshold: Option, // Refresh threshold, default 100 logs -} - -impl FileSinkConfig { - pub fn new() -> Self { - Self::default() - } -} - -impl Default for FileSinkConfig { - fn default() -> Self { - Self { - path: get_log_directory_to_string(ENV_SINKS_FILE_PATH), - buffer_size: env::var(ENV_SINKS_FILE_BUFFER_SIZE) - .ok() - .and_then(|v| v.parse().ok()) - .or(Some(DEFAULT_SINKS_FILE_BUFFER_SIZE)), - flush_interval_ms: env::var(ENV_SINKS_FILE_FLUSH_INTERVAL_MS) - .ok() - .and_then(|v| v.parse().ok()) - .or(Some(DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS)), - flush_threshold: env::var(ENV_SINKS_FILE_FLUSH_THRESHOLD) - .ok() - .and_then(|v| v.parse().ok()) - .or(Some(DEFAULT_SINKS_FILE_FLUSH_THRESHOLD)), - } - } -} - -/// Sink configuration collection -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(tag = "type")] -pub enum SinkConfig { - File(FileSinkConfig), - Kafka(KafkaSinkConfig), - Webhook(WebhookSinkConfig), -} - -impl SinkConfig { - pub fn new() -> Self { - Self::File(FileSinkConfig::new()) - } -} - -impl Default for SinkConfig { - fn default() -> Self { - Self::new() - } -} - -///Logger Configuration -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct LoggerConfig { - pub queue_capacity: Option, -} - -impl LoggerConfig { - pub fn new() -> Self { - Self { - queue_capacity: env::var(ENV_AUDIT_LOGGER_QUEUE_CAPACITY) - .ok() - .and_then(|v| v.parse().ok()) - .or(Some(DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY)), - } - } -} - -impl Default for LoggerConfig { - fn default() -> Self { - Self::new() - } -} - /// Overall application configuration -/// Add observability, sinks, and logger configuration +/// Add observability configuration /// /// Observability: OpenTelemetry configuration -/// Sinks: Kafka, Webhook, File sink configuration -/// Logger: Logger configuration /// /// # Example /// ``` @@ -316,8 +152,6 @@ impl Default for LoggerConfig { #[derive(Debug, Deserialize, Clone)] pub struct AppConfig { pub observability: OtelConfig, - pub sinks: Vec, - pub logger: Option, } impl AppConfig { @@ -328,16 +162,12 @@ impl AppConfig { pub fn new() -> Self { Self { observability: OtelConfig::default(), - sinks: vec![SinkConfig::default()], - logger: Some(LoggerConfig::default()), } } pub fn new_with_endpoint(endpoint: Option) -> Self { Self { observability: OtelConfig::extract_otel_config_from_env(endpoint), - sinks: vec![SinkConfig::new()], - logger: Some(LoggerConfig::new()), } } } diff --git a/crates/obs/src/entry/args.rs b/crates/obs/src/entry/args.rs deleted file mode 100644 index d0e4df38..00000000 --- a/crates/obs/src/entry/args.rs +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::entry::ObjectVersion; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -/// Args - defines the arguments for API operations -/// Args is used to define the arguments for API operations. -/// -/// # Example -/// ``` -/// use rustfs_obs::Args; -/// use std::collections::HashMap; -/// -/// let args = Args::new() -/// .set_bucket(Some("my-bucket".to_string())) -/// .set_object(Some("my-object".to_string())) -/// .set_version_id(Some("123".to_string())) -/// .set_metadata(Some(HashMap::new())); -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize, Default, Eq, PartialEq)] -pub struct Args { - #[serde(rename = "bucket", skip_serializing_if = "Option::is_none")] - pub bucket: Option, - #[serde(rename = "object", skip_serializing_if = "Option::is_none")] - pub object: Option, - #[serde(rename = "versionId", skip_serializing_if = "Option::is_none")] - pub version_id: Option, - #[serde(rename = "objects", skip_serializing_if = "Option::is_none")] - pub objects: Option>, - #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] - pub metadata: Option>, -} - -impl Args { - /// Create a new Args object - pub fn new() -> Self { - Args { - bucket: None, - object: None, - version_id: None, - objects: None, - metadata: None, - } - } - - /// Set the bucket - pub fn set_bucket(mut self, bucket: Option) -> Self { - self.bucket = bucket; - self - } - - /// Set the object - pub fn set_object(mut self, object: Option) -> Self { - self.object = object; - self - } - - /// Set the version ID - pub fn set_version_id(mut self, version_id: Option) -> Self { - self.version_id = version_id; - self - } - - /// Set the objects - pub fn set_objects(mut self, objects: Option>) -> Self { - self.objects = objects; - self - } - - /// Set the metadata - pub fn set_metadata(mut self, metadata: Option>) -> Self { - self.metadata = metadata; - self - } -} diff --git a/crates/obs/src/entry/audit.rs b/crates/obs/src/entry/audit.rs deleted file mode 100644 index 47017a1f..00000000 --- a/crates/obs/src/entry/audit.rs +++ /dev/null @@ -1,467 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use crate::{BaseLogEntry, LogRecord, ObjectVersion}; -use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize}; -use serde_json::Value; -use std::collections::HashMap; - -/// API details structure -/// ApiDetails is used to define the details of an API operation -/// -/// The `ApiDetails` structure contains the following fields: -/// - `name` - the name of the API operation -/// - `bucket` - the bucket name -/// - `object` - the object name -/// - `objects` - the list of objects -/// - `status` - the status of the API operation -/// - `status_code` - the status code of the API operation -/// - `input_bytes` - the input bytes -/// - `output_bytes` - the output bytes -/// - `header_bytes` - the header bytes -/// - `time_to_first_byte` - the time to first byte -/// - `time_to_first_byte_in_ns` - the time to first byte in nanoseconds -/// - `time_to_response` - the time to response -/// - `time_to_response_in_ns` - the time to response in nanoseconds -/// -/// The `ApiDetails` structure contains the following methods: -/// - `new` - create a new `ApiDetails` with default values -/// - `set_name` - set the name -/// - `set_bucket` - set the bucket -/// - `set_object` - set the object -/// - `set_objects` - set the objects -/// - `set_status` - set the status -/// - `set_status_code` - set the status code -/// - `set_input_bytes` - set the input bytes -/// - `set_output_bytes` - set the output bytes -/// - `set_header_bytes` - set the header bytes -/// - `set_time_to_first_byte` - set the time to first byte -/// - `set_time_to_first_byte_in_ns` - set the time to first byte in nanoseconds -/// - `set_time_to_response` - set the time to response -/// - `set_time_to_response_in_ns` - set the time to response in nanoseconds -/// -/// # Example -/// ``` -/// use rustfs_obs::ApiDetails; -/// use rustfs_obs::ObjectVersion; -/// -/// let api = ApiDetails::new() -/// .set_name(Some("GET".to_string())) -/// .set_bucket(Some("my-bucket".to_string())) -/// .set_object(Some("my-object".to_string())) -/// .set_objects(vec![ObjectVersion::new_with_object_name("my-object".to_string())]) -/// .set_status(Some("OK".to_string())) -/// .set_status_code(Some(200)) -/// .set_input_bytes(100) -/// .set_output_bytes(200) -/// .set_header_bytes(Some(50)) -/// .set_time_to_first_byte(Some("100ms".to_string())) -/// .set_time_to_first_byte_in_ns(Some("100000000ns".to_string())) -/// .set_time_to_response(Some("200ms".to_string())) -/// .set_time_to_response_in_ns(Some("200000000ns".to_string())); -/// ``` -#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)] -pub struct ApiDetails { - #[serde(rename = "name", skip_serializing_if = "Option::is_none")] - pub name: Option, - #[serde(rename = "bucket", skip_serializing_if = "Option::is_none")] - pub bucket: Option, - #[serde(rename = "object", skip_serializing_if = "Option::is_none")] - pub object: Option, - #[serde(rename = "objects", skip_serializing_if = "Vec::is_empty", default)] - pub objects: Vec, - #[serde(rename = "status", skip_serializing_if = "Option::is_none")] - pub status: Option, - #[serde(rename = "statusCode", skip_serializing_if = "Option::is_none")] - pub status_code: Option, - #[serde(rename = "rx")] - pub input_bytes: i64, - #[serde(rename = "tx")] - pub output_bytes: i64, - #[serde(rename = "txHeaders", skip_serializing_if = "Option::is_none")] - pub header_bytes: Option, - #[serde(rename = "timeToFirstByte", skip_serializing_if = "Option::is_none")] - pub time_to_first_byte: Option, - #[serde(rename = 
"timeToFirstByteInNS", skip_serializing_if = "Option::is_none")] - pub time_to_first_byte_in_ns: Option, - #[serde(rename = "timeToResponse", skip_serializing_if = "Option::is_none")] - pub time_to_response: Option, - #[serde(rename = "timeToResponseInNS", skip_serializing_if = "Option::is_none")] - pub time_to_response_in_ns: Option, -} - -impl ApiDetails { - /// Create a new `ApiDetails` with default values - pub fn new() -> Self { - ApiDetails { - name: None, - bucket: None, - object: None, - objects: Vec::new(), - status: None, - status_code: None, - input_bytes: 0, - output_bytes: 0, - header_bytes: None, - time_to_first_byte: None, - time_to_first_byte_in_ns: None, - time_to_response: None, - time_to_response_in_ns: None, - } - } - - /// Set the name - pub fn set_name(mut self, name: Option) -> Self { - self.name = name; - self - } - - /// Set the bucket - pub fn set_bucket(mut self, bucket: Option) -> Self { - self.bucket = bucket; - self - } - - /// Set the object - pub fn set_object(mut self, object: Option) -> Self { - self.object = object; - self - } - - /// Set the objects - pub fn set_objects(mut self, objects: Vec) -> Self { - self.objects = objects; - self - } - - /// Set the status - pub fn set_status(mut self, status: Option) -> Self { - self.status = status; - self - } - - /// Set the status code - pub fn set_status_code(mut self, status_code: Option) -> Self { - self.status_code = status_code; - self - } - - /// Set the input bytes - pub fn set_input_bytes(mut self, input_bytes: i64) -> Self { - self.input_bytes = input_bytes; - self - } - - /// Set the output bytes - pub fn set_output_bytes(mut self, output_bytes: i64) -> Self { - self.output_bytes = output_bytes; - self - } - - /// Set the header bytes - pub fn set_header_bytes(mut self, header_bytes: Option) -> Self { - self.header_bytes = header_bytes; - self - } - - /// Set the time to first byte - pub fn set_time_to_first_byte(mut self, time_to_first_byte: Option) -> Self { - self.time_to_first_byte = time_to_first_byte; - self - } - - /// Set the time to first byte in nanoseconds - pub fn set_time_to_first_byte_in_ns(mut self, time_to_first_byte_in_ns: Option) -> Self { - self.time_to_first_byte_in_ns = time_to_first_byte_in_ns; - self - } - - /// Set the time to response - pub fn set_time_to_response(mut self, time_to_response: Option) -> Self { - self.time_to_response = time_to_response; - self - } - - /// Set the time to response in nanoseconds - pub fn set_time_to_response_in_ns(mut self, time_to_response_in_ns: Option) -> Self { - self.time_to_response_in_ns = time_to_response_in_ns; - self - } -} - -/// Entry - audit entry logs -/// AuditLogEntry is used to define the structure of an audit log entry -/// -/// The `AuditLogEntry` structure contains the following fields: -/// - `base` - the base log entry -/// - `version` - the version of the audit log entry -/// - `deployment_id` - the deployment ID -/// - `event` - the event -/// - `entry_type` - the type of audit message -/// - `api` - the API details -/// - `remote_host` - the remote host -/// - `user_agent` - the user agent -/// - `req_path` - the request path -/// - `req_host` - the request host -/// - `req_claims` - the request claims -/// - `req_query` - the request query -/// - `req_header` - the request header -/// - `resp_header` - the response header -/// - `access_key` - the access key -/// - `parent_user` - the parent user -/// - `error` - the error -/// -/// The `AuditLogEntry` structure contains the following methods: -/// - `new` - create a 
new `AuditEntry` with default values -/// - `new_with_values` - create a new `AuditEntry` with version, time, event and api details -/// - `with_base` - set the base log entry -/// - `set_version` - set the version -/// - `set_deployment_id` - set the deployment ID -/// - `set_event` - set the event -/// - `set_entry_type` - set the entry type -/// - `set_api` - set the API details -/// - `set_remote_host` - set the remote host -/// - `set_user_agent` - set the user agent -/// - `set_req_path` - set the request path -/// - `set_req_host` - set the request host -/// - `set_req_claims` - set the request claims -/// - `set_req_query` - set the request query -/// - `set_req_header` - set the request header -/// - `set_resp_header` - set the response header -/// - `set_access_key` - set the access key -/// - `set_parent_user` - set the parent user -/// - `set_error` - set the error -/// -/// # Example -/// ``` -/// use rustfs_obs::AuditLogEntry; -/// use rustfs_obs::ApiDetails; -/// use std::collections::HashMap; -/// -/// let entry = AuditLogEntry::new() -/// .set_version("1.0".to_string()) -/// .set_deployment_id(Some("123".to_string())) -/// .set_event("event".to_string()) -/// .set_entry_type(Some("type".to_string())) -/// .set_api(ApiDetails::new()) -/// .set_remote_host(Some("remote-host".to_string())) -/// .set_user_agent(Some("user-agent".to_string())) -/// .set_req_path(Some("req-path".to_string())) -/// .set_req_host(Some("req-host".to_string())) -/// .set_req_claims(Some(HashMap::new())) -/// .set_req_query(Some(HashMap::new())) -/// .set_req_header(Some(HashMap::new())) -/// .set_resp_header(Some(HashMap::new())) -/// .set_access_key(Some("access-key".to_string())) -/// .set_parent_user(Some("parent-user".to_string())) -/// .set_error(Some("error".to_string())); -#[derive(Debug, Serialize, Deserialize, Clone, Default)] -pub struct AuditLogEntry { - #[serde(flatten)] - pub base: BaseLogEntry, - pub version: String, - #[serde(rename = "deploymentid", skip_serializing_if = "Option::is_none")] - pub deployment_id: Option, - pub event: String, - // Class of audit message - S3, admin ops, bucket management - #[serde(rename = "type", skip_serializing_if = "Option::is_none")] - pub entry_type: Option, - pub api: ApiDetails, - #[serde(rename = "remotehost", skip_serializing_if = "Option::is_none")] - pub remote_host: Option, - #[serde(rename = "userAgent", skip_serializing_if = "Option::is_none")] - pub user_agent: Option, - #[serde(rename = "requestPath", skip_serializing_if = "Option::is_none")] - pub req_path: Option, - #[serde(rename = "requestHost", skip_serializing_if = "Option::is_none")] - pub req_host: Option, - #[serde(rename = "requestClaims", skip_serializing_if = "Option::is_none")] - pub req_claims: Option>, - #[serde(rename = "requestQuery", skip_serializing_if = "Option::is_none")] - pub req_query: Option>, - #[serde(rename = "requestHeader", skip_serializing_if = "Option::is_none")] - pub req_header: Option>, - #[serde(rename = "responseHeader", skip_serializing_if = "Option::is_none")] - pub resp_header: Option>, - #[serde(rename = "accessKey", skip_serializing_if = "Option::is_none")] - pub access_key: Option, - #[serde(rename = "parentUser", skip_serializing_if = "Option::is_none")] - pub parent_user: Option, - #[serde(rename = "error", skip_serializing_if = "Option::is_none")] - pub error: Option, -} - -impl AuditLogEntry { - /// Create a new `AuditEntry` with default values - pub fn new() -> Self { - AuditLogEntry { - base: BaseLogEntry::new(), - version: String::new(), 
- deployment_id: None, - event: String::new(), - entry_type: None, - api: ApiDetails::new(), - remote_host: None, - user_agent: None, - req_path: None, - req_host: None, - req_claims: None, - req_query: None, - req_header: None, - resp_header: None, - access_key: None, - parent_user: None, - error: None, - } - } - - /// Create a new `AuditEntry` with version, time, event and api details - pub fn new_with_values(version: String, time: DateTime, event: String, api: ApiDetails) -> Self { - let mut base = BaseLogEntry::new(); - base.timestamp = time; - - AuditLogEntry { - base, - version, - deployment_id: None, - event, - entry_type: None, - api, - remote_host: None, - user_agent: None, - req_path: None, - req_host: None, - req_claims: None, - req_query: None, - req_header: None, - resp_header: None, - access_key: None, - parent_user: None, - error: None, - } - } - - /// Set the base log entry - pub fn with_base(mut self, base: BaseLogEntry) -> Self { - self.base = base; - self - } - - /// Set the version - pub fn set_version(mut self, version: String) -> Self { - self.version = version; - self - } - - /// Set the deployment ID - pub fn set_deployment_id(mut self, deployment_id: Option) -> Self { - self.deployment_id = deployment_id; - self - } - - /// Set the event - pub fn set_event(mut self, event: String) -> Self { - self.event = event; - self - } - - /// Set the entry type - pub fn set_entry_type(mut self, entry_type: Option) -> Self { - self.entry_type = entry_type; - self - } - - /// Set the API details - pub fn set_api(mut self, api: ApiDetails) -> Self { - self.api = api; - self - } - - /// Set the remote host - pub fn set_remote_host(mut self, remote_host: Option) -> Self { - self.remote_host = remote_host; - self - } - - /// Set the user agent - pub fn set_user_agent(mut self, user_agent: Option) -> Self { - self.user_agent = user_agent; - self - } - - /// Set the request path - pub fn set_req_path(mut self, req_path: Option) -> Self { - self.req_path = req_path; - self - } - - /// Set the request host - pub fn set_req_host(mut self, req_host: Option) -> Self { - self.req_host = req_host; - self - } - - /// Set the request claims - pub fn set_req_claims(mut self, req_claims: Option>) -> Self { - self.req_claims = req_claims; - self - } - - /// Set the request query - pub fn set_req_query(mut self, req_query: Option>) -> Self { - self.req_query = req_query; - self - } - - /// Set the request header - pub fn set_req_header(mut self, req_header: Option>) -> Self { - self.req_header = req_header; - self - } - - /// Set the response header - pub fn set_resp_header(mut self, resp_header: Option>) -> Self { - self.resp_header = resp_header; - self - } - - /// Set the access key - pub fn set_access_key(mut self, access_key: Option) -> Self { - self.access_key = access_key; - self - } - - /// Set the parent user - pub fn set_parent_user(mut self, parent_user: Option) -> Self { - self.parent_user = parent_user; - self - } - - /// Set the error - pub fn set_error(mut self, error: Option) -> Self { - self.error = error; - self - } -} - -impl LogRecord for AuditLogEntry { - fn to_json(&self) -> String { - serde_json::to_string(self).unwrap_or_else(|_| String::from("{}")) - } - - fn get_timestamp(&self) -> DateTime { - self.base.timestamp - } -} diff --git a/crates/obs/src/entry/base.rs b/crates/obs/src/entry/base.rs deleted file mode 100644 index 23c05c3f..00000000 --- a/crates/obs/src/entry/base.rs +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache 
License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize}; -use serde_json::Value; -use std::collections::HashMap; - -/// Base log entry structure shared by all log types -/// This structure is used to serialize log entries to JSON -/// and send them to the log sinks -/// This structure is also used to deserialize log entries from JSON -/// This structure is also used to store log entries in the database -/// This structure is also used to query log entries from the database -/// -/// The `BaseLogEntry` structure contains the following fields: -/// - `timestamp` - the timestamp of the log entry -/// - `request_id` - the request ID of the log entry -/// - `message` - the message of the log entry -/// - `tags` - the tags of the log entry -/// -/// The `BaseLogEntry` structure contains the following methods: -/// - `new` - create a new `BaseLogEntry` with default values -/// - `message` - set the message -/// - `request_id` - set the request ID -/// - `tags` - set the tags -/// - `timestamp` - set the timestamp -/// -/// # Example -/// ``` -/// use rustfs_obs::BaseLogEntry; -/// use chrono::{DateTime, Utc}; -/// use std::collections::HashMap; -/// -/// let timestamp = Utc::now(); -/// let request = Some("req-123".to_string()); -/// let message = Some("This is a log message".to_string()); -/// let tags = Some(HashMap::new()); -/// -/// let entry = BaseLogEntry::new() -/// .timestamp(timestamp) -/// .request_id(request) -/// .message(message) -/// .tags(tags); -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Default)] -pub struct BaseLogEntry { - #[serde(rename = "time")] - pub timestamp: DateTime, - - #[serde(rename = "requestID", skip_serializing_if = "Option::is_none")] - pub request_id: Option, - - #[serde(rename = "message", skip_serializing_if = "Option::is_none")] - pub message: Option, - - #[serde(rename = "tags", skip_serializing_if = "Option::is_none")] - pub tags: Option>, -} - -impl BaseLogEntry { - /// Create a new BaseLogEntry with default values - pub fn new() -> Self { - BaseLogEntry { - timestamp: Utc::now(), - request_id: None, - message: None, - tags: None, - } - } - - /// Set the message - pub fn message(mut self, message: Option) -> Self { - self.message = message; - self - } - - /// Set the request ID - pub fn request_id(mut self, request_id: Option) -> Self { - self.request_id = request_id; - self - } - - /// Set the tags - pub fn tags(mut self, tags: Option>) -> Self { - self.tags = tags; - self - } - - /// Set the timestamp - pub fn timestamp(mut self, timestamp: DateTime) -> Self { - self.timestamp = timestamp; - self - } -} diff --git a/crates/obs/src/entry/mod.rs b/crates/obs/src/entry/mod.rs deleted file mode 100644 index 9c319ffd..00000000 --- a/crates/obs/src/entry/mod.rs +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub(crate) mod args; -pub(crate) mod audit; -pub(crate) mod base; -pub(crate) mod unified; - -use serde::de::Error; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use tracing_core::Level; - -/// ObjectVersion is used across multiple modules -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -pub struct ObjectVersion { - #[serde(rename = "name")] - pub object_name: String, - #[serde(rename = "versionId", skip_serializing_if = "Option::is_none")] - pub version_id: Option, -} - -impl ObjectVersion { - /// Create a new ObjectVersion object - pub fn new() -> Self { - ObjectVersion { - object_name: String::new(), - version_id: None, - } - } - - /// Create a new ObjectVersion with object name - pub fn new_with_object_name(object_name: String) -> Self { - ObjectVersion { - object_name, - version_id: None, - } - } - - /// Set the object name - pub fn set_object_name(mut self, object_name: String) -> Self { - self.object_name = object_name; - self - } - - /// Set the version ID - pub fn set_version_id(mut self, version_id: Option) -> Self { - self.version_id = version_id; - self - } -} - -impl Default for ObjectVersion { - fn default() -> Self { - Self::new() - } -} - -/// Log kind/level enum -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)] -pub enum LogKind { - #[serde(rename = "INFO")] - #[default] - Info, - #[serde(rename = "WARNING")] - Warning, - #[serde(rename = "ERROR")] - Error, - #[serde(rename = "FATAL")] - Fatal, -} - -/// Trait for types that can be serialized to JSON and have a timestamp -/// This trait is used by `ServerLogEntry` to convert the log entry to JSON -/// and get the timestamp of the log entry -/// This trait is implemented by `ServerLogEntry` -/// -/// # Example -/// ``` -/// use rustfs_obs::LogRecord; -/// use chrono::{DateTime, Utc}; -/// use rustfs_obs::ServerLogEntry; -/// use tracing_core::Level; -/// -/// let log_entry = ServerLogEntry::new(Level::INFO, "api_handler".to_string()); -/// let json = log_entry.to_json(); -/// let timestamp = log_entry.get_timestamp(); -/// ``` -pub trait LogRecord { - fn to_json(&self) -> String; - fn get_timestamp(&self) -> chrono::DateTime; -} - -/// Wrapper for `tracing_core::Level` to implement `Serialize` and `Deserialize` -/// for `ServerLogEntry` -/// This is necessary because `tracing_core::Level` does not implement `Serialize` -/// and `Deserialize` -/// This is a workaround to allow `ServerLogEntry` to be serialized and deserialized -/// using `serde` -/// -/// # Example -/// ``` -/// use rustfs_obs::SerializableLevel; -/// use tracing_core::Level; -/// -/// let level = Level::INFO; -/// let serializable_level = SerializableLevel::from(level); -/// ``` -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct SerializableLevel(pub Level); - -impl From for SerializableLevel { - fn from(level: Level) -> Self { - SerializableLevel(level) - } -} - -impl From for Level { - fn from(serializable_level: SerializableLevel) -> Self { - serializable_level.0 - } -} - -impl Serialize for SerializableLevel { - fn serialize(&self, serializer: S) -> Result - where - S: 
Serializer, - { - serializer.serialize_str(self.0.as_str()) - } -} - -impl<'de> Deserialize<'de> for SerializableLevel { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - match s.as_str() { - "TRACE" => Ok(SerializableLevel(Level::TRACE)), - "DEBUG" => Ok(SerializableLevel(Level::DEBUG)), - "INFO" => Ok(SerializableLevel(Level::INFO)), - "WARN" => Ok(SerializableLevel(Level::WARN)), - "ERROR" => Ok(SerializableLevel(Level::ERROR)), - _ => Err(D::Error::custom("unknown log level")), - } - } -} diff --git a/crates/obs/src/entry/unified.rs b/crates/obs/src/entry/unified.rs deleted file mode 100644 index 6248f150..00000000 --- a/crates/obs/src/entry/unified.rs +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{AuditLogEntry, BaseLogEntry, LogKind, LogRecord, SerializableLevel}; -use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize}; -use tracing_core::Level; - -/// Server log entry with structured fields -/// ServerLogEntry is used to log structured log entries from the server -/// -/// The `ServerLogEntry` structure contains the following fields: -/// - `base` - the base log entry -/// - `level` - the log level -/// - `source` - the source of the log entry -/// - `user_id` - the user ID -/// - `fields` - the structured fields of the log entry -/// -/// The `ServerLogEntry` structure contains the following methods: -/// - `new` - create a new `ServerLogEntry` with specified level and source -/// - `with_base` - set the base log entry -/// - `user_id` - set the user ID -/// - `fields` - set the fields -/// - `add_field` - add a field -/// -/// # Example -/// ``` -/// use rustfs_obs::ServerLogEntry; -/// use tracing_core::Level; -/// -/// let entry = ServerLogEntry::new(Level::INFO, "test_module".to_string()) -/// .user_id(Some("user-456".to_string())) -/// .add_field("operation".to_string(), "login".to_string()); -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -pub struct ServerLogEntry { - #[serde(flatten)] - pub base: BaseLogEntry, - - pub level: SerializableLevel, - pub source: String, - - #[serde(rename = "userId", skip_serializing_if = "Option::is_none")] - pub user_id: Option, - - #[serde(skip_serializing_if = "Vec::is_empty", default)] - pub fields: Vec<(String, String)>, -} - -impl ServerLogEntry { - /// Create a new ServerLogEntry with specified level and source - pub fn new(level: Level, source: String) -> Self { - ServerLogEntry { - base: BaseLogEntry::new(), - level: SerializableLevel(level), - source, - user_id: None, - fields: Vec::new(), - } - } - - /// Set the base log entry - pub fn with_base(mut self, base: BaseLogEntry) -> Self { - self.base = base; - self - } - - /// Set the user ID - pub fn user_id(mut self, user_id: Option) -> Self { - self.user_id = user_id; - self - } - - /// Set fields - pub fn fields(mut self, fields: Vec<(String, String)>) -> Self { - self.fields = fields; 
- self - } - - /// Add a field - pub fn add_field(mut self, key: String, value: String) -> Self { - self.fields.push((key, value)); - self - } -} - -impl LogRecord for ServerLogEntry { - fn to_json(&self) -> String { - serde_json::to_string(self).unwrap_or_else(|_| String::from("{}")) - } - - fn get_timestamp(&self) -> DateTime<Utc> { - self.base.timestamp - } -} - -/// Console log entry structure -/// ConsoleLogEntry is used to log console log entries -/// The `ConsoleLogEntry` structure contains the following fields: -/// - `base` - the base log entry -/// - `level` - the log level -/// - `console_msg` - the console message -/// - `node_name` - the node name -/// - `err` - the error message -/// -/// The `ConsoleLogEntry` structure contains the following methods: -/// - `new` - create a new `ConsoleLogEntry` -/// - `new_with_console_msg` - create a new `ConsoleLogEntry` with console message and node name -/// - `with_base` - set the base log entry -/// - `set_level` - set the log level -/// - `set_node_name` - set the node name -/// - `set_console_msg` - set the console message -/// - `set_err` - set the error message -/// -/// # Example -/// ``` -/// use rustfs_obs::ConsoleLogEntry; -/// -/// let entry = ConsoleLogEntry::new_with_console_msg("Test message".to_string(), "node-123".to_string()); -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ConsoleLogEntry { - #[serde(flatten)] - pub base: BaseLogEntry, - - pub level: LogKind, - pub console_msg: String, - pub node_name: String, - - #[serde(skip)] - pub err: Option<String>, -} - -impl ConsoleLogEntry { - /// Create a new ConsoleLogEntry - pub fn new() -> Self { - ConsoleLogEntry { - base: BaseLogEntry::new(), - level: LogKind::Info, - console_msg: String::new(), - node_name: String::new(), - err: None, - } - } - - /// Create a new ConsoleLogEntry with console message and node name - pub fn new_with_console_msg(console_msg: String, node_name: String) -> Self { - ConsoleLogEntry { - base: BaseLogEntry::new(), - level: LogKind::Info, - console_msg, - node_name, - err: None, - } - } - - /// Set the base log entry - pub fn with_base(mut self, base: BaseLogEntry) -> Self { - self.base = base; - self - } - - /// Set the log level - pub fn set_level(mut self, level: LogKind) -> Self { - self.level = level; - self - } - - /// Set the node name - pub fn set_node_name(mut self, node_name: String) -> Self { - self.node_name = node_name; - self - } - - /// Set the console message - pub fn set_console_msg(mut self, console_msg: String) -> Self { - self.console_msg = console_msg; - self - } - - /// Set the error message - pub fn set_err(mut self, err: Option<String>) -> Self { - self.err = err; - self - } -} - -impl Default for ConsoleLogEntry { - fn default() -> Self { - Self::new() - } -} - -impl LogRecord for ConsoleLogEntry { - fn to_json(&self) -> String { - serde_json::to_string(self).unwrap_or_else(|_| String::from("{}")) - } - - fn get_timestamp(&self) -> DateTime<Utc> { - self.base.timestamp - } -} - -/// Unified log entry type -/// UnifiedLogEntry is used to log different types of log entries -/// -/// The `UnifiedLogEntry` enum contains the following variants: -/// - `Server` - a server log entry -/// - `Audit` - an audit log entry -/// - `Console` - a console log entry -/// -/// The `UnifiedLogEntry` enum contains the following methods: -/// - `to_json` - convert the log entry to JSON -/// - `get_timestamp` - get the timestamp of the log entry -/// -/// # Example -/// ``` -/// use rustfs_obs::{UnifiedLogEntry, ServerLogEntry}; -/// use
tracing_core::Level; -/// -/// let server_entry = ServerLogEntry::new(Level::INFO, "test_module".to_string()); -/// let unified = UnifiedLogEntry::Server(server_entry); -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(tag = "type")] -pub enum UnifiedLogEntry { - #[serde(rename = "server")] - Server(ServerLogEntry), - - #[serde(rename = "audit")] - Audit(Box<AuditLogEntry>), - - #[serde(rename = "console")] - Console(ConsoleLogEntry), -} - -impl LogRecord for UnifiedLogEntry { - fn to_json(&self) -> String { - match self { - UnifiedLogEntry::Server(entry) => entry.to_json(), - UnifiedLogEntry::Audit(entry) => entry.to_json(), - UnifiedLogEntry::Console(entry) => entry.to_json(), - } - } - - fn get_timestamp(&self) -> DateTime<Utc> { - match self { - UnifiedLogEntry::Server(entry) => entry.get_timestamp(), - UnifiedLogEntry::Audit(entry) => entry.get_timestamp(), - UnifiedLogEntry::Console(entry) => entry.get_timestamp(), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_base_log_entry() { - let base = BaseLogEntry::new() - .request_id(Some("req-123".to_string())) - .message(Some("Test message".to_string())); - - assert_eq!(base.request_id, Some("req-123".to_string())); - assert_eq!(base.message, Some("Test message".to_string())); - } - - #[test] - fn test_server_log_entry() { - let entry = ServerLogEntry::new(Level::INFO, "test_module".to_string()) - .user_id(Some("user-456".to_string())) - .add_field("operation".to_string(), "login".to_string()); - - assert_eq!(entry.level.0, Level::INFO); - assert_eq!(entry.source, "test_module"); - assert_eq!(entry.user_id, Some("user-456".to_string())); - assert_eq!(entry.fields.len(), 1); - assert_eq!(entry.fields[0], ("operation".to_string(), "login".to_string())); - } - - #[test] - fn test_unified_log_entry_json() { - let server_entry = ServerLogEntry::new(Level::INFO, "test_source".to_string()); - let unified = UnifiedLogEntry::Server(server_entry); - - let json = unified.to_json(); - assert!(json.contains("test_source")); - } -} diff --git a/crates/obs/src/global.rs b/crates/obs/src/global.rs index 56fa5049..052774de 100644 --- a/crates/obs/src/global.rs +++ b/crates/obs/src/global.rs @@ -12,9 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::logger::InitLogStatus; +use crate::AppConfig; use crate::telemetry::{OtelGuard, init_telemetry}; -use crate::{AppConfig, Logger, get_global_logger, init_global_logger}; use std::sync::{Arc, Mutex}; use tokio::sync::{OnceCell, SetError}; use tracing::{error, info}; @@ -61,46 +60,14 @@ pub enum GlobalError { /// /// # #[tokio::main] /// # async fn main() { -/// let (logger, guard) = init_obs(None).await; +/// let guard = init_obs(None).await; /// # } /// ``` -pub async fn init_obs(endpoint: Option<String>) -> (Arc<Mutex<Logger>>, OtelGuard) { +pub async fn init_obs(endpoint: Option<String>) -> OtelGuard { // Load the configuration file let config = AppConfig::new_with_endpoint(endpoint); - let guard = init_telemetry(&config.observability); - - let logger = init_global_logger(&config).await; - let obs_config = config.observability.clone(); - tokio::spawn(async move { - let result = InitLogStatus::init_start_log(&obs_config).await; - match result { - Ok(_) => { - info!("Logger initialized successfully"); - } - Err(e) => { - error!("Failed to initialize logger: {}", e); - } - } - }); - - (logger, guard) -} - -/// Get the global logger instance -/// This function returns a reference to the global logger instance.
-/// -/// # Returns -/// A reference to the global logger instance -/// -/// # Example -/// ```no_run -/// use rustfs_obs::get_logger; -/// -/// let logger = get_logger(); -/// ``` -pub fn get_logger() -> &'static Arc> { - get_global_logger() + init_telemetry(&config.observability) } /// Set the global guard for OpenTelemetry @@ -117,7 +84,7 @@ pub fn get_logger() -> &'static Arc> { /// use rustfs_obs::{ init_obs, set_global_guard}; /// /// async fn init() -> Result<(), Box> { -/// let (_, guard) = init_obs(None).await; +/// let guard = init_obs(None).await; /// set_global_guard(guard)?; /// Ok(()) /// } diff --git a/crates/obs/src/lib.rs b/crates/obs/src/lib.rs index 4b7775fa..43bfec38 100644 --- a/crates/obs/src/lib.rs +++ b/crates/obs/src/lib.rs @@ -18,10 +18,7 @@ //! //! ## feature mark //! -//! - `file`: enable file logging enabled by default //! - `gpu`: gpu monitoring function -//! - `kafka`: enable kafka metric output -//! - `webhook`: enable webhook notifications //! - `full`: includes all functions //! //! to enable gpu monitoring add in cargo toml @@ -41,27 +38,15 @@ /// /// # #[tokio::main] /// # async fn main() { -/// let (logger, guard) = init_obs(None).await; +/// # let guard = init_obs(None).await; /// # } /// ``` mod config; -mod entry; mod global; -mod logger; mod metrics; -mod sinks; mod system; mod telemetry; -mod worker; -pub use config::{AppConfig, LoggerConfig, OtelConfig, SinkConfig}; -pub use entry::args::Args; -pub use entry::audit::{ApiDetails, AuditLogEntry}; -pub use entry::base::BaseLogEntry; -pub use entry::unified::{ConsoleLogEntry, ServerLogEntry, UnifiedLogEntry}; -pub use entry::{LogKind, LogRecord, ObjectVersion, SerializableLevel}; +pub use config::AppConfig; pub use global::*; -pub use logger::Logger; -pub use logger::{get_global_logger, init_global_logger, start_logger}; -pub use logger::{log_debug, log_error, log_info, log_trace, log_warn, log_with_context}; pub use system::SystemObserver; diff --git a/crates/obs/src/logger.rs b/crates/obs/src/logger.rs deleted file mode 100644 index 1f109e40..00000000 --- a/crates/obs/src/logger.rs +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use crate::sinks::Sink; -use crate::{ - AppConfig, AuditLogEntry, BaseLogEntry, ConsoleLogEntry, GlobalError, OtelConfig, ServerLogEntry, UnifiedLogEntry, sinks, -}; -use rustfs_config::{APP_NAME, ENVIRONMENT, SERVICE_VERSION}; -use std::sync::Arc; -use std::time::SystemTime; -use tokio::sync::mpsc::{self, Receiver, Sender}; -use tokio::sync::{Mutex, OnceCell}; -use tracing_core::Level; - -// Add the global instance at the module level -static GLOBAL_LOGGER: OnceCell>> = OnceCell::const_new(); - -/// Server log processor -#[derive(Debug)] -pub struct Logger { - sender: Sender, // Log sending channel - queue_capacity: usize, -} - -impl Logger { - /// Create a new Logger instance - /// Returns Logger and corresponding Receiver - pub fn new(config: &AppConfig) -> (Self, Receiver) { - // Get queue capacity from configuration, or use default values 10000 - let queue_capacity = config.logger.as_ref().and_then(|l| l.queue_capacity).unwrap_or(10000); - let (sender, receiver) = mpsc::channel(queue_capacity); - (Logger { sender, queue_capacity }, receiver) - } - - /// get the queue capacity - /// This function returns the queue capacity. - /// # Returns - /// The queue capacity - /// # Example - /// ``` - /// use rustfs_obs::Logger; - /// async fn example(logger: &Logger) { - /// let _ = logger.get_queue_capacity(); - /// } - /// ``` - pub fn get_queue_capacity(&self) -> usize { - self.queue_capacity - } - - /// Log a server entry - #[tracing::instrument(skip(self), fields(log_source = "logger_server"))] - pub async fn log_server_entry(&self, entry: ServerLogEntry) -> Result<(), GlobalError> { - self.log_entry(UnifiedLogEntry::Server(entry)).await - } - - /// Log an audit entry - #[tracing::instrument(skip(self), fields(log_source = "logger_audit"))] - pub async fn log_audit_entry(&self, entry: AuditLogEntry) -> Result<(), GlobalError> { - self.log_entry(UnifiedLogEntry::Audit(Box::new(entry))).await - } - - /// Log a console entry - #[tracing::instrument(skip(self), fields(log_source = "logger_console"))] - pub async fn log_console_entry(&self, entry: ConsoleLogEntry) -> Result<(), GlobalError> { - self.log_entry(UnifiedLogEntry::Console(entry)).await - } - - /// Asynchronous logging of unified log entries - #[tracing::instrument(skip_all, fields(log_source = "logger"))] - pub async fn log_entry(&self, entry: UnifiedLogEntry) -> Result<(), GlobalError> { - // Extract information for tracing based on entry type - match &entry { - UnifiedLogEntry::Server(server) => { - tracing::Span::current() - .record("log_level", server.level.0.as_str()) - .record("log_message", server.base.message.as_deref().unwrap_or("log message not set")) - .record("source", &server.source); - - // Generate tracing event based on log level - match server.level.0 { - Level::ERROR => { - tracing::error!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or("")); - } - Level::WARN => { - tracing::warn!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or("")); - } - Level::INFO => { - tracing::info!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or("")); - } - Level::DEBUG => { - tracing::debug!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or("")); - } - Level::TRACE => { - tracing::trace!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or("")); - } - } - } - UnifiedLogEntry::Audit(audit) => { - tracing::info!( - target: "audit_logs", - event = %audit.event, - api = 
%audit.api.name.as_deref().unwrap_or("unknown"), - message = %audit.base.message.as_deref().unwrap_or("") - ); - } - UnifiedLogEntry::Console(console) => { - let level_str = match console.level { - crate::LogKind::Info => "INFO", - crate::LogKind::Warning => "WARN", - crate::LogKind::Error => "ERROR", - crate::LogKind::Fatal => "FATAL", - }; - - tracing::info!( - target: "console_logs", - level = %level_str, - node = %console.node_name, - message = %console.console_msg - ); - } - } - - // Send logs to async queue with improved error handling - match self.sender.try_send(entry) { - Ok(_) => Ok(()), - Err(mpsc::error::TrySendError::Full(entry)) => { - // Processing strategy when queue is full - tracing::warn!("Log queue full, applying backpressure"); - match tokio::time::timeout(std::time::Duration::from_millis(500), self.sender.send(entry)).await { - Ok(Ok(_)) => Ok(()), - Ok(Err(_)) => Err(GlobalError::SendFailed("Channel closed")), - Err(_) => Err(GlobalError::Timeout("Queue backpressure timeout")), - } - } - Err(mpsc::error::TrySendError::Closed(_)) => Err(GlobalError::SendFailed("Logger channel closed")), - } - } - - /// Write log with context information - /// This function writes log messages with context information. - /// - /// # Parameters - /// - `message`: Message to be logged - /// - `source`: Source of the log - /// - `request_id`: Request ID - /// - `user_id`: User ID - /// - `fields`: Additional fields - /// - /// # Returns - /// Result indicating whether the operation was successful - /// - /// # Example - /// ``` - /// use tracing_core::Level; - /// use rustfs_obs::Logger; - /// - /// async fn example(logger: &Logger) { - /// let _ = logger.write_with_context("This is an information message", "example",Level::INFO, Some("req-12345".to_string()), Some("user-6789".to_string()), vec![("endpoint".to_string(), "/api/v1/data".to_string())]).await; - /// } - pub async fn write_with_context( - &self, - message: &str, - source: &str, - level: Level, - request_id: Option, - user_id: Option, - fields: Vec<(String, String)>, - ) -> Result<(), GlobalError> { - let base = BaseLogEntry::new().message(Some(message.to_string())).request_id(request_id); - - let server_entry = ServerLogEntry::new(level, source.to_string()) - .user_id(user_id) - .fields(fields) - .with_base(base); - - self.log_server_entry(server_entry).await - } - - /// Write log - /// This function writes log messages. - /// # Parameters - /// - `message`: Message to be logged - /// - `source`: Source of the log - /// - `level`: Log level - /// - /// # Returns - /// Result indicating whether the operation was successful - /// - /// # Example - /// ``` - /// use rustfs_obs::Logger; - /// use tracing_core::Level; - /// - /// async fn example(logger: &Logger) { - /// let _ = logger.write("This is an information message", "example", Level::INFO).await; - /// } - /// ``` - pub async fn write(&self, message: &str, source: &str, level: Level) -> Result<(), GlobalError> { - self.write_with_context(message, source, level, None, None, Vec::new()).await - } - - /// Shutdown the logger - /// This function shuts down the logger. 
- /// - /// # Returns - /// Result indicating whether the operation was successful - /// - /// # Example - /// ``` - /// use rustfs_obs::Logger; - /// - /// async fn example(logger: Logger) { - /// let _ = logger.shutdown().await; - /// } - /// ``` - pub async fn shutdown(self) -> Result<(), GlobalError> { - drop(self.sender); //Close the sending end so that the receiver knows that there is no new message - Ok(()) - } -} - -/// Start the log module -/// This function starts the log module. -/// It initializes the logger and starts the worker to process logs. -/// # Parameters -/// - `config`: Configuration information -/// - `sinks`: A vector of Sink instances -/// # Returns -/// The global logger instance -/// # Example -/// ```no_run -/// use rustfs_obs::{AppConfig, start_logger}; -/// -/// let config = AppConfig::default(); -/// let sinks = vec![]; -/// let logger = start_logger(&config, sinks); -/// ``` -pub fn start_logger(config: &AppConfig, sinks: Vec>) -> Logger { - let (logger, receiver) = Logger::new(config); - tokio::spawn(crate::worker::start_worker(receiver, sinks)); - logger -} - -/// Initialize the global logger instance -/// This function initializes the global logger instance and returns a reference to it. -/// If the logger has been initialized before, it will return the existing logger instance. -/// -/// # Parameters -/// - `config`: Configuration information -/// - `sinks`: A vector of Sink instances -/// -/// # Returns -/// A reference to the global logger instance -/// -/// # Example -/// ``` -/// use rustfs_obs::{AppConfig,init_global_logger}; -/// -/// let config = AppConfig::default(); -/// let logger = init_global_logger(&config); -/// ``` -pub async fn init_global_logger(config: &AppConfig) -> Arc> { - let sinks = sinks::create_sinks(config).await; - let logger = Arc::new(Mutex::new(start_logger(config, sinks))); - GLOBAL_LOGGER.set(logger.clone()).expect("Logger already initialized"); - logger -} - -/// Get the global logger instance -/// -/// This function returns a reference to the global logger instance. -/// -/// # Returns -/// A reference to the global logger instance -/// -/// # Example -/// ```no_run -/// use rustfs_obs::get_global_logger; -/// -/// let logger = get_global_logger(); -/// ``` -pub fn get_global_logger() -> &'static Arc> { - GLOBAL_LOGGER.get().expect("Logger not initialized") -} - -/// Log information -/// This function logs information messages. -/// -/// # Parameters -/// - `message`: Message to be logged -/// - `source`: Source of the log -/// -/// # Returns -/// Result indicating whether the operation was successful -/// -/// # Example -/// ```no_run -/// use rustfs_obs::log_info; -/// -/// async fn example() { -/// let _ = log_info("This is an information message", "example").await; -/// } -/// ``` -pub async fn log_info(message: &str, source: &str) -> Result<(), GlobalError> { - get_global_logger().lock().await.write(message, source, Level::INFO).await -} - -/// Log error -/// This function logs error messages. 
-/// # Parameters -/// - `message`: Message to be logged -/// - `source`: Source of the log -/// # Returns -/// Result indicating whether the operation was successful -/// # Example -/// ```no_run -/// use rustfs_obs::log_error; -/// -/// async fn example() { -/// let _ = log_error("This is an error message", "example").await; -/// } -pub async fn log_error(message: &str, source: &str) -> Result<(), GlobalError> { - get_global_logger().lock().await.write(message, source, Level::ERROR).await -} - -/// Log warning -/// This function logs warning messages. -/// # Parameters -/// - `message`: Message to be logged -/// - `source`: Source of the log -/// # Returns -/// Result indicating whether the operation was successful -/// -/// # Example -/// ```no_run -/// use rustfs_obs::log_warn; -/// -/// async fn example() { -/// let _ = log_warn("This is a warning message", "example").await; -/// } -/// ``` -pub async fn log_warn(message: &str, source: &str) -> Result<(), GlobalError> { - get_global_logger().lock().await.write(message, source, Level::WARN).await -} - -/// Log debug -/// This function logs debug messages. -/// # Parameters -/// - `message`: Message to be logged -/// - `source`: Source of the log -/// # Returns -/// Result indicating whether the operation was successful -/// -/// # Example -/// ```no_run -/// use rustfs_obs::log_debug; -/// -/// async fn example() { -/// let _ = log_debug("This is a debug message", "example").await; -/// } -/// ``` -pub async fn log_debug(message: &str, source: &str) -> Result<(), GlobalError> { - get_global_logger().lock().await.write(message, source, Level::DEBUG).await -} - -/// Log trace -/// This function logs trace messages. -/// # Parameters -/// - `message`: Message to be logged -/// - `source`: Source of the log -/// -/// # Returns -/// Result indicating whether the operation was successful -/// -/// # Example -/// ```no_run -/// use rustfs_obs::log_trace; -/// -/// async fn example() { -/// let _ = log_trace("This is a trace message", "example").await; -/// } -/// ``` -pub async fn log_trace(message: &str, source: &str) -> Result<(), GlobalError> { - get_global_logger().lock().await.write(message, source, Level::TRACE).await -} - -/// Log with context information -/// This function logs messages with context information. 
-/// # Parameters -/// - `message`: Message to be logged -/// - `source`: Source of the log -/// - `level`: Log level -/// - `request_id`: Request ID -/// - `user_id`: User ID -/// - `fields`: Additional fields -/// # Returns -/// Result indicating whether the operation was successful -/// # Example -/// ```no_run -/// use tracing_core::Level; -/// use rustfs_obs::log_with_context; -/// -/// async fn example() { -/// let _ = log_with_context("This is an information message", "example", Level::INFO, Some("req-12345".to_string()), Some("user-6789".to_string()), vec![("endpoint".to_string(), "/api/v1/data".to_string())]).await; -/// } -/// ``` -pub async fn log_with_context( - message: &str, - source: &str, - level: Level, - request_id: Option, - user_id: Option, - fields: Vec<(String, String)>, -) -> Result<(), GlobalError> { - get_global_logger() - .lock() - .await - .write_with_context(message, source, level, request_id, user_id, fields) - .await -} - -/// Log initialization status -#[derive(Debug)] -pub(crate) struct InitLogStatus { - pub timestamp: SystemTime, - pub service_name: String, - pub version: String, - pub environment: String, -} - -impl Default for InitLogStatus { - fn default() -> Self { - Self { - timestamp: SystemTime::now(), - service_name: String::from(APP_NAME), - version: SERVICE_VERSION.to_string(), - environment: ENVIRONMENT.to_string(), - } - } -} - -impl InitLogStatus { - pub fn new_config(config: &OtelConfig) -> Self { - let config = config.clone(); - let environment = config.environment.unwrap_or(ENVIRONMENT.to_string()); - let version = config.service_version.unwrap_or(SERVICE_VERSION.to_string()); - Self { - timestamp: SystemTime::now(), - service_name: String::from(APP_NAME), - version, - environment, - } - } - - pub async fn init_start_log(config: &OtelConfig) -> Result<(), GlobalError> { - let status = Self::new_config(config); - log_init_state(Some(status)).await - } -} - -/// Log initialization details during system startup -async fn log_init_state(status: Option) -> Result<(), GlobalError> { - let status = status.unwrap_or_default(); - - let base_entry = BaseLogEntry::new() - .timestamp(chrono::DateTime::from(status.timestamp)) - .message(Some(format!( - "Service initialization started - {} v{} in {}", - status.service_name, status.version, status.environment - ))) - .request_id(Some("system_init".to_string())); - - let server_entry = ServerLogEntry::new(Level::INFO, "system_initialization".to_string()) - .with_base(base_entry) - .user_id(Some("system".to_string())); - - get_global_logger().lock().await.log_server_entry(server_entry).await?; - Ok(()) -} diff --git a/crates/obs/src/sinks/file.rs b/crates/obs/src/sinks/file.rs deleted file mode 100644 index fe1bcb60..00000000 --- a/crates/obs/src/sinks/file.rs +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use crate::sinks::Sink; -use crate::{LogRecord, UnifiedLogEntry}; -use async_trait::async_trait; -use std::sync::Arc; -use tokio::fs::OpenOptions; -use tokio::io; -use tokio::io::AsyncWriteExt; - -/// File Sink Implementation -pub struct FileSink { - path: String, - buffer_size: usize, - writer: Arc>>, - entry_count: std::sync::atomic::AtomicUsize, - last_flush: std::sync::atomic::AtomicU64, - flush_interval_ms: u64, // Time between flushes - flush_threshold: usize, // Number of entries before flush -} - -impl FileSink { - /// Create a new FileSink instance - pub async fn new( - path: String, - buffer_size: usize, - flush_interval_ms: u64, - flush_threshold: usize, - ) -> Result { - // check if the file exists - let file_exists = tokio::fs::metadata(&path).await.is_ok(); - // if the file not exists, create it - if !file_exists { - tokio::fs::create_dir_all(std::path::Path::new(&path).parent().unwrap()).await?; - tracing::debug!("File does not exist, creating it. Path: {:?}", path) - } - let file = if file_exists { - // If the file exists, open it in append mode - tracing::debug!("FileSink: File exists, opening in append mode. Path: {:?}", path); - OpenOptions::new().append(true).create(true).open(&path).await? - } else { - // If the file does not exist, create it - tracing::debug!("FileSink: File does not exist, creating a new file."); - // Create the file and write a header or initial content if needed - OpenOptions::new().create(true).truncate(true).write(true).open(&path).await? - }; - let writer = io::BufWriter::with_capacity(buffer_size, file); - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_millis() as u64; - Ok(FileSink { - path, - buffer_size, - writer: Arc::new(tokio::sync::Mutex::new(writer)), - entry_count: std::sync::atomic::AtomicUsize::new(0), - last_flush: std::sync::atomic::AtomicU64::new(now), - flush_interval_ms, - flush_threshold, - }) - } - - #[allow(dead_code)] - async fn initialize_writer(&mut self) -> io::Result<()> { - let file = tokio::fs::File::create(&self.path).await?; - - // Use buffer_size to create a buffer writer with a specified capacity - let buf_writer = io::BufWriter::with_capacity(self.buffer_size, file); - - // Replace the original writer with the new Mutex - self.writer = Arc::new(tokio::sync::Mutex::new(buf_writer)); - Ok(()) - } - - // Get the current buffer size - #[allow(dead_code)] - pub fn buffer_size(&self) -> usize { - self.buffer_size - } - - // How to dynamically adjust the buffer size - #[allow(dead_code)] - pub async fn set_buffer_size(&mut self, new_size: usize) -> io::Result<()> { - if self.buffer_size != new_size { - self.buffer_size = new_size; - // Reinitialize the writer directly, without checking is_some() - self.initialize_writer().await?; - } - Ok(()) - } - - // Check if flushing is needed based on count or time - fn should_flush(&self) -> bool { - // Check entry count threshold - if self.entry_count.load(std::sync::atomic::Ordering::Relaxed) >= self.flush_threshold { - return true; - } - - // Check time threshold - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_millis() as u64; - - let last = self.last_flush.load(std::sync::atomic::Ordering::Relaxed); - now - last >= self.flush_interval_ms - } -} - -#[async_trait] -impl Sink for FileSink { - async fn write(&self, entry: &UnifiedLogEntry) { - let line = format!("{entry:?}\n"); - let mut writer = self.writer.lock().await; - - if let Err(e) = 
writer.write_all(line.as_bytes()).await { - eprintln!( - "Failed to write log to file {}: {},entry timestamp:{:?}", - self.path, - e, - entry.get_timestamp() - ); - return; - } - - // Only flush periodically to improve performance - // Logic to determine when to flush could be added here - // Increment the entry count - self.entry_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed); - - // Check if we should flush - if self.should_flush() { - if let Err(e) = writer.flush().await { - eprintln!("Failed to flush log file {}: {}", self.path, e); - return; - } - - // Reset counters - self.entry_count.store(0, std::sync::atomic::Ordering::Relaxed); - - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_millis() as u64; - - self.last_flush.store(now, std::sync::atomic::Ordering::Relaxed); - } - } -} - -impl Drop for FileSink { - fn drop(&mut self) { - let writer = self.writer.clone(); - let path = self.path.clone(); - - tokio::task::spawn_blocking(move || { - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - let mut writer = writer.lock().await; - if let Err(e) = writer.flush().await { - eprintln!("Failed to flush log file {path}: {e}"); - } - }); - }); - } -} diff --git a/crates/obs/src/sinks/kafka.rs b/crates/obs/src/sinks/kafka.rs deleted file mode 100644 index 3dd9ad5a..00000000 --- a/crates/obs/src/sinks/kafka.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use crate::sinks::Sink; -use crate::{LogRecord, UnifiedLogEntry}; -use async_trait::async_trait; -use std::sync::Arc; - -/// Kafka Sink Implementation -pub struct KafkaSink { - producer: rdkafka::producer::FutureProducer, - topic: String, - batch_size: usize, - batch_timeout_ms: u64, - entries: Arc>>, - last_flush: Arc, -} - -impl KafkaSink { - /// Create a new KafkaSink instance - pub fn new(producer: rdkafka::producer::FutureProducer, topic: String, batch_size: usize, batch_timeout_ms: u64) -> Self { - // Create Arc-wrapped values first - let entries = Arc::new(tokio::sync::Mutex::new(Vec::with_capacity(batch_size))); - let last_flush = Arc::new(std::sync::atomic::AtomicU64::new( - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_millis() as u64, - )); - let sink = KafkaSink { - producer: producer.clone(), - topic: topic.clone(), - batch_size, - batch_timeout_ms, - entries: entries.clone(), - last_flush: last_flush.clone(), - }; - - // Start background flusher - tokio::spawn(Self::periodic_flush(producer, topic, entries, last_flush, batch_timeout_ms)); - - sink - } - - /// Add a getter method to read the batch_timeout_ms field - #[allow(dead_code)] - pub fn batch_timeout(&self) -> u64 { - self.batch_timeout_ms - } - - /// Add a method to dynamically adjust the timeout if needed - #[allow(dead_code)] - pub fn set_batch_timeout(&mut self, new_timeout_ms: u64) { - self.batch_timeout_ms = new_timeout_ms; - } - - async fn periodic_flush( - producer: rdkafka::producer::FutureProducer, - topic: String, - entries: Arc>>, - last_flush: Arc, - timeout_ms: u64, - ) { - loop { - tokio::time::sleep(tokio::time::Duration::from_millis(timeout_ms / 2)).await; - - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_millis() as u64; - - let last = last_flush.load(std::sync::atomic::Ordering::Relaxed); - - if now - last >= timeout_ms { - let mut batch = entries.lock().await; - if !batch.is_empty() { - Self::send_batch(&producer, &topic, batch.drain(..).collect()).await; - last_flush.store(now, std::sync::atomic::Ordering::Relaxed); - } - } - } - } - - async fn send_batch(producer: &rdkafka::producer::FutureProducer, topic: &str, entries: Vec) { - for entry in entries { - let payload = match serde_json::to_string(&entry) { - Ok(p) => p, - Err(e) => { - eprintln!("Failed to serialize log entry: {e}"); - continue; - } - }; - - let span_id = entry.get_timestamp().to_rfc3339(); - - let _ = producer - .send( - rdkafka::producer::FutureRecord::to(topic).payload(&payload).key(&span_id), - std::time::Duration::from_secs(5), - ) - .await; - } - } -} - -#[async_trait] -impl Sink for KafkaSink { - async fn write(&self, entry: &UnifiedLogEntry) { - let mut batch = self.entries.lock().await; - batch.push(entry.clone()); - - let should_flush_by_size = batch.len() >= self.batch_size; - let should_flush_by_time = { - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_millis() as u64; - let last = self.last_flush.load(std::sync::atomic::Ordering::Relaxed); - now - last >= self.batch_timeout_ms - }; - - if should_flush_by_size || should_flush_by_time { - // Existing flush logic - let entries_to_send: Vec = batch.drain(..).collect(); - let producer = self.producer.clone(); - let topic = self.topic.clone(); - - self.last_flush.store( - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_millis() as u64, - std::sync::atomic::Ordering::Relaxed, - ); - - 
tokio::spawn(async move { - KafkaSink::send_batch(&producer, &topic, entries_to_send).await; - }); - } - } -} - -impl Drop for KafkaSink { - fn drop(&mut self) { - // Perform any necessary cleanup here - // For example, you might want to flush any remaining entries - let producer = self.producer.clone(); - let topic = self.topic.clone(); - let entries = self.entries.clone(); - let last_flush = self.last_flush.clone(); - - tokio::spawn(async move { - let mut batch = entries.lock().await; - if !batch.is_empty() { - KafkaSink::send_batch(&producer, &topic, batch.drain(..).collect()).await; - last_flush.store( - std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_millis() as u64, - std::sync::atomic::Ordering::Relaxed, - ); - } - }); - - eprintln!("Dropping KafkaSink with topic: {0}", self.topic); - } -} diff --git a/crates/obs/src/sinks/mod.rs b/crates/obs/src/sinks/mod.rs deleted file mode 100644 index 71d84ac4..00000000 --- a/crates/obs/src/sinks/mod.rs +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{AppConfig, SinkConfig, UnifiedLogEntry}; -use async_trait::async_trait; -use std::sync::Arc; - -#[cfg(feature = "file")] -mod file; -#[cfg(all(feature = "kafka", target_os = "linux"))] -mod kafka; -#[cfg(feature = "webhook")] -mod webhook; - -/// Sink Trait definition, asynchronously write logs -#[async_trait] -pub trait Sink: Send + Sync { - async fn write(&self, entry: &UnifiedLogEntry); -} - -/// Create a list of Sink instances -pub async fn create_sinks(config: &AppConfig) -> Vec> { - let mut sinks: Vec> = Vec::new(); - - for sink_config in &config.sinks { - match sink_config { - #[cfg(all(feature = "kafka", target_os = "linux"))] - SinkConfig::Kafka(kafka_config) => { - match rdkafka::config::ClientConfig::new() - .set("bootstrap.servers", &kafka_config.brokers) - .set("message.timeout.ms", "5000") - .create() - { - Ok(producer) => { - sinks.push(Arc::new(kafka::KafkaSink::new( - producer, - kafka_config.topic.clone(), - kafka_config - .batch_size - .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_KAFKA_BATCH_SIZE), - kafka_config - .batch_timeout_ms - .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS), - ))); - tracing::info!("Kafka sink created for topic: {}", kafka_config.topic); - } - Err(e) => { - tracing::error!("Failed to create Kafka producer: {}", e); - } - } - } - - #[cfg(feature = "webhook")] - SinkConfig::Webhook(webhook_config) => { - sinks.push(Arc::new(webhook::WebhookSink::new( - webhook_config.endpoint.clone(), - webhook_config.auth_token.clone(), - webhook_config - .max_retries - .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_WEBHOOK_MAX_RETRIES), - webhook_config - .retry_delay_ms - .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_WEBHOOK_RETRY_DELAY_MS), - ))); - tracing::info!("Webhook sink created for endpoint: {}", webhook_config.endpoint); - } - #[cfg(feature = "file")] - 
SinkConfig::File(file_config) => { - tracing::debug!("FileSink: Using path: {}", file_config.path); - match file::FileSink::new( - std::path::Path::new(&file_config.path) - .join(rustfs_config::DEFAULT_SINK_FILE_LOG_FILE) - .to_string_lossy() - .to_string(), - file_config - .buffer_size - .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_FILE_BUFFER_SIZE), - file_config - .flush_interval_ms - .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS), - file_config - .flush_threshold - .unwrap_or(rustfs_config::observability::DEFAULT_SINKS_FILE_FLUSH_THRESHOLD), - ) - .await - { - Ok(sink) => { - sinks.push(Arc::new(sink)); - tracing::info!("File sink created for path: {}", file_config.path); - } - Err(e) => { - tracing::error!("Failed to create File sink: {}", e); - } - } - } - #[cfg(any(not(feature = "kafka"), not(target_os = "linux")))] - SinkConfig::Kafka(_) => { - tracing::warn!("Kafka sink is configured but the 'kafka' feature is not enabled"); - } - #[cfg(not(feature = "webhook"))] - SinkConfig::Webhook(_) => { - tracing::warn!("Webhook sink is configured but the 'webhook' feature is not enabled"); - } - #[cfg(not(feature = "file"))] - SinkConfig::File(_) => { - tracing::warn!("File sink is configured but the 'file' feature is not enabled"); - } - } - } - - sinks -} diff --git a/crates/obs/src/sinks/webhook.rs b/crates/obs/src/sinks/webhook.rs deleted file mode 100644 index 8350b016..00000000 --- a/crates/obs/src/sinks/webhook.rs +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use crate::UnifiedLogEntry; -use crate::sinks::Sink; -use async_trait::async_trait; - -/// Webhook Sink Implementation -pub struct WebhookSink { - endpoint: String, - auth_token: String, - client: reqwest::Client, - max_retries: usize, - retry_delay_ms: u64, -} - -impl WebhookSink { - pub fn new(endpoint: String, auth_token: String, max_retries: usize, retry_delay_ms: u64) -> Self { - WebhookSink { - endpoint, - auth_token, - client: reqwest::Client::builder() - .timeout(std::time::Duration::from_secs(10)) - .build() - .unwrap_or_else(|_| reqwest::Client::new()), - max_retries, - retry_delay_ms, - } - } -} - -#[async_trait] -impl Sink for WebhookSink { - async fn write(&self, entry: &UnifiedLogEntry) { - let mut retries = 0; - let url = self.endpoint.clone(); - let entry_clone = entry.clone(); - let auth_value = reqwest::header::HeaderValue::from_str(format!("Bearer {}", self.auth_token.clone()).as_str()).unwrap(); - while retries < self.max_retries { - match self - .client - .post(&url) - .header(reqwest::header::AUTHORIZATION, auth_value.clone()) - .json(&entry_clone) - .send() - .await - { - Ok(response) if response.status().is_success() => { - return; - } - _ => { - retries += 1; - if retries < self.max_retries { - tokio::time::sleep(tokio::time::Duration::from_millis( - self.retry_delay_ms * (1 << retries), // Exponential backoff - )) - .await; - } - } - } - } - - eprintln!("Failed to send log to webhook after {0} retries", self.max_retries); - } -} - -impl Drop for WebhookSink { - fn drop(&mut self) { - // Perform any necessary cleanup here - // For example, you might want to log that the sink is being dropped - eprintln!("Dropping WebhookSink with URL: {0}", self.endpoint); - } -} diff --git a/crates/obs/src/telemetry.rs b/crates/obs/src/telemetry.rs index d31380fd..f8233ce8 100644 --- a/crates/obs/src/telemetry.rs +++ b/crates/obs/src/telemetry.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::OtelConfig; +use crate::config::OtelConfig; use flexi_logger::{ Age, Cleanup, Criterion, DeferredNow, FileSpec, LogSpecification, Naming, Record, WriteMode, WriteMode::{AsyncWith, BufferAndFlush}, @@ -63,7 +63,8 @@ use tracing_subscriber::{EnvFilter, Layer, layer::SubscriberExt, util::Subscribe /// - The tracer provider (for distributed tracing) /// - The meter provider (for metrics collection) /// - The logger provider (for structured logging) -// Implement Debug trait correctly, rather than using derive, as some fields may not have implemented Debug +/// +/// Implement Debug trait correctly, rather than using derive, as some fields may not have implemented Debug pub struct OtelGuard { tracer_provider: Option, meter_provider: Option, diff --git a/crates/obs/src/worker.rs b/crates/obs/src/worker.rs deleted file mode 100644 index 144ecd7c..00000000 --- a/crates/obs/src/worker.rs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2024 RustFS Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use crate::{UnifiedLogEntry, sinks::Sink}; -use std::sync::Arc; -use tokio::sync::mpsc::Receiver; - -/// Start the log processing worker thread -pub(crate) async fn start_worker(receiver: Receiver, sinks: Vec>) { - let mut receiver = receiver; - while let Some(entry) = receiver.recv().await { - for sink in &sinks { - sink.write(&entry).await; - } - } -} diff --git a/crates/utils/src/net.rs b/crates/utils/src/net.rs index 51f1220d..acb0725d 100644 --- a/crates/utils/src/net.rs +++ b/crates/utils/src/net.rs @@ -13,18 +13,14 @@ // limitations under the License. use bytes::Bytes; -use futures::pin_mut; -use futures::{Stream, StreamExt}; -use std::io::Error; +use futures::{Stream, StreamExt, pin_mut}; use std::{ collections::{HashMap, HashSet}, fmt::Display, - net::{IpAddr, SocketAddr, TcpListener, ToSocketAddrs}, - time::{Duration, Instant}, -}; -use std::{ - net::Ipv6Addr, + io::Error, + net::{IpAddr, Ipv6Addr, SocketAddr, TcpListener, ToSocketAddrs}, sync::{Arc, LazyLock, Mutex, RwLock}, + time::{Duration, Instant}, }; use tracing::{error, info}; use transform_stream::AsyncTryStream; diff --git a/rustfs/Cargo.toml b/rustfs/Cargo.toml index 790e9a29..f0e4436e 100644 --- a/rustfs/Cargo.toml +++ b/rustfs/Cargo.toml @@ -57,6 +57,7 @@ rustfs-protos = { workspace = true } rustfs-s3select-query = { workspace = true } rustfs-targets = { workspace = true } rustfs-kms = { workspace = true } +rustfs-lock.workspace = true atoi = { workspace = true } atomic_enum = { workspace = true } axum.workspace = true @@ -69,7 +70,9 @@ chrono = { workspace = true } clap = { workspace = true } datafusion = { workspace = true } const-str = { workspace = true } +flatbuffers.workspace = true futures.workspace = true +futures-util.workspace = true hyper.workspace = true hyper-util.workspace = true http.workspace = true @@ -80,6 +83,7 @@ mime_guess = { workspace = true } opentelemetry = { workspace = true } pin-project-lite.workspace = true reqwest = { workspace = true } +rmp-serde.workspace = true rustls = { workspace = true } rust-embed = { workspace = true, features = ["interpolate-folder-path"] } s3s.workspace = true @@ -116,10 +120,7 @@ url = { workspace = true } urlencoding = { workspace = true } uuid = { workspace = true } zip = { workspace = true } -futures-util.workspace = true -rmp-serde.workspace = true -flatbuffers.workspace = true -rustfs-lock.workspace = true + [target.'cfg(any(target_os = "macos", target_os = "freebsd", target_os = "netbsd", target_os = "openbsd"))'.dependencies] sysctl = { workspace = true } @@ -129,7 +130,7 @@ sysctl = { workspace = true } libsystemd.workspace = true [target.'cfg(all(target_os = "linux", target_env = "gnu"))'.dependencies] -tikv-jemallocator = "0.6" +tikv-jemallocator = "0.6.1" [target.'cfg(all(target_os = "linux", target_env = "musl"))'.dependencies] mimalloc = "0.1" diff --git a/rustfs/src/main.rs b/rustfs/src/main.rs index 8d860ddd..8a418fa7 100644 --- a/rustfs/src/main.rs +++ b/rustfs/src/main.rs @@ -127,7 +127,7 @@ async fn async_main() -> Result<()> { init_license(opt.license.clone()); // Initialize Observability - let (_logger, guard) = init_obs(Some(opt.clone().obs_endpoint)).await; + let guard = init_obs(Some(opt.clone().obs_endpoint)).await; // print startup logo info!("{}", LOGO); diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 34eb95fc..759e37c6 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -117,7 +117,7 @@ use tokio::{io::AsyncRead, sync::mpsc}; use tokio_stream::wrappers::ReceiverStream; use 
tokio_tar::Archive; use tokio_util::io::{ReaderStream, StreamReader}; -use tracing::{debug, error, info, warn}; +use tracing::{debug, error, info, instrument, warn}; use uuid::Uuid; macro_rules! try_ { @@ -505,7 +505,7 @@ async fn get_validated_store(bucket: &str) -> S3Result) -> S3Result> { let CopyObjectInput { copy_source, @@ -709,7 +709,7 @@ impl S3 for FS { src_info.put_object_reader = Some(PutObjReader::new(reader)); // check quota - // TODO: src metadada + // TODO: src metadata for (k, v) in compress_metadata { src_info.user_defined.insert(k, v); @@ -923,7 +923,7 @@ impl S3 for FS { } /// Delete a bucket - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn delete_bucket(&self, req: S3Request) -> S3Result> { let input = req.input; // TODO: DeleteBucketInput doesn't have force parameter? @@ -945,7 +945,7 @@ impl S3 for FS { let event_args = rustfs_notify::event::EventArgs { event_name: EventName::BucketRemoved, bucket_name: input.bucket, - object: rustfs_ecstore::store_api::ObjectInfo { ..Default::default() }, + object: ObjectInfo { ..Default::default() }, req_params: rustfs_utils::extract_req_params_header(&req.headers), resp_elements: rustfs_utils::extract_resp_elements(&S3Response::new(DeleteBucketOutput {})), version_id: String::new(), @@ -962,7 +962,7 @@ impl S3 for FS { } /// Delete an object - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn delete_object(&self, mut req: S3Request) -> S3Result> { let DeleteObjectInput { bucket, key, version_id, .. @@ -1069,7 +1069,7 @@ impl S3 for FS { let event_args = rustfs_notify::event::EventArgs { event_name, bucket_name: bucket.clone(), - object: rustfs_ecstore::store_api::ObjectInfo { + object: ObjectInfo { name: key.clone(), bucket: bucket.clone(), ..Default::default() @@ -1090,7 +1090,7 @@ impl S3 for FS { } /// Delete multiple objects - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn delete_objects(&self, req: S3Request) -> S3Result> { let DeleteObjectsInput { bucket, delete, .. 
} = req.input; @@ -1329,7 +1329,7 @@ impl S3 for FS { let event_args = rustfs_notify::event::EventArgs { event_name, bucket_name: bucket.clone(), - object: rustfs_ecstore::store_api::ObjectInfo { + object: ObjectInfo { name: dobj.object_name, bucket: bucket.clone(), ..Default::default() @@ -1350,7 +1350,7 @@ impl S3 for FS { } /// Get bucket location - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn get_bucket_location(&self, req: S3Request) -> S3Result> { // mc get 1 let input = req.input; @@ -1375,7 +1375,7 @@ impl S3 for FS { } /// Get bucket notification - #[tracing::instrument( + #[instrument( level = "debug", skip(self, req), fields(start_time=?time::OffsetDateTime::now_utc()) @@ -1435,9 +1435,9 @@ impl S3 for FS { .map_err(ApiError::from)?; let info = reader.object_info; - tracing::debug!(object_size = info.size, part_count = info.parts.len(), "GET object metadata snapshot"); + debug!(object_size = info.size, part_count = info.parts.len(), "GET object metadata snapshot"); for part in &info.parts { - tracing::debug!( + debug!( part_number = part.number, part_size = part.size, part_actual_size = part.actual_size, @@ -1487,7 +1487,7 @@ impl S3 for FS { let mut managed_encryption_applied = false; let mut managed_original_size: Option = None; - tracing::debug!( + debug!( "GET object metadata check: stored_sse_algorithm={:?}, stored_sse_key_md5={:?}, provided_sse_key={:?}", stored_sse_algorithm, stored_sse_key_md5, @@ -1501,20 +1501,16 @@ impl S3 for FS { // Each part needs to be decrypted individually, which requires storage layer changes // Note: Single part objects also have info.parts.len() == 1, but they are not true multipart uploads if info.parts.len() > 1 { - tracing::warn!( + warn!( "SSE-C multipart object detected with {} parts. 
Currently, multipart SSE-C upload parts are not encrypted during upload_part, so no decryption is needed during GET.", info.parts.len() ); // Verify that the provided key MD5 matches the stored MD5 for security if let Some(stored_md5) = stored_sse_key_md5 { - tracing::debug!("SSE-C MD5 comparison: provided='{}', stored='{}'", sse_key_md5_provided, stored_md5); + debug!("SSE-C MD5 comparison: provided='{}', stored='{}'", sse_key_md5_provided, stored_md5); if sse_key_md5_provided != stored_md5 { - tracing::error!( - "SSE-C key MD5 mismatch: provided='{}', stored='{}'", - sse_key_md5_provided, - stored_md5 - ); + error!("SSE-C key MD5 mismatch: provided='{}', stored='{}'", sse_key_md5_provided, stored_md5); return Err( ApiError::from(StorageError::other("SSE-C key does not match object encryption key")).into() ); @@ -1532,13 +1528,9 @@ impl S3 for FS { } else { // Verify that the provided key MD5 matches the stored MD5 if let Some(stored_md5) = stored_sse_key_md5 { - tracing::debug!("SSE-C MD5 comparison: provided='{}', stored='{}'", sse_key_md5_provided, stored_md5); + debug!("SSE-C MD5 comparison: provided='{}', stored='{}'", sse_key_md5_provided, stored_md5); if sse_key_md5_provided != stored_md5 { - tracing::error!( - "SSE-C key MD5 mismatch: provided='{}', stored='{}'", - sse_key_md5_provided, - stored_md5 - ); + error!("SSE-C key MD5 mismatch: provided='{}', stored='{}'", sse_key_md5_provided, stored_md5); return Err( ApiError::from(StorageError::other("SSE-C key does not match object encryption key")).into() ); @@ -1613,14 +1605,13 @@ impl S3 for FS { let response_content_length = if stored_sse_algorithm.is_some() { if let Some(original_size_str) = info.user_defined.get("x-amz-server-side-encryption-customer-original-size") { let original_size = original_size_str.parse::().unwrap_or(content_length); - tracing::info!( + info!( "SSE-C decryption: using original size {} instead of encrypted size {}", - original_size, - content_length + original_size, content_length ); original_size } else { - tracing::debug!("SSE-C decryption: no original size found, using content_length {}", content_length); + debug!("SSE-C decryption: no original size found, using content_length {}", content_length); content_length } } else if managed_encryption_applied { @@ -1629,7 +1620,7 @@ impl S3 for FS { content_length }; - tracing::info!("Final response_content_length: {}", response_content_length); + info!("Final response_content_length: {}", response_content_length); if stored_sse_algorithm.is_some() || managed_encryption_applied { let limit_reader = HardLimitReader::new(Box::new(WarpReader::new(final_stream)), response_content_length); @@ -1639,7 +1630,7 @@ impl S3 for FS { // For SSE-C encrypted objects, don't use bytes_stream to limit the stream // because DecryptReader needs to read all encrypted data to produce decrypted output let body = if stored_sse_algorithm.is_some() || managed_encryption_applied { - tracing::info!("Managed SSE: Using unlimited stream for decryption"); + info!("Managed SSE: Using unlimited stream for decryption"); Some(StreamingBlob::wrap(ReaderStream::with_capacity(final_stream, DEFAULT_READ_BUFFER_SIZE))) } else { Some(StreamingBlob::wrap(bytes_stream( @@ -1702,7 +1693,7 @@ impl S3 for FS { Ok(S3Response::new(output)) } - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn head_bucket(&self, req: S3Request) -> S3Result> { let input = req.input; @@ -1719,7 +1710,7 @@ impl S3 for FS { 
Ok(S3Response::new(HeadBucketOutput::default())) } - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn head_object(&self, req: S3Request) -> S3Result> { // mc get 2 let HeadObjectInput { @@ -1838,7 +1829,7 @@ impl S3 for FS { Ok(S3Response::new(output)) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn list_buckets(&self, req: S3Request) -> S3Result> { // mc ls @@ -1893,7 +1884,7 @@ impl S3 for FS { Ok(S3Response::new(output)) } - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn list_objects(&self, req: S3Request) -> S3Result> { let v2_resp = self.list_objects_v2(req.map_input(Into::into)).await?; @@ -1909,7 +1900,7 @@ impl S3 for FS { })) } - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn list_objects_v2(&self, req: S3Request) -> S3Result> { // warn!("list_objects_v2 req {:?}", &req.input); let ListObjectsV2Input { @@ -2082,7 +2073,7 @@ impl S3 for FS { Ok(S3Response::new(output)) } - // #[tracing::instrument(level = "debug", skip(self, req))] + // #[instrument(level = "debug", skip(self, req))] async fn put_object(&self, req: S3Request) -> S3Result> { if req .headers @@ -2143,17 +2134,17 @@ impl S3 for FS { // TDD: Get bucket default encryption configuration let bucket_sse_config = metadata_sys::get_sse_config(&bucket).await.ok(); - tracing::debug!("TDD: bucket_sse_config={:?}", bucket_sse_config); + debug!("TDD: bucket_sse_config={:?}", bucket_sse_config); // TDD: Determine effective encryption configuration (request overrides bucket default) let original_sse = server_side_encryption.clone(); let effective_sse = server_side_encryption.or_else(|| { bucket_sse_config.as_ref().and_then(|(config, _timestamp)| { - tracing::debug!("TDD: Processing bucket SSE config: {:?}", config); + debug!("TDD: Processing bucket SSE config: {:?}", config); config.rules.first().and_then(|rule| { - tracing::debug!("TDD: Processing SSE rule: {:?}", rule); + debug!("TDD: Processing SSE rule: {:?}", rule); rule.apply_server_side_encryption_by_default.as_ref().map(|sse| { - tracing::debug!("TDD: Found SSE default: {:?}", sse); + debug!("TDD: Found SSE default: {:?}", sse); match sse.sse_algorithm.as_str() { "AES256" => ServerSideEncryption::from_static(ServerSideEncryption::AES256), "aws:kms" => ServerSideEncryption::from_static(ServerSideEncryption::AWS_KMS), @@ -2163,7 +2154,7 @@ impl S3 for FS { }) }) }); - tracing::debug!("TDD: effective_sse={:?} (original={:?})", effective_sse, original_sse); + debug!("TDD: effective_sse={:?} (original={:?})", effective_sse, original_sse); let mut effective_kms_key_id = ssekms_key_id.or_else(|| { bucket_sse_config.as_ref().and_then(|(config, _timestamp)| { @@ -2356,7 +2347,7 @@ impl S3 for FS { Ok(S3Response::new(output)) } - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn create_multipart_upload( &self, req: S3Request, @@ -2397,17 +2388,17 @@ impl S3 for FS { // TDD: Get bucket SSE configuration for multipart upload let bucket_sse_config = metadata_sys::get_sse_config(&bucket).await.ok(); - tracing::debug!("TDD: Got bucket SSE config for multipart: {:?}", bucket_sse_config); + debug!("TDD: Got bucket SSE config for multipart: {:?}", bucket_sse_config); // TDD: Determine effective encryption (request parameters override bucket defaults) let 
original_sse = server_side_encryption.clone(); let effective_sse = server_side_encryption.or_else(|| { bucket_sse_config.as_ref().and_then(|(config, _timestamp)| { - tracing::debug!("TDD: Processing bucket SSE config for multipart: {:?}", config); + debug!("TDD: Processing bucket SSE config for multipart: {:?}", config); config.rules.first().and_then(|rule| { - tracing::debug!("TDD: Processing SSE rule for multipart: {:?}", rule); + debug!("TDD: Processing SSE rule for multipart: {:?}", rule); rule.apply_server_side_encryption_by_default.as_ref().map(|sse| { - tracing::debug!("TDD: Found SSE default for multipart: {:?}", sse); + debug!("TDD: Found SSE default for multipart: {:?}", sse); match sse.sse_algorithm.as_str() { "AES256" => ServerSideEncryption::from_static(ServerSideEncryption::AES256), "aws:kms" => ServerSideEncryption::from_static(ServerSideEncryption::AWS_KMS), @@ -2417,7 +2408,7 @@ impl S3 for FS { }) }) }); - tracing::debug!("TDD: effective_sse for multipart={:?} (original={:?})", effective_sse, original_sse); + debug!("TDD: effective_sse for multipart={:?} (original={:?})", effective_sse, original_sse); let _original_kms_key_id = ssekms_key_id.clone(); let mut effective_kms_key_id = ssekms_key_id.or_else(|| { @@ -2496,7 +2487,7 @@ impl S3 for FS { let event_args = rustfs_notify::event::EventArgs { event_name: EventName::ObjectCreatedCompleteMultipartUpload, bucket_name: bucket_name.clone(), - object: rustfs_ecstore::store_api::ObjectInfo { + object: ObjectInfo { name: object_name, bucket: bucket_name, ..Default::default() @@ -2516,7 +2507,7 @@ impl S3 for FS { Ok(S3Response::new(output)) } - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn upload_part(&self, req: S3Request) -> S3Result> { let UploadPartInput { body, @@ -2582,7 +2573,7 @@ impl S3 for FS { .await? 
.is_some(); - // If managed encryption will be applied and we have Content-Length, buffer the entire body + // If managed encryption will be applied, and we have Content-Length, buffer the entire body // This is necessary because encryption changes the data size, which causes Content-Length mismatches if will_apply_managed_encryption && size.is_some() { let mut total = 0i64; @@ -2687,7 +2678,7 @@ impl S3 for FS { Ok(S3Response::new(output)) } - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn upload_part_copy(&self, req: S3Request) -> S3Result> { let UploadPartCopyInput { bucket, @@ -2880,7 +2871,7 @@ impl S3 for FS { Ok(S3Response::new(output)) } - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn list_parts(&self, req: S3Request) -> S3Result> { let ListPartsInput { bucket, @@ -3013,7 +3004,7 @@ impl S3 for FS { Ok(S3Response::new(output)) } - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn complete_multipart_upload( &self, req: S3Request, @@ -3044,26 +3035,24 @@ impl S3 for FS { }; // TDD: Get multipart info to extract encryption configuration before completing - tracing::info!( + info!( "TDD: Attempting to get multipart info for bucket={}, key={}, upload_id={}", - bucket, - key, - upload_id + bucket, key, upload_id ); let multipart_info = store .get_multipart_info(&bucket, &key, &upload_id, &ObjectOptions::default()) .await .map_err(ApiError::from)?; - tracing::info!("TDD: Got multipart info successfully"); - tracing::info!("TDD: Multipart info metadata: {:?}", multipart_info.user_defined); + info!("TDD: Got multipart info successfully"); + info!("TDD: Multipart info metadata: {:?}", multipart_info.user_defined); // TDD: Extract encryption information from multipart upload metadata let server_side_encryption = multipart_info .user_defined .get("x-amz-server-side-encryption") .map(|s| ServerSideEncryption::from(s.clone())); - tracing::info!( + info!( "TDD: Raw encryption from metadata: {:?} -> parsed: {:?}", multipart_info.user_defined.get("x-amz-server-side-encryption"), server_side_encryption @@ -3074,10 +3063,9 @@ impl S3 for FS { .get("x-amz-server-side-encryption-aws-kms-key-id") .cloned(); - tracing::info!( + info!( "TDD: Extracted encryption info - SSE: {:?}, KMS Key: {:?}", - server_side_encryption, - ssekms_key_id + server_side_encryption, ssekms_key_id ); let obj_info = store @@ -3086,10 +3074,9 @@ impl S3 for FS { .await .map_err(ApiError::from)?; - tracing::info!( + info!( "TDD: Creating output with SSE: {:?}, KMS Key: {:?}", - server_side_encryption, - ssekms_key_id + server_side_encryption, ssekms_key_id ); let output = CompleteMultipartUploadOutput { bucket: Some(bucket.clone()), @@ -3100,10 +3087,9 @@ impl S3 for FS { ssekms_key_id, // TDD: Return KMS key ID if present ..Default::default() }; - tracing::info!( + info!( "TDD: Created output: SSE={:?}, KMS={:?}", - output.server_side_encryption, - output.ssekms_key_id + output.server_side_encryption, output.ssekms_key_id ); let mt2 = HashMap::new(); @@ -3116,15 +3102,14 @@ impl S3 for FS { warn!("need multipart replication"); schedule_replication(obj_info, store, dsc, ReplicationType::Object).await; } - tracing::info!( + info!( "TDD: About to return S3Response with output: SSE={:?}, KMS={:?}", - output.server_side_encryption, - output.ssekms_key_id + output.server_side_encryption, output.ssekms_key_id ); 
Ok(S3Response::new(output)) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn abort_multipart_upload( &self, req: S3Request, @@ -3146,7 +3131,7 @@ impl S3 for FS { Ok(S3Response::new(AbortMultipartUploadOutput { ..Default::default() })) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn get_bucket_tagging(&self, req: S3Request) -> S3Result> { let bucket = req.input.bucket.clone(); // check bucket exists. @@ -3169,7 +3154,7 @@ impl S3 for FS { Ok(S3Response::new(GetBucketTaggingOutput { tag_set })) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn put_bucket_tagging(&self, req: S3Request) -> S3Result> { let PutBucketTaggingInput { bucket, tagging, .. } = req.input; @@ -3191,7 +3176,7 @@ impl S3 for FS { Ok(S3Response::new(Default::default())) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn delete_bucket_tagging( &self, req: S3Request, @@ -3205,7 +3190,7 @@ impl S3 for FS { Ok(S3Response::new(DeleteBucketTaggingOutput {})) } - #[tracing::instrument(level = "debug", skip(self, req))] + #[instrument(level = "debug", skip(self, req))] async fn put_object_tagging(&self, req: S3Request) -> S3Result> { let PutObjectTaggingInput { bucket, @@ -3270,7 +3255,7 @@ impl S3 for FS { let event_args = rustfs_notify::event::EventArgs { event_name: EventName::ObjectCreatedPutTagging, bucket_name: bucket.clone(), - object: rustfs_ecstore::store_api::ObjectInfo { + object: ObjectInfo { name: object.clone(), bucket, ..Default::default() @@ -3290,7 +3275,7 @@ impl S3 for FS { Ok(S3Response::new(PutObjectTaggingOutput { version_id: None })) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn get_object_tagging(&self, req: S3Request) -> S3Result> { let GetObjectTaggingInput { bucket, key: object, .. 
} = req.input; @@ -3312,7 +3297,7 @@ impl S3 for FS { })) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn delete_object_tagging( &self, req: S3Request, @@ -3337,7 +3322,7 @@ impl S3 for FS { let event_args = rustfs_notify::event::EventArgs { event_name: EventName::ObjectCreatedDeleteTagging, bucket_name: bucket.clone(), - object: rustfs_ecstore::store_api::ObjectInfo { + object: ObjectInfo { name: object.clone(), bucket, ..Default::default() @@ -3357,7 +3342,7 @@ impl S3 for FS { Ok(S3Response::new(DeleteObjectTaggingOutput { version_id: None })) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn get_bucket_versioning( &self, req: S3Request, @@ -3380,7 +3365,7 @@ impl S3 for FS { })) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn put_bucket_versioning( &self, req: S3Request, @@ -3537,7 +3522,7 @@ impl S3 for FS { Ok(S3Response::new(DeleteBucketPolicyOutput {})) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn get_bucket_lifecycle_configuration( &self, req: S3Request, @@ -3570,7 +3555,7 @@ impl S3 for FS { })) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn put_bucket_lifecycle_configuration( &self, req: S3Request, @@ -3604,7 +3589,7 @@ impl S3 for FS { Ok(S3Response::new(PutBucketLifecycleConfigurationOutput::default())) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn delete_bucket_lifecycle( &self, req: S3Request, @@ -3709,7 +3694,7 @@ impl S3 for FS { Ok(S3Response::new(DeleteBucketEncryptionOutput::default())) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn get_object_lock_configuration( &self, req: S3Request, @@ -3731,7 +3716,7 @@ impl S3 for FS { })) } - #[tracing::instrument(level = "debug", skip(self))] + #[instrument(level = "debug", skip(self))] async fn put_object_lock_configuration( &self, req: S3Request, @@ -3777,7 +3762,7 @@ impl S3 for FS { .await .map_err(ApiError::from)?; - let rcfg = match metadata_sys::get_replication_config(&bucket).await { + let rcfg = match get_replication_config(&bucket).await { Ok((cfg, _created)) => Some(cfg), Err(err) => { error!("get_replication_config err {:?}", err); @@ -4107,7 +4092,7 @@ impl S3 for FS { let event_args = rustfs_notify::event::EventArgs { event_name: EventName::ObjectAccessedAttributes, bucket_name: bucket.clone(), - object: rustfs_ecstore::store_api::ObjectInfo { + object: ObjectInfo { name: key.clone(), bucket, ..Default::default() diff --git a/scripts/run.sh b/scripts/run.sh index f2a98634..eb1a01fa 100755 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -68,11 +68,6 @@ export RUSTFS_OBS_LOG_POOL_CAPA=10240 export RUSTFS_OBS_LOG_MESSAGE_CAPA=32768 export RUSTFS_OBS_LOG_FLUSH_MS=300 -export RUSTFS_SINKS_FILE_PATH="$current_dir/deploy/logs" -export RUSTFS_SINKS_FILE_BUFFER_SIZE=12 -export RUSTFS_SINKS_FILE_FLUSH_INTERVAL_MS=1000 -export RUSTFS_SINKS_FILE_FLUSH_THRESHOLD=100 - #tokio runtime export RUSTFS_RUNTIME_WORKER_THREADS=16 export RUSTFS_RUNTIME_MAX_BLOCKING_THREADS=1024 @@ -82,21 +77,6 @@ export RUSTFS_RUNTIME_THREAD_STACK_SIZE=1024*1024 export RUSTFS_RUNTIME_THREAD_KEEP_ALIVE=60 export RUSTFS_RUNTIME_GLOBAL_QUEUE_INTERVAL=31 -# -# Kafka sink 配置 -#export RUSTFS_SINKS_KAFKA_BROKERS=localhost:9092 
-#export RUSTFS_SINKS_KAFKA_TOPIC=logs
-#export RUSTFS_SINKS_KAFKA_BATCH_SIZE=100
-#export RUSTFS_SINKS_KAFKA_BATCH_TIMEOUT_MS=1000
-#
-# Webhook sink 配置
-#export RUSTFS_SINKS_WEBHOOK_ENDPOINT=http://localhost:8080/webhook
-#export RUSTFS_SINKS_WEBHOOK_AUTH_TOKEN=you-auth-token
-#export RUSTFS_SINKS_WEBHOOK_BATCH_SIZE=100
-#export RUSTFS_SINKS_WEBHOOK_BATCH_TIMEOUT_MS=1000
-#
-#export RUSTFS_LOGGER_QUEUE_CAPACITY=10
-
 export OTEL_INSTRUMENTATION_NAME="rustfs"
 export OTEL_INSTRUMENTATION_VERSION="0.1.1"
 export OTEL_INSTRUMENTATION_SCHEMA_URL="https://opentelemetry.io/schemas/1.31.0"
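
Note (not part of the patch): the SSE-C handling changed above hinges on comparing the MD5 of the customer-supplied key against the digest stored with the object. A minimal standalone sketch of that check, assuming the RustCrypto md-5 crate and the base64 crate; the helper name and signature are hypothetical and are not code from this repository:

    use base64::{Engine as _, engine::general_purpose::STANDARD as B64};
    use md5::{Digest, Md5};

    // Hypothetical helper: check the customer-provided SSE-C key against the
    // MD5 digest recorded in the object's metadata.
    fn verify_sse_c_key(key_b64: &str, provided_md5_b64: &str, stored_md5_b64: &str) -> Result<(), String> {
        // Decode the base64 key from x-amz-server-side-encryption-customer-key.
        let key = B64.decode(key_b64).map_err(|e| format!("invalid key encoding: {e}"))?;
        if key.len() != 32 {
            return Err("SSE-C key must be 256 bits".to_string());
        }
        // Recompute the key's MD5 and compare it with the value sent by the client.
        let computed_md5 = B64.encode(Md5::digest(&key));
        if computed_md5 != provided_md5_b64 {
            return Err("provided key MD5 does not match the key".to_string());
        }
        // The client's digest must also match the digest stored when the object was written.
        if provided_md5_b64 != stored_md5_b64 {
            return Err("SSE-C key does not match object encryption key".to_string());
        }
        Ok(())
    }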
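
Similarly, both put_object and create_multipart_upload resolve the effective encryption by letting a request-level SSE header override the bucket's default-encryption rule. A simplified sketch of that precedence, using stand-in types rather than the real s3s and ecstore structures:

    // Stand-in types; the real code uses the s3s ServerSideEncryption type and
    // the bucket encryption configuration from metadata_sys::get_sse_config.
    #[derive(Clone, Debug, PartialEq)]
    enum Sse {
        Aes256,
        AwsKms,
    }

    struct DefaultEncryptionRule {
        sse_algorithm: String,
    }

    // A request-level SSE setting wins; otherwise fall back to the bucket's
    // default-encryption rule, mirroring the or_else chain in the diff.
    fn effective_sse(request_sse: Option<Sse>, bucket_rule: Option<&DefaultEncryptionRule>) -> Option<Sse> {
        request_sse.or_else(|| {
            bucket_rule.map(|rule| match rule.sse_algorithm.as_str() {
                "aws:kms" => Sse::AwsKms,
                // "AES256" and any unrecognised value fall back to AES256, as in the diff.
                _ => Sse::Aes256,
            })
        })
    }

    fn main() {
        // With no SSE header on the request, the bucket default applies.
        let rule = DefaultEncryptionRule { sse_algorithm: "AES256".to_string() };
        assert_eq!(effective_sse(None, Some(&rule)), Some(Sse::Aes256));
        // An explicit aws:kms request overrides the bucket default.
        assert_eq!(effective_sse(Some(Sse::AwsKms), Some(&rule)), Some(Sse::AwsKms));
    }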
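
Finally, complete_multipart_upload reads the SSE algorithm and KMS key id back out of the upload's user-defined metadata so the response can echo them. A stripped-down sketch of that lookup; the metadata keys are the ones used in the diff, while the function itself is purely illustrative:

    use std::collections::HashMap;

    // Illustrative only: pull the SSE algorithm and KMS key id out of the
    // multipart upload's user-defined metadata so CompleteMultipartUpload
    // can return them to the client.
    fn encryption_from_metadata(user_defined: &HashMap<String, String>) -> (Option<String>, Option<String>) {
        let sse_algorithm = user_defined.get("x-amz-server-side-encryption").cloned();
        let kms_key_id = user_defined
            .get("x-amz-server-side-encryption-aws-kms-key-id")
            .cloned();
        (sse_algorithm, kms_key_id)
    }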