diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 50cdb35d..c914e2f0 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -26,6 +26,9 @@ updates: day: "monday" timezone: "Asia/Shanghai" time: "08:00" + ignore: + - dependency-name: "object_store" + versions: [ "0.13.x" ] groups: s3s: update-types: @@ -36,4 +39,4 @@ updates: - "s3s-*" dependencies: patterns: - - "*" + - "*" \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 722b5c2b..4985e2fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -231,15 +231,6 @@ dependencies = [ "object 0.32.2", ] -[[package]] -name = "arbitrary" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" -dependencies = [ - "derive_arbitrary", -] - [[package]] name = "arc-swap" version = "1.8.0" @@ -577,20 +568,14 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.19" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06575e6a9673580f52661c92107baabffbf41e2141373441cbcdc47cb733003c" +checksum = "d10e4f991a553474232bc0a31799f6d24b034a84c0971d80d2e2f78b2e576e40" dependencies = [ - "brotli 7.0.0", - "bzip2 0.5.2", - "flate2", - "futures-core", - "memchr", + "compression-codecs", + "compression-core", "pin-project-lite", "tokio", - "xz2", - "zstd", - "zstd-safe", ] [[package]] @@ -702,9 +687,9 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.15.2" +version = "1.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88aab2464f1f25453baa7a07c84c5b7684e274054ba06817f382357f77a288" +checksum = "e84ce723ab67259cfeb9877c6a639ee9eb7a27b28123abd71db7f0d5d0cc9d86" dependencies = [ "aws-lc-sys", "untrusted 0.7.1", @@ -713,9 +698,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "b45afffdee1e7c9126814751f88dddc747f41d91da16c9551a0f1e8a11e788a1" +checksum = "43a442ece363113bd4bd4c8b18977a7798dd4d3c3383f34fb61936960e8f4ad8" dependencies = [ "cc", "cmake", @@ -890,9 +875,9 @@ dependencies = [ [[package]] name = "aws-smithy-checksums" -version = "0.63.12" +version = "0.63.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87294a084b43d649d967efe58aa1f9e0adc260e13a6938eb904c0ae9b45824ae" +checksum = "23374b9170cbbcc6f5df8dc5ebb9b6c5c28a3c8f599f0e8b8b10eb6f4a5c6e74" dependencies = [ "aws-smithy-http", "aws-smithy-types", @@ -976,9 +961,9 @@ dependencies = [ [[package]] name = "aws-smithy-observability" -version = "0.1.5" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f616c3f2260612fe44cede278bafa18e73e6479c4e393e2c4518cf2a9a228a" +checksum = "ef1fcbefc7ece1d70dcce29e490f269695dfca2d2bacdeaf9e5c3f799e4e6a42" dependencies = [ "aws-smithy-runtime-api", ] @@ -995,9 +980,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.9.5" +version = "1.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a392db6c583ea4a912538afb86b7be7c5d8887d91604f50eb55c262ee1b4a5f5" +checksum = "bb5b6167fcdf47399024e81ac08e795180c576a20e4d4ce67949f9a88ae37dc1" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -1019,9 +1004,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.9.3" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab0d43d899f9e508300e587bf582ba54c27a452dd0a9ea294690669138ae14a2" +checksum = "efce7aaaf59ad53c5412f14fc19b2d5c6ab2c3ec688d272fd31f76ec12f44fb0" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -1036,9 +1021,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.3.5" +version = "1.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"905cb13a9895626d49cf2ced759b062d913834c7482c38e49557eac4e6193f01" +checksum = "65f172bcb02424eb94425db8aed1b6d583b5104d4d5ddddf22402c661a320048" dependencies = [ "base64-simd", "bytes", @@ -1208,9 +1193,9 @@ dependencies = [ [[package]] name = "base64ct" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d809780667f4410e7c41b07f52439b94d2bdf8528eeedc287fa38d3b7f95d82" +checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" [[package]] name = "bcrypt-pbkdf" @@ -1332,42 +1317,6 @@ dependencies = [ "cipher 0.4.4", ] -[[package]] -name = "bon" -version = "3.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234655ec178edd82b891e262ea7cf71f6584bcd09eff94db786be23f1821825c" -dependencies = [ - "bon-macros", - "rustversion", -] - -[[package]] -name = "bon-macros" -version = "3.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ec27229c38ed0eb3c0feee3d2c1d6a4379ae44f418a29a658890e062d8f365" -dependencies = [ - "darling 0.23.0", - "ident_case", - "prettyplease", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.114", -] - -[[package]] -name = "brotli" -version = "7.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc97b8f16f944bba54f0433f07e30be199b6dc2bd25937444bbad560bcea29bd" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor 4.0.3", -] - [[package]] name = "brotli" version = "8.0.2" @@ -1376,17 +1325,7 @@ checksum = "4bd8b9603c7aa97359dbd97ecf258968c95f3adddd6db2f7e7a5bef101c84560" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", - "brotli-decompressor 5.0.0", -] - -[[package]] -name = "brotli-decompressor" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a334ef7c9e23abf0ce748e8cd309037da93e606ad52eb372e4ce327a0dcfbdfd" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", + 
"brotli-decompressor", ] [[package]] @@ -1451,15 +1390,6 @@ dependencies = [ "bytes", ] -[[package]] -name = "bzip2" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49ecfb22d906f800d4fe833b6282cf4dc1c298f5057ca0b5445e5c209735ca47" -dependencies = [ - "bzip2-sys", -] - [[package]] name = "bzip2" version = "0.6.1" @@ -1469,16 +1399,6 @@ dependencies = [ "libbz2-rs-sys", ] -[[package]] -name = "bzip2-sys" -version = "0.1.13+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" -dependencies = [ - "cc", - "pkg-config", -] - [[package]] name = "camino" version = "1.2.2" @@ -1589,9 +1509,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" dependencies = [ "iana-time-zone", "js-sys", @@ -1696,9 +1616,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" +checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" [[package]] name = "cmake" @@ -1711,9 +1631,9 @@ dependencies = [ [[package]] name = "cmov" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c11ed919bd3bae4af5ab56372b627dfc32622aba6cec36906e8ab46746037c9d" +checksum = "360a5d5b750cd7fb97d5ead6e6e0ef0b288d3c2464a189f04f38670e268842ed" [[package]] name = "colorchoice" @@ -1723,14 +1643,36 @@ checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "comfy-table" -version = "7.2.1" +version = "7.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03b7db8e0b4b2fdad6c551e634134e99ec000e5c8c3b6856c65e8bbaded7a3b" +checksum = "958c5d6ecf1f214b4c2bbbbf6ab9523a864bd136dcf71a7e8904799acfe1ad47" dependencies = [ "unicode-segmentation", "unicode-width", ] +[[package]] +name = "compression-codecs" +version = "0.4.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00828ba6fd27b45a448e57dbfe84f1029d4c9f26b368157e9a448a5f49a2ec2a" +dependencies = [ + "brotli", + "bzip2", + "compression-core", + "flate2", + "liblzma", + "memchr", + "zstd", + "zstd-safe", +] + +[[package]] +name = "compression-core" +version = "0.4.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d" + [[package]] name = "concurrent-queue" version = "2.5.0" @@ -1905,15 +1847,14 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc-fast" -version = "1.6.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ddc2d09feefeee8bd78101665bd8645637828fa9317f9f292496dbbd8c65ff3" +checksum = "2fd92aca2c6001b1bf5ba0ff84ee74ec8501b52bbef0cac80bf25a6c1d87a83d" dependencies = [ "crc", "digest 0.10.7", - "rand 0.9.2", - "regex", "rustversion", + "spin 0.10.0", ] [[package]] @@ -2044,9 +1985,9 @@ dependencies = [ [[package]] name = "crypto-bigint" -version = "0.7.0-rc.15" +version = "0.7.0-rc.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a9e36ac79ac44866b74e08a0b4925f97b984e3fff17680d2c6fbce8317ab0f6" +checksum = "37387ceb32048ff590f2cbd24d8b05fffe63c3f69a5cfa089d4f722ca4385a19" dependencies = [ "ctutils", "num-traits", @@ -2078,11 +2019,11 @@ dependencies = [ [[package]] name = "crypto-primes" -version = "0.7.0-pre.5" +version = "0.7.0-pre.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"da0b07a7a616370e8b6efca0c6a25e5f4c6d02fde11f3d570e4af64d8ed7e2e9" +checksum = "e79c98a281f9441200b24e3151407a629bfbe720399186e50516da939195e482" dependencies = [ - "crypto-bigint 0.7.0-rc.15", + "crypto-bigint 0.7.0-rc.18", "libm", "rand_core 0.10.0-rc-3", ] @@ -2153,9 +2094,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "5.0.0-pre.2" +version = "5.0.0-pre.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d8cfa313d59919eda35b420bd37db85bf58d6754d6f128b9949932b0c0fcce7" +checksum = "6ae8b2fe5e4995d7fd08a7604e794dc569a65ed19659f5939d529813ed816d38" dependencies = [ "cfg-if", "cpufeatures", @@ -2338,15 +2279,15 @@ checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" [[package]] name = "datafusion" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ba7cb113e9c0bedf9e9765926031e132fa05a1b09ba6e93a6d1a4d7044457b8" +checksum = "f02e9a7e70f214e5282db11c8effba173f4e25a00977e520c6b811817e3a082b" dependencies = [ "arrow", "arrow-schema", "async-trait", "bytes", - "bzip2 0.6.1", + "bzip2", "chrono", "datafusion-catalog", "datafusion-catalog-listing", @@ -2376,27 +2317,26 @@ dependencies = [ "flate2", "futures", "itertools 0.14.0", + "liblzma", "log", "object_store", "parking_lot", "parquet", "rand 0.9.2", "regex", - "rstest", "sqlparser", "tempfile", "tokio", "url", "uuid", - "xz2", "zstd", ] [[package]] name = "datafusion-catalog" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a3a799f914a59b1ea343906a0486f17061f39509af74e874a866428951130d" +checksum = "f3e91b2603f906cf8cb8be84ba4e34f9d8fe6dbdfdd6916d55f22317074d1fdf" dependencies = [ "arrow", "async-trait", @@ -2419,9 +2359,9 @@ dependencies = [ [[package]] name = "datafusion-catalog-listing" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6db1b113c80d7a0febcd901476a57aef378e717c54517a163ed51417d87621b0" +checksum = "919d20cdebddee4d8dca651aa0291a44c8104824d1ac288996a325c319ce31ba" dependencies = [ "arrow", "async-trait", @@ -2438,21 +2378,20 @@ dependencies = [ "itertools 0.14.0", "log", "object_store", - "tokio", ] [[package]] name = "datafusion-common" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c10f7659e96127d25e8366be7c8be4109595d6a2c3eac70421f380a7006a1b0" +checksum = "31ff2c4e95be40ad954de93862167b165a6fb49248bb882dea8aef4f888bc767" dependencies = [ "ahash", "arrow", "arrow-ipc", "chrono", "half", - "hashbrown 0.14.5", + "hashbrown 0.16.1", "indexmap 2.13.0", "libc", "log", @@ -2467,9 +2406,9 @@ dependencies = [ [[package]] name = "datafusion-common-runtime" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b92065bbc6532c6651e2f7dd30b55cba0c7a14f860c7e1d15f165c41a1868d95" +checksum = "0dd9f820fe58c2600b6c33a14432228dbaaf233b96c83a1fd61f16d073d5c3c5" dependencies = [ "futures", "log", @@ -2478,15 +2417,15 @@ dependencies = [ [[package]] name = "datafusion-datasource" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fde13794244bc7581cd82f6fff217068ed79cdc344cafe4ab2c3a1c3510b38d6" +checksum = "86b32b7b12645805d20b70aba6ba846cd262d7b073f7f617640c3294af108d44" dependencies = [ "arrow", "async-compression", "async-trait", "bytes", - "bzip2 0.6.1", + "bzip2", "chrono", "datafusion-common", "datafusion-common-runtime", @@ -2501,21 +2440,21 @@ dependencies = [ "futures", "glob", "itertools 0.14.0", + "liblzma", "log", "object_store", "rand 0.9.2", "tokio", "tokio-util", "url", - "xz2", "zstd", ] [[package]] name = "datafusion-datasource-arrow" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"804fa9b4ecf3157982021770617200ef7c1b2979d57bec9044748314775a9aea" +checksum = "597695c8ebb723ee927b286139d43a3fbed6de7ad9210bd1a9fed5c721ac6fb1" dependencies = [ "arrow", "arrow-ipc", @@ -2537,9 +2476,9 @@ dependencies = [ [[package]] name = "datafusion-datasource-csv" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a1641a40b259bab38131c5e6f48fac0717bedb7dc93690e604142a849e0568" +checksum = "6bb493d07d8da6d00a89ea9cc3e74a56795076d9faed5ac30284bd9ef37929e9" dependencies = [ "arrow", "async-trait", @@ -2560,9 +2499,9 @@ dependencies = [ [[package]] name = "datafusion-datasource-json" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adeacdb00c1d37271176f8fb6a1d8ce096baba16ea7a4b2671840c5c9c64fe85" +checksum = "5e9806521c4d3632f53b9a664041813c267c670232efa1452ef29faee71c3749" dependencies = [ "arrow", "async-trait", @@ -2582,9 +2521,9 @@ dependencies = [ [[package]] name = "datafusion-datasource-parquet" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43d0b60ffd66f28bfb026565d62b0a6cbc416da09814766a3797bba7d85a3cd9" +checksum = "f6a3ccd48d5034f8461f522114d0e46dfb3a9f0ce01a4d53a721024ace95d60d" dependencies = [ "arrow", "async-trait", @@ -2612,18 +2551,19 @@ dependencies = [ [[package]] name = "datafusion-doc" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b99e13947667b36ad713549237362afb054b2d8f8cc447751e23ec61202db07" +checksum = "ff69a18418e9878d4840f35e2ad7f2a6386beedf192e9f065e628a7295ff5fbf" [[package]] name = "datafusion-execution" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63695643190679037bc946ad46a263b62016931547bf119859c511f7ff2f5178" +checksum = 
"ccbc5e469b35d87c0b115327be83d68356ef9154684d32566315b5c071577e23" dependencies = [ "arrow", "async-trait", + "chrono", "dashmap", "datafusion-common", "datafusion-expr", @@ -2638,9 +2578,9 @@ dependencies = [ [[package]] name = "datafusion-expr" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a4787cbf5feb1ab351f789063398f67654a6df75c4d37d7f637dc96f951a91" +checksum = "81ed3c02a3faf4e09356d5a314471703f440f0a6a14ca6addaf6cfb44ab14de5" dependencies = [ "arrow", "async-trait", @@ -2661,9 +2601,9 @@ dependencies = [ [[package]] name = "datafusion-expr-common" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce2fb1b8c15c9ac45b0863c30b268c69dc9ee7a1ee13ecf5d067738338173dc" +checksum = "1567e60d21c372ca766dc9dde98efabe2b06d98f008d988fed00d93546bf5be7" dependencies = [ "arrow", "datafusion-common", @@ -2674,9 +2614,9 @@ dependencies = [ [[package]] name = "datafusion-functions" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "794a9db7f7b96b3346fc007ff25e994f09b8f0511b4cf7dff651fadfe3ebb28f" +checksum = "c4593538abd95c27eeeb2f86b7ad827cce07d0c474eae9b122f4f9675f8c20ad" dependencies = [ "arrow", "arrow-buffer", @@ -2684,6 +2624,7 @@ dependencies = [ "blake2 0.10.6", "blake3", "chrono", + "chrono-tz", "datafusion-common", "datafusion-doc", "datafusion-execution", @@ -2704,9 +2645,9 @@ dependencies = [ [[package]] name = "datafusion-functions-aggregate" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c25210520a9dcf9c2b2cbbce31ebd4131ef5af7fc60ee92b266dc7d159cb305" +checksum = "f81cdf609f43cd26156934fd81beb7215d60dda40a776c2e1b83d73df69434f2" dependencies = [ "ahash", "arrow", @@ -2725,9 +2666,9 @@ dependencies = [ [[package]] name = "datafusion-functions-aggregate-common" -version = "51.0.0" +version 
= "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f4a66f3b87300bb70f4124b55434d2ae3fe80455f3574701d0348da040b55d" +checksum = "9173f1bcea2ede4a5c23630a48469f06c9db9a408eb5fd140d1ff9a5e0c40ebf" dependencies = [ "ahash", "arrow", @@ -2738,9 +2679,9 @@ dependencies = [ [[package]] name = "datafusion-functions-nested" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae5c06eed03918dc7fe7a9f082a284050f0e9ecf95d72f57712d1496da03b8c4" +checksum = "1d0b9f32e7735a3b94ae8b9596d89080dc63dd139029a91133be370da099490d" dependencies = [ "arrow", "arrow-ord", @@ -2761,9 +2702,9 @@ dependencies = [ [[package]] name = "datafusion-functions-table" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4fed1d71738fbe22e2712d71396db04c25de4111f1ec252b8f4c6d3b25d7f5" +checksum = "57a29e8a6201b3b9fb2be17d88e287c6d427948d64220cd5ea72ced614a1aee5" dependencies = [ "arrow", "async-trait", @@ -2777,9 +2718,9 @@ dependencies = [ [[package]] name = "datafusion-functions-window" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d92206aa5ae21892f1552b4d61758a862a70956e6fd7a95cb85db1de74bc6d1" +checksum = "cd412754964a31c515e5a814e5ce0edaf30f0ea975f3691e800eff115ee76dfb" dependencies = [ "arrow", "datafusion-common", @@ -2795,9 +2736,9 @@ dependencies = [ [[package]] name = "datafusion-functions-window-common" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53ae9bcc39800820d53a22d758b3b8726ff84a5a3e24cecef04ef4e5fdf1c7cc" +checksum = "d49be73a5ac0797398927a543118bd68e58e80bf95ebdabc77336bcd9c38a711" dependencies = [ "datafusion-common", "datafusion-physical-expr-common", @@ -2805,9 +2746,9 @@ dependencies = [ [[package]] name = "datafusion-macros" -version = "51.0.0" +version = "52.0.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1063ad4c9e094b3f798acee16d9a47bd7372d9699be2de21b05c3bd3f34ab848" +checksum = "439ff5489dcac4d34ed7a49a93310c3345018c4469e34726fa471cdda725346d" dependencies = [ "datafusion-doc", "quote", @@ -2816,9 +2757,9 @@ dependencies = [ [[package]] name = "datafusion-optimizer" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35f9ec5d08b87fd1893a30c2929f2559c2f9806ca072d8fefca5009dc0f06a" +checksum = "a80bb7de8ff5a9948799bc7749c292eac5c629385cdb582893ef2d80b6e718c4" dependencies = [ "arrow", "chrono", @@ -2836,9 +2777,9 @@ dependencies = [ [[package]] name = "datafusion-physical-expr" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c30cc8012e9eedcb48bbe112c6eff4ae5ed19cf3003cb0f505662e88b7014c5d" +checksum = "83480008f66691a0047c5a88990bd76b7c1117dd8a49ca79959e214948b81f0a" dependencies = [ "ahash", "arrow", @@ -2848,19 +2789,21 @@ dependencies = [ "datafusion-functions-aggregate-common", "datafusion-physical-expr-common", "half", - "hashbrown 0.14.5", + "hashbrown 0.16.1", "indexmap 2.13.0", "itertools 0.14.0", "parking_lot", "paste", "petgraph", + "recursive", + "tokio", ] [[package]] name = "datafusion-physical-expr-adapter" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9ff2dbd476221b1f67337699eff432781c4e6e1713d2aefdaa517dfbf79768" +checksum = "6b438306446646b359666a658cc29d5494b1e9873bc7a57707689760666fc82c" dependencies = [ "arrow", "datafusion-common", @@ -2873,23 +2816,26 @@ dependencies = [ [[package]] name = "datafusion-physical-expr-common" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90da43e1ec550b172f34c87ec68161986ced70fd05c8d2a2add66eef9c276f03" +checksum = 
"95b1fbf739038e0b313473588331c5bf79985d1b842b9937c1f10b170665cae1" dependencies = [ "ahash", "arrow", + "chrono", "datafusion-common", "datafusion-expr-common", - "hashbrown 0.14.5", + "hashbrown 0.16.1", + "indexmap 2.13.0", "itertools 0.14.0", + "parking_lot", ] [[package]] name = "datafusion-physical-optimizer" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce9804f799acd7daef3be7aaffe77c0033768ed8fdbf5fb82fc4c5f2e6bc14e6" +checksum = "fc4cd3a170faa0f1de04bd4365ccfe309056746dd802ed276e8787ccb8e8a0d4" dependencies = [ "arrow", "datafusion-common", @@ -2906,27 +2852,27 @@ dependencies = [ [[package]] name = "datafusion-physical-plan" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0acf0ad6b6924c6b1aa7d213b181e012e2d3ec0a64ff5b10ee6282ab0f8532ac" +checksum = "a616a72b4ddf550652b36d5a7c0386eac4accea3ffc6c29a7b16c45f237e9882" dependencies = [ "ahash", "arrow", "arrow-ord", "arrow-schema", "async-trait", - "chrono", "datafusion-common", "datafusion-common-runtime", "datafusion-execution", "datafusion-expr", + "datafusion-functions", "datafusion-functions-aggregate-common", "datafusion-functions-window-common", "datafusion-physical-expr", "datafusion-physical-expr-common", "futures", "half", - "hashbrown 0.14.5", + "hashbrown 0.16.1", "indexmap 2.13.0", "itertools 0.14.0", "log", @@ -2937,9 +2883,9 @@ dependencies = [ [[package]] name = "datafusion-pruning" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac2c2498a1f134a9e11a9f5ed202a2a7d7e9774bd9249295593053ea3be999db" +checksum = "4bf4b50be3ab65650452993eda4baf81edb245fb039b8714476b0f4c8801a527" dependencies = [ "arrow", "datafusion-common", @@ -2954,9 +2900,9 @@ dependencies = [ [[package]] name = "datafusion-session" -version = "51.0.0" +version = "52.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f96eebd17555386f459037c65ab73aae8df09f464524c709d6a3134ad4f4776" +checksum = "66e080e2c105284460580c18e751b2133cc306df298181e4349b5b134632811a" dependencies = [ "async-trait", "datafusion-common", @@ -2968,9 +2914,9 @@ dependencies = [ [[package]] name = "datafusion-sql" -version = "51.0.0" +version = "52.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fc195fe60634b2c6ccfd131b487de46dc30eccae8a3c35a13f136e7f440414f" +checksum = "3dac502db772ff9bffc2ceae321963091982e8d5f5dfcb877e8dc66fc9a093cc" dependencies = [ "arrow", "bigdecimal", @@ -3066,17 +3012,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "derive_arbitrary" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.114", -] - [[package]] name = "derive_builder" version = "0.12.0" @@ -3360,11 +3295,11 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "3.0.0-pre.2" +version = "3.0.0-pre.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbef01b6e6a5f913ae480bb34ddd798ce6d358054bebf77177200ec84af61ad5" +checksum = "a4b9f613e0c236c699bf70d39f825594d9b03aadfd8dd856ea40685f782a4ef2" dependencies = [ - "curve25519-dalek 5.0.0-pre.2", + "curve25519-dalek 5.0.0-pre.4", "ed25519 3.0.0-rc.2", "sha2 0.11.0-rc.3", "subtle", @@ -3651,13 +3586,13 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.5" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" dependencies = [ "crc32fast", - "libz-rs-sys", "miniz_oxide", + "zlib-rs", ] [[package]] @@ -3842,12 +3777,6 @@ version = "0.3.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" -[[package]] -name = "futures-timer" -version = "3.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" - [[package]] name = "futures-util" version = "0.3.31" @@ -3973,20 +3902,19 @@ checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "google-cloud-auth" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "590a1c28795779d5da6fda35b149d5271bcddcf2ce1709eae9e9460faf2f2aa9" +checksum = "34f8aadacd3195fc3b08f2a5d582f2401c60d9f1598574acfcfb6228de25db29" dependencies = [ "async-trait", "base64", - "bon", "bytes", "google-cloud-gax", "http 1.4.0", "reqwest", "rustc_version", "rustls", - "rustls-pemfile", + "rustls-pki-types", "serde", "serde_json", "thiserror 2.0.17", @@ -3996,9 +3924,9 @@ dependencies = [ [[package]] name = "google-cloud-gax" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324fb97d35103787e80a33ed41ccc43d947c376d2ece68ca53e860f5844dbe24" +checksum = "b218292363f2e2d6ab8d6da4118acf91cc044439c442d2d6809b581e0728b377" dependencies = [ "base64", "bytes", @@ -4016,9 +3944,9 @@ dependencies = [ [[package]] name = "google-cloud-gax-internal" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75b810886ae872aca68a35ad1d4d5e8f2be39e40238116d8aff9d778f04b38" +checksum = "78125fa0347492177131d30c010e57ddce9bba1504c33be135f5853a9105c277" dependencies = [ "bytes", "futures", @@ -4050,9 +3978,9 @@ dependencies = [ [[package]] name = "google-cloud-iam-v1" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"498a68e2a958e8aa9938f7db2c7147aad1b5a0ff2cd47c5ba4e10cb0dcb5bfc5" +checksum = "f84b431125034e0928e41e8c117bcbc40b0b55b55464b2e964b26e1ffcb15323" dependencies = [ "async-trait", "bytes", @@ -4070,9 +3998,9 @@ dependencies = [ [[package]] name = "google-cloud-longrunning" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c80938e704401a47fdf36b51ec10e1a99b1ec22793d607afd0e67c7b675b8b3" +checksum = "5d0612f4062f42b141b4d050d1a8a2f860e907a548bde28cb82d4fdf0eb346a3" dependencies = [ "async-trait", "bytes", @@ -4117,13 +4045,14 @@ dependencies = [ [[package]] name = "google-cloud-storage" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043be824d1b105bfdce786c720e45cae04e66436f8e5d0168e98ca8e5715ce9f" +checksum = "6abde5d51a4728f47b8f7781d7bf86ab51e310b42ec7c7c96578f1d03da938e4" dependencies = [ "async-trait", "base64", "bytes", + "chrono", "crc32c", "futures", "google-cloud-auth", @@ -4135,6 +4064,7 @@ dependencies = [ "google-cloud-rpc", "google-cloud-type", "google-cloud-wkt", + "hex", "http 1.4.0", "http-body 1.0.1", "hyper", @@ -4155,6 +4085,7 @@ dependencies = [ "tokio-stream", "tonic", "tracing", + "url", "uuid", ] @@ -4260,10 +4191,6 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" -dependencies = [ - "ahash", - "allocator-api2", -] [[package]] name = "hashbrown" @@ -4987,9 +4914,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", @@ -5209,6 +5136,26 @@ dependencies = [ "windows-link 0.2.1", ] 
+[[package]] +name = "liblzma" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73c36d08cad03a3fbe2c4e7bb3a9e84c57e4ee4135ed0b065cade3d98480c648" +dependencies = [ + "liblzma-sys", +] + +[[package]] +name = "liblzma-sys" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01b9596486f6d60c3bbe644c0e1be1aa6ccc472ad630fe8927b456973d7cb736" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + [[package]] name = "libm" version = "0.2.15" @@ -5289,15 +5236,6 @@ dependencies = [ "x509-parser 0.17.0", ] -[[package]] -name = "libz-rs-sys" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c10501e7805cee23da17c7790e59df2870c0d4043ec6d03f67d31e2b53e77415" -dependencies = [ - "zlib-rs", -] - [[package]] name = "linux-raw-sys" version = "0.4.15" @@ -5329,13 +5267,12 @@ dependencies = [ [[package]] name = "local-ip-address" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a60bf300a990b2d1ebdde4228e873e8e4da40d834adbf5265f3da1457ede652" +checksum = "92488bc8a0f99ee9f23577bdd06526d49657df8bd70504c61f812337cdad01ab" dependencies = [ "libc", "neli", - "thiserror 2.0.17", "windows-sys 0.61.2", ] @@ -5403,25 +5340,14 @@ dependencies = [ [[package]] name = "lzma-rust2" -version = "0.15.3" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fa48f5024824ecd3e8282cc948bd46fbd095aed5a98939de0594601a59b4e2b" +checksum = "1670343e58806300d87950e3401e820b519b9384281bbabfb15e3636689ffd69" dependencies = [ "crc", "sha2 0.10.9", ] -[[package]] -name = "lzma-sys" -version = "0.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - [[package]] name = "mappings" version = "0.7.1" @@ -6253,7 
+6179,7 @@ dependencies = [ "arrow-schema", "arrow-select", "base64", - "brotli 8.0.2", + "brotli", "bytes", "chrono", "flate2", @@ -6350,9 +6276,9 @@ dependencies = [ [[package]] name = "pbkdf2" -version = "0.13.0-rc.6" +version = "0.13.0-rc.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb9b101849c3ddab38905781f5aa7ae14ea06e87befaf0e7b003e5d3186250d" +checksum = "eedc1683fe7216d6ce1294e870b994b4418660ad692d55297f631be0b6300666" dependencies = [ "digest 0.11.0-rc.5", "hmac 0.13.0-rc.3", @@ -6753,15 +6679,6 @@ dependencies = [ "elliptic-curve 0.13.8", ] -[[package]] -name = "proc-macro-crate" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" -dependencies = [ - "toml_edit 0.23.10+spec-1.0.0", -] - [[package]] name = "proc-macro-error-attr2" version = "2.0.0" @@ -7101,7 +7018,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -7133,7 +7050,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -7147,9 +7064,9 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" dependencies = [ "getrandom 0.3.4", ] @@ -7316,12 +7233,6 @@ version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" -[[package]] 
-name = "relative-path" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" - [[package]] name = "reqwest" version = "0.12.28" @@ -7469,12 +7380,12 @@ dependencies = [ [[package]] name = "rsa" -version = "0.10.0-rc.11" +version = "0.10.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d27d813937fdf8e9ad15e3e422a55da4021d29639000139ca19d99f3949060da" +checksum = "c9a2b1eacbc34fbaf77f6f1db1385518446008d49b9f9f59dc9d1340fce4ca9e" dependencies = [ "const-oid 0.10.2", - "crypto-bigint 0.7.0-rc.15", + "crypto-bigint 0.7.0-rc.18", "crypto-primes", "digest 0.11.0-rc.5", "pkcs1", @@ -7486,35 +7397,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rstest" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5a3193c063baaa2a95a33f03035c8a72b83d97a54916055ba22d35ed3839d49" -dependencies = [ - "futures-timer", - "futures-util", - "rstest_macros", -] - -[[package]] -name = "rstest_macros" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c845311f0ff7951c5506121a9ad75aec44d083c31583b2ea5a30bcb0b0abba0" -dependencies = [ - "cfg-if", - "glob", - "proc-macro-crate", - "proc-macro2", - "quote", - "regex", - "relative-path", - "rustc_version", - "syn 2.0.114", - "unicode-ident", -] - [[package]] name = "rumqttc" version = "0.25.1" @@ -7641,9 +7523,9 @@ dependencies = [ [[package]] name = "rust-embed" -version = "8.9.0" +version = "8.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "947d7f3fad52b283d261c4c99a084937e2fe492248cb9a68a8435a861b8798ca" +checksum = "04113cb9355a377d83f06ef1f0a45b8ab8cd7d8b1288160717d66df5c7988d27" dependencies = [ "rust-embed-impl", "rust-embed-utils", @@ -7652,9 +7534,9 @@ dependencies = [ [[package]] name = "rust-embed-impl" -version = "8.9.0" +version = "8.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fa2c8c9e8711e10f9c4fd2d64317ef13feaab820a4c51541f1a8c8e2e851ab2" +checksum = "da0902e4c7c8e997159ab384e6d0fc91c221375f6894346ae107f47dd0f3ccaa" dependencies = [ "proc-macro2", "quote", @@ -7666,9 +7548,9 @@ dependencies = [ [[package]] name = "rust-embed-utils" -version = "8.9.0" +version = "8.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b161f275cb337fe0a44d924a5f4df0ed69c2c39519858f931ce61c779d3475" +checksum = "5bcdef0be6fe7f6fa333b1073c949729274b05f123a0ad7efcb8efd878e5c3b1" dependencies = [ "sha2 0.10.9", "walkdir", @@ -7915,7 +7797,7 @@ dependencies = [ "cfg-if", "chacha20poly1305", "jsonwebtoken", - "pbkdf2 0.13.0-rc.6", + "pbkdf2 0.13.0-rc.7", "rand 0.10.0-rc.6", "serde_json", "sha2 0.11.0-rc.3", @@ -8375,7 +8257,7 @@ version = "0.0.5" dependencies = [ "base64-simd", "blake3", - "brotli 8.0.2", + "brotli", "bytes", "convert_case", "crc-fast", @@ -8587,8 +8469,9 @@ checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" [[package]] name = "s3s" -version = "0.13.0-alpha" -source = "git+https://github.com/s3s-project/s3s.git?branch=main#18c168ae21bf1176555f8f529686ecdc2ebd6db7" +version = "0.13.0-alpha.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9f3760ae04ec65fd1b0f17dc81c8061c432e8453365fa010cc610cb97ff877" dependencies = [ "arrayvec", "async-trait", @@ -9148,9 +9031,9 @@ dependencies = [ [[package]] name = "slog-scope" -version = "4.4.0" +version = "4.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f95a4b4c3274cd2869549da82b57ccc930859bdbf5bcea0424bc5f140b3c786" +checksum = "42b76cf645c92e7850d5a1c9205ebf2864bd32c0ab3e978e6daad51fedf7ef54" dependencies = [ "arc-swap", "lazy_static", @@ -9336,9 +9219,9 @@ dependencies = [ [[package]] name = "ssh-cipher" -version = "0.3.0-rc.4" +version = "0.3.0-rc.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "361de425e489d5fe3f1ecfd91531c8fe91ededbbc567e24b77a560d503309bf9" +checksum = "88ca7fe5fcf2f30c6fcbad76c65c0aef40a09087ef9092eae072383c7d959200" dependencies = [ "aes 0.9.0-rc.2", "aes-gcm 0.11.0-rc.2", @@ -9346,7 +9229,7 @@ dependencies = [ "cipher 0.5.0-rc.3", "des", "poly1305 0.9.0-rc.3", - "ssh-encoding 0.3.0-rc.3", + "ssh-encoding 0.3.0-rc.4", "zeroize", ] @@ -9364,12 +9247,12 @@ dependencies = [ [[package]] name = "ssh-encoding" -version = "0.3.0-rc.3" +version = "0.3.0-rc.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ad6a09263583e83e934fcd436b7e3bb9d69602e2feef3787adb615c1fe3a343" +checksum = "d25a544d1b898f016dd32083ec3a926a5bff0deb6e43c691db6eb39fa11b7c9e" dependencies = [ "base64ct", - "crypto-bigint 0.7.0-rc.15", + "crypto-bigint 0.7.0-rc.18", "digest 0.11.0-rc.5", "pem-rfc7468 1.0.0", "subtle", @@ -9378,18 +9261,18 @@ dependencies = [ [[package]] name = "ssh-key" -version = "0.7.0-rc.4" +version = "0.7.0-rc.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7faefb89d4a5304e31238913d1f7f164e22494276ed58cd84d5058ba7b04911f" +checksum = "6d1b3dd9b51062c9dfd6339675bcc2bab19e91400a08cc167227e9184e99d715" dependencies = [ - "ed25519-dalek 3.0.0-pre.2", + "ed25519-dalek 3.0.0-pre.4", "rand_core 0.10.0-rc-3", "rsa", "sec1 0.8.0-rc.11", "sha2 0.11.0-rc.3", "signature 3.0.0-rc.6", - "ssh-cipher 0.3.0-rc.4", - "ssh-encoding 0.3.0-rc.3", + "ssh-cipher 0.3.0-rc.5", + "ssh-encoding 0.3.0-rc.4", "subtle", "zeroize", ] @@ -9586,9 +9469,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "12.17.0" +version = "12.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d8046c5674ab857104bc4559d505f4809b8060d57806e45d49737c97afeb60" +checksum = "520cf51c674f8b93d533f80832babe413214bb766b6d7cb74ee99ad2971f8467" dependencies = [ "debugid", "memmap2", @@ -9598,9 +9481,9 @@ dependencies = [ 
[[package]] name = "symbolic-demangle" -version = "12.17.0" +version = "12.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1accb6e5c4b0f682de907623912e616b44be1c9e725775155546669dbff720ec" +checksum = "9f0de2ee0ffa2641e17ba715ad51d48b9259778176517979cb38b6aa86fa7425" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -9860,9 +9743,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.44" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" dependencies = [ "deranged", "itoa", @@ -9870,22 +9753,22 @@ dependencies = [ "num-conv", "num_threads", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" +checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" [[package]] name = "time-macros" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" dependencies = [ "num-conv", "time-core", @@ -10038,8 +9921,8 @@ checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", "serde_spanned", - "toml_datetime 0.6.11", - "toml_edit 0.22.27", + "toml_datetime", + "toml_edit", ] [[package]] @@ -10051,15 +9934,6 @@ dependencies = [ "serde", ] -[[package]] -name = "toml_datetime" -version = "0.7.5+spec-1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" 
-dependencies = [ - "serde_core", -] - [[package]] name = "toml_edit" version = "0.22.27" @@ -10069,32 +9943,11 @@ dependencies = [ "indexmap 2.13.0", "serde", "serde_spanned", - "toml_datetime 0.6.11", + "toml_datetime", "toml_write", "winnow", ] -[[package]] -name = "toml_edit" -version = "0.23.10+spec-1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" -dependencies = [ - "indexmap 2.13.0", - "toml_datetime 0.7.5+spec-1.1.0", - "toml_parser", - "winnow", -] - -[[package]] -name = "toml_parser" -version = "1.0.6+spec-1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" -dependencies = [ - "winnow", -] - [[package]] name = "toml_write" version = "0.1.2" @@ -10174,9 +10027,9 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", @@ -10296,16 +10149,13 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.32.0" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6e5658463dd88089aba75c7791e1d3120633b1bfde22478b28f625a9bb1b8e" +checksum = "1ac28f2d093c6c477eaa76b23525478f38de514fa9aeb1285738d4b97a9552fc" dependencies = [ "js-sys", "opentelemetry", - "opentelemetry_sdk", - "rustversion", "smallvec", - "thiserror 2.0.17", "tracing", "tracing-core", "tracing-log", @@ -10631,9 +10481,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ "cfg-if", "once_cell", @@ -10644,11 +10494,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.56" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", + "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -10657,9 +10508,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10667,9 +10518,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ "bumpalo", "proc-macro2", @@ -10680,9 +10531,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] @@ -10702,9 +10553,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -11246,15 +11097,6 @@ version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3" -[[package]] -name = "xz2" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" -dependencies = [ - "lzma-sys", -] - [[package]] name = "yansi" version = "1.0.1" @@ -11389,13 +11231,12 @@ dependencies = [ [[package]] name = "zip" -version = "7.0.0" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdd8a47718a4ee5fe78e07667cd36f3de80e7c2bfe727c7074245ffc7303c037" +checksum = "9013f1222db8a6d680f13a7ccdc60a781199cd09c2fa4eff58e728bb181757fc" dependencies = [ "aes 0.8.4", - "arbitrary", - "bzip2 0.6.1", + "bzip2", "constant_time_eq 0.3.1", "crc32fast", "deflate64", @@ -11423,9 +11264,9 @@ checksum = "40990edd51aae2c2b6907af74ffb635029d5788228222c4bb811e9351c0caad3" [[package]] name = "zmij" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac93432f5b761b22864c774aac244fa5c0fd877678a4c37ebf6cf42208f9c9ec" +checksum = "bd8f3f50b848df28f887acb68e41201b5aea6bc8a8dacc00fb40635ff9a72fea" [[package]] name = "zopfli" diff --git a/Cargo.toml b/Cargo.toml index 1bea10c3..adfe6194 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -98,7 +98,7 @@ rustfs-zip = { path = "./crates/zip", version = "0.0.5" } # Async Runtime and Networking async-channel = "2.5.0" -async-compression = { version = "0.4.19" } +async-compression = { version = "0.4.37" } async-recursion = "1.1.1" async-trait = "0.1.89" axum = "0.8.8" @@ -123,7 +123,7 @@ tokio-util = { version = "0.7.18", features = ["io", "compat"] } tonic 
= { version = "0.14.2", features = ["gzip"] } tonic-prost = { version = "0.14.2" } tonic-prost-build = { version = "0.14.2" } -tower = { version = "0.5.2", features = ["timeout"] } +tower = { version = "0.5.3", features = ["timeout"] } tower-http = { version = "0.6.8", features = ["cors"] } # Serialization and Data Formats @@ -147,11 +147,11 @@ aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] } argon2 = { version = "0.6.0-rc.5" } blake3 = { version = "1.8.3", features = ["rayon", "mmap"] } chacha20poly1305 = { version = "0.11.0-rc.2" } -crc-fast = "1.6.0" +crc-fast = "1.9.0" hmac = { version = "0.13.0-rc.3" } jsonwebtoken = { version = "10.2.0", features = ["aws_lc_rs"] } -pbkdf2 = "0.13.0-rc.6" -rsa = { version = "0.10.0-rc.11" } +pbkdf2 = "0.13.0-rc.7" +rsa = { version = "0.10.0-rc.12" } rustls = { version = "0.23.36", default-features = false, features = ["aws-lc-rs", "logging", "tls12", "prefer-post-quantum", "std"] } rustls-pemfile = "2.2.0" rustls-pki-types = "1.13.2" @@ -161,9 +161,9 @@ subtle = "2.6" zeroize = { version = "1.8.2", features = ["derive"] } # Time and Date -chrono = { version = "0.4.42", features = ["serde"] } +chrono = { version = "0.4.43", features = ["serde"] } humantime = "2.3.0" -time = { version = "0.3.44", features = ["std", "parsing", "formatting", "macros", "serde"] } +time = { version = "0.3.45", features = ["std", "parsing", "formatting", "macros", "serde"] } # Utilities and Tools anyhow = "1.0.100" @@ -174,7 +174,7 @@ atomic_enum = "0.3.0" aws-config = { version = "1.8.12" } aws-credential-types = { version = "1.2.11" } aws-sdk-s3 = { version = "1.119.0", default-features = false, features = ["sigv4a", "default-https-client", "rt-tokio"] } -aws-smithy-types = { version = "1.3.5" } +aws-smithy-types = { version = "1.3.6" } base64 = "0.22.1" base64-simd = "0.8.0" brotli = "8.0.2" @@ -184,16 +184,16 @@ const-str = { version = "1.0.0", features = ["std", "proc"] } convert_case = "0.10.0" criterion = { version = "0.8", 
features = ["html_reports"] } crossbeam-queue = "0.3.12" -datafusion = "51.0.0" +datafusion = "52.0.0" derive_builder = "0.20.2" dunce = "1.0.5" enumset = "1.1.10" faster-hex = "0.10.0" -flate2 = "1.1.5" +flate2 = "1.1.8" flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv", "json"] } glob = "0.3.3" -google-cloud-storage = "1.5.0" -google-cloud-auth = "1.3.0" +google-cloud-storage = "1.6.0" +google-cloud-auth = "1.4.0" hashbrown = { version = "0.16.1", features = ["serde", "rayon"] } heed = { version = "0.22.0" } hex-simd = "0.8.0" @@ -202,7 +202,7 @@ ipnetwork = { version = "0.21.1", features = ["serde"] } lazy_static = "1.5.0" libc = "0.2.180" libsystemd = "0.7.2" -local-ip-address = "0.6.8" +local-ip-address = "0.6.9" lz4 = "1.28.1" matchit = "0.9.1" md-5 = "0.11.0-rc.3" @@ -225,9 +225,9 @@ rayon = "1.11.0" reed-solomon-simd = { version = "3.1.0" } regex = { version = "1.12.2" } rumqttc = { version = "0.25.1" } -rust-embed = { version = "8.9.0" } +rust-embed = { version = "8.11.0" } rustc-hash = { version = "2.1.1" } -s3s = { version = "0.13.0-alpha", features = ["minio"], git = "https://github.com/s3s-project/s3s.git", branch = "main" } +s3s = { version = "0.13.0-alpha.2", features = ["minio"] } serial_test = "3.3.1" shadow-rs = { version = "1.5.0", default-features = false } siphasher = "1.0.1" @@ -245,7 +245,7 @@ thiserror = "2.0.17" tracing = { version = "0.1.44" } tracing-appender = "0.2.4" tracing-error = "0.2.1" -tracing-opentelemetry = "0.32.0" +tracing-opentelemetry = "0.32.1" tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] } transform-stream = "0.3.1" url = "2.5.8" @@ -256,7 +256,7 @@ walkdir = "2.5.0" wildmatch = { version = "2.6.1", features = ["serde"] } windows = { version = "0.62.2" } xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] } -zip = "7.0.0" +zip = "7.1.0" zstd = "0.13.3" # Observability and Metrics @@ -272,7 +272,7 @@ opentelemetry-stdout = { 
version = "0.31.0" } libunftp = "0.21.0" russh = { version = "0.56.0", features = ["aws-lc-rs", "rsa"], default-features = false } russh-sftp = "2.1.1" -ssh-key = { version = "0.7.0-rc.4", features = ["std", "rsa", "ed25519"] } +ssh-key = { version = "0.7.0-rc.6", features = ["std", "rsa", "ed25519"] } suppaftp = { version = "7.1.0", features = ["tokio", "tokio-rustls", "rustls"] } rcgen = "0.14.6" diff --git a/crates/ecstore/src/bitrot.rs b/crates/ecstore/src/bitrot.rs index 8443b1ca..c02467a7 100644 --- a/crates/ecstore/src/bitrot.rs +++ b/crates/ecstore/src/bitrot.rs @@ -12,8 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::disk::error::DiskError; -use crate::disk::{self, DiskAPI as _, DiskStore}; +use crate::disk::{self, DiskAPI as _, DiskStore, error::DiskError}; use crate::erasure_coding::{BitrotReader, BitrotWriterWrapper, CustomWriter}; use rustfs_utils::HashAlgorithm; use std::io::Cursor; diff --git a/crates/ecstore/src/bucket/bucket_target_sys.rs b/crates/ecstore/src/bucket/bucket_target_sys.rs index fa623b22..fe331220 100644 --- a/crates/ecstore/src/bucket/bucket_target_sys.rs +++ b/crates/ecstore/src/bucket/bucket_target_sys.rs @@ -13,6 +13,14 @@ // limitations under the License. 
use crate::bucket::metadata::BucketMetadata; +use crate::bucket::metadata_sys::get_bucket_targets_config; +use crate::bucket::metadata_sys::get_replication_config; +use crate::bucket::replication::ObjectOpts; +use crate::bucket::replication::ReplicationConfigurationExt; +use crate::bucket::target::ARN; +use crate::bucket::target::BucketTargetType; +use crate::bucket::target::{self, BucketTarget, BucketTargets, Credentials}; +use crate::bucket::versioning_sys::BucketVersioningSys; use aws_credential_types::Credentials as SdkCredentials; use aws_sdk_s3::config::Region as SdkRegion; use aws_sdk_s3::error::SdkError; @@ -52,15 +60,6 @@ use tracing::warn; use url::Url; use uuid::Uuid; -use crate::bucket::metadata_sys::get_bucket_targets_config; -use crate::bucket::metadata_sys::get_replication_config; -use crate::bucket::replication::ObjectOpts; -use crate::bucket::replication::ReplicationConfigurationExt; -use crate::bucket::target::ARN; -use crate::bucket::target::BucketTargetType; -use crate::bucket::target::{self, BucketTarget, BucketTargets, Credentials}; -use crate::bucket::versioning_sys::BucketVersioningSys; - const DEFAULT_HEALTH_CHECK_DURATION: Duration = Duration::from_secs(5); const DEFAULT_HEALTH_CHECK_RELOAD_DURATION: Duration = Duration::from_secs(30 * 60); diff --git a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_audit.rs b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_audit.rs index 18945a08..d3420fc1 100644 --- a/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_audit.rs +++ b/crates/ecstore/src/bucket/lifecycle/bucket_lifecycle_audit.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use super::lifecycle; +use crate::bucket::lifecycle::lifecycle; #[derive(Debug, Clone, Default)] pub enum LcEventSrc { diff --git a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs index 9666699b..b785d11b 100644 --- a/crates/ecstore/src/bucket/lifecycle/lifecycle.rs +++ b/crates/ecstore/src/bucket/lifecycle/lifecycle.rs @@ -18,6 +18,8 @@ #![allow(unused_must_use)] #![allow(clippy::all)] +use crate::bucket::lifecycle::rule::TransitionOps; +use crate::store_api::ObjectInfo; use rustfs_filemeta::{ReplicationStatusType, VersionPurgeStatusType}; use s3s::dto::{ BucketLifecycleConfiguration, ExpirationStatus, LifecycleExpiration, LifecycleRule, NoncurrentVersionTransition, @@ -33,19 +35,15 @@ use time::{self, Duration, OffsetDateTime}; use tracing::info; use uuid::Uuid; -use crate::bucket::lifecycle::rule::TransitionOps; -use crate::store_api::ObjectInfo; - pub const TRANSITION_COMPLETE: &str = "complete"; pub const TRANSITION_PENDING: &str = "pending"; - -const ERR_LIFECYCLE_TOO_MANY_RULES: &str = "Lifecycle configuration allows a maximum of 1000 rules"; const ERR_LIFECYCLE_NO_RULE: &str = "Lifecycle configuration should have at least one rule"; const ERR_LIFECYCLE_DUPLICATE_ID: &str = "Rule ID must be unique. 
Found same ID for more than one rule"; const _ERR_XML_NOT_WELL_FORMED: &str = "The XML you provided was not well-formed or did not validate against our published schema"; const ERR_LIFECYCLE_BUCKET_LOCKED: &str = "ExpiredObjectAllVersions element and DelMarkerExpiration action cannot be used on an retention bucket"; +const ERR_LIFECYCLE_TOO_MANY_RULES: &str = "Lifecycle configuration should have at most 1000 rules"; pub use rustfs_common::metrics::IlmAction; diff --git a/crates/ecstore/src/bucket/lifecycle/tier_last_day_stats.rs b/crates/ecstore/src/bucket/lifecycle/tier_last_day_stats.rs index 557d6189..e987f47b 100644 --- a/crates/ecstore/src/bucket/lifecycle/tier_last_day_stats.rs +++ b/crates/ecstore/src/bucket/lifecycle/tier_last_day_stats.rs @@ -18,15 +18,13 @@ #![allow(unused_must_use)] #![allow(clippy::all)] +use rustfs_common::data_usage::TierStats; use sha2::Sha256; - use std::collections::HashMap; use std::ops::Sub; use time::OffsetDateTime; use tracing::{error, warn}; -use rustfs_common::data_usage::TierStats; - pub type DailyAllTierStats = HashMap; #[derive(Clone)] diff --git a/crates/ecstore/src/bucket/lifecycle/tier_sweeper.rs b/crates/ecstore/src/bucket/lifecycle/tier_sweeper.rs index 35a87a0b..8c905776 100644 --- a/crates/ecstore/src/bucket/lifecycle/tier_sweeper.rs +++ b/crates/ecstore/src/bucket/lifecycle/tier_sweeper.rs @@ -18,16 +18,15 @@ #![allow(unused_must_use)] #![allow(clippy::all)] +use crate::bucket::lifecycle::bucket_lifecycle_ops::{ExpiryOp, GLOBAL_ExpiryState, TransitionedObject}; +use crate::bucket::lifecycle::lifecycle::{self, ObjectOpts}; +use crate::global::GLOBAL_TierConfigMgr; use sha2::{Digest, Sha256}; use std::any::Any; use std::io::Write; use uuid::Uuid; use xxhash_rust::xxh64; -use super::bucket_lifecycle_ops::{ExpiryOp, GLOBAL_ExpiryState, TransitionedObject}; -use super::lifecycle::{self, ObjectOpts}; -use crate::global::GLOBAL_TierConfigMgr; - static XXHASH_SEED: u64 = 0; #[derive(Default)] diff --git 
a/crates/ecstore/src/bucket/metadata.rs b/crates/ecstore/src/bucket/metadata.rs index 5c75571e..78ceaf0b 100644 --- a/crates/ecstore/src/bucket/metadata.rs +++ b/crates/ecstore/src/bucket/metadata.rs @@ -12,20 +12,21 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::{quota::BucketQuota, target::BucketTargets}; - use super::object_lock::ObjectLockApi; use super::versioning::VersioningApi; +use super::{quota::BucketQuota, target::BucketTargets}; use crate::bucket::utils::deserialize; use crate::config::com::{read_config, save_config}; +use crate::disk::BUCKET_META_PREFIX; use crate::error::{Error, Result}; use crate::new_object_layer_fn; +use crate::store::ECStore; use byteorder::{BigEndian, ByteOrder, LittleEndian}; use rmp_serde::Serializer as rmpSerializer; use rustfs_policy::policy::BucketPolicy; use s3s::dto::{ - BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ReplicationConfiguration, - ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration, + BucketLifecycleConfiguration, CORSConfiguration, NotificationConfiguration, ObjectLockConfiguration, + ReplicationConfiguration, ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration, }; use serde::Serializer; use serde::{Deserialize, Serialize}; @@ -34,9 +35,6 @@ use std::sync::Arc; use time::OffsetDateTime; use tracing::error; -use crate::disk::BUCKET_META_PREFIX; -use crate::store::ECStore; - pub const BUCKET_METADATA_FILE: &str = ".metadata.bin"; pub const BUCKET_METADATA_FORMAT: u16 = 1; pub const BUCKET_METADATA_VERSION: u16 = 1; @@ -51,6 +49,7 @@ pub const OBJECT_LOCK_CONFIG: &str = "object-lock.xml"; pub const BUCKET_VERSIONING_CONFIG: &str = "versioning.xml"; pub const BUCKET_REPLICATION_CONFIG: &str = "replication.xml"; pub const BUCKET_TARGETS_FILE: &str = "bucket-targets.json"; +pub const BUCKET_CORS_CONFIG: &str = "cors.xml"; #[derive(Debug, Deserialize, Serialize, Clone)] 
#[serde(rename_all = "PascalCase", default)] @@ -69,6 +68,7 @@ pub struct BucketMetadata { pub replication_config_xml: Vec, pub bucket_targets_config_json: Vec, pub bucket_targets_config_meta_json: Vec, + pub cors_config_xml: Vec, pub policy_config_updated_at: OffsetDateTime, pub object_lock_config_updated_at: OffsetDateTime, @@ -81,6 +81,7 @@ pub struct BucketMetadata { pub notification_config_updated_at: OffsetDateTime, pub bucket_targets_config_updated_at: OffsetDateTime, pub bucket_targets_config_meta_updated_at: OffsetDateTime, + pub cors_config_updated_at: OffsetDateTime, #[serde(skip)] pub new_field_updated_at: OffsetDateTime, @@ -107,6 +108,8 @@ pub struct BucketMetadata { pub bucket_target_config: Option, #[serde(skip)] pub bucket_target_config_meta: Option>, + #[serde(skip)] + pub cors_config: Option, } impl Default for BucketMetadata { @@ -126,6 +129,7 @@ impl Default for BucketMetadata { replication_config_xml: Default::default(), bucket_targets_config_json: Default::default(), bucket_targets_config_meta_json: Default::default(), + cors_config_xml: Default::default(), policy_config_updated_at: OffsetDateTime::UNIX_EPOCH, object_lock_config_updated_at: OffsetDateTime::UNIX_EPOCH, encryption_config_updated_at: OffsetDateTime::UNIX_EPOCH, @@ -137,6 +141,7 @@ impl Default for BucketMetadata { notification_config_updated_at: OffsetDateTime::UNIX_EPOCH, bucket_targets_config_updated_at: OffsetDateTime::UNIX_EPOCH, bucket_targets_config_meta_updated_at: OffsetDateTime::UNIX_EPOCH, + cors_config_updated_at: OffsetDateTime::UNIX_EPOCH, new_field_updated_at: OffsetDateTime::UNIX_EPOCH, policy_config: Default::default(), notification_config: Default::default(), @@ -149,6 +154,7 @@ impl Default for BucketMetadata { replication_config: Default::default(), bucket_target_config: Default::default(), bucket_target_config_meta: Default::default(), + cors_config: Default::default(), } } } @@ -297,6 +303,10 @@ impl BucketMetadata { self.bucket_targets_config_json = 
data.clone(); self.bucket_targets_config_updated_at = updated; } + BUCKET_CORS_CONFIG => { + self.cors_config_xml = data; + self.cors_config_updated_at = updated; + } _ => return Err(Error::other(format!("config file not found : {config_file}"))), } @@ -367,6 +377,9 @@ impl BucketMetadata { } else { self.bucket_target_config = Some(BucketTargets::default()) } + if !self.cors_config_xml.is_empty() { + self.cors_config = Some(deserialize::(&self.cors_config_xml)?); + } Ok(()) } diff --git a/crates/ecstore/src/bucket/metadata_sys.rs b/crates/ecstore/src/bucket/metadata_sys.rs index d11ede91..64c14a5f 100644 --- a/crates/ecstore/src/bucket/metadata_sys.rs +++ b/crates/ecstore/src/bucket/metadata_sys.rs @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +use super::metadata::{BucketMetadata, load_bucket_metadata}; +use super::quota::BucketQuota; +use super::target::BucketTargets; use crate::StorageAPI as _; use crate::bucket::bucket_target_sys::BucketTargetSys; use crate::bucket::metadata::{BUCKET_LIFECYCLE_CONFIG, load_bucket_metadata_parse}; @@ -20,12 +23,13 @@ use crate::error::{Error, Result, is_err_bucket_not_found}; use crate::global::{GLOBAL_Endpoints, is_dist_erasure, is_erasure, new_object_layer_fn}; use crate::store::ECStore; use futures::future::join_all; +use lazy_static::lazy_static; use rustfs_common::heal_channel::HealOpts; use rustfs_policy::policy::BucketPolicy; use s3s::dto::ReplicationConfiguration; use s3s::dto::{ - BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ServerSideEncryptionConfiguration, Tagging, - VersioningConfiguration, + BucketLifecycleConfiguration, CORSConfiguration, NotificationConfiguration, ObjectLockConfiguration, + ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration, }; use std::collections::HashSet; use std::sync::OnceLock; @@ -36,12 +40,6 @@ use tokio::sync::RwLock; use tokio::time::sleep; use 
tracing::error; -use super::metadata::{BucketMetadata, load_bucket_metadata}; -use super::quota::BucketQuota; -use super::target::BucketTargets; - -use lazy_static::lazy_static; - lazy_static! { pub static ref GLOBAL_BucketMetadataSys: OnceLock>> = OnceLock::new(); } @@ -112,6 +110,13 @@ pub async fn get_bucket_targets_config(bucket: &str) -> Result { bucket_meta_sys.get_bucket_targets_config(bucket).await } +pub async fn get_cors_config(bucket: &str) -> Result<(CORSConfiguration, OffsetDateTime)> { + let bucket_meta_sys_lock = get_bucket_metadata_sys()?; + let bucket_meta_sys = bucket_meta_sys_lock.read().await; + + bucket_meta_sys.get_cors_config(bucket).await +} + pub async fn get_tagging_config(bucket: &str) -> Result<(Tagging, OffsetDateTime)> { let bucket_meta_sys_lock = get_bucket_metadata_sys()?; let bucket_meta_sys = bucket_meta_sys_lock.read().await; @@ -509,6 +514,16 @@ impl BucketMetadataSys { } } + pub async fn get_cors_config(&self, bucket: &str) -> Result<(CORSConfiguration, OffsetDateTime)> { + let (bm, _) = self.get_config(bucket).await?; + + if let Some(config) = &bm.cors_config { + Ok((config.clone(), bm.cors_config_updated_at)) + } else { + Err(Error::ConfigNotFound) + } + } + pub async fn created_at(&self, bucket: &str) -> Result { let bm = match self.get_config(bucket).await { Ok((bm, _)) => bm.created, diff --git a/crates/ecstore/src/bucket/object_lock/objectlock.rs b/crates/ecstore/src/bucket/object_lock/objectlock.rs index 4309739b..6452506f 100644 --- a/crates/ecstore/src/bucket/object_lock/objectlock.rs +++ b/crates/ecstore/src/bucket/object_lock/objectlock.rs @@ -12,11 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::HashMap; -use time::{OffsetDateTime, format_description}; - use s3s::dto::{Date, ObjectLockLegalHold, ObjectLockLegalHoldStatus, ObjectLockRetention, ObjectLockRetentionMode}; use s3s::header::{X_AMZ_OBJECT_LOCK_LEGAL_HOLD, X_AMZ_OBJECT_LOCK_MODE, X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE}; +use std::collections::HashMap; +use time::{OffsetDateTime, format_description}; const _ERR_MALFORMED_BUCKET_OBJECT_CONFIG: &str = "invalid bucket object lock config"; const _ERR_INVALID_RETENTION_DATE: &str = "date must be provided in ISO 8601 format"; diff --git a/crates/ecstore/src/bucket/object_lock/objectlock_sys.rs b/crates/ecstore/src/bucket/object_lock/objectlock_sys.rs index fb71339a..92e2e84d 100644 --- a/crates/ecstore/src/bucket/object_lock/objectlock_sys.rs +++ b/crates/ecstore/src/bucket/object_lock/objectlock_sys.rs @@ -12,16 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::bucket::metadata_sys::get_object_lock_config; +use crate::bucket::object_lock::objectlock; +use crate::store_api::ObjectInfo; +use s3s::dto::{DefaultRetention, ObjectLockLegalHoldStatus, ObjectLockRetentionMode}; use std::sync::Arc; use time::OffsetDateTime; -use s3s::dto::{DefaultRetention, ObjectLockLegalHoldStatus, ObjectLockRetentionMode}; - -use crate::bucket::metadata_sys::get_object_lock_config; -use crate::store_api::ObjectInfo; - -use super::objectlock; - pub struct BucketObjectLockSys {} impl BucketObjectLockSys { diff --git a/crates/ecstore/src/bucket/replication/config.rs b/crates/ecstore/src/bucket/replication/config.rs index 422b76be..ed137698 100644 --- a/crates/ecstore/src/bucket/replication/config.rs +++ b/crates/ecstore/src/bucket/replication/config.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use super::ReplicationRuleExt as _; +use crate::bucket::replication::ReplicationRuleExt as _; use crate::bucket::tagging::decode_tags_to_map; use rustfs_filemeta::ReplicationType; use s3s::dto::DeleteMarkerReplicationStatus; diff --git a/crates/ecstore/src/bucket/replication/replication_pool.rs b/crates/ecstore/src/bucket/replication/replication_pool.rs index e3d4abf7..def050cb 100644 --- a/crates/ecstore/src/bucket/replication/replication_pool.rs +++ b/crates/ecstore/src/bucket/replication/replication_pool.rs @@ -1,25 +1,33 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ use crate::StorageAPI; +use crate::bucket::bucket_target_sys::BucketTargetSys; +use crate::bucket::metadata_sys; use crate::bucket::replication::ResyncOpts; use crate::bucket::replication::ResyncStatusType; use crate::bucket::replication::replicate_delete; use crate::bucket::replication::replicate_object; -use crate::disk::BUCKET_META_PREFIX; -use std::any::Any; -use std::sync::Arc; -use std::sync::atomic::AtomicI32; -use std::sync::atomic::Ordering; - -use crate::bucket::bucket_target_sys::BucketTargetSys; -use crate::bucket::metadata_sys; use crate::bucket::replication::replication_resyncer::{ BucketReplicationResyncStatus, DeletedObjectReplicationInfo, ReplicationConfig, ReplicationResyncer, get_heal_replicate_object_info, }; use crate::bucket::replication::replication_state::ReplicationStats; use crate::config::com::read_config; +use crate::disk::BUCKET_META_PREFIX; use crate::error::Error as EcstoreError; use crate::store_api::ObjectInfo; - use lazy_static::lazy_static; use rustfs_filemeta::MrfReplicateEntry; use rustfs_filemeta::ReplicateDecision; @@ -34,6 +42,10 @@ use rustfs_filemeta::replication_statuses_map; use rustfs_filemeta::version_purge_statuses_map; use rustfs_filemeta::{REPLICATE_EXISTING, REPLICATE_HEAL, REPLICATE_HEAL_DELETE}; use rustfs_utils::http::RESERVED_METADATA_PREFIX_LOWER; +use std::any::Any; +use std::sync::Arc; +use std::sync::atomic::AtomicI32; +use std::sync::atomic::Ordering; use time::OffsetDateTime; use time::format_description::well_known::Rfc3339; use tokio::sync::Mutex; diff --git a/crates/ecstore/src/bucket/replication/replication_resyncer.rs b/crates/ecstore/src/bucket/replication/replication_resyncer.rs index 051f0455..f3a68f6d 100644 --- a/crates/ecstore/src/bucket/replication/replication_resyncer.rs +++ b/crates/ecstore/src/bucket/replication/replication_resyncer.rs @@ -1,3 +1,17 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except 
in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + use crate::bucket::bucket_target_sys::{ AdvancedPutOptions, BucketTargetSys, PutObjectOptions, PutObjectPartOptions, RemoveObjectOptions, TargetClient, }; @@ -16,7 +30,6 @@ use crate::event_notification::{EventArgs, send_event}; use crate::global::GLOBAL_LocalNodeName; use crate::store_api::{DeletedObject, ObjectInfo, ObjectOptions, ObjectToDelete, WalkOptions}; use crate::{StorageAPI, new_object_layer_fn}; - use aws_sdk_s3::error::SdkError; use aws_sdk_s3::operation::head_object::HeadObjectOutput; use aws_sdk_s3::primitives::ByteStream; @@ -24,7 +37,6 @@ use aws_sdk_s3::types::{CompletedPart, ObjectLockLegalHoldStatus}; use byteorder::ByteOrder; use futures::future::join_all; use http::HeaderMap; - use regex::Regex; use rustfs_filemeta::{ MrfReplicateEntry, REPLICATE_EXISTING, REPLICATE_EXISTING_DELETE, REPLICATION_RESET, ReplicateDecision, ReplicateObjectInfo, diff --git a/crates/ecstore/src/bucket/replication/replication_state.rs b/crates/ecstore/src/bucket/replication/replication_state.rs index 1b887141..28971c7b 100644 --- a/crates/ecstore/src/bucket/replication/replication_state.rs +++ b/crates/ecstore/src/bucket/replication/replication_state.rs @@ -1,3 +1,17 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + use crate::error::Error; use rustfs_filemeta::{ReplicatedTargetInfo, ReplicationStatusType, ReplicationType}; use serde::{Deserialize, Serialize}; diff --git a/crates/ecstore/src/bucket/replication/rule.rs b/crates/ecstore/src/bucket/replication/rule.rs index 136c5480..b8df680e 100644 --- a/crates/ecstore/src/bucket/replication/rule.rs +++ b/crates/ecstore/src/bucket/replication/rule.rs @@ -12,11 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::bucket::replication::ObjectOpts; use s3s::dto::ReplicaModificationsStatus; use s3s::dto::ReplicationRule; -use super::ObjectOpts; - pub trait ReplicationRuleExt { fn prefix(&self) -> &str; fn metadata_replicate(&self, obj: &ObjectOpts) -> bool; diff --git a/crates/ecstore/src/bucket/tagging/mod.rs b/crates/ecstore/src/bucket/tagging/mod.rs index 62e428a4..9c5f16be 100644 --- a/crates/ecstore/src/bucket/tagging/mod.rs +++ b/crates/ecstore/src/bucket/tagging/mod.rs @@ -12,9 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::HashMap; - use s3s::dto::Tag; +use std::collections::HashMap; use url::form_urlencoded; pub fn decode_tags(tags: &str) -> Vec { diff --git a/crates/ecstore/src/bucket/target/arn.rs b/crates/ecstore/src/bucket/target/arn.rs index a9104077..543b75d0 100644 --- a/crates/ecstore/src/bucket/target/arn.rs +++ b/crates/ecstore/src/bucket/target/arn.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::BucketTargetType; +use crate::bucket::target::BucketTargetType; use std::fmt::Display; use std::str::FromStr; diff --git a/crates/ecstore/src/bucket/utils.rs b/crates/ecstore/src/bucket/utils.rs index 8eb60ccb..ee7012a3 100644 --- a/crates/ecstore/src/bucket/utils.rs +++ b/crates/ecstore/src/bucket/utils.rs @@ -14,16 +14,15 @@ use crate::disk::RUSTFS_META_BUCKET; use crate::error::{Error, Result, StorageError}; +use regex::Regex; use rustfs_utils::path::SLASH_SEPARATOR_STR; use s3s::xml; +use tracing::instrument; pub fn is_meta_bucketname(name: &str) -> bool { name.starts_with(RUSTFS_META_BUCKET) } -use regex::Regex; -use tracing::instrument; - lazy_static::lazy_static! { static ref VALID_BUCKET_NAME: Regex = Regex::new(r"^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$").unwrap(); static ref VALID_BUCKET_NAME_STRICT: Regex = Regex::new(r"^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$").unwrap(); diff --git a/crates/ecstore/src/bucket/versioning/mod.rs b/crates/ecstore/src/bucket/versioning/mod.rs index 2750ee77..05f5b22e 100644 --- a/crates/ecstore/src/bucket/versioning/mod.rs +++ b/crates/ecstore/src/bucket/versioning/mod.rs @@ -12,9 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use s3s::dto::{BucketVersioningStatus, VersioningConfiguration}; - use rustfs_utils::string::match_simple; +use s3s::dto::{BucketVersioningStatus, VersioningConfiguration}; pub trait VersioningApi { fn enabled(&self) -> bool; diff --git a/crates/ecstore/src/cache_value/mod.rs b/crates/ecstore/src/cache_value/mod.rs index 9dfbb3b8..b268fc04 100644 --- a/crates/ecstore/src/cache_value/mod.rs +++ b/crates/ecstore/src/cache_value/mod.rs @@ -12,9 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::Arc; - use lazy_static::lazy_static; +use std::sync::Arc; use tokio_util::sync::CancellationToken; pub mod metacache_set; diff --git a/crates/ecstore/src/compress.rs b/crates/ecstore/src/compress.rs index aaa43154..29e0baa6 100644 --- a/crates/ecstore/src/compress.rs +++ b/crates/ecstore/src/compress.rs @@ -12,8 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use rustfs_utils::string::has_pattern; -use rustfs_utils::string::has_string_suffix_in_slice; +use rustfs_utils::string::{has_pattern, has_string_suffix_in_slice}; use std::env; use tracing::error; diff --git a/crates/ecstore/src/data_usage.rs b/crates/ecstore/src/data_usage.rs index bd434855..e38188a2 100644 --- a/crates/ecstore/src/data_usage.rs +++ b/crates/ecstore/src/data_usage.rs @@ -12,33 +12,29 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::{ - collections::{HashMap, hash_map::Entry}, - sync::Arc, - time::{Duration, SystemTime}, -}; -use tokio::sync::RwLock; -use tracing::debug; - pub mod local_snapshot; + +use crate::{ + bucket::metadata_sys::get_replication_config, config::com::read_config, disk::DiskAPI, error::Error, store::ECStore, + store_api::StorageAPI, +}; pub use local_snapshot::{ DATA_USAGE_DIR, DATA_USAGE_STATE_DIR, LOCAL_USAGE_SNAPSHOT_VERSION, LocalUsageSnapshot, LocalUsageSnapshotMeta, data_usage_dir, data_usage_state_dir, ensure_data_usage_layout, read_snapshot as read_local_snapshot, snapshot_file_name, snapshot_object_path, snapshot_path, write_snapshot as write_local_snapshot, }; - -use crate::{ - bucket::metadata_sys::get_replication_config, config::com::read_config, disk::DiskAPI, store::ECStore, store_api::StorageAPI, -}; use rustfs_common::data_usage::{ BucketTargetUsageInfo, BucketUsageInfo, DataUsageCache, DataUsageEntry, DataUsageInfo, DiskUsageStatus, SizeSummary, }; use rustfs_utils::path::SLASH_SEPARATOR_STR; -use std::sync::OnceLock; +use std::{ + collections::{HashMap, hash_map::Entry}, + sync::{Arc, OnceLock}, + time::{Duration, SystemTime}, +}; use tokio::fs; -use tracing::{error, info, warn}; - -use crate::error::Error; +use tokio::sync::RwLock; +use tracing::{debug, error, info, warn}; // Data usage storage constants pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR_STR; @@ -112,8 +108,8 @@ pub async fn load_data_usage_from_backend(store: Arc) -> Result data, Err(e) => { error!("Failed to read data usage info from backend: {}", e); - if e == crate::error::Error::ConfigNotFound { - warn!("Data usage config not found, building basic statistics"); + if e == Error::ConfigNotFound { + info!("Data usage config not found, building basic statistics"); return build_basic_data_usage_info(store).await; } return Err(Error::other(e)); @@ -146,7 +142,7 @@ pub async fn load_data_usage_from_backend(store: Arc) -> Result) -> Result<(Vec) -> Result<(Vec, bucket_name: &str) 
-> Res continuation = result.next_continuation_token.clone(); if continuation.is_none() { - warn!( + info!( "Bucket {} listing marked truncated but no continuation token returned; stopping early", bucket_name ); @@ -567,7 +563,7 @@ pub fn cache_to_data_usage_info(cache: &DataUsageCache, path: &str, buckets: &[c None => continue, }; let flat = cache.flatten(&e); - let mut bui = rustfs_common::data_usage::BucketUsageInfo { + let mut bui = BucketUsageInfo { size: flat.size as u64, versions_count: flat.versions as u64, objects_count: flat.objects as u64, @@ -645,7 +641,7 @@ pub async fn load_data_usage_cache(store: &crate::set_disk::SetDisks, name: &str break; } Err(err) => match err { - crate::error::Error::FileNotFound | crate::error::Error::VolumeNotFound => { + Error::FileNotFound | Error::VolumeNotFound => { match store .get_object_reader( RUSTFS_META_BUCKET, @@ -666,7 +662,7 @@ pub async fn load_data_usage_cache(store: &crate::set_disk::SetDisks, name: &str break; } Err(_) => match err { - crate::error::Error::FileNotFound | crate::error::Error::VolumeNotFound => { + Error::FileNotFound | Error::VolumeNotFound => { break; } _ => {} @@ -695,9 +691,9 @@ pub async fn save_data_usage_cache(cache: &DataUsageCache, name: &str) -> crate: use std::path::Path; let Some(store) = new_object_layer_fn() else { - return Err(crate::error::Error::other("errServerNotInitialized")); + return Err(Error::other("errServerNotInitialized")); }; - let buf = cache.marshal_msg().map_err(crate::error::Error::other)?; + let buf = cache.marshal_msg().map_err(Error::other)?; let buf_clone = buf.clone(); let store_clone = store.clone(); diff --git a/crates/ecstore/src/data_usage/local_snapshot.rs b/crates/ecstore/src/data_usage/local_snapshot.rs index fa232f73..0ed2b6e1 100644 --- a/crates/ecstore/src/data_usage/local_snapshot.rs +++ b/crates/ecstore/src/data_usage/local_snapshot.rs @@ -1,13 +1,25 @@ -use std::collections::HashMap; -use std::path::{Path, PathBuf}; -use std::time::SystemTime; - 
-use serde::{Deserialize, Serialize}; -use tokio::fs; +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::data_usage::BucketUsageInfo; use crate::disk::RUSTFS_META_BUCKET; use crate::error::{Error, Result}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::path::{Path, PathBuf}; +use std::time::SystemTime; +use tokio::fs; /// Directory used to store per-disk usage snapshots under the metadata bucket. pub const DATA_USAGE_DIR: &str = "datausage"; diff --git a/crates/ecstore/src/disk/endpoint.rs b/crates/ecstore/src/disk/endpoint.rs index 952cda94..5339d964 100644 --- a/crates/ecstore/src/disk/endpoint.rs +++ b/crates/ecstore/src/disk/endpoint.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::error::{Error, Result}; +use crate::disk::error::{Error, Result}; use path_absolutize::Absolutize; use rustfs_utils::{is_local_host, is_socket_addr}; use std::{fmt::Display, path::Path}; diff --git a/crates/ecstore/src/disk/error.rs b/crates/ecstore/src/disk/error.rs index ebe9df4f..669b286b 100644 --- a/crates/ecstore/src/disk/error.rs +++ b/crates/ecstore/src/disk/error.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// use crate::quorum::CheckErrorFn; use std::hash::{Hash, Hasher}; use std::io::{self}; use std::path::PathBuf; diff --git a/crates/ecstore/src/disk/error_conv.rs b/crates/ecstore/src/disk/error_conv.rs index ed8487a9..0ee28878 100644 --- a/crates/ecstore/src/disk/error_conv.rs +++ b/crates/ecstore/src/disk/error_conv.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::error::DiskError; +use crate::disk::error::DiskError; pub fn to_file_error(io_err: std::io::Error) -> std::io::Error { match io_err.kind() { diff --git a/crates/ecstore/src/disk/error_reduce.rs b/crates/ecstore/src/disk/error_reduce.rs index d3264334..0ad53f48 100644 --- a/crates/ecstore/src/disk/error_reduce.rs +++ b/crates/ecstore/src/disk/error_reduce.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::error::Error; +use crate::disk::error::Error; pub static OBJECT_OP_IGNORED_ERRS: &[Error] = &[ Error::DiskNotFound, diff --git a/crates/ecstore/src/disk/format.rs b/crates/ecstore/src/disk/format.rs index aca63c7d..592f6f93 100644 --- a/crates/ecstore/src/disk/format.rs +++ b/crates/ecstore/src/disk/format.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use super::error::{Error, Result}; -use super::{DiskInfo, error::DiskError}; +use crate::disk::error::{Error, Result}; +use crate::disk::{DiskInfo, error::DiskError}; use serde::{Deserialize, Serialize}; use serde_json::Error as JsonError; use uuid::Uuid; diff --git a/crates/ecstore/src/disk/fs.rs b/crates/ecstore/src/disk/fs.rs index df22eb7d..d2299d45 100644 --- a/crates/ecstore/src/disk/fs.rs +++ b/crates/ecstore/src/disk/fs.rs @@ -17,7 +17,6 @@ use std::{ path::Path, sync::{Arc, OnceLock}, }; - use tokio::{ fs::{self, File}, io, diff --git a/crates/ecstore/src/disk/local.rs b/crates/ecstore/src/disk/local.rs index aa813cae..ea08295e 100644 --- a/crates/ecstore/src/disk/local.rs +++ b/crates/ecstore/src/disk/local.rs @@ -831,7 +831,11 @@ impl LocalDisk { self.write_all_internal(&tmp_file_path, InternalBuf::Ref(buf), sync, &tmp_volume_dir) .await?; - rename_all(tmp_file_path, file_path, volume_dir).await + rename_all(tmp_file_path, &file_path, volume_dir).await?; + + // Invalidate cache after successful write + get_global_file_cache().invalidate(&file_path).await; + Ok(()) } // write_all_public for trail diff --git a/crates/ecstore/src/disk/os.rs b/crates/ecstore/src/disk/os.rs index 660deec5..92677957 100644 --- a/crates/ecstore/src/disk/os.rs +++ b/crates/ecstore/src/disk/os.rs @@ -12,19 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::disk::error::DiskError; +use crate::disk::error::Result; +use crate::disk::error_conv::to_file_error; +use rustfs_utils::path::SLASH_SEPARATOR_STR; use std::{ io, path::{Component, Path}, }; - -use super::error::Result; -use crate::disk::error_conv::to_file_error; -use rustfs_utils::path::SLASH_SEPARATOR_STR; use tokio::fs; use tracing::warn; -use super::error::DiskError; - /// Check path length according to OS limits. 
pub fn check_path_length(path_name: &str) -> Result<()> { // Apple OS X path length is limited to 1016 diff --git a/crates/ecstore/src/endpoints.rs b/crates/ecstore/src/endpoints.rs index 1a334c07..ff8427d1 100644 --- a/crates/ecstore/src/endpoints.rs +++ b/crates/ecstore/src/endpoints.rs @@ -12,19 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -use rustfs_utils::{XHost, check_local_server_addr, get_host_ip, is_local_host}; -use tracing::{error, info, instrument, warn}; - use crate::{ disk::endpoint::{Endpoint, EndpointType}, disks_layout::DisksLayout, global::global_rustfs_port, }; -use std::io::{Error, Result}; +use rustfs_utils::{XHost, check_local_server_addr, get_host_ip, is_local_host}; use std::{ collections::{HashMap, HashSet, hash_map::Entry}, + io::{Error, Result}, net::IpAddr, }; +use tracing::{error, info, instrument, warn}; /// enum for setup type. #[derive(PartialEq, Eq, Debug, Clone)] diff --git a/crates/ecstore/src/erasure_coding/decode.rs b/crates/ecstore/src/erasure_coding/decode.rs index c2a1daac..9e0925d8 100644 --- a/crates/ecstore/src/erasure_coding/decode.rs +++ b/crates/ecstore/src/erasure_coding/decode.rs @@ -12,10 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use super::BitrotReader; -use super::Erasure; use crate::disk::error::Error; use crate::disk::error_reduce::reduce_errs; +use crate::erasure_coding::{BitrotReader, Erasure}; use futures::stream::{FuturesUnordered, StreamExt}; use pin_project_lite::pin_project; use std::io; @@ -312,11 +311,12 @@ impl Erasure { #[cfg(test)] mod tests { - use rustfs_utils::HashAlgorithm; - - use crate::{disk::error::DiskError, erasure_coding::BitrotWriter}; - use super::*; + use crate::{ + disk::error::DiskError, + erasure_coding::{BitrotReader, BitrotWriter}, + }; + use rustfs_utils::HashAlgorithm; use std::io::Cursor; #[tokio::test] diff --git a/crates/ecstore/src/erasure_coding/encode.rs b/crates/ecstore/src/erasure_coding/encode.rs index 1b082550..cb7bd7ed 100644 --- a/crates/ecstore/src/erasure_coding/encode.rs +++ b/crates/ecstore/src/erasure_coding/encode.rs @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::BitrotWriterWrapper; -use super::Erasure; use crate::disk::error::Error; use crate::disk::error_reduce::count_errs; use crate::disk::error_reduce::{OBJECT_OP_IGNORED_ERRS, reduce_write_quorum_errs}; +use crate::erasure_coding::BitrotWriterWrapper; +use crate::erasure_coding::Erasure; use bytes::Bytes; use futures::StreamExt; use futures::stream::FuturesUnordered; diff --git a/crates/ecstore/src/erasure_coding/heal.rs b/crates/ecstore/src/erasure_coding/heal.rs index e654a678..422ae7da 100644 --- a/crates/ecstore/src/erasure_coding/heal.rs +++ b/crates/ecstore/src/erasure_coding/heal.rs @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use super::BitrotReader; -use super::BitrotWriterWrapper; -use super::decode::ParallelReader; use crate::disk::error::{Error, Result}; +use crate::erasure_coding::BitrotReader; +use crate::erasure_coding::BitrotWriterWrapper; +use crate::erasure_coding::decode::ParallelReader; use crate::erasure_coding::encode::MultiWriter; use bytes::Bytes; use tokio::io::AsyncRead; diff --git a/crates/ecstore/src/erasure_coding/mod.rs b/crates/ecstore/src/erasure_coding/mod.rs index 947bf4da..766562a9 100644 --- a/crates/ecstore/src/erasure_coding/mod.rs +++ b/crates/ecstore/src/erasure_coding/mod.rs @@ -12,12 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +mod bitrot; pub mod decode; pub mod encode; pub mod erasure; pub mod heal; - -mod bitrot; pub use bitrot::*; pub use erasure::{Erasure, ReedSolomonEncoder, calc_shard_size}; diff --git a/crates/ecstore/src/error.rs b/crates/ecstore/src/error.rs index 410faa72..dc747c36 100644 --- a/crates/ecstore/src/error.rs +++ b/crates/ecstore/src/error.rs @@ -12,12 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use s3s::{S3Error, S3ErrorCode}; - -use rustfs_utils::path::decode_dir_object; - use crate::bucket::error::BucketMetadataError; use crate::disk::error::DiskError; +use rustfs_utils::path::decode_dir_object; +use s3s::{S3Error, S3ErrorCode}; pub type Error = StorageError; pub type Result = core::result::Result; diff --git a/crates/ecstore/src/event/targetlist.rs b/crates/ecstore/src/event/targetlist.rs index f45b2cbc..29927b06 100644 --- a/crates/ecstore/src/event/targetlist.rs +++ b/crates/ecstore/src/event/targetlist.rs @@ -12,10 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use crate::event::targetid::TargetID; use std::sync::atomic::AtomicI64; -use super::targetid::TargetID; - #[derive(Default)] pub struct TargetList { pub current_send_calls: AtomicI64, diff --git a/crates/ecstore/src/event_notification.rs b/crates/ecstore/src/event_notification.rs index 3d909542..fdbc007d 100644 --- a/crates/ecstore/src/event_notification.rs +++ b/crates/ecstore/src/event_notification.rs @@ -14,15 +14,14 @@ // limitations under the License. #![allow(unused_variables)] -use std::collections::HashMap; -use std::sync::Arc; -use tokio::sync::RwLock; - use crate::bucket::metadata::BucketMetadata; use crate::event::name::EventName; use crate::event::targetlist::TargetList; use crate::store::ECStore; use crate::store_api::ObjectInfo; +use std::collections::HashMap; +use std::sync::Arc; +use tokio::sync::RwLock; pub struct EventNotifier { target_list: TargetList, diff --git a/crates/ecstore/src/metrics_realtime.rs b/crates/ecstore/src/metrics_realtime.rs index 3e7af4b3..8f1b159d 100644 --- a/crates/ecstore/src/metrics_realtime.rs +++ b/crates/ecstore/src/metrics_realtime.rs @@ -12,12 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{ - admin_server_info::get_local_server_property, - new_object_layer_fn, - store_api::StorageAPI, - // utils::os::get_drive_stats, -}; +use crate::{admin_server_info::get_local_server_property, new_object_layer_fn, store_api::StorageAPI}; use chrono::Utc; use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, GLOBAL_RUSTFS_ADDR, heal_channel::DriveState, metrics::global_metrics}; use rustfs_madmin::metrics::{DiskIOStats, DiskMetric, RealtimeMetrics}; diff --git a/crates/ecstore/src/rpc/client.rs b/crates/ecstore/src/rpc/client.rs index fe966d86..a9830ef8 100644 --- a/crates/ecstore/src/rpc/client.rs +++ b/crates/ecstore/src/rpc/client.rs @@ -12,16 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::error::Error; - +use crate::rpc::{TONIC_RPC_PREFIX, gen_signature_headers}; use http::Method; use rustfs_common::GLOBAL_CONN_MAP; use rustfs_protos::{create_new_channel, proto_gen::node_service::node_service_client::NodeServiceClient}; +use std::error::Error; use tonic::{service::interceptor::InterceptedService, transport::Channel}; use tracing::debug; -use crate::rpc::{TONIC_RPC_PREFIX, gen_signature_headers}; - /// 3. Subsequent calls will attempt fresh connections /// 4. If node is still down, connection will fail fast (3s timeout) pub async fn node_service_time_out_client( diff --git a/crates/ecstore/src/rpc/peer_rest_client.rs b/crates/ecstore/src/rpc/peer_rest_client.rs index fc6d5374..78d50b5e 100644 --- a/crates/ecstore/src/rpc/peer_rest_client.rs +++ b/crates/ecstore/src/rpc/peer_rest_client.rs @@ -27,7 +27,6 @@ use rustfs_madmin::{ net::NetInfo, }; use rustfs_protos::evict_failed_connection; -use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient; use rustfs_protos::proto_gen::node_service::{ DeleteBucketMetadataRequest, DeletePolicyRequest, DeleteServiceAccountRequest, DeleteUserRequest, GetCpusRequest, GetMemInfoRequest, GetMetricsRequest, GetNetInfoRequest, GetOsInfoRequest, GetPartitionsRequest, GetProcInfoRequest, @@ -35,6 +34,7 @@ use rustfs_protos::proto_gen::node_service::{ LoadPolicyMappingRequest, LoadPolicyRequest, LoadRebalanceMetaRequest, LoadServiceAccountRequest, LoadTransitionTierConfigRequest, LoadUserRequest, LocalStorageInfoRequest, Mss, ReloadPoolMetaRequest, ReloadSiteReplicationConfigRequest, ServerInfoRequest, SignalServiceRequest, StartProfilingRequest, StopRebalanceRequest, + node_service_client::NodeServiceClient, }; use rustfs_utils::XHost; use serde::{Deserialize, Serialize as _}; diff --git a/crates/ecstore/src/rpc/remote_disk.rs b/crates/ecstore/src/rpc/remote_disk.rs index 64121420..d7185ffc 100644 --- a/crates/ecstore/src/rpc/remote_disk.rs +++ b/crates/ecstore/src/rpc/remote_disk.rs @@ 
-12,6 +12,34 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::disk::{ + CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions, FileReader, + FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions, + disk_store::{ + CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE, get_max_timeout_duration, + }, + endpoint::Endpoint, +}; +use crate::disk::{disk_store::DiskHealthTracker, error::DiskError, local::ScanGuard}; +use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client}; +use crate::{ + disk::error::{Error, Result}, + rpc::build_auth_headers, +}; +use bytes::Bytes; +use futures::lock::Mutex; +use http::{HeaderMap, HeaderValue, Method, header::CONTENT_TYPE}; +use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo}; +use rustfs_protos::proto_gen::node_service::RenamePartRequest; +use rustfs_protos::proto_gen::node_service::{ + CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest, + DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest, + ReadMetadataRequest, ReadMultipleRequest, ReadPartsRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest, + RenameFileRequest, StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest, + node_service_client::NodeServiceClient, +}; +use rustfs_rio::{HttpReader, HttpWriter}; +use rustfs_utils::string::parse_bool_with_default; use std::{ path::PathBuf, sync::{ @@ -20,56 +48,17 @@ use std::{ }, time::Duration, }; - -use bytes::Bytes; -use futures::lock::Mutex; -use http::{HeaderMap, HeaderValue, Method, header::CONTENT_TYPE}; -use rustfs_protos::proto_gen::node_service::{ - 
CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest, - DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest, - ReadMetadataRequest, ReadMultipleRequest, ReadPartsRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest, - RenameFileRequest, StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest, - node_service_client::NodeServiceClient, -}; -use rustfs_utils::string::parse_bool_with_default; use tokio::time; -use tokio_util::sync::CancellationToken; -use tracing::{debug, info, warn}; - -use crate::disk::{disk_store::DiskHealthTracker, error::DiskError, local::ScanGuard}; -use crate::{ - disk::error::{Error, Result}, - rpc::build_auth_headers, -}; -use crate::{ - disk::{ - CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions, - ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions, - disk_store::{ - CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE, - get_max_timeout_duration, - }, - endpoint::Endpoint, - }, - rpc::client::gen_tonic_signature_interceptor, -}; -use crate::{ - disk::{FileReader, FileWriter}, - rpc::client::{TonicInterceptor, node_service_time_out_client}, -}; -use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo}; -use rustfs_protos::proto_gen::node_service::RenamePartRequest; -use rustfs_rio::{HttpReader, HttpWriter}; use tokio::{io::AsyncWrite, net::TcpStream, time::timeout}; +use tokio_util::sync::CancellationToken; use tonic::{Request, service::interceptor::InterceptedService, transport::Channel}; +use tracing::{debug, info, warn}; use uuid::Uuid; #[derive(Debug)] pub struct RemoteDisk { pub id: Mutex>, pub addr: String, - pub url: url::Url, - pub root: PathBuf, endpoint: Endpoint, pub scanning: Arc, /// Whether 
health checking is enabled @@ -82,8 +71,6 @@ pub struct RemoteDisk { impl RemoteDisk { pub async fn new(ep: &Endpoint, opt: &DiskOption) -> Result { - // let root = fs::canonicalize(ep.url.path()).await?; - let root = PathBuf::from(ep.get_file_path()); let addr = if let Some(port) = ep.url.port() { format!("{}://{}:{}", ep.url.scheme(), ep.url.host_str().unwrap(), port) } else { @@ -97,8 +84,6 @@ impl RemoteDisk { let disk = Self { id: Mutex::new(None), addr: addr.clone(), - url: ep.url.clone(), - root, endpoint: ep.clone(), scanning: Arc::new(AtomicU32::new(0)), health_check: opt.health_check && env_health_check, diff --git a/crates/ecstore/src/rpc/remote_locker.rs b/crates/ecstore/src/rpc/remote_locker.rs index ea202de3..f7410a0d 100644 --- a/crates/ecstore/src/rpc/remote_locker.rs +++ b/crates/ecstore/src/rpc/remote_locker.rs @@ -14,9 +14,10 @@ use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client}; use async_trait::async_trait; -use rustfs_lock::types::{LockId, LockMetadata, LockPriority}; -use rustfs_lock::{LockClient, LockError, LockInfo, LockResponse, LockStats, LockStatus, Result}; -use rustfs_lock::{LockRequest, LockType}; +use rustfs_lock::{ + LockClient, LockError, LockInfo, LockRequest, LockResponse, LockStats, LockStatus, LockType, Result, + types::{LockId, LockMetadata, LockPriority}, +}; use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient; use rustfs_protos::proto_gen::node_service::{GenerallyLockRequest, PingRequest}; use std::collections::HashMap; diff --git a/crates/ecstore/src/sets.rs b/crates/ecstore/src/sets.rs index 54c352e4..3b6590be 100644 --- a/crates/ecstore/src/sets.rs +++ b/crates/ecstore/src/sets.rs @@ -13,8 +13,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::{collections::HashMap, sync::Arc}; - use crate::disk::error_reduce::count_errs; use crate::error::{Error, Result}; use crate::store_api::{ListPartsInfo, ObjectInfoOrErr, WalkOptions}; @@ -44,18 +42,17 @@ use rustfs_common::{ heal_channel::{DriveState, HealItemType}, }; use rustfs_filemeta::FileInfo; - use rustfs_lock::FastLockGuard; use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem}; use rustfs_utils::{crc_hash, path::path_join_buf, sip_hash}; +use std::{collections::HashMap, sync::Arc}; use tokio::sync::RwLock; -use tokio_util::sync::CancellationToken; -use uuid::Uuid; - use tokio::sync::broadcast::{Receiver, Sender}; use tokio::time::Duration; +use tokio_util::sync::CancellationToken; use tracing::warn; use tracing::{error, info}; +use uuid::Uuid; #[derive(Debug, Clone)] pub struct Sets { diff --git a/crates/ecstore/src/store.rs b/crates/ecstore/src/store.rs index 645cf7d3..14fb29a8 100644 --- a/crates/ecstore/src/store.rs +++ b/crates/ecstore/src/store.rs @@ -1830,16 +1830,16 @@ impl StorageAPI for ECStore { if self.is_suspended(pool.pool_idx).await { continue; } - match pool + return match pool .list_object_parts(bucket, object, upload_id, part_number_marker, max_parts, opts) .await { - Ok(res) => return Ok(res), + Ok(res) => Ok(res), Err(err) => { if is_err_invalid_upload_id(&err) { continue; } - return Err(err); + Err(err) } }; } @@ -2209,7 +2209,7 @@ impl StorageAPI for ECStore { async fn delete_object_version(&self, bucket: &str, object: &str, fi: &FileInfo, force_del_marker: bool) -> Result<()> { check_del_obj_args(bucket, object)?; - let object = rustfs_utils::path::encode_dir_object(object); + let object = encode_dir_object(object); if self.single_pool() { return self.pools[0] @@ -2329,17 +2329,15 @@ impl StorageAPI for ECStore { // No pool returned a nil error, return the first non 'not found' error for (index, err) in errs.iter().enumerate() { - match err { + return match err { Some(err) => { if is_err_object_not_found(err) || 
is_err_version_not_found(err) { continue; } - return Ok((ress.remove(index), Some(err.clone()))); + Ok((ress.remove(index), Some(err.clone()))) } - None => { - return Ok((ress.remove(index), None)); - } - } + None => Ok((ress.remove(index), None)), + }; } // At this stage, all errors are 'not found' diff --git a/crates/ecstore/src/store_init.rs b/crates/ecstore/src/store_init.rs index 437b5218..31ea864e 100644 --- a/crates/ecstore/src/store_init.rs +++ b/crates/ecstore/src/store_init.rs @@ -27,7 +27,6 @@ use crate::{ }; use futures::future::join_all; use std::collections::{HashMap, hash_map::Entry}; - use tracing::{info, warn}; use uuid::Uuid; diff --git a/crates/ecstore/src/store_list_objects.rs b/crates/ecstore/src/store_list_objects.rs index ff10fc41..aae98d48 100644 --- a/crates/ecstore/src/store_list_objects.rs +++ b/crates/ecstore/src/store_list_objects.rs @@ -410,13 +410,13 @@ impl ECStore { ..Default::default() }; - let mut list_result = match self.list_path(&opts).await { - Ok(res) => res, - Err(err) => MetaCacheEntriesSortedResult { + let mut list_result = self + .list_path(&opts) + .await + .unwrap_or_else(|err| MetaCacheEntriesSortedResult { err: Some(err.into()), ..Default::default() - }, - }; + }); if let Some(err) = list_result.err.clone() && err != rustfs_filemeta::Error::Unexpected @@ -988,7 +988,7 @@ async fn gather_results( } if let Some(marker) = &opts.marker - && &entry.name < marker + && &entry.name <= marker { continue; } @@ -1476,7 +1476,6 @@ mod test { // use crate::error::Error; // use crate::metacache::writer::MetacacheReader; // use crate::set_disk::SetDisks; - // use crate::store::ECStore; // use crate::store_list_objects::ListPathOptions; // use crate::store_list_objects::WalkOptions; // use crate::store_list_objects::WalkVersionsSortOrder; diff --git a/crates/notify/src/integration.rs b/crates/notify/src/integration.rs index ddce7560..43da7649 100644 --- a/crates/notify/src/integration.rs +++ b/crates/notify/src/integration.rs @@ 
-13,6 +13,7 @@ // limitations under the License. use crate::notification_system_subscriber::NotificationSystemSubscriberView; +use crate::notifier::TargetList; use crate::{ Event, error::NotificationError, notifier::EventNotifier, registry::TargetRegistry, rules::BucketNotificationConfig, stream, }; @@ -191,6 +192,22 @@ impl NotificationSystem { self.notifier.target_list().read().await.keys() } + /// Gets the complete Target list, including both active and inactive Targets. + /// + /// # Return + /// An `Arc>` containing all Targets. + pub async fn get_all_targets(&self) -> Arc> { + self.notifier.target_list() + } + + /// Gets all Target values, including both active and inactive Targets. + /// + /// # Return + /// A Vec containing all Targets. + pub async fn get_target_values(&self) -> Vec + Send + Sync>> { + self.notifier.target_list().read().await.values() + } + /// Checks if there are active subscribers for the given bucket and event name. pub async fn has_subscriber(&self, bucket: &str, event: &EventName) -> bool { if !self.subscriber_view.has_subscriber(bucket, event) { diff --git a/crates/notify/src/notifier.rs b/crates/notify/src/notifier.rs index a5c8dd6e..859e64b9 100644 --- a/crates/notify/src/notifier.rs +++ b/crates/notify/src/notifier.rs @@ -370,6 +370,11 @@ impl TargetList { self.targets.keys().cloned().collect() } + /// Returns all targets in the list + pub fn values(&self) -> Vec + Send + Sync>> { + self.targets.values().cloned().collect() + } + /// Returns the number of targets pub fn len(&self) -> usize { self.targets.len() diff --git a/crates/policy/src/policy/action.rs b/crates/policy/src/policy/action.rs index e6ac3f3d..7af21b49 100644 --- a/crates/policy/src/policy/action.rs +++ b/crates/policy/src/policy/action.rs @@ -22,8 +22,8 @@ use strum::{EnumString, IntoStaticStr}; use super::{Error as IamError, Validator, utils::wildcard}; -/// A set of policy actions that serializes as a single string when containing one item, -/// or as an array 
when containing multiple items (matching AWS S3 API format). +/// A set of policy actions that always serializes as an array of strings, +/// conforming to the S3 policy specification for consistency and compatibility. #[derive(Clone, Default, Debug)] pub struct ActionSet(pub HashSet); @@ -34,15 +34,8 @@ impl Serialize for ActionSet { { use serde::ser::SerializeSeq; - if self.0.len() == 1 { - // Serialize single action as string (not array) - if let Some(action) = self.0.iter().next() { - let action_str: &str = action.into(); - return serializer.serialize_str(action_str); - } - } - - // Serialize multiple actions as array + // Always serialize as array, even for single action, to match S3 specification + // and ensure compatibility with AWS SDK clients that expect array format let mut seq = serializer.serialize_seq(Some(self.0.len()))?; for action in &self.0 { let action_str: &str = action.into(); @@ -610,13 +603,17 @@ mod tests { #[test] fn test_actionset_serialize_single_element() { - // Single element should serialize as string + // Single element should serialize as array for S3 specification compliance let mut set = HashSet::new(); set.insert(Action::S3Action(S3Action::GetObjectAction)); let actionset = ActionSet(set); let json = serde_json::to_string(&actionset).expect("Should serialize"); - assert_eq!(json, "\"s3:GetObject\""); + let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse"); + assert!(parsed.is_array(), "Should serialize as array"); + let arr = parsed.as_array().expect("Should be array"); + assert_eq!(arr.len(), 1); + assert_eq!(arr[0].as_str().unwrap(), "s3:GetObject"); } #[test] @@ -636,12 +633,16 @@ mod tests { #[test] fn test_actionset_wildcard_serialization() { - // Wildcard action should serialize correctly + // Wildcard action should serialize as array for S3 specification compliance let mut set = HashSet::new(); set.insert(Action::try_from("*").expect("Should parse wildcard")); let actionset = ActionSet(set); let 
json = serde_json::to_string(&actionset).expect("Should serialize"); - assert_eq!(json, "\"s3:*\""); + let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse"); + assert!(parsed.is_array(), "Should serialize as array"); + let arr = parsed.as_array().expect("Should be array"); + assert_eq!(arr.len(), 1); + assert_eq!(arr[0].as_str().unwrap(), "s3:*"); } } diff --git a/crates/policy/src/policy/policy.rs b/crates/policy/src/policy/policy.rs index 7eb9e2a3..46ccf984 100644 --- a/crates/policy/src/policy/policy.rs +++ b/crates/policy/src/policy/policy.rs @@ -1119,7 +1119,7 @@ mod test { } #[test] - fn test_bucket_policy_serialize_single_action_as_string() { + fn test_bucket_policy_serialize_single_action_as_array() { use crate::policy::action::{Action, ActionSet, S3Action}; use crate::policy::resource::{Resource, ResourceSet}; use crate::policy::{Effect, Principal}; @@ -1153,8 +1153,10 @@ mod test { let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse"); let action = &parsed["Statement"][0]["Action"]; - // Single action should be serialized as string - assert!(action.is_string(), "Single action should serialize as string"); - assert_eq!(action.as_str().unwrap(), "s3:ListBucket"); + // Single action should be serialized as array for S3 specification compliance + assert!(action.is_array(), "Single action should serialize as array"); + let arr = action.as_array().expect("Should be array"); + assert_eq!(arr.len(), 1); + assert_eq!(arr[0].as_str().unwrap(), "s3:ListBucket"); } } diff --git a/crates/utils/src/os/mod.rs b/crates/utils/src/os/mod.rs index e1984580..d88bd513 100644 --- a/crates/utils/src/os/mod.rs +++ b/crates/utils/src/os/mod.rs @@ -16,6 +16,7 @@ mod linux; #[cfg(all(unix, not(target_os = "linux")))] mod unix; + #[cfg(target_os = "windows")] mod windows; diff --git a/crates/utils/src/os/unix.rs b/crates/utils/src/os/unix.rs index dce38318..3b1c3575 100644 --- a/crates/utils/src/os/unix.rs +++ 
b/crates/utils/src/os/unix.rs @@ -13,56 +13,19 @@ // limitations under the License. use super::{DiskInfo, IOStats}; -use nix::sys::statfs::Statfs; -use nix::sys::{stat::stat, statfs::statfs}; +use nix::sys::{stat::stat, statvfs::statvfs}; use std::io::Error; use std::path::Path; -// FreeBSD and OpenBSD return a signed integer for blocks_available. -// Cast to an unsigned integer to use with DiskInfo. -#[cfg(any(target_os = "freebsd", target_os = "openbsd"))] -fn blocks_available(stat: &Statfs) -> u64 { - match stat.blocks_available().try_into() { - Ok(bavail) => bavail, - Err(e) => { - tracing::warn!("blocks_available returned a negative value: Using 0 as fallback. {}", e); - 0 - } - } -} - -// FreeBSD returns a signed integer for files_free. Cast to an unsigned integer -// to use with DiskInfo -#[cfg(target_os = "freebsd")] -fn files_free(stat: &Statfs) -> u64 { - match stat.files_free().try_into() { - Ok(files_free) => files_free, - Err(e) => { - tracing::warn!("files_free returned a negative value: Using 0 as fallback. {}", e); - 0 - } - } -} - -#[cfg(not(target_os = "freebsd"))] -fn files_free(stat: &Statfs) -> u64 { - stat.files_free() -} - -#[cfg(not(any(target_os = "freebsd", target_os = "openbsd")))] -fn blocks_available(stat: &Statfs) -> u64 { - stat.blocks_available() -} - /// Returns total and free bytes available in a directory, e.g. `/`. 
pub fn get_info(p: impl AsRef) -> std::io::Result { let path_display = p.as_ref().display(); - let stat = statfs(p.as_ref())?; + let stat = statvfs(p.as_ref())?; - let bsize = stat.block_size() as u64; - let bfree = stat.blocks_free(); - let bavail = blocks_available(&stat); - let blocks = stat.blocks(); + let bsize = stat.block_size(); + let bfree = stat.blocks_free() as u64; + let bavail = stat.blocks_available() as u64; + let blocks = stat.blocks() as u64; let reserved = match bfree.checked_sub(bavail) { Some(reserved) => reserved, @@ -96,9 +59,9 @@ pub fn get_info(p: impl AsRef) -> std::io::Result { total, free, used, - files: stat.files(), - ffree: files_free(&stat), - fstype: stat.filesystem_type_name().to_string(), + files: stat.files() as u64, + ffree: stat.files_free() as u64, + // Statvfs does not provide a way to return the filesystem type name. ..Default::default() }) } diff --git a/crates/utils/src/sys/user_agent.rs b/crates/utils/src/sys/user_agent.rs index 28486e89..28ed7dd6 100644 --- a/crates/utils/src/sys/user_agent.rs +++ b/crates/utils/src/sys/user_agent.rs @@ -13,9 +13,11 @@ // limitations under the License. use rustfs_config::VERSION; +use std::borrow::Cow; use std::env; use std::fmt; -#[cfg(not(any(target_os = "openbsd", target_os = "freebsd")))] +use std::sync::OnceLock; +#[cfg(not(target_os = "openbsd"))] use sysinfo::System; /// Business Type Enumeration @@ -25,7 +27,7 @@ pub enum ServiceType { Core, Event, Logger, - Custom(String), + Custom(Cow<'static, str>), } impl ServiceType { @@ -35,71 +37,65 @@ impl ServiceType { ServiceType::Core => "core", ServiceType::Event => "event", ServiceType::Logger => "logger", - ServiceType::Custom(s) => s.as_str(), + ServiceType::Custom(s) => s, } } } /// UserAgent structure to hold User-Agent information /// including OS platform, architecture, version, and service type. -/// It provides methods to generate a formatted User-Agent string.
-/// # Examples -/// ``` -/// use rustfs_utils::{get_user_agent, ServiceType}; -/// -/// let ua = get_user_agent(ServiceType::Core); -/// println!("User-Agent: {}", ua); -/// ``` #[derive(Debug)] struct UserAgent { - os_platform: String, - arch: String, - version: String, + os_platform: &'static str, + arch: &'static str, + version: &'static str, service: ServiceType, } +static OS_PLATFORM: OnceLock = OnceLock::new(); + impl UserAgent { /// Create a new UserAgent instance and accept business type parameters - /// - /// # Arguments - /// * `service` - The type of service for which the User-Agent is being created. - /// # Returns - /// A new instance of `UserAgent` with the current OS platform, architecture, version, and service type. fn new(service: ServiceType) -> Self { - let os_platform = Self::get_os_platform(); - let arch = env::consts::ARCH.to_string(); - let version = VERSION.to_string(); - UserAgent { - os_platform, - arch, - version, + os_platform: Self::get_os_platform(), + arch: env::consts::ARCH, + version: VERSION, service, } } - /// Obtain operating system platform information - fn get_os_platform() -> String { - if cfg!(target_os = "windows") { - Self::get_windows_platform() - } else if cfg!(target_os = "macos") { - Self::get_macos_platform() - } else if cfg!(target_os = "linux") { - Self::get_linux_platform() - } else { - "Unknown".to_string() - } + /// Obtain operating system platform information using a thread-safe cache. + /// + /// The value is computed once on first use via `OnceLock` and then reused + /// for all subsequent calls for the lifetime of the program. 
+ fn get_os_platform() -> &'static str { + OS_PLATFORM.get_or_init(|| { + if cfg!(target_os = "windows") { + Self::get_windows_platform() + } else if cfg!(target_os = "macos") { + Self::get_macos_platform() + } else if cfg!(target_os = "linux") { + Self::get_linux_platform() + } else if cfg!(target_os = "freebsd") { + Self::get_freebsd_platform() + } else if cfg!(target_os = "netbsd") { + Self::get_netbsd_platform() + } else { + "Unknown".to_string() + } + }) } /// Get Windows platform information #[cfg(windows)] fn get_windows_platform() -> String { - // Priority to using sysinfo to get versions - let version = match System::os_version() { - Some(version) => version, - None => "Windows NT Unknown".to_string(), - }; - format!("Windows NT {version}") + let version = System::os_version().unwrap_or_else(|| "NT Unknown".to_string()); + if version.starts_with("Windows") { + version + } else { + format!("Windows NT {version}") + } } #[cfg(not(windows))] @@ -110,16 +106,14 @@ impl UserAgent { /// Get macOS platform information #[cfg(target_os = "macos")] fn get_macos_platform() -> String { - let binding = System::os_version().unwrap_or("14.5.0".to_string()); - let version = binding.split('.').collect::>(); - let major = version.first().unwrap_or(&"14").to_string(); - let minor = version.get(1).unwrap_or(&"5").to_string(); - let patch = version.get(2).unwrap_or(&"0").to_string(); + let version_str = System::os_version().unwrap_or_else(|| "14.0.0".to_string()); + let mut parts = version_str.split('.'); + let major = parts.next().unwrap_or("14"); + let minor = parts.next().unwrap_or("0"); + let patch = parts.next().unwrap_or("0"); - let arch = env::consts::ARCH; - let cpu_info = if arch == "aarch64" { "Apple" } else { "Intel" }; + let cpu_info = if env::consts::ARCH == "aarch64" { "Apple" } else { "Intel" }; - // Convert to User-Agent format format!("Macintosh; {cpu_info} Mac OS X {major}_{minor}_{patch}") } @@ -131,40 +125,47 @@ impl UserAgent { /// Get Linux platform 
information #[cfg(target_os = "linux")] fn get_linux_platform() -> String { - format!("X11; {}", System::long_os_version().unwrap_or("Linux Unknown".to_string())) + let os_name = System::long_os_version().unwrap_or_else(|| "Linux Unknown".to_string()); + format!("X11; {os_name}") } #[cfg(not(target_os = "linux"))] fn get_linux_platform() -> String { "N/A".to_string() } + + #[cfg(target_os = "freebsd")] + fn get_freebsd_platform() -> String { + format!("FreeBSD; {}", env::consts::ARCH) + } + + #[cfg(not(target_os = "freebsd"))] + fn get_freebsd_platform() -> String { + "N/A".to_string() + } + + #[cfg(target_os = "netbsd")] + fn get_netbsd_platform() -> String { + format!("NetBSD; {}", env::consts::ARCH) + } + + #[cfg(not(target_os = "netbsd"))] + fn get_netbsd_platform() -> String { + "N/A".to_string() + } } -/// Implement Display trait to format User-Agent impl fmt::Display for UserAgent { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.service == ServiceType::Basis { - return write!(f, "Mozilla/5.0 ({}; {}) RustFS/{}", self.os_platform, self.arch, self.version); + write!(f, "Mozilla/5.0 ({}; {}) RustFS/{}", self.os_platform, self.arch, self.version)?; + if self.service != ServiceType::Basis { + write!(f, " ({})", self.service.as_str())?; } - write!( - f, - "Mozilla/5.0 ({}; {}) RustFS/{} ({})", - self.os_platform, - self.arch, - self.version, - self.service.as_str() - ) + Ok(()) } } /// Get the User-Agent string and accept business type parameters -/// -/// # Arguments -/// * `service` - The type of service for which the User-Agent is being created. -/// -/// # Returns -/// A formatted User-Agent string. 
-/// pub fn get_user_agent(service: ServiceType) -> String { UserAgent::new(service).to_string() } @@ -173,58 +174,33 @@ pub fn get_user_agent(service: ServiceType) -> String { mod tests { use super::*; use rustfs_config::VERSION; - use tracing::debug; + #[test] fn test_user_agent_format_basis() { let ua = get_user_agent(ServiceType::Basis); assert!(ua.starts_with("Mozilla/5.0")); - assert!(ua.contains(&format!("RustFS/{VERSION}").to_string())); - debug!("Basic User-Agent: {}", ua); + assert!(ua.contains(&format!("RustFS/{VERSION}"))); + assert!(!ua.contains("(basis)")); } #[test] fn test_user_agent_format_core() { let ua = get_user_agent(ServiceType::Core); - assert!(ua.starts_with("Mozilla/5.0")); - assert!(ua.contains(&format!("RustFS/{VERSION} (core)").to_string())); - debug!("Core User-Agent: {}", ua); - } - - #[test] - fn test_user_agent_format_event() { - let ua = get_user_agent(ServiceType::Event); - assert!(ua.starts_with("Mozilla/5.0")); - assert!(ua.contains(&format!("RustFS/{VERSION} (event)").to_string())); - debug!("Event User-Agent: {}", ua); - } - - #[test] - fn test_user_agent_format_logger() { - let ua = get_user_agent(ServiceType::Logger); - assert!(ua.starts_with("Mozilla/5.0")); - assert!(ua.contains(&format!("RustFS/{VERSION} (logger)").to_string())); - debug!("Logger User-Agent: {}", ua); + assert!(ua.contains(&format!("RustFS/{VERSION} (core)"))); } #[test] fn test_user_agent_format_custom() { - let ua = get_user_agent(ServiceType::Custom("monitor".to_string())); - assert!(ua.starts_with("Mozilla/5.0")); - assert!(ua.contains(&format!("RustFS/{VERSION} (monitor)").to_string())); - debug!("Monitor User-Agent: {}", ua); + let ua = get_user_agent(ServiceType::Custom("monitor".into())); + assert!(ua.contains(&format!("RustFS/{VERSION} (monitor)"))); } #[test] - fn test_all_service_type() { - // Example: Generate User-Agents of Different Business Types - let ua_core = get_user_agent(ServiceType::Core); - let ua_event = 
get_user_agent(ServiceType::Event); - let ua_logger = get_user_agent(ServiceType::Logger); - let ua_custom = get_user_agent(ServiceType::Custom("monitor".to_string())); - - debug!("Core User-Agent: {}", ua_core); - debug!("Event User-Agent: {}", ua_event); - debug!("Logger User-Agent: {}", ua_logger); - debug!("Custom User-Agent: {}", ua_custom); + fn test_os_platform_caching() { + let ua1 = UserAgent::new(ServiceType::Basis); + let ua2 = UserAgent::new(ServiceType::Basis); + assert_eq!(ua1.os_platform, ua2.os_platform); + // Ensure they point to the same static memory + assert!(std::ptr::eq(ua1.os_platform.as_ptr(), ua2.os_platform.as_ptr())); } } diff --git a/docker-compose-simple.yml b/docker-compose-simple.yml index 9b409f43..ed60d261 100644 --- a/docker-compose-simple.yml +++ b/docker-compose-simple.yml @@ -40,7 +40,7 @@ services: [ "CMD", "sh", "-c", - "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health" + "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/rustfs/console/health" ] interval: 30s timeout: 10s diff --git a/docker-compose.yml b/docker-compose.yml index 2dd53a8c..2fcebd53 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -52,7 +52,7 @@ services: [ "CMD", "sh", "-c", - "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health" + "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/rustfs/console/health" ] interval: 30s timeout: 10s @@ -99,7 +99,7 @@ services: [ "CMD", "sh", "-c", - "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health" + "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/rustfs/console/health" ] interval: 30s timeout: 10s diff --git a/docs/ansible/docker-compose-mnmd.yml b/docs/ansible/docker-compose-mnmd.yml index 89b4cb56..fbc3159f 100644 --- a/docs/ansible/docker-compose-mnmd.yml +++ b/docs/ansible/docker-compose-mnmd.yml @@ -63,7 +63,7 @@ test: [ 
"CMD-SHELL", - "curl -f http://localhost:9000/health && curl -f http://localhost:9001/health || exit 1" + "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/health || exit 1" ] interval: 10s timeout: 5s diff --git a/docs/examples/docker/docker-comprehensive.yml b/docs/examples/docker/docker-comprehensive.yml index a87a0d94..7a3ddd43 100644 --- a/docs/examples/docker/docker-comprehensive.yml +++ b/docs/examples/docker/docker-comprehensive.yml @@ -15,7 +15,7 @@ services: - RUSTFS_ADDRESS=0.0.0.0:9000 - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001 - RUSTFS_EXTERNAL_ADDRESS=:9000 - - RUSTFS_CORS_ALLOWED_ORIGINS=http://localhost:9001 + - RUSTFS_CORS_ALLOWED_ORIGINS=http://127.0.0.1:9001 - RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS=* - RUSTFS_ACCESS_KEY=admin - RUSTFS_SECRET_KEY=password @@ -25,7 +25,7 @@ services: - rustfs-network restart: unless-stopped healthcheck: - test: [ "CMD", "sh", "-c", "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health" ] + test: [ "CMD", "sh", "-c", "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/rustfs/console/health" ] interval: 30s timeout: 10s retries: 3 @@ -56,7 +56,7 @@ services: - rustfs-network restart: unless-stopped healthcheck: - test: [ "CMD", "sh", "-c", "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health" ] + test: [ "CMD", "sh", "-c", "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/rustfs/console/health" ] interval: 30s timeout: 10s retries: 3 @@ -92,7 +92,7 @@ services: - rustfs_secret_key restart: unless-stopped healthcheck: - test: [ "CMD", "sh", "-c", "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health" ] + test: [ "CMD", "sh", "-c", "curl -f http://127.0.0.1:9000/health && curl -f http://127.0.0.1:9001/rustfs/console/health" ] interval: 30s timeout: 10s retries: 3 @@ -127,7 +127,7 @@ services: - rustfs_enterprise_secret_key restart: unless-stopped 
healthcheck: - test: [ "CMD", "sh", "-c", "curl -f http://localhost:9000/health && curl -k -f https://localhost:9001/rustfs/console/health" ] + test: [ "CMD", "sh", "-c", "curl -f http://127.0.0.1:9000/health && curl -k -f https://127.0.0.1:9001/rustfs/console/health" ] interval: 30s timeout: 10s retries: 3 @@ -152,7 +152,7 @@ services: - rustfs-network restart: unless-stopped healthcheck: - test: [ "CMD", "curl", "-f", "http://localhost:9000/health" ] + test: [ "CMD", "curl", "-f", "http://127.0.0.1:9000/health" ] interval: 30s timeout: 10s retries: 3 diff --git a/helm/README.md b/helm/README.md index 2d635767..1ace7039 100644 --- a/helm/README.md +++ b/helm/README.md @@ -114,6 +114,8 @@ RustFS helm chart supports **standalone and distributed mode**. For standalone m | gatewayApi.gatewayClass | string | `traefik` | Gateway class implementation. | | gatewayApi.hostname | string | Hostname to access RustFS via gateway api. | | gatewayApi.secretName | string | Secret tls to via RustFS using HTTPS. | +| gatewayApi.existingGateway.name | string | `""` | The existing gateway name, instead of creating a new one. | +| gatewayApi.existingGateway.namespace | string | `""` | The namespace of the existing gateway, if not the local namespace. 
| --- diff --git a/helm/rustfs/templates/gateway-api/gateway.yml b/helm/rustfs/templates/gateway-api/gateway.yml index 8b50f5ae..fde30aa1 100644 --- a/helm/rustfs/templates/gateway-api/gateway.yml +++ b/helm/rustfs/templates/gateway-api/gateway.yml @@ -1,4 +1,4 @@ -{{- if .Values.gatewayApi.enabled }} +{{- if and .Values.gatewayApi.enabled (empty .Values.gatewayApi.existingGateway.name) }} apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: diff --git a/helm/rustfs/templates/gateway-api/httproute.yml b/helm/rustfs/templates/gateway-api/httproute.yml index 9ac5d968..edd0b517 100644 --- a/helm/rustfs/templates/gateway-api/httproute.yml +++ b/helm/rustfs/templates/gateway-api/httproute.yml @@ -5,7 +5,14 @@ metadata: name: {{ include "rustfs.fullname" . }}-route spec: parentRefs: + {{- if .Values.gatewayApi.existingGateway.name }} + - name: {{ .Values.gatewayApi.existingGateway.name }} + {{- if .Values.gatewayApi.existingGateway.namespace }} + namespace: {{ .Values.gatewayApi.existingGateway.namespace }} + {{- end }} + {{- else }} - name: {{ include "rustfs.fullname" . }}-gateway + {{- end }} hostnames: - {{ .Values.gatewayApi.hostname }} rules: diff --git a/helm/rustfs/templates/pvc.yaml b/helm/rustfs/templates/pvc.yaml index 849da8f2..1942a685 100644 --- a/helm/rustfs/templates/pvc.yaml +++ b/helm/rustfs/templates/pvc.yaml @@ -1,32 +1,39 @@ {{- if .Values.mode.standalone.enabled }} +{{- with .Values.storageclass }} apiVersion: v1 kind: PersistentVolumeClaim metadata: annotations: helm.sh/resource-policy: keep - name: {{ include "rustfs.fullname" . }}-data + name: {{ include "rustfs.fullname" $ }}-data labels: - {{- toYaml .Values.commonLabels | nindent 4 }} + {{- toYaml $.Values.commonLabels | nindent 4 }} spec: - accessModes: ["ReadWriteOnce"] - storageClassName: {{ .Values.storageclass.name }} + accessModes: [ ReadWriteOnce ] + {{- with .name }} + storageClassName: {{ . 
}} + {{- end }} resources: requests: - storage: {{ .Values.storageclass.dataStorageSize }} + storage: {{ .dataStorageSize }} --- + apiVersion: v1 kind: PersistentVolumeClaim metadata: annotations: helm.sh/resource-policy: keep - name: {{ include "rustfs.fullname" . }}-logs + name: {{ include "rustfs.fullname" $ }}-logs labels: - {{- toYaml .Values.commonLabels | nindent 4 }} + {{- toYaml $.Values.commonLabels | nindent 4 }} spec: - accessModes: ["ReadWriteOnce"] - storageClassName: {{ .Values.storageclass.name }} + accessModes: [ ReadWriteOnce ] + {{- with .name }} + storageClassName: {{ . }} + {{- end }} resources: requests: - storage: {{ .Values.storageclass.logStorageSize }} + storage: {{ .logStorageSize }} +{{- end }} {{- end }} diff --git a/helm/rustfs/templates/secret-tls.yaml b/helm/rustfs/templates/secret-tls.yaml index fea2cf58..730f5766 100644 --- a/helm/rustfs/templates/secret-tls.yaml +++ b/helm/rustfs/templates/secret-tls.yaml @@ -1,4 +1,4 @@ -{{- if and (or .Values.gatewayApi.enabled .Values.ingress.tls.enabled) (not .Values.ingress.tls.certManager.enabled) }} +{{- if and (or .Values.gatewayApi.enabled .Values.ingress.tls.enabled) (not (or .Values.ingress.tls.certManager.enabled .Values.gatewayApi.existingGateway.name)) }} apiVersion: v1 kind: Secret metadata: diff --git a/helm/rustfs/values.yaml b/helm/rustfs/values.yaml index d8017cc0..eb772251 100644 --- a/helm/rustfs/values.yaml +++ b/helm/rustfs/values.yaml @@ -140,6 +140,9 @@ gatewayApi: gatewayClass: traefik hostname: example.rustfs.com secretName: secret-tls + existingGateway: + name: "" + namespace: "" resources: # We usually recommend not to specify default resources and to leave this as a conscious diff --git a/rustfs/src/admin/console.rs b/rustfs/src/admin/console.rs index 6e5c9cc1..07046d40 100644 --- a/rustfs/src/admin/console.rs +++ b/rustfs/src/admin/console.rs @@ -269,6 +269,7 @@ async fn version_handler() -> impl IntoResponse { /// - 200 OK with JSON body containing the console 
configuration if initialized. /// - 500 Internal Server Error if configuration is not initialized. #[instrument(fields(uri))] +#[allow(dead_code)] async fn config_handler(uri: Uri, headers: HeaderMap) -> impl IntoResponse { // Get the scheme from the headers or use the URI scheme let scheme = headers @@ -482,7 +483,6 @@ fn setup_console_middleware_stack( let mut app = Router::new() .route(FAVICON_PATH, get(static_handler)) .route(&format!("{CONSOLE_PREFIX}/license"), get(license_handler)) - .route(&format!("{CONSOLE_PREFIX}/config.json"), get(config_handler)) .route(&format!("{CONSOLE_PREFIX}/version"), get(version_handler)) .route(&format!("{CONSOLE_PREFIX}{HEALTH_PREFIX}"), get(health_check).head(health_check)) .nest(CONSOLE_PREFIX, Router::new().fallback_service(get(static_handler))) diff --git a/rustfs/src/admin/handlers/event.rs b/rustfs/src/admin/handlers/event.rs index a8b93227..eca0ad00 100644 --- a/rustfs/src/admin/handlers/event.rs +++ b/rustfs/src/admin/handlers/event.rs @@ -14,21 +14,24 @@ use crate::admin::router::Operation; use crate::auth::{check_key_valid, get_session_token}; +use futures::stream::{FuturesUnordered, StreamExt}; use http::{HeaderMap, StatusCode}; use matchit::Params; use rustfs_config::notify::{NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS}; use rustfs_config::{ENABLE_KEY, EnableState, MAX_ADMIN_REQUEST_BODY_SIZE}; use rustfs_targets::check_mqtt_broker_available; -use s3s::header::CONTENT_LENGTH; -use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error}; +use s3s::{Body, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error}; use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; use std::future::Future; use std::io::{Error, ErrorKind}; use std::net::SocketAddr; use std::path::Path; +use std::sync::Arc; use tokio::net::lookup_host; -use tokio::time::{Duration, sleep}; -use tracing::{Span, debug, error, info, warn}; +use tokio::sync::Semaphore; +use 
tokio::time::{Duration, sleep, timeout}; +use tracing::{Span, info, warn}; use url::Url; #[derive(Debug, Deserialize)] @@ -54,12 +57,34 @@ struct NotificationEndpointsResponse { notification_endpoints: Vec, } +// --- Helper Functions --- + +async fn check_permissions(req: &S3Request) -> S3Result<()> { + let Some(input_cred) = &req.credentials else { + return Err(s3_error!(InvalidRequest, "credentials not found")); + }; + check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?; + Ok(()) +} + +fn get_notification_system() -> S3Result> { + rustfs_notify::notification_system().ok_or_else(|| s3_error!(InternalError, "notification system not initialized")) +} + +fn build_response(status: StatusCode, body: Body, request_id: Option<&http::HeaderValue>) -> S3Response<(StatusCode, Body)> { + let mut header = HeaderMap::new(); + header.insert(CONTENT_TYPE, "application/json".parse().unwrap()); + if let Some(v) = request_id { + header.insert("x-request-id", v.clone()); + } + S3Response::with_headers((status, body), header) +} + async fn retry_with_backoff(mut operation: F, max_attempts: usize, base_delay: Duration) -> Result where F: FnMut() -> Fut, Fut: Future>, { - assert!(max_attempts > 0, "max_attempts must be greater than 0"); let mut attempts = 0; let mut delay = base_delay; let mut last_err = None; @@ -71,13 +96,6 @@ where last_err = Some(e); attempts += 1; if attempts < max_attempts { - warn!( - "Retry attempt {}/{} failed: {}. 
Retrying in {:?}", - attempts, - max_attempts, - last_err.as_ref().unwrap(), - delay - ); sleep(delay).await; delay = delay.saturating_mul(2); } @@ -87,130 +105,73 @@ where Err(last_err.unwrap_or_else(|| Error::other("retry_with_backoff: unknown error"))) } -async fn retry_metadata(path: &str) -> Result<(), Error> { - retry_with_backoff(|| async { tokio::fs::metadata(path).await.map(|_| ()) }, 3, Duration::from_millis(100)).await -} - async fn validate_queue_dir(queue_dir: &str) -> S3Result<()> { if !queue_dir.is_empty() { if !Path::new(queue_dir).is_absolute() { return Err(s3_error!(InvalidArgument, "queue_dir must be absolute path")); } - - if let Err(e) = retry_metadata(queue_dir).await { - return match e.kind() { - ErrorKind::NotFound => Err(s3_error!(InvalidArgument, "queue_dir does not exist")), - ErrorKind::PermissionDenied => Err(s3_error!(InvalidArgument, "queue_dir exists but permission denied")), - _ => Err(s3_error!(InvalidArgument, "failed to access queue_dir: {}", e)), - }; - } - } - - Ok(()) -} - -fn validate_cert_key_pair(cert: &Option, key: &Option) -> S3Result<()> { - if cert.is_some() != key.is_some() { - return Err(s3_error!(InvalidArgument, "client_cert and client_key must be specified as a pair")); + retry_with_backoff( + || async { tokio::fs::metadata(queue_dir).await.map(|_| ()) }, + 3, + Duration::from_millis(100), + ) + .await + .map_err(|e| match e.kind() { + ErrorKind::NotFound => s3_error!(InvalidArgument, "queue_dir does not exist"), + ErrorKind::PermissionDenied => s3_error!(InvalidArgument, "queue_dir exists but permission denied"), + _ => s3_error!(InvalidArgument, "failed to access queue_dir: {}", e), + })?; } Ok(()) } -/// Set (create or update) a notification target +// --- Operations --- + pub struct NotificationTarget {} #[async_trait::async_trait] impl Operation for NotificationTarget { async fn call(&self, req: S3Request, params: Params<'_, '_>) -> S3Result> { let span = Span::current(); let _enter = span.enter(); - // 1. 
Analyze query parameters let (target_type, target_name) = extract_target_params(¶ms)?; - // 2. Permission verification - let Some(input_cred) = &req.credentials else { - return Err(s3_error!(InvalidRequest, "credentials not found")); - }; - let (_cred, _owner) = - check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?; + check_permissions(&req).await?; + let ns = get_notification_system()?; - // 3. Get notification system instance - let Some(ns) = rustfs_notify::notification_system() else { - return Err(s3_error!(InternalError, "notification system not initialized")); - }; - - // 4. The parsing request body is KVS (Key-Value Store) let mut input = req.input; - let body = input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await.map_err(|e| { + let body_bytes = input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await.map_err(|e| { warn!("failed to read request body: {:?}", e); s3_error!(InvalidRequest, "failed to read request body") })?; - // 1. Get the allowed key range - let allowed_keys: std::collections::HashSet<&str> = match target_type { + let notification_body: NotificationTargetBody = serde_json::from_slice(&body_bytes) + .map_err(|e| s3_error!(InvalidArgument, "invalid json body for target config: {}", e))?; + + let allowed_keys: HashSet<&str> = match target_type { NOTIFY_WEBHOOK_SUB_SYS => rustfs_config::notify::NOTIFY_WEBHOOK_KEYS.iter().cloned().collect(), NOTIFY_MQTT_SUB_SYS => rustfs_config::notify::NOTIFY_MQTT_KEYS.iter().cloned().collect(), _ => unreachable!(), }; - let notification_body: NotificationTargetBody = serde_json::from_slice(&body) - .map_err(|e| s3_error!(InvalidArgument, "invalid json body for target config: {}", e))?; + let kv_map: HashMap<&str, &str> = notification_body + .key_values + .iter() + .map(|kv| (kv.key.as_str(), kv.value.as_str())) + .collect(); - // 2. 
Filter and verify keys, and splice target_name - let mut kvs_vec = Vec::new(); - let mut endpoint_val = None; - let mut queue_dir_val = None; - let mut client_cert_val = None; - let mut client_key_val = None; - let mut qos_val = None; - let mut topic_val = String::new(); - - for kv in notification_body.key_values.iter() { - if !allowed_keys.contains(kv.key.as_str()) { - return Err(s3_error!( - InvalidArgument, - "key '{}' not allowed for target type '{}'", - kv.key, - target_type - )); + // Validate keys + for key in kv_map.keys() { + if !allowed_keys.contains(key) { + return Err(s3_error!(InvalidArgument, "key '{}' not allowed for target type '{}'", key, target_type)); } - if kv.key == "endpoint" { - endpoint_val = Some(kv.value.clone()); - } - - if target_type == NOTIFY_MQTT_SUB_SYS { - if kv.key == rustfs_config::MQTT_BROKER { - endpoint_val = Some(kv.value.clone()); - } - if kv.key == rustfs_config::MQTT_TOPIC { - topic_val = kv.value.clone(); - } - } - - if kv.key == "queue_dir" { - queue_dir_val = Some(kv.value.clone()); - } - if kv.key == "client_cert" { - client_cert_val = Some(kv.value.clone()); - } - if kv.key == "client_key" { - client_key_val = Some(kv.value.clone()); - } - if kv.key == "qos" { - qos_val = Some(kv.value.clone()); - } - - kvs_vec.push(rustfs_ecstore::config::KV { - key: kv.key.clone(), - value: kv.value.clone(), - hidden_if_empty: false, - }); } + // Type-specific validation if target_type == NOTIFY_WEBHOOK_SUB_SYS { - let endpoint = endpoint_val - .clone() + let endpoint = kv_map + .get("endpoint") .ok_or_else(|| s3_error!(InvalidArgument, "endpoint is required"))?; - let url = Url::parse(&endpoint).map_err(|e| s3_error!(InvalidArgument, "invalid endpoint url: {}", e))?; + let url = Url::parse(endpoint).map_err(|e| s3_error!(InvalidArgument, "invalid endpoint url: {}", e))?; let host = url .host_str() .ok_or_else(|| s3_error!(InvalidArgument, "endpoint missing host"))?; @@ -218,207 +179,147 @@ impl Operation for NotificationTarget { 
.port_or_known_default() .ok_or_else(|| s3_error!(InvalidArgument, "endpoint missing port"))?; let addr = format!("{host}:{port}"); - // First, try to parse as SocketAddr (IP:port) - if addr.parse::().is_err() { - // If not an IP:port, try DNS resolution - if lookup_host(&addr).await.is_err() { - return Err(s3_error!(InvalidArgument, "invalid or unresolvable endpoint address")); - } + if addr.parse::().is_err() && lookup_host(&addr).await.is_err() { + return Err(s3_error!(InvalidArgument, "invalid or unresolvable endpoint address")); } - if let Some(queue_dir) = queue_dir_val.clone() { - validate_queue_dir(&queue_dir).await?; + if let Some(queue_dir) = kv_map.get("queue_dir") { + validate_queue_dir(queue_dir).await?; } - validate_cert_key_pair(&client_cert_val, &client_key_val)?; - } + if kv_map.contains_key("client_cert") != kv_map.contains_key("client_key") { + return Err(s3_error!(InvalidArgument, "client_cert and client_key must be specified as a pair")); + } + } else if target_type == NOTIFY_MQTT_SUB_SYS { + let endpoint = kv_map + .get(rustfs_config::MQTT_BROKER) + .ok_or_else(|| s3_error!(InvalidArgument, "broker endpoint is required"))?; + let topic = kv_map + .get(rustfs_config::MQTT_TOPIC) + .ok_or_else(|| s3_error!(InvalidArgument, "topic is required"))?; + check_mqtt_broker_available(endpoint, topic) + .await + .map_err(|e| s3_error!(InvalidArgument, "MQTT Broker unavailable: {}", e))?; - if target_type == NOTIFY_MQTT_SUB_SYS { - let endpoint = endpoint_val.ok_or_else(|| s3_error!(InvalidArgument, "broker endpoint is required"))?; - if topic_val.is_empty() { - return Err(s3_error!(InvalidArgument, "topic is required")); - } - // Check MQTT Broker availability - if let Err(e) = check_mqtt_broker_available(&endpoint, &topic_val).await { - return Err(s3_error!(InvalidArgument, "MQTT Broker unavailable: {}", e)); - } - - if let Some(queue_dir) = queue_dir_val { - validate_queue_dir(&queue_dir).await?; - if let Some(qos) = qos_val { + if let Some(queue_dir) 
= kv_map.get("queue_dir") { + validate_queue_dir(queue_dir).await?; + if let Some(qos) = kv_map.get("qos") { match qos.parse::() { - Ok(qos_int) if qos_int == 1 || qos_int == 2 => {} - Ok(0) => { - return Err(s3_error!(InvalidArgument, "qos should be 1 or 2 if queue_dir is set")); - } - _ => { - return Err(s3_error!(InvalidArgument, "qos must be an integer 0, 1, or 2")); - } + Ok(1) | Ok(2) => {} + Ok(0) => return Err(s3_error!(InvalidArgument, "qos should be 1 or 2 if queue_dir is set")), + _ => return Err(s3_error!(InvalidArgument, "qos must be an integer 0, 1, or 2")), } } } } - // 3. Add ENABLE_KEY + let mut kvs_vec: Vec<_> = notification_body + .key_values + .into_iter() + .map(|kv| rustfs_ecstore::config::KV { + key: kv.key, + value: kv.value, + hidden_if_empty: false, + }) + .collect(); + kvs_vec.push(rustfs_ecstore::config::KV { key: ENABLE_KEY.to_string(), value: EnableState::On.to_string(), hidden_if_empty: false, }); - let kvs = rustfs_ecstore::config::KVS(kvs_vec); - - // 5. 
Call notification system to set target configuration info!("Setting target config for type '{}', name '{}'", target_type, target_name); - ns.set_target_config(target_type, target_name, kvs).await.map_err(|e| { - error!("failed to set target config: {}", e); - S3Error::with_message(S3ErrorCode::InternalError, format!("failed to set target config: {e}")) - })?; + ns.set_target_config(target_type, target_name, rustfs_ecstore::config::KVS(kvs_vec)) + .await + .map_err(|e| s3_error!(InternalError, "failed to set target config: {}", e))?; - let mut header = HeaderMap::new(); - header.insert(CONTENT_TYPE, "application/json".parse().unwrap()); - header.insert(CONTENT_LENGTH, "0".parse().unwrap()); - if let Some(v) = req.headers.get("x-request-id") { - header.insert("x-request-id", v.clone()); - } - Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)) + Ok(build_response(StatusCode::OK, Body::empty(), req.headers.get("x-request-id"))) } } -/// Get a list of notification targets for all activities pub struct ListNotificationTargets {} #[async_trait::async_trait] impl Operation for ListNotificationTargets { async fn call(&self, req: S3Request, _params: Params<'_, '_>) -> S3Result> { let span = Span::current(); let _enter = span.enter(); - debug!("ListNotificationTargets call start request params: {:?}", req.uri.query()); + check_permissions(&req).await?; + let ns = get_notification_system()?; - // 1. Permission verification - let Some(input_cred) = &req.credentials else { - return Err(s3_error!(InvalidRequest, "credentials not found")); - }; - let (_cred, _owner) = - check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?; + let targets = ns.get_target_values().await; + let target_count = targets.len(); - // 2. 
Get notification system instance - let Some(ns) = rustfs_notify::notification_system() else { - return Err(s3_error!(InternalError, "notification system not initialized")); - }; + let semaphore = Arc::new(Semaphore::new(10)); + let mut futures = FuturesUnordered::new(); - // 3. Get the list of activity targets - let active_targets = ns.get_active_targets().await; - - debug!("ListNotificationTargets call found {} active targets", active_targets.len()); - let mut notification_endpoints = Vec::new(); - for target_id in active_targets.iter() { - notification_endpoints.push(NotificationEndpoint { - account_id: target_id.id.clone(), - service: target_id.name.to_string(), - status: "online".to_string(), + for target in targets { + let sem = Arc::clone(&semaphore); + futures.push(async move { + let _permit = sem.acquire().await; + let status = match timeout(Duration::from_secs(3), target.is_active()).await { + Ok(Ok(true)) => "online", + _ => "offline", + }; + NotificationEndpoint { + account_id: target.id().to_string(), + service: target.name().to_string(), + status: status.to_string(), + } }); } - let response = NotificationEndpointsResponse { notification_endpoints }; - - // 4. 
Serialize and return the result - let data = serde_json::to_vec(&response).map_err(|e| { - error!("Failed to serialize notification targets response: {:?}", response); - S3Error::with_message(S3ErrorCode::InternalError, format!("failed to serialize targets: {e}")) - })?; - debug!("ListNotificationTargets call end, response data length: {}", data.len(),); - let mut header = HeaderMap::new(); - header.insert(CONTENT_TYPE, "application/json".parse().unwrap()); - if let Some(v) = req.headers.get("x-request-id") { - header.insert("x-request-id", v.clone()); + let mut notification_endpoints = Vec::with_capacity(target_count); + while let Some(endpoint) = futures.next().await { + notification_endpoints.push(endpoint); } - Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header)) + + let data = serde_json::to_vec(&NotificationEndpointsResponse { notification_endpoints }) + .map_err(|e| s3_error!(InternalError, "failed to serialize targets: {}", e))?; + + Ok(build_response(StatusCode::OK, Body::from(data), req.headers.get("x-request-id"))) } } -/// Get a list of notification targets for all activities pub struct ListTargetsArns {} #[async_trait::async_trait] impl Operation for ListTargetsArns { async fn call(&self, req: S3Request, _params: Params<'_, '_>) -> S3Result> { let span = Span::current(); let _enter = span.enter(); - debug!("ListTargetsArns call start request params: {:?}", req.uri.query()); + check_permissions(&req).await?; + let ns = get_notification_system()?; - // 1. Permission verification - let Some(input_cred) = &req.credentials else { - return Err(s3_error!(InvalidRequest, "credentials not found")); - }; - let (_cred, _owner) = - check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?; - - // 2. Get notification system instance - let Some(ns) = rustfs_notify::notification_system() else { - return Err(s3_error!(InternalError, "notification system not initialized")); - }; - - // 3. 
Get the list of activity targets let active_targets = ns.get_active_targets().await; + let region = req + .region + .clone() + .ok_or_else(|| s3_error!(InvalidRequest, "region not found"))?; - debug!("ListTargetsArns call found {} active targets", active_targets.len()); + let data_target_arn_list: Vec<_> = active_targets.iter().map(|id| id.to_arn(®ion).to_string()).collect(); - let region = match req.region.clone() { - Some(region) => region, - None => return Err(s3_error!(InvalidRequest, "region not found")), - }; - let mut data_target_arn_list = Vec::new(); - - for target_id in active_targets.iter() { - data_target_arn_list.push(target_id.to_arn(®ion).to_string()); - } - - // 4. Serialize and return the result let data = serde_json::to_vec(&data_target_arn_list) - .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("failed to serialize targets: {e}")))?; - debug!("ListTargetsArns call end, response data length: {}", data.len(),); - let mut header = HeaderMap::new(); - header.insert(CONTENT_TYPE, "application/json".parse().unwrap()); - if let Some(v) = req.headers.get("x-request-id") { - header.insert("x-request-id", v.clone()); - } - Ok(S3Response::with_headers((StatusCode::OK, Body::from(data)), header)) + .map_err(|e| s3_error!(InternalError, "failed to serialize targets: {}", e))?; + + Ok(build_response(StatusCode::OK, Body::from(data), req.headers.get("x-request-id"))) } } -/// Delete a specified notification target pub struct RemoveNotificationTarget {} #[async_trait::async_trait] impl Operation for RemoveNotificationTarget { async fn call(&self, req: S3Request, params: Params<'_, '_>) -> S3Result> { let span = Span::current(); let _enter = span.enter(); - // 1. Analyze query parameters let (target_type, target_name) = extract_target_params(¶ms)?; - // 2. 
Permission verification - let Some(input_cred) = &req.credentials else { - return Err(s3_error!(InvalidRequest, "credentials not found")); - }; - let (_cred, _owner) = - check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?; + check_permissions(&req).await?; + let ns = get_notification_system()?; - // 3. Get notification system instance - let Some(ns) = rustfs_notify::notification_system() else { - return Err(s3_error!(InternalError, "notification system not initialized")); - }; - - // 4. Call notification system to remove target configuration info!("Removing target config for type '{}', name '{}'", target_type, target_name); - ns.remove_target_config(target_type, target_name).await.map_err(|e| { - error!("failed to remove target config: {}", e); - S3Error::with_message(S3ErrorCode::InternalError, format!("failed to remove target config: {e}")) - })?; + ns.remove_target_config(target_type, target_name) + .await + .map_err(|e| s3_error!(InternalError, "failed to remove target config: {}", e))?; - let mut header = HeaderMap::new(); - header.insert(CONTENT_TYPE, "application/json".parse().unwrap()); - header.insert(CONTENT_LENGTH, "0".parse().unwrap()); - if let Some(v) = req.headers.get("x-request-id") { - header.insert("x-request-id", v.clone()); - } - Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header)) + Ok(build_response(StatusCode::OK, Body::empty(), req.headers.get("x-request-id"))) } } @@ -433,7 +334,6 @@ fn extract_target_params<'a>(params: &'a Params<'_, '_>) -> S3Result<(&'a str, & if target_type != NOTIFY_WEBHOOK_SUB_SYS && target_type != NOTIFY_MQTT_SUB_SYS { return Err(s3_error!(InvalidArgument, "unsupported target type: '{}'", target_type)); } - let target_name = extract_param(params, "target_name")?; Ok((target_type, target_name)) } diff --git a/rustfs/src/admin/router.rs b/rustfs/src/admin/router.rs index 09c390cf..b01565b5 100644 --- a/rustfs/src/admin/router.rs +++ 
b/rustfs/src/admin/router.rs @@ -84,6 +84,7 @@ where { fn is_match(&self, method: &Method, uri: &Uri, headers: &HeaderMap, _: &mut Extensions) -> bool { let path = uri.path(); + // Profiling endpoints if method == Method::GET && (path == PROFILE_CPU_PATH || path == PROFILE_MEMORY_PATH) { return true; @@ -150,6 +151,8 @@ where } async fn call(&self, req: S3Request) -> S3Result> { + // Console requests should be handled by console router first (including OPTIONS) + // Console has its own CORS layer configured if self.console_enabled && is_console_path(req.uri.path()) { if let Some(console_router) = &self.console_router { let mut console_router = console_router.clone(); @@ -164,11 +167,14 @@ where } let uri = format!("{}|{}", &req.method, req.uri.path()); + if let Ok(mat) = self.router.at(&uri) { let op: &T = mat.value; let mut resp = op.call(req, mat.params).await?; resp.status = Some(resp.output.0); - return Ok(resp.map_output(|x| x.1)); + let response = resp.map_output(|x| x.1); + + return Ok(response); } Err(s3_error!(NotImplemented)) diff --git a/rustfs/src/server/cors.rs b/rustfs/src/server/cors.rs new file mode 100644 index 00000000..b01d9034 --- /dev/null +++ b/rustfs/src/server/cors.rs @@ -0,0 +1,40 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! CORS (Cross-Origin Resource Sharing) header name constants. +//! +//! This module provides centralized constants for CORS-related HTTP header names. +//! 
The http crate doesn't provide pre-defined constants for CORS headers, +//! so we define them here for type safety and maintainability. + +/// CORS response header names +pub mod response { + pub const ACCESS_CONTROL_ALLOW_ORIGIN: &str = "access-control-allow-origin"; + pub const ACCESS_CONTROL_ALLOW_METHODS: &str = "access-control-allow-methods"; + pub const ACCESS_CONTROL_ALLOW_HEADERS: &str = "access-control-allow-headers"; + pub const ACCESS_CONTROL_EXPOSE_HEADERS: &str = "access-control-expose-headers"; + pub const ACCESS_CONTROL_ALLOW_CREDENTIALS: &str = "access-control-allow-credentials"; + pub const ACCESS_CONTROL_MAX_AGE: &str = "access-control-max-age"; +} + +/// CORS request header names +pub mod request { + pub const ACCESS_CONTROL_REQUEST_METHOD: &str = "access-control-request-method"; + pub const ACCESS_CONTROL_REQUEST_HEADERS: &str = "access-control-request-headers"; +} + +/// Standard HTTP header names used in CORS processing +pub mod standard { + pub use http::header::{ORIGIN, VARY}; +} diff --git a/rustfs/src/server/http.rs b/rustfs/src/server/http.rs index 314494fe..5cecb319 100644 --- a/rustfs/src/server/http.rs +++ b/rustfs/src/server/http.rs @@ -17,7 +17,11 @@ use super::compress::{CompressionConfig, CompressionPredicate}; use crate::admin; use crate::auth::IAMAuth; use crate::config; -use crate::server::{ReadinessGateLayer, RemoteAddr, ServiceState, ServiceStateManager, hybrid::hybrid, layer::RedirectLayer}; +use crate::server::{ + ReadinessGateLayer, RemoteAddr, ServiceState, ServiceStateManager, + hybrid::hybrid, + layer::{ConditionalCorsLayer, RedirectLayer}, +}; use crate::storage; use crate::storage::tonic_service::make_server; use bytes::Bytes; @@ -30,9 +34,6 @@ use hyper_util::{ }; use metrics::{counter, histogram}; use rustfs_common::GlobalReadiness; -#[cfg(not(target_os = "openbsd"))] -use rustfs_config::{MI_B, RUSTFS_TLS_CERT, RUSTFS_TLS_KEY}; -#[cfg(target_os = "openbsd")] use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY}; use 
rustfs_ecstore::rpc::{TONIC_RPC_PREFIX, verify_rpc_signature}; use rustfs_protos::proto_gen::node_service::node_service_server::NodeServiceServer; @@ -51,70 +52,10 @@ use tower::ServiceBuilder; use tower_http::add_extension::AddExtensionLayer; use tower_http::catch_panic::CatchPanicLayer; use tower_http::compression::CompressionLayer; -use tower_http::cors::{AllowOrigin, Any, CorsLayer}; use tower_http::request_id::{MakeRequestUuid, PropagateRequestIdLayer, SetRequestIdLayer}; use tower_http::trace::TraceLayer; use tracing::{Span, debug, error, info, instrument, warn}; -/// Parse CORS allowed origins from configuration -fn parse_cors_origins(origins: Option<&String>) -> CorsLayer { - use http::Method; - - let cors_layer = CorsLayer::new() - .allow_methods([ - Method::GET, - Method::POST, - Method::PUT, - Method::DELETE, - Method::HEAD, - Method::OPTIONS, - ]) - .allow_headers(Any); - - match origins { - Some(origins_str) if origins_str == "*" => cors_layer.allow_origin(Any).expose_headers(Any), - Some(origins_str) => { - let origins: Vec<&str> = origins_str.split(',').map(|s| s.trim()).collect(); - if origins.is_empty() { - warn!("Empty CORS origins provided, using permissive CORS"); - cors_layer.allow_origin(Any).expose_headers(Any) - } else { - // Parse origins with proper error handling - let mut valid_origins = Vec::new(); - for origin in origins { - match origin.parse::() { - Ok(header_value) => { - valid_origins.push(header_value); - } - Err(e) => { - warn!("Invalid CORS origin '{}': {}", origin, e); - } - } - } - - if valid_origins.is_empty() { - warn!("No valid CORS origins found, using permissive CORS"); - cors_layer.allow_origin(Any).expose_headers(Any) - } else { - info!("Endpoint CORS origins configured: {:?}", valid_origins); - cors_layer.allow_origin(AllowOrigin::list(valid_origins)).expose_headers(Any) - } - } - } - None => { - debug!("No CORS origins configured for endpoint, using permissive CORS"); - cors_layer.allow_origin(Any).expose_headers(Any) 
- } - } -} - -fn get_cors_allowed_origins() -> String { - std::env::var(rustfs_config::ENV_CORS_ALLOWED_ORIGINS) - .unwrap_or_else(|_| rustfs_config::DEFAULT_CORS_ALLOWED_ORIGINS.to_string()) - .parse::() - .unwrap_or(rustfs_config::DEFAULT_CONSOLE_CORS_ALLOWED_ORIGINS.to_string()) -} - pub async fn start_http_server( opt: &config::Opt, worker_state_manager: ServiceStateManager, @@ -276,14 +217,6 @@ pub async fn start_http_server( let (shutdown_tx, mut shutdown_rx) = tokio::sync::broadcast::channel(1); let shutdown_tx_clone = shutdown_tx.clone(); - // Capture CORS configuration for the server loop - let cors_allowed_origins = get_cors_allowed_origins(); - let cors_allowed_origins = if cors_allowed_origins.is_empty() { - None - } else { - Some(cors_allowed_origins) - }; - // Create compression configuration from environment variables let compression_config = CompressionConfig::from_env(); if compression_config.enabled { @@ -297,8 +230,10 @@ pub async fn start_http_server( let is_console = opt.console_enable; tokio::spawn(async move { - // Create CORS layer inside the server loop closure - let cors_layer = parse_cors_origins(cors_allowed_origins.as_ref()); + // Note: CORS layer is removed from global middleware stack + // - S3 API CORS is handled by bucket-level CORS configuration in apply_cors_headers() + // - Console CORS is handled by its own cors_layer in setup_console_middleware_stack() + // This ensures S3 API CORS behavior matches AWS S3 specification #[cfg(unix)] let (mut sigterm_inner, mut sigint_inner) = { @@ -379,11 +314,14 @@ pub async fn start_http_server( // Enable TCP Keepalive to detect dead clients (e.g. 
power loss) // Idle: 10s, Interval: 5s, Retries: 3 - let mut ka = TcpKeepalive::new().with_time(Duration::from_secs(10)); + #[cfg(target_os = "openbsd")] + let ka = TcpKeepalive::new().with_time(Duration::from_secs(10)); + #[cfg(not(target_os = "openbsd"))] - { - ka = ka.with_interval(Duration::from_secs(5)).with_retries(3); - } + let ka = TcpKeepalive::new() + .with_time(Duration::from_secs(10)) + .with_interval(Duration::from_secs(5)) + .with_retries(3); if let Err(err) = socket_ref.set_tcp_keepalive(&ka) { warn!(?err, "Failed to set TCP_KEEPALIVE"); @@ -392,19 +330,19 @@ pub async fn start_http_server( if let Err(err) = socket_ref.set_tcp_nodelay(true) { warn!(?err, "Failed to set TCP_NODELAY"); } - #[cfg(not(any(target_os = "openbsd")))] - if let Err(err) = socket_ref.set_recv_buffer_size(4 * MI_B) { + + #[cfg(not(target_os = "openbsd"))] + if let Err(err) = socket_ref.set_recv_buffer_size(4 * rustfs_config::MI_B) { warn!(?err, "Failed to set set_recv_buffer_size"); } - #[cfg(not(any(target_os = "openbsd")))] - if let Err(err) = socket_ref.set_send_buffer_size(4 * MI_B) { + #[cfg(not(target_os = "openbsd"))] + if let Err(err) = socket_ref.set_send_buffer_size(4 * rustfs_config::MI_B) { warn!(?err, "Failed to set set_send_buffer_size"); } let connection_ctx = ConnectionContext { http_server: http_server.clone(), s3_service: s3_service.clone(), - cors_layer: cors_layer.clone(), compression_config: compression_config.clone(), is_console, readiness: readiness.clone(), @@ -520,7 +458,6 @@ async fn setup_tls_acceptor(tls_path: &str) -> Result> { struct ConnectionContext { http_server: Arc>, s3_service: S3Service, - cors_layer: CorsLayer, compression_config: CompressionConfig, is_console: bool, readiness: Arc, @@ -545,7 +482,6 @@ fn process_connection( let ConnectionContext { http_server, s3_service, - cors_layer, compression_config, is_console, readiness, @@ -559,7 +495,7 @@ fn process_connection( let remote_addr = match socket.peer_addr() { Ok(addr) => 
Some(RemoteAddr(addr)), Err(e) => { - tracing::warn!( + warn!( error = %e, "Failed to obtain peer address; policy evaluation may fall back to a default source IP" ); @@ -628,10 +564,15 @@ fn process_connection( }), ) .layer(PropagateRequestIdLayer::x_request_id()) - .layer(cors_layer) // Compress responses based on whitelist configuration // Only compresses when enabled and matches configured extensions/MIME types .layer(CompressionLayer::new().compress_when(CompressionPredicate::new(compression_config))) + // Conditional CORS layer: only applies to S3 API requests (not Admin, not Console) + // Admin has its own CORS handling in router.rs + // Console has its own CORS layer in setup_console_middleware_stack() + // S3 API uses this system default CORS (RUSTFS_CORS_ALLOWED_ORIGINS) + // Bucket-level CORS takes precedence when configured (handled in router.rs for OPTIONS, and in ecfs.rs for actual requests) + .layer(ConditionalCorsLayer::new()) .option_layer(if is_console { Some(RedirectLayer) } else { None }) .service(service); @@ -752,17 +693,18 @@ fn get_listen_backlog() -> i32 { } // For macOS and BSD variants use the syscall way of getting the connection queue length. -#[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "netbsd", target_os = "openbsd"))] +// NetBSD has no somaxconn-like kernel state. 
+#[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "openbsd"))] #[allow(unsafe_code)] fn get_listen_backlog() -> i32 { const DEFAULT_BACKLOG: i32 = 1024; #[cfg(target_os = "openbsd")] let mut name = [libc::CTL_KERN, libc::KERN_SOMAXCONN]; - #[cfg(any(target_os = "netbsd", target_os = "macos", target_os = "freebsd"))] + #[cfg(any(target_os = "macos", target_os = "freebsd"))] let mut name = [libc::CTL_KERN, libc::KERN_IPC, libc::KIPC_SOMAXCONN]; let mut buf = [0; 1]; - let mut buf_len = std::mem::size_of_val(&buf); + let mut buf_len = size_of_val(&buf); if unsafe { libc::sysctl( @@ -781,14 +723,8 @@ fn get_listen_backlog() -> i32 { buf[0] } -// Fallback for Windows and other operating systems -#[cfg(not(any( - target_os = "linux", - target_os = "macos", - target_os = "freebsd", - target_os = "netbsd", - target_os = "openbsd" -)))] +// Fallback for Windows, NetBSD and other operating systems. +#[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "freebsd", target_os = "openbsd")))] fn get_listen_backlog() -> i32 { const DEFAULT_BACKLOG: i32 = 1024; DEFAULT_BACKLOG diff --git a/rustfs/src/server/layer.rs b/rustfs/src/server/layer.rs index f324d06b..705798d3 100644 --- a/rustfs/src/server/layer.rs +++ b/rustfs/src/server/layer.rs @@ -12,14 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use crate::admin::console::is_console_path; +use crate::server::cors; use crate::server::hybrid::HybridBody; -use http::{Request as HttpRequest, Response, StatusCode}; +use crate::server::{ADMIN_PREFIX, RPC_PREFIX}; +use crate::storage::ecfs; +use http::{HeaderMap, HeaderValue, Method, Request as HttpRequest, Response, StatusCode}; use hyper::body::Incoming; use std::future::Future; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use tower::{Layer, Service}; -use tracing::debug; +use tracing::{debug, info}; /// Redirect layer that redirects browser requests to the console #[derive(Clone)] @@ -89,3 +94,173 @@ where Box::pin(async move { inner.call(req).await.map_err(Into::into) }) } } + +/// Conditional CORS layer that only applies to S3 API requests +/// (not Admin, not Console, not RPC) +#[derive(Clone)] +pub struct ConditionalCorsLayer { + cors_origins: Option, +} + +impl ConditionalCorsLayer { + pub fn new() -> Self { + let cors_origins = std::env::var("RUSTFS_CORS_ALLOWED_ORIGINS").ok().filter(|s| !s.is_empty()); + Self { cors_origins } + } + + /// Exact paths that should be excluded from being treated as S3 paths. 
+ const EXCLUDED_EXACT_PATHS: &'static [&'static str] = &["/health", "/profile/cpu", "/profile/memory"]; + + fn is_s3_path(path: &str) -> bool { + // Exclude Admin, Console, RPC, and configured special paths + !path.starts_with(ADMIN_PREFIX) + && !path.starts_with(RPC_PREFIX) + && !is_console_path(path) + && !Self::EXCLUDED_EXACT_PATHS.contains(&path) + } + + fn apply_cors_headers(&self, request_headers: &HeaderMap, response_headers: &mut HeaderMap) { + let origin = request_headers + .get(cors::standard::ORIGIN) + .and_then(|v| v.to_str().ok()) + .map(|s| s.to_string()); + + let allowed_origin = match (origin, &self.cors_origins) { + (Some(orig), Some(config)) if config == "*" => Some(orig), + (Some(orig), Some(config)) => { + let origins: Vec<&str> = config.split(',').map(|s| s.trim()).collect(); + if origins.contains(&orig.as_str()) { Some(orig) } else { None } + } + (Some(orig), None) => Some(orig), // Default: allow all if not configured + _ => None, + }; + + // Track whether we're using a specific origin (not wildcard) + let using_specific_origin = if let Some(origin) = &allowed_origin { + if let Ok(header_value) = HeaderValue::from_str(origin) { + response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_ORIGIN, header_value); + true // Using specific origin, credentials allowed + } else { + false + } + } else { + false + }; + + // Allow all methods by default (S3-compatible set) + response_headers.insert( + cors::response::ACCESS_CONTROL_ALLOW_METHODS, + HeaderValue::from_static("GET, POST, PUT, DELETE, OPTIONS, HEAD"), + ); + + // Allow all headers by default + response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_HEADERS, HeaderValue::from_static("*")); + + // Expose common headers + response_headers.insert( + cors::response::ACCESS_CONTROL_EXPOSE_HEADERS, + HeaderValue::from_static("x-request-id, content-type, content-length, etag"), + ); + + // Only set credentials when using a specific origin (not wildcard) + // CORS spec: credentials cannot 
be used with wildcard origins + if using_specific_origin { + response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_CREDENTIALS, HeaderValue::from_static("true")); + } + } +} + +impl Default for ConditionalCorsLayer { + fn default() -> Self { + Self::new() + } +} + +impl Layer for ConditionalCorsLayer { + type Service = ConditionalCorsService; + + fn layer(&self, inner: S) -> Self::Service { + ConditionalCorsService { + inner, + cors_origins: Arc::new(self.cors_origins.clone()), + } + } +} + +/// Service implementation for conditional CORS +#[derive(Clone)] +pub struct ConditionalCorsService { + inner: S, + cors_origins: Arc>, +} + +impl Service> for ConditionalCorsService +where + S: Service, Response = Response> + Clone + Send + 'static, + S::Future: Send + 'static, + S::Error: Into> + Send + 'static, + ResBody: Default + Send + 'static, +{ + type Response = Response; + type Error = Box; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, req: HttpRequest) -> Self::Future { + let path = req.uri().path().to_string(); + let method = req.method().clone(); + let request_headers = req.headers().clone(); + let cors_origins = self.cors_origins.clone(); + // Handle OPTIONS preflight requests - return response directly without calling handler + if method == Method::OPTIONS && request_headers.contains_key(cors::standard::ORIGIN) { + info!("OPTIONS preflight request for path: {}", path); + + let path_trimmed = path.trim_start_matches('/'); + let bucket = path_trimmed.split('/').next().unwrap_or("").to_string(); // virtual host style? 
+ let method_clone = method.clone(); + let request_headers_clone = request_headers.clone(); + + return Box::pin(async move { + let mut response = Response::builder().status(StatusCode::OK).body(ResBody::default()).unwrap(); + + if ConditionalCorsLayer::is_s3_path(&path) + && !bucket.is_empty() + && cors_origins.is_some() + && let Some(cors_headers) = ecfs::apply_cors_headers(&bucket, &method_clone, &request_headers_clone).await + { + for (key, value) in cors_headers.iter() { + response.headers_mut().insert(key, value.clone()); + } + return Ok(response); + } + + let cors_layer = ConditionalCorsLayer { + cors_origins: (*cors_origins).clone(), + }; + cors_layer.apply_cors_headers(&request_headers_clone, response.headers_mut()); + + Ok(response) + }); + } + + let mut inner = self.inner.clone(); + Box::pin(async move { + let mut response = inner.call(req).await.map_err(Into::into)?; + + // Apply CORS headers only to S3 API requests (non-OPTIONS) + if request_headers.contains_key(cors::standard::ORIGIN) + && !response.headers().contains_key(cors::response::ACCESS_CONTROL_ALLOW_ORIGIN) + { + let cors_layer = ConditionalCorsLayer { + cors_origins: (*cors_origins).clone(), + }; + cors_layer.apply_cors_headers(&request_headers, response.headers_mut()); + } + + Ok(response) + }) + } +} diff --git a/rustfs/src/server/mod.rs b/rustfs/src/server/mod.rs index c6f72d19..8714fa78 100644 --- a/rustfs/src/server/mod.rs +++ b/rustfs/src/server/mod.rs @@ -15,6 +15,7 @@ mod audit; mod cert; mod compress; +pub mod cors; mod event; mod http; mod hybrid; diff --git a/rustfs/src/storage/access.rs b/rustfs/src/storage/access.rs index e394c68f..79515cdc 100644 --- a/rustfs/src/storage/access.rs +++ b/rustfs/src/storage/access.rs @@ -342,7 +342,7 @@ impl S3Access for FS { let req_info = req.extensions.get_mut::().expect("ReqInfo not found"); req_info.bucket = Some(req.input.bucket.clone()); - authorize_request(req, Action::S3Action(S3Action::PutBucketCorsAction)).await + authorize_request(req, 
Action::S3Action(S3Action::DeleteBucketCorsAction)).await } /// Checks whether the DeleteBucketEncryption request has accesses to the resources. diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 1fec35ec..9f584d60 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -18,6 +18,7 @@ use crate::config::workload_profiles::{ }; use crate::error::ApiError; use crate::server::RemoteAddr; +use crate::server::cors; use crate::storage::concurrency::{ CachedGetObject, ConcurrencyManager, GetObjectGuard, get_concurrency_aware_buffer_size, get_concurrency_manager, }; @@ -39,7 +40,7 @@ use datafusion::arrow::{ }; use futures::StreamExt; use http::{HeaderMap, StatusCode}; -use metrics::counter; +use metrics::{counter, histogram}; use rustfs_ecstore::bucket::quota::checker::QuotaChecker; use rustfs_ecstore::{ bucket::{ @@ -48,8 +49,8 @@ use rustfs_ecstore::{ lifecycle::{self, Lifecycle, TransitionOptions}, }, metadata::{ - BUCKET_LIFECYCLE_CONFIG, BUCKET_NOTIFICATION_CONFIG, BUCKET_POLICY_CONFIG, BUCKET_REPLICATION_CONFIG, - BUCKET_SSECONFIG, BUCKET_TAGGING_CONFIG, BUCKET_VERSIONING_CONFIG, OBJECT_LOCK_CONFIG, + BUCKET_CORS_CONFIG, BUCKET_LIFECYCLE_CONFIG, BUCKET_NOTIFICATION_CONFIG, BUCKET_POLICY_CONFIG, + BUCKET_REPLICATION_CONFIG, BUCKET_SSECONFIG, BUCKET_TAGGING_CONFIG, BUCKET_VERSIONING_CONFIG, OBJECT_LOCK_CONFIG, }, metadata_sys, metadata_sys::get_replication_config, @@ -116,10 +117,9 @@ use rustfs_utils::{ AMZ_BUCKET_REPLICATION_STATUS, AMZ_CHECKSUM_MODE, AMZ_CHECKSUM_TYPE, headers::{ AMZ_DECODED_CONTENT_LENGTH, AMZ_OBJECT_TAGGING, AMZ_RESTORE_EXPIRY_DAYS, AMZ_RESTORE_REQUEST_DATE, - RESERVED_METADATA_PREFIX_LOWER, + RESERVED_METADATA_PREFIX, RESERVED_METADATA_PREFIX_LOWER, }, }, - obj::extract_user_defined_metadata, path::{is_dir_object, path_join_buf}, }; use rustfs_zip::CompressionFormat; @@ -782,6 +782,25 @@ impl FS { let _ = helper.complete(&result); result } + + /// Auxiliary functions: parse version ID + /// + /// # 
Arguments + /// * `version_id` - An optional string representing the version ID to be parsed. + /// + /// # Returns + /// * `S3Result>` - A result containing an optional UUID if parsing is successful, or an S3 error if parsing fails. + fn parse_version_id(&self, version_id: Option) -> S3Result> { + if let Some(vid) = version_id { + let uuid = Uuid::parse_str(&vid).map_err(|e| { + error!("Invalid version ID: {}", e); + s3_error!(InvalidArgument, "Invalid version ID") + })?; + Ok(Some(uuid)) + } else { + Ok(None) + } + } } /// Helper function to get store and validate bucket exists @@ -799,6 +818,205 @@ async fn get_validated_store(bucket: &str) -> S3Result bool { + headers.contains_key(cors::standard::ORIGIN) +} + +/// Apply CORS headers to response based on bucket CORS configuration and request origin +/// +/// This function: +/// 1. Reads the Origin header from the request +/// 2. Retrieves the bucket's CORS configuration +/// 3. Matches the origin against CORS rules +/// 4. Validates AllowedHeaders if request headers are present +/// 5. Returns headers to add to the response if a match is found +/// +/// Note: This function should only be called if `needs_cors_processing()` returns true +/// to avoid unnecessary overhead for non-CORS requests. 
+pub(crate) async fn apply_cors_headers(bucket: &str, method: &http::Method, headers: &HeaderMap) -> Option { + use http::HeaderValue; + + // Get Origin header from request + let origin = headers.get(cors::standard::ORIGIN)?.to_str().ok()?; + + // Get CORS configuration for the bucket + let cors_config = match metadata_sys::get_cors_config(bucket).await { + Ok((config, _)) => config, + Err(_) => return None, // No CORS config, no headers to add + }; + + // Early return if no CORS rules configured + if cors_config.cors_rules.is_empty() { + return None; + } + + // Check if method is supported and get its string representation + const SUPPORTED_METHODS: &[&str] = &["GET", "PUT", "POST", "DELETE", "HEAD", "OPTIONS"]; + let method_str = method.as_str(); + if !SUPPORTED_METHODS.contains(&method_str) { + return None; + } + + // For OPTIONS (preflight) requests, check Access-Control-Request-Method + let is_preflight = method == http::Method::OPTIONS; + let requested_method = if is_preflight { + headers + .get(cors::request::ACCESS_CONTROL_REQUEST_METHOD) + .and_then(|v| v.to_str().ok()) + .unwrap_or(method_str) + } else { + method_str + }; + + // Get requested headers from preflight request + let requested_headers = if is_preflight { + headers + .get(cors::request::ACCESS_CONTROL_REQUEST_HEADERS) + .and_then(|v| v.to_str().ok()) + .map(|h| h.split(',').map(|s| s.trim().to_lowercase()).collect::>()) + } else { + None + }; + + // Find matching CORS rule + for rule in cors_config.cors_rules.iter() { + // Check if origin matches + let origin_matches = rule.allowed_origins.iter().any(|allowed_origin| { + if allowed_origin == "*" { + true + } else { + // Exact match or pattern match (support wildcards like https://*.example.com) + allowed_origin == origin || matches_origin_pattern(allowed_origin, origin) + } + }); + + if !origin_matches { + continue; + } + + // Check if method is allowed + let method_allowed = rule + .allowed_methods + .iter() + .any(|allowed_method| 
allowed_method.as_str() == requested_method); + + if !method_allowed { + continue; + } + + // Validate AllowedHeaders if present in the request + if let Some(ref req_headers) = requested_headers { + if let Some(ref allowed_headers) = rule.allowed_headers { + // Check if all requested headers are allowed + let all_headers_allowed = req_headers.iter().all(|req_header| { + allowed_headers.iter().any(|allowed_header| { + let allowed_lower = allowed_header.to_lowercase(); + // "*" allows all headers, or exact match + allowed_lower == "*" || allowed_lower == *req_header + }) + }); + + if !all_headers_allowed { + // If not all headers are allowed, skip this rule + continue; + } + } else if !req_headers.is_empty() { + // If no AllowedHeaders specified but headers were requested, skip this rule + // Unless the rule explicitly allows all headers + continue; + } + } + + // Found matching rule, build response headers + let mut response_headers = HeaderMap::new(); + + // Access-Control-Allow-Origin + // If origin is "*", use "*", otherwise echo back the origin + let has_wildcard_origin = rule.allowed_origins.iter().any(|o| o == "*"); + if has_wildcard_origin { + response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_ORIGIN, HeaderValue::from_static("*")); + } else if let Ok(origin_value) = HeaderValue::from_str(origin) { + response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_ORIGIN, origin_value); + } + + // Vary: Origin (required for caching, except when using wildcard) + if !has_wildcard_origin { + response_headers.insert(cors::standard::VARY, HeaderValue::from_static("Origin")); + } + + // Access-Control-Allow-Methods (required for preflight) + if is_preflight || !rule.allowed_methods.is_empty() { + let methods_str = rule.allowed_methods.iter().map(|m| m.as_str()).collect::>().join(", "); + if let Ok(methods_value) = HeaderValue::from_str(&methods_str) { + response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_METHODS, methods_value); + } + } + + // 
Access-Control-Allow-Headers (required for preflight if headers were requested) + if is_preflight && let Some(ref allowed_headers) = rule.allowed_headers { + let headers_str = allowed_headers.iter().map(|h| h.as_str()).collect::>().join(", "); + if let Ok(headers_value) = HeaderValue::from_str(&headers_str) { + response_headers.insert(cors::response::ACCESS_CONTROL_ALLOW_HEADERS, headers_value); + } + } + + // Access-Control-Expose-Headers (for actual requests) + if !is_preflight && let Some(ref expose_headers) = rule.expose_headers { + let expose_headers_str = expose_headers.iter().map(|h| h.as_str()).collect::>().join(", "); + if let Ok(expose_value) = HeaderValue::from_str(&expose_headers_str) { + response_headers.insert(cors::response::ACCESS_CONTROL_EXPOSE_HEADERS, expose_value); + } + } + + // Access-Control-Max-Age (for preflight requests) + if is_preflight + && let Some(max_age) = rule.max_age_seconds + && let Ok(max_age_value) = HeaderValue::from_str(&max_age.to_string()) + { + response_headers.insert(cors::response::ACCESS_CONTROL_MAX_AGE, max_age_value); + } + + return Some(response_headers); + } + + None // No matching rule found +} +/// Check if an origin matches a pattern (supports wildcards like https://*.example.com) +fn matches_origin_pattern(pattern: &str, origin: &str) -> bool { + // Simple wildcard matching: * matches any sequence + if pattern.contains('*') { + let pattern_parts: Vec<&str> = pattern.split('*').collect(); + if pattern_parts.len() == 2 { + origin.starts_with(pattern_parts[0]) && origin.ends_with(pattern_parts[1]) + } else { + false + } + } else { + pattern == origin + } +} + +/// Wrap S3Response with CORS headers if needed +/// This function performs a lightweight check first to avoid unnecessary CORS processing +/// for non-CORS requests (requests without Origin header) +async fn wrap_response_with_cors(bucket: &str, method: &http::Method, headers: &HeaderMap, output: T) -> S3Response { + let mut response = 
S3Response::new(output); + + // Quick check: only process CORS if Origin header is present + if needs_cors_processing(headers) + && let Some(cors_headers) = apply_cors_headers(bucket, method, headers).await + { + for (key, value) in cors_headers.iter() { + response.headers.insert(key, value.clone()); + } + } + + response +} + #[async_trait::async_trait] impl S3 for FS { #[instrument( @@ -854,6 +1072,9 @@ impl S3 for FS { sse_customer_key_md5, metadata_directive, metadata, + copy_source_if_match, + copy_source_if_none_match, + content_type, .. } = req.input.clone(); let (src_bucket, src_key, version_id) = match copy_source { @@ -869,6 +1090,19 @@ impl S3 for FS { validate_object_key(&src_key, "COPY (source)")?; validate_object_key(&key, "COPY (dest)")?; + // AWS S3 allows self-copy when metadata directive is REPLACE (used to update metadata in-place). + // Reject only when the directive is not REPLACE. + if metadata_directive.as_ref().map(|d| d.as_str()) != Some(MetadataDirective::REPLACE) + && src_bucket == bucket + && src_key == key + { + error!("Rejected self-copy operation: bucket={}, key={}", bucket, key); + return Err(s3_error!( + InvalidRequest, + "Cannot copy an object to itself. Source and destination must be different." 
+ )); + } + // warn!("copy_object {}/{}, to {}/{}", &src_bucket, &src_key, &bucket, &key); let mut src_opts = copy_src_opts(&src_bucket, &src_key, &req.headers).map_err(ApiError::from)?; @@ -929,6 +1163,30 @@ impl S3 for FS { let mut src_info = gr.object_info.clone(); + // Validate copy source conditions + if let Some(if_match) = copy_source_if_match { + if let Some(ref etag) = src_info.etag { + if let Some(strong_etag) = if_match.into_etag() { + if ETag::Strong(etag.clone()) != strong_etag { + return Err(s3_error!(PreconditionFailed)); + } + } else { + // Weak ETag or Any (*) in If-Match should fail per RFC 9110 + return Err(s3_error!(PreconditionFailed)); + } + } else { + return Err(s3_error!(PreconditionFailed)); + } + } + + if let Some(if_none_match) = copy_source_if_none_match + && let Some(ref etag) = src_info.etag + && let Some(strong_etag) = if_none_match.into_etag() + && ETag::Strong(etag.clone()) == strong_etag + { + return Err(s3_error!(PreconditionFailed)); + } + if cp_src_dst_same { src_info.metadata_only = true; } @@ -969,12 +1227,35 @@ impl S3 for FS { src_info .user_defined .remove(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression")); + src_info + .user_defined + .remove(&format!("{RESERVED_METADATA_PREFIX}compression")); src_info .user_defined .remove(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size")); + src_info + .user_defined + .remove(&format!("{RESERVED_METADATA_PREFIX}actual-size")); src_info .user_defined .remove(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression-size")); + src_info + .user_defined + .remove(&format!("{RESERVED_METADATA_PREFIX}compression-size")); + } + + // Handle MetadataDirective REPLACE: replace user metadata while preserving system metadata. + // System metadata (compression, encryption) is added after this block to ensure + // it's not cleared by the REPLACE operation. 
+ if metadata_directive.as_ref().map(|d| d.as_str()) == Some(MetadataDirective::REPLACE) { + src_info.user_defined.clear(); + if let Some(metadata) = metadata { + src_info.user_defined.extend(metadata); + } + if let Some(ct) = content_type { + src_info.content_type = Some(ct.clone()); + src_info.user_defined.insert("content-type".to_string(), ct); + } } let mut reader = HashReader::new(reader, length, actual_size, None, None, false).map_err(ApiError::from)?; @@ -1059,16 +1340,6 @@ impl S3 for FS { .insert("x-amz-server-side-encryption-customer-key-md5".to_string(), sse_md5.clone()); } - if metadata_directive.as_ref().map(|d| d.as_str()) == Some(MetadataDirective::REPLACE) { - let src_user_defined = extract_user_defined_metadata(&src_info.user_defined); - src_user_defined.keys().for_each(|k| { - src_info.user_defined.remove(k); - }); - if let Some(metadata) = metadata { - src_info.user_defined.extend(metadata); - } - } - // check quota for copy operation if let Some(metadata_sys) = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get() { let quota_checker = QuotaChecker::new(metadata_sys.clone()); @@ -2491,6 +2762,23 @@ impl S3 for FS { } } + let versioned = BucketVersioningSys::prefix_enabled(&bucket, &key).await; + + // Get version_id from object info + // If versioning is enabled and version_id exists in object info, return it + // If version_id is Uuid::nil(), return "null" string (AWS S3 convention) + let output_version_id = if versioned { + info.version_id.map(|vid| { + if vid == Uuid::nil() { + "null".to_string() + } else { + vid.to_string() + } + }) + } else { + None + }; + let output = GetObjectOutput { body, content_length: Some(response_content_length), @@ -2511,6 +2799,7 @@ impl S3 for FS { checksum_sha256, checksum_crc64nvme, checksum_type, + version_id: output_version_id, ..Default::default() }; @@ -2535,7 +2824,8 @@ impl S3 for FS { cache_key, response_content_length, total_duration, optimal_buffer_size ); - let result = 
Ok(S3Response::new(output)); + let response = wrap_response_with_cors(&bucket, &req.method, &req.headers, output).await; + let result = Ok(response); let _ = helper.complete(&result); result } @@ -2610,7 +2900,18 @@ impl S3 for FS { return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); }; - let info = store.get_object_info(&bucket, &key, &opts).await.map_err(ApiError::from)?; + // Modification Points: Explicitly handles get_object_info errors, distinguishing between object absence and other errors + let info = match store.get_object_info(&bucket, &key, &opts).await { + Ok(info) => info, + Err(err) => { + // If the error indicates the object or its version was not found, return 404 (NoSuchKey) + if is_err_object_not_found(&err) || is_err_version_not_found(&err) { + return Err(S3Error::new(S3ErrorCode::NoSuchKey)); + } + // Other errors, such as insufficient permissions, still return the original error + return Err(ApiError::from(err).into()); + } + }; if info.delete_marker { if opts.version_id.is_none() { @@ -2689,7 +2990,7 @@ impl S3 for FS { .get("x-amz-server-side-encryption-customer-algorithm") .map(|v| SSECustomerAlgorithm::from(v.clone())); let sse_customer_key_md5 = metadata_map.get("x-amz-server-side-encryption-customer-key-md5").cloned(); - let ssekms_key_id = metadata_map.get("x-amz-server-side-encryption-aws-kms-key-id").cloned(); + let sse_kms_key_id = metadata_map.get("x-amz-server-side-encryption-aws-kms-key-id").cloned(); // Prefer explicit storage_class from object info; fall back to persisted metadata header. 
let storage_class = info .storage_class @@ -2754,7 +3055,7 @@ impl S3 for FS { server_side_encryption, sse_customer_algorithm, sse_customer_key_md5, - ssekms_key_id, + ssekms_key_id: sse_kms_key_id, checksum_crc32, checksum_crc32c, checksum_sha1, @@ -2769,7 +3070,14 @@ impl S3 for FS { let version_id = req.input.version_id.clone().unwrap_or_default(); helper = helper.object(event_info).version_id(version_id); - let result = Ok(S3Response::new(output)); + // NOTE ON CORS: + // Bucket-level CORS headers are intentionally applied only for object retrieval + // operations (GET/HEAD) via `wrap_response_with_cors`. Other S3 operations that + // interact with objects (PUT/POST/DELETE/LIST, etc.) rely on the system-level + // CORS layer instead. In case both are applicable, this bucket-level CORS logic + // takes precedence for these read operations. + let response = wrap_response_with_cors(&bucket, &req.method, &req.headers, output).await; + let result = Ok(response); let _ = helper.complete(&result); result @@ -4639,8 +4947,85 @@ impl S3 for FS { Ok(S3Response::new(DeleteBucketTaggingOutput {})) } + #[instrument(level = "debug", skip(self))] + async fn get_bucket_cors(&self, req: S3Request) -> S3Result> { + let bucket = req.input.bucket.clone(); + // check bucket exists. 
+ let _bucket = self + .head_bucket(req.map_input(|input| HeadBucketInput { + bucket: input.bucket, + expected_bucket_owner: None, + })) + .await?; + + let cors_configuration = match metadata_sys::get_cors_config(&bucket).await { + Ok((config, _)) => config, + Err(err) => { + if err == StorageError::ConfigNotFound { + return Err(S3Error::with_message( + S3ErrorCode::NoSuchCORSConfiguration, + "The CORS configuration does not exist".to_string(), + )); + } + warn!("get_cors_config err {:?}", &err); + return Err(ApiError::from(err).into()); + } + }; + + Ok(S3Response::new(GetBucketCorsOutput { + cors_rules: Some(cors_configuration.cors_rules), + })) + } + + #[instrument(level = "debug", skip(self))] + async fn put_bucket_cors(&self, req: S3Request) -> S3Result> { + let PutBucketCorsInput { + bucket, + cors_configuration, + .. + } = req.input; + + let Some(store) = new_object_layer_fn() else { + return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); + }; + + store + .get_bucket_info(&bucket, &BucketOptions::default()) + .await + .map_err(ApiError::from)?; + + let data = try_!(serialize(&cors_configuration)); + + metadata_sys::update(&bucket, BUCKET_CORS_CONFIG, data) + .await + .map_err(ApiError::from)?; + + Ok(S3Response::new(PutBucketCorsOutput::default())) + } + + #[instrument(level = "debug", skip(self))] + async fn delete_bucket_cors(&self, req: S3Request) -> S3Result> { + let DeleteBucketCorsInput { bucket, .. 
} = req.input; + + let Some(store) = new_object_layer_fn() else { + return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); + }; + + store + .get_bucket_info(&bucket, &BucketOptions::default()) + .await + .map_err(ApiError::from)?; + + metadata_sys::delete(&bucket, BUCKET_CORS_CONFIG) + .await + .map_err(ApiError::from)?; + + Ok(S3Response::new(DeleteBucketCorsOutput {})) + } + #[instrument(level = "debug", skip(self, req))] async fn put_object_tagging(&self, req: S3Request) -> S3Result> { + let start_time = std::time::Instant::now(); let mut helper = OperationHelper::new(&req, EventName::ObjectCreatedPutTagging, "s3:PutObjectTagging"); let PutObjectTaggingInput { bucket, @@ -4654,6 +5039,8 @@ impl S3 for FS { // Reference: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html // Reference: https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/API/API_PutObjectTagging.html // https://github.com/minio/mint/blob/master/run/core/aws-sdk-go-v2/main.go#L1647 + error!("Tag set exceeds maximum of 10 tags: {}", tagging.tag_set.len()); + return Err(s3_error!(InvalidTag, "Cannot have more than 10 tags per object")); } let Some(store) = new_object_layer_fn() else { @@ -4662,71 +5049,118 @@ impl S3 for FS { let mut tag_keys = std::collections::HashSet::with_capacity(tagging.tag_set.len()); for tag in &tagging.tag_set { - let key = tag - .key - .as_ref() - .filter(|k| !k.is_empty()) - .ok_or_else(|| s3_error!(InvalidTag, "Tag key cannot be empty"))?; + let key = tag.key.as_ref().filter(|k| !k.is_empty()).ok_or_else(|| { + error!("Empty tag key"); + s3_error!(InvalidTag, "Tag key cannot be empty") + })?; if key.len() > 128 { + error!("Tag key too long: {} bytes", key.len()); return Err(s3_error!(InvalidTag, "Tag key is too long, maximum allowed length is 128 characters")); } - let value = tag - .value - .as_ref() - .ok_or_else(|| s3_error!(InvalidTag, "Tag value cannot be null"))?; + let value = tag.value.as_ref().ok_or_else(|| { + 
error!("Null tag value"); + s3_error!(InvalidTag, "Tag value cannot be null") + })?; if value.is_empty() { + error!("Empty tag value"); return Err(s3_error!(InvalidTag, "Tag value cannot be empty")); } if value.len() > 256 { + error!("Tag value too long: {} bytes", value.len()); return Err(s3_error!(InvalidTag, "Tag value is too long, maximum allowed length is 256 characters")); } if !tag_keys.insert(key) { + error!("Duplicate tag key: {}", key); return Err(s3_error!(InvalidTag, "Cannot provide multiple Tags with the same key")); } } let tags = encode_tags(tagging.tag_set); + debug!("Encoded tags: {}", tags); - // TODO: getOpts - // TODO: Replicate + // TODO: getOpts, Replicate + // Support versioned objects + let version_id = req.input.version_id.clone(); + let opts = ObjectOptions { + version_id: self.parse_version_id(version_id)?.map(Into::into), + ..Default::default() + }; - store - .put_object_tags(&bucket, &object, &tags, &ObjectOptions::default()) - .await - .map_err(ApiError::from)?; + store.put_object_tags(&bucket, &object, &tags, &opts).await.map_err(|e| { + error!("Failed to put object tags: {}", e); + counter!("rustfs.put_object_tagging.failure").increment(1); + ApiError::from(e) + })?; - let version_id = req.input.version_id.clone().unwrap_or_default(); - helper = helper.version_id(version_id); + // Invalidate cache for the tagged object + let manager = get_concurrency_manager(); + let version_id = req.input.version_id.clone(); + let cache_key = ConcurrencyManager::make_cache_key(&bucket, &object, version_id.clone().as_deref()); + tokio::spawn(async move { + manager + .invalidate_cache_versioned(&bucket, &object, version_id.as_deref()) + .await; + debug!("Cache invalidated for tagged object: {}", cache_key); + }); - let result = Ok(S3Response::new(PutObjectTaggingOutput { version_id: None })); + // Add metrics + counter!("rustfs.put_object_tagging.success").increment(1); + + let version_id_resp = req.input.version_id.clone().unwrap_or_default(); + 
 helper = helper.version_id(version_id_resp); + + let result = Ok(S3Response::new(PutObjectTaggingOutput { + version_id: req.input.version_id.clone(), + })); let _ = helper.complete(&result); + let duration = start_time.elapsed(); + histogram!("rustfs.object_tagging.operation.duration.seconds", "operation" => "put").record(duration.as_secs_f64()); result } #[instrument(level = "debug", skip(self))] async fn get_object_tagging(&self, req: S3Request) -> S3Result> { + let start_time = std::time::Instant::now(); let GetObjectTaggingInput { bucket, key: object, .. } = req.input; + info!("Starting get_object_tagging for bucket: {}, object: {}", bucket, object); + let Some(store) = new_object_layer_fn() else { + error!("Store not initialized"); return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); }; - // TODO: version - let tags = store - .get_object_tags(&bucket, &object, &ObjectOptions::default()) - .await - .map_err(ApiError::from)?; + // Support versioned objects + let version_id = req.input.version_id.clone(); + let opts = ObjectOptions { + version_id: self.parse_version_id(version_id)?.map(Into::into), + ..Default::default() + }; + + let tags = store.get_object_tags(&bucket, &object, &opts).await.map_err(|e| { + if is_err_object_not_found(&e) { + error!("Object not found: {}", e); + return s3_error!(NoSuchKey); + } + error!("Failed to get object tags: {}", e); + ApiError::from(e).into() + })?; let tag_set = decode_tags(tags.as_str()); + debug!("Decoded tag set: {:?}", tag_set); + // Add metrics + counter!("rustfs.get_object_tagging.success").increment(1); + let duration = start_time.elapsed(); + histogram!("rustfs.object_tagging.operation.duration.seconds", "operation" => "get").record(duration.as_secs_f64()); Ok(S3Response::new(GetObjectTaggingOutput { tag_set, - version_id: None, + version_id: req.input.version_id.clone(), })) }) } @@ -4735,25 +5169,56 @@ &self, req: S3Request, ) -> S3Result> { + let start_time = 
std::time::Instant::now(); let mut helper = OperationHelper::new(&req, EventName::ObjectCreatedDeleteTagging, "s3:DeleteObjectTagging"); - let DeleteObjectTaggingInput { bucket, key: object, .. } = req.input.clone(); + let DeleteObjectTaggingInput { + bucket, + key: object, + version_id, + .. + } = req.input.clone(); let Some(store) = new_object_layer_fn() else { + error!("Store not initialized"); return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); }; - // TODO: Replicate - // TODO: version - store - .delete_object_tags(&bucket, &object, &ObjectOptions::default()) - .await - .map_err(ApiError::from)?; + // Support versioned objects + let version_id_for_parse = version_id.clone(); + let opts = ObjectOptions { + version_id: self.parse_version_id(version_id_for_parse)?.map(Into::into), + ..Default::default() + }; - let version_id = req.input.version_id.clone().unwrap_or_else(|| Uuid::new_v4().to_string()); - helper = helper.version_id(version_id); + // TODO: Replicate (keep the original TODO, if further replication logic is needed) + store.delete_object_tags(&bucket, &object, &opts).await.map_err(|e| { + error!("Failed to delete object tags: {}", e); + ApiError::from(e) + })?; - let result = Ok(S3Response::new(DeleteObjectTaggingOutput { version_id: None })); + // Invalidate cache for the deleted tagged object + let manager = get_concurrency_manager(); + let version_id_clone = version_id.clone(); + tokio::spawn(async move { + manager + .invalidate_cache_versioned(&bucket, &object, version_id_clone.as_deref()) + .await; + debug!( + "Cache invalidated for deleted tagged object: bucket={}, object={}, version_id={:?}", + bucket, object, version_id_clone + ); + }); + + // Add metrics + counter!("rustfs.delete_object_tagging.success").increment(1); + + let version_id_resp = version_id.clone().unwrap_or_default(); + helper = helper.version_id(version_id_resp); + + let result = Ok(S3Response::new(DeleteObjectTaggingOutput { version_id })); 
let _ = helper.complete(&result); + let duration = start_time.elapsed(); + histogram!("rustfs.object_tagging.operation.duration.seconds", "operation" => "delete").record(duration.as_secs_f64()); result } @@ -6402,4 +6867,201 @@ mod tests { assert!(filtered_version_marker.is_some()); assert_eq!(filtered_version_marker.unwrap(), "null"); } + + #[test] + fn test_matches_origin_pattern_exact_match() { + // Test exact match + assert!(matches_origin_pattern("https://example.com", "https://example.com")); + assert!(matches_origin_pattern("http://localhost:3000", "http://localhost:3000")); + assert!(!matches_origin_pattern("https://example.com", "https://other.com")); + } + + #[test] + fn test_matches_origin_pattern_wildcard() { + // Test wildcard pattern matching (S3 CORS supports * as subdomain wildcard) + assert!(matches_origin_pattern("https://*.example.com", "https://app.example.com")); + assert!(matches_origin_pattern("https://*.example.com", "https://api.example.com")); + assert!(matches_origin_pattern("https://*.example.com", "https://subdomain.example.com")); + + // Test wildcard at start (matches any domain) + assert!(matches_origin_pattern("https://*", "https://example.com")); + assert!(matches_origin_pattern("https://*", "https://any-domain.com")); + + // Test wildcard at end (matches any protocol) + assert!(matches_origin_pattern("*://example.com", "https://example.com")); + assert!(matches_origin_pattern("*://example.com", "http://example.com")); + + // Test invalid wildcard patterns (should not match) + assert!(!matches_origin_pattern("https://*.*.com", "https://app.example.com")); // Multiple wildcards (invalid pattern) + // Note: "https://*example.com" actually matches "https://app.example.com" with our current implementation + // because it splits on * and checks starts_with/ends_with. 
This is a limitation but acceptable + // for S3 CORS which typically uses patterns like "https://*.example.com" + } + + #[test] + fn test_matches_origin_pattern_no_wildcard() { + // Test patterns without wildcards + assert!(matches_origin_pattern("https://example.com", "https://example.com")); + assert!(!matches_origin_pattern("https://example.com", "https://example.org")); + assert!(!matches_origin_pattern("http://example.com", "https://example.com")); // Different protocol + } + + #[test] + fn test_matches_origin_pattern_edge_cases() { + // Test edge cases + assert!(!matches_origin_pattern("", "https://example.com")); // Empty pattern + assert!(!matches_origin_pattern("https://example.com", "")); // Empty origin + assert!(matches_origin_pattern("", "")); // Both empty + assert!(!matches_origin_pattern("https://example.com", "http://example.com")); // Protocol mismatch + } + + #[test] + fn test_cors_headers_validation() { + use http::HeaderMap; + + // Test case 1: Validate header name case-insensitivity + let mut headers = HeaderMap::new(); + headers.insert("access-control-request-headers", "Content-Type,X-Custom-Header".parse().unwrap()); + + let req_headers_str = headers + .get("access-control-request-headers") + .and_then(|v| v.to_str().ok()) + .unwrap(); + let req_headers: Vec = req_headers_str.split(',').map(|s| s.trim().to_lowercase()).collect(); + + // Headers should be lowercased for comparison + assert_eq!(req_headers, vec!["content-type", "x-custom-header"]); + + // Test case 2: Wildcard matching + let allowed_headers = ["*".to_string()]; + let all_allowed = req_headers.iter().all(|req_header| { + allowed_headers + .iter() + .any(|allowed| allowed.to_lowercase() == "*" || allowed.to_lowercase() == *req_header) + }); + assert!(all_allowed, "Wildcard should allow all headers"); + + // Test case 3: Specific header matching + let allowed_headers = ["content-type".to_string(), "x-custom-header".to_string()]; + let all_allowed = req_headers + .iter() + 
.all(|req_header| allowed_headers.iter().any(|allowed| allowed.to_lowercase() == *req_header)); + assert!(all_allowed, "All requested headers should be allowed"); + + // Test case 4: Disallowed header + let req_headers = ["content-type".to_string(), "x-forbidden-header".to_string()]; + let allowed_headers = ["content-type".to_string()]; + let all_allowed = req_headers + .iter() + .all(|req_header| allowed_headers.iter().any(|allowed| allowed.to_lowercase() == *req_header)); + assert!(!all_allowed, "Forbidden header should not be allowed"); + } + + #[test] + fn test_cors_response_headers_structure() { + use http::{HeaderMap, HeaderValue}; + + let mut cors_headers = HeaderMap::new(); + + // Simulate building CORS response headers + let origin = "https://example.com"; + let methods = ["GET", "PUT", "POST"]; + let allowed_headers = ["Content-Type", "Authorization"]; + let expose_headers = ["ETag", "x-amz-version-id"]; + let max_age = 3600; + + // Add headers + cors_headers.insert("access-control-allow-origin", HeaderValue::from_str(origin).unwrap()); + cors_headers.insert("vary", HeaderValue::from_static("Origin")); + + let methods_str = methods.join(", "); + cors_headers.insert("access-control-allow-methods", HeaderValue::from_str(&methods_str).unwrap()); + + let headers_str = allowed_headers.join(", "); + cors_headers.insert("access-control-allow-headers", HeaderValue::from_str(&headers_str).unwrap()); + + let expose_str = expose_headers.join(", "); + cors_headers.insert("access-control-expose-headers", HeaderValue::from_str(&expose_str).unwrap()); + + cors_headers.insert("access-control-max-age", HeaderValue::from_str(&max_age.to_string()).unwrap()); + + // Verify all headers are present + assert_eq!(cors_headers.get("access-control-allow-origin").unwrap(), origin); + assert_eq!(cors_headers.get("vary").unwrap(), "Origin"); + assert_eq!(cors_headers.get("access-control-allow-methods").unwrap(), "GET, PUT, POST"); + 
assert_eq!(cors_headers.get("access-control-allow-headers").unwrap(), "Content-Type, Authorization"); + assert_eq!(cors_headers.get("access-control-expose-headers").unwrap(), "ETag, x-amz-version-id"); + assert_eq!(cors_headers.get("access-control-max-age").unwrap(), "3600"); + } + + #[test] + fn test_cors_preflight_vs_actual_request() { + use http::Method; + + // Test that we can distinguish preflight from actual requests + let preflight_method = Method::OPTIONS; + let actual_method = Method::PUT; + + assert_eq!(preflight_method, Method::OPTIONS); + assert_ne!(actual_method, Method::OPTIONS); + + // Preflight should check Access-Control-Request-Method + // Actual request should use the actual method + let is_preflight_1 = preflight_method == Method::OPTIONS; + let is_preflight_2 = actual_method == Method::OPTIONS; + + assert!(is_preflight_1); + assert!(!is_preflight_2); + } + + #[tokio::test] + async fn test_apply_cors_headers_no_origin() { + // Test when no Origin header is present + let headers = HeaderMap::new(); + let method = http::Method::GET; + + // Should return None when no origin header + let result = apply_cors_headers("test-bucket", &method, &headers).await; + assert!(result.is_none(), "Should return None when no Origin header"); + } + + #[tokio::test] + async fn test_apply_cors_headers_no_cors_config() { + // Test when bucket has no CORS configuration + let mut headers = HeaderMap::new(); + headers.insert("origin", "https://example.com".parse().unwrap()); + let method = http::Method::GET; + + // Should return None when no CORS config exists + // Note: This test may fail if test-bucket actually has CORS config + // In a real scenario, we'd use a mock or ensure the bucket doesn't exist + let _result = apply_cors_headers("non-existent-bucket-for-testing", &method, &headers).await; + // Result depends on whether bucket exists and has CORS config + // This is expected behavior - we just verify it doesn't panic + } + + #[tokio::test] + async fn 
test_apply_cors_headers_unsupported_method() { + // Test with unsupported HTTP method + let mut headers = HeaderMap::new(); + headers.insert("origin", "https://example.com".parse().unwrap()); + let method = http::Method::PATCH; // Unsupported method + + let result = apply_cors_headers("test-bucket", &method, &headers).await; + assert!(result.is_none(), "Should return None for unsupported methods"); + } + + #[test] + fn test_matches_origin_pattern_complex_wildcards() { + // Test more complex wildcard scenarios + assert!(matches_origin_pattern("https://*.example.com", "https://sub.example.com")); + // Note: "https://*.example.com" matches "https://api.sub.example.com" with our implementation + // because it only checks starts_with and ends_with. Real S3 might be more strict. + + // Test wildcard in middle position + // Our implementation allows this, but it's not standard S3 CORS pattern + // The pattern "https://example.*.com" splits to ["https://example.", ".com"] + // and "https://example.sub.com" matches because it starts with "https://example." 
and ends with ".com" + // This is acceptable for our use case as S3 CORS typically uses "https://*.example.com" format + } } diff --git a/rustfs/src/storage/options.rs b/rustfs/src/storage/options.rs index 171b1ba3..222bbff0 100644 --- a/rustfs/src/storage/options.rs +++ b/rustfs/src/storage/options.rs @@ -65,13 +65,23 @@ pub async fn del_opts( let vid = vid.map(|v| v.as_str().trim().to_owned()); - if let Some(ref id) = vid - && *id != Uuid::nil().to_string() - && let Err(err) = Uuid::parse_str(id.as_str()) - { - error!("del_opts: invalid version id: {} error: {}", id, err); - return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone())); - } + // Handle AWS S3 special case: "null" string represents null version ID + // When VersionId='null' is specified, it means delete the object with null version ID + let vid = if let Some(ref id) = vid { + if id.eq_ignore_ascii_case("null") { + // Convert "null" to Uuid::nil() string representation + Some(Uuid::nil().to_string()) + } else { + // Validate UUID format for other version IDs + if *id != Uuid::nil().to_string() && Uuid::parse_str(id.as_str()).is_err() { + error!("del_opts: invalid version id: {} error: invalid UUID format", id); + return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone())); + } + Some(id.clone()) + } + } else { + None + }; let mut opts = put_opts_from_headers(headers, metadata.clone()).map_err(|err| { error!("del_opts: invalid argument: {} error: {}", object, err); @@ -704,6 +714,16 @@ mod tests { assert!(!opts.delete_prefix); } + #[tokio::test] + async fn test_del_opts_with_null_version_id() { + let headers = create_test_headers(); + let metadata = create_test_metadata(); + let result = del_opts("test-bucket", "test-object", Some("null".to_string()), &headers, metadata.clone()).await; + assert!(result.is_ok()); + let result = del_opts("test-bucket", "test-object", Some("NULL".to_string()), &headers, metadata.clone()).await; + 
assert!(result.is_ok()); + } + #[tokio::test] async fn test_get_opts_basic() { let headers = create_test_headers();