diff --git a/.gitignore b/.gitignore index 2f791d6e..0dc3785f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ /target .DS_Store .idea +.vscode diff --git a/Cargo.lock b/Cargo.lock index 8773cca8..a30a9562 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + [[package]] name = "ahash" version = "0.7.8" @@ -86,12 +92,40 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "anyhow" +version = "1.0.88" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e1496f8fb1fbf272686b8d37f523dab3e4a7443300055e74cdaa449f3114356" + [[package]] name = "arrayvec" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "async-stream" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.80" @@ -124,6 +158,64 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +[[package]] +name = "axum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 1.0.1", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "backon" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4fa97bb310c33c811334143cf64c5bb2b7b3c06e453db6b095d7061eff8f113" +dependencies = [ + "fastrand", + "gloo-timers", + "tokio", +] + [[package]] name = "backtrace" version = "0.3.73" @@ -134,11 +226,17 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.7.4", "object", "rustc-demangle", ] +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "base64-simd" version = "0.8.0" @@ -170,6 +268,12 @@ 
dependencies = [ "generic-array", ] +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + [[package]] name = "byteorder" version = "1.5.0" @@ -267,6 +371,15 @@ dependencies = [ "libc", ] +[[package]] +name = "crc32c" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47" +dependencies = [ + "rustc_version", +] + [[package]] name = "crc32fast" version = "1.4.2" @@ -307,11 +420,24 @@ dependencies = [ "subtle", ] +[[package]] +name = "e2e_test" +version = "0.0.1" +dependencies = [ + "ecstore", + "flatbuffers", + "protos", + "serde_json", + "tokio", + "tonic", +] + [[package]] name = "ecstore" version = "0.1.0" dependencies = [ "async-trait", + "backon", "base64-simd", "byteorder", "bytes", @@ -324,6 +450,7 @@ dependencies = [ "openssl", "path-absolutize", "path-clean", + "protos", "reed-solomon-erasure", "regex", "rmp", @@ -338,6 +465,8 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", + "tonic", + "tower", "tracing", "tracing-error", "transform-stream", @@ -346,12 +475,60 @@ dependencies = [ "xxhash-rust", ] +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + [[package]] name = "equivalent" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "fastrand" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" + +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "flatbuffers" +version = "24.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8add37afff2d4ffa83bc748a70b4b1370984f6980768554182424ef71447c35f" +dependencies = [ + "bitflags 1.3.2", + "rustc_version", +] + +[[package]] +name = "flate2" +version = "1.0.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c0596c1eac1f9e04ed902702e9878208b336edc9d6fddc8a48387349bab3666" +dependencies = [ + "crc32fast", + "miniz_oxide 0.8.0", +] + [[package]] name = "fnv" version = "1.0.7" @@ -498,6 +675,18 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "h2" version = "0.4.5" @@ -510,7 +699,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -627,6 
+816,20 @@ dependencies = [ "pin-project-lite", "smallvec", "tokio", + "want", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", ] [[package]] @@ -636,12 +839,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" dependencies = [ "bytes", + "futures-channel", "futures-util", "http", "http-body", "hyper", "pin-project-lite", + "socket2", "tokio", + "tower", + "tower-service", + "tracing", ] [[package]] @@ -654,6 +862,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + [[package]] name = "indexmap" version = "2.2.6" @@ -679,12 +897,30 @@ version = "1.70.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +[[package]] +name = "js-sys" +version = "0.3.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +dependencies = [ + "wasm-bindgen", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -703,6 +939,12 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + [[package]] name = "lock_api" version = "0.4.12" @@ -737,6 +979,12 @@ dependencies = [ "regex-automata 0.1.10", ] +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "memchr" version = "2.7.4" @@ -764,6 +1012,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +dependencies = [ + "adler2", +] + [[package]] name = "mio" version = "0.8.11" @@ -775,6 +1032,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "multimap" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + [[package]] name = "netif" version = "0.1.6" @@ -839,6 +1102,12 @@ dependencies = [ "libc", ] +[[package]] +name = "numeric_cast" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf70ee2d9b1737d1836c20d9f8f96ec3901b2bf92128439db13237ddce9173a5" + [[package]] name = "object" version = "0.36.0" @@ -965,6 +1234,36 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "petgraph" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +dependencies = [ + "fixedbitset", + "indexmap 2.2.6", +] + +[[package]] +name = "pin-project" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pin-project-lite" version = "0.2.14" @@ -995,6 +1294,16 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "prettyplease" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +dependencies = [ + "proc-macro2", + "syn", +] + [[package]] name = "proc-macro2" version = "1.0.86" @@ -1005,10 +1314,97 @@ dependencies = [ ] [[package]] -name = "quick-xml" -version = "0.31.0" +name = "prost" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1004a344b30a54e2ee58d66a71b32d2db2feb0a31f9a2d302bf0536f15de2a33" +checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bb182580f71dd070f88d01ce3de9f4da5021db7115d2e1c3605a754153b77c1" +dependencies = [ + "bytes", + "heck", + "itertools", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "prost-types" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cee5168b05f49d4b0ca581206eb14a7b22fafd963efe729ac48eb03266e25cc2" +dependencies = [ + "prost", +] + +[[package]] +name = "protobuf" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bcc343da15609eaecd65f8aa76df8dc4209d325131d8219358c0aaaebab0bf6" +dependencies = [ + "once_cell", + "protobuf-support", + "thiserror", +] + +[[package]] +name = "protobuf-support" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0766e3675a627c327e4b3964582594b0e8741305d628a98a5de75a1d15f99b9" +dependencies = [ + "thiserror", +] + +[[package]] +name = "protos" +version = 
"0.0.1" +dependencies = [ + "flatbuffers", + "prost", + "prost-build", + "protobuf", + "tokio", + "tonic", + "tonic-build", + "tower", +] + +[[package]] +name = "quick-xml" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96a05e2e8efddfa51a84ca47cec303fac86c8541b686d37cac5efc0e094417bc" dependencies = [ "memchr", "serde", @@ -1119,6 +1515,21 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "spin", + "untrusted", + "windows-sys 0.52.0", +] + [[package]] name = "rmp" version = "0.8.14" @@ -1147,6 +1558,15 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + [[package]] name = "rustfs" version = "0.1.0" @@ -1155,15 +1575,29 @@ dependencies = [ "bytes", "clap", "ecstore", + "flatbuffers", "futures", "futures-util", "http", + "http-body", + "hyper", "hyper-util", "mime", "netif", + "pin-project-lite", + "prost", + "prost-build", + "prost-types", + "protobuf", + "protos", "s3s", + "serde_json", "time", "tokio", + "tonic", + "tonic-build", + "tonic-reflection", + "tower", "tracing", "tracing-error", "tracing-subscriber", @@ -1171,6 +1605,67 @@ dependencies = [ "uuid", ] +[[package]] +name = "rustix" +version = "0.38.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +dependencies = [ + "bitflags 2.6.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.23.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +dependencies = [ + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pemfile" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +dependencies = [ + "base64", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" + +[[package]] +name = "rustls-webpki" +version = "0.102.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" + [[package]] name = "ryu" version = "1.0.18" @@ -1179,9 +1674,9 @@ checksum = 
"f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "s3s" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e6cdc8002708b435946eec39afa13c43e4288d1de6316a12816e4cfaaa6c2c" +checksum = "fa54e3b4b4791c8c62291516997866b4f265c3fcbfdbcdd0b8da62896fba8bfa" dependencies = [ "arrayvec", "async-trait", @@ -1190,7 +1685,9 @@ dependencies = [ "bytes", "bytestring", "chrono", + "crc32c", "crc32fast", + "digest", "futures", "hex-simd", "hmac", @@ -1203,6 +1700,7 @@ dependencies = [ "mime", "nom", "nugine-rust-utils", + "numeric_cast", "pin-project-lite", "quick-xml", "serde", @@ -1210,8 +1708,11 @@ dependencies = [ "sha1", "sha2", "smallvec", + "sync_wrapper 1.0.1", "thiserror", "time", + "tokio", + "tower", "tracing", "transform-stream", "urlencoding", @@ -1224,6 +1725,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "semver" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" + [[package]] name = "serde" version = "1.0.203" @@ -1364,15 +1871,40 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.68" +version = "2.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" +checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + +[[package]] +name = "tempfile" +version = "3.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fcd239983515c23a32fb82099f97d0b11b8c72f654ed659363a95c3dad7a53" +dependencies = [ + "cfg-if", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.52.0", +] + [[package]] name = "thiserror" version = "1.0.61" @@ -1478,6 +2010,17 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +dependencies = [ + "rustls", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.15" @@ -1502,12 +2045,104 @@ dependencies = [ "tokio", ] +[[package]] +name = "tonic" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64", + "bytes", + "flate2", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "rustls-pemfile", + "socket2", + "tokio", + "tokio-rustls", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.12.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "568392c5a2bd0020723e3f387891176aabafe36fd9fcd074ad309dfa0c8eb964" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "quote", + "syn", +] + +[[package]] +name = "tonic-reflection" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b742c83ad673e9ab5b4ce0981f7b9e8932be9d60e8682cbf9120494764dbc173" +dependencies = [ + "prost", + "prost-types", + "tokio", + "tokio-stream", + "tonic", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + [[package]] name = "tracing" version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -1583,6 +2218,12 @@ dependencies = [ "futures-core", ] +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + [[package]] name = "typenum" version = "1.17.0" @@ -1610,6 +2251,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "url" version = "2.5.2" @@ -1635,9 +2282,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.9.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ "getrandom", "rand", @@ -1668,12 +2315,76 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasm-bindgen" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +dependencies = [ + "cfg-if", + "once_cell", + "wasm-bindgen-macro", +] + 
+[[package]] +name = "wasm-bindgen-backend" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" + [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index 92b3b1c8..003fce5d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,21 +1,43 @@ [workspace] resolver = "2" -members = ["rustfs", "ecstore"] +members = ["rustfs", "ecstore", "e2e_test", "common/protos"] [workspace.package] edition = "2021" license = "Apache-2.0" repository = "https://github.com/rustfs/rustfs" rust-version = "1.75" +version = "0.0.1" [workspace.dependencies] +async-trait = "0.1.80" +backon = "1.2.0" +bytes = "1.6.0" +clap = { version = "4.5.7", features = ["derive"] } +ecstore = { path = "./ecstore" } +flatbuffers = "24.3.25" +futures = "0.3.30" +futures-util = "0.3.30" +hyper = "1.3.1" +hyper-util = { version = "0.1.5", features = [ + "tokio", + "server-auto", + "server-graceful", +] } +http = "1.1.0" +http-body = "1.0.0" +mime = "0.3.17" +netif = "0.1.6" +pin-project-lite = "0.2" +# pin-utils = "0.1.0" +prost = "0.13.1" +prost-build = "0.13.1" +prost-types = "0.13.1" +protobuf = "3.2" +protos = { path = "./common/protos" } +s3s = { version = "0.10.1", default-features = true, features = ["tower"] } serde = { version = "1.0.203", features = ["derive"] } serde_json = "1.0.117" -tracing = "0.1.40" -tracing-error = "0.2.0" -futures = "0.3.30" -bytes = "1.6.0" -http = "1.1.0" thiserror = "1.0.61" time = { version = "0.3.36", features = [ "std", @@ -24,6 +46,12 @@ time = { version = "0.3.36", features = [ "macros", "serde", ] } -async-trait = "0.1.80" -tokio = { version = "1.38.0", features = ["fs"] } -futures-util = "0.3.30" +tokio = { version = "1.38.0", features = ["fs", "rt-multi-thread"] } +tonic = { version = "0.12.1", features = ["gzip"] } +tonic-build = "0.12.1" +tonic-reflection = "0.12" +tower = { version = "0.4.13", features = ["timeout"] } +tracing = "0.1.40" +tracing-error = "0.2.0" +tracing-subscriber = { version = "0.3.18", features = ["env-filter", "time"] } +transform-stream = "0.3.0" \ No newline at end of file diff --git a/TODO.md b/TODO.md index f020ad93..f19a90c3 100644 --- a/TODO.md +++ b/TODO.md @@ -34,9 +34,22 @@ - [ ] 对象锁 - [ ] 复制 CopyObject - [ ] 详情 HeadObject - + - [ ] 对象预先签名(get、put、head、post) + ## 扩展功能 -- [ ] 版本控制 -- [ ] 对象锁 -- [ ] 修复 +- [ ] 用户管理 +- [ ] Policy管理 +- [ ] AK/SK分配管理 +- [ ] data scanner统计和对象修复 +- [ ] 桶配额 +- [ ] 桶只读 +- [ ] 桶复制 +- [ ] 桶事件通知 +- [ ] 桶公开、桶私有 +- [ ] 对象生命周期管理 +- [ ] prometheus对接 +- [ ] 日志收集和日志外发 +- [ ] 对象压缩 +- [ ] STS +- [ ] 
diff --git a/common/protos/Cargo.toml b/common/protos/Cargo.toml new file mode 100644 index 00000000..a6ab9df6 --- /dev/null +++ b/common/protos/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "protos" +version.workspace = true +edition.workspace = true + +[dependencies] +#async-backtrace = { workspace = true, optional = true } +flatbuffers = { workspace = true } +prost = { workspace = true } +protobuf = { workspace = true } +tokio = { workspace = true } +tonic = { workspace = true, features = ["transport", "tls"] } +tower = { workspace = true } + +[build-dependencies] +prost-build = { workspace = true } +tonic-build = { workspace = true } diff --git a/common/protos/build.rs b/common/protos/build.rs new file mode 100644 index 00000000..5ef32dde --- /dev/null +++ b/common/protos/build.rs @@ -0,0 +1,262 @@ +use std::{ + cmp, env, fs, + io::Write, + path::{Path, PathBuf}, + process::Command, +}; + +type AnyError = Box<dyn std::error::Error>; + +const ENV_OUT_DIR: &str = "OUT_DIR"; +const VERSION_PROTOBUF: Version = Version(27, 0, 0); // 27.0 +const VERSION_FLATBUFFERS: Version = Version(24, 3, 25); // 24.3.25 +/// Build protos if the major version of `flatc` or `protoc` is greater +/// or less than the expected version. +const ENV_BUILD_PROTOS: &str = "BUILD_PROTOS"; +/// Path of `flatc` binary. +const ENV_FLATC_PATH: &str = "FLATC_PATH"; + +fn main() -> Result<(), AnyError> { + let version = protobuf_compiler_version()?; + let need_compile = match version.compare_ext(&VERSION_PROTOBUF) { + Ok(cmp::Ordering::Equal) => true, + Ok(_) => { + let version_err = Version::build_error_message(&version, &VERSION_PROTOBUF).unwrap(); + println!("cargo:warning=Tool `protoc` {version_err}, skip compiling."); + false + } + Err(version_err) => { + // return Err(format!("Tool `protoc` {version_err}, please update it.").into()); + println!("cargo:warning=Tool `protoc` {version_err}, please update it."); + false + } + }; + + if !need_compile { + return Ok(()); + } + + // path of proto file + let project_root_dir = env::current_dir()?; + let proto_dir = project_root_dir.join("src"); + let proto_files = &["node.proto"]; + let proto_out_dir = project_root_dir.join("src").join("generated").join("proto_gen"); + let flatbuffer_out_dir = project_root_dir.join("src").join("generated").join("flatbuffers_generated"); + let descriptor_set_path = PathBuf::from(env::var(ENV_OUT_DIR).unwrap()).join("proto-descriptor.bin"); + + tonic_build::configure() + .out_dir(proto_out_dir) + .file_descriptor_set_path(descriptor_set_path) + .protoc_arg("--experimental_allow_proto3_optional") + .compile_well_known_types(true) + .emit_rerun_if_changed(false) + .compile(proto_files, &[proto_dir.clone()]) + .map_err(|e| format!("Failed to generate protobuf file: {e}."))?; + + // protos/gen/mod.rs + let generated_mod_rs_path = project_root_dir + .join("src") + .join("generated") + .join("proto_gen") + .join("mod.rs"); + + let mut generated_mod_rs = fs::File::create(generated_mod_rs_path)?; + writeln!(&mut generated_mod_rs, "pub mod node_service;")?; + generated_mod_rs.flush()?; + + let generated_mod_rs_path = project_root_dir.join("src").join("generated").join("mod.rs"); + + let mut generated_mod_rs = fs::File::create(generated_mod_rs_path)?; + writeln!(&mut generated_mod_rs, "#![allow(unused_imports)]")?; + writeln!(&mut generated_mod_rs, "#![allow(clippy::all)]")?; + writeln!(&mut generated_mod_rs, "pub mod proto_gen;")?; + generated_mod_rs.flush()?; + + let flatc_path = match env::var(ENV_FLATC_PATH) { + Ok(path) => {
println!("cargo:warning=Specified flatc path by environment {ENV_FLATC_PATH}={path}"); + path + } + Err(_) => "flatc".to_string(), + }; + + // build src/protos/*.fbs files to src/protos/gen/ + compile_flatbuffers_models( + &mut generated_mod_rs, + &flatc_path, + proto_dir.clone(), + flatbuffer_out_dir.clone(), + vec!["models"], + )?; + Ok(()) +} + +/// Compile proto/**.fbs files. +fn compile_flatbuffers_models, S: AsRef>( + generated_mod_rs: &mut fs::File, + flatc_path: &str, + in_fbs_dir: P, + out_rust_dir: P, + mod_names: Vec, +) -> Result<(), AnyError> { + let version = flatbuffers_compiler_version(flatc_path)?; + let need_compile = match version.compare_ext(&VERSION_FLATBUFFERS) { + Ok(cmp::Ordering::Equal) => true, + Ok(_) => { + let version_err = Version::build_error_message(&version, &VERSION_FLATBUFFERS).unwrap(); + println!("cargo:warning=Tool `{flatc_path}` {version_err}, skip compiling."); + false + } + Err(version_err) => { + return Err(format!("Tool `{flatc_path}` {version_err}, please update it.").into()); + } + }; + + let fbs_dir = in_fbs_dir.as_ref(); + let rust_dir = out_rust_dir.as_ref(); + fs::create_dir_all(rust_dir)?; + + // $rust_dir/mod.rs + let mut sub_mod_rs = fs::File::create(rust_dir.join("mod.rs"))?; + writeln!(generated_mod_rs)?; + writeln!(generated_mod_rs, "mod flatbuffers_generated;")?; + for mod_name in mod_names.iter() { + let mod_name = mod_name.as_ref(); + writeln!(generated_mod_rs, "pub use flatbuffers_generated::{mod_name}::*;")?; + writeln!(&mut sub_mod_rs, "pub mod {mod_name};")?; + + if need_compile { + let fbs_file_path = fbs_dir.join(format!("{mod_name}.fbs")); + let output = Command::new(flatc_path) + .arg("-o") + .arg(rust_dir) + .arg("--rust") + .arg("--gen-mutable") + .arg("--gen-onefile") + .arg("--gen-name-strings") + .arg("--filename-suffix") + .arg("") + .arg(&fbs_file_path) + .output() + .map_err(|e| format!("Failed to execute process of flatc: {e}"))?; + if !output.status.success() { + return Err(format!( + "Failed to generate file '{}' by flatc(path: '{flatc_path}'): {}.", + fbs_file_path.display(), + String::from_utf8_lossy(&output.stderr), + ) + .into()); + } + } + } + generated_mod_rs.flush()?; + sub_mod_rs.flush()?; + + Ok(()) +} + +/// Run command `flatc --version` to get the version of flatc. +/// +/// ```ignore +/// $ flatc --version +/// flatc version 24.3.25 +/// ``` +fn flatbuffers_compiler_version(flatc_path: impl AsRef) -> Result { + let flatc_path = flatc_path.as_ref(); + Version::try_get(format!("{}", flatc_path.display()), |output| { + const PREFIX_OF_VERSION: &str = "flatc version "; + let output = output.trim(); + if let Some(version) = output.strip_prefix(PREFIX_OF_VERSION) { + Ok(version.to_string()) + } else { + Err(format!("Failed to get flatc version: {output}")) + } + }) +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +struct Version(u32, u32, u32); + +impl Version { + fn try_get Result>(exe: String, output_to_version_string: F) -> Result { + let cmd = format!("{exe} --version"); + let output = std::process::Command::new(exe) + .arg("--version") + .output() + .map_err(|e| format!("Failed to execute `{cmd}`: {e}",))?; + let output_utf8 = String::from_utf8(output.stdout).map_err(|e| { + let output_lossy = String::from_utf8_lossy(e.as_bytes()); + format!("Command `{cmd}` returned invalid UTF-8('{output_lossy}'): {e}") + })?; + if output.status.success() { + let version_string = output_to_version_string(&output_utf8)?; + Ok(version_string.parse::()?) 
+ } else { + Err(format!("Failed to get version by command `{cmd}`: {output_utf8}")) + } + } + + fn build_error_message(version: &Self, expected: &Self) -> Option<String> { + match version.compare_major_version(expected) { + cmp::Ordering::Equal => None, + cmp::Ordering::Greater => Some(format!("version({version}) is greater than version({expected})")), + cmp::Ordering::Less => Some(format!("version({version}) is less than version({expected})")), + } + } + + fn compare_ext(&self, expected_version: &Self) -> Result<cmp::Ordering, String> { + match env::var(ENV_BUILD_PROTOS) { + Ok(build_protos) => { + if build_protos.is_empty() || build_protos == "0" { + Ok(self.compare_major_version(expected_version)) + } else { + match self.compare_major_version(expected_version) { + cmp::Ordering::Equal => Ok(cmp::Ordering::Equal), + _ => Err(Self::build_error_message(self, expected_version).unwrap()), + } + } + } + Err(_) => Ok(self.compare_major_version(expected_version)), + } + } + + fn compare_major_version(&self, other: &Self) -> cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl std::str::FromStr for Version { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + let mut version = [0_u32; 3]; + for (i, v) in s.split('.').take(3).enumerate() { + version[i] = v.parse().map_err(|e| format!("Failed to parse version string '{s}': {e}"))?; + } + Ok(Version(version[0], version[1], version[2])) + } +} + +impl std::fmt::Display for Version { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}.{}.{}", self.0, self.1, self.2) + } +} + +/// Run command `protoc --version` to get the version of protoc. +/// +/// ```ignore +/// $ protoc --version +/// libprotoc 27.0 +/// ``` +fn protobuf_compiler_version() -> Result<Version, String> { + Version::try_get("protoc".to_string(), |output| { + const PREFIX_OF_VERSION: &str = "libprotoc "; + let output = output.trim(); + if let Some(version) = output.strip_prefix(PREFIX_OF_VERSION) { + Ok(version.to_string()) + } else { + Err(format!("Failed to get protoc version: {output}")) + } + }) +}
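The `BUILD_PROTOS` gate above is easy to misread, so here is a hedged sketch of its decision table (the `check` helper is hypothetical, standing in for `Version::compare_ext`): with the variable unset, empty, or `0`, only the major version is compared and a mismatch merely produces a `cargo:warning` that skips codegen; any other value turns a major-version mismatch into an error, which `main` treats as fatal for `flatc` and downgrades to a warning for `protoc`.

```rust
use std::cmp::Ordering;

// Hypothetical stand-in for `Version::compare_ext`; versions are
// (major, minor, patch) triples and `build_protos` mimics BUILD_PROTOS.
fn check(
    found: (u32, u32, u32),
    expected: (u32, u32, u32),
    build_protos: Option<&str>,
) -> Result<Ordering, String> {
    let strict = matches!(build_protos, Some(v) if !v.is_empty() && v != "0");
    let major_cmp = found.0.cmp(&expected.0); // only the major version matters
    if strict && major_cmp != Ordering::Equal {
        return Err(format!("major version {} != expected {}", found.0, expected.0));
    }
    Ok(major_cmp)
}

fn main() {
    // protoc 28.x against expected 27.x: tolerated by default (codegen skipped)...
    assert_eq!(check((28, 0, 0), (27, 0, 0), None), Ok(Ordering::Greater));
    // ...but an error once BUILD_PROTOS=1 is exported.
    assert!(check((28, 0, 0), (27, 0, 0), Some("1")).is_err());
}
```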
diff --git a/common/protos/src/generated/flatbuffers_generated/mod.rs b/common/protos/src/generated/flatbuffers_generated/mod.rs new file mode 100644 index 00000000..c446ac88 --- /dev/null +++ b/common/protos/src/generated/flatbuffers_generated/mod.rs @@ -0,0 +1 @@ +pub mod models; diff --git a/common/protos/src/generated/flatbuffers_generated/models.rs b/common/protos/src/generated/flatbuffers_generated/models.rs new file mode 100644 index 00000000..aa1f6ae2 --- /dev/null +++ b/common/protos/src/generated/flatbuffers_generated/models.rs @@ -0,0 +1,123 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +// @generated + +use core::mem; +use core::cmp::Ordering; + +extern crate flatbuffers; +use self::flatbuffers::{EndianScalar, Follow}; + +#[allow(unused_imports, dead_code)] +pub mod models { + + use core::mem; + use core::cmp::Ordering; + + extern crate flatbuffers; + use self::flatbuffers::{EndianScalar, Follow}; + +pub enum PingBodyOffset {} +#[derive(Copy, Clone, PartialEq)] + +pub struct PingBody<'a> { + pub _tab: flatbuffers::Table<'a>, +} + +impl<'a> flatbuffers::Follow<'a> for PingBody<'a> { + type Inner = PingBody<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { _tab: flatbuffers::Table::new(buf, loc) } + } +} + +impl<'a> PingBody<'a> { + pub const VT_PAYLOAD: flatbuffers::VOffsetT = 4; + + pub const fn get_fully_qualified_name() -> &'static str { + "models.PingBody" + } + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + PingBody { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args PingBodyArgs<'args> + ) -> flatbuffers::WIPOffset<PingBody<'bldr>> { + let mut builder = PingBodyBuilder::new(_fbb); + if let Some(x) = args.payload { builder.add_payload(x); } + builder.finish() + } + + + #[inline] + pub fn payload(&self) -> Option<flatbuffers::Vector<'a, u8>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(PingBody::VT_PAYLOAD, None)} + } +} + +impl flatbuffers::Verifiable for PingBody<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, pos: usize + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u8>>>("payload", Self::VT_PAYLOAD, false)? + .finish(); + Ok(()) + } +} +pub struct PingBodyArgs<'a> { + pub payload: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u8>>>, +} +impl<'a> Default for PingBodyArgs<'a> { + #[inline] + fn default() -> Self { + PingBodyArgs { + payload: None, + } + } +} + +pub struct PingBodyBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, +} +impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> PingBodyBuilder<'a, 'b, A> { + #[inline] + pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset<flatbuffers::Vector<'b, u8>>) { + self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(PingBody::VT_PAYLOAD, payload); + } + #[inline] + pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> PingBodyBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + PingBodyBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset<PingBody<'a>> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } +} + +impl core::fmt::Debug for PingBody<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("PingBody"); + ds.field("payload", &self.payload()); + ds.finish() + } +} +} // pub mod models +
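The generated builder API above is verbose, so a short round-trip may help. This is a minimal sketch, assuming the `protos` crate re-exports the generated `models` module at `protos::models` (the import path is illustrative) and `flatbuffers` 24.3.25; plausibly this is also what the `body` bytes of the prost `PingRequest` below carry:

```rust
use protos::models::{PingBody, PingBodyArgs};

// Build a PingBody, then verify and read it back from raw bytes.
fn ping_body_roundtrip() -> Result<(), flatbuffers::InvalidFlatbuffer> {
    let mut fbb = flatbuffers::FlatBufferBuilder::new();
    let payload = fbb.create_vector(&b"hello"[..]);
    let body = PingBody::create(&mut fbb, &PingBodyArgs { payload: Some(payload) });
    fbb.finish_minimal(body);

    // `flatbuffers::root` runs the generated verifier before handing out accessors.
    let parsed = flatbuffers::root::<PingBody>(fbb.finished_data())?;
    assert_eq!(parsed.payload().map(|p| p.bytes()), Some(&b"hello"[..]));
    Ok(())
}
```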
diff --git a/common/protos/src/generated/mod.rs b/common/protos/src/generated/mod.rs new file mode 100644 index 00000000..4ab5a438 --- /dev/null +++ b/common/protos/src/generated/mod.rs @@ -0,0 +1,6 @@ +#![allow(unused_imports)] +#![allow(clippy::all)] +pub mod proto_gen; + +mod flatbuffers_generated; +pub use flatbuffers_generated::models::*; diff --git a/common/protos/src/generated/proto_gen/mod.rs b/common/protos/src/generated/proto_gen/mod.rs new file mode 100644 index 00000000..35d3fe1b --- /dev/null +++ b/common/protos/src/generated/proto_gen/mod.rs @@ -0,0 +1 @@ +pub mod node_service; diff --git a/common/protos/src/generated/proto_gen/node_service.rs b/common/protos/src/generated/proto_gen/node_service.rs new file mode 100644 index 00000000..b44e1c1d --- /dev/null +++ b/common/protos/src/generated/proto_gen/node_service.rs @@ -0,0 +1,2489 @@ +// This file is @generated by prost-build. +/// -------------------------------------------------------------------- +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PingRequest { + #[prost(uint64, tag = "1")] + pub version: u64, + #[prost(bytes = "vec", tag = "2")] + pub body: ::prost::alloc::vec::Vec<u8>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PingResponse { + #[prost(uint64, tag = "1")] + pub version: u64, + #[prost(bytes = "vec", tag = "2")] + pub body: ::prost::alloc::vec::Vec<u8>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListBucketRequest { + #[prost(string, tag = "1")] + pub options: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListBucketResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, repeated, tag = "2")] + pub bucket_infos: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, optional, tag = "3")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MakeBucketRequest { + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub options: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MakeBucketResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, optional, tag = "2")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBucketInfoRequest { + #[prost(string, tag = "1")] + pub bucket: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub options: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBucketInfoResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, tag = "2")] + pub bucket_info: ::prost::alloc::string::String, + #[prost(string, optional, tag = "3")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteBucketRequest { + #[prost(string, tag = "1")] + pub bucket: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteBucketResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, optional, tag = "2")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadAllRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub path: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadAllResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(bytes = "vec", tag = "2")] + pub data: 
::prost::alloc::vec::Vec<u8>, + #[prost(string, optional, tag = "3")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WriteAllRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub path: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "4")] + pub data: ::prost::alloc::vec::Vec<u8>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WriteAllResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, optional, tag = "2")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub path: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub options: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, optional, tag = "2")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RenameFileRequst { + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub src_volume: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub src_path: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub dst_volume: ::prost::alloc::string::String, + #[prost(string, tag = "5")] + pub dst_path: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RenameFileResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, optional, tag = "2")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WriteRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub path: ::prost::alloc::string::String, + #[prost(bool, tag = "4")] + pub is_append: bool, + #[prost(bytes = "vec", tag = "5")] + pub data: ::prost::alloc::vec::Vec<u8>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WriteResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, optional, tag = "2")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadAtRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: 
::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub path: ::prost::alloc::string::String, + #[prost(int64, tag = "4")] + pub offset: i64, + #[prost(int64, tag = "5")] + pub length: i64, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadAtResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(bytes = "vec", tag = "2")] + pub data: ::prost::alloc::vec::Vec<u8>, + #[prost(int64, tag = "3")] + pub read_size: i64, + #[prost(string, optional, tag = "4")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListDirRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListDirResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, repeated, tag = "2")] + pub volumes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, optional, tag = "3")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WalkDirRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub walk_dir_options: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WalkDirResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, repeated, tag = "2")] + pub meta_cache_entry: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, optional, tag = "3")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RenameDataRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub src_volume: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub src_path: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub file_info: ::prost::alloc::string::String, + #[prost(string, tag = "5")] + pub dst_volume: ::prost::alloc::string::String, + #[prost(string, tag = "6")] + pub dst_path: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RenameDataResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, tag = "2")] + pub rename_data_resp: ::prost::alloc::string::String, + #[prost(string, optional, tag = "3")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MakeVolumesRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, repeated, tag = "2")] + pub volumes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub 
struct MakeVolumesResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, optional, tag = "2")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MakeVolumeRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MakeVolumeResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, optional, tag = "2")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListVolumesRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListVolumesResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, repeated, tag = "2")] + pub volume_infos: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, optional, tag = "3")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StatVolumeRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StatVolumeResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, tag = "2")] + pub volume_info: ::prost::alloc::string::String, + #[prost(string, optional, tag = "3")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WriteMetadataRequest { + /// indicate which one in the disks + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub path: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub file_info: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WriteMetadataResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, optional, tag = "2")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadVersionRequest { + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub path: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub version_id: ::prost::alloc::string::String, + #[prost(string, tag = "5")] + pub opts: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct 
ReadVersionResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, tag = "2")] + pub file_info: ::prost::alloc::string::String, + #[prost(string, optional, tag = "3")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadXlRequest { + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub path: ::prost::alloc::string::String, + #[prost(bool, tag = "4")] + pub read_data: bool, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadXlResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, tag = "2")] + pub raw_file_info: ::prost::alloc::string::String, + #[prost(string, optional, tag = "3")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteVersionsRequest { + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: ::prost::alloc::string::String, + #[prost(string, repeated, tag = "3")] + pub versions: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, tag = "4")] + pub opts: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteVersionsResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, repeated, tag = "2")] + pub errors: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, optional, tag = "3")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadMultipleRequest { + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub read_multiple_req: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadMultipleResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, repeated, tag = "2")] + pub read_multiple_resps: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, optional, tag = "3")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteVolumeRequest { + #[prost(string, tag = "1")] + pub disk: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub volume: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteVolumeResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, optional, tag = "2")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} +/// Generated client implementations. 
+pub mod node_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct NodeServiceClient<T> { + inner: tonic::client::Grpc<T>, + } + impl NodeServiceClient<tonic::transport::Channel> { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error> + where + D: TryInto<tonic::transport::Endpoint>, + D::Error: Into<StdError>, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl<T> NodeServiceClient<T> + where + T: tonic::client::GrpcService<tonic::body::BoxBody>, + T::Error: Into<StdError>, + T::ResponseBody: Body<Data = Bytes> + Send + 'static, + <T::ResponseBody as Body>::Error: Into<StdError> + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor<F>( + inner: T, + interceptor: F, + ) -> NodeServiceClient<InterceptedService<T, F>> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request<tonic::body::BoxBody>, + Response = http::Response< + <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody, + >, + >, + <T as tonic::codegen::Service< + http::Request<tonic::body::BoxBody>, + >>::Error: Into<StdError> + Send + Sync, + { + NodeServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message.
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// -------------------------------meta service-------------------------- + pub async fn ping( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Ping", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "Ping")); + self.inner.unary(req, path, codec).await + } + pub async fn list_bucket( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ListBucket", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "ListBucket")); + self.inner.unary(req, path, codec).await + } + pub async fn make_bucket( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/MakeBucket", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "MakeBucket")); + self.inner.unary(req, path, codec).await + } + pub async fn get_bucket_info( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetBucketInfo", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "GetBucketInfo")); + self.inner.unary(req, path, codec).await + } + pub async fn delete_bucket( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteBucket", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "DeleteBucket")); + self.inner.unary(req, path, codec).await + } + pub async fn read_all( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + 
self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadAll", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "ReadAll")); + self.inner.unary(req, path, codec).await + } + pub async fn write_all( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/WriteAll", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "WriteAll")); + self.inner.unary(req, path, codec).await + } + pub async fn delete( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Delete", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "Delete")); + self.inner.unary(req, path, codec).await + } + pub async fn rename_file( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/RenameFile", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "RenameFile")); + self.inner.unary(req, path, codec).await + } + pub async fn write( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Write", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "Write")); + self.inner.unary(req, path, codec).await + } + /// rpc Append(AppendRequest) returns (AppendResponse) {}; + pub async fn read_at( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadAt", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "ReadAt")); + 
self.inner.unary(req, path, codec).await + } + pub async fn list_dir( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ListDir", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "ListDir")); + self.inner.unary(req, path, codec).await + } + pub async fn walk_dir( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/WalkDir", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "WalkDir")); + self.inner.unary(req, path, codec).await + } + pub async fn rename_data( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/RenameData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "RenameData")); + self.inner.unary(req, path, codec).await + } + pub async fn make_volumes( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/MakeVolumes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "MakeVolumes")); + self.inner.unary(req, path, codec).await + } + pub async fn make_volume( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/MakeVolume", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "MakeVolume")); + self.inner.unary(req, path, codec).await + } + pub async fn list_volumes( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + 
let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ListVolumes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "ListVolumes")); + self.inner.unary(req, path, codec).await + } + pub async fn stat_volume( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/StatVolume", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "StatVolume")); + self.inner.unary(req, path, codec).await + } + pub async fn write_metadata( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/WriteMetadata", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "WriteMetadata")); + self.inner.unary(req, path, codec).await + } + pub async fn read_version( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadVersion", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "ReadVersion")); + self.inner.unary(req, path, codec).await + } + pub async fn read_xl( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadXL", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "ReadXL")); + self.inner.unary(req, path, codec).await + } + pub async fn delete_versions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteVersions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "DeleteVersions")); + self.inner.unary(req, path, codec).await + } + pub async fn read_multiple( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + 
tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadMultiple", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "ReadMultiple")); + self.inner.unary(req, path, codec).await + } + pub async fn delete_volume( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteVolume", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "DeleteVolume")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod node_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with NodeServiceServer. + #[async_trait] + pub trait NodeService: Send + Sync + 'static { + /// -------------------------------meta service-------------------------- + async fn ping( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn list_bucket( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn make_bucket( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn get_bucket_info( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn delete_bucket( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn read_all( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn write_all( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn delete( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn rename_file( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn write( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// rpc Append(AppendRequest) returns (AppendResponse) {}; + async fn read_at( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn list_dir( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn walk_dir( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn rename_data( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn make_volumes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn make_volume( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn list_volumes( + &self, 
+ request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn stat_volume( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn write_metadata( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn read_version( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn read_xl( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn delete_versions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn read_multiple( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn delete_volume( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct NodeServiceServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl NodeServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for NodeServiceServer + where + T: NodeService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/node_service.NodeService/Ping" => { + #[allow(non_camel_case_types)] + struct PingSvc(pub Arc); + impl tonic::server::UnaryService + for PingSvc { + type Response = super::PingResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::ping(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = PingSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/ListBucket" => { + #[allow(non_camel_case_types)] + struct ListBucketSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for ListBucketSvc { + type Response = super::ListBucketResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_bucket(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ListBucketSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/MakeBucket" => { + #[allow(non_camel_case_types)] + struct MakeBucketSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for MakeBucketSvc { + type Response = super::MakeBucketResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::make_bucket(&inner, request).await + }; + Box::pin(fut) + } + } + let 
accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = MakeBucketSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/GetBucketInfo" => { + #[allow(non_camel_case_types)] + struct GetBucketInfoSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for GetBucketInfoSvc { + type Response = super::GetBucketInfoResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_bucket_info(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBucketInfoSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/DeleteBucket" => { + #[allow(non_camel_case_types)] + struct DeleteBucketSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteBucketSvc { + type Response = super::DeleteBucketResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::delete_bucket(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = DeleteBucketSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/ReadAll" => { + #[allow(non_camel_case_types)] + struct ReadAllSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for ReadAllSvc { + type Response = super::ReadAllResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( 
+ &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::read_all(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ReadAllSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/WriteAll" => { + #[allow(non_camel_case_types)] + struct WriteAllSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for WriteAllSvc { + type Response = super::WriteAllResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::write_all(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = WriteAllSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/Delete" => { + #[allow(non_camel_case_types)] + struct DeleteSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteSvc { + type Response = super::DeleteResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::delete(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = DeleteSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/RenameFile" => { + #[allow(non_camel_case_types)] + struct RenameFileSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for 
RenameFileSvc { + type Response = super::RenameFileResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::rename_file(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = RenameFileSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/Write" => { + #[allow(non_camel_case_types)] + struct WriteSvc(pub Arc); + impl tonic::server::UnaryService + for WriteSvc { + type Response = super::WriteResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::write(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = WriteSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/ReadAt" => { + #[allow(non_camel_case_types)] + struct ReadAtSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for ReadAtSvc { + type Response = super::ReadAtResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::read_at(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ReadAtSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/ListDir" => { + #[allow(non_camel_case_types)] 
+ struct ListDirSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for ListDirSvc { + type Response = super::ListDirResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_dir(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ListDirSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/WalkDir" => { + #[allow(non_camel_case_types)] + struct WalkDirSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for WalkDirSvc { + type Response = super::WalkDirResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::walk_dir(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = WalkDirSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/RenameData" => { + #[allow(non_camel_case_types)] + struct RenameDataSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for RenameDataSvc { + type Response = super::RenameDataResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::rename_data(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = RenameDataSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = 
grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/MakeVolumes" => { + #[allow(non_camel_case_types)] + struct MakeVolumesSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for MakeVolumesSvc { + type Response = super::MakeVolumesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::make_volumes(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = MakeVolumesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/MakeVolume" => { + #[allow(non_camel_case_types)] + struct MakeVolumeSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for MakeVolumeSvc { + type Response = super::MakeVolumeResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::make_volume(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = MakeVolumeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/ListVolumes" => { + #[allow(non_camel_case_types)] + struct ListVolumesSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for ListVolumesSvc { + type Response = super::ListVolumesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_volumes(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ListVolumesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + 
.apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/StatVolume" => { + #[allow(non_camel_case_types)] + struct StatVolumeSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for StatVolumeSvc { + type Response = super::StatVolumeResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::stat_volume(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = StatVolumeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/WriteMetadata" => { + #[allow(non_camel_case_types)] + struct WriteMetadataSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for WriteMetadataSvc { + type Response = super::WriteMetadataResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::write_metadata(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = WriteMetadataSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/ReadVersion" => { + #[allow(non_camel_case_types)] + struct ReadVersionSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for ReadVersionSvc { + type Response = super::ReadVersionResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::read_version(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = 
self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ReadVersionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/ReadXL" => { + #[allow(non_camel_case_types)] + struct ReadXLSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for ReadXLSvc { + type Response = super::ReadXlResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::read_xl(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ReadXLSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/DeleteVersions" => { + #[allow(non_camel_case_types)] + struct DeleteVersionsSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteVersionsSvc { + type Response = super::DeleteVersionsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::delete_versions(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = DeleteVersionsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/ReadMultiple" => { + #[allow(non_camel_case_types)] + struct ReadMultipleSvc(pub Arc); + impl< + T: NodeService, + > tonic::server::UnaryService + for ReadMultipleSvc { + type Response = super::ReadMultipleResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::read_multiple(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = 
self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ReadMultipleSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/DeleteVolume" => { + #[allow(non_camel_case_types)] + struct DeleteVolumeSvc<T: NodeService>(pub Arc<T>); + impl< + T: NodeService, + > tonic::server::UnaryService<super::DeleteVolumeRequest> + for DeleteVolumeSvc<T> { + type Response = super::DeleteVolumeResponse; + type Future = BoxFuture< + tonic::Response<Self::Response>, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request<super::DeleteVolumeRequest>, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + <T as NodeService>::delete_volume(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = DeleteVolumeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl<T> Clone for NodeServiceServer<T> { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl<T> tonic::server::NamedService for NodeServiceServer<T> { + const NAME: &'static str = "node_service.NodeService"; + } +} diff --git a/common/protos/src/lib.rs b/common/protos/src/lib.rs new file mode 100644 index 00000000..639f9b06 --- /dev/null +++ b/common/protos/src/lib.rs @@ -0,0 +1,28 @@ +mod generated; +use std::time::Duration; + +pub use generated::*; +use proto_gen::node_service::node_service_client::NodeServiceClient; +use tonic::{codec::CompressionEncoding, transport::Channel}; +use tower::timeout::Timeout; + +// Default 100 MB +pub const DEFAULT_GRPC_SERVER_MESSAGE_LEN: usize = 100 * 1024 * 1024; + +pub fn node_service_time_out_client( + channel: Channel, + time_out: Duration, + max_message_size: usize, + grpc_enable_gzip: bool, +) -> NodeServiceClient<Timeout<Channel>> { + let timeout_channel = Timeout::new(channel, time_out); + let client = NodeServiceClient::<Timeout<Channel>>::new(timeout_channel); + let client = NodeServiceClient::max_decoding_message_size(client, max_message_size); + if grpc_enable_gzip { + NodeServiceClient::max_encoding_message_size(client, max_message_size) + .accept_compressed(CompressionEncoding::Gzip) + .send_compressed(CompressionEncoding::Gzip) + } else { + NodeServiceClient::max_encoding_message_size(client, max_message_size) + } +}
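For reference, a minimal sketch of calling `node_service_time_out_client` from application code. The endpoint address, timeout, and gzip flag are illustrative assumptions rather than values taken from this change; only the function itself and `DEFAULT_GRPC_SERVER_MESSAGE_LEN` come from the lib.rs above.

```rust
use std::time::Duration;

use protos::proto_gen::node_service::PingRequest;
use protos::{node_service_time_out_client, DEFAULT_GRPC_SERVER_MESSAGE_LEN};
use tonic::transport::Endpoint;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Address is an assumption; connect_lazy() defers the connection to first use.
    let channel = Endpoint::from_static("http://localhost:9000").connect_lazy();
    // 30s per-request timeout, gzip compression in both directions.
    let mut client = node_service_time_out_client(
        channel,
        Duration::from_secs(30),
        DEFAULT_GRPC_SERVER_MESSAGE_LEN,
        true,
    );
    let resp = client
        .ping(PingRequest {
            version: 1,
            ..Default::default()
        })
        .await?;
    println!("ping returned version {}", resp.into_inner().version);
    Ok(())
}
```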
diff --git a/common/protos/src/models.fbs b/common/protos/src/models.fbs new file mode 100644 index 00000000..d6a771ec --- /dev/null +++ b/common/protos/src/models.fbs @@ -0,0 +1,5 @@ +namespace models; + +table PingBody { + payload: [ubyte]; +} \ No newline at end of file
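The `PingBody` table above is what the e2e test serializes into `PingRequest.body`. A sketch of building and reading one with the `flatbuffers` crate follows; the `PingBodyBuilder` methods and the `payload()` accessor are assumed from standard flatc codegen for this schema.

```rust
use flatbuffers::FlatBufferBuilder;
use protos::models::{PingBody, PingBodyBuilder};

/// Serialize a PingBody table wrapping `payload` into a standalone buffer.
fn build_ping_body(payload: &[u8]) -> Vec<u8> {
    let mut fbb = FlatBufferBuilder::new();
    // Vectors must be created before the table builder borrows the FlatBufferBuilder.
    let payload_off = fbb.create_vector(payload);
    let mut body = PingBodyBuilder::new(&mut fbb);
    body.add_payload(payload_off);
    let root = body.finish();
    fbb.finish(root, None);
    fbb.finished_data().to_vec()
}

/// Verify and read a PingBody back out of a received buffer.
fn read_ping_body(buf: &[u8]) -> Option<usize> {
    let body = flatbuffers::root::<PingBody>(buf).ok()?;
    body.payload().map(|p| p.len())
}
```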
disks + string volume = 2; +} + +message ListDirResponse { + bool success = 1; + repeated string volumes = 2; + optional string error_info = 3; +} + +message WalkDirRequest { + string disk = 1; // indicate which one in the disks + string walk_dir_options = 2; +} + +message WalkDirResponse { + bool success = 1; + repeated string meta_cache_entry = 2; + optional string error_info = 3; +} + +message RenameDataRequest { + string disk = 1; // indicate which one in the disks + string src_volume = 2; + string src_path = 3; + string file_info = 4; + string dst_volume = 5; + string dst_path = 6; +} + +message RenameDataResponse { + bool success = 1; + string rename_data_resp = 2; + optional string error_info = 3; +} + +message MakeVolumesRequest { + string disk = 1; // indicate which one in the disks + repeated string volumes = 2; +} + +message MakeVolumesResponse { + bool success = 1; + optional string error_info = 2; +} + +message MakeVolumeRequest { + string disk = 1; // indicate which one in the disks + string volume = 2; +} + +message MakeVolumeResponse { + bool success = 1; + optional string error_info = 2; +} + +message ListVolumesRequest { + string disk = 1; // indicate which one in the disks +} + +message ListVolumesResponse { + bool success = 1; + repeated string volume_infos = 2; + optional string error_info = 3; +} + +message StatVolumeRequest { + string disk = 1; // indicate which one in the disks + string volume = 2; +} + +message StatVolumeResponse { + bool success = 1; + string volume_info = 2; + optional string error_info = 3; +} + +message WriteMetadataRequest { + string disk = 1; // indicate which one in the disks + string volume = 2; + string path = 3; + string file_info = 4; +} + +message WriteMetadataResponse { + bool success = 1; + optional string error_info = 2; +} + +message ReadVersionRequest { + string disk = 1; + string volume = 2; + string path = 3; + string version_id = 4; + string opts = 5; +} + +message ReadVersionResponse { + bool success = 1; + string file_info = 2; + optional string error_info = 3; +} + +message ReadXLRequest { + string disk = 1; + string volume = 2; + string path = 3; + bool read_data = 4; +} + +message ReadXLResponse { + bool success = 1; + string raw_file_info = 2; + optional string error_info = 3; +} + +message DeleteVersionsRequest { + string disk = 1; + string volume = 2; + repeated string versions = 3; + string opts = 4; +} + +message DeleteVersionsResponse { + bool success = 1; + repeated string errors = 2; + optional string error_info = 3; +} + +message ReadMultipleRequest { + string disk = 1; + string read_multiple_req = 2; +} + +message ReadMultipleResponse { + bool success = 1; + repeated string read_multiple_resps = 2; + optional string error_info = 3; +} + +message DeleteVolumeRequest { + string disk = 1; + string volume = 2; +} + +message DeleteVolumeResponse { + bool success = 1; + optional string error_info = 2; +} + +/* -------------------------------------------------------------------- */ + +service NodeService { +/* -------------------------------meta service-------------------------- */ + rpc Ping(PingRequest) returns (PingResponse) {}; + rpc ListBucket(ListBucketRequest) returns (ListBucketResponse) {}; + rpc MakeBucket(MakeBucketRequest) returns (MakeBucketResponse) {}; + rpc GetBucketInfo(GetBucketInfoRequest) returns (GetBucketInfoResponse) {}; + rpc DeleteBucket(DeleteBucketRequest) returns (DeleteBucketResponse) {}; + +/* -------------------------------disk service-------------------------- */ + + rpc 
ReadAll(ReadAllRequest) returns (ReadAllResponse) {}; + rpc WriteAll(WriteAllRequest) returns (WriteAllResponse) {}; + rpc Delete(DeleteRequest) returns (DeleteResponse) {}; + rpc RenameFile(RenameFileRequst) returns (RenameFileResponse) {}; + rpc Write(WriteRequest) returns (WriteResponse) {}; +// rpc Append(AppendRequest) returns (AppendResponse) {}; + rpc ReadAt(ReadAtRequest) returns (ReadAtResponse) {}; + rpc ListDir(ListDirRequest) returns (ListDirResponse) {}; + rpc WalkDir(WalkDirRequest) returns (WalkDirResponse) {}; + rpc RenameData(RenameDataRequest) returns (RenameDataResponse) {}; + rpc MakeVolumes(MakeVolumesRequest) returns (MakeVolumesResponse) {}; + rpc MakeVolume(MakeVolumeRequest) returns (MakeVolumeResponse) {}; + rpc ListVolumes(ListVolumesRequest) returns (ListVolumesResponse) {}; + rpc StatVolume(StatVolumeRequest) returns (StatVolumeResponse) {}; + rpc WriteMetadata(WriteMetadataRequest) returns (WriteMetadataResponse) {}; + rpc ReadVersion(ReadVersionRequest) returns (ReadVersionResponse) {}; + rpc ReadXL(ReadXLRequest) returns (ReadXLResponse) {}; + rpc DeleteVersions(DeleteVersionsRequest) returns (DeleteVersionsResponse) {}; + rpc ReadMultiple(ReadMultipleRequest) returns (ReadMultipleResponse) {}; + rpc DeleteVolume(DeleteVolumeRequest) returns (DeleteVolumeResponse) {}; +} diff --git a/e2e_test/Cargo.toml b/e2e_test/Cargo.toml new file mode 100644 index 00000000..90f99158 --- /dev/null +++ b/e2e_test/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "e2e_test" +version.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ecstore.workspace = true +flatbuffers.workspace = true +protos.workspace = true +serde_json.workspace = true +tonic = { version = "0.12.1", features = ["gzip"] } +tokio = { workspace = true } \ No newline at end of file diff --git a/e2e_test/README.md b/e2e_test/README.md new file mode 100644 index 00000000..e69de29b diff --git a/e2e_test/src/lib.rs b/e2e_test/src/lib.rs new file mode 100644 index 00000000..07abfb17 --- /dev/null +++ b/e2e_test/src/lib.rs @@ -0,0 +1 @@ +mod reliant; diff --git a/e2e_test/src/reliant/README.md b/e2e_test/src/reliant/README.md new file mode 100644 index 00000000..b1a0f384 --- /dev/null +++ b/e2e_test/src/reliant/README.md @@ -0,0 +1 @@ +The test cases in this directory require a running cluster. \ No newline at end of file diff --git a/e2e_test/src/reliant/mod.rs b/e2e_test/src/reliant/mod.rs new file mode 100644 index 00000000..67d6e260 --- /dev/null +++ b/e2e_test/src/reliant/mod.rs @@ -0,0 +1 @@ +mod node_interact_test; diff --git a/e2e_test/src/reliant/node_interact_test.rs b/e2e_test/src/reliant/node_interact_test.rs new file mode 100644 index 00000000..d0566c3f --- /dev/null +++ b/e2e_test/src/reliant/node_interact_test.rs @@ -0,0 +1,106 @@ +#![cfg(test)] + +use ecstore::disk::VolumeInfo; +use protos::{ + models::{PingBody, PingBodyBuilder}, + proto_gen::node_service::{ + node_service_client::NodeServiceClient, ListVolumesRequest, MakeVolumeRequest, PingRequest, PingResponse, ReadAllRequest, + }, +}; +use std::error::Error; +use tonic::{transport::Channel, Request}; + +async fn get_client() -> Result<NodeServiceClient<Channel>, Box<dyn Error>> { + Ok(NodeServiceClient::connect("http://localhost:9000").await?) +}
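Since these reliant tests assume a cluster is already listening, the endpoint is hard-coded above. A minimal sketch of an environment-driven override (`RUSTFS_E2E_ENDPOINT` is a hypothetical variable name, not part of this change):

```rust
// Sketch only: resolve the cluster endpoint from the environment, falling
// back to the default local address used by the tests in this file.
async fn get_client_from_env() -> Result<NodeServiceClient<Channel>, Box<dyn Error>> {
    let endpoint = std::env::var("RUSTFS_E2E_ENDPOINT") // hypothetical override
        .unwrap_or_else(|_| "http://localhost:9000".to_string());
    Ok(NodeServiceClient::connect(endpoint).await?)
}
```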
+ +#[tokio::test] +async fn ping() -> Result<(), Box<dyn Error>> { + let mut fbb = flatbuffers::FlatBufferBuilder::new(); + let payload = fbb.create_vector(b"hello world"); + + let mut builder = PingBodyBuilder::new(&mut fbb); + builder.add_payload(payload); + let root = builder.finish(); + fbb.finish(root, None); + + let finished_data = fbb.finished_data(); + + let decoded_payload = flatbuffers::root::<PingBody>(finished_data); + assert!(decoded_payload.is_ok()); + + // create the client + let mut client = get_client().await?; + + // build the PingRequest + let request = Request::new(PingRequest { + version: 1, + body: finished_data.to_vec(), + }); + + // send the request and read the response + let response: PingResponse = client.ping(request).await?.into_inner(); + + // print the response + let ping_response_body = flatbuffers::root::<PingBody>(&response.body); + if let Err(e) = ping_response_body { + eprintln!("{}", e); + } else { + println!("ping_resp:body(flatbuffer): {:?}", ping_response_body); + } + + Ok(()) +} + +#[tokio::test] +async fn make_volume() -> Result<(), Box<dyn Error>> { + let mut client = get_client().await?; + let request = Request::new(MakeVolumeRequest { + disk: "data".to_string(), + volume: "dandan".to_string(), + }); + + let response = client.make_volume(request).await?.into_inner(); + if response.success { + println!("success"); + } else { + println!("failed: {:?}", response.error_info); + } + Ok(()) +} + +#[tokio::test] +async fn list_volumes() -> Result<(), Box<dyn Error>> { + let mut client = get_client().await?; + let request = Request::new(ListVolumesRequest { + disk: "data".to_string(), + }); + + let response = client.list_volumes(request).await?.into_inner(); + let volume_infos: Vec<VolumeInfo> = response + .volume_infos + .into_iter() + .filter_map(|json_str| serde_json::from_str::<VolumeInfo>(&json_str).ok()) + .collect(); + + println!("{:?}", volume_infos); + Ok(()) +} + +#[tokio::test] +async fn read_all() -> Result<(), Box<dyn Error>> { + let mut client = get_client().await?; + let request = Request::new(ReadAllRequest { + disk: "data".to_string(), + volume: "ff".to_string(), + path: "format.json".to_string(), + }); + + let response = client.read_all(request).await?.into_inner(); + let volume_infos = response.data; + + println!("{}", response.success); + println!("{:?}", volume_infos); + Ok(()) +} diff --git a/ecstore/Cargo.toml b/ecstore/Cargo.toml index ac8e1948..ada0fc7a 100644 --- a/ecstore/Cargo.toml +++ b/ecstore/Cargo.toml @@ -9,7 +9,7 @@ rust-version.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -tokio = { workspace = true, features = ["io-util"] } +backon.workspace = true bytes.workspace = true thiserror.workspace = true futures.workspace = true @@ -28,6 +28,7 @@ lazy_static = "1.5.0" regex = "1.10.5" netif = "0.1.6" path-absolutize = "3.1.1" +protos.workspace = true rmp-serde = "1.3.0" tokio-util = { version = "0.7.11", features = ["io"] } s3s = "0.10.0" @@ -37,7 +38,10 @@ base64-simd = "0.8.0" sha2 = "0.10.8" hex-simd = "0.8.0" path-clean = "1.0.1" +tokio = { workspace = true, features = ["io-util"] } tokio-stream = "0.1.15" +tonic.workspace = true +tower.workspace = true rmp = "0.8.14" byteorder = "1.5.0" xxhash-rust = { version = "0.8.12", features = ["xxh64"] } diff --git a/ecstore/src/disk/local.rs b/ecstore/src/disk/local.rs index 80c53948..3f23819f 100644 --- a/ecstore/src/disk/local.rs +++ b/ecstore/src/disk/local.rs @@ -3,7 +3,7 @@ use super::{ DeleteOptions, DiskAPI, FileInfoVersions, FileReader, FileWriter, MetaCacheEntry, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp,
VolumeInfo, WalkDirOptions, }; -use crate::disk::STORAGE_FORMAT_FILE; +use crate::disk::{LocalFileReader, LocalFileWriter, STORAGE_FORMAT_FILE}; use crate::{ error::{Error, Result}, file_meta::FileMeta, @@ -19,18 +19,29 @@ use std::{ use time::OffsetDateTime; use tokio::fs::{self, File}; use tokio::io::ErrorKind; -use tracing::{debug, error, warn}; +use tokio::sync::Mutex; +use tracing::{debug, warn}; use uuid::Uuid; +#[derive(Debug)] +pub struct FormatInfo { + pub id: Option<Uuid>, + pub _data: Vec<u8>, + pub _file_info: Option<FileMeta>, + pub _last_check: Option<OffsetDateTime>, +} + +impl FormatInfo {} + #[derive(Debug)] pub struct LocalDisk { pub root: PathBuf, - pub id: Uuid, - pub _format_data: Vec<u8>, - pub _format_meta: Option<FileMeta>, pub _format_path: PathBuf, - // pub format_legacy: bool, // drop - pub _format_last_check: Option<OffsetDateTime>, + pub format_info: Mutex<FormatInfo>, + // pub id: Mutex<Option<Uuid>>, + // pub format_data: Mutex<Vec<u8>>, + // pub format_file_info: Mutex<Option<FileMeta>>, + // pub format_last_check: Mutex<Option<OffsetDateTime>>, } impl LocalDisk { @@ -48,7 +59,7 @@ impl LocalDisk { let (format_data, format_meta) = read_file_exists(&format_path).await?; - let mut id = Uuid::nil(); + let mut id = None; // let mut format_legacy = false; let mut format_last_check = None; @@ -61,19 +72,26 @@ impl LocalDisk { return Err(Error::from(DiskError::InconsistentDisk)); } - id = fm.erasure.this; + id = Some(fm.erasure.this); // format_legacy = fm.erasure.distribution_algo == DistributionAlgoVersion::V1; format_last_check = Some(OffsetDateTime::now_utc()); } + let format_info = FormatInfo { + id, + _data: format_data, + _file_info: format_meta, + _last_check: format_last_check, + }; + let disk = Self { root, - id, - _format_meta: format_meta, - _format_data: format_data, _format_path: format_path, - // format_legacy, - _format_last_check: format_last_check, + format_info: Mutex::new(format_info), + // // format_legacy, + // format_file_info: Mutex::new(format_meta), + // format_data: Mutex::new(format_data), + // format_last_check: Mutex::new(format_last_check), }; disk.make_meta_volumes().await?; @@ -204,7 +222,7 @@ impl LocalDisk { } else { if delete_path.is_dir() { if let Err(err) = fs::remove_dir(&delete_path).await { - error!("remove_dir err {:?} when {:?}", &err, &delete_path); + debug!("remove_dir err {:?} when {:?}", &err, &delete_path); match err.kind() { ErrorKind::NotFound => (), // ErrorKind::DirectoryNotEmpty => (), @@ -218,7 +236,7 @@ impl LocalDisk { } } else { if let Err(err) = fs::remove_file(&delete_path).await { - error!("remove_file err {:?} when {:?}", &err, &delete_path); + debug!("remove_file err {:?} when {:?}", &err, &delete_path); match err.kind() { ErrorKind::NotFound => (), _ => { @@ -413,9 +431,24 @@ impl DiskAPI for LocalDisk { fn is_local(&self) -> bool { true } + async fn close(&self) -> Result<()> { + Ok(()) + } + fn path(&self) -> PathBuf { + self.root.clone() + } - fn id(&self) -> Uuid { - self.id + async fn get_disk_id(&self) -> Option<Uuid> { + // TODO: check format file + let format_info = self.format_info.lock().await; + + format_info.id.clone() + // TODO: verify that the id from the on-disk format file is still valid + } + + async fn set_disk_id(&self, _id: Option<Uuid>) -> Result<()> { + // nothing to set for a local disk + Ok(()) } #[must_use] @@ -517,7 +550,8 @@ impl DiskAPI for LocalDisk { let file = File::create(&fpath).await?; - Ok(FileWriter::new(file)) + Ok(FileWriter::Local(LocalFileWriter::new(file))) + // Ok(FileWriter::new(file)) // let mut writer = BufWriter::new(file); @@ -543,7 +577,8 @@ impl DiskAPI for LocalDisk { .open(&p) .await?; - Ok(FileWriter::new(file)) + Ok(FileWriter::Local(LocalFileWriter::new(file))) + // 
Ok(FileWriter::new(file)) // let mut writer = BufWriter::new(file); @@ -560,7 +595,7 @@ impl DiskAPI for LocalDisk { debug!("read_file {:?}", &p); let file = File::options().read(true).open(&p).await?; - Ok(FileReader::new(file)) + Ok(FileReader::Local(LocalFileReader::new(file))) // file.seek(SeekFrom::Start(offset as u64)).await?; @@ -756,9 +791,7 @@ impl DiskAPI for LocalDisk { .await?; } - Ok(RenameDataResp { - old_data_dir: old_data_dir, - }) + Ok(RenameDataResp { old_data_dir }) } async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()> { diff --git a/ecstore/src/disk/mod.rs b/ecstore/src/disk/mod.rs index 71701467..d2f63c4f 100644 --- a/ecstore/src/disk/mod.rs +++ b/ecstore/src/disk/mod.rs @@ -2,6 +2,7 @@ pub mod endpoint; pub mod error; pub mod format; mod local; +mod remote; pub const RUSTFS_META_BUCKET: &str = ".rustfs.sys"; pub const RUSTFS_META_MULTIPART_BUCKET: &str = ".rustfs.sys/multipart"; @@ -12,18 +13,21 @@ pub const FORMAT_CONFIG_FILE: &str = "format.json"; const STORAGE_FORMAT_FILE: &str = "xl.meta"; use crate::{ - erasure::ReadAt, + erasure::{ReadAt, Write}, error::{Error, Result}, file_meta::FileMeta, store_api::{FileInfo, RawFileInfo}, }; use bytes::Bytes; -use std::{fmt::Debug, io::SeekFrom, pin::Pin, sync::Arc}; +use protos::proto_gen::node_service::{node_service_client::NodeServiceClient, ReadAtRequest, WriteRequest}; +use serde::{Deserialize, Serialize}; +use std::{fmt::Debug, io::SeekFrom, path::PathBuf, sync::Arc}; use time::OffsetDateTime; use tokio::{ fs::File, - io::{AsyncReadExt, AsyncSeekExt, AsyncWrite}, + io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt}, }; +use tonic::{transport::Channel, Request}; use uuid::Uuid; pub type DiskStore = Arc>; @@ -33,15 +37,18 @@ pub async fn new_disk(ep: &endpoint::Endpoint, opt: &DiskOption) -> Result bool; - fn id(&self) -> Uuid; + fn path(&self) -> PathBuf; + async fn close(&self) -> Result<()>; + async fn get_disk_id(&self) -> Option; + async fn set_disk_id(&self, id: Option) -> Result<()>; async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()>; async fn read_all(&self, volume: &str, path: &str) -> Result; @@ -88,7 +95,7 @@ pub trait DiskAPI: Debug + Send + Sync + 'static { async fn read_multiple(&self, req: ReadMultipleReq) -> Result>; } -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct FileInfoVersions { // Name of the volume. 
pub volume: String, @@ -104,7 +111,7 @@ pub struct FileInfoVersions { pub free_versions: Vec<FileInfo>, } -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct WalkDirOptions { // Bucket to scan pub bucket: String, @@ -131,7 +138,7 @@ pub struct WalkDirOptions { pub disk_id: String, } -#[derive(Debug, Default)] +#[derive(Debug, Default, Serialize, Deserialize)] pub struct MetaCacheEntry { // name is the full name of the object including prefixes pub name: String, @@ -206,17 +213,18 @@ pub struct DiskOption { pub health_check: bool, } +#[derive(Serialize, Deserialize)] pub struct RenameDataResp { pub old_data_dir: Option<Uuid>, } -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct DeleteOptions { pub recursive: bool, pub immediate: bool, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct ReadMultipleReq { pub bucket: String, pub prefix: String, @@ -227,7 +235,7 @@ pub struct ReadMultipleReq { pub max_results: usize, } -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct ReadMultipleResp { pub bucket: String, pub prefix: String, @@ -252,65 +260,154 @@ pub struct ReadMultipleResp { // } // } +#[derive(Debug, Deserialize, Serialize)] pub struct VolumeInfo { pub name: String, pub created: Option<OffsetDateTime>, } +#[derive(Deserialize, Serialize)] pub struct ReadOptions { pub read_data: bool, pub healing: bool, } -pub struct FileWriter { - pub inner: Pin<Box<dyn AsyncWrite + Send + Sync>>, +pub enum FileWriter { + Local(LocalFileWriter), + Remote(RemoteFileWriter), } -impl AsyncWrite for FileWriter { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &[u8], - ) -> std::task::Poll<std::io::Result<usize>> { - Pin::new(&mut self.inner).poll_write(cx, buf) - } - - fn poll_flush( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll<std::io::Result<()>> { - Pin::new(&mut self.inner).poll_flush(cx) - } - - fn poll_shutdown( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll<std::io::Result<()>> { - Pin::new(&mut self.inner).poll_shutdown(cx) - } -} -impl FileWriter { - pub fn new<W>(inner: W) -> Self - where - W: AsyncWrite + Send + Sync + 'static, - { - Self { inner: Box::pin(inner) } - } -} - -#[derive(Debug)] -pub struct FileReader { +#[async_trait::async_trait] +impl Write for FileWriter { + async fn write(&mut self, buf: &[u8]) -> Result<()> { + match self { + Self::Local(local_file_writer) => local_file_writer.write(buf).await, + Self::Remote(remote_file_writer) => remote_file_writer.write(buf).await, + } + } +} + +pub struct LocalFileWriter { pub inner: File, } -impl FileReader { +impl LocalFileWriter { pub fn new(inner: File) -> Self { Self { inner } } }
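The enum replaces the old `Pin<Box<dyn AsyncWrite>>` wrapper: call sites now dispatch statically over the local and remote variants through the async `Write` trait defined in `erasure.rs`. A minimal usage sketch (`persist_shard` is a hypothetical helper, not part of this change):

```rust
// Sketch: callers depend only on the `Write` trait, so a shard can be
// persisted to a local file or a remote disk through the same call.
// Assumes `FileWriter` and the `Write` trait from this module are in scope.
async fn persist_shard(writer: &mut FileWriter, shard: &[u8]) -> Result<()> {
    // Dispatch happens inside `FileWriter::write` via the enum match above.
    writer.write(shard).await
}
```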
+#[async_trait::async_trait] +impl Write for LocalFileWriter { + async fn write(&mut self, buf: &[u8]) -> Result<()> { + // write_all guards against short writes; a bare `write` may persist + // only part of the buffer and still return Ok + self.inner.write_all(buf).await?; + self.inner.flush().await?; + + Ok(()) + } +} + +pub struct RemoteFileWriter { + pub root: PathBuf, + pub volume: String, + pub path: String, + pub is_append: bool, + client: NodeServiceClient<Channel>, +} + +impl RemoteFileWriter { + pub fn new(root: PathBuf, volume: String, path: String, is_append: bool, client: NodeServiceClient<Channel>) -> Self { + Self { + root, + volume, + path, + is_append, + client, + } + } +} + +#[async_trait::async_trait] +impl Write for RemoteFileWriter { + async fn write(&mut self, buf: &[u8]) -> Result<()> { + let request = Request::new(WriteRequest { + disk: self.root.to_string_lossy().to_string(), + volume: self.volume.to_string(), + path: self.path.to_string(), + is_append: self.is_append, + data: buf.to_vec(), + }); + let _response = self.client.write(request).await?.into_inner(); + Ok(()) + } +} + +#[derive(Debug)] +pub enum FileReader { + Local(LocalFileReader), + Remote(RemoteFileReader), +} + +#[async_trait::async_trait] impl ReadAt for FileReader { + async fn read_at(&mut self, offset: usize, length: usize) -> Result<(Vec<u8>, usize)> { + match self { + Self::Local(local_file_reader) => local_file_reader.read_at(offset, length).await, + Self::Remote(remote_file_reader) => remote_file_reader.read_at(offset, length).await, + } + } +} + +#[derive(Debug)] +pub struct LocalFileReader { + pub inner: File, +} + +impl LocalFileReader { + pub fn new(inner: File) -> Self { + Self { inner } + } +} + +#[async_trait::async_trait] +impl ReadAt for LocalFileReader { async fn read_at(&mut self, offset: usize, length: usize) -> Result<(Vec<u8>, usize)> { self.inner.seek(SeekFrom::Start(offset as u64)).await?; @@ -323,3 +420,38 @@ impl ReadAt for FileReader { Ok((buffer, bytes_read)) } } + +#[derive(Debug)] +pub struct RemoteFileReader { + pub root: PathBuf, + pub volume: String, + pub path: String, + client: NodeServiceClient<Channel>, +} + +impl RemoteFileReader { + pub fn new(root: PathBuf, volume: String, path: String, client: NodeServiceClient<Channel>) -> Self { + Self { + root, + volume, + path, + client, + } + } +} + +#[async_trait::async_trait] +impl ReadAt for RemoteFileReader { + async fn read_at(&mut self, offset: usize, length: usize) -> Result<(Vec<u8>, usize)> { + let request = Request::new(ReadAtRequest { + disk: self.root.to_string_lossy().to_string(), + volume: self.volume.to_string(), + path: self.path.to_string(), + offset: offset.try_into().unwrap(), + length: length.try_into().unwrap(), + }); + let response = self.client.read_at(request).await?.into_inner(); + + Ok((response.data, response.read_size.try_into().unwrap())) + } +} diff --git a/ecstore/src/disk/remote.rs b/ecstore/src/disk/remote.rs new file mode 100644 index 00000000..b0d5a8a0 --- /dev/null +++ b/ecstore/src/disk/remote.rs @@ -0,0 +1,529 @@ +use std::{path::PathBuf, sync::Arc, time::Duration}; + +use bytes::Bytes; +use futures::lock::Mutex; +use protos::{ + node_service_time_out_client, + proto_gen::node_service::{ + node_service_client::NodeServiceClient, DeleteRequest, DeleteVersionsRequest, DeleteVolumeRequest, ListDirRequest, + ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest, ReadMultipleRequest, ReadVersionRequest, + ReadXlRequest, RenameDataRequest, RenameFileRequst, StatVolumeRequest, WalkDirRequest, WriteAllRequest, + WriteMetadataRequest, + }, + DEFAULT_GRPC_SERVER_MESSAGE_LEN, +}; +use tokio::{fs, sync::RwLock}; +use tonic::{ + 
transport::{Channel, Endpoint as tonic_Endpoint}, + Request, +}; +use tower::timeout::Timeout; +use tracing::info; +use uuid::Uuid; + +use crate::{ + disk::error::DiskError, + error::{Error, Result}, + store_api::{FileInfo, RawFileInfo}, +}; + +use super::{ + endpoint::Endpoint, DeleteOptions, DiskAPI, DiskOption, FileInfoVersions, FileReader, FileWriter, MetaCacheEntry, + ReadMultipleReq, ReadMultipleResp, ReadOptions, RemoteFileReader, RemoteFileWriter, RenameDataResp, VolumeInfo, + WalkDirOptions, +}; + +#[derive(Debug)] +pub struct RemoteDisk { + id: Mutex<Option<Uuid>>, + channel: Arc<RwLock<Option<Channel>>>, + url: url::Url, + pub root: PathBuf, +} + +impl RemoteDisk { + pub async fn new(ep: &Endpoint, _opt: &DiskOption) -> Result<Self> { + let root = fs::canonicalize(ep.url.path()).await?; + + Ok(Self { + channel: Arc::new(RwLock::new(None)), + url: ep.url.clone(), + root, + id: Mutex::new(None), + }) + } + + #[allow(dead_code)] + async fn get_client(&self) -> Result<NodeServiceClient<Timeout<Channel>>> { + let channel_clone = self.channel.clone(); + let channel = { + let read_lock = channel_clone.read().await; + + if let Some(ref channel) = *read_lock { + channel.clone() + } else { + let addr = format!("{}://{}:{}", self.url.scheme(), self.url.host_str().unwrap(), self.url.port().unwrap()); + info!("disk url: {}", addr); + let connector = tonic_Endpoint::from_shared(addr.clone())?; + + let new_channel = connector.connect().await.map_err(|_err| DiskError::DiskNotFound)?; + + info!("get channel success"); + + *self.channel.write().await = Some(new_channel.clone()); + + new_channel + } + }; + + Ok(node_service_time_out_client( + channel, + Duration::new(30, 0), // TODO: use config setting + DEFAULT_GRPC_SERVER_MESSAGE_LEN, + // grpc_enable_gzip, + false, // TODO: use config setting + )) + } + + async fn get_client_v2(&self) -> Result<NodeServiceClient<Channel>> { + let addr = format!("{}://{}:{}", self.url.scheme(), self.url.host_str().unwrap(), self.url.port().unwrap()); + Ok(NodeServiceClient::connect(addr).await?) + } +}
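Note the asymmetry: `get_client` caches the dialed `Channel` behind an `RwLock`, while `get_client_v2` dials a fresh connection on every call, so each `DiskAPI` method below pays a reconnect. A sketch of how the cached channel could back the per-call clients (tonic `Channel`s are internally reference-counted and cheap to clone; `cached_client` is a hypothetical helper, not part of this change):

```rust
impl RemoteDisk {
    // Sketch: reuse the cached Channel instead of reconnecting per RPC.
    async fn cached_client(&self) -> Result<NodeServiceClient<Channel>> {
        if let Some(channel) = self.channel.read().await.as_ref().cloned() {
            // Cloning a tonic Channel only bumps a reference count.
            return Ok(NodeServiceClient::new(channel));
        }
        self.get_client_v2().await // first call falls back to a fresh dial
    }
}
```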
+ +// TODO: all APIs still need proper error handling +#[async_trait::async_trait] +impl DiskAPI for RemoteDisk { + fn is_local(&self) -> bool { + false + } + async fn close(&self) -> Result<()> { + Ok(()) + } + fn path(&self) -> PathBuf { + self.root.clone() + } + + async fn get_disk_id(&self) -> Option<Uuid> { + self.id.lock().await.clone() + } + async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()> { + let mut lock = self.id.lock().await; + *lock = id; + + Ok(()) + } + + async fn read_all(&self, volume: &str, path: &str) -> Result<Bytes> { + info!("read_all"); + let mut client = self.get_client_v2().await?; + let request = Request::new(ReadAllRequest { + disk: self.root.to_string_lossy().to_string(), + volume: volume.to_string(), + path: path.to_string(), + }); + + let response = client.read_all(request).await?.into_inner(); + + info!("read_all success"); + + if !response.success { + return Err(DiskError::FileNotFound.into()); + } + + Ok(Bytes::from(response.data)) + } + + async fn write_all(&self, volume: &str, path: &str, data: Vec<u8>) -> Result<()> { + info!("write_all"); + let mut client = self.get_client_v2().await?; + let request = Request::new(WriteAllRequest { + disk: self.root.to_string_lossy().to_string(), + volume: volume.to_string(), + path: path.to_string(), + data, + }); + + let response = client.write_all(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + Ok(()) + } + + async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()> { + info!("delete"); + let options = serde_json::to_string(&opt)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(DeleteRequest { + disk: self.root.to_string_lossy().to_string(), + volume: volume.to_string(), + path: path.to_string(), + options, + }); + + let response = client.delete(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + Ok(()) + } + + async fn rename_file(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str) -> Result<()> { + info!("rename_file"); + let mut client = self.get_client_v2().await?; + let request = Request::new(RenameFileRequst { + disk: self.root.to_string_lossy().to_string(), + src_volume: src_volume.to_string(), + src_path: src_path.to_string(), + dst_volume: dst_volume.to_string(), + dst_path: dst_path.to_string(), + }); + + let response = client.rename_file(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + Ok(()) + } + + async fn create_file(&self, _origvolume: &str, volume: &str, path: &str, _file_size: usize) -> Result<FileWriter> { + info!("create_file"); + Ok(FileWriter::Remote(RemoteFileWriter::new( + self.root.clone(), + volume.to_string(), + path.to_string(), + false, + self.get_client_v2().await?, + ))) + } + + async fn append_file(&self, volume: &str, path: &str) -> Result<FileWriter> { + info!("append_file"); + Ok(FileWriter::Remote(RemoteFileWriter::new( + self.root.clone(), + volume.to_string(), + path.to_string(), + true, + self.get_client_v2().await?, + ))) + } + + async fn read_file(&self, volume: &str, path: &str) -> Result<FileReader> { + info!("read_file"); + Ok(FileReader::Remote(RemoteFileReader::new( + self.root.clone(), + volume.to_string(), + path.to_string(), + self.get_client_v2().await?, + ))) + } + + async fn list_dir(&self, _origvolume: &str, volume: &str, _dir_path: &str, _count: i32) -> 
Result<Vec<String>> { + info!("list_dir"); + let mut client = self.get_client_v2().await?; + let request = Request::new(ListDirRequest { + disk: self.root.to_string_lossy().to_string(), + volume: volume.to_string(), + }); + + let response = client.list_dir(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + Ok(response.volumes) + } + + async fn walk_dir(&self, opts: WalkDirOptions) -> Result<Vec<MetaCacheEntry>> { + info!("walk_dir"); + let walk_dir_options = serde_json::to_string(&opts)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(WalkDirRequest { + disk: self.root.to_string_lossy().to_string(), + walk_dir_options, + }); + + let response = client.walk_dir(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + let entries = response + .meta_cache_entry + .into_iter() + .filter_map(|json_str| serde_json::from_str::<MetaCacheEntry>(&json_str).ok()) + .collect(); + + Ok(entries) + } + + async fn rename_data( + &self, + src_volume: &str, + src_path: &str, + fi: FileInfo, + dst_volume: &str, + dst_path: &str, + ) -> Result<RenameDataResp> { + info!("rename_data"); + let file_info = serde_json::to_string(&fi)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(RenameDataRequest { + disk: self.root.to_string_lossy().to_string(), + src_volume: src_volume.to_string(), + src_path: src_path.to_string(), + file_info, + dst_volume: dst_volume.to_string(), + dst_path: dst_path.to_string(), + }); + + let response = client.rename_data(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + let rename_data_resp = serde_json::from_str::<RenameDataResp>(&response.rename_data_resp)?; + + Ok(rename_data_resp) + } + + async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()> { + info!("make_volumes"); + let mut client = self.get_client_v2().await?; + let request = Request::new(MakeVolumesRequest { + disk: self.root.to_string_lossy().to_string(), + volumes: volumes.iter().map(|s| (*s).to_string()).collect(), + }); + + let response = client.make_volumes(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + Ok(()) + } + + async fn make_volume(&self, volume: &str) -> Result<()> { + info!("make_volume"); + let mut client = self.get_client_v2().await?; + let request = Request::new(MakeVolumeRequest { + disk: self.root.to_string_lossy().to_string(), + volume: volume.to_string(), + }); + + let response = client.make_volume(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + Ok(()) + } + + async fn list_volumes(&self) -> Result<Vec<VolumeInfo>> { + info!("list_volumes"); + let mut client = self.get_client_v2().await?; + let request = Request::new(ListVolumesRequest { + disk: self.root.to_string_lossy().to_string(), + }); + + let response = client.list_volumes(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + let infos = response + .volume_infos + .into_iter() + .filter_map(|json_str| serde_json::from_str::<VolumeInfo>(&json_str).ok()) + .collect(); + + Ok(infos) + }
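`walk_dir` and `list_volumes` decode the JSON payload strings with `filter_map(.. .ok())`, which silently drops any entry that fails to parse. A stricter variant (a sketch only, assuming the crate's `Error` implements `From<serde_json::Error>`, which the `serde_json::to_string(...)?` calls above already rely on) would surface decode failures instead:

```rust
// Sketch: fail loudly on a corrupt entry instead of skipping it.
fn decode_all<T: serde::de::DeserializeOwned>(raw: Vec<String>) -> Result<Vec<T>> {
    raw.iter()
        .map(|json_str| serde_json::from_str::<T>(json_str).map_err(Error::from))
        .collect() // the first Err aborts the whole collection
}
```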
+ async fn stat_volume(&self, volume: &str) -> Result<VolumeInfo> { + info!("stat_volume"); + let mut client = self.get_client_v2().await?; + let request = Request::new(StatVolumeRequest { + disk: self.root.to_string_lossy().to_string(), + volume: volume.to_string(), + }); + + let response = client.stat_volume(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + let volume_info = serde_json::from_str::<VolumeInfo>(&response.volume_info)?; + + Ok(volume_info) + } + + async fn write_metadata(&self, _org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()> { + info!("write_metadata"); + let file_info = serde_json::to_string(&fi)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(WriteMetadataRequest { + disk: self.root.to_string_lossy().to_string(), + volume: volume.to_string(), + path: path.to_string(), + file_info, + }); + + let response = client.write_metadata(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + Ok(()) + } + + async fn read_version( + &self, + _org_volume: &str, + volume: &str, + path: &str, + version_id: &str, + opts: &ReadOptions, + ) -> Result<FileInfo> { + info!("read_version"); + let opts = serde_json::to_string(opts)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(ReadVersionRequest { + disk: self.root.to_string_lossy().to_string(), + volume: volume.to_string(), + path: path.to_string(), + version_id: version_id.to_string(), + opts, + }); + + let response = client.read_version(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + let file_info = serde_json::from_str::<FileInfo>(&response.file_info)?; + + Ok(file_info) + } + + async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result<RawFileInfo> { + info!("read_xl"); + let mut client = self.get_client_v2().await?; + let request = Request::new(ReadXlRequest { + disk: self.root.to_string_lossy().to_string(), + volume: volume.to_string(), + path: path.to_string(), + read_data, + }); + + let response = client.read_xl(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + let raw_file_info = serde_json::from_str::<RawFileInfo>(&response.raw_file_info)?; + + Ok(raw_file_info) + } + + async fn delete_versions( + &self, + volume: &str, + versions: Vec<FileInfoVersions>, + opts: DeleteOptions, + ) -> Result<Vec<Option<Error>>> { + info!("delete_versions"); + let opts = serde_json::to_string(&opts)?; + let mut versions_str = Vec::with_capacity(versions.len()); + for file_info_versions in versions.iter() { + versions_str.push(serde_json::to_string(file_info_versions)?); + } + let mut client = self.get_client_v2().await?; + let request = Request::new(DeleteVersionsRequest { + disk: self.root.to_string_lossy().to_string(), + volume: volume.to_string(), + versions: versions_str, + opts, + }); + + let response = client.delete_versions(request).await?.into_inner(); + if !response.success { + return Err(Error::from_string(format!( + "delete versions remote err: {}", + response.error_info.unwrap_or("None".to_string()) + ))); + } + let errors = response + .errors + .iter() + .map(|error| { + if error.is_empty() { + None + } else { + Some(Error::from_string(error)) + } + }) + .collect(); + + Ok(errors) + } + + async fn read_multiple(&self, req: ReadMultipleReq) -> Result<Vec<ReadMultipleResp>> { + info!("read_multiple"); + let read_multiple_req = serde_json::to_string(&req)?; + let mut client = self.get_client_v2().await?; + let request = 
Request::new(ReadMultipleRequest { + disk: self.root.to_string_lossy().to_string(), + read_multiple_req, + }); + + let response = client.read_multiple(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + let read_multiple_resps = response + .read_multiple_resps + .into_iter() + .filter_map(|json_str| serde_json::from_str::(&json_str).ok()) + .collect(); + + Ok(read_multiple_resps) + } + + async fn delete_volume(&self, volume: &str) -> Result<()> { + info!("delete_volume"); + let mut client = self.get_client_v2().await?; + let request = Request::new(DeleteVolumeRequest { + disk: self.root.to_string_lossy().to_string(), + volume: volume.to_string(), + }); + + let response = client.delete_volume(request).await?.into_inner(); + + if !response.success { + return Err(Error::from_string(response.error_info.unwrap_or("".to_string()))); + } + + Ok(()) + } +} diff --git a/ecstore/src/endpoints.rs b/ecstore/src/endpoints.rs index 6f781704..9af5b963 100644 --- a/ecstore/src/endpoints.rs +++ b/ecstore/src/endpoints.rs @@ -408,6 +408,11 @@ impl AsMut> for EndpointServerPools { } impl EndpointServerPools { + pub fn from_volumes(server_addr: &str, endpoints: Vec) -> Result<(EndpointServerPools, SetupType)> { + let layouts = DisksLayout::try_from(endpoints.as_slice())?; + + Self::create_server_endpoints(server_addr, &layouts) + } /// validates and creates new endpoints from input args, supports /// both ellipses and without ellipses transparently. pub fn create_server_endpoints(server_addr: &str, disks_layout: &DisksLayout) -> Result<(EndpointServerPools, SetupType)> { diff --git a/ecstore/src/erasure.rs b/ecstore/src/erasure.rs index 6c23842a..13cb5af1 100644 --- a/ecstore/src/erasure.rs +++ b/ecstore/src/erasure.rs @@ -3,7 +3,7 @@ use bytes::Bytes; use futures::future::join_all; use futures::{Stream, StreamExt}; use reed_solomon_erasure::galois_8::ReedSolomon; -use tokio::io::AsyncWrite; +use std::fmt::Debug; use tokio::io::AsyncWriteExt; use tokio::io::DuplexStream; use tracing::debug; @@ -13,7 +13,7 @@ use uuid::Uuid; use crate::chunk_stream::ChunkedStream; use crate::disk::error::DiskError; -use crate::disk::FileReader; +use crate::disk::{FileReader, FileWriter}; pub struct Erasure { data_shards: usize, @@ -43,17 +43,16 @@ impl Erasure { } } - pub async fn encode( + pub async fn encode( &self, body: S, - writers: &mut [W], + writers: &mut [FileWriter], // block_size: usize, total_size: usize, _write_quorum: usize, ) -> Result where S: Stream> + Send + Sync + 'static, - W: AsyncWrite + Unpin, { let mut stream = ChunkedStream::new(body, total_size, self.block_size, false); let mut total: usize = 0; @@ -85,7 +84,7 @@ impl Erasure { let mut errs = Vec::new(); for (i, w) in writers.iter_mut().enumerate() { - match w.write_all(blocks[i].as_ref()).await { + match w.write(blocks[i].as_ref()).await { Ok(_) => errs.push(None), Err(e) => errs.push(Some(e)), } @@ -318,6 +317,12 @@ impl Erasure { } } +#[async_trait::async_trait] +pub trait Write { + async fn write(&mut self, buf: &[u8]) -> Result<()>; +} + +#[async_trait::async_trait] pub trait ReadAt { async fn read_at(&mut self, offset: usize, length: usize) -> Result<(Vec, usize)>; } diff --git a/ecstore/src/lib.rs b/ecstore/src/lib.rs index 1371d1f1..85e1c901 100644 --- a/ecstore/src/lib.rs +++ b/ecstore/src/lib.rs @@ -1,12 +1,12 @@ mod bucket_meta; mod chunk_stream; pub mod disk; -mod disks_layout; -mod endpoints; -mod erasure; +pub mod disks_layout; +pub mod 
endpoints; +pub mod erasure; pub mod error; mod file_meta; -mod peer; +pub mod peer; pub mod set_disk; mod sets; pub mod store; diff --git a/ecstore/src/peer.rs b/ecstore/src/peer.rs index 878eb828..79dc6aec 100644 --- a/ecstore/src/peer.rs +++ b/ecstore/src/peer.rs @@ -1,11 +1,19 @@ use async_trait::async_trait; use futures::future::join_all; +use protos::proto_gen::node_service::node_service_client::NodeServiceClient; +use protos::proto_gen::node_service::{DeleteBucketRequest, GetBucketInfoRequest, ListBucketRequest, MakeBucketRequest}; +use protos::{node_service_time_out_client, DEFAULT_GRPC_SERVER_MESSAGE_LEN}; use regex::Regex; -use std::{collections::HashMap, fmt::Debug, sync::Arc}; -use tracing::warn; +use std::{collections::HashMap, fmt::Debug, sync::Arc, time::Duration}; +use tokio::sync::RwLock; +use tonic::transport::{Channel, Endpoint}; +use tonic::Request; +use tower::timeout::Timeout; +use tracing::{info, warn}; +use crate::store::all_local_disk; use crate::{ - disk::{self, error::DiskError, DiskStore, VolumeInfo}, + disk::{self, error::DiskError, VolumeInfo}, endpoints::{EndpointServerPools, Node}, error::{Error, Result}, store_api::{BucketInfo, BucketOptions, DeleteBucketOptions, MakeBucketOptions}, @@ -19,7 +27,7 @@ pub trait PeerS3Client: Debug + Sync + Send + 'static { async fn list_bucket(&self, opts: &BucketOptions) -> Result>; async fn delete_bucket(&self, bucket: &str, opts: &DeleteBucketOptions) -> Result<()>; async fn get_bucket_info(&self, bucket: &str, opts: &BucketOptions) -> Result; - fn get_pools(&self) -> Vec; + fn get_pools(&self) -> Option>; } #[derive(Debug)] @@ -29,24 +37,23 @@ pub struct S3PeerSys { } impl S3PeerSys { - pub fn new(eps: &EndpointServerPools, local_disks: Vec) -> Self { + pub fn new(eps: &EndpointServerPools) -> Self { Self { - clients: Self::new_clients(eps, local_disks), + clients: Self::new_clients(eps), pools_count: eps.as_ref().len(), } } - fn new_clients(eps: &EndpointServerPools, local_disks: Vec) -> Vec { + fn new_clients(eps: &EndpointServerPools) -> Vec { let nodes = eps.get_nodes(); let v: Vec = nodes .iter() .map(|e| { if e.is_local { - let cli: Box = - Box::new(LocalPeerS3Client::new(local_disks.clone(), e.clone(), e.pools.clone())); + let cli: Box = Box::new(LocalPeerS3Client::new(Some(e.clone()), Some(e.pools.clone()))); Arc::new(cli) } else { - let cli: Box = Box::new(RemotePeerS3Client::new(e.clone(), e.pools.clone())); + let cli: Box = Box::new(RemotePeerS3Client::new(Some(e.clone()), Some(e.pools.clone()))); Arc::new(cli) } }) @@ -56,9 +63,8 @@ impl S3PeerSys { } } -#[async_trait] -impl PeerS3Client for S3PeerSys { - async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> { +impl S3PeerSys { + pub async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> { let mut futures = Vec::with_capacity(self.clients.len()); for cli in self.clients.iter() { futures.push(cli.make_bucket(bucket, opts)); @@ -83,7 +89,7 @@ impl PeerS3Client for S3PeerSys { for (j, cli) in self.clients.iter().enumerate() { let pools = cli.get_pools(); let idx = i; - if pools.contains(&idx) { + if pools.unwrap_or_default().contains(&idx) { per_pool_errs.push(errors[j].as_ref()); } @@ -95,7 +101,7 @@ impl PeerS3Client for S3PeerSys { Ok(()) } - async fn list_bucket(&self, opts: &BucketOptions) -> Result> { + pub async fn list_bucket(&self, opts: &BucketOptions) -> Result> { let mut futures = Vec::with_capacity(self.clients.len()); for cli in self.clients.iter() { futures.push(cli.list_bucket(opts)); @@ 
-165,7 +171,7 @@ impl PeerS3Client for S3PeerSys { Ok(()) } - async fn get_bucket_info(&self, bucket: &str, opts: &BucketOptions) -> Result { + pub async fn get_bucket_info(&self, bucket: &str, opts: &BucketOptions) -> Result { let mut futures = Vec::with_capacity(self.clients.len()); for cli in self.clients.iter() { futures.push(cli.get_bucket_info(bucket, opts)); @@ -193,7 +199,7 @@ impl PeerS3Client for S3PeerSys { for (j, cli) in self.clients.iter().enumerate() { let pools = cli.get_pools(); let idx = i; - if pools.contains(&idx) { + if pools.unwrap_or_default().contains(&idx) { per_pool_errs.push(errors[j].as_ref()); } @@ -206,22 +212,22 @@ impl PeerS3Client for S3PeerSys { .ok_or(Error::new(DiskError::VolumeNotFound)) } - fn get_pools(&self) -> Vec { + pub fn get_pools(&self) -> Option> { unimplemented!() } } #[derive(Debug)] pub struct LocalPeerS3Client { - pub local_disks: Vec, + // pub local_disks: Vec, // pub node: Node, - pub pools: Vec, + pub pools: Option>, } impl LocalPeerS3Client { - fn new(local_disks: Vec, _node: Node, pools: Vec) -> Self { + pub fn new(_node: Option, pools: Option>) -> Self { Self { - local_disks, + // local_disks, // node, pools, } @@ -230,12 +236,14 @@ impl LocalPeerS3Client { #[async_trait] impl PeerS3Client for LocalPeerS3Client { - fn get_pools(&self) -> Vec { + fn get_pools(&self) -> Option> { self.pools.clone() } async fn list_bucket(&self, _opts: &BucketOptions) -> Result> { - let mut futures = Vec::with_capacity(self.local_disks.len()); - for disk in self.local_disks.iter() { + let local_disks = all_local_disk().await; + + let mut futures = Vec::with_capacity(local_disks.len()); + for disk in local_disks.iter() { futures.push(disk.list_volumes()); } @@ -280,8 +288,9 @@ impl PeerS3Client for LocalPeerS3Client { Ok(buckets) } async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> { - let mut futures = Vec::with_capacity(self.local_disks.len()); - for disk in self.local_disks.iter() { + let local_disks = all_local_disk().await; + let mut futures = Vec::with_capacity(local_disks.len()); + for disk in local_disks.iter() { futures.push(async move { match disk.make_volume(bucket).await { Ok(_) => Ok(()), @@ -313,15 +322,16 @@ impl PeerS3Client for LocalPeerS3Client { } async fn get_bucket_info(&self, bucket: &str, _opts: &BucketOptions) -> Result { - let mut futures = Vec::with_capacity(self.local_disks.len()); - for disk in self.local_disks.iter() { + let local_disks = all_local_disk().await; + let mut futures = Vec::with_capacity(local_disks.len()); + for disk in local_disks.iter() { futures.push(disk.stat_volume(bucket)); } let results = join_all(futures).await; - let mut ress = Vec::with_capacity(self.local_disks.len()); - let mut errs = Vec::with_capacity(self.local_disks.len()); + let mut ress = Vec::with_capacity(local_disks.len()); + let mut errs = Vec::with_capacity(local_disks.len()); for res in results { match res { @@ -351,9 +361,10 @@ impl PeerS3Client for LocalPeerS3Client { } async fn delete_bucket(&self, bucket: &str, opts: &DeleteBucketOptions) -> Result<()> { - let mut futures = Vec::with_capacity(self.local_disks.len()); + let local_disks = all_local_disk().await; + let mut futures = Vec::with_capacity(local_disks.len()); - for disk in self.local_disks.iter() { + for disk in local_disks.iter() { futures.push(disk.delete_volume(bucket)); } @@ -398,34 +409,115 @@ impl PeerS3Client for LocalPeerS3Client { #[derive(Debug)] pub struct RemotePeerS3Client { - // pub node: Node, - // pub pools: Vec, + pub node: 
Option, + pub pools: Option>, + connector: Endpoint, + channel: Arc>>, } impl RemotePeerS3Client { - fn new(_node: Node, _pools: Vec) -> Self { - // Self { node, pools } - Self {} + fn new(node: Option, pools: Option>) -> Self { + let connector = + Endpoint::from_shared(format!("{}", node.as_ref().map(|v| { v.url.to_string() }).unwrap_or_default())).unwrap(); + Self { + node, + pools, + connector, + channel: Arc::new(RwLock::new(None)), + } + } + + #[allow(dead_code)] + async fn get_client(&self) -> Result>> { + let channel_clone = self.channel.clone(); + let channel = { + let read_lock = channel_clone.read().await; + + if let Some(ref channel) = *read_lock { + channel.clone() + } else { + let new_channel = self.connector.connect().await?; + + info!("get channel success"); + + *self.channel.write().await = Some(new_channel.clone()); + + new_channel + } + }; + + Ok(node_service_time_out_client( + channel, + Duration::new(30, 0), // TODO: use config setting + DEFAULT_GRPC_SERVER_MESSAGE_LEN, + // grpc_enable_gzip, + false, // TODO: use config setting + )) + } + + async fn get_client_v2(&self) -> Result> { + // Ok(NodeServiceClient::connect("http://220.181.1.138:9000").await?) + // let addr = format!("{}://{}:{}", self.url.scheme(), self.url.host_str().unwrap(), self.url.port().unwrap()); + let addr = format!("{}", self.node.as_ref().map(|v| { v.url.to_string() }).unwrap_or_default()); + Ok(NodeServiceClient::connect(addr).await?) } } #[async_trait] impl PeerS3Client for RemotePeerS3Client { - fn get_pools(&self) -> Vec { - unimplemented!() + fn get_pools(&self) -> Option> { + self.pools.clone() } - async fn list_bucket(&self, _opts: &BucketOptions) -> Result> { - unimplemented!() + async fn list_bucket(&self, opts: &BucketOptions) -> Result> { + let options = serde_json::to_string(opts)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(ListBucketRequest { options }); + let response = client.list_bucket(request).await?.into_inner(); + let bucket_infos = response + .bucket_infos + .into_iter() + .filter_map(|json_str| serde_json::from_str::(&json_str).ok()) + .collect(); + + Ok(bucket_infos) } - async fn make_bucket(&self, _bucket: &str, _opts: &MakeBucketOptions) -> Result<()> { - unimplemented!() + async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> { + let options = serde_json::to_string(opts)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(MakeBucketRequest { + name: bucket.to_string(), + options, + }); + let response = client.make_bucket(request).await?.into_inner(); + + // TODO: deal with error + if !response.success { + warn!("make bucket error: {:?}", response.error_info); + } + + Ok(()) } - async fn get_bucket_info(&self, _bucket: &str, _opts: &BucketOptions) -> Result { - unimplemented!() + async fn get_bucket_info(&self, bucket: &str, opts: &BucketOptions) -> Result { + let options = serde_json::to_string(opts)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(GetBucketInfoRequest { + bucket: bucket.to_string(), + options, + }); + let response = client.get_bucket_info(request).await?.into_inner(); + let bucket_info = serde_json::from_str::(&response.bucket_info)?; + + Ok(bucket_info) } - async fn delete_bucket(&self, _bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> { - unimplemented!() + async fn delete_bucket(&self, bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> { + let mut client = self.get_client_v2().await?; + let request = 
Request::new(DeleteBucketRequest { + bucket: bucket.to_string(), + }); + let _response = client.delete_bucket(request).await?.into_inner(); + + Ok(()) } } diff --git a/ecstore/src/sets.rs b/ecstore/src/sets.rs index 112ae61f..d7a93746 100644 --- a/ecstore/src/sets.rs +++ b/ecstore/src/sets.rs @@ -12,6 +12,7 @@ use crate::{ endpoints::PoolEndpoints, error::{Error, Result}, set_disk::SetDisks, + store::{GLOBAL_IsDistErasure, GLOBAL_LOCAL_DISK_SET_DRIVES}, store_api::{ BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec, ListObjectsV2Info, MakeBucketOptions, MultipartUploadResult, ObjectInfo, ObjectOptions, ObjectToDelete, PartInfo, @@ -36,7 +37,7 @@ pub struct Sets { } impl Sets { - pub fn new( + pub async fn new( disks: Vec>, endpoints: &PoolEndpoints, fm: &FormatV3, @@ -52,11 +53,32 @@ impl Sets { let mut set_drive = Vec::with_capacity(set_drive_count); for j in 0..set_drive_count { let idx = i * set_drive_count + j; - if disks[idx].is_none() { + let mut disk = disks[idx].clone(); + if disk.is_none() { set_drive.push(None); - } else { - let disk = disks[idx].clone(); + continue; + } + + if disk.as_ref().unwrap().is_local() && *GLOBAL_IsDistErasure.read().await { + let local_disk = { + let local_set_drives = GLOBAL_LOCAL_DISK_SET_DRIVES.read().await; + local_set_drives[pool_idx][i][j].clone() + }; + + if local_disk.is_none() { + set_drive.push(None); + continue; + } + + let _ = disk.as_ref().unwrap().close().await; + + disk = local_disk; + } + + if let Some(_disk_id) = disk.as_ref().unwrap().get_disk_id().await { set_drive.push(disk); + } else { + set_drive.push(None); } } diff --git a/ecstore/src/store.rs b/ecstore/src/store.rs index 3a147d44..bc4dca20 100644 --- a/ecstore/src/store.rs +++ b/ecstore/src/store.rs @@ -1,10 +1,11 @@ use crate::{ bucket_meta::BucketMetadata, - disk::{error::DiskError, DeleteOptions, DiskOption, DiskStore, WalkDirOptions, BUCKET_META_PREFIX, RUSTFS_META_BUCKET}, - disks_layout::DisksLayout, - endpoints::EndpointServerPools, + disk::{ + error::DiskError, new_disk, DeleteOptions, DiskOption, DiskStore, WalkDirOptions, BUCKET_META_PREFIX, RUSTFS_META_BUCKET, + }, + endpoints::{EndpointServerPools, SetupType}, error::{Error, Result}, - peer::{PeerS3Client, S3PeerSys}, + peer::S3PeerSys, sets::Sets, store_api::{ BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec, @@ -13,14 +14,137 @@ use crate::{ }, store_init, utils, }; +use backon::{ExponentialBuilder, Retryable}; use futures::future::join_all; use http::HeaderMap; use s3s::{dto::StreamingBlob, Body}; -use std::collections::{HashMap, HashSet}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::Duration, +}; use time::OffsetDateTime; -use tracing::{debug, warn}; +use tokio::{fs, sync::RwLock}; +use tracing::{debug, info, warn}; use uuid::Uuid; +use lazy_static::lazy_static; + +lazy_static! 
{ + pub static ref GLOBAL_IsErasure: RwLock<bool> = RwLock::new(false); + pub static ref GLOBAL_IsDistErasure: RwLock<bool> = RwLock::new(false); + pub static ref GLOBAL_IsErasureSD: RwLock<bool> = RwLock::new(false); +} + +pub async fn update_erasure_type(setup_type: SetupType) { + let mut is_erasure = GLOBAL_IsErasure.write().await; + *is_erasure = setup_type == SetupType::Erasure; + + let mut is_dist_erasure = GLOBAL_IsDistErasure.write().await; + *is_dist_erasure = setup_type == SetupType::DistErasure; + + if *is_dist_erasure { + *is_erasure = true + } + + let mut is_erasure_sd = GLOBAL_IsErasureSD.write().await; + *is_erasure_sd = setup_type == SetupType::ErasureSD; +} + +lazy_static! { + pub static ref GLOBAL_LOCAL_DISK_MAP: Arc<RwLock<HashMap<String, Option<DiskStore>>>> = Arc::new(RwLock::new(HashMap::new())); + pub static ref GLOBAL_LOCAL_DISK_SET_DRIVES: Arc<RwLock<Vec<Vec<Vec<Option<DiskStore>>>>>> = + Arc::new(RwLock::new(Vec::new())); +} + +pub async fn find_local_disk(disk_path: &String) -> Option<DiskStore> { + let disk_path = match fs::canonicalize(disk_path).await { + Ok(disk_path) => disk_path, + Err(_) => return None, + }; + + let disk_map = GLOBAL_LOCAL_DISK_MAP.read().await; + + let path = disk_path.to_string_lossy().to_string(); + if disk_map.contains_key(&path) { + let a = disk_map[&path].as_ref().cloned(); + + return a; + } + None +} + +pub async fn all_local_disk_path() -> Vec<String> { + let disk_map = GLOBAL_LOCAL_DISK_MAP.read().await; + disk_map.keys().map(|v| v.clone()).collect() +} + +pub async fn all_local_disk() -> Vec<DiskStore> { + let disk_map = GLOBAL_LOCAL_DISK_MAP.read().await; + disk_map + .values() + .filter(|v| v.is_some()) + .map(|v| v.as_ref().unwrap().clone()) + .collect() +} + +// init_local_disks initializes the local disks; it must succeed before the server starts +pub async fn init_local_disks(endpoint_pools: EndpointServerPools) -> Result<()> { + let opt = &DiskOption { + cleanup: true, + health_check: true, + }; + + let mut global_set_drives = GLOBAL_LOCAL_DISK_SET_DRIVES.write().await; + for pool_eps in endpoint_pools.as_ref().iter() { + let mut set_count_drives = Vec::with_capacity(pool_eps.set_count); + for _ in 0..pool_eps.set_count { + set_count_drives.push(vec![None; pool_eps.drives_per_set]); + } + + global_set_drives.push(set_count_drives); + } + + let mut global_local_disk_map = GLOBAL_LOCAL_DISK_MAP.write().await; + + for pool_eps in endpoint_pools.as_ref().iter() { + let mut set_drives = HashMap::new(); + for ep in pool_eps.endpoints.as_ref().iter() { + if !ep.is_local { + continue; + } + + let disk = new_disk(ep, opt).await?; + + let path = disk.path().to_string_lossy().to_string(); + + global_local_disk_map.insert(path, Some(disk.clone())); + + set_drives.insert(ep.disk_idx, Some(disk.clone())); + + if ep.pool_idx.is_some() && ep.set_idx.is_some() && ep.disk_idx.is_some() { + global_set_drives[ep.pool_idx.unwrap()][ep.set_idx.unwrap()][ep.disk_idx.unwrap()] = Some(disk.clone()); + } + } + } + + Ok(()) +}
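This registry is what `Sets::new` and the local peer client consult instead of having `Vec<DiskStore>` threaded through constructors, so startup order matters. A minimal sketch of the intended sequence (illustrative only; it assumes `EndpointServerPools` can be cloned or rebuilt for the second consumer):

```rust
// Sketch: the globals must be populated before ECStore::new swaps local
// disks in and registers itself as the object layer.
async fn bootstrap(server_addr: &str, volumes: Vec<String>) -> Result<()> {
    let (endpoint_pools, setup_type) = EndpointServerPools::from_volumes(server_addr, volumes)?;
    update_erasure_type(setup_type).await;
    // fills GLOBAL_LOCAL_DISK_MAP / GLOBAL_LOCAL_DISK_SET_DRIVES
    init_local_disks(endpoint_pools.clone()).await?; // assumes Clone
    // registers the store in GLOBAL_OBJECT_API
    ECStore::new(server_addr.to_string(), endpoint_pools).await
}
```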
+ +lazy_static! { + pub static ref GLOBAL_OBJECT_API: Arc<RwLock<Option<ECStore>>> = Arc::new(RwLock::new(None)); + pub static ref GLOBAL_LOCAL_DISK: Arc<RwLock<Vec<Option<DiskStore>>>> = Arc::new(RwLock::new(Vec::new())); +} + +pub fn new_object_layer_fn() -> Arc<RwLock<Option<ECStore>>> { + GLOBAL_OBJECT_API.clone() +} + +async fn set_object_layer(o: ECStore) { + let mut global_object_api = GLOBAL_OBJECT_API.write().await; + *global_object_api = Some(o); +} + #[derive(Debug)] pub struct ECStore { pub id: uuid::Uuid, @@ -28,16 +152,16 @@ pub struct ECStore { pub disk_map: HashMap<usize, Vec<Option<DiskStore>>>, pub pools: Vec, pub peer_sys: S3PeerSys, - pub local_disks: Vec<DiskStore>, + // pub local_disks: Vec<DiskStore>, } impl ECStore { - pub async fn new(address: String, endpoints: Vec<String>) -> Result<Self> { - let layouts = DisksLayout::try_from(endpoints.as_slice())?; + pub async fn new(_address: String, endpoint_pools: EndpointServerPools) -> Result<()> { + // let layouts = DisksLayout::try_from(endpoints.as_slice())?; let mut deployment_id = None; - let (endpoint_pools, _) = EndpointServerPools::create_server_endpoints(address.as_str(), &layouts)?; + // let (endpoint_pools, _) = EndpointServerPools::create_server_endpoints(address.as_str(), &layouts)?; let mut pools = Vec::with_capacity(endpoint_pools.as_ref().len()); let mut disk_map = HashMap::with_capacity(endpoint_pools.as_ref().len()); @@ -46,6 +170,8 @@ impl ECStore { let mut local_disks = Vec::new(); + info!("endpoint_pools: {:?}", endpoint_pools); + for (i, pool_eps) in endpoint_pools.as_ref().iter().enumerate() { // TODO: read from config parseStorageClass let partiy_count = store_init::default_partiy_count(pool_eps.drives_per_set); @@ -61,13 +187,21 @@ impl ECStore { DiskError::check_disk_fatal_errs(&errs)?; - let fm = store_init::do_init_format_file( - first_is_local, - &disks, - pool_eps.set_count, - pool_eps.drives_per_set, - deployment_id, - ) + let fm = (|| async { + store_init::connect_load_init_formats( + first_is_local, + &disks, + pool_eps.set_count, + pool_eps.drives_per_set, + deployment_id, + ) + .await + }) + .retry(ExponentialBuilder::default().with_max_times(usize::MAX)) + .sleep(tokio::time::sleep) + .notify(|err, dur: Duration| { + info!("retrying get formats {:?} after {:?}", err, dur); + }) + .await?; if deployment_id.is_none() { @@ -88,24 +222,42 @@ impl ECStore { } } - let sets = Sets::new(disks.clone(), pool_eps, &fm, i, partiy_count)?; + let sets = Sets::new(disks.clone(), pool_eps, &fm, i, partiy_count).await?; pools.push(sets); disk_map.insert(i, disks); } - let peer_sys = S3PeerSys::new(&endpoint_pools, local_disks.clone()); + // swap in the locally registered disks + if !*GLOBAL_IsDistErasure.read().await { + let mut global_local_disk_map = GLOBAL_LOCAL_DISK_MAP.write().await; + for disk in local_disks { + let path = disk.path().to_string_lossy().to_string(); + global_local_disk_map.insert(path, Some(disk.clone())); + } + } - Ok(ECStore { + let peer_sys = S3PeerSys::new(&endpoint_pools); + + let ec = ECStore { + id: deployment_id.unwrap(), + disk_map, + pools, - local_disks, + peer_sys, - }) + }; + + set_object_layer(ec).await; + + Ok(()) } + pub fn init_local_disks() {} + + // pub fn local_disks(&self) -> Vec<DiskStore> { + // self.local_disks.clone() + // } + fn single_pool(&self) -> bool { self.pools.len() == 1 }
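`ECStore::new` now retries format loading with backon until a quorum of disks responds. The same pattern works for any fallible async closure; a standalone sketch mirroring the call above (the probed path is illustrative, and the attempt count is capped at 5 instead of `usize::MAX`):

```rust
use backon::{ExponentialBuilder, Retryable};
use std::time::Duration;

async fn probe(path: &str) -> std::io::Result<Vec<u8>> {
    tokio::fs::read(path).await // any fallible async operation
}

// Retry with exponential backoff, logging each attempt, as done for
// connect_load_init_formats in ECStore::new above.
async fn probe_with_backoff(path: &str) -> std::io::Result<Vec<u8>> {
    (|| async { probe(path).await })
        .retry(ExponentialBuilder::default().with_max_times(5))
        .sleep(tokio::time::sleep)
        .notify(|err, dur: Duration| println!("retrying {:?} after {:?}", err, dur))
        .await
}
```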
+#[derive(Debug, Default, Serialize, Deserialize)] pub struct MakeBucketOptions { pub force_create: bool, } @@ -380,9 +382,10 @@ pub struct ObjectOptions { // } // } +#[derive(Debug, Default, Serialize, Deserialize)] pub struct BucketOptions {} -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct BucketInfo { pub name: String, pub created: Option, diff --git a/ecstore/src/store_init.rs b/ecstore/src/store_init.rs index a1178d88..6e6eb2fc 100644 --- a/ecstore/src/store_init.rs +++ b/ecstore/src/store_init.rs @@ -1,7 +1,9 @@ use crate::{ - disk::error::DiskError, - disk::format::{FormatErasureVersion, FormatMetaVersion, FormatV3}, - disk::{new_disk, DiskOption, DiskStore, FORMAT_CONFIG_FILE, RUSTFS_META_BUCKET}, + disk::{ + error::DiskError, + format::{FormatErasureVersion, FormatMetaVersion, FormatV3}, + new_disk, DiskOption, DiskStore, FORMAT_CONFIG_FILE, RUSTFS_META_BUCKET, + }, endpoints::Endpoints, error::{Error, Result}, }; @@ -10,6 +12,7 @@ use std::{ collections::{hash_map::Entry, HashMap}, fmt::Debug, }; + use tracing::warn; use uuid::Uuid; @@ -40,30 +43,34 @@ pub async fn init_disks(eps: &Endpoints, opt: &DiskOption) -> (Vec<Option<DiskStore>>, Vec<Option<Error>>) -pub async fn do_init_format_file( +pub async fn connect_load_init_formats( first_disk: bool, disks: &[Option<DiskStore>], set_count: usize, set_drive_count: usize, deployment_id: Option<Uuid>, ) -> Result<FormatV3> { - let (formats, errs) = read_format_file_all(disks, false).await; + warn!("connect_load_init_formats id: {:?}, first_disk: {}", deployment_id, first_disk); + + let (formats, errs) = load_format_erasure_all(disks, false).await; DiskError::check_disk_fatal_errs(&errs)?; + warn!("load_format_erasure_all errs {:?}", &errs); + check_format_erasure_values(&formats, set_drive_count)?; if first_disk && DiskError::should_init_erasure_disks(&errs) { // UnformattedDisk, not format file create // new format and save - let fms = init_format_files(disks, set_count, set_drive_count, deployment_id); + let fms = init_format_erasure(disks, set_count, set_drive_count, deployment_id); let _errs = save_format_file_all(disks, &fms).await; // TODO: check quorum // reduceWriteQuorumErrs(&errs)?; - let fm = get_format_file_in_quorum(&fms)?; + let fm = get_format_erasure_in_quorum(&fms)?; return Ok(fm); } @@ -77,12 +84,12 @@ pub async fn do_init_format_file( return Err(Error::new(ErasureError::FirstDiskWait)); } - let fm = get_format_file_in_quorum(&formats)?; + let fm = get_format_erasure_in_quorum(&formats)?; Ok(fm) } -fn init_format_files( +fn init_format_erasure( disks: &[Option<DiskStore>], set_count: usize, set_drive_count: usize, @@ -106,7 +113,7 @@ fn init_format_files( fms } -fn get_format_file_in_quorum(formats: &[Option<FormatV3>]) -> Result<FormatV3> { +fn get_format_erasure_in_quorum(formats: &[Option<FormatV3>]) -> Result<FormatV3> { let mut countmap = HashMap::new(); for f in formats.iter() { @@ -124,8 +131,15 @@ fn get_format_file_in_quorum(formats: &[Option<FormatV3>]) -> Result<FormatV3> { let (max_drives, max_count) = countmap.iter().max_by_key(|&(_, c)| c).unwrap_or((&0, &0)); - if *max_drives == 0 || *max_count < formats.len() / 2 { - warn!("*max_drives == 0 || *max_count < formats.len() / 2"); + warn!("get_format_erasure_in_quorum fi: {:?}", &formats); + + if *max_drives == 0 || *max_count <= formats.len() / 2 { + warn!( + "*max_drives == 0 || *max_count <= formats.len() / 2, {} || {} <= {}", + max_drives, + max_count, + formats.len() / 2 + ); return Err(Error::new(ErasureError::ErasureReadQuorum)); } @@ -184,21 +198,26 @@ pub fn default_partiy_count(drive: usize) -> usize { _ => 4, } } -// read_format_file_all reads format.json from all disks -async fn read_format_file_all(disks: &[Option<DiskStore>], heal: bool) -> (Vec<Option<FormatV3>>, Vec<Option<Error>>) {
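Note that the quorum comparison above was tightened from `<` to `<=`: the winning format must now be present on strictly more than half of the disks. A small self-contained sketch of that counting rule, with plain `u32` values standing in for per-disk format identities:

```rust
use std::collections::HashMap;

/// Returns the winning value only when it appears on strictly more than
/// half of the participants; mirrors the `*max_count <= formats.len() / 2`
/// rejection above.
fn quorum_value(formats: &[Option<u32>]) -> Option<u32> {
    let mut countmap: HashMap<u32, usize> = HashMap::new();
    for f in formats.iter().flatten() {
        *countmap.entry(*f).or_insert(0) += 1;
    }
    let (&value, &count) = countmap.iter().max_by_key(|&(_, c)| c)?;
    if count <= formats.len() / 2 {
        return None; // no read quorum
    }
    Some(value)
}

fn main() {
    assert_eq!(quorum_value(&[Some(7), Some(7), Some(7), None]), Some(7));
    assert_eq!(quorum_value(&[Some(7), Some(7), Some(9), None]), None);
    println!("quorum checks passed");
}
```

With 4 disks, two agreeing copies fail the check (2 <= 4 / 2), which is exactly the boundary case the stricter comparison now rejects.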
+// load_format_erasure_all reads format.json from all disks +async fn load_format_erasure_all(disks: &[Option<DiskStore>], heal: bool) -> (Vec<Option<FormatV3>>, Vec<Option<Error>>) { let mut futures = Vec::with_capacity(disks.len()); - for ep in disks.iter() { - futures.push(read_format_file(ep, heal)); + for disk in disks.iter() { + futures.push(read_format_file(disk, heal)); } let mut datas = Vec::with_capacity(disks.len()); let mut errors = Vec::with_capacity(disks.len()); let results = join_all(futures).await; + let mut i = 0; for result in results { match result { Ok(s) => { + if !heal { + let _ = disks[i].as_ref().unwrap().set_disk_id(Some(s.erasure.this.clone())).await; + } + datas.push(Some(s)); errors.push(None); } @@ -207,6 +226,8 @@ async fn read_format_file_all(disks: &[Option], heal: bool) -> (Vec, _heal: bool) -> Result], formats: &[Option]) -> Vec> { let mut futures = Vec::with_capacity(disks.len()); - for (i, ep) in disks.iter().enumerate() { - futures.push(save_format_file(ep, &formats[i])); + for (i, disk) in disks.iter().enumerate() { + futures.push(save_format_file(disk, &formats[i])); } let mut errors = Vec::with_capacity(disks.len()); @@ -277,9 +298,7 @@ async fn save_format_file(disk: &Option<DiskStore>, format: &Option<FormatV3>) - disk.rename_file(RUSTFS_META_BUCKET, tmpfile.as_str(), RUSTFS_META_BUCKET, FORMAT_CONFIG_FILE) .await?; - // let mut disk = disk; - - // disk.set_disk_id(format.erasure.this); + disk.set_disk_id(Some(format.erasure.this)).await?; Ok(()) } diff --git a/rustfs-inner.zip b/rustfs-inner.zip deleted file mode 100644 index f5aad0ed..00000000 Binary files a/rustfs-inner.zip and /dev/null differ diff --git a/rustfs/Cargo.toml b/rustfs/Cargo.toml index f08b977e..d2d1a989 100644 --- a/rustfs/Cargo.toml +++ b/rustfs/Cargo.toml @@ -10,6 +10,25 @@ rust-version.workspace = true [dependencies] async-trait.workspace = true +bytes.workspace = true +clap.workspace = true +ecstore.workspace = true +flatbuffers.workspace = true +futures.workspace = true +futures-util.workspace = true +hyper.workspace = true +hyper-util.workspace = true +http.workspace = true +http-body.workspace = true +mime.workspace = true +netif.workspace = true +pin-project-lite.workspace = true +prost.workspace = true +prost-types.workspace = true +protos.workspace = true +protobuf.workspace = true +s3s.workspace = true +serde_json.workspace = true tracing.workspace = true time = { workspace = true, features = ["parsing", "formatting"] } tokio = { workspace = true, features = [ @@ -18,12 +37,22 @@ tokio = { workspace = true, features = [ "net", "signal", ] } +tonic = { version = "0.12.1", features = ["gzip"] } +tonic-reflection.workspace = true +tower.workspace = true tracing-error.workspace = true +tracing-subscriber.workspace = true +transform-stream.workspace = true +uuid = "1.10.0" + +[build-dependencies] +prost-build.workspace = true +tonic-build.workspace = true http.workspace = true bytes.workspace = true futures.workspace = true futures-util.workspace = true -uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] } +# uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] } ecstore = { path = "../ecstore" } s3s = "0.10.0" clap = { version = "4.5.7", features = ["derive"] } diff --git a/rustfs/src/grpc.rs b/rustfs/src/grpc.rs new file mode 100644 index 00000000..7afc6700 --- /dev/null +++ b/rustfs/src/grpc.rs @@ -0,0 +1,816 @@ +use ecstore::{ + disk::{DeleteOptions, DiskStore, FileInfoVersions, ReadMultipleReq, ReadOptions, WalkDirOptions}, + erasure::{ReadAt, Write}, + peer::{LocalPeerS3Client, PeerS3Client}, + store::{all_local_disk_path, find_local_disk},
store_api::{BucketOptions, FileInfo, MakeBucketOptions}, +}; +use tonic::{Request, Response, Status}; +use tracing::{debug, error, info}; + +use protos::{ + models::{PingBody, PingBodyBuilder}, + proto_gen::node_service::{ + node_service_server::{NodeService as Node, NodeServiceServer as NodeServer}, + DeleteBucketRequest, DeleteBucketResponse, DeleteRequest, DeleteResponse, DeleteVersionsRequest, DeleteVersionsResponse, + DeleteVolumeRequest, DeleteVolumeResponse, GetBucketInfoRequest, GetBucketInfoResponse, ListBucketRequest, + ListBucketResponse, ListDirRequest, ListDirResponse, ListVolumesRequest, ListVolumesResponse, MakeBucketRequest, + MakeBucketResponse, MakeVolumeRequest, MakeVolumeResponse, MakeVolumesRequest, MakeVolumesResponse, PingRequest, + PingResponse, ReadAllRequest, ReadAllResponse, ReadAtRequest, ReadAtResponse, ReadMultipleRequest, ReadMultipleResponse, + ReadVersionRequest, ReadVersionResponse, ReadXlRequest, ReadXlResponse, RenameDataRequest, RenameDataResponse, + RenameFileRequst, RenameFileResponse, StatVolumeRequest, StatVolumeResponse, WalkDirRequest, WalkDirResponse, + WriteAllRequest, WriteAllResponse, WriteMetadataRequest, WriteMetadataResponse, WriteRequest, WriteResponse, + }, +}; + +#[derive(Debug)] +struct NodeService { + pub local_peer: LocalPeerS3Client, +} + +pub fn make_server() -> NodeServer<NodeService> { + // let local_disks = all_local_disk().await; + let local_peer = LocalPeerS3Client::new(None, None); + NodeServer::new(NodeService { local_peer }) +} + +impl NodeService { + async fn find_disk(&self, disk_path: &str) -> Option<DiskStore> { + find_local_disk(disk_path).await + // let disk_path = match fs::canonicalize(disk_path).await { + // Ok(disk_path) => disk_path, + // Err(_) => return None, + // }; + // self.local_peer.local_disks.iter().find(|&x| x.path() == disk_path).cloned() + } + + async fn all_disk(&self) -> Vec<String> { + all_local_disk_path().await + // self.local_peer + // .local_disks + // .iter() + // .map(|disk| disk.path().to_string_lossy().to_string()) + // .collect() + } +} + +#[tonic::async_trait] +impl Node for NodeService { + async fn ping(&self, request: Request<PingRequest>) -> Result<Response<PingResponse>, Status> { + debug!("PING"); + + let ping_req = request.into_inner(); + let ping_body = flatbuffers::root::<PingBody>(&ping_req.body); + if let Err(e) = ping_body { + error!("{}", e); + } else { + info!("ping_req:body(flatbuffer): {:?}", ping_body); + } + + let mut fbb = flatbuffers::FlatBufferBuilder::new(); + let payload = fbb.create_vector(b"hello, caller"); + + let mut builder = PingBodyBuilder::new(&mut fbb); + builder.add_payload(payload); + let root = builder.finish(); + fbb.finish(root, None); + + let finished_data = fbb.finished_data(); + + Ok(tonic::Response::new(PingResponse { + version: 1, + body: finished_data.to_vec(), + })) + } + + async fn list_bucket(&self, request: Request<ListBucketRequest>) -> Result<Response<ListBucketResponse>, Status> { + debug!("list bucket"); + + let request = request.into_inner(); + let options = match serde_json::from_str::<BucketOptions>(&request.options) { + Ok(options) => options, + Err(err) => { + return Ok(tonic::Response::new(ListBucketResponse { + success: false, + bucket_infos: Vec::new(), + error_info: Some(format!("decode BucketOptions failed: {}", err.to_string())), + })) + } + }; + match self.local_peer.list_bucket(&options).await { + Ok(bucket_infos) => { + let bucket_infos = bucket_infos + .into_iter() + .filter_map(|bucket_info| serde_json::to_string(&bucket_info).ok()) + .collect(); + Ok(tonic::Response::new(ListBucketResponse { + success: true, + bucket_infos, + error_info: None, + })) + } + + Err(err) => Ok(tonic::Response::new(ListBucketResponse { + success: false, + bucket_infos: Vec::new(), + error_info: Some(format!("list failed: {}", err.to_string())), + })), + } + }
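Every handler below repeats the same envelope: options and payloads cross the wire as JSON strings, and failures are reported in-band via `success`/`error_info` rather than as a `tonic::Status`. A self-contained sketch of that shape; the `Envelope` type and names here are illustrative, not the generated tonic messages:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
struct BucketOptions {
    force_create: bool,
}

/// Illustrative stand-in for a generated tonic response message.
#[derive(Debug)]
struct Envelope<T> {
    success: bool,
    payload: Option<T>,
    error_info: Option<String>,
}

/// Decode the JSON-encoded options and fold any failure into the envelope,
/// mirroring the match arms in the handlers above.
fn handle(options_json: &str) -> Envelope<BucketOptions> {
    match serde_json::from_str::<BucketOptions>(options_json) {
        Ok(options) => Envelope { success: true, payload: Some(options), error_info: None },
        Err(err) => Envelope {
            success: false,
            payload: None,
            error_info: Some(format!("decode BucketOptions failed: {err}")),
        },
    }
}

fn main() {
    println!("{:?}", handle(r#"{"force_create":true}"#));
    println!("{:?}", handle("not json"));
}
```

Keeping application errors in-band means a `Status` error is reserved for transport-level failures, so callers can distinguish "the disk said no" from "the node is unreachable".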
+ async fn make_bucket(&self, request: Request<MakeBucketRequest>) -> Result<Response<MakeBucketResponse>, Status> { + debug!("make bucket"); + + let request = request.into_inner(); + let options = match serde_json::from_str::<MakeBucketOptions>(&request.options) { + Ok(options) => options, + Err(err) => { + return Ok(tonic::Response::new(MakeBucketResponse { + success: false, + error_info: Some(format!("decode MakeBucketOptions failed: {}", err.to_string())), + })) + } + }; + match self.local_peer.make_bucket(&request.name, &options).await { + Ok(_) => Ok(tonic::Response::new(MakeBucketResponse { + success: true, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(MakeBucketResponse { + success: false, + error_info: Some(format!("make failed: {}", err.to_string())), + })), + } + } + + async fn get_bucket_info(&self, request: Request<GetBucketInfoRequest>) -> Result<Response<GetBucketInfoResponse>, Status> { + debug!("get bucket info"); + + let request = request.into_inner(); + let options = match serde_json::from_str::<BucketOptions>(&request.options) { + Ok(options) => options, + Err(err) => { + return Ok(tonic::Response::new(GetBucketInfoResponse { + success: false, + bucket_info: String::new(), + error_info: Some(format!("decode BucketOptions failed: {}", err.to_string())), + })) + } + }; + match self.local_peer.get_bucket_info(&request.bucket, &options).await { + Ok(bucket_info) => { + let bucket_info = match serde_json::to_string(&bucket_info) { + Ok(bucket_info) => bucket_info, + Err(err) => { + return Ok(tonic::Response::new(GetBucketInfoResponse { + success: false, + bucket_info: String::new(), + error_info: Some(format!("encode BucketInfo failed: {}", err.to_string())), + })); + } + }; + Ok(tonic::Response::new(GetBucketInfoResponse { + success: true, + bucket_info, + error_info: None, + })) + } + + Err(err) => Ok(tonic::Response::new(GetBucketInfoResponse { + success: false, + bucket_info: String::new(), + error_info: Some(format!("get bucket info failed: {}", err.to_string())), + })), + } + } + + async fn delete_bucket(&self, request: Request<DeleteBucketRequest>) -> Result<Response<DeleteBucketResponse>, Status> { + debug!("delete bucket"); + + let request = request.into_inner(); + match self.local_peer.delete_bucket(&request.bucket).await { + Ok(_) => Ok(tonic::Response::new(DeleteBucketResponse { + success: true, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(DeleteBucketResponse { + success: false, + error_info: Some(format!("delete failed: {}", err.to_string())), + })), + } + } + + async fn read_all(&self, request: Request<ReadAllRequest>) -> Result<Response<ReadAllResponse>, Status> { + debug!("read all"); + + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + match disk.read_all(&request.volume, &request.path).await { + Ok(data) => Ok(tonic::Response::new(ReadAllResponse { + success: true, + data: data.to_vec(), + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(ReadAllResponse { + success: false, + data: Vec::new(), + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(ReadAllResponse { + success: false, + data: Vec::new(), + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn write_all(&self, request: Request<WriteAllRequest>) -> Result<Response<WriteAllResponse>, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + match disk.write_all(&request.volume, &request.path, request.data).await { + Ok(_) => Ok(tonic::Response::new(WriteAllResponse { + success: true,
error_info: None, + })), + Err(err) => Ok(tonic::Response::new(WriteAllResponse { + success: false, + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(WriteAllResponse { + success: false, + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn delete(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + let options = match serde_json::from_str::(&request.options) { + Ok(options) => options, + Err(_) => { + return Ok(tonic::Response::new(DeleteResponse { + success: false, + error_info: Some("can not decode DeleteOptions".to_string()), + })); + } + }; + match disk.delete(&request.volume, &request.path, options).await { + Ok(_) => Ok(tonic::Response::new(DeleteResponse { + success: true, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(DeleteResponse { + success: false, + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(DeleteResponse { + success: false, + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn rename_file(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + match disk + .rename_file(&request.src_volume, &request.src_path, &request.dst_volume, &request.dst_path) + .await + { + Ok(_) => Ok(tonic::Response::new(RenameFileResponse { + success: true, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(RenameFileResponse { + success: false, + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(RenameFileResponse { + success: false, + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn write(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + let file_writer = if request.is_append { + disk.append_file(&request.volume, &request.path).await + } else { + disk.create_file("", &request.volume, &request.path, 0).await + }; + + match file_writer { + Ok(mut file_writer) => match file_writer.write(&request.data).await { + Ok(_) => Ok(tonic::Response::new(WriteResponse { + success: true, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(WriteResponse { + success: false, + error_info: Some(err.to_string()), + })), + }, + Err(err) => Ok(tonic::Response::new(WriteResponse { + success: false, + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(WriteResponse { + success: false, + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn read_at(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + match disk.read_file(&request.volume, &request.path).await { + Ok(mut file_reader) => { + match file_reader + .read_at(request.offset.try_into().unwrap(), request.length.try_into().unwrap()) + .await + { + Ok((data, read_size)) => Ok(tonic::Response::new(ReadAtResponse { + success: true, + data, + read_size: read_size.try_into().unwrap(), + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(ReadAtResponse { + success: false, + data: Vec::new(), + read_size: -1, + error_info: Some(err.to_string()), + })), + } + } + Err(err) => Ok(tonic::Response::new(ReadAtResponse { + success: false, + data: Vec::new(), + read_size: -1, + error_info: Some(err.to_string()), + })), + 
} + } else { + Ok(tonic::Response::new(ReadAtResponse { + success: false, + data: Vec::new(), + read_size: -1, + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn list_dir(&self, request: Request<ListDirRequest>) -> Result<Response<ListDirResponse>, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + match disk.list_dir("", &request.volume, "", 0).await { + Ok(volumes) => Ok(tonic::Response::new(ListDirResponse { + success: true, + volumes, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(ListDirResponse { + success: false, + volumes: Vec::new(), + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(ListDirResponse { + success: false, + volumes: Vec::new(), + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn walk_dir(&self, request: Request<WalkDirRequest>) -> Result<Response<WalkDirResponse>, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + let opts = match serde_json::from_str::<WalkDirOptions>(&request.walk_dir_options) { + Ok(options) => options, + Err(_) => { + return Ok(tonic::Response::new(WalkDirResponse { + success: false, + meta_cache_entry: Vec::new(), + error_info: Some("can not decode WalkDirOptions".to_string()), + })); + } + }; + match disk.walk_dir(opts).await { + Ok(entries) => { + let entries = entries + .into_iter() + .filter_map(|entry| serde_json::to_string(&entry).ok()) + .collect(); + Ok(tonic::Response::new(WalkDirResponse { + success: true, + meta_cache_entry: entries, + error_info: None, + })) + } + Err(err) => Ok(tonic::Response::new(WalkDirResponse { + success: false, + meta_cache_entry: Vec::new(), + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(WalkDirResponse { + success: false, + meta_cache_entry: Vec::new(), + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn rename_data(&self, request: Request<RenameDataRequest>) -> Result<Response<RenameDataResponse>, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + let file_info = match serde_json::from_str::<FileInfo>(&request.file_info) { + Ok(file_info) => file_info, + Err(_) => { + return Ok(tonic::Response::new(RenameDataResponse { + success: false, + rename_data_resp: String::new(), + error_info: Some("can not decode FileInfo".to_string()), + })); + } + }; + match disk + .rename_data(&request.src_volume, &request.src_path, file_info, &request.dst_volume, &request.dst_path) + .await + { + Ok(rename_data_resp) => { + let rename_data_resp = match serde_json::to_string(&rename_data_resp) { + Ok(file_info) => file_info, + Err(_) => { + return Ok(tonic::Response::new(RenameDataResponse { + success: false, + rename_data_resp: String::new(), + error_info: Some("can not encode RenameDataResp".to_string()), + })); + } + }; + Ok(tonic::Response::new(RenameDataResponse { + success: true, + rename_data_resp, + error_info: None, + })) + } + Err(err) => Ok(tonic::Response::new(RenameDataResponse { + success: false, + rename_data_resp: String::new(), + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(RenameDataResponse { + success: false, + rename_data_resp: String::new(), + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn make_volumes(&self, request: Request<MakeVolumesRequest>) -> Result<Response<MakeVolumesResponse>, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + match disk.make_volumes(request.volumes.iter().map(|s| &**s).collect()).await { + Ok(_) =>
Ok(tonic::Response::new(MakeVolumesResponse { + success: true, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(MakeVolumesResponse { + success: false, + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(MakeVolumesResponse { + success: false, + error_info: Some(format!("can not find disk, all disks: {:?}", self.all_disk().await)), + })) + } + } + + async fn make_volume(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + match disk.make_volume(&request.volume).await { + Ok(_) => Ok(tonic::Response::new(MakeVolumeResponse { + success: true, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(MakeVolumeResponse { + success: false, + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(MakeVolumeResponse { + success: false, + error_info: Some(format!("can not find disk, all disks: {:?}", self.all_disk().await)), + })) + } + } + + async fn list_volumes(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + match disk.list_volumes().await { + Ok(volume_infos) => { + let volume_infos = volume_infos + .into_iter() + .filter_map(|volume_info| serde_json::to_string(&volume_info).ok()) + .collect(); + Ok(tonic::Response::new(ListVolumesResponse { + success: true, + volume_infos, + error_info: None, + })) + } + Err(err) => Ok(tonic::Response::new(ListVolumesResponse { + success: false, + volume_infos: Vec::new(), + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(ListVolumesResponse { + success: false, + volume_infos: Vec::new(), + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn stat_volume(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + match disk.stat_volume(&request.volume).await { + Ok(volume_info) => match serde_json::to_string(&volume_info) { + Ok(volume_info) => Ok(tonic::Response::new(StatVolumeResponse { + success: true, + volume_info, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(StatVolumeResponse { + success: false, + volume_info: String::new(), + error_info: Some(format!("encode VolumeInfo failed, {}", err.to_string())), + })), + }, + Err(err) => Ok(tonic::Response::new(StatVolumeResponse { + success: false, + volume_info: String::new(), + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(StatVolumeResponse { + success: false, + volume_info: String::new(), + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn write_metadata(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + let file_info = match serde_json::from_str::(&request.file_info) { + Ok(file_info) => file_info, + Err(err) => { + return Ok(tonic::Response::new(WriteMetadataResponse { + success: false, + error_info: Some(format!("decode FileInfo failed, {}", err.to_string())), + })); + } + }; + match disk.write_metadata("", &request.volume, &request.path, file_info).await { + Ok(_) => Ok(tonic::Response::new(WriteMetadataResponse { + success: true, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(WriteMetadataResponse { + success: false, + error_info: Some(err.to_string()), + })), + } + } else { + 
Ok(tonic::Response::new(WriteMetadataResponse { + success: false, + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn read_version(&self, request: Request<ReadVersionRequest>) -> Result<Response<ReadVersionResponse>, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + let opts = match serde_json::from_str::<ReadOptions>(&request.opts) { + Ok(options) => options, + Err(_) => { + return Ok(tonic::Response::new(ReadVersionResponse { + success: false, + file_info: String::new(), + error_info: Some("can not decode ReadOptions".to_string()), + })); + } + }; + match disk + .read_version("", &request.volume, &request.path, &request.version_id, &opts) + .await + { + Ok(file_info) => match serde_json::to_string(&file_info) { + Ok(file_info) => Ok(tonic::Response::new(ReadVersionResponse { + success: true, + file_info, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(ReadVersionResponse { + success: false, + file_info: String::new(), + error_info: Some(format!("encode FileInfo failed, {}", err.to_string())), + })), + }, + Err(err) => Ok(tonic::Response::new(ReadVersionResponse { + success: false, + file_info: String::new(), + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(ReadVersionResponse { + success: false, + file_info: String::new(), + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn read_xl(&self, request: Request<ReadXlRequest>) -> Result<Response<ReadXlResponse>, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + match disk.read_xl(&request.volume, &request.path, request.read_data).await { + Ok(raw_file_info) => match serde_json::to_string(&raw_file_info) { + Ok(raw_file_info) => Ok(tonic::Response::new(ReadXlResponse { + success: true, + raw_file_info, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(ReadXlResponse { + success: false, + raw_file_info: String::new(), + error_info: Some(format!("encode RawFileInfo failed, {}", err.to_string())), + })), + }, + Err(err) => Ok(tonic::Response::new(ReadXlResponse { + success: false, + raw_file_info: String::new(), + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(ReadXlResponse { + success: false, + raw_file_info: String::new(), + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn delete_versions(&self, request: Request<DeleteVersionsRequest>) -> Result<Response<DeleteVersionsResponse>, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + let mut versions = Vec::with_capacity(request.versions.len()); + for version in request.versions.iter() { + match serde_json::from_str::<FileInfoVersions>(&version) { + Ok(version) => versions.push(version), + Err(_) => { + return Ok(tonic::Response::new(DeleteVersionsResponse { + success: false, + errors: Vec::new(), + error_info: Some("can not decode FileInfoVersions".to_string()), + })); + } + }; + } + let opts = match serde_json::from_str::<DeleteOptions>(&request.opts) { + Ok(opts) => opts, + Err(_) => { + return Ok(tonic::Response::new(DeleteVersionsResponse { + success: false, + errors: Vec::new(), + error_info: Some("can not decode DeleteOptions".to_string()), + })); + } + }; + match disk.delete_versions(&request.volume, versions, opts).await { + Ok(errors) => { + let errors = errors + .into_iter() + .map(|error| match error { + Some(e) => e.to_string(), + None => "".to_string(), + }) + .collect(); + + Ok(tonic::Response::new(DeleteVersionsResponse { + success: true, + errors, + error_info: None, + })) + } + Err(err) =>
Ok(tonic::Response::new(DeleteVersionsResponse { + success: false, + errors: Vec::new(), + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(DeleteVersionsResponse { + success: false, + errors: Vec::new(), + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn read_multiple(&self, request: Request<ReadMultipleRequest>) -> Result<Response<ReadMultipleResponse>, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + let read_multiple_req = match serde_json::from_str::<ReadMultipleReq>(&request.read_multiple_req) { + Ok(read_multiple_req) => read_multiple_req, + Err(_) => { + return Ok(tonic::Response::new(ReadMultipleResponse { + success: false, + read_multiple_resps: Vec::new(), + error_info: Some("can not decode ReadMultipleReq".to_string()), + })); + } + }; + match disk.read_multiple(read_multiple_req).await { + Ok(read_multiple_resps) => { + let read_multiple_resps = read_multiple_resps + .into_iter() + .filter_map(|read_multiple_resp| serde_json::to_string(&read_multiple_resp).ok()) + .collect(); + + Ok(tonic::Response::new(ReadMultipleResponse { + success: true, + read_multiple_resps, + error_info: None, + })) + } + Err(err) => Ok(tonic::Response::new(ReadMultipleResponse { + success: false, + read_multiple_resps: Vec::new(), + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(ReadMultipleResponse { + success: false, + read_multiple_resps: Vec::new(), + error_info: Some("can not find disk".to_string()), + })) + } + } + + async fn delete_volume(&self, request: Request<DeleteVolumeRequest>) -> Result<Response<DeleteVolumeResponse>, Status> { + let request = request.into_inner(); + if let Some(disk) = self.find_disk(&request.disk).await { + match disk.delete_volume(&request.volume).await { + Ok(_) => Ok(tonic::Response::new(DeleteVolumeResponse { + success: true, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(DeleteVolumeResponse { + success: false, + error_info: Some(err.to_string()), + })), + } + } else { + Ok(tonic::Response::new(DeleteVolumeResponse { + success: false, + error_info: Some("can not find disk".to_string()), + })) + } + } +} diff --git a/rustfs/src/main.rs b/rustfs/src/main.rs index 54aeccac..aa51bc44 100644 --- a/rustfs/src/main.rs +++ b/rustfs/src/main.rs @@ -1,16 +1,25 @@ mod config; +mod grpc; +mod service; mod storage; use clap::Parser; -use ecstore::error::Result; +use ecstore::{ + endpoints::EndpointServerPools, + error::Result, + store::{init_local_disks, update_erasure_type, ECStore}, +}; +use grpc::make_server; use hyper_util::{ rt::{TokioExecutor, TokioIo}, server::conn::auto::Builder as ConnBuilder, + service::TowerToHyperService, }; use s3s::{auth::SimpleAuth, service::S3ServiceBuilder}; +use service::hybrid; use std::{io::IsTerminal, net::SocketAddr, str::FromStr}; use tokio::net::TcpListener; -use tracing::{debug, info}; +use tracing::{debug, info, warn}; use tracing_error::ErrorLayer; use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt}; @@ -66,10 +75,19 @@ async fn run(opt: config::Opt) -> Result<()> { // }) // }; + // used by the RPC service + let (endpoint_pools, setup_type) = EndpointServerPools::from_volumes(opt.address.clone().as_str(), opt.volumes.clone())?; + + update_erasure_type(setup_type).await; + + // initialize the local disks + init_local_disks(endpoint_pools.clone()).await?; +
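Ordering matters in the startup sequence that follows: the disk registry must be filled before the spawned server answers RPCs that resolve disks by path, and before `ECStore::new` enters its retry loop, since it may be a peer's RPCs that let that loop succeed. A sketch of just that ordering, with illustrative stand-ins for the crate's own init functions:

```rust
// Stand-ins for the crate's own init functions; only the ordering is the point.
async fn init_local_disks() -> Result<(), String> { Ok(()) }
async fn serve_requests() { /* gRPC + S3 traffic would be served here */ }
async fn init_store() -> Result<(), String> { Ok(()) }

#[tokio::main]
async fn main() -> Result<(), String> {
    // 1. Register local disks first: RPC handlers look disks up by path.
    init_local_disks().await?;

    // 2. Serve traffic in the background so peers can reach this node while
    //    the store below is still waiting for format quorum.
    let server = tokio::spawn(serve_requests());

    // 3. Initialize the object layer; only now do S3 handlers stop returning
    //    "Not init".
    init_store().await?;

    server.await.map_err(|e| e.to_string())
}
```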
// Setup S3 service // this project uses the s3s crate to implement the S3 service let service = { - let mut b = S3ServiceBuilder::new(storage::ecfs::FS::new(opt.address.clone(), endpoint_pools).await?); + let mut b = S3ServiceBuilder::new(storage::ecfs::FS::new()); // set the access key and secret key // some of these values are read from the config file let mut access_key = String::from_str(config::DEFAULT_ACCESS_KEY).unwrap(); @@ -101,44 +119,59 @@ async fn run(opt: config::Opt) -> Result<()> { b.build() }; - let hyper_service = service.into_shared(); + let rpc_service = make_server(); - let http_server = ConnBuilder::new(TokioExecutor::new()); - let graceful = hyper_util::server::graceful::GracefulShutdown::new(); + tokio::spawn(async move { + let hyper_service = service.into_shared(); - let mut ctrl_c = std::pin::pin!(tokio::signal::ctrl_c()); + let hybrid_service = TowerToHyperService::new(hybrid(hyper_service, rpc_service)); - info!("server is running at http://{local_addr}"); + let http_server = ConnBuilder::new(TokioExecutor::new()); + let mut ctrl_c = std::pin::pin!(tokio::signal::ctrl_c()); + let graceful = hyper_util::server::graceful::GracefulShutdown::new(); + info!("server is running at http://{local_addr}"); - loop { - let (socket, _) = tokio::select! { - res = listener.accept() => { - match res { - Ok(conn) => conn, - Err(err) => { - tracing::error!("error accepting connection: {err}"); - continue; + loop { + let (socket, _) = tokio::select! { + res = listener.accept() => { + match res { + Ok(conn) => conn, + Err(err) => { + tracing::error!("error accepting connection: {err}"); + continue; + } } } - } - _ = ctrl_c.as_mut() => { - break; - } - }; + _ = ctrl_c.as_mut() => { + break; + } + }; - let conn = http_server.serve_connection(TokioIo::new(socket), hyper_service.clone()); - let conn = graceful.watch(conn.into_owned()); - tokio::spawn(async move { - let _ = conn.await; - }); - } + let conn = http_server.serve_connection(TokioIo::new(socket), hybrid_service.clone()); + let conn = graceful.watch(conn.into_owned()); + tokio::spawn(async move { + let _ = conn.await; + }); + } + + tokio::select! { + () = graceful.shutdown() => { + tracing::debug!("Gracefully shutdown!"); + }, + () = tokio::time::sleep(std::time::Duration::from_secs(10)) => { + tracing::debug!("Waited 10 seconds for graceful shutdown, aborting..."); + } + } + }); + + warn!("init store"); + // init store + ECStore::new(opt.address.clone(), endpoint_pools.clone()).await?; + warn!("init store success!"); tokio::select!
{ - () = graceful.shutdown() => { - tracing::debug!("Gracefully shutdown!"); - }, - () = tokio::time::sleep(std::time::Duration::from_secs(10)) => { - tracing::debug!("Waited 10 seconds for graceful shutdown, aborting..."); + _ = tokio::signal::ctrl_c() => { + } } diff --git a/rustfs/src/service.rs b/rustfs/src/service.rs new file mode 100644 index 00000000..ea1d4272 --- /dev/null +++ b/rustfs/src/service.rs @@ -0,0 +1,150 @@ +use std::pin::Pin; +use std::task::{Context, Poll}; + +use futures::Future; +use http_body::Frame; +use hyper::body::Incoming; +use hyper::{Request, Response}; +use pin_project_lite::pin_project; +use tower::Service; + +type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>; + +/// Generate a [`HybridService`] +pub(crate) fn hybrid<MakeRest, Grpc>(make_rest: MakeRest, grpc: Grpc) -> HybridService<MakeRest, Grpc> { HybridService { rest: make_rest, grpc } } + +/// The service that can serve both gRPC and REST HTTP Requests +#[derive(Clone)] +pub struct HybridService<Rest, Grpc> { + rest: Rest, + grpc: Grpc, +} + +impl<Rest, Grpc, RestBody, GrpcBody> Service<Request<Incoming>> for HybridService<Rest, Grpc> +where + Rest: Service<Request<Incoming>, Response = Response<RestBody>>, + Grpc: Service<Request<Incoming>, Response = Response<GrpcBody>>, + Rest::Error: Into<BoxError>, + Grpc::Error: Into<BoxError>, +{ + type Response = Response<HybridBody<RestBody, GrpcBody>>; + type Error = BoxError; + type Future = HybridFuture<Rest::Future, Grpc::Future>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { + match self.rest.poll_ready(cx) { + Poll::Ready(Ok(())) => match self.grpc.poll_ready(cx) { + Poll::Ready(Ok(())) => Poll::Ready(Ok(())), + Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())), + Poll::Pending => Poll::Pending, + }, + Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())), + Poll::Pending => Poll::Pending, + } + } + + /// When calling the service, gRPC is served if the HTTP request version is HTTP/2 + /// and if the Content-Type is "application/grpc"; otherwise, the request is served + /// as a REST request + fn call(&mut self, req: Request<Incoming>) -> Self::Future { + match (req.version(), req.headers().get(hyper::header::CONTENT_TYPE)) { + (hyper::Version::HTTP_2, Some(hv)) if hv.as_bytes().starts_with(b"application/grpc") => HybridFuture::Grpc { + grpc_future: self.grpc.call(req), + }, + _ => HybridFuture::Rest { + rest_future: self.rest.call(req), + }, + } + } +} + +pin_project! { + /// A hybrid HTTP body that will be used in the response type for the + /// [`HybridFuture`], i.e., the output of the [`HybridService`] + #[project = HybridBodyProj] + pub enum HybridBody<RestBody, GrpcBody> { + Rest { + #[pin] + rest_body: RestBody + }, + Grpc { + #[pin] + grpc_body: GrpcBody + }, + } +} + +impl<RestBody, GrpcBody> http_body::Body for HybridBody<RestBody, GrpcBody> +where + RestBody: http_body::Body + Send + Unpin, + GrpcBody: http_body::Body<Data = RestBody::Data> + Send + Unpin, + RestBody::Error: Into<BoxError>, + GrpcBody::Error: Into<BoxError>, +{ + type Data = RestBody::Data; + type Error = BoxError; + + fn is_end_stream(&self) -> bool { + match self { + Self::Rest { rest_body } => rest_body.is_end_stream(), + Self::Grpc { grpc_body } => grpc_body.is_end_stream(), + } + } + + fn poll_frame(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> { + match self.project() { + HybridBodyProj::Rest { rest_body } => rest_body.poll_frame(cx).map_err(Into::into), + HybridBodyProj::Grpc { grpc_body } => grpc_body.poll_frame(cx).map_err(Into::into), + } + } + + fn size_hint(&self) -> http_body::SizeHint { + match self { + Self::Rest { rest_body } => rest_body.size_hint(), + Self::Grpc { grpc_body } => grpc_body.size_hint(), + } + } +} + +pin_project!
{ + /// A future that accepts an HTTP request as input and returns an HTTP + /// response as output for the [`HybridService`] + #[project = HybridFutureProj] + pub enum HybridFuture<RestFuture, GrpcFuture> { + Rest { + #[pin] + rest_future: RestFuture, + }, + Grpc { + #[pin] + grpc_future: GrpcFuture, + } + } +} + +impl<RestFuture, GrpcFuture, RestBody, GrpcBody, RestError, GrpcError> Future for HybridFuture<RestFuture, GrpcFuture> +where + RestFuture: Future<Output = Result<Response<RestBody>, RestError>>, + GrpcFuture: Future<Output = Result<Response<GrpcBody>, GrpcError>>, + RestError: Into<BoxError>, + GrpcError: Into<BoxError>, +{ + type Output = Result<Response<HybridBody<RestBody, GrpcBody>>, BoxError>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + match self.project() { + HybridFutureProj::Rest { rest_future } => match rest_future.poll(cx) { + Poll::Ready(Ok(res)) => Poll::Ready(Ok(res.map(|rest_body| HybridBody::Rest { rest_body }))), + Poll::Ready(Err(err)) => Poll::Ready(Err(err.into())), + Poll::Pending => Poll::Pending, + }, + HybridFutureProj::Grpc { grpc_future } => match grpc_future.poll(cx) { + Poll::Ready(Ok(res)) => Poll::Ready(Ok(res.map(|grpc_body| HybridBody::Grpc { grpc_body }))), + Poll::Ready(Err(err)) => Poll::Ready(Err(err.into())), + Poll::Pending => Poll::Pending, + }, + } + } +} diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index f7fb1d8e..fd8bcc9a 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -1,5 +1,6 @@ use bytes::Bytes; use ecstore::disk::error::DiskError; +use ecstore::store::new_object_layer_fn; use ecstore::store_api::BucketOptions; use ecstore::store_api::CompletePart; use ecstore::store_api::DeleteBucketOptions; @@ -22,12 +23,10 @@ use s3s::S3; use s3s::{S3Request, S3Response}; use std::fmt::Debug; use std::str::FromStr; -use tracing::warn; use transform_stream::AsyncTryStream; use uuid::Uuid; use ecstore::error::Result; -use ecstore::store::ECStore; use tracing::debug; macro_rules! try_ { @@ -43,13 +42,13 @@ macro_rules! try_ { #[derive(Debug)] pub struct FS { - pub store: ECStore, + // pub store: ECStore, } impl FS { - pub async fn new(address: String, endpoints: Vec<String>) -> Result<Self> { - let store: ECStore = ECStore::new(address, endpoints).await?; - Ok(Self { store }) + pub fn new() -> Self { + // let store: ECStore = ECStore::new(address, endpoint_pools).await?; + Self {} } } #[async_trait::async_trait] @@ -62,8 +61,15 @@ impl S3 for FS { async fn create_bucket(&self, req: S3Request<CreateBucketInput>) -> S3Result<S3Response<CreateBucketOutput>> { let input = req.input; + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + try_!( - self.store + store .make_bucket(&input.bucket, &MakeBucketOptions { force_create: true }) .await ); @@ -88,8 +94,14 @@ impl S3 for FS { async fn delete_bucket(&self, req: S3Request<DeleteBucketInput>) -> S3Result<S3Response<DeleteBucketOutput>> { let input = req.input; // TODO: DeleteBucketInput has no force parameter?
+ let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; try_!( - self.store + store .delete_bucket(&input.bucket, &DeleteBucketOptions { force: false }) .await ); @@ -117,7 +129,13 @@ impl S3 for FS { let objects: Vec = vec![dobj]; - let (dobjs, _errs) = try_!(self.store.delete_objects(&bucket, objects, ObjectOptions::default()).await); + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + let (dobjs, _errs) = try_!(store.delete_objects(&bucket, objects, ObjectOptions::default()).await); // TODO: let errors; @@ -178,8 +196,15 @@ impl S3 for FS { }) .collect(); - let (dobjs, errs) = try_!(self.store.delete_objects(&bucket, objects, ObjectOptions::default()).await); - warn!("delete_objects res {:?} {:?}", &dobjs, errs); + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + + let (dobjs, _errs) = try_!(store.delete_objects(&bucket, objects, ObjectOptions::default()).await); + // info!("delete_objects res {:?} {:?}", &dobjs, errs); let deleted = dobjs .iter() @@ -212,7 +237,14 @@ impl S3 for FS { // mc get 1 let input = req.input; - if let Err(e) = self.store.get_bucket_info(&input.bucket, &BucketOptions {}).await { + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + + if let Err(e) = store.get_bucket_info(&input.bucket, &BucketOptions {}).await { if DiskError::VolumeNotFound.is(&e) { return Err(s3_error!(NoSuchBucket)); } else { @@ -248,11 +280,14 @@ impl S3 for FS { let h = HeaderMap::new(); let opts = &ObjectOptions::default(); - let reader = try_!( - self.store - .get_object_reader(bucket.as_str(), key.as_str(), range, h, opts) - .await - ); + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + + let reader = try_!(store.get_object_reader(bucket.as_str(), key.as_str(), range, h, opts).await); let info = reader.object_info; @@ -275,7 +310,14 @@ impl S3 for FS { async fn head_bucket(&self, req: S3Request) -> S3Result> { let input = req.input; - if let Err(e) = self.store.get_bucket_info(&input.bucket, &BucketOptions {}).await { + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + + if let Err(e) = store.get_bucket_info(&input.bucket, &BucketOptions {}).await { if DiskError::VolumeNotFound.is(&e) { return Err(s3_error!(NoSuchBucket)); } else { @@ -292,7 +334,14 @@ impl S3 for FS { // mc get 2 let HeadObjectInput { bucket, key, .. 
} = req.input; - let info = try_!(self.store.get_object_info(&bucket, &key, &ObjectOptions::default()).await); + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + + let info = try_!(store.get_object_info(&bucket, &key, &ObjectOptions::default()).await); debug!("info {:?}", info); let content_type = try_!(ContentType::from_str("application/x-msdownload")); @@ -312,7 +361,14 @@ impl S3 for FS { async fn list_buckets(&self, _: S3Request) -> S3Result> { // mc ls - let bucket_infos = try_!(self.store.list_bucket(&BucketOptions {}).await); + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + + let bucket_infos = try_!(store.list_bucket(&BucketOptions {}).await); let buckets: Vec = bucket_infos .iter() @@ -360,8 +416,15 @@ impl S3 for FS { let prefix = prefix.unwrap_or_default(); let delimiter = delimiter.unwrap_or_default(); + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + let object_infos = try_!( - self.store + store .list_objects_v2( &bucket, &prefix, @@ -440,9 +503,16 @@ impl S3 for FS { let reader = PutObjReader::new(body, content_length as usize); - try_!(self.store.put_object(&bucket, &key, reader, &ObjectOptions::default()).await); + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; - // self.store.put_object(bucket, object, data, opts); + try_!(store.put_object(&bucket, &key, reader, &ObjectOptions::default()).await); + + // store.put_object(bucket, object, data, opts); let output = PutObjectOutput { ..Default::default() }; Ok(S3Response::new(output)) @@ -461,11 +531,15 @@ impl S3 for FS { debug!("create_multipart_upload meta {:?}", &metadata); - let MultipartUploadResult { upload_id, .. } = try_!( - self.store - .new_multipart_upload(&bucket, &key, &ObjectOptions::default()) - .await - ); + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + + let MultipartUploadResult { upload_id, .. 
} = + try_!(store.new_multipart_upload(&bucket, &key, &ObjectOptions::default()).await); let output = CreateMultipartUploadOutput { bucket: Some(bucket), @@ -500,11 +574,14 @@ impl S3 for FS { let data = PutObjReader::new(body, content_length as usize); let opts = ObjectOptions::default(); - try_!( - self.store - .put_object_part(&bucket, &key, &upload_id, part_id, data, &opts) - .await - ); + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + + try_!(store.put_object_part(&bucket, &key, &upload_id, part_id, data, &opts).await); let output = UploadPartOutput { ..Default::default() }; Ok(S3Response::new(output)) @@ -559,8 +636,15 @@ impl S3 for FS { uploaded_parts.push(CompletePart::from(part)); } + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + try_!( - self.store + store .complete_multipart_upload(&bucket, &key, &upload_id, uploaded_parts, opts) .await ); @@ -582,9 +666,16 @@ impl S3 for FS { bucket, key, upload_id, .. } = req.input; + let layer = new_object_layer_fn(); + let lock = layer.read().await; + let store = match lock.as_ref() { + Some(s) => s, + None => return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("Not init",))), + }; + let opts = &ObjectOptions::default(); try_!( - self.store + store .abort_multipart_upload(bucket.as_str(), key.as_str(), upload_id.as_str(), opts) .await );
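Tying the section together: service.rs above multiplexes both protocols on one listener by sniffing each request. A self-contained sketch of just that dispatch predicate, using only `http` crate types; the `is_grpc` helper is illustrative, while `HybridService::call` above inlines the same check:

```rust
use http::{header::CONTENT_TYPE, Request, Version};

/// Route a request the way HybridService::call does: gRPC only when the
/// request is HTTP/2 *and* its Content-Type starts with "application/grpc";
/// everything else falls through to the REST (S3) handler.
fn is_grpc<B>(req: &Request<B>) -> bool {
    req.version() == Version::HTTP_2
        && req
            .headers()
            .get(CONTENT_TYPE)
            .map(|hv| hv.as_bytes().starts_with(b"application/grpc"))
            .unwrap_or(false)
}

fn main() {
    let grpc = Request::builder()
        .version(Version::HTTP_2)
        .header(CONTENT_TYPE, "application/grpc+proto")
        .body(())
        .unwrap();
    let rest = Request::builder()
        .header(CONTENT_TYPE, "application/xml")
        .body(())
        .unwrap();
    assert!(is_grpc(&grpc));
    assert!(!is_grpc(&rest));
    println!("dispatch checks passed");
}
```

The prefix match (rather than an exact comparison) is deliberate: tonic sends `application/grpc+proto`, and other encodings such as `application/grpc+json` must route the same way.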