diff --git a/.docker/Dockerfile.devenv b/.docker/Dockerfile.devenv
index e95027d2..de2fcb49 100644
--- a/.docker/Dockerfile.devenv
+++ b/.docker/Dockerfile.devenv
@@ -4,7 +4,7 @@ ENV LANG C.UTF-8
 
 RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list
 
-RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev -y
+RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y
 
 # install protoc
 RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v27.0/protoc-27.0-linux-x86_64.zip \
diff --git a/.docker/Dockerfile.ubuntu22.04 b/.docker/Dockerfile.ubuntu22.04
index 546b16b7..b955de8e 100644
--- a/.docker/Dockerfile.ubuntu22.04
+++ b/.docker/Dockerfile.ubuntu22.04
@@ -4,7 +4,7 @@ ENV LANG C.UTF-8
 
 RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list
 
-RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev -y
+RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y
 
 # install protoc
 RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v27.0/protoc-27.0-linux-x86_64.zip \
diff --git a/Cargo.lock b/Cargo.lock
index b6bba26a..b5722b5c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -17,21 +17,6 @@ version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
 
-[[package]]
-name = "admin"
-version = "0.0.1"
-dependencies = [
- "axum",
- "ecstore",
- "futures-util",
- "hyper",
- "mime",
- "serde",
- "serde_json",
- "time",
- "tower 0.5.2",
-]
-
 [[package]]
 name = "aead"
 version = "0.5.2"
@@ -1960,6 +1945,7 @@ dependencies = [
  "tracing-error",
  "transform-stream",
  "url",
+ "urlencoding",
  "uuid",
  "winapi",
  "workers",
@@ -5309,7 +5295,6 @@ dependencies = [
 name = "rustfs"
 version = "0.1.0"
 dependencies = [
- "admin",
 "async-trait",
 "atoi",
 "axum",
diff --git a/Cargo.toml b/Cargo.toml
index 01e4a072..a78ec0d7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,7 +7,6 @@ members = [
    "common/common",  # Shared utilities and data structures
    "common/lock",    # Distributed locking implementation
    "common/protos",  # Protocol buffer definitions
-    "api/admin",      # Admin HTTP API endpoints
    "reader",         # Object reading service
    "common/workers", # Worker thread pools and task scheduling
    "iam",            # Identity and Access Management
diff --git a/Makefile b/Makefile
index b3b2e83a..94dd8853 100644
--- a/Makefile
+++ b/Makefile
@@ -37,9 +37,9 @@ probe-e2e:
 # in target/rockylinux9.3/release/rustfs
 BUILD_OS ?= rockylinux9.3
 .PHONY: build
-build: ROCKYLINUX_BUILD_IMAGE_NAME = $(BUILD_OS):v1
+build: ROCKYLINUX_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
 build: ROCKYLINUX_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
-build: BUILD_CMD = /root/.cargo/bin/cargo build --release --target-dir /root/s3-rustfs/target/$(BUILD_OS)
+build: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
 build:
 	$(DOCKER_CLI) build -t $(ROCKYLINUX_BUILD_IMAGE_NAME) -f $(DOCKERFILE_PATH)/Dockerfile.$(BUILD_OS) .
 	$(DOCKER_CLI) run --rm --name $(ROCKYLINUX_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(ROCKYLINUX_BUILD_IMAGE_NAME) $(BUILD_CMD)
diff --git a/TODO.md b/TODO.md
index 2aa1ca64..597a00f4 100644
--- a/TODO.md
+++ b/TODO.md
@@ -57,3 +57,41 @@
 - [ ] 对象压缩
 - [ ] STS
 - [ ] 分层(阿里云、腾讯云、S3远程对接)
+
+
+
+scp ./target/ubuntu22.04/release/rustfs.zip root@8.130.183.154:~/
+scp ./target/ubuntu22.04/release/rustfs.zip root@8.130.177.182:~/
+scp ./target/ubuntu22.04/release/rustfs.zip root@8.130.91.189:~/
+scp ./target/ubuntu22.04/release/rustfs.zip root@8.130.182.114:~/
+
+scp ./target/x86_64-unknown-linux-musl/release/rustfs root@8.130.183.154:~/
+scp ./target/x86_64-unknown-linux-musl/release/rustfs root@8.130.177.182:~/
+scp ./target/x86_64-unknown-linux-musl/release/rustfs root@8.130.91.189:~/
+scp ./target/x86_64-unknown-linux-musl/release/rustfs root@8.130.182.114:~/
+
+
+
+
+
+  2025-03-11T06:18:50.011565Z DEBUG s3s::service: req: Request { method: PUT, uri: /rustfs/rpc/put_file_stream?disk=http://node2:9000/data/rustfs2&volume=.rustfs.sys/tmp&path=a45ade1a-e09b-4eb4-bac1-8b5f55f7d438/235da61f-a705-4f9a-aa21-7801d2eaf61d/part.1&append=false, version: HTTP/1.1, headers: {"accept": "*/*", "host": "node2:9000", "transfer-encoding": "chunked"}, body: Body { hyper: Body(Streaming) } }
+    at /Users/weisd/.cargo/git/checkouts/s3s-58426f2d17c34859/ab139f7/crates/s3s/src/service.rs:81
+    in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00
+
+  2025-03-11T06:18:50.011603Z DEBUG s3s::ops: parsing path-style request, decoded_uri_path: "/rustfs/rpc/put_file_stream"
+    at /Users/weisd/.cargo/git/checkouts/s3s-58426f2d17c34859/ab139f7/crates/s3s/src/ops/mod.rs:266
+    in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00
+
+  2025-03-11T06:18:50.011651Z DEBUG s3s::ops: body_changed: false, decoded_content_length: None, has_multipart: false
+    at /Users/weisd/.cargo/git/checkouts/s3s-58426f2d17c34859/ab139f7/crates/s3s/src/ops/mod.rs:342
+    in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00
+
+  2025-03-11T06:18:50.011687Z WARN rustfs::admin::rpc: handle PutFile
+    at rustfs/src/admin/rpc.rs:120
+    in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00
+
+  2025-03-11T06:18:50.011716Z DEBUG s3s::ops: custom route returns error, err: S3Error(Inner { code: InvalidArgument, message: Some("get query failed1 Error(\"missing field `size`\")"), request_id: None, status_code: None, source: None, headers: None })
+    at /Users/weisd/.cargo/git/checkouts/s3s-58426f2d17c34859/ab139f7/crates/s3s/src/ops/mod.rs:227
+    in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00
+
+  2025-03-11T06:18:50.011751Z DEBUG s3s::service: res: Response { status: 400, version: HTTP/1.1, headers: {"content-type": "application/xml"}, body: Body { once: b"InvalidArgumentget query failed1 Error("missing field `size`")" } }
\ No newline at end of file
diff --git a/api/admin/Cargo.toml b/api/admin/Cargo.toml
deleted file mode 100644
index 7c86092c..00000000
--- a/api/admin/Cargo.toml
+++ /dev/null
@@ -1,21 +0,0 @@
-[package]
-name = "admin"
-edition.workspace = true
-license.workspace = true
-repository.workspace = true
-rust-version.workspace = true
-version.workspace = true
-
-[lints]
-workspace = true
-
-[dependencies]
-axum.workspace = true
-mime.workspace = true
-serde.workspace = true
-serde_json.workspace = true
-ecstore = { path = "../../ecstore" }
-time = { workspace = true, features = ["serde"] }
-tower.workspace = true
-futures-util = "0.3.31" -hyper.workspace = true diff --git a/api/admin/src/error.rs b/api/admin/src/error.rs deleted file mode 100644 index bbe24460..00000000 --- a/api/admin/src/error.rs +++ /dev/null @@ -1,98 +0,0 @@ -use axum::{ - body::Body, - http::{header::CONTENT_TYPE, HeaderValue, StatusCode}, - response::{IntoResponse, Response}, -}; -use mime::APPLICATION_JSON; -use serde::Serialize; - -#[derive(Serialize, Default)] -#[serde(rename_all = "PascalCase")] -pub struct ErrorResponse { - pub code: String, - pub message: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub key: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub bucket_name: Option, - pub resource: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub region: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub request_id: Option, - pub host_id: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub actual_object_size: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub range_requested: Option, -} - -impl IntoResponse for APIError { - fn into_response(self) -> Response { - let code = self.http_status_code; - let err_response = ErrorResponse::from(self); - let json_res = match serde_json::to_vec(&err_response) { - Ok(r) => r, - Err(e) => return (StatusCode::INTERNAL_SERVER_ERROR, format!("{e}")).into_response(), - }; - - Response::builder() - .status(code) - .header(CONTENT_TYPE, HeaderValue::from_static(APPLICATION_JSON.as_ref())) - .body(Body::from(json_res)) - .unwrap_or_else(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("{e}")).into_response()) - } -} - -#[derive(Default)] -pub struct APIError { - code: String, - description: String, - http_status_code: StatusCode, - object_size: Option, - range_requested: Option, -} - -pub enum ErrorCode { - ErrNotImplemented, - ErrServerNotInitialized, -} - -impl IntoResponse for ErrorCode { - fn into_response(self) -> Response { - APIError::from(self).into_response() - } -} - -impl From for APIError { - fn from(value: ErrorCode) -> Self { - use ErrorCode::*; - - match value { - ErrNotImplemented => APIError { - code: "NotImplemented".into(), - description: "A header you provided implies functionality that is not implemented.".into(), - http_status_code: StatusCode::NOT_IMPLEMENTED, - ..Default::default() - }, - ErrServerNotInitialized => APIError { - code: "ServerNotInitialized".into(), - description: "Server not initialized yet, please try again.".into(), - http_status_code: StatusCode::SERVICE_UNAVAILABLE, - ..Default::default() - }, - } - } -} - -impl From for ErrorResponse { - fn from(value: APIError) -> Self { - Self { - code: value.code, - message: value.description, - actual_object_size: value.object_size, - range_requested: value.range_requested, - ..Default::default() - } - } -} diff --git a/api/admin/src/handlers.rs b/api/admin/src/handlers.rs deleted file mode 100644 index fa5c33dd..00000000 --- a/api/admin/src/handlers.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod list_pools; diff --git a/api/admin/src/handlers/list_pools.rs b/api/admin/src/handlers/list_pools.rs deleted file mode 100644 index b80dd465..00000000 --- a/api/admin/src/handlers/list_pools.rs +++ /dev/null @@ -1,83 +0,0 @@ -use crate::error::ErrorCode; -use crate::Result as LocalResult; - -use axum::Json; -use ecstore::new_object_layer_fn; -use serde::Serialize; -use time::OffsetDateTime; - -#[derive(Serialize)] -pub struct PoolStatus { - id: i64, - cmdline: String, - #[serde(rename = "lastUpdate")] - #[serde(serialize_with = 
"time::serde::rfc3339::serialize")] - last_updat: OffsetDateTime, - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(rename = "decommissionInfo")] - decommission_info: Option, -} - -#[derive(Serialize)] -#[serde(rename_all = "camelCase")] -struct PoolDecommissionInfo { - #[serde(serialize_with = "time::serde::rfc3339::serialize")] - start_time: OffsetDateTime, - start_size: i64, - total_size: i64, - current_size: i64, - complete: bool, - failed: bool, - canceled: bool, - - #[serde(rename = "objectsDecommissioned")] - items_decommissioned: i64, - #[serde(rename = "objectsDecommissionedFailed")] - items_decommission_failed: i64, - #[serde(rename = "bytesDecommissioned")] - bytes_done: i64, - #[serde(rename = "bytesDecommissionedFailed")] - bytes_failed: i64, -} - -pub async fn handler() -> LocalResult>> { - // if ecstore::is_legacy().await { - // return Err(ErrorCode::ErrNotImplemented); - // } - // - // - - // todo 实用oncelock作为全局变量 - - let Some(store) = new_object_layer_fn() else { return Err(ErrorCode::ErrNotImplemented) }; - // todo, 调用pool.status()接口获取每个池的数据 - // - let mut result = Vec::new(); - for (idx, _pool) in store.pools.iter().enumerate() { - // 这里mock一下数据 - result.push(PoolStatus { - id: idx as _, - cmdline: "cmdline".into(), - last_updat: OffsetDateTime::now_utc(), - decommission_info: if idx % 2 == 0 { - Some(PoolDecommissionInfo { - start_time: OffsetDateTime::now_utc(), - start_size: 1, - total_size: 2, - current_size: 2, - complete: true, - failed: true, - canceled: true, - items_decommissioned: 1, - items_decommission_failed: 1, - bytes_done: 1, - bytes_failed: 1, - }) - } else { - None - }, - }) - } - - Ok(Json(result)) -} diff --git a/api/admin/src/lib.rs b/api/admin/src/lib.rs deleted file mode 100644 index a0477dfd..00000000 --- a/api/admin/src/lib.rs +++ /dev/null @@ -1,20 +0,0 @@ -pub mod error; -pub mod handlers; - -use axum::{extract::Request, response::Response, routing::get, BoxError, Router}; -use error::ErrorCode; -use handlers::list_pools; -use tower::Service; - -pub type Result = std::result::Result; - -const API_VERSION: &str = "/v3"; - -pub fn register_admin_router() -> impl Service, Future: Send> + Clone { - Router::new() - .nest( - "/rustfs/admin", - Router::new().nest(API_VERSION, Router::new().route("/pools/list", get(list_pools::handler))), - ) - .into_service() -} diff --git a/api/admin/src/middlewares.rs b/api/admin/src/middlewares.rs deleted file mode 100644 index e69de29b..00000000 diff --git a/common/protos/src/generated/flatbuffers_generated/models.rs b/common/protos/src/generated/flatbuffers_generated/models.rs index e4949fdc..aa1f6ae2 100644 --- a/common/protos/src/generated/flatbuffers_generated/models.rs +++ b/common/protos/src/generated/flatbuffers_generated/models.rs @@ -1,9 +1,10 @@ // automatically generated by the FlatBuffers compiler, do not modify + // @generated -use core::cmp::Ordering; use core::mem; +use core::cmp::Ordering; extern crate flatbuffers; use self::flatbuffers::{EndianScalar, Follow}; @@ -11,114 +12,112 @@ use self::flatbuffers::{EndianScalar, Follow}; #[allow(unused_imports, dead_code)] pub mod models { - use core::cmp::Ordering; - use core::mem; + use core::mem; + use core::cmp::Ordering; - extern crate flatbuffers; - use self::flatbuffers::{EndianScalar, Follow}; + extern crate flatbuffers; + use self::flatbuffers::{EndianScalar, Follow}; - pub enum PingBodyOffset {} - #[derive(Copy, Clone, PartialEq)] +pub enum PingBodyOffset {} +#[derive(Copy, Clone, PartialEq)] - pub struct PingBody<'a> { - pub _tab: 
flatbuffers::Table<'a>, +pub struct PingBody<'a> { + pub _tab: flatbuffers::Table<'a>, +} + +impl<'a> flatbuffers::Follow<'a> for PingBody<'a> { + type Inner = PingBody<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { _tab: flatbuffers::Table::new(buf, loc) } + } +} + +impl<'a> PingBody<'a> { + pub const VT_PAYLOAD: flatbuffers::VOffsetT = 4; + + pub const fn get_fully_qualified_name() -> &'static str { + "models.PingBody" + } + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + PingBody { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args PingBodyArgs<'args> + ) -> flatbuffers::WIPOffset> { + let mut builder = PingBodyBuilder::new(_fbb); + if let Some(x) = args.payload { builder.add_payload(x); } + builder.finish() + } + + + #[inline] + pub fn payload(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::>>(PingBody::VT_PAYLOAD, None)} + } +} + +impl flatbuffers::Verifiable for PingBody<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, pos: usize + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>>("payload", Self::VT_PAYLOAD, false)? + .finish(); + Ok(()) + } +} +pub struct PingBodyArgs<'a> { + pub payload: Option>>, +} +impl<'a> Default for PingBodyArgs<'a> { + #[inline] + fn default() -> Self { + PingBodyArgs { + payload: None, } + } +} - impl<'a> flatbuffers::Follow<'a> for PingBody<'a> { - type Inner = PingBody<'a>; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - Self { - _tab: flatbuffers::Table::new(buf, loc), - } - } +pub struct PingBodyBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, +} +impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> PingBodyBuilder<'a, 'b, A> { + #[inline] + pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset>) { + self.fbb_.push_slot_always::>(PingBody::VT_PAYLOAD, payload); + } + #[inline] + pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> PingBodyBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + PingBodyBuilder { + fbb_: _fbb, + start_: start, } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } +} - impl<'a> PingBody<'a> { - pub const VT_PAYLOAD: flatbuffers::VOffsetT = 4; +impl core::fmt::Debug for PingBody<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("PingBody"); + ds.field("payload", &self.payload()); + ds.finish() + } +} +} // pub mod models - pub const fn get_fully_qualified_name() -> &'static str { - "models.PingBody" - } - - #[inline] - pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { - PingBody { _tab: table } - } - #[allow(unused_mut)] - pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>( - _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, - args: &'args PingBodyArgs<'args>, - ) -> flatbuffers::WIPOffset> { - let mut builder = PingBodyBuilder::new(_fbb); - if let Some(x) = args.payload { - builder.add_payload(x); - } - 
builder.finish() - } - - #[inline] - pub fn payload(&self) -> Option> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { - self._tab - .get::>>(PingBody::VT_PAYLOAD, None) - } - } - } - - impl flatbuffers::Verifiable for PingBody<'_> { - #[inline] - fn run_verifier(v: &mut flatbuffers::Verifier, pos: usize) -> Result<(), flatbuffers::InvalidFlatbuffer> { - use self::flatbuffers::Verifiable; - v.visit_table(pos)? - .visit_field::>>("payload", Self::VT_PAYLOAD, false)? - .finish(); - Ok(()) - } - } - pub struct PingBodyArgs<'a> { - pub payload: Option>>, - } - impl<'a> Default for PingBodyArgs<'a> { - #[inline] - fn default() -> Self { - PingBodyArgs { payload: None } - } - } - - pub struct PingBodyBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { - fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, - start_: flatbuffers::WIPOffset, - } - impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> PingBodyBuilder<'a, 'b, A> { - #[inline] - pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset>) { - self.fbb_ - .push_slot_always::>(PingBody::VT_PAYLOAD, payload); - } - #[inline] - pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> PingBodyBuilder<'a, 'b, A> { - let start = _fbb.start_table(); - PingBodyBuilder { - fbb_: _fbb, - start_: start, - } - } - #[inline] - pub fn finish(self) -> flatbuffers::WIPOffset> { - let o = self.fbb_.end_table(self.start_); - flatbuffers::WIPOffset::new(o.value()) - } - } - - impl core::fmt::Debug for PingBody<'_> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let mut ds = f.debug_struct("PingBody"); - ds.field("payload", &self.payload()); - ds.finish() - } - } -} // pub mod models diff --git a/common/protos/src/generated/proto_gen/node_service.rs b/common/protos/src/generated/proto_gen/node_service.rs index 000d5b48..88f7b3ab 100644 --- a/common/protos/src/generated/proto_gen/node_service.rs +++ b/common/protos/src/generated/proto_gen/node_service.rs @@ -622,7 +622,10 @@ pub struct GenerallyLockResponse { #[derive(Clone, PartialEq, ::prost::Message)] pub struct Mss { #[prost(map = "string, string", tag = "1")] - pub value: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub value: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, } #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct LocalStorageInfoRequest { @@ -786,7 +789,10 @@ pub struct DownloadProfileDataResponse { #[prost(bool, tag = "1")] pub success: bool, #[prost(map = "string, bytes", tag = "2")] - pub data: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::vec::Vec>, + pub data: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::vec::Vec, + >, #[prost(string, optional, tag = "3")] pub error_info: ::core::option::Option<::prost::alloc::string::String>, } @@ -1053,9 +1059,15 @@ pub struct LoadTransitionTierConfigResponse { } /// Generated client implementations. 
pub mod node_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::wildcard_imports, clippy::let_unit_value)] - use tonic::codegen::http::Uri; + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct NodeServiceClient { inner: tonic::client::Grpc, @@ -1086,16 +1098,22 @@ pub mod node_service_client { let inner = tonic::client::Grpc::with_origin(inner, origin); Self { inner } } - pub fn with_interceptor(inner: T, interceptor: F) -> NodeServiceClient> + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> NodeServiceClient> where F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< http::Request, - Response = http::Response<>::ResponseBody>, + Response = http::Response< + >::ResponseBody, + >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { NodeServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -1138,9 +1156,15 @@ pub mod node_service_client { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Ping"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Ping", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Ping")); @@ -1149,13 +1173,22 @@ pub mod node_service_client { pub async fn heal_bucket( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/HealBucket"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/HealBucket", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "HealBucket")); @@ -1164,13 +1197,22 @@ pub mod node_service_client { pub async fn list_bucket( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ListBucket"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ListBucket", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ListBucket")); @@ -1179,13 +1221,22 @@ pub mod node_service_client { pub async fn make_bucket( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, 
tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/MakeBucket"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/MakeBucket", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "MakeBucket")); @@ -1194,13 +1245,22 @@ pub mod node_service_client { pub async fn get_bucket_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetBucketInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetBucketInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetBucketInfo")); @@ -1209,13 +1269,22 @@ pub mod node_service_client { pub async fn delete_bucket( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteBucket"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteBucket", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeleteBucket")); @@ -1224,13 +1293,22 @@ pub mod node_service_client { pub async fn read_all( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadAll"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadAll", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadAll")); @@ -1239,13 +1317,22 @@ pub mod node_service_client { pub async fn write_all( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + 
format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/WriteAll"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/WriteAll", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "WriteAll")); @@ -1258,9 +1345,15 @@ pub mod node_service_client { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Delete"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Delete", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Delete")); @@ -1269,13 +1362,22 @@ pub mod node_service_client { pub async fn verify_file( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/VerifyFile"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/VerifyFile", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "VerifyFile")); @@ -1284,13 +1386,22 @@ pub mod node_service_client { pub async fn check_parts( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/CheckParts"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/CheckParts", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "CheckParts")); @@ -1299,13 +1410,22 @@ pub mod node_service_client { pub async fn rename_part( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RenamePart"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/RenamePart", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "RenamePart")); @@ -1314,13 +1434,22 @@ pub mod 
node_service_client { pub async fn rename_file( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RenameFile"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/RenameFile", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "RenameFile")); @@ -1333,9 +1462,15 @@ pub mod node_service_client { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Write"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Write", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Write")); @@ -1344,13 +1479,22 @@ pub mod node_service_client { pub async fn write_stream( &mut self, request: impl tonic::IntoStreamingRequest, - ) -> std::result::Result>, tonic::Status> { + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/WriteStream"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/WriteStream", + ); let mut req = request.into_streaming_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "WriteStream")); @@ -1360,13 +1504,22 @@ pub mod node_service_client { pub async fn read_at( &mut self, request: impl tonic::IntoStreamingRequest, - ) -> std::result::Result>, tonic::Status> { + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadAt"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadAt", + ); let mut req = request.into_streaming_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadAt")); @@ -1375,13 +1528,22 @@ pub mod node_service_client { pub async fn list_dir( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; 
let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ListDir"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ListDir", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ListDir")); @@ -1390,13 +1552,22 @@ pub mod node_service_client { pub async fn walk_dir( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result>, tonic::Status> { + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/WalkDir"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/WalkDir", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "WalkDir")); @@ -1405,13 +1576,22 @@ pub mod node_service_client { pub async fn rename_data( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RenameData"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/RenameData", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "RenameData")); @@ -1420,13 +1600,22 @@ pub mod node_service_client { pub async fn make_volumes( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/MakeVolumes"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/MakeVolumes", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "MakeVolumes")); @@ -1435,13 +1624,22 @@ pub mod node_service_client { pub async fn make_volume( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/MakeVolume"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/MakeVolume", + ); let mut req = 
request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "MakeVolume")); @@ -1450,13 +1648,22 @@ pub mod node_service_client { pub async fn list_volumes( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ListVolumes"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ListVolumes", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ListVolumes")); @@ -1465,13 +1672,22 @@ pub mod node_service_client { pub async fn stat_volume( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/StatVolume"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/StatVolume", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "StatVolume")); @@ -1480,13 +1696,22 @@ pub mod node_service_client { pub async fn delete_paths( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeletePaths"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeletePaths", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeletePaths")); @@ -1495,13 +1720,22 @@ pub mod node_service_client { pub async fn update_metadata( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/UpdateMetadata"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/UpdateMetadata", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "UpdateMetadata")); @@ -1510,13 +1744,22 @@ pub mod node_service_client { pub async fn write_metadata( &mut self, request: impl 
tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/WriteMetadata"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/WriteMetadata", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "WriteMetadata")); @@ -1525,13 +1768,22 @@ pub mod node_service_client { pub async fn read_version( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadVersion"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadVersion", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadVersion")); @@ -1544,9 +1796,15 @@ pub mod node_service_client { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadXL"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadXL", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadXL")); @@ -1555,13 +1813,22 @@ pub mod node_service_client { pub async fn delete_version( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteVersion"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteVersion", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeleteVersion")); @@ -1570,13 +1837,22 @@ pub mod node_service_client { pub async fn delete_versions( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = 
http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteVersions"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteVersions", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeleteVersions")); @@ -1585,13 +1861,22 @@ pub mod node_service_client { pub async fn read_multiple( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadMultiple"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadMultiple", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadMultiple")); @@ -1600,13 +1885,22 @@ pub mod node_service_client { pub async fn delete_volume( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteVolume"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteVolume", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeleteVolume")); @@ -1615,13 +1909,22 @@ pub mod node_service_client { pub async fn disk_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DiskInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DiskInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DiskInfo")); @@ -1630,13 +1933,22 @@ pub mod node_service_client { pub async fn ns_scanner( &mut self, request: impl tonic::IntoStreamingRequest, - ) -> std::result::Result>, tonic::Status> { + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/NsScanner"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/NsScanner", + ); let mut req = request.into_streaming_request(); 
req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "NsScanner")); @@ -1645,13 +1957,22 @@ pub mod node_service_client { pub async fn lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Lock"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Lock", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Lock")); @@ -1660,13 +1981,22 @@ pub mod node_service_client { pub async fn un_lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/UnLock"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/UnLock", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "UnLock")); @@ -1675,13 +2005,22 @@ pub mod node_service_client { pub async fn r_lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RLock"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/RLock", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "RLock")); @@ -1690,13 +2029,22 @@ pub mod node_service_client { pub async fn r_un_lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RUnLock"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/RUnLock", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "RUnLock")); @@ -1705,13 +2053,22 @@ pub mod node_service_client { pub async fn force_un_lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + 
> { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ForceUnLock"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ForceUnLock", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ForceUnLock")); @@ -1720,13 +2077,22 @@ pub mod node_service_client { pub async fn refresh( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Refresh"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Refresh", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Refresh")); @@ -1735,13 +2101,22 @@ pub mod node_service_client { pub async fn local_storage_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LocalStorageInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LocalStorageInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "LocalStorageInfo")); @@ -1750,13 +2125,22 @@ pub mod node_service_client { pub async fn server_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ServerInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ServerInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ServerInfo")); @@ -1765,13 +2149,22 @@ pub mod node_service_client { pub async fn get_cpus( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = 
tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetCpus"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetCpus", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetCpus")); @@ -1780,13 +2173,22 @@ pub mod node_service_client { pub async fn get_net_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetNetInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetNetInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetNetInfo")); @@ -1795,13 +2197,22 @@ pub mod node_service_client { pub async fn get_partitions( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetPartitions"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetPartitions", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetPartitions")); @@ -1810,13 +2221,22 @@ pub mod node_service_client { pub async fn get_os_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetOsInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetOsInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetOsInfo")); @@ -1825,13 +2245,22 @@ pub mod node_service_client { pub async fn get_se_linux_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetSELinuxInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetSELinuxInfo", + ); let mut req = 
request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetSELinuxInfo")); @@ -1840,13 +2269,22 @@ pub mod node_service_client { pub async fn get_sys_config( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetSysConfig"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetSysConfig", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetSysConfig")); @@ -1855,13 +2293,22 @@ pub mod node_service_client { pub async fn get_sys_errors( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetSysErrors"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetSysErrors", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetSysErrors")); @@ -1870,13 +2317,22 @@ pub mod node_service_client { pub async fn get_mem_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetMemInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetMemInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetMemInfo")); @@ -1885,13 +2341,22 @@ pub mod node_service_client { pub async fn get_metrics( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetMetrics"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetMetrics", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetMetrics")); @@ -1900,13 +2365,22 @@ pub mod node_service_client { pub async fn get_proc_info( &mut self, request: impl 
tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetProcInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetProcInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetProcInfo")); @@ -1915,13 +2389,22 @@ pub mod node_service_client { pub async fn start_profiling( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/StartProfiling"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/StartProfiling", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "StartProfiling")); @@ -1930,28 +2413,48 @@ pub mod node_service_client { pub async fn download_profile_data( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DownloadProfileData"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DownloadProfileData", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "DownloadProfileData")); + .insert( + GrpcMethod::new("node_service.NodeService", "DownloadProfileData"), + ); self.inner.unary(req, path, codec).await } pub async fn get_bucket_stats( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetBucketStats"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetBucketStats", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetBucketStats")); @@ -1960,13 +2463,22 @@ pub mod node_service_client { pub async fn get_sr_metrics( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + 
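
Note on the generated client stubs above: every unary method follows the same shape (wait for the channel with ready(), map transport errors to Status::unknown, attach the GrpcMethod extension, then issue the unary call). A minimal sketch of calling one of them from application code, assuming the generated module is re-exported as protos::node_service, that the request type is named GetMemInfoRequest (prost messages derive Default), and that tonic's transport feature is enabled:

    use protos::node_service::{node_service_client::NodeServiceClient, GetMemInfoRequest};
    use tonic::{transport::Channel, Code, Request};

    async fn print_mem_info(addr: &'static str) -> Result<(), Box<dyn std::error::Error>> {
        let channel = Channel::from_static(addr).connect().await?;
        let mut client = NodeServiceClient::new(channel);

        match client.get_mem_info(Request::new(GetMemInfoRequest::default())).await {
            Ok(resp) => println!("mem info: {:?}", resp.into_inner()),
            // As generated above, "Service was not ready" transport errors surface
            // with Code::Unknown rather than a dedicated status code.
            Err(status) if status.code() == Code::Unknown => eprintln!("channel not ready: {status}"),
            Err(status) => eprintln!("GetMemInfo failed: {status}"),
        }
        Ok(())
    }
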
tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetSRMetrics"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetSRMetrics", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetSRMetrics")); @@ -1975,58 +2487,100 @@ pub mod node_service_client { pub async fn get_all_bucket_stats( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetAllBucketStats"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetAllBucketStats", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "GetAllBucketStats")); + .insert( + GrpcMethod::new("node_service.NodeService", "GetAllBucketStats"), + ); self.inner.unary(req, path, codec).await } pub async fn load_bucket_metadata( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadBucketMetadata"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadBucketMetadata", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "LoadBucketMetadata")); + .insert( + GrpcMethod::new("node_service.NodeService", "LoadBucketMetadata"), + ); self.inner.unary(req, path, codec).await } pub async fn delete_bucket_metadata( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteBucketMetadata"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteBucketMetadata", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "DeleteBucketMetadata")); + .insert( + GrpcMethod::new("node_service.NodeService", "DeleteBucketMetadata"), + ); self.inner.unary(req, path, codec).await } pub async fn delete_policy( &mut self, request: impl 
tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeletePolicy"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeletePolicy", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeletePolicy")); @@ -2035,13 +2589,22 @@ pub mod node_service_client { pub async fn load_policy( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadPolicy"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadPolicy", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "LoadPolicy")); @@ -2050,28 +2613,48 @@ pub mod node_service_client { pub async fn load_policy_mapping( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadPolicyMapping"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadPolicyMapping", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "LoadPolicyMapping")); + .insert( + GrpcMethod::new("node_service.NodeService", "LoadPolicyMapping"), + ); self.inner.unary(req, path, codec).await } pub async fn delete_user( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteUser"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteUser", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeleteUser")); @@ -2080,28 +2663,48 @@ pub mod node_service_client { pub async fn delete_service_account( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > 
{ self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteServiceAccount"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteServiceAccount", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "DeleteServiceAccount")); + .insert( + GrpcMethod::new("node_service.NodeService", "DeleteServiceAccount"), + ); self.inner.unary(req, path, codec).await } pub async fn load_user( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadUser"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadUser", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "LoadUser")); @@ -2110,28 +2713,48 @@ pub mod node_service_client { pub async fn load_service_account( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadServiceAccount"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadServiceAccount", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "LoadServiceAccount")); + .insert( + GrpcMethod::new("node_service.NodeService", "LoadServiceAccount"), + ); self.inner.unary(req, path, codec).await } pub async fn load_group( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadGroup"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadGroup", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "LoadGroup")); @@ -2140,16 +2763,30 @@ pub mod node_service_client { pub async fn reload_site_replication_config( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - 
.map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReloadSiteReplicationConfig"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReloadSiteReplicationConfig", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "ReloadSiteReplicationConfig")); + .insert( + GrpcMethod::new( + "node_service.NodeService", + "ReloadSiteReplicationConfig", + ), + ); self.inner.unary(req, path, codec).await } /// rpc VerifyBinary() returns () {}; @@ -2157,13 +2794,22 @@ pub mod node_service_client { pub async fn signal_service( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/SignalService"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/SignalService", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "SignalService")); @@ -2172,58 +2818,100 @@ pub mod node_service_client { pub async fn background_heal_status( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/BackgroundHealStatus"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/BackgroundHealStatus", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "BackgroundHealStatus")); + .insert( + GrpcMethod::new("node_service.NodeService", "BackgroundHealStatus"), + ); self.inner.unary(req, path, codec).await } pub async fn get_metacache_listing( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetMetacacheListing"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetMetacacheListing", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "GetMetacacheListing")); + .insert( + GrpcMethod::new("node_service.NodeService", "GetMetacacheListing"), + ); self.inner.unary(req, path, 
codec).await } pub async fn update_metacache_listing( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/UpdateMetacacheListing"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/UpdateMetacacheListing", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "UpdateMetacacheListing")); + .insert( + GrpcMethod::new("node_service.NodeService", "UpdateMetacacheListing"), + ); self.inner.unary(req, path, codec).await } pub async fn reload_pool_meta( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReloadPoolMeta"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReloadPoolMeta", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReloadPoolMeta")); @@ -2232,13 +2920,22 @@ pub mod node_service_client { pub async fn stop_rebalance( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/StopRebalance"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/StopRebalance", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "StopRebalance")); @@ -2247,38 +2944,69 @@ pub mod node_service_client { pub async fn load_rebalance_meta( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadRebalanceMeta"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadRebalanceMeta", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "LoadRebalanceMeta")); + .insert( + GrpcMethod::new("node_service.NodeService", "LoadRebalanceMeta"), + ); 
self.inner.unary(req, path, codec).await } pub async fn load_transition_tier_config( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadTransitionTierConfig"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadTransitionTierConfig", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "LoadTransitionTierConfig")); + .insert( + GrpcMethod::new( + "node_service.NodeService", + "LoadTransitionTierConfig", + ), + ); self.inner.unary(req, path, codec).await } } } /// Generated server implementations. pub mod node_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::wildcard_imports, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with NodeServiceServer. #[async_trait] @@ -2291,23 +3019,38 @@ pub mod node_service_server { async fn heal_bucket( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn list_bucket( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn make_bucket( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_bucket_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_bucket( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn read_all( &self, request: tonic::Request, @@ -2315,7 +3058,10 @@ pub mod node_service_server { async fn write_all( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete( &self, request: tonic::Request, @@ -2323,33 +3069,52 @@ pub mod node_service_server { async fn verify_file( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn check_parts( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn rename_part( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn rename_file( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn write( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the WriteStream method. 
- type WriteStreamStream: tonic::codegen::tokio_stream::Stream> + type WriteStreamStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + std::marker::Send + 'static; async fn write_stream( &self, request: tonic::Request>, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the ReadAt method. - type ReadAtStream: tonic::codegen::tokio_stream::Stream> + type ReadAtStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + std::marker::Send + 'static; /// rpc Append(AppendRequest) returns (AppendResponse) {}; @@ -2362,7 +3127,9 @@ pub mod node_service_server { request: tonic::Request, ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the WalkDir method. - type WalkDirStream: tonic::codegen::tokio_stream::Stream> + type WalkDirStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + std::marker::Send + 'static; async fn walk_dir( @@ -2372,39 +3139,66 @@ pub mod node_service_server { async fn rename_data( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn make_volumes( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn make_volume( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn list_volumes( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn stat_volume( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_paths( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn update_metadata( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn write_metadata( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn read_version( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn read_xl( &self, request: tonic::Request, @@ -2412,25 +3206,42 @@ pub mod node_service_server { async fn delete_version( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_versions( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn read_multiple( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_volume( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn disk_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, 
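
The WriteStreamStream / ReadAtStream / WalkDirStream associated types above only require some Stream of Result<_, Status> that is Send + 'static. A sketch of the usual boxed-stream alias an implementation can use, assuming the protos::node_service re-export path and a direct tokio-stream dependency:

    use std::pin::Pin;
    use protos::node_service::WalkDirResponse;
    use tokio_stream::Stream;
    use tonic::Status;

    // The shape the generated trait bound asks for:
    // Stream<Item = Result<WalkDirResponse, Status>> + Send + 'static.
    pub type WalkDirStream =
        Pin<Box<dyn Stream<Item = Result<WalkDirResponse, Status>> + Send + 'static>>;

    /// Wrap an already-collected listing into that stream type.
    pub fn into_walk_dir_stream(entries: Vec<WalkDirResponse>) -> WalkDirStream {
        Box::pin(tokio_stream::iter(entries.into_iter().map(Ok)))
    }
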
+ >; /// Server streaming response type for the NsScanner method. - type NsScannerStream: tonic::codegen::tokio_stream::Stream> + type NsScannerStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + std::marker::Send + 'static; async fn ns_scanner( @@ -2440,35 +3251,59 @@ pub mod node_service_server { async fn lock( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn un_lock( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn r_lock( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn r_un_lock( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn force_un_lock( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn refresh( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn local_storage_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn server_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_cpus( &self, request: tonic::Request, @@ -2476,137 +3311,236 @@ pub mod node_service_server { async fn get_net_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_partitions( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_os_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_se_linux_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_sys_config( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_sys_errors( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_mem_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_metrics( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_proc_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn start_profiling( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn download_profile_data( &self, request: tonic::Request, - ) -> std::result::Result, 
tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_bucket_stats( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_sr_metrics( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_all_bucket_stats( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_bucket_metadata( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_bucket_metadata( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_policy( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_policy( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_policy_mapping( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_user( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_service_account( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_user( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_service_account( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_group( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn reload_site_replication_config( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// rpc VerifyBinary() returns () {}; /// rpc CommitBinary() returns () {}; async fn signal_service( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn background_heal_status( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_metacache_listing( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn update_metacache_listing( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn reload_pool_meta( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn stop_rebalance( &self, request: tonic::Request, - ) -> std::result::Result, 
tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_rebalance_meta( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_transition_tier_config( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } #[derive(Debug)] pub struct NodeServiceServer { @@ -2629,7 +3563,10 @@ pub mod node_service_server { max_encoding_message_size: None, } } - pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService where F: tonic::service::Interceptor, { @@ -2673,7 +3610,10 @@ pub mod node_service_server { type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; - fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { @@ -2681,12 +3621,21 @@ pub mod node_service_server { "/node_service.NodeService/Ping" => { #[allow(non_camel_case_types)] struct PingSvc(pub Arc); - impl tonic::server::UnaryService for PingSvc { + impl tonic::server::UnaryService + for PingSvc { type Response = super::PingResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::ping(&inner, request).await }; + let fut = async move { + ::ping(&inner, request).await + }; Box::pin(fut) } } @@ -2699,8 +3648,14 @@ pub mod node_service_server { let method = PingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2709,12 +3664,23 @@ pub mod node_service_server { "/node_service.NodeService/HealBucket" => { #[allow(non_camel_case_types)] struct HealBucketSvc(pub Arc); - impl tonic::server::UnaryService for HealBucketSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for HealBucketSvc { type Response = super::HealBucketResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::heal_bucket(&inner, request).await }; + let fut = async move { + ::heal_bucket(&inner, request).await + }; Box::pin(fut) } } @@ -2727,8 +3693,14 @@ pub mod node_service_server { let method = HealBucketSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, 
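
with_interceptor above is the hook for cross-cutting concerns such as auth. A sketch of wiring an implementation into the generated server with an interceptor; MyNode is a hypothetical type implementing the generated NodeService trait, and the module path is assumed:

    use protos::node_service::node_service_server::NodeServiceServer;
    use tonic::{transport::Server, Request, Status};

    fn check_auth(req: Request<()>) -> Result<Request<()>, Status> {
        // Inspect req.metadata() here; this sketch accepts every call.
        Ok(req)
    }

    async fn serve(node: MyNode) -> Result<(), tonic::transport::Error> {
        let addr = "0.0.0.0:9001".parse().expect("valid socket address");
        Server::builder()
            .add_service(NodeServiceServer::with_interceptor(node, check_auth))
            .serve(addr)
            .await
    }
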
max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2737,12 +3709,23 @@ pub mod node_service_server { "/node_service.NodeService/ListBucket" => { #[allow(non_camel_case_types)] struct ListBucketSvc(pub Arc); - impl tonic::server::UnaryService for ListBucketSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ListBucketSvc { type Response = super::ListBucketResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::list_bucket(&inner, request).await }; + let fut = async move { + ::list_bucket(&inner, request).await + }; Box::pin(fut) } } @@ -2755,8 +3738,14 @@ pub mod node_service_server { let method = ListBucketSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2765,12 +3754,23 @@ pub mod node_service_server { "/node_service.NodeService/MakeBucket" => { #[allow(non_camel_case_types)] struct MakeBucketSvc(pub Arc); - impl tonic::server::UnaryService for MakeBucketSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for MakeBucketSvc { type Response = super::MakeBucketResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::make_bucket(&inner, request).await }; + let fut = async move { + ::make_bucket(&inner, request).await + }; Box::pin(fut) } } @@ -2783,8 +3783,14 @@ pub mod node_service_server { let method = MakeBucketSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2793,12 +3799,23 @@ pub mod node_service_server { "/node_service.NodeService/GetBucketInfo" => { #[allow(non_camel_case_types)] struct GetBucketInfoSvc(pub Arc); - impl tonic::server::UnaryService for GetBucketInfoSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetBucketInfoSvc { type Response = super::GetBucketInfoResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn 
call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_bucket_info(&inner, request).await }; + let fut = async move { + ::get_bucket_info(&inner, request).await + }; Box::pin(fut) } } @@ -2811,8 +3828,14 @@ pub mod node_service_server { let method = GetBucketInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2821,12 +3844,23 @@ pub mod node_service_server { "/node_service.NodeService/DeleteBucket" => { #[allow(non_camel_case_types)] struct DeleteBucketSvc(pub Arc); - impl tonic::server::UnaryService for DeleteBucketSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteBucketSvc { type Response = super::DeleteBucketResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_bucket(&inner, request).await }; + let fut = async move { + ::delete_bucket(&inner, request).await + }; Box::pin(fut) } } @@ -2839,8 +3873,14 @@ pub mod node_service_server { let method = DeleteBucketSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2849,12 +3889,23 @@ pub mod node_service_server { "/node_service.NodeService/ReadAll" => { #[allow(non_camel_case_types)] struct ReadAllSvc(pub Arc); - impl tonic::server::UnaryService for ReadAllSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ReadAllSvc { type Response = super::ReadAllResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::read_all(&inner, request).await }; + let fut = async move { + ::read_all(&inner, request).await + }; Box::pin(fut) } } @@ -2867,8 +3918,14 @@ pub mod node_service_server { let method = ReadAllSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); 
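
Because every route above simply forwards to the NodeService trait, handlers can be unit-tested by calling the trait method directly, without the HTTP dispatch. A sketch, with MyNode, GetBucketInfoRequest, and the module path as assumptions (tokio's test macro is also assumed):

    use protos::node_service::{node_service_server::NodeService, GetBucketInfoRequest};
    use tonic::Request;

    #[tokio::test]
    async fn get_bucket_info_answers() {
        let node = MyNode::default();
        let resp = node
            .get_bucket_info(Request::new(GetBucketInfoRequest::default()))
            .await
            .expect("handler should succeed");
        // Assert on the real response fields here; the sketch only shows the call shape.
        println!("{:?}", resp.into_inner());
    }
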
let res = grpc.unary(method, req).await; Ok(res) }; @@ -2877,12 +3934,23 @@ pub mod node_service_server { "/node_service.NodeService/WriteAll" => { #[allow(non_camel_case_types)] struct WriteAllSvc(pub Arc); - impl tonic::server::UnaryService for WriteAllSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for WriteAllSvc { type Response = super::WriteAllResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::write_all(&inner, request).await }; + let fut = async move { + ::write_all(&inner, request).await + }; Box::pin(fut) } } @@ -2895,8 +3963,14 @@ pub mod node_service_server { let method = WriteAllSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2905,12 +3979,23 @@ pub mod node_service_server { "/node_service.NodeService/Delete" => { #[allow(non_camel_case_types)] struct DeleteSvc(pub Arc); - impl tonic::server::UnaryService for DeleteSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteSvc { type Response = super::DeleteResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete(&inner, request).await }; + let fut = async move { + ::delete(&inner, request).await + }; Box::pin(fut) } } @@ -2923,8 +4008,14 @@ pub mod node_service_server { let method = DeleteSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2933,12 +4024,23 @@ pub mod node_service_server { "/node_service.NodeService/VerifyFile" => { #[allow(non_camel_case_types)] struct VerifyFileSvc(pub Arc); - impl tonic::server::UnaryService for VerifyFileSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for VerifyFileSvc { type Response = super::VerifyFileResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::verify_file(&inner, request).await }; + let fut = async move { + ::verify_file(&inner, request).await + }; Box::pin(fut) } } @@ -2951,8 +4053,14 @@ pub mod 
node_service_server { let method = VerifyFileSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2961,12 +4069,23 @@ pub mod node_service_server { "/node_service.NodeService/CheckParts" => { #[allow(non_camel_case_types)] struct CheckPartsSvc(pub Arc); - impl tonic::server::UnaryService for CheckPartsSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for CheckPartsSvc { type Response = super::CheckPartsResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::check_parts(&inner, request).await }; + let fut = async move { + ::check_parts(&inner, request).await + }; Box::pin(fut) } } @@ -2979,8 +4098,14 @@ pub mod node_service_server { let method = CheckPartsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2989,12 +4114,23 @@ pub mod node_service_server { "/node_service.NodeService/RenamePart" => { #[allow(non_camel_case_types)] struct RenamePartSvc(pub Arc); - impl tonic::server::UnaryService for RenamePartSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for RenamePartSvc { type Response = super::RenamePartResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::rename_part(&inner, request).await }; + let fut = async move { + ::rename_part(&inner, request).await + }; Box::pin(fut) } } @@ -3007,8 +4143,14 @@ pub mod node_service_server { let method = RenamePartSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3017,12 +4159,23 @@ pub mod node_service_server { "/node_service.NodeService/RenameFile" => { #[allow(non_camel_case_types)] struct RenameFileSvc(pub Arc); - impl tonic::server::UnaryService for RenameFileSvc { + impl< + T: 
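
The apply_compression_config / apply_max_message_size_config calls above are fed by per-service knobs on the generated server type. A sketch of setting them, assuming a tonic version that generates these builder methods and has the gzip feature enabled; MyNode is hypothetical:

    use protos::node_service::node_service_server::NodeServiceServer;
    use tonic::codec::CompressionEncoding;

    fn configured_service(node: MyNode) -> NodeServiceServer<MyNode> {
        NodeServiceServer::new(node)
            // Raise the 4 MiB inbound default for large metadata payloads.
            .max_decoding_message_size(64 * 1024 * 1024)
            // Negotiate gzip in both directions.
            .send_compressed(CompressionEncoding::Gzip)
            .accept_compressed(CompressionEncoding::Gzip)
    }
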
NodeService, + > tonic::server::UnaryService + for RenameFileSvc { type Response = super::RenameFileResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::rename_file(&inner, request).await }; + let fut = async move { + ::rename_file(&inner, request).await + }; Box::pin(fut) } } @@ -3035,8 +4188,14 @@ pub mod node_service_server { let method = RenameFileSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3045,12 +4204,21 @@ pub mod node_service_server { "/node_service.NodeService/Write" => { #[allow(non_camel_case_types)] struct WriteSvc(pub Arc); - impl tonic::server::UnaryService for WriteSvc { + impl tonic::server::UnaryService + for WriteSvc { type Response = super::WriteResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::write(&inner, request).await }; + let fut = async move { + ::write(&inner, request).await + }; Box::pin(fut) } } @@ -3063,8 +4231,14 @@ pub mod node_service_server { let method = WriteSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3073,13 +4247,26 @@ pub mod node_service_server { "/node_service.NodeService/WriteStream" => { #[allow(non_camel_case_types)] struct WriteStreamSvc(pub Arc); - impl tonic::server::StreamingService for WriteStreamSvc { + impl< + T: NodeService, + > tonic::server::StreamingService + for WriteStreamSvc { type Response = super::WriteResponse; type ResponseStream = T::WriteStreamStream; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request>) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::write_stream(&inner, request).await }; + let fut = async move { + ::write_stream(&inner, request).await + }; Box::pin(fut) } } @@ -3092,8 +4279,14 @@ pub mod node_service_server { let method = WriteStreamSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, 
send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.streaming(method, req).await; Ok(res) }; @@ -3102,13 +4295,26 @@ pub mod node_service_server { "/node_service.NodeService/ReadAt" => { #[allow(non_camel_case_types)] struct ReadAtSvc(pub Arc); - impl tonic::server::StreamingService for ReadAtSvc { + impl< + T: NodeService, + > tonic::server::StreamingService + for ReadAtSvc { type Response = super::ReadAtResponse; type ResponseStream = T::ReadAtStream; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request>) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::read_at(&inner, request).await }; + let fut = async move { + ::read_at(&inner, request).await + }; Box::pin(fut) } } @@ -3121,8 +4327,14 @@ pub mod node_service_server { let method = ReadAtSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.streaming(method, req).await; Ok(res) }; @@ -3131,12 +4343,23 @@ pub mod node_service_server { "/node_service.NodeService/ListDir" => { #[allow(non_camel_case_types)] struct ListDirSvc(pub Arc); - impl tonic::server::UnaryService for ListDirSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ListDirSvc { type Response = super::ListDirResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::list_dir(&inner, request).await }; + let fut = async move { + ::list_dir(&inner, request).await + }; Box::pin(fut) } } @@ -3149,8 +4372,14 @@ pub mod node_service_server { let method = ListDirSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3159,13 +4388,24 @@ pub mod node_service_server { "/node_service.NodeService/WalkDir" => { #[allow(non_camel_case_types)] struct WalkDirSvc(pub Arc); - impl tonic::server::ServerStreamingService for WalkDirSvc { + impl< + T: NodeService, + > tonic::server::ServerStreamingService + for WalkDirSvc { type Response = super::WalkDirResponse; type ResponseStream = T::WalkDirStream; - type Future = BoxFuture, 
tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::walk_dir(&inner, request).await }; + let fut = async move { + ::walk_dir(&inner, request).await + }; Box::pin(fut) } } @@ -3178,8 +4418,14 @@ pub mod node_service_server { let method = WalkDirSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.server_streaming(method, req).await; Ok(res) }; @@ -3188,12 +4434,23 @@ pub mod node_service_server { "/node_service.NodeService/RenameData" => { #[allow(non_camel_case_types)] struct RenameDataSvc(pub Arc); - impl tonic::server::UnaryService for RenameDataSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for RenameDataSvc { type Response = super::RenameDataResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::rename_data(&inner, request).await }; + let fut = async move { + ::rename_data(&inner, request).await + }; Box::pin(fut) } } @@ -3206,8 +4463,14 @@ pub mod node_service_server { let method = RenameDataSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3216,12 +4479,23 @@ pub mod node_service_server { "/node_service.NodeService/MakeVolumes" => { #[allow(non_camel_case_types)] struct MakeVolumesSvc(pub Arc); - impl tonic::server::UnaryService for MakeVolumesSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for MakeVolumesSvc { type Response = super::MakeVolumesResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::make_volumes(&inner, request).await }; + let fut = async move { + ::make_volumes(&inner, request).await + }; Box::pin(fut) } } @@ -3234,8 +4508,14 @@ pub mod node_service_server { let method = MakeVolumesSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + 
accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3244,12 +4524,23 @@ pub mod node_service_server { "/node_service.NodeService/MakeVolume" => { #[allow(non_camel_case_types)] struct MakeVolumeSvc(pub Arc); - impl tonic::server::UnaryService for MakeVolumeSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for MakeVolumeSvc { type Response = super::MakeVolumeResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::make_volume(&inner, request).await }; + let fut = async move { + ::make_volume(&inner, request).await + }; Box::pin(fut) } } @@ -3262,8 +4553,14 @@ pub mod node_service_server { let method = MakeVolumeSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3272,12 +4569,23 @@ pub mod node_service_server { "/node_service.NodeService/ListVolumes" => { #[allow(non_camel_case_types)] struct ListVolumesSvc(pub Arc); - impl tonic::server::UnaryService for ListVolumesSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ListVolumesSvc { type Response = super::ListVolumesResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::list_volumes(&inner, request).await }; + let fut = async move { + ::list_volumes(&inner, request).await + }; Box::pin(fut) } } @@ -3290,8 +4598,14 @@ pub mod node_service_server { let method = ListVolumesSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3300,12 +4614,23 @@ pub mod node_service_server { "/node_service.NodeService/StatVolume" => { #[allow(non_camel_case_types)] struct StatVolumeSvc(pub Arc); - impl tonic::server::UnaryService for StatVolumeSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for StatVolumeSvc { type Response = super::StatVolumeResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { 
let inner = Arc::clone(&self.0); - let fut = async move { ::stat_volume(&inner, request).await }; + let fut = async move { + ::stat_volume(&inner, request).await + }; Box::pin(fut) } } @@ -3318,8 +4643,14 @@ pub mod node_service_server { let method = StatVolumeSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3328,12 +4659,23 @@ pub mod node_service_server { "/node_service.NodeService/DeletePaths" => { #[allow(non_camel_case_types)] struct DeletePathsSvc(pub Arc); - impl tonic::server::UnaryService for DeletePathsSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeletePathsSvc { type Response = super::DeletePathsResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_paths(&inner, request).await }; + let fut = async move { + ::delete_paths(&inner, request).await + }; Box::pin(fut) } } @@ -3346,8 +4688,14 @@ pub mod node_service_server { let method = DeletePathsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3356,12 +4704,23 @@ pub mod node_service_server { "/node_service.NodeService/UpdateMetadata" => { #[allow(non_camel_case_types)] struct UpdateMetadataSvc(pub Arc); - impl tonic::server::UnaryService for UpdateMetadataSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for UpdateMetadataSvc { type Response = super::UpdateMetadataResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::update_metadata(&inner, request).await }; + let fut = async move { + ::update_metadata(&inner, request).await + }; Box::pin(fut) } } @@ -3374,8 +4733,14 @@ pub mod node_service_server { let method = UpdateMetadataSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, 
req).await; Ok(res) }; @@ -3384,12 +4749,23 @@ pub mod node_service_server { "/node_service.NodeService/WriteMetadata" => { #[allow(non_camel_case_types)] struct WriteMetadataSvc(pub Arc); - impl tonic::server::UnaryService for WriteMetadataSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for WriteMetadataSvc { type Response = super::WriteMetadataResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::write_metadata(&inner, request).await }; + let fut = async move { + ::write_metadata(&inner, request).await + }; Box::pin(fut) } } @@ -3402,8 +4778,14 @@ pub mod node_service_server { let method = WriteMetadataSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3412,12 +4794,23 @@ pub mod node_service_server { "/node_service.NodeService/ReadVersion" => { #[allow(non_camel_case_types)] struct ReadVersionSvc(pub Arc); - impl tonic::server::UnaryService for ReadVersionSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ReadVersionSvc { type Response = super::ReadVersionResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::read_version(&inner, request).await }; + let fut = async move { + ::read_version(&inner, request).await + }; Box::pin(fut) } } @@ -3430,8 +4823,14 @@ pub mod node_service_server { let method = ReadVersionSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3440,12 +4839,23 @@ pub mod node_service_server { "/node_service.NodeService/ReadXL" => { #[allow(non_camel_case_types)] struct ReadXLSvc(pub Arc); - impl tonic::server::UnaryService for ReadXLSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ReadXLSvc { type Response = super::ReadXlResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::read_xl(&inner, request).await }; + let fut = async move { + ::read_xl(&inner, request).await + }; Box::pin(fut) } } @@ -3458,8 
+4868,14 @@ pub mod node_service_server { let method = ReadXLSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3468,12 +4884,23 @@ pub mod node_service_server { "/node_service.NodeService/DeleteVersion" => { #[allow(non_camel_case_types)] struct DeleteVersionSvc(pub Arc); - impl tonic::server::UnaryService for DeleteVersionSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteVersionSvc { type Response = super::DeleteVersionResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_version(&inner, request).await }; + let fut = async move { + ::delete_version(&inner, request).await + }; Box::pin(fut) } } @@ -3486,8 +4913,14 @@ pub mod node_service_server { let method = DeleteVersionSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3496,12 +4929,23 @@ pub mod node_service_server { "/node_service.NodeService/DeleteVersions" => { #[allow(non_camel_case_types)] struct DeleteVersionsSvc(pub Arc); - impl tonic::server::UnaryService for DeleteVersionsSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteVersionsSvc { type Response = super::DeleteVersionsResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_versions(&inner, request).await }; + let fut = async move { + ::delete_versions(&inner, request).await + }; Box::pin(fut) } } @@ -3514,8 +4958,14 @@ pub mod node_service_server { let method = DeleteVersionsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3524,12 +4974,23 @@ pub mod node_service_server { "/node_service.NodeService/ReadMultiple" => { #[allow(non_camel_case_types)] struct ReadMultipleSvc(pub Arc); - 
impl tonic::server::UnaryService for ReadMultipleSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ReadMultipleSvc { type Response = super::ReadMultipleResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::read_multiple(&inner, request).await }; + let fut = async move { + ::read_multiple(&inner, request).await + }; Box::pin(fut) } } @@ -3542,8 +5003,14 @@ pub mod node_service_server { let method = ReadMultipleSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3552,12 +5019,23 @@ pub mod node_service_server { "/node_service.NodeService/DeleteVolume" => { #[allow(non_camel_case_types)] struct DeleteVolumeSvc(pub Arc); - impl tonic::server::UnaryService for DeleteVolumeSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteVolumeSvc { type Response = super::DeleteVolumeResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_volume(&inner, request).await }; + let fut = async move { + ::delete_volume(&inner, request).await + }; Box::pin(fut) } } @@ -3570,8 +5048,14 @@ pub mod node_service_server { let method = DeleteVolumeSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3580,12 +5064,23 @@ pub mod node_service_server { "/node_service.NodeService/DiskInfo" => { #[allow(non_camel_case_types)] struct DiskInfoSvc(pub Arc); - impl tonic::server::UnaryService for DiskInfoSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DiskInfoSvc { type Response = super::DiskInfoResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::disk_info(&inner, request).await }; + let fut = async move { + ::disk_info(&inner, request).await + }; Box::pin(fut) } } @@ -3598,8 +5093,14 @@ pub mod node_service_server { let method = DiskInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - 
.apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3608,13 +5109,26 @@ pub mod node_service_server { "/node_service.NodeService/NsScanner" => { #[allow(non_camel_case_types)] struct NsScannerSvc(pub Arc); - impl tonic::server::StreamingService for NsScannerSvc { + impl< + T: NodeService, + > tonic::server::StreamingService + for NsScannerSvc { type Response = super::NsScannerResponse; type ResponseStream = T::NsScannerStream; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request>) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::ns_scanner(&inner, request).await }; + let fut = async move { + ::ns_scanner(&inner, request).await + }; Box::pin(fut) } } @@ -3627,8 +5141,14 @@ pub mod node_service_server { let method = NsScannerSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.streaming(method, req).await; Ok(res) }; @@ -3637,12 +5157,23 @@ pub mod node_service_server { "/node_service.NodeService/Lock" => { #[allow(non_camel_case_types)] struct LockSvc(pub Arc); - impl tonic::server::UnaryService for LockSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LockSvc { type Response = super::GenerallyLockResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::lock(&inner, request).await }; + let fut = async move { + ::lock(&inner, request).await + }; Box::pin(fut) } } @@ -3655,8 +5186,14 @@ pub mod node_service_server { let method = LockSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3665,12 +5202,23 @@ pub mod node_service_server { "/node_service.NodeService/UnLock" => { #[allow(non_camel_case_types)] struct UnLockSvc(pub Arc); - impl tonic::server::UnaryService for UnLockSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for UnLockSvc { type Response = super::GenerallyLockResponse; - type Future = BoxFuture, 
tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::un_lock(&inner, request).await }; + let fut = async move { + ::un_lock(&inner, request).await + }; Box::pin(fut) } } @@ -3683,8 +5231,14 @@ pub mod node_service_server { let method = UnLockSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3693,12 +5247,23 @@ pub mod node_service_server { "/node_service.NodeService/RLock" => { #[allow(non_camel_case_types)] struct RLockSvc(pub Arc); - impl tonic::server::UnaryService for RLockSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for RLockSvc { type Response = super::GenerallyLockResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::r_lock(&inner, request).await }; + let fut = async move { + ::r_lock(&inner, request).await + }; Box::pin(fut) } } @@ -3711,8 +5276,14 @@ pub mod node_service_server { let method = RLockSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3721,12 +5292,23 @@ pub mod node_service_server { "/node_service.NodeService/RUnLock" => { #[allow(non_camel_case_types)] struct RUnLockSvc(pub Arc); - impl tonic::server::UnaryService for RUnLockSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for RUnLockSvc { type Response = super::GenerallyLockResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::r_un_lock(&inner, request).await }; + let fut = async move { + ::r_un_lock(&inner, request).await + }; Box::pin(fut) } } @@ -3739,8 +5321,14 @@ pub mod node_service_server { let method = RUnLockSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + 
.apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3749,12 +5337,23 @@ pub mod node_service_server { "/node_service.NodeService/ForceUnLock" => { #[allow(non_camel_case_types)] struct ForceUnLockSvc(pub Arc); - impl tonic::server::UnaryService for ForceUnLockSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ForceUnLockSvc { type Response = super::GenerallyLockResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::force_un_lock(&inner, request).await }; + let fut = async move { + ::force_un_lock(&inner, request).await + }; Box::pin(fut) } } @@ -3767,8 +5366,14 @@ pub mod node_service_server { let method = ForceUnLockSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3777,12 +5382,23 @@ pub mod node_service_server { "/node_service.NodeService/Refresh" => { #[allow(non_camel_case_types)] struct RefreshSvc(pub Arc); - impl tonic::server::UnaryService for RefreshSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for RefreshSvc { type Response = super::GenerallyLockResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::refresh(&inner, request).await }; + let fut = async move { + ::refresh(&inner, request).await + }; Box::pin(fut) } } @@ -3795,8 +5411,14 @@ pub mod node_service_server { let method = RefreshSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3805,12 +5427,24 @@ pub mod node_service_server { "/node_service.NodeService/LocalStorageInfo" => { #[allow(non_camel_case_types)] struct LocalStorageInfoSvc(pub Arc); - impl tonic::server::UnaryService for LocalStorageInfoSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LocalStorageInfoSvc { type Response = super::LocalStorageInfoResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async 
move { ::local_storage_info(&inner, request).await }; + let fut = async move { + ::local_storage_info(&inner, request) + .await + }; Box::pin(fut) } } @@ -3823,8 +5457,14 @@ pub mod node_service_server { let method = LocalStorageInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3833,12 +5473,23 @@ pub mod node_service_server { "/node_service.NodeService/ServerInfo" => { #[allow(non_camel_case_types)] struct ServerInfoSvc(pub Arc); - impl tonic::server::UnaryService for ServerInfoSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ServerInfoSvc { type Response = super::ServerInfoResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::server_info(&inner, request).await }; + let fut = async move { + ::server_info(&inner, request).await + }; Box::pin(fut) } } @@ -3851,8 +5502,14 @@ pub mod node_service_server { let method = ServerInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3861,12 +5518,23 @@ pub mod node_service_server { "/node_service.NodeService/GetCpus" => { #[allow(non_camel_case_types)] struct GetCpusSvc(pub Arc); - impl tonic::server::UnaryService for GetCpusSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetCpusSvc { type Response = super::GetCpusResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_cpus(&inner, request).await }; + let fut = async move { + ::get_cpus(&inner, request).await + }; Box::pin(fut) } } @@ -3879,8 +5547,14 @@ pub mod node_service_server { let method = GetCpusSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3889,12 +5563,23 @@ pub mod node_service_server { 
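Note on the reformatted hunks above and below: the flattened text drops the one-line angle-bracket generics, so fragments such as "impl tonic::server::UnaryService for GetCpusSvc" are really generic impls, and every hunk in this generated file is the same mechanical re-wrap of those lines. A sketch of what a single unary dispatch arm reads as after the change, with the generics written back in; this follows tonic's usual code generator output, and the `super::GetCpusRequest` type name is inferred from the method name rather than visible in the text above:

    "/node_service.NodeService/GetCpus" => {
        #[allow(non_camel_case_types)]
        struct GetCpusSvc<T: NodeService>(pub Arc<T>);
        impl<T: NodeService> tonic::server::UnaryService<super::GetCpusRequest>
            for GetCpusSvc<T> {
            type Response = super::GetCpusResponse;
            type Future = BoxFuture<tonic::Response<Self::Response>, tonic::Status>;
            fn call(
                &mut self,
                request: tonic::Request<super::GetCpusRequest>,
            ) -> Self::Future {
                let inner = Arc::clone(&self.0);
                let fut = async move {
                    <T as NodeService>::get_cpus(&inner, request).await
                };
                Box::pin(fut)
            }
        }
        // ...then the codec setup, apply_compression_config / apply_max_message_size_config,
        // and `grpc.unary(method, req).await`, exactly as in the surrounding hunks...
    }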
"/node_service.NodeService/GetNetInfo" => { #[allow(non_camel_case_types)] struct GetNetInfoSvc(pub Arc); - impl tonic::server::UnaryService for GetNetInfoSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetNetInfoSvc { type Response = super::GetNetInfoResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_net_info(&inner, request).await }; + let fut = async move { + ::get_net_info(&inner, request).await + }; Box::pin(fut) } } @@ -3907,8 +5592,14 @@ pub mod node_service_server { let method = GetNetInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3917,12 +5608,23 @@ pub mod node_service_server { "/node_service.NodeService/GetPartitions" => { #[allow(non_camel_case_types)] struct GetPartitionsSvc(pub Arc); - impl tonic::server::UnaryService for GetPartitionsSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetPartitionsSvc { type Response = super::GetPartitionsResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_partitions(&inner, request).await }; + let fut = async move { + ::get_partitions(&inner, request).await + }; Box::pin(fut) } } @@ -3935,8 +5637,14 @@ pub mod node_service_server { let method = GetPartitionsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3945,12 +5653,23 @@ pub mod node_service_server { "/node_service.NodeService/GetOsInfo" => { #[allow(non_camel_case_types)] struct GetOsInfoSvc(pub Arc); - impl tonic::server::UnaryService for GetOsInfoSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetOsInfoSvc { type Response = super::GetOsInfoResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_os_info(&inner, request).await }; + let fut = async move { + ::get_os_info(&inner, request).await + }; Box::pin(fut) } } @@ -3963,8 +5682,14 @@ pub mod node_service_server { let method = 
GetOsInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -3973,12 +5698,23 @@ pub mod node_service_server { "/node_service.NodeService/GetSELinuxInfo" => { #[allow(non_camel_case_types)] struct GetSELinuxInfoSvc(pub Arc); - impl tonic::server::UnaryService for GetSELinuxInfoSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetSELinuxInfoSvc { type Response = super::GetSeLinuxInfoResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_se_linux_info(&inner, request).await }; + let fut = async move { + ::get_se_linux_info(&inner, request).await + }; Box::pin(fut) } } @@ -3991,8 +5727,14 @@ pub mod node_service_server { let method = GetSELinuxInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4001,12 +5743,23 @@ pub mod node_service_server { "/node_service.NodeService/GetSysConfig" => { #[allow(non_camel_case_types)] struct GetSysConfigSvc(pub Arc); - impl tonic::server::UnaryService for GetSysConfigSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetSysConfigSvc { type Response = super::GetSysConfigResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_sys_config(&inner, request).await }; + let fut = async move { + ::get_sys_config(&inner, request).await + }; Box::pin(fut) } } @@ -4019,8 +5772,14 @@ pub mod node_service_server { let method = GetSysConfigSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4029,12 +5788,23 @@ pub mod node_service_server { "/node_service.NodeService/GetSysErrors" => { #[allow(non_camel_case_types)] struct GetSysErrorsSvc(pub Arc); - impl tonic::server::UnaryService for GetSysErrorsSvc 
{ + impl< + T: NodeService, + > tonic::server::UnaryService + for GetSysErrorsSvc { type Response = super::GetSysErrorsResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_sys_errors(&inner, request).await }; + let fut = async move { + ::get_sys_errors(&inner, request).await + }; Box::pin(fut) } } @@ -4047,8 +5817,14 @@ pub mod node_service_server { let method = GetSysErrorsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4057,12 +5833,23 @@ pub mod node_service_server { "/node_service.NodeService/GetMemInfo" => { #[allow(non_camel_case_types)] struct GetMemInfoSvc(pub Arc); - impl tonic::server::UnaryService for GetMemInfoSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetMemInfoSvc { type Response = super::GetMemInfoResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_mem_info(&inner, request).await }; + let fut = async move { + ::get_mem_info(&inner, request).await + }; Box::pin(fut) } } @@ -4075,8 +5862,14 @@ pub mod node_service_server { let method = GetMemInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4085,12 +5878,23 @@ pub mod node_service_server { "/node_service.NodeService/GetMetrics" => { #[allow(non_camel_case_types)] struct GetMetricsSvc(pub Arc); - impl tonic::server::UnaryService for GetMetricsSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetMetricsSvc { type Response = super::GetMetricsResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_metrics(&inner, request).await }; + let fut = async move { + ::get_metrics(&inner, request).await + }; Box::pin(fut) } } @@ -4103,8 +5907,14 @@ pub mod node_service_server { let method = GetMetricsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - 
.apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4113,12 +5923,23 @@ pub mod node_service_server { "/node_service.NodeService/GetProcInfo" => { #[allow(non_camel_case_types)] struct GetProcInfoSvc(pub Arc); - impl tonic::server::UnaryService for GetProcInfoSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetProcInfoSvc { type Response = super::GetProcInfoResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_proc_info(&inner, request).await }; + let fut = async move { + ::get_proc_info(&inner, request).await + }; Box::pin(fut) } } @@ -4131,8 +5952,14 @@ pub mod node_service_server { let method = GetProcInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4141,12 +5968,23 @@ pub mod node_service_server { "/node_service.NodeService/StartProfiling" => { #[allow(non_camel_case_types)] struct StartProfilingSvc(pub Arc); - impl tonic::server::UnaryService for StartProfilingSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for StartProfilingSvc { type Response = super::StartProfilingResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::start_profiling(&inner, request).await }; + let fut = async move { + ::start_profiling(&inner, request).await + }; Box::pin(fut) } } @@ -4159,8 +5997,14 @@ pub mod node_service_server { let method = StartProfilingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4169,12 +6013,24 @@ pub mod node_service_server { "/node_service.NodeService/DownloadProfileData" => { #[allow(non_camel_case_types)] struct DownloadProfileDataSvc(pub Arc); - impl tonic::server::UnaryService for DownloadProfileDataSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DownloadProfileDataSvc { type Response = 
super::DownloadProfileDataResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::download_profile_data(&inner, request).await }; + let fut = async move { + ::download_profile_data(&inner, request) + .await + }; Box::pin(fut) } } @@ -4187,8 +6043,14 @@ pub mod node_service_server { let method = DownloadProfileDataSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4197,12 +6059,23 @@ pub mod node_service_server { "/node_service.NodeService/GetBucketStats" => { #[allow(non_camel_case_types)] struct GetBucketStatsSvc(pub Arc); - impl tonic::server::UnaryService for GetBucketStatsSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetBucketStatsSvc { type Response = super::GetBucketStatsDataResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_bucket_stats(&inner, request).await }; + let fut = async move { + ::get_bucket_stats(&inner, request).await + }; Box::pin(fut) } } @@ -4215,8 +6088,14 @@ pub mod node_service_server { let method = GetBucketStatsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4225,12 +6104,23 @@ pub mod node_service_server { "/node_service.NodeService/GetSRMetrics" => { #[allow(non_camel_case_types)] struct GetSRMetricsSvc(pub Arc); - impl tonic::server::UnaryService for GetSRMetricsSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetSRMetricsSvc { type Response = super::GetSrMetricsDataResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_sr_metrics(&inner, request).await }; + let fut = async move { + ::get_sr_metrics(&inner, request).await + }; Box::pin(fut) } } @@ -4243,8 +6133,14 @@ pub mod node_service_server { let method = GetSRMetricsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, 
send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4253,12 +6149,24 @@ pub mod node_service_server { "/node_service.NodeService/GetAllBucketStats" => { #[allow(non_camel_case_types)] struct GetAllBucketStatsSvc(pub Arc); - impl tonic::server::UnaryService for GetAllBucketStatsSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetAllBucketStatsSvc { type Response = super::GetAllBucketStatsResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_all_bucket_stats(&inner, request).await }; + let fut = async move { + ::get_all_bucket_stats(&inner, request) + .await + }; Box::pin(fut) } } @@ -4271,8 +6179,14 @@ pub mod node_service_server { let method = GetAllBucketStatsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4281,12 +6195,24 @@ pub mod node_service_server { "/node_service.NodeService/LoadBucketMetadata" => { #[allow(non_camel_case_types)] struct LoadBucketMetadataSvc(pub Arc); - impl tonic::server::UnaryService for LoadBucketMetadataSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadBucketMetadataSvc { type Response = super::LoadBucketMetadataResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_bucket_metadata(&inner, request).await }; + let fut = async move { + ::load_bucket_metadata(&inner, request) + .await + }; Box::pin(fut) } } @@ -4299,8 +6225,14 @@ pub mod node_service_server { let method = LoadBucketMetadataSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4309,12 +6241,24 @@ pub mod node_service_server { "/node_service.NodeService/DeleteBucketMetadata" => { #[allow(non_camel_case_types)] struct DeleteBucketMetadataSvc(pub Arc); - impl tonic::server::UnaryService for DeleteBucketMetadataSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for 
DeleteBucketMetadataSvc { type Response = super::DeleteBucketMetadataResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_bucket_metadata(&inner, request).await }; + let fut = async move { + ::delete_bucket_metadata(&inner, request) + .await + }; Box::pin(fut) } } @@ -4327,8 +6271,14 @@ pub mod node_service_server { let method = DeleteBucketMetadataSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4337,12 +6287,23 @@ pub mod node_service_server { "/node_service.NodeService/DeletePolicy" => { #[allow(non_camel_case_types)] struct DeletePolicySvc(pub Arc); - impl tonic::server::UnaryService for DeletePolicySvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeletePolicySvc { type Response = super::DeletePolicyResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_policy(&inner, request).await }; + let fut = async move { + ::delete_policy(&inner, request).await + }; Box::pin(fut) } } @@ -4355,8 +6316,14 @@ pub mod node_service_server { let method = DeletePolicySvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4365,12 +6332,23 @@ pub mod node_service_server { "/node_service.NodeService/LoadPolicy" => { #[allow(non_camel_case_types)] struct LoadPolicySvc(pub Arc); - impl tonic::server::UnaryService for LoadPolicySvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadPolicySvc { type Response = super::LoadPolicyResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_policy(&inner, request).await }; + let fut = async move { + ::load_policy(&inner, request).await + }; Box::pin(fut) } } @@ -4383,8 +6361,14 @@ pub mod node_service_server { let method = LoadPolicySvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, 
send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4393,12 +6377,24 @@ pub mod node_service_server { "/node_service.NodeService/LoadPolicyMapping" => { #[allow(non_camel_case_types)] struct LoadPolicyMappingSvc(pub Arc); - impl tonic::server::UnaryService for LoadPolicyMappingSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadPolicyMappingSvc { type Response = super::LoadPolicyMappingResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_policy_mapping(&inner, request).await }; + let fut = async move { + ::load_policy_mapping(&inner, request) + .await + }; Box::pin(fut) } } @@ -4411,8 +6407,14 @@ pub mod node_service_server { let method = LoadPolicyMappingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4421,12 +6423,23 @@ pub mod node_service_server { "/node_service.NodeService/DeleteUser" => { #[allow(non_camel_case_types)] struct DeleteUserSvc(pub Arc); - impl tonic::server::UnaryService for DeleteUserSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteUserSvc { type Response = super::DeleteUserResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_user(&inner, request).await }; + let fut = async move { + ::delete_user(&inner, request).await + }; Box::pin(fut) } } @@ -4439,8 +6452,14 @@ pub mod node_service_server { let method = DeleteUserSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4449,12 +6468,24 @@ pub mod node_service_server { "/node_service.NodeService/DeleteServiceAccount" => { #[allow(non_camel_case_types)] struct DeleteServiceAccountSvc(pub Arc); - impl tonic::server::UnaryService for DeleteServiceAccountSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteServiceAccountSvc { type Response = super::DeleteServiceAccountResponse; 
- type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_service_account(&inner, request).await }; + let fut = async move { + ::delete_service_account(&inner, request) + .await + }; Box::pin(fut) } } @@ -4467,8 +6498,14 @@ pub mod node_service_server { let method = DeleteServiceAccountSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4477,12 +6514,23 @@ pub mod node_service_server { "/node_service.NodeService/LoadUser" => { #[allow(non_camel_case_types)] struct LoadUserSvc(pub Arc); - impl tonic::server::UnaryService for LoadUserSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadUserSvc { type Response = super::LoadUserResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_user(&inner, request).await }; + let fut = async move { + ::load_user(&inner, request).await + }; Box::pin(fut) } } @@ -4495,8 +6543,14 @@ pub mod node_service_server { let method = LoadUserSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4505,12 +6559,24 @@ pub mod node_service_server { "/node_service.NodeService/LoadServiceAccount" => { #[allow(non_camel_case_types)] struct LoadServiceAccountSvc(pub Arc); - impl tonic::server::UnaryService for LoadServiceAccountSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadServiceAccountSvc { type Response = super::LoadServiceAccountResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_service_account(&inner, request).await }; + let fut = async move { + ::load_service_account(&inner, request) + .await + }; Box::pin(fut) } } @@ -4523,8 +6589,14 @@ pub mod node_service_server { let method = LoadServiceAccountSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - 
.apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4533,12 +6605,23 @@ pub mod node_service_server { "/node_service.NodeService/LoadGroup" => { #[allow(non_camel_case_types)] struct LoadGroupSvc(pub Arc); - impl tonic::server::UnaryService for LoadGroupSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadGroupSvc { type Response = super::LoadGroupResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_group(&inner, request).await }; + let fut = async move { + ::load_group(&inner, request).await + }; Box::pin(fut) } } @@ -4551,8 +6634,14 @@ pub mod node_service_server { let method = LoadGroupSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4561,14 +6650,30 @@ pub mod node_service_server { "/node_service.NodeService/ReloadSiteReplicationConfig" => { #[allow(non_camel_case_types)] struct ReloadSiteReplicationConfigSvc(pub Arc); - impl tonic::server::UnaryService - for ReloadSiteReplicationConfigSvc - { + impl< + T: NodeService, + > tonic::server::UnaryService< + super::ReloadSiteReplicationConfigRequest, + > for ReloadSiteReplicationConfigSvc { type Response = super::ReloadSiteReplicationConfigResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::ReloadSiteReplicationConfigRequest, + >, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::reload_site_replication_config(&inner, request).await }; + let fut = async move { + ::reload_site_replication_config( + &inner, + request, + ) + .await + }; Box::pin(fut) } } @@ -4581,8 +6686,14 @@ pub mod node_service_server { let method = ReloadSiteReplicationConfigSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4591,12 +6702,23 @@ pub mod node_service_server { "/node_service.NodeService/SignalService" => { #[allow(non_camel_case_types)] struct SignalServiceSvc(pub Arc); - impl tonic::server::UnaryService for SignalServiceSvc { + impl< 
+ T: NodeService, + > tonic::server::UnaryService + for SignalServiceSvc { type Response = super::SignalServiceResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::signal_service(&inner, request).await }; + let fut = async move { + ::signal_service(&inner, request).await + }; Box::pin(fut) } } @@ -4609,8 +6731,14 @@ pub mod node_service_server { let method = SignalServiceSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4619,12 +6747,24 @@ pub mod node_service_server { "/node_service.NodeService/BackgroundHealStatus" => { #[allow(non_camel_case_types)] struct BackgroundHealStatusSvc(pub Arc); - impl tonic::server::UnaryService for BackgroundHealStatusSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for BackgroundHealStatusSvc { type Response = super::BackgroundHealStatusResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::background_heal_status(&inner, request).await }; + let fut = async move { + ::background_heal_status(&inner, request) + .await + }; Box::pin(fut) } } @@ -4637,8 +6777,14 @@ pub mod node_service_server { let method = BackgroundHealStatusSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4647,12 +6793,24 @@ pub mod node_service_server { "/node_service.NodeService/GetMetacacheListing" => { #[allow(non_camel_case_types)] struct GetMetacacheListingSvc(pub Arc); - impl tonic::server::UnaryService for GetMetacacheListingSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetMetacacheListingSvc { type Response = super::GetMetacacheListingResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_metacache_listing(&inner, request).await }; + let fut = async move { + ::get_metacache_listing(&inner, request) + .await + }; Box::pin(fut) } } @@ -4665,8 +6823,14 @@ pub mod node_service_server { let method = 
GetMetacacheListingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4675,12 +6839,27 @@ pub mod node_service_server { "/node_service.NodeService/UpdateMetacacheListing" => { #[allow(non_camel_case_types)] struct UpdateMetacacheListingSvc(pub Arc); - impl tonic::server::UnaryService for UpdateMetacacheListingSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for UpdateMetacacheListingSvc { type Response = super::UpdateMetacacheListingResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::update_metacache_listing(&inner, request).await }; + let fut = async move { + ::update_metacache_listing( + &inner, + request, + ) + .await + }; Box::pin(fut) } } @@ -4693,8 +6872,14 @@ pub mod node_service_server { let method = UpdateMetacacheListingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4703,12 +6888,23 @@ pub mod node_service_server { "/node_service.NodeService/ReloadPoolMeta" => { #[allow(non_camel_case_types)] struct ReloadPoolMetaSvc(pub Arc); - impl tonic::server::UnaryService for ReloadPoolMetaSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ReloadPoolMetaSvc { type Response = super::ReloadPoolMetaResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::reload_pool_meta(&inner, request).await }; + let fut = async move { + ::reload_pool_meta(&inner, request).await + }; Box::pin(fut) } } @@ -4721,8 +6917,14 @@ pub mod node_service_server { let method = ReloadPoolMetaSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4731,12 +6933,23 @@ pub mod node_service_server { "/node_service.NodeService/StopRebalance" => { 
#[allow(non_camel_case_types)] struct StopRebalanceSvc(pub Arc); - impl tonic::server::UnaryService for StopRebalanceSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for StopRebalanceSvc { type Response = super::StopRebalanceResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::stop_rebalance(&inner, request).await }; + let fut = async move { + ::stop_rebalance(&inner, request).await + }; Box::pin(fut) } } @@ -4749,8 +6962,14 @@ pub mod node_service_server { let method = StopRebalanceSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4759,12 +6978,24 @@ pub mod node_service_server { "/node_service.NodeService/LoadRebalanceMeta" => { #[allow(non_camel_case_types)] struct LoadRebalanceMetaSvc(pub Arc); - impl tonic::server::UnaryService for LoadRebalanceMetaSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadRebalanceMetaSvc { type Response = super::LoadRebalanceMetaResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_rebalance_meta(&inner, request).await }; + let fut = async move { + ::load_rebalance_meta(&inner, request) + .await + }; Box::pin(fut) } } @@ -4777,8 +7008,14 @@ pub mod node_service_server { let method = LoadRebalanceMetaSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4787,12 +7024,29 @@ pub mod node_service_server { "/node_service.NodeService/LoadTransitionTierConfig" => { #[allow(non_camel_case_types)] struct LoadTransitionTierConfigSvc(pub Arc); - impl tonic::server::UnaryService for LoadTransitionTierConfigSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadTransitionTierConfigSvc { type Response = super::LoadTransitionTierConfigResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::LoadTransitionTierConfigRequest, + >, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_transition_tier_config(&inner, request).await }; + let 
fut = async move { + ::load_transition_tier_config( + &inner, + request, + ) + .await + }; Box::pin(fut) } } @@ -4805,20 +7059,36 @@ pub mod node_service_server { let method = LoadTransitionTierConfigSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } - _ => Box::pin(async move { - let mut response = http::Response::new(empty_body()); - let headers = response.headers_mut(); - headers.insert(tonic::Status::GRPC_STATUS, (tonic::Code::Unimplemented as i32).into()); - headers.insert(http::header::CONTENT_TYPE, tonic::metadata::GRPC_CONTENT_TYPE); - Ok(response) - }), + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } } } }
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 00000000..5c68534b
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,74 @@
+version: '3.8'
+
+services:
+  node1:
+    image: rustfs:v1 # replace with your image name and tag
+    container_name: node1
+    environment:
+      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
+      - RUSTFS_ADDRESS=0.0.0.0:9000
+      - RUSTFS_CONSOLE_ENABLE=true
+      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
+    platform: linux/amd64
+    ports:
+      - "9001:9000" # map host port 9001 to container port 9000
+    volumes:
+      - ..:/root/data # mount the current path into the container at /root/data
+    command: "/root/rustfs"
+    networks:
+      - my_network
+
+  node2:
+    image: rustfs:v1
+    container_name: node2
+    environment:
+      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
+      - RUSTFS_ADDRESS=0.0.0.0:9000
+      - RUSTFS_CONSOLE_ENABLE=true
+      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
+    platform: linux/amd64
+    ports:
+      - "9002:9000" # map host port 9002 to container port 9000
+    volumes:
+      - ..:/root/data
+    command: sh -c "mkdir -p /root/data/target/volume/test1 /root/data/target/volume/test2 /root/data/target/volume/test3 /root/data/target/volume/test4 && /root/data/target/ubuntu22.04/release/rustfs"
+    networks:
+      - my_network
+
+  node3:
+    image: rustfs:v1
+    container_name: node3
+    environment:
+      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
+      - RUSTFS_ADDRESS=0.0.0.0:9000
+      - RUSTFS_CONSOLE_ENABLE=true
+      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
+    platform: linux/amd64
+    ports:
+      - "9003:9000" # map host port 9003 to container port 9000
+    volumes:
+      - ..:/root/data
+    command: sh -c "mkdir -p /root/data/target/volume/test1 /root/data/target/volume/test2 /root/data/target/volume/test3 /root/data/target/volume/test4 && /root/data/target/ubuntu22.04/release/rustfs"
+    networks:
+      - my_network
+
+  node4:
+    image: rustfs:v1
+    container_name: node4
+    environment:
+      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
+      - RUSTFS_ADDRESS=0.0.0.0:9000
+      - RUSTFS_CONSOLE_ENABLE=true
+      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
+    platform: linux/amd64
+    ports:
+      - "9004:9000" # map host port 9004 to container port 9000
+    volumes:
+      - ..:/root/data
+    command: sh -c "mkdir -p /root/data/target/volume/test1 /root/data/target/volume/test2 /root/data/target/volume/test3 /root/data/target/volume/test4 && /root/data/target/ubuntu22.04/release/rustfs"
+    networks:
+      - my_network
+
+networks:
+  my_network:
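+    # All four node services above form a single-host test cluster: each container
+    # serves on port 9000 internally (published as host ports 9001-9004), and this
+    # shared bridge network lets the node{1...4} hostnames in RUSTFS_VOLUMES resolve
+    # between containers.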
driver: bridge \ No newline at end of file diff --git a/ecstore/Cargo.toml b/ecstore/Cargo.toml index 7ddc751d..ab3bd5aa 100644 --- a/ecstore/Cargo.toml +++ b/ecstore/Cargo.toml @@ -67,6 +67,7 @@ md-5.workspace = true madmin.workspace = true workers.workspace = true reqwest = { workspace = true } +urlencoding = "2.1.3" [target.'cfg(not(windows))'.dependencies] diff --git a/ecstore/src/bitrot.rs b/ecstore/src/bitrot.rs index 86a92ada..21ceb5b6 100644 --- a/ecstore/src/bitrot.rs +++ b/ecstore/src/bitrot.rs @@ -1,5 +1,9 @@ use crate::{ - disk::{error::DiskError, BufferReader, Disk, DiskAPI, DiskStore, FileReader, FileWriter}, + disk::{ + error::DiskError, + io::{FileReader, FileWriter}, + Disk, DiskAPI, + }, erasure::{ReadAt, Writer}, error::{Error, Result}, store_api::BitrotAlgorithm, @@ -9,13 +13,8 @@ use blake2::Digest as _; use highway::{HighwayHash, HighwayHasher, Key}; use lazy_static::lazy_static; use sha2::{digest::core_api::BlockSizeUser, Digest, Sha256}; -use std::{any::Any, collections::HashMap, sync::Arc}; -use tokio::{ - io::AsyncReadExt as _, - spawn, - sync::mpsc::{self, Sender}, - task::JoinHandle, -}; +use std::{any::Any, collections::HashMap, io::Cursor, sync::Arc}; +use tokio::io::{AsyncReadExt as _, AsyncWriteExt}; use tracing::{error, info}; lazy_static! { @@ -145,22 +144,22 @@ pub fn bitrot_algorithm_from_string(s: &str) -> BitrotAlgorithm { pub type BitrotWriter = Box; -pub async fn new_bitrot_writer( - disk: DiskStore, - orig_volume: &str, - volume: &str, - file_path: &str, - length: usize, - algo: BitrotAlgorithm, - shard_size: usize, -) -> Result { - if algo == BitrotAlgorithm::HighwayHash256S { - return Ok(Box::new( - StreamingBitrotWriter::new(disk, orig_volume, volume, file_path, length, algo, shard_size).await?, - )); - } - Ok(Box::new(WholeBitrotWriter::new(disk, volume, file_path, algo, shard_size))) -} +// pub async fn new_bitrot_writer( +// disk: DiskStore, +// orig_volume: &str, +// volume: &str, +// file_path: &str, +// length: usize, +// algo: BitrotAlgorithm, +// shard_size: usize, +// ) -> Result { +// if algo == BitrotAlgorithm::HighwayHash256S { +// return Ok(Box::new( +// StreamingBitrotWriter::new(disk, orig_volume, volume, file_path, length, algo, shard_size).await?, +// )); +// } +// Ok(Box::new(WholeBitrotWriter::new(disk, volume, file_path, algo, shard_size))) +// } pub type BitrotReader = Box; @@ -189,13 +188,13 @@ pub async fn close_bitrot_writers(writers: &mut [Option]) -> Resul Ok(()) } -pub fn bitrot_writer_sum(w: &BitrotWriter) -> Vec { - if let Some(w) = w.as_any().downcast_ref::() { - return w.hash.clone().finalize(); - } +// pub fn bitrot_writer_sum(w: &BitrotWriter) -> Vec { +// if let Some(w) = w.as_any().downcast_ref::() { +// return w.hash.clone().finalize(); +// } - Vec::new() -} +// Vec::new() +// } pub fn bitrot_shard_file_size(size: usize, shard_size: usize, algo: BitrotAlgorithm) -> usize { if algo != BitrotAlgorithm::HighwayHash256S { @@ -260,40 +259,40 @@ pub async fn bitrot_verify( Ok(()) } -pub struct WholeBitrotWriter { - disk: DiskStore, - volume: String, - file_path: String, - _shard_size: usize, - pub hash: Hasher, -} +// pub struct WholeBitrotWriter { +// disk: DiskStore, +// volume: String, +// file_path: String, +// _shard_size: usize, +// pub hash: Hasher, +// } -impl WholeBitrotWriter { - pub fn new(disk: DiskStore, volume: &str, file_path: &str, algo: BitrotAlgorithm, shard_size: usize) -> Self { - WholeBitrotWriter { - disk, - volume: volume.to_string(), - file_path: file_path.to_string(), - _shard_size: 
shard_size, - hash: algo.new_hasher(), - } - } -} +// impl WholeBitrotWriter { +// pub fn new(disk: DiskStore, volume: &str, file_path: &str, algo: BitrotAlgorithm, shard_size: usize) -> Self { +// WholeBitrotWriter { +// disk, +// volume: volume.to_string(), +// file_path: file_path.to_string(), +// _shard_size: shard_size, +// hash: algo.new_hasher(), +// } +// } +// } -#[async_trait::async_trait] -impl Writer for WholeBitrotWriter { - fn as_any(&self) -> &dyn Any { - self - } +// #[async_trait::async_trait] +// impl Writer for WholeBitrotWriter { +// fn as_any(&self) -> &dyn Any { +// self +// } - async fn write(&mut self, buf: &[u8]) -> Result<()> { - let mut file = self.disk.append_file(&self.volume, &self.file_path).await?; - let _ = file.write(buf).await?; - self.hash.update(buf); +// async fn write(&mut self, buf: &[u8]) -> Result<()> { +// let mut file = self.disk.append_file(&self.volume, &self.file_path).await?; +// let _ = file.write(buf).await?; +// self.hash.update(buf); - Ok(()) - } -} +// Ok(()) +// } +// } // #[derive(Debug)] // pub struct WholeBitrotReader { @@ -344,74 +343,74 @@ impl Writer for WholeBitrotWriter { // } // } -struct StreamingBitrotWriter { - hasher: Hasher, - tx: Sender>>, - task: Option>, -} +// struct StreamingBitrotWriter { +// hasher: Hasher, +// tx: Sender>>, +// task: Option>, +// } -impl StreamingBitrotWriter { - pub async fn new( - disk: DiskStore, - orig_volume: &str, - volume: &str, - file_path: &str, - length: usize, - algo: BitrotAlgorithm, - shard_size: usize, - ) -> Result { - let hasher = algo.new_hasher(); - let (tx, mut rx) = mpsc::channel::>>(10); +// impl StreamingBitrotWriter { +// pub async fn new( +// disk: DiskStore, +// orig_volume: &str, +// volume: &str, +// file_path: &str, +// length: usize, +// algo: BitrotAlgorithm, +// shard_size: usize, +// ) -> Result { +// let hasher = algo.new_hasher(); +// let (tx, mut rx) = mpsc::channel::>>(10); - let total_file_size = length.div_ceil(shard_size) * hasher.size() + length; - let mut writer = disk.create_file(orig_volume, volume, file_path, total_file_size).await?; +// let total_file_size = length.div_ceil(shard_size) * hasher.size() + length; +// let mut writer = disk.create_file(orig_volume, volume, file_path, total_file_size).await?; - let task = spawn(async move { - loop { - if let Some(Some(buf)) = rx.recv().await { - writer.write(&buf).await.unwrap(); - continue; - } +// let task = spawn(async move { +// loop { +// if let Some(Some(buf)) = rx.recv().await { +// writer.write(&buf).await.unwrap(); +// continue; +// } - break; - } - }); +// break; +// } +// }); - Ok(StreamingBitrotWriter { - hasher, - tx, - task: Some(task), - }) - } -} +// Ok(StreamingBitrotWriter { +// hasher, +// tx, +// task: Some(task), +// }) +// } +// } -#[async_trait::async_trait] -impl Writer for StreamingBitrotWriter { - fn as_any(&self) -> &dyn Any { - self - } +// #[async_trait::async_trait] +// impl Writer for StreamingBitrotWriter { +// fn as_any(&self) -> &dyn Any { +// self +// } - async fn write(&mut self, buf: &[u8]) -> Result<()> { - if buf.is_empty() { - return Ok(()); - } - self.hasher.reset(); - self.hasher.update(buf); - let hash_bytes = self.hasher.clone().finalize(); - let _ = self.tx.send(Some(hash_bytes)).await?; - let _ = self.tx.send(Some(buf.to_vec())).await?; +// async fn write(&mut self, buf: &[u8]) -> Result<()> { +// if buf.is_empty() { +// return Ok(()); +// } +// self.hasher.reset(); +// self.hasher.update(buf); +// let hash_bytes = self.hasher.clone().finalize(); +// let _ = 
self.tx.send(Some(hash_bytes)).await?; +// let _ = self.tx.send(Some(buf.to_vec())).await?; - Ok(()) - } +// Ok(()) +// } - async fn close(&mut self) -> Result<()> { - let _ = self.tx.send(None).await?; - if let Some(task) = self.task.take() { - let _ = task.await; // 等待任务完成 - } - Ok(()) - } -} +// async fn close(&mut self) -> Result<()> { +// let _ = self.tx.send(None).await?; +// if let Some(task) = self.task.take() { +// let _ = task.await; // 等待任务完成 +// } +// Ok(()) +// } +// } // #[derive(Debug)] // struct StreamingBitrotReader { @@ -522,8 +521,8 @@ impl Writer for BitrotFileWriter { self.hasher.reset(); self.hasher.update(buf); let hash_bytes = self.hasher.clone().finalize(); - let _ = self.inner.write(&hash_bytes).await?; - let _ = self.inner.write(buf).await?; + let _ = self.inner.write_all(&hash_bytes).await?; + let _ = self.inner.write_all(buf).await?; Ok(()) } @@ -600,11 +599,7 @@ impl ReadAt for BitrotFileReader { let stream_offset = (offset / self.shard_size) * self.hasher.size() + offset; if let Some(data) = self.data.clone() { - self.reader = Some(FileReader::Buffer(BufferReader::new( - data, - stream_offset, - self.till_offset - stream_offset, - ))); + self.reader = Some(FileReader::Buffer(Cursor::new(data))); } else { self.reader = Some( self.disk diff --git a/ecstore/src/disk/io.rs b/ecstore/src/disk/io.rs new file mode 100644 index 00000000..981c6ddd --- /dev/null +++ b/ecstore/src/disk/io.rs @@ -0,0 +1,229 @@ +use crate::error::Result; +use futures::TryStreamExt; +use std::io::Cursor; +use std::pin::Pin; +use std::task::Poll; +use tokio::fs::File; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio_util::io::ReaderStream; +use tokio_util::io::StreamReader; +use tracing::error; +use tracing::warn; + +#[derive(Debug)] +pub enum FileReader { + Local(File), + // Remote(RemoteFileReader), + Buffer(Cursor>), + Http(HttpFileReader), +} + +impl AsyncRead for FileReader { + #[tracing::instrument(level = "debug", skip(self, buf))] + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + match &mut *self { + Self::Local(reader) => Pin::new(reader).poll_read(cx, buf), + Self::Buffer(reader) => Pin::new(reader).poll_read(cx, buf), + Self::Http(reader) => Pin::new(reader).poll_read(cx, buf), + } + } +} + +#[derive(Debug)] +pub struct HttpFileReader { + // client: reqwest::Client, + // url: String, + // disk: String, + // volume: String, + // path: String, + // offset: usize, + // length: usize, + inner: tokio::io::DuplexStream, + // buf: Vec, + // pos: usize, +} + +impl HttpFileReader { + pub fn new(url: &str, disk: &str, volume: &str, path: &str, offset: usize, length: usize) -> Result { + warn!("http read start {}", path); + let url = url.to_owned(); + let disk = disk.to_owned(); + let volume = volume.to_owned(); + let path = path.to_owned(); + + // let (reader, mut writer) = tokio::io::simplex(1024); + let (reader, mut writer) = tokio::io::duplex(1024 * 1024 * 10); + + tokio::spawn(async move { + let client = reqwest::Client::new(); + let resp = match client + .get(format!( + "{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}", + url, + urlencoding::encode(&disk), + urlencoding::encode(&volume), + urlencoding::encode(&path), + offset, + length + )) + .send() + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) + { + Ok(resp) => resp, + Err(err) => { + warn!("http file reader error: {}", err); + return; + } + }; + + let mut rd = StreamReader::new( + 
resp.bytes_stream() + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)), + ); + + if let Err(err) = tokio::io::copy(&mut rd, &mut writer).await { + error!("http file reader copy error: {}", err); + }; + }); + Ok(Self { + // client: reqwest::Client::new(), + // url: url.to_string(), + // disk: disk.to_string(), + // volume: volume.to_string(), + // path: path.to_string(), + // offset, + // length, + inner: reader, + // buf: Vec::new(), + // pos: 0, + }) + } +} + +impl AsyncRead for HttpFileReader { + #[tracing::instrument(level = "debug", skip(self, buf))] + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + Pin::new(&mut self.inner).poll_read(cx, buf) + } +} + +#[derive(Debug)] +pub enum FileWriter { + Local(File), + Http(HttpFileWriter), + Buffer(Cursor>), +} + +impl AsyncWrite for FileWriter { + #[tracing::instrument(level = "debug", skip(self, buf))] + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> Poll> { + match &mut *self { + Self::Local(writer) => Pin::new(writer).poll_write(cx, buf), + Self::Buffer(writer) => Pin::new(writer).poll_write(cx, buf), + Self::Http(writer) => Pin::new(writer).poll_write(cx, buf), + } + } + + #[tracing::instrument(level = "debug", skip(self))] + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { + match &mut *self { + Self::Local(writer) => Pin::new(writer).poll_flush(cx), + Self::Buffer(writer) => Pin::new(writer).poll_flush(cx), + Self::Http(writer) => Pin::new(writer).poll_flush(cx), + } + } + + #[tracing::instrument(level = "debug", skip(self))] + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { + match &mut *self { + Self::Local(writer) => Pin::new(writer).poll_shutdown(cx), + Self::Buffer(writer) => Pin::new(writer).poll_shutdown(cx), + Self::Http(writer) => Pin::new(writer).poll_shutdown(cx), + } + } +} + +#[derive(Debug)] +pub struct HttpFileWriter { + wd: tokio::io::WriteHalf, +} + +impl HttpFileWriter { + pub fn new(url: &str, disk: &str, volume: &str, path: &str, size: usize, append: bool) -> Result { + let (rd, wd) = tokio::io::simplex(1024 * 1024 * 10); + + let body = reqwest::Body::wrap_stream(ReaderStream::new(rd)); + + let url = url.to_owned(); + let disk = disk.to_owned(); + let volume = volume.to_owned(); + let path = path.to_owned(); + + tokio::spawn(async move { + let client = reqwest::Client::new(); + if let Err(err) = client + .put(format!( + "{}/rustfs/rpc/put_file_stream?disk={}&volume={}&path={}&append={}&size={}", + url, + urlencoding::encode(&disk), + urlencoding::encode(&volume), + urlencoding::encode(&path), + append, + size + )) + .body(body) + .send() + .await + { + error!("HttpFileWriter put file err: {:?}", err); + // return; + } + + // TODO: handle response + + // debug!("http write done {}", path); + }); + + Ok(Self { + wd, + // client: reqwest::Client::new(), + // url: url.to_string(), + // disk: disk.to_string(), + // volume: volume.to_string(), + }) + } +} + +impl AsyncWrite for HttpFileWriter { + #[tracing::instrument(level = "debug", skip(self, buf))] + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.wd).poll_write(cx, buf) + } + + #[tracing::instrument(level = "debug", skip(self))] + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { + Pin::new(&mut self.wd).poll_flush(cx) + } + + 
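+    // Note: shutting down this writer closes the write half of the simplex pipe;
+    // the ReaderStream over the read half then ends, which terminates the streamed
+    // request body and lets the background PUT task finish. Until then, the 10 MiB
+    // pipe buffer provides backpressure between the caller and the HTTP upload.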
#[tracing::instrument(level = "debug", skip(self))] + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { + Pin::new(&mut self.wd).poll_shutdown(cx) + } +} diff --git a/ecstore/src/disk/local.rs b/ecstore/src/disk/local.rs index a1f1e0c7..013be96a 100644 --- a/ecstore/src/disk/local.rs +++ b/ecstore/src/disk/local.rs @@ -17,7 +17,7 @@ use crate::disk::error::{ is_sys_err_not_dir, map_err_not_exists, os_err_to_file_err, }; use crate::disk::os::{check_path_length, is_empty_dir}; -use crate::disk::{LocalFileReader, LocalFileWriter, STORAGE_FORMAT_FILE}; +use crate::disk::STORAGE_FORMAT_FILE; use crate::error::{Error, Result}; use crate::file_meta::{get_file_info, read_xl_meta_no_data, FileInfoOpts}; use crate::global::{GLOBAL_IsErasureSD, GLOBAL_RootDiskThreshold}; @@ -745,15 +745,7 @@ impl LocalDisk { let meta = file.metadata().await?; - bitrot_verify( - FileReader::Local(LocalFileReader::new(file)), - meta.size() as usize, - part_size, - algo, - sum.to_vec(), - shard_size, - ) - .await + bitrot_verify(FileReader::Local(file), meta.size() as usize, part_size, algo, sum.to_vec(), shard_size).await } async fn scan_dir( @@ -1297,6 +1289,7 @@ impl DiskAPI for LocalDisk { Ok(resp) } + #[tracing::instrument(level = "debug", skip(self))] async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Vec) -> Result<()> { let src_volume_dir = self.get_bucket_path(src_volume)?; let dst_volume_dir = self.get_bucket_path(dst_volume)?; @@ -1311,12 +1304,18 @@ impl DiskAPI for LocalDisk { let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR); if !src_is_dir && dst_is_dir || src_is_dir && !dst_is_dir { + warn!( + "rename_part src and dst must be both dir or file src_is_dir:{}, dst_is_dir:{}", + src_is_dir, dst_is_dir + ); return Err(Error::from(DiskError::FileAccessDenied)); } let src_file_path = src_volume_dir.join(Path::new(src_path)); let dst_file_path = dst_volume_dir.join(Path::new(dst_path)); + warn!("rename_part src_file_path:{:?}, dst_file_path:{:?}", &src_file_path, &dst_file_path); + check_path_length(src_file_path.to_string_lossy().as_ref())?; check_path_length(dst_file_path.to_string_lossy().as_ref())?; @@ -1337,12 +1336,14 @@ impl DiskAPI for LocalDisk { if let Some(meta) = meta_op { if !meta.is_dir() { + warn!("rename_part src is not dir {:?}", &src_file_path); return Err(Error::new(DiskError::FileAccessDenied)); } } if let Err(e) = utils::fs::remove(&dst_file_path).await { if is_sys_err_not_empty(&e) || is_sys_err_not_dir(&e) { + warn!("rename_part remove dst failed {:?} err {:?}", &dst_file_path, e); return Err(Error::new(DiskError::FileAccessDenied)); } else if is_sys_err_io(&e) { return Err(Error::new(DiskError::FaultyDisk)); @@ -1355,6 +1356,7 @@ impl DiskAPI for LocalDisk { if let Err(err) = os::rename_all(&src_file_path, &dst_file_path, &dst_volume_dir).await { if let Some(e) = err.to_io_err() { if is_sys_err_not_empty(&e) || is_sys_err_not_dir(&e) { + warn!("rename_part rename all failed {:?} err {:?}", &dst_file_path, e); return Err(Error::new(DiskError::FileAccessDenied)); } @@ -1467,8 +1469,10 @@ impl DiskAPI for LocalDisk { Ok(()) } - // TODO: use io.reader + #[tracing::instrument(level = "debug", skip(self))] async fn create_file(&self, origvolume: &str, volume: &str, path: &str, _file_size: usize) -> Result { + warn!("disk create_file: origvolume: {}, volume: {}, path: {}", origvolume, volume, path); + if !origvolume.is_empty() { let origvolume_dir = self.get_bucket_path(origvolume)?; if 
!skip_access_checks(origvolume) { @@ -1491,12 +1495,16 @@ impl DiskAPI for LocalDisk { .await .map_err(os_err_to_file_err)?; - Ok(FileWriter::Local(LocalFileWriter::new(f))) + Ok(FileWriter::Local(f)) // Ok(()) } + + #[tracing::instrument(level = "debug", skip(self))] // async fn append_file(&self, volume: &str, path: &str, mut r: DuplexStream) -> Result { async fn append_file(&self, volume: &str, path: &str) -> Result { + warn!("disk append_file: volume: {}, path: {}", volume, path); + let volume_dir = self.get_bucket_path(volume)?; if !skip_access_checks(volume) { if let Err(e) = utils::fs::access(&volume_dir).await { @@ -1509,11 +1517,13 @@ impl DiskAPI for LocalDisk { let f = self.open_file(file_path, O_CREATE | O_APPEND | O_WRONLY, volume_dir).await?; - Ok(FileWriter::Local(LocalFileWriter::new(f))) + Ok(FileWriter::Local(f)) } // TODO: io verifier + #[tracing::instrument(level = "debug", skip(self))] async fn read_file(&self, volume: &str, path: &str) -> Result { + warn!("disk read_file: volume: {}, path: {}", volume, path); let volume_dir = self.get_bucket_path(volume)?; if !skip_access_checks(volume) { if let Err(e) = utils::fs::access(&volume_dir).await { @@ -1542,10 +1552,16 @@ impl DiskAPI for LocalDisk { } })?; - Ok(FileReader::Local(LocalFileReader::new(f))) + Ok(FileReader::Local(f)) } + #[tracing::instrument(level = "debug", skip(self))] async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result { + warn!( + "disk read_file_stream: volume: {}, path: {}, offset: {}, length: {}", + volume, path, offset, length + ); + let volume_dir = self.get_bucket_path(volume)?; if !skip_access_checks(volume) { if let Err(e) = utils::fs::access(&volume_dir).await { @@ -1587,7 +1603,7 @@ impl DiskAPI for LocalDisk { f.seek(SeekFrom::Start(offset as u64)).await?; - Ok(FileReader::Local(LocalFileReader::new(f))) + Ok(FileReader::Local(f)) } #[tracing::instrument(level = "debug", skip(self))] async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result> { diff --git a/ecstore/src/disk/mod.rs b/ecstore/src/disk/mod.rs index b1d341d7..dcf719f2 100644 --- a/ecstore/src/disk/mod.rs +++ b/ecstore/src/disk/mod.rs @@ -1,6 +1,7 @@ pub mod endpoint; pub mod error; pub mod format; +pub mod io; pub mod local; pub mod os; pub mod remote; @@ -14,10 +15,8 @@ pub const FORMAT_CONFIG_FILE: &str = "format.json"; pub const STORAGE_FORMAT_FILE: &str = "xl.meta"; pub const STORAGE_FORMAT_FILE_BACKUP: &str = "xl.meta.bkp"; -use crate::utils::proto_err_to_err; use crate::{ bucket::{metadata_sys::get_versioning_config, versioning::VersioningApi}, - erasure::Writer, error::{Error, Result}, file_meta::{merge_file_meta_versions, FileMeta, FileMetaShallowVersion, VersionType}, heal::{ @@ -28,28 +27,16 @@ use crate::{ store_api::{FileInfo, ObjectInfo, RawFileInfo}, utils::path::SLASH_SEPARATOR, }; - use endpoint::Endpoint; use error::DiskError; -use futures::StreamExt; +use io::{FileReader, FileWriter}; use local::LocalDisk; use madmin::info_commands::DiskMetrics; -use protos::proto_gen::node_service::{node_service_client::NodeServiceClient, WriteRequest, WriteResponse}; use remote::RemoteDisk; use serde::{Deserialize, Serialize}; -use std::io::Read as _; -use std::pin::Pin; -use std::task::Poll; -use std::{any::Any, cmp::Ordering, fmt::Debug, io::Cursor, path::PathBuf, sync::Arc}; +use std::{cmp::Ordering, fmt::Debug, path::PathBuf, sync::Arc}; use time::OffsetDateTime; -use tokio::io::AsyncRead; -use tokio::{ - fs::File, - io::{AsyncWrite, 
AsyncWriteExt}, - sync::mpsc::{self, Sender}, -}; -use tokio_stream::wrappers::ReceiverStream; -use tonic::{service::interceptor::InterceptedService, transport::Channel, Request, Status, Streaming}; +use tokio::{io::AsyncWrite, sync::mpsc::Sender}; use tracing::info; use tracing::warn; use uuid::Uuid; @@ -1256,164 +1243,142 @@ pub struct ReadOptions { // } // } -#[derive(Debug)] -pub enum FileWriter { - Local(LocalFileWriter), - Remote(RemoteFileWriter), - Buffer(BufferWriter), -} +// #[derive(Debug)] +// pub struct BufferWriter { +// pub inner: Vec, +// } -#[async_trait::async_trait] -impl Writer for FileWriter { - fn as_any(&self) -> &dyn Any { - self - } +// impl BufferWriter { +// pub fn new(inner: Vec) -> Self { +// Self { inner } +// } +// #[allow(clippy::should_implement_trait)] +// pub fn as_ref(&self) -> &[u8] { +// self.inner.as_ref() +// } +// } - async fn write(&mut self, buf: &[u8]) -> Result<()> { - match self { - Self::Local(writer) => writer.write(buf).await, - Self::Remote(writter) => writter.write(buf).await, - Self::Buffer(writer) => writer.write(buf).await, - } - } -} +// #[async_trait::async_trait] +// impl Writer for BufferWriter { +// fn as_any(&self) -> &dyn Any { +// self +// } -#[derive(Debug)] -pub struct BufferWriter { - pub inner: Vec, -} +// async fn write(&mut self, buf: &[u8]) -> Result<()> { +// let _ = self.inner.write(buf).await?; +// self.inner.flush().await?; -impl BufferWriter { - pub fn new(inner: Vec) -> Self { - Self { inner } - } - #[allow(clippy::should_implement_trait)] - pub fn as_ref(&self) -> &[u8] { - self.inner.as_ref() - } -} +// Ok(()) +// } +// } -#[async_trait::async_trait] -impl Writer for BufferWriter { - fn as_any(&self) -> &dyn Any { - self - } +// #[derive(Debug)] +// pub struct LocalFileWriter { +// pub inner: File, +// } - async fn write(&mut self, buf: &[u8]) -> Result<()> { - let _ = self.inner.write(buf).await?; - self.inner.flush().await?; +// impl LocalFileWriter { +// pub fn new(inner: File) -> Self { +// Self { inner } +// } +// } - Ok(()) - } -} +// #[async_trait::async_trait] +// impl Writer for LocalFileWriter { +// fn as_any(&self) -> &dyn Any { +// self +// } -#[derive(Debug)] -pub struct LocalFileWriter { - pub inner: File, -} +// async fn write(&mut self, buf: &[u8]) -> Result<()> { +// let _ = self.inner.write(buf).await?; +// self.inner.flush().await?; -impl LocalFileWriter { - pub fn new(inner: File) -> Self { - Self { inner } - } -} +// Ok(()) +// } +// } -#[async_trait::async_trait] -impl Writer for LocalFileWriter { - fn as_any(&self) -> &dyn Any { - self - } +// type NodeClient = NodeServiceClient< +// InterceptedService) -> Result, Status> + Send + Sync + 'static>>, +// >; - async fn write(&mut self, buf: &[u8]) -> Result<()> { - let _ = self.inner.write(buf).await?; - self.inner.flush().await?; +// #[derive(Debug)] +// pub struct RemoteFileWriter { +// pub endpoint: Endpoint, +// pub volume: String, +// pub path: String, +// pub is_append: bool, +// tx: Sender, +// resp_stream: Streaming, +// } - Ok(()) - } -} +// impl RemoteFileWriter { +// pub async fn new(endpoint: Endpoint, volume: String, path: String, is_append: bool, mut client: NodeClient) -> Result { +// let (tx, rx) = mpsc::channel(128); +// let in_stream = ReceiverStream::new(rx); -type NodeClient = NodeServiceClient< - InterceptedService) -> Result, Status> + Send + Sync + 'static>>, ->; +// let response = client.write_stream(in_stream).await.unwrap(); -#[derive(Debug)] -pub struct RemoteFileWriter { - pub endpoint: Endpoint, - pub volume: 
String, - pub path: String, - pub is_append: bool, - tx: Sender, - resp_stream: Streaming, -} +// let resp_stream = response.into_inner(); -impl RemoteFileWriter { - pub async fn new(endpoint: Endpoint, volume: String, path: String, is_append: bool, mut client: NodeClient) -> Result { - let (tx, rx) = mpsc::channel(128); - let in_stream = ReceiverStream::new(rx); +// Ok(Self { +// endpoint, +// volume, +// path, +// is_append, +// tx, +// resp_stream, +// }) +// } +// } - let response = client.write_stream(in_stream).await.unwrap(); +// #[async_trait::async_trait] +// impl Writer for RemoteFileWriter { +// fn as_any(&self) -> &dyn Any { +// self +// } - let resp_stream = response.into_inner(); +// async fn write(&mut self, buf: &[u8]) -> Result<()> { +// let request = WriteRequest { +// disk: self.endpoint.to_string(), +// volume: self.volume.to_string(), +// path: self.path.to_string(), +// is_append: self.is_append, +// data: buf.to_vec(), +// }; +// self.tx.send(request).await?; - Ok(Self { - endpoint, - volume, - path, - is_append, - tx, - resp_stream, - }) - } -} +// if let Some(resp) = self.resp_stream.next().await { +// // match resp { +// // Ok(resp) => { +// // if resp.success { +// // info!("write stream success"); +// // } else { +// // info!("write stream failed: {}", resp.error_info.unwrap_or("".to_string())); +// // } +// // } +// // Err(_err) => { -#[async_trait::async_trait] -impl Writer for RemoteFileWriter { - fn as_any(&self) -> &dyn Any { - self - } +// // } +// // } +// let resp = resp?; +// if resp.success { +// info!("write stream success"); +// } else { +// return if let Some(err) = &resp.error { +// Err(proto_err_to_err(err)) +// } else { +// Err(Error::from_string("")) +// }; +// } +// } else { +// let error_info = "can not get response"; +// info!("write stream failed: {}", error_info); +// return Err(Error::from_string(error_info)); +// } - async fn write(&mut self, buf: &[u8]) -> Result<()> { - let request = WriteRequest { - disk: self.endpoint.to_string(), - volume: self.volume.to_string(), - path: self.path.to_string(), - is_append: self.is_append, - data: buf.to_vec(), - }; - self.tx.send(request).await?; - - if let Some(resp) = self.resp_stream.next().await { - // match resp { - // Ok(resp) => { - // if resp.success { - // info!("write stream success"); - // } else { - // info!("write stream failed: {}", resp.error_info.unwrap_or("".to_string())); - // } - // } - // Err(_err) => { - - // } - // } - let resp = resp?; - if resp.success { - info!("write stream success"); - } else { - return if let Some(err) = &resp.error { - Err(proto_err_to_err(err)) - } else { - Err(Error::from_string("")) - }; - } - } else { - let error_info = "can not get response"; - info!("write stream failed: {}", error_info); - return Err(Error::from_string(error_info)); - } - - Ok(()) - } -} +// Ok(()) +// } +// } // #[async_trait::async_trait] // pub trait Reader { @@ -1422,29 +1387,6 @@ impl Writer for RemoteFileWriter { // // async fn read_exact(&mut self, buf: &mut [u8]) -> Result; // } -#[derive(Debug)] -pub enum FileReader { - Local(LocalFileReader), - // Remote(RemoteFileReader), - Buffer(BufferReader), - Http(HttpFileReader), -} - -impl AsyncRead for FileReader { - #[tracing::instrument(level = "debug", skip(self, buf))] - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> std::task::Poll> { - match &mut *self { - Self::Local(reader) => Pin::new(&mut reader.inner).poll_read(cx, buf), - Self::Buffer(reader) => 
Pin::new(&mut reader.inner).poll_read(cx, buf), - Self::Http(reader) => Pin::new(reader).poll_read(cx, buf), - } - } -} - // #[async_trait::async_trait] // impl Reader for FileReader { // async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { @@ -1471,44 +1413,44 @@ impl AsyncRead for FileReader { // // } // } -#[derive(Debug)] -pub struct BufferReader { - pub inner: Cursor>, - remaining: usize, -} +// #[derive(Debug)] +// pub struct BufferReader { +// pub inner: Cursor>, +// remaining: usize, +// } -impl BufferReader { - pub fn new(inner: Vec, offset: usize, read_length: usize) -> Self { - let mut cur = Cursor::new(inner); - cur.set_position(offset as u64); - Self { - inner: cur, - remaining: offset + read_length, - } - } -} +// impl BufferReader { +// pub fn new(inner: Vec, offset: usize, read_length: usize) -> Self { +// let mut cur = Cursor::new(inner); +// cur.set_position(offset as u64); +// Self { +// inner: cur, +// remaining: offset + read_length, +// } +// } +// } -impl AsyncRead for BufferReader { - #[tracing::instrument(level = "debug", skip(self, buf))] - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> std::task::Poll> { - match Pin::new(&mut self.inner).poll_read(cx, buf) { - Poll::Ready(Ok(_)) => { - if self.inner.position() as usize >= self.remaining { - self.remaining -= buf.filled().len(); - Poll::Ready(Ok(())) - } else { - Poll::Pending - } - } - Poll::Ready(Err(err)) => Poll::Ready(Err(err)), - Poll::Pending => Poll::Pending, - } - } -} +// impl AsyncRead for BufferReader { +// #[tracing::instrument(level = "debug", skip(self, buf))] +// fn poll_read( +// mut self: Pin<&mut Self>, +// cx: &mut std::task::Context<'_>, +// buf: &mut tokio::io::ReadBuf<'_>, +// ) -> std::task::Poll> { +// match Pin::new(&mut self.inner).poll_read(cx, buf) { +// Poll::Ready(Ok(_)) => { +// if self.inner.position() as usize >= self.remaining { +// self.remaining -= buf.filled().len(); +// Poll::Ready(Ok(())) +// } else { +// Poll::Pending +// } +// } +// Poll::Ready(Err(err)) => Poll::Ready(Err(err)), +// Poll::Pending => Poll::Pending, +// } +// } +// } // #[async_trait::async_trait] // impl Reader for BufferReader { @@ -1537,17 +1479,17 @@ impl AsyncRead for BufferReader { // // } // } -#[derive(Debug)] -pub struct LocalFileReader { - pub inner: File, - // pos: usize, -} +// #[derive(Debug)] +// pub struct LocalFileReader { +// pub inner: File, +// // pos: usize, +// } -impl LocalFileReader { - pub fn new(inner: File) -> Self { - Self { inner } - } -} +// impl LocalFileReader { +// pub fn new(inner: File) -> Self { +// Self { inner } +// } +// } // #[async_trait::async_trait] // impl Reader for LocalFileReader { @@ -1579,16 +1521,16 @@ impl LocalFileReader { // // } // } -impl AsyncRead for LocalFileReader { - #[tracing::instrument(level = "debug", skip(self, buf))] - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> std::task::Poll> { - Pin::new(&mut self.inner).poll_read(cx, buf) - } -} +// impl AsyncRead for LocalFileReader { +// #[tracing::instrument(level = "debug", skip(self, buf))] +// fn poll_read( +// mut self: Pin<&mut Self>, +// cx: &mut std::task::Context<'_>, +// buf: &mut tokio::io::ReadBuf<'_>, +// ) -> std::task::Poll> { +// Pin::new(&mut self.inner).poll_read(cx, buf) +// } +// } // #[derive(Debug)] // pub struct RemoteFileReader { @@ -1670,85 +1612,3 @@ impl AsyncRead for LocalFileReader { // 
unimplemented!("poll_read") // } // } - -#[derive(Debug)] -pub struct HttpFileReader { - // client: reqwest::Client, - // url: String, - // disk: String, - // volume: String, - // path: String, - // offset: usize, - // length: usize, - inner: reqwest::blocking::Response, - // buf: Vec, - pos: usize, -} - -impl HttpFileReader { - pub async fn new(url: &str, disk: &str, volume: &str, path: &str, offset: usize, length: usize) -> Result { - let client = reqwest::blocking::Client::new(); - let resp = client - .get(format!( - "{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}", - url, disk, volume, path, offset, length - )) - .send()?; - Ok(Self { - // client: reqwest::Client::new(), - // url: url.to_string(), - // disk: disk.to_string(), - // volume: volume.to_string(), - // path: path.to_string(), - // offset, - // length, - inner: resp, - // buf: Vec::new(), - pos: 0, - }) - } - - // pub async fn get_response(&self) -> Result<&Response, std::io::Error> { - // if let Some(resp) = self.inner.get() { - // return Ok(resp); - // } else { - // let client = reqwest::Client::new(); - // let resp = client - // .get(&format!( - // "{}/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}", - // self.url, self.disk, self.volume, self.path, self.offset, self.length - // )) - // .send() - // .await - // .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; - // self.inner.set(resp); - // Ok(self.inner.get().unwrap()) - // } - // } -} - -impl AsyncRead for HttpFileReader { - #[tracing::instrument(level = "debug", skip(self, buf))] - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> std::task::Poll> { - let buf = buf.initialize_unfilled(); - self.inner.read_exact(buf)?; - self.pos += buf.len(); - Poll::Ready(Ok(())) - } -} - -// impl Reader for HttpFileReader { -// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { -// if self.pos != offset { -// self.inner.seek(SeekFrom::Start(offset as u64))?; -// self.pos = offset; -// } -// let bytes_read = self.inner.read(buf)?; -// self.pos += bytes_read; -// Ok(bytes_read) -// } -// } diff --git a/ecstore/src/disk/remote.rs b/ecstore/src/disk/remote.rs index 2fd2ca3f..8528ac1e 100644 --- a/ecstore/src/disk/remote.rs +++ b/ecstore/src/disk/remote.rs @@ -22,8 +22,8 @@ use tracing::info; use uuid::Uuid; use super::{ - endpoint::Endpoint, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, - FileInfoVersions, FileReader, FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RemoteFileWriter, RenameDataResp, + endpoint::Endpoint, io::HttpFileReader, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, + DiskOption, FileInfoVersions, FileReader, FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions, }; use crate::{ @@ -36,7 +36,7 @@ use crate::{ }, store_api::{FileInfo, RawFileInfo}, }; -use crate::{disk::HttpFileReader, utils::proto_err_to_err}; +use crate::{disk::io::HttpFileWriter, utils::proto_err_to_err}; use crate::{disk::MetaCacheEntry, metacache::writer::MetacacheWriter}; use protos::proto_gen::node_service::RenamePartRequst; @@ -286,6 +286,7 @@ impl DiskAPI for RemoteDisk { Ok(()) } + #[tracing::instrument(level = "debug", skip(self))] async fn rename_file(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str) -> Result<()> { info!("rename_file"); let mut client = 
node_service_time_out_client(&self.addr) @@ -312,58 +313,55 @@ impl DiskAPI for RemoteDisk { Ok(()) } - async fn create_file(&self, _origvolume: &str, volume: &str, path: &str, _file_size: usize) -> Result { + #[tracing::instrument(level = "debug", skip(self))] + async fn create_file(&self, _origvolume: &str, volume: &str, path: &str, file_size: usize) -> Result { info!("create_file"); - Ok(FileWriter::Remote( - RemoteFileWriter::new( - self.endpoint.clone(), - volume.to_string(), - path.to_string(), - false, - node_service_time_out_client(&self.addr) - .await - .map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?, - ) - .await?, - )) + Ok(FileWriter::Http(HttpFileWriter::new( + self.endpoint.grid_host().as_str(), + self.endpoint.to_string().as_str(), + volume, + path, + file_size, + false, + )?)) } + #[tracing::instrument(level = "debug", skip(self))] async fn append_file(&self, volume: &str, path: &str) -> Result { info!("append_file"); - Ok(FileWriter::Remote( - RemoteFileWriter::new( - self.endpoint.clone(), - volume.to_string(), - path.to_string(), - true, - node_service_time_out_client(&self.addr) - .await - .map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?, - ) - .await?, - )) + Ok(FileWriter::Http(HttpFileWriter::new( + self.endpoint.grid_host().as_str(), + self.endpoint.to_string().as_str(), + volume, + path, + 0, + true, + )?)) } + #[tracing::instrument(level = "debug", skip(self))] async fn read_file(&self, volume: &str, path: &str) -> Result { info!("read_file"); - Ok(FileReader::Http( - HttpFileReader::new(self.endpoint.grid_host().as_str(), self.endpoint.to_string().as_str(), volume, path, 0, 0) - .await?, - )) + Ok(FileReader::Http(HttpFileReader::new( + self.endpoint.grid_host().as_str(), + self.endpoint.to_string().as_str(), + volume, + path, + 0, + 0, + )?)) } + #[tracing::instrument(level = "debug", skip(self))] async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result { - Ok(FileReader::Http( - HttpFileReader::new( - self.endpoint.grid_host().as_str(), - self.endpoint.to_string().as_str(), - volume, - path, - offset, - length, - ) - .await?, - )) + Ok(FileReader::Http(HttpFileReader::new( + self.endpoint.grid_host().as_str(), + self.endpoint.to_string().as_str(), + volume, + path, + offset, + length, + )?)) } async fn list_dir(&self, _origvolume: &str, volume: &str, _dir_path: &str, _count: i32) -> Result> { diff --git a/ecstore/src/set_disk.rs b/ecstore/src/set_disk.rs index 0855fa94..d1ef1de4 100644 --- a/ecstore/src/set_disk.rs +++ b/ecstore/src/set_disk.rs @@ -1,6 +1,6 @@ use std::{ collections::{HashMap, HashSet}, - io::Write, + io::{Cursor, Write}, path::Path, sync::Arc, time::Duration, @@ -14,10 +14,10 @@ use crate::{ endpoint::Endpoint, error::{is_all_not_found, DiskError}, format::FormatV3, - new_disk, BufferReader, BufferWriter, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskOption, - DiskStore, FileInfoVersions, FileReader, FileWriter, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, - ReadMultipleReq, ReadMultipleResp, ReadOptions, UpdateMetadataOpts, RUSTFS_META_BUCKET, RUSTFS_META_MULTIPART_BUCKET, - RUSTFS_META_TMP_BUCKET, + io::{FileReader, FileWriter}, + new_disk, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskOption, DiskStore, FileInfoVersions, + MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, ReadMultipleReq, ReadMultipleResp, ReadOptions, + UpdateMetadataOpts, RUSTFS_META_BUCKET, 
RUSTFS_META_MULTIPART_BUCKET, RUSTFS_META_TMP_BUCKET, }, erasure::Erasure, error::{Error, Result}, @@ -1361,7 +1361,7 @@ impl SetDisks { for (i, opdisk) in disks.iter().enumerate() { if let Some(disk) = opdisk { if disk.is_online().await && disk.get_disk_location().set_idx.is_some() { - info!("Disk {:?} is online", disk); + info!("Disk {:?} is online", disk.to_string()); continue; } @@ -2452,7 +2452,7 @@ impl SetDisks { if let Some(disk) = disk { let filewriter = { if is_inline_buffer { - FileWriter::Buffer(BufferWriter::new(Vec::new())) + FileWriter::Buffer(Cursor::new(Vec::new())) } else { let disk = disk.clone(); let part_path = format!("{}/{}/part.{}", tmp_id, dst_data_dir, part.number); @@ -2501,7 +2501,7 @@ impl SetDisks { if let Some(ref writer) = writers[index] { if let Some(w) = writer.as_any().downcast_ref::() { if let FileWriter::Buffer(buffer_writer) = w.writer() { - parts_metadata[index].data = Some(buffer_writer.as_ref().to_vec()); + parts_metadata[index].data = Some(buffer_writer.clone().into_inner()); } } } @@ -3744,7 +3744,7 @@ impl ObjectIO for SetDisks { if let Some(disk) = disk_op { let filewriter = { if is_inline_buffer { - FileWriter::Buffer(BufferWriter::new(Vec::new())) + FileWriter::Buffer(Cursor::new(Vec::new())) } else { let disk = disk.clone(); @@ -3760,6 +3760,8 @@ impl ObjectIO for SetDisks { } } + warn!("put_object data.content_length {}", data.content_length); + // TODO: etag from header let mut etag_stream = EtagReader::new(&mut data.stream, None, None); @@ -3789,7 +3791,7 @@ impl ObjectIO for SetDisks { if let Some(ref writer) = writers[i] { if let Some(w) = writer.as_any().downcast_ref::() { if let FileWriter::Buffer(buffer_writer) = w.writer() { - fi.data = Some(buffer_writer.as_ref().to_vec()); + fi.data = Some(buffer_writer.clone().into_inner()); } } } @@ -4294,6 +4296,7 @@ impl StorageAPI for SetDisks { unimplemented!() } + #[tracing::instrument(level = "debug", skip(self, data, opts))] async fn put_object_part( &self, bucket: &str, @@ -5245,7 +5248,7 @@ async fn disks_with_all_parts( let checksum_info = meta.erasure.get_checksum_info(meta.parts[0].number); let data_len = data.len(); let verify_err = match bitrot_verify( - FileReader::Buffer(BufferReader::new(data.clone(), 0, data_len)), + FileReader::Buffer(Cursor::new(data.clone())), data_len, meta.erasure.shard_file_size(meta.size), checksum_info.algorithm, diff --git a/ecstore/src/store_init.rs b/ecstore/src/store_init.rs index 3c44cc0f..00f93d11 100644 --- a/ecstore/src/store_init.rs +++ b/ecstore/src/store_init.rs @@ -53,6 +53,7 @@ pub async fn connect_load_init_formats( set_drive_count: usize, deployment_id: Option, ) -> Result { + warn!("connect_load_init_formats first_disk: {}", first_disk); let (formats, errs) = load_format_erasure_all(disks, false).await; debug!("load_format_erasure_all errs {:?}", &errs); @@ -63,12 +64,13 @@ pub async fn connect_load_init_formats( if first_disk && DiskError::should_init_erasure_disks(&errs) { // UnformattedDisk, not format file create + warn!("first_disk && should_init_erasure_disks"); // new format and save let fms = init_format_erasure(disks, set_count, set_drive_count, deployment_id); let errs = save_format_file_all(disks, &fms).await; - debug!("save_format_file_all errs {:?}", &errs); + warn!("save_format_file_all errs {:?}", &errs); // TODO: check quorum // reduceWriteQuorumErrs(&errs)?; @@ -77,6 +79,12 @@ pub async fn connect_load_init_formats( return Ok(fm); } + warn!( + "first_disk: {}, should_init_erasure_disks: {}", + first_disk, + 
DiskError::should_init_erasure_disks(&errs) + ); + let unformatted = DiskError::quorum_unformatted_disks(&errs); if unformatted && !first_disk { return Err(Error::new(ErasureError::NotFirstDisk)); diff --git a/ecstore/src/utils/os/linux.rs b/ecstore/src/utils/os/linux.rs index bd8782bc..27b73f86 100644 --- a/ecstore/src/utils/os/linux.rs +++ b/ecstore/src/utils/os/linux.rs @@ -99,7 +99,7 @@ fn get_fs_type(fs_type: FsType) -> &'static str { match fs_type { statfs::TMPFS_MAGIC => "TMPFS", statfs::MSDOS_SUPER_MAGIC => "MSDOS", - statfs::XFS_SUPER_MAGIC => "XFS", + // statfs::XFS_SUPER_MAGIC => "XFS", statfs::NFS_SUPER_MAGIC => "NFS", statfs::EXT4_SUPER_MAGIC => "EXT4", statfs::ECRYPTFS_SUPER_MAGIC => "ecryptfs", diff --git a/rustfs/Cargo.toml b/rustfs/Cargo.toml index e27c5957..404257dc 100644 --- a/rustfs/Cargo.toml +++ b/rustfs/Cargo.toml @@ -61,7 +61,6 @@ tracing-subscriber.workspace = true transform-stream.workspace = true uuid = "1.15.1" url.workspace = true -admin = { path = "../api/admin" } axum.workspace = true matchit = "0.8.6" shadow-rs.workspace = true diff --git a/rustfs/src/admin/rpc.rs b/rustfs/src/admin/rpc.rs index fc7b5652..9d2fda5b 100644 --- a/rustfs/src/admin/rpc.rs +++ b/rustfs/src/admin/rpc.rs @@ -3,9 +3,10 @@ use super::router::Operation; use super::router::S3Router; use crate::storage::ecfs::bytes_stream; use common::error::Result; +use ecstore::disk::io::FileReader; use ecstore::disk::DiskAPI; -use ecstore::disk::FileReader; use ecstore::store::find_local_disk; +use futures::TryStreamExt; use http::StatusCode; use hyper::Method; use matchit::Params; @@ -17,6 +18,7 @@ use s3s::S3Response; use s3s::S3Result; use serde_urlencoded::from_bytes; use tokio_util::io::ReaderStream; +use tokio_util::io::StreamReader; use tracing::warn; pub const RPC_PREFIX: &str = "/rustfs/rpc"; @@ -28,6 +30,12 @@ pub fn regist_rpc_route(r: &mut S3Router) -> Result<()> { AdminOperation(&ReadFile {}), )?; + r.insert( + Method::PUT, + format!("{}{}", RPC_PREFIX, "/put_file_stream").as_str(), + AdminOperation(&PutFile {}), + )?; + Ok(()) } @@ -49,7 +57,7 @@ impl Operation for ReadFile { let query = { if let Some(query) = req.uri.query() { let input: ReadFileQuery = - from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed1"))?; + from_bytes(query.as_bytes()).map_err(|e| s3_error!(InvalidArgument, "get query failed1 {:?}", e))?; input } else { ReadFileQuery::default() @@ -95,3 +103,56 @@ impl Operation for ReadFile { // Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::empty()))) } } + +// /rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}" +#[derive(Debug, Default, serde::Deserialize)] +pub struct PutFileQuery { + disk: String, + volume: String, + path: String, + append: bool, + size: usize, +} +pub struct PutFile {} +#[async_trait::async_trait] +impl Operation for PutFile { + async fn call(&self, req: S3Request, _params: Params<'_, '_>) -> S3Result> { + warn!("handle PutFile"); + + let query = { + if let Some(query) = req.uri.query() { + let input: PutFileQuery = + from_bytes(query.as_bytes()).map_err(|e| s3_error!(InvalidArgument, "get query failed1 {:?}", e))?; + input + } else { + PutFileQuery::default() + } + }; + + let Some(disk) = find_local_disk(&query.disk).await else { + return Err(s3_error!(InvalidArgument, "disk not found")); + }; + + let mut file = if query.append { + disk.append_file(&query.volume, &query.path) + .await + .map_err(|e| s3_error!(InternalError, "append file err {}", e))? 
+ } else { + disk.create_file("", &query.volume, &query.path, query.size) + .await + .map_err(|e| s3_error!(InternalError, "read file err {}", e))? + }; + + let mut body = StreamReader::new( + req.input + .into_stream() + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)), + ); + + tokio::io::copy(&mut body, &mut file) + .await + .map_err(|e| s3_error!(InternalError, "copy err {}", e))?; + + Ok(S3Response::new((StatusCode::OK, Body::empty()))) + } +} diff --git a/rustfs/src/grpc.rs b/rustfs/src/grpc.rs index c25c0519..1aeb64b2 100644 --- a/rustfs/src/grpc.rs +++ b/rustfs/src/grpc.rs @@ -1,9 +1,4 @@ -use std::{ - collections::HashMap, - error::Error, - io::{Cursor, ErrorKind}, - pin::Pin, -}; +use std::{collections::HashMap, io::Cursor, pin::Pin}; use ecstore::{ admin_server_info::get_local_server_property, @@ -11,7 +6,6 @@ use ecstore::{ disk::{ DeleteOptions, DiskAPI, DiskInfoOptions, DiskStore, FileInfoVersions, ReadMultipleReq, ReadOptions, UpdateMetadataOpts, }, - erasure::Writer, error::Error as EcsError, heal::{ data_usage_cache::DataUsageCache, @@ -50,25 +44,25 @@ use tracing::{debug, error, info}; type ResponseStream = Pin> + Send>>; -fn match_for_io_error(err_status: &Status) -> Option<&std::io::Error> { - let mut err: &(dyn Error + 'static) = err_status; +// fn match_for_io_error(err_status: &Status) -> Option<&std::io::Error> { +// let mut err: &(dyn Error + 'static) = err_status; - loop { - if let Some(io_err) = err.downcast_ref::() { - return Some(io_err); - } +// loop { +// if let Some(io_err) = err.downcast_ref::() { +// return Some(io_err); +// } - // h2::Error do not expose std::io::Error with `source()` - // https://github.com/hyperium/h2/pull/462 - if let Some(h2_err) = err.downcast_ref::() { - if let Some(io_err) = h2_err.get_io() { - return Some(io_err); - } - } +// // h2::Error do not expose std::io::Error with `source()` +// // https://github.com/hyperium/h2/pull/462 +// if let Some(h2_err) = err.downcast_ref::() { +// if let Some(io_err) = h2_err.get_io() { +// return Some(io_err); +// } +// } - err = err.source()?; - } -} +// err = err.source()?; +// } +// } #[derive(Debug)] pub struct NodeService { @@ -558,138 +552,144 @@ impl Node for NodeService { } } - async fn write(&self, request: Request) -> Result, Status> { - let request = request.into_inner(); - if let Some(disk) = self.find_disk(&request.disk).await { - let file_writer = if request.is_append { - disk.append_file(&request.volume, &request.path).await - } else { - disk.create_file("", &request.volume, &request.path, 0).await - }; + async fn write(&self, _request: Request) -> Result, Status> { + unimplemented!("write"); + // let request = request.into_inner(); + // if let Some(disk) = self.find_disk(&request.disk).await { + // let file_writer = if request.is_append { + // disk.append_file(&request.volume, &request.path).await + // } else { + // disk.create_file("", &request.volume, &request.path, 0).await + // }; - match file_writer { - Ok(mut file_writer) => match file_writer.write(&request.data).await { - Ok(_) => Ok(tonic::Response::new(WriteResponse { - success: true, - error: None, - })), - Err(err) => Ok(tonic::Response::new(WriteResponse { - success: false, - error: Some(err_to_proto_err(&err, &format!("write failed: {}", err))), - })), - }, - Err(err) => Ok(tonic::Response::new(WriteResponse { - success: false, - error: Some(err_to_proto_err(&err, &format!("get writer failed: {}", err))), - })), - } - } else { - Ok(tonic::Response::new(WriteResponse { - success: false, - error: 
Some(err_to_proto_err( - &EcsError::new(StorageError::InvalidArgument(Default::default(), Default::default(), Default::default())), - "can not find disk", - )), - })) - } + // match file_writer { + // Ok(mut file_writer) => match file_writer.write(&request.data).await { + // Ok(_) => Ok(tonic::Response::new(WriteResponse { + // success: true, + // error: None, + // })), + // Err(err) => Ok(tonic::Response::new(WriteResponse { + // success: false, + // error: Some(err_to_proto_err(&err, &format!("write failed: {}", err))), + // })), + // }, + // Err(err) => Ok(tonic::Response::new(WriteResponse { + // success: false, + // error: Some(err_to_proto_err(&err, &format!("get writer failed: {}", err))), + // })), + // } + // } else { + // Ok(tonic::Response::new(WriteResponse { + // success: false, + // error: Some(err_to_proto_err( + // &EcsError::new(StorageError::InvalidArgument(Default::default(), Default::default(), Default::default())), + // "can not find disk", + // )), + // })) + // } } type WriteStreamStream = ResponseStream; - async fn write_stream(&self, request: Request>) -> Result, Status> { + async fn write_stream( + &self, + _request: Request>, + ) -> Result, Status> { info!("write_stream"); - let mut in_stream = request.into_inner(); - let (tx, rx) = mpsc::channel(128); + unimplemented!("write_stream"); - tokio::spawn(async move { - let mut file_ref = None; - while let Some(result) = in_stream.next().await { - match result { - // Ok(v) => tx - // .send(Ok(EchoResponse { message: v.message })) - // .await - // .expect("working rx"), - Ok(v) => { - match file_ref.as_ref() { - Some(_) => (), - None => { - if let Some(disk) = find_local_disk(&v.disk).await { - let file_writer = if v.is_append { - disk.append_file(&v.volume, &v.path).await - } else { - disk.create_file("", &v.volume, &v.path, 0).await - }; + // let mut in_stream = request.into_inner(); + // let (tx, rx) = mpsc::channel(128); - match file_writer { - Ok(file_writer) => file_ref = Some(file_writer), - Err(err) => { - tx.send(Ok(WriteResponse { - success: false, - error: Some(err_to_proto_err( - &err, - &format!("get get file writer failed: {}", err), - )), - })) - .await - .expect("working rx"); - break; - } - } - } else { - tx.send(Ok(WriteResponse { - success: false, - error: Some(err_to_proto_err( - &EcsError::new(StorageError::InvalidArgument( - Default::default(), - Default::default(), - Default::default(), - )), - "can not find disk", - )), - })) - .await - .expect("working rx"); - break; - } - } - }; + // tokio::spawn(async move { + // let mut file_ref = None; + // while let Some(result) = in_stream.next().await { + // match result { + // // Ok(v) => tx + // // .send(Ok(EchoResponse { message: v.message })) + // // .await + // // .expect("working rx"), + // Ok(v) => { + // match file_ref.as_ref() { + // Some(_) => (), + // None => { + // if let Some(disk) = find_local_disk(&v.disk).await { + // let file_writer = if v.is_append { + // disk.append_file(&v.volume, &v.path).await + // } else { + // disk.create_file("", &v.volume, &v.path, 0).await + // }; - match file_ref.as_mut().unwrap().write(&v.data).await { - Ok(_) => tx.send(Ok(WriteResponse { - success: true, - error: None, - })), - Err(err) => tx.send(Ok(WriteResponse { - success: false, - error: Some(err_to_proto_err(&err, &format!("write failed: {}", err))), - })), - } - .await - .unwrap(); - } - Err(err) => { - if let Some(io_err) = match_for_io_error(&err) { - if io_err.kind() == ErrorKind::BrokenPipe { - // here you can handle special case when client - // 
disconnected in unexpected way - eprintln!("\tclient disconnected: broken pipe"); - break; - } - } + // match file_writer { + // Ok(file_writer) => file_ref = Some(file_writer), + // Err(err) => { + // tx.send(Ok(WriteResponse { + // success: false, + // error: Some(err_to_proto_err( + // &err, + // &format!("get get file writer failed: {}", err), + // )), + // })) + // .await + // .expect("working rx"); + // break; + // } + // } + // } else { + // tx.send(Ok(WriteResponse { + // success: false, + // error: Some(err_to_proto_err( + // &EcsError::new(StorageError::InvalidArgument( + // Default::default(), + // Default::default(), + // Default::default(), + // )), + // "can not find disk", + // )), + // })) + // .await + // .expect("working rx"); + // break; + // } + // } + // }; - match tx.send(Err(err)).await { - Ok(_) => (), - Err(_err) => break, // response was dropped - } - } - } - } - println!("\tstream ended"); - }); + // match file_ref.as_mut().unwrap().write(&v.data).await { + // Ok(_) => tx.send(Ok(WriteResponse { + // success: true, + // error: None, + // })), + // Err(err) => tx.send(Ok(WriteResponse { + // success: false, + // error: Some(err_to_proto_err(&err, &format!("write failed: {}", err))), + // })), + // } + // .await + // .unwrap(); + // } + // Err(err) => { + // if let Some(io_err) = match_for_io_error(&err) { + // if io_err.kind() == ErrorKind::BrokenPipe { + // // here you can handle special case when client + // // disconnected in unexpected way + // eprintln!("\tclient disconnected: broken pipe"); + // break; + // } + // } - let out_stream = ReceiverStream::new(rx); + // match tx.send(Err(err)).await { + // Ok(_) => (), + // Err(_err) => break, // response was dropped + // } + // } + // } + // } + // println!("\tstream ended"); + // }); - Ok(tonic::Response::new(Box::pin(out_stream))) + // let out_stream = ReceiverStream::new(rx); + + // Ok(tonic::Response::new(Box::pin(out_stream))) } type ReadAtStream = ResponseStream; diff --git a/rustfs/src/main.rs b/rustfs/src/main.rs index 002e4f57..d3e5b718 100644 --- a/rustfs/src/main.rs +++ b/rustfs/src/main.rs @@ -152,8 +152,8 @@ async fn run(opt: config::Opt) -> Result<()> { for (i, eps) in endpoint_pools.as_ref().iter().enumerate() { info!( - "created endpoints {}, set_count:{}, drives_per_set: {}, cmd: {:?}", - i, eps.set_count, eps.drives_per_set, eps.cmd_line + "created endpoints {}, set_count:{}, drives_per_set: {}, cmd: {:?}, \n{:?}", + i, eps.set_count, eps.drives_per_set, eps.cmd_line, eps ); }
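
Note on the new write path: with the gRPC write/write_stream handlers in rustfs/src/grpc.rs stubbed out as unimplemented, uploads between nodes go through PUT /rustfs/rpc/put_file_stream, handled by PutFile in rustfs/src/admin/rpc.rs. The handler requires the query fields disk, volume, path, append and size (PutFileQuery has no per-field defaults, so a missing size is rejected), then streams the request body into disk.create_file / disk.append_file. The sketch below is a hypothetical client-side call using reqwest; the real counterpart is the HttpFileWriter referenced from ecstore::disk::io, whose implementation is not part of this diff, so the function name, transport and error handling here are assumptions.

// Hypothetical client for the put_file_stream admin RPC (a sketch, not the
// actual ecstore::disk::io::HttpFileWriter). Query fields mirror
// PutFileQuery in rustfs/src/admin/rpc.rs: disk, volume, path, append, size.
use std::error::Error;

async fn put_file_stream(
    base_url: &str, // e.g. the peer's grid host, "http://peer:9000" (assumed)
    disk: &str,     // endpoint string resolved by find_local_disk on the server
    volume: &str,
    path: &str,
    append: bool,
    data: Vec<u8>,
) -> Result<(), Box<dyn Error>> {
    // `size` must always be present; a real client should also percent-encode
    // the `disk` value, which is itself a URL.
    let url = format!(
        "{}/rustfs/rpc/put_file_stream?disk={}&volume={}&path={}&append={}&size={}",
        base_url, disk, volume, path, append, data.len()
    );

    let resp = reqwest::Client::new().put(url).body(data).send().await?;
    if !resp.status().is_success() {
        return Err(format!("put_file_stream failed: {}", resp.status()).into());
    }
    Ok(())
}

The read side is symmetrical: remote.rs now builds FileReader::Http from a GET to /rustfs/rpc/read_file_stream with disk, volume, path, offset and length.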
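
Note on the inline-buffer change in ecstore/src/set_disk.rs: the old BufferWriter/BufferReader types are replaced by plain std::io::Cursor<Vec<u8>>, with the buffered bytes recovered via clone().into_inner() and verified through FileReader::Buffer(Cursor::new(data)). A minimal round-trip illustration, assuming only std (the real code goes through the FileWriter::Buffer variant and bitrot_verify):

// Inline-buffer round trip as now done in set_disk.rs: write part data into a
// Cursor<Vec<u8>>, recover the bytes with clone().into_inner() so the writer
// itself can stay in place, and read them back through another Cursor.
use std::io::{Cursor, Read, Write};

fn inline_buffer_roundtrip() -> std::io::Result<()> {
    // Write side: FileWriter::Buffer(Cursor::new(Vec::new()))
    let mut buffer_writer: Cursor<Vec<u8>> = Cursor::new(Vec::new());
    buffer_writer.write_all(b"inline part data")?;

    // Metadata side: fi.data = Some(buffer_writer.clone().into_inner())
    let inline: Vec<u8> = buffer_writer.clone().into_inner();

    // Read side: FileReader::Buffer(Cursor::new(data.clone())) for bitrot_verify
    let mut reader = Cursor::new(inline);
    let mut out = Vec::new();
    reader.read_to_end(&mut out)?;
    assert_eq!(out, b"inline part data");
    Ok(())
}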
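
Note on the body-copy pattern inside the PutFile handler: the s3s request body is a fallible byte stream, so it is mapped into std::io::Error, wrapped as an AsyncRead with tokio_util::io::StreamReader, and piped into the disk writer with tokio::io::copy. The same pattern in isolation, with a Vec<u8> sink standing in for the FileWriter and the bytes/futures/tokio-util dependencies assumed available in the workspace:

// StreamReader + tokio::io::copy pattern used by PutFile: map stream errors
// into std::io::Error, wrap the stream as an AsyncRead, copy into any
// AsyncWrite sink.
use bytes::Bytes;
use futures::TryStreamExt;
use tokio_util::io::StreamReader;

async fn copy_stream_to_sink() -> std::io::Result<u64> {
    // Stand-in for req.input.into_stream(): a fallible stream of Bytes chunks.
    let chunks: Vec<Result<Bytes, std::io::Error>> =
        vec![Ok(Bytes::from_static(b"hello ")), Ok(Bytes::from_static(b"world"))];
    let stream = futures::stream::iter(chunks);

    let mut body = StreamReader::new(
        stream.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)),
    );

    // Stand-in for the FileWriter returned by create_file/append_file.
    let mut sink: Vec<u8> = Vec::new();
    tokio::io::copy(&mut body, &mut sink).await
}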