From 7a7aee20491d8375ea9bbc930392a7f8092aaf37 Mon Sep 17 00:00:00 2001 From: weisd Date: Wed, 19 Feb 2025 17:41:18 +0800 Subject: [PATCH 1/5] use filereader as asyncread --- Cargo.lock | 167 +++++++++++- Cargo.toml | 2 +- ecstore/Cargo.toml | 1 + ecstore/src/bitrot.rs | 502 +++++++++++++++++++++---------------- ecstore/src/disk/local.rs | 68 ++++- ecstore/src/disk/mod.rs | 442 +++++++++++++++++++++----------- ecstore/src/disk/remote.rs | 29 ++- ecstore/src/erasure.rs | 2 + ecstore/src/set_disk.rs | 56 +++-- rustfs/src/admin/mod.rs | 3 + rustfs/src/admin/router.rs | 7 +- rustfs/src/admin/rpc.rs | 97 +++++++ rustfs/src/grpc.rs | 182 +++++++------- scripts/run.sh | 8 +- 14 files changed, 1067 insertions(+), 499 deletions(-) create mode 100644 rustfs/src/admin/rpc.rs diff --git a/Cargo.lock b/Cargo.lock index 89db6bc2..b6bba26a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1377,7 +1377,7 @@ dependencies = [ "futures-util", "generational-box", "longest-increasing-subsequence", - "rustc-hash", + "rustc-hash 1.1.0", "rustversion", "serde", "slab", @@ -1443,7 +1443,7 @@ dependencies = [ "objc_id", "once_cell", "rfd 0.14.1", - "rustc-hash", + "rustc-hash 1.1.0", "serde", "serde_json", "signal-hook", @@ -1604,7 +1604,7 @@ dependencies = [ "dioxus-html", "js-sys", "lazy-js-bundle", - "rustc-hash", + "rustc-hash 1.1.0", "serde", "sledgehammer_bindgen", "sledgehammer_utils", @@ -1710,7 +1710,7 @@ dependencies = [ "generational-box", "once_cell", "parking_lot 0.12.3", - "rustc-hash", + "rustc-hash 1.1.0", "tracing", "warnings", ] @@ -1737,7 +1737,7 @@ dependencies = [ "generational-box", "js-sys", "lazy-js-bundle", - "rustc-hash", + "rustc-hash 1.1.0", "serde", "serde-wasm-bindgen", "serde_json", @@ -1939,6 +1939,7 @@ dependencies = [ "reader", "reed-solomon-erasure", "regex", + "reqwest", "rmp", "rmp-serde", "s3s", @@ -1971,6 +1972,15 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + [[package]] name = "endi" version = "1.1.0" @@ -2881,6 +2891,24 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.27.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +dependencies = [ + "futures-util", + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots", +] + [[package]] name = "hyper-timeout" version = "0.5.2" @@ -4783,6 +4811,58 @@ dependencies = [ "serde", ] +[[package]] +name = "quinn" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +dependencies = [ + "bytes", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash 2.1.1", + "rustls", + "socket2", + "thiserror 2.0.11", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +dependencies = [ + "bytes", + "getrandom 0.2.15", + "rand 0.8.5", + "ring", + "rustc-hash 2.1.1", + "rustls", + "rustls-pki-types", + "slab", + 
"thiserror 2.0.11", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "quote" version = "1.0.38" @@ -5038,12 +5118,16 @@ checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" dependencies = [ "base64", "bytes", + "encoding_rs", + "futures-channel", "futures-core", "futures-util", + "h2", "http", "http-body", "http-body-util", "hyper", + "hyper-rustls", "hyper-util", "ipnet", "js-sys", @@ -5053,11 +5137,17 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", + "quinn", + "rustls", + "rustls-pemfile", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", + "system-configuration", "tokio", + "tokio-rustls", "tokio-util", "tower 0.5.2", "tower-service", @@ -5066,6 +5156,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", + "webpki-roots", "windows-registry", ] @@ -5199,6 +5290,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + [[package]] name = "rustc_version" version = "0.4.1" @@ -5335,6 +5432,9 @@ name = "rustls-pki-types" version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +dependencies = [ + "web-time", +] [[package]] name = "rustls-webpki" @@ -5837,7 +5937,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "debdd4b83524961983cea3c55383b3910fd2f24fd13a188f5b091d2d504a61ae" dependencies = [ - "rustc-hash", + "rustc-hash 1.1.0", ] [[package]] @@ -6021,6 +6121,27 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.9.0", + "core-foundation 0.9.4", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "system-deps" version = "6.2.2" @@ -6248,6 +6369,21 @@ dependencies = [ "zerovec", ] +[[package]] +name = "tinyvec" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" version = "1.43.0" @@ -7015,6 +7151,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webbrowser" version = "0.8.15" @@ -7076,6 +7222,15 @@ dependencies = [ "system-deps", ] +[[package]] +name = "webpki-roots" +version = "0.26.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2210b291f7ea53617fbafcc4939f10914214ec15aace5ba62293a668f322c5c9" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "webview2-com" version = "0.33.0" diff --git a/Cargo.toml b/Cargo.toml index c034c32d..01e4a072 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -69,7 +69,7 @@ prost-types = "0.13.4" protobuf = "3.7" protos = { path = "./common/protos" } rand = "0.8.5" -reqwest = { version = "0.12.12", default-features = false, features = ["rustls-tls", "charset", "http2", "macos-system-configuration", "stream"] } +reqwest = { version = "0.12.12", default-features = false, features = ["rustls-tls", "charset", "http2", "macos-system-configuration", "stream","blocking"] } rfd = { version = "0.15.2", default-features = false, features = ["xdg-portal", "tokio"] } rmp = "0.8.14" rmp-serde = "1.3.0" diff --git a/ecstore/Cargo.toml b/ecstore/Cargo.toml index cacafa49..7ddc751d 100644 --- a/ecstore/Cargo.toml +++ b/ecstore/Cargo.toml @@ -66,6 +66,7 @@ pin-project-lite.workspace = true md-5.workspace = true madmin.workspace = true workers.workspace = true +reqwest = { workspace = true } [target.'cfg(not(windows))'.dependencies] diff --git a/ecstore/src/bitrot.rs b/ecstore/src/bitrot.rs index e26d039b..86a92ada 100644 --- a/ecstore/src/bitrot.rs +++ b/ecstore/src/bitrot.rs @@ -1,27 +1,22 @@ use crate::{ - disk::{error::DiskError, DiskAPI, DiskStore, FileReader, FileWriter, Reader}, + disk::{error::DiskError, BufferReader, Disk, DiskAPI, DiskStore, FileReader, FileWriter}, erasure::{ReadAt, Writer}, error::{Error, Result}, store_api::BitrotAlgorithm, }; - use blake2::Blake2b512; use blake2::Digest as _; use highway::{HighwayHash, HighwayHasher, Key}; use lazy_static::lazy_static; use sha2::{digest::core_api::BlockSizeUser, Digest, Sha256}; -use std::{ - any::Any, - collections::HashMap, - io::{Cursor, Read}, -}; -use tracing::{error, info}; - +use std::{any::Any, collections::HashMap, sync::Arc}; use tokio::{ + io::AsyncReadExt as _, spawn, sync::mpsc::{self, Sender}, task::JoinHandle, }; +use tracing::{error, info}; lazy_static! 
{ static ref BITROT_ALGORITHMS: HashMap = { @@ -169,22 +164,22 @@ pub async fn new_bitrot_writer( pub type BitrotReader = Box; -#[allow(clippy::too_many_arguments)] -pub fn new_bitrot_reader( - disk: DiskStore, - data: &[u8], - bucket: &str, - file_path: &str, - till_offset: usize, - algo: BitrotAlgorithm, - sum: &[u8], - shard_size: usize, -) -> BitrotReader { - if algo == BitrotAlgorithm::HighwayHash256S { - return Box::new(StreamingBitrotReader::new(disk, data, bucket, file_path, algo, till_offset, shard_size)); - } - Box::new(WholeBitrotReader::new(disk, bucket, file_path, algo, till_offset, sum)) -} +// #[allow(clippy::too_many_arguments)] +// pub fn new_bitrot_reader( +// disk: DiskStore, +// data: &[u8], +// bucket: &str, +// file_path: &str, +// till_offset: usize, +// algo: BitrotAlgorithm, +// sum: &[u8], +// shard_size: usize, +// ) -> BitrotReader { +// if algo == BitrotAlgorithm::HighwayHash256S { +// return Box::new(StreamingBitrotReader::new(disk, data, bucket, file_path, algo, till_offset, shard_size)); +// } +// Box::new(WholeBitrotReader::new(disk, bucket, file_path, algo, till_offset, sum)) +// } pub async fn close_bitrot_writers(writers: &mut [Option]) -> Result<()> { for w in writers.iter_mut().flatten() { @@ -209,25 +204,25 @@ pub fn bitrot_shard_file_size(size: usize, shard_size: usize, algo: BitrotAlgori size.div_ceil(shard_size) * algo.new_hasher().size() + size } -pub fn bitrot_verify( - r: &mut Cursor>, +pub async fn bitrot_verify( + r: FileReader, want_size: usize, part_size: usize, algo: BitrotAlgorithm, - want: Vec, + _want: Vec, mut shard_size: usize, ) -> Result<()> { - if algo != BitrotAlgorithm::HighwayHash256S { - let mut h = algo.new_hasher(); - h.update(r.get_ref()); - let hash = h.finalize(); - if hash != want { - info!("bitrot_verify except: {:?}, got: {:?}", want, hash); - return Err(Error::new(DiskError::FileCorrupt)); - } + // if algo != BitrotAlgorithm::HighwayHash256S { + // let mut h = algo.new_hasher(); + // h.update(r.get_ref()); + // let hash = h.finalize(); + // if hash != want { + // info!("bitrot_verify except: {:?}, got: {:?}", want, hash); + // return Err(Error::new(DiskError::FileCorrupt)); + // } - return Ok(()); - } + // return Ok(()); + // } let mut h = algo.new_hasher(); let mut hash_buf = vec![0; h.size()]; let mut left = want_size; @@ -240,9 +235,11 @@ pub fn bitrot_verify( return Err(Error::new(DiskError::FileCorrupt)); } + let mut r = r; + while left > 0 { h.reset(); - let n = r.read(&mut hash_buf)?; + let n = r.read_exact(&mut hash_buf).await?; left -= n; if left < shard_size { @@ -250,7 +247,7 @@ pub fn bitrot_verify( } let mut buf = vec![0; shard_size]; - let read = r.read(&mut buf)?; + let read = r.read_exact(&mut buf).await?; h.update(buf); left -= read; let hash = h.clone().finalize(); @@ -298,51 +295,54 @@ impl Writer for WholeBitrotWriter { } } -#[derive(Debug)] -pub struct WholeBitrotReader { - disk: DiskStore, - volume: String, - file_path: String, - _verifier: BitrotVerifier, - till_offset: usize, - buf: Option>, -} +// #[derive(Debug)] +// pub struct WholeBitrotReader { +// disk: DiskStore, +// volume: String, +// file_path: String, +// _verifier: BitrotVerifier, +// till_offset: usize, +// buf: Option>, +// } -impl WholeBitrotReader { - pub fn new(disk: DiskStore, volume: &str, file_path: &str, algo: BitrotAlgorithm, till_offset: usize, sum: &[u8]) -> Self { - Self { - disk, - volume: volume.to_string(), - file_path: file_path.to_string(), - _verifier: BitrotVerifier::new(algo, sum), - till_offset, - buf: None, - } 
- } -} +// impl WholeBitrotReader { +// pub fn new(disk: DiskStore, volume: &str, file_path: &str, algo: BitrotAlgorithm, till_offset: usize, sum: &[u8]) -> Self { +// Self { +// disk, +// volume: volume.to_string(), +// file_path: file_path.to_string(), +// _verifier: BitrotVerifier::new(algo, sum), +// till_offset, +// buf: None, +// } +// } +// } -#[async_trait::async_trait] -impl ReadAt for WholeBitrotReader { - async fn read_at(&mut self, offset: usize, length: usize) -> Result<(Vec, usize)> { - if self.buf.is_none() { - let buf_len = self.till_offset - offset; - let mut file = self.disk.read_file(&self.volume, &self.file_path).await?; - let mut buf = vec![0u8; buf_len]; - file.read_at(offset, &mut buf).await?; - self.buf = Some(buf); - } +// #[async_trait::async_trait] +// impl ReadAt for WholeBitrotReader { +// async fn read_at(&mut self, offset: usize, length: usize) -> Result<(Vec, usize)> { +// if self.buf.is_none() { +// let buf_len = self.till_offset - offset; +// let mut file = self +// .disk +// .read_file_stream(&self.volume, &self.file_path, offset, length) +// .await?; +// let mut buf = vec![0u8; buf_len]; +// file.read_at(offset, &mut buf).await?; +// self.buf = Some(buf); +// } - if let Some(buf) = &mut self.buf { - if buf.len() < length { - return Err(Error::new(DiskError::LessData)); - } +// if let Some(buf) = &mut self.buf { +// if buf.len() < length { +// return Err(Error::new(DiskError::LessData)); +// } - return Ok((buf.drain(0..length).collect::>(), length)); - } +// return Ok((buf.drain(0..length).collect::>(), length)); +// } - Err(Error::new(DiskError::LessData)) - } -} +// Err(Error::new(DiskError::LessData)) +// } +// } struct StreamingBitrotWriter { hasher: Hasher, @@ -413,80 +413,80 @@ impl Writer for StreamingBitrotWriter { } } -#[derive(Debug)] -struct StreamingBitrotReader { - disk: DiskStore, - _data: Vec, - volume: String, - file_path: String, - till_offset: usize, - curr_offset: usize, - hasher: Hasher, - shard_size: usize, - buf: Vec, - hash_bytes: Vec, -} +// #[derive(Debug)] +// struct StreamingBitrotReader { +// disk: DiskStore, +// _data: Vec, +// volume: String, +// file_path: String, +// till_offset: usize, +// curr_offset: usize, +// hasher: Hasher, +// shard_size: usize, +// buf: Vec, +// hash_bytes: Vec, +// } -impl StreamingBitrotReader { - pub fn new( - disk: DiskStore, - data: &[u8], - volume: &str, - file_path: &str, - algo: BitrotAlgorithm, - till_offset: usize, - shard_size: usize, - ) -> Self { - let hasher = algo.new_hasher(); - Self { - disk, - _data: data.to_vec(), - volume: volume.to_string(), - file_path: file_path.to_string(), - till_offset: till_offset.div_ceil(shard_size) * hasher.size() + till_offset, - curr_offset: 0, - hash_bytes: Vec::with_capacity(hasher.size()), - hasher, - shard_size, - buf: Vec::new(), - } - } -} +// impl StreamingBitrotReader { +// pub fn new( +// disk: DiskStore, +// data: &[u8], +// volume: &str, +// file_path: &str, +// algo: BitrotAlgorithm, +// till_offset: usize, +// shard_size: usize, +// ) -> Self { +// let hasher = algo.new_hasher(); +// Self { +// disk, +// _data: data.to_vec(), +// volume: volume.to_string(), +// file_path: file_path.to_string(), +// till_offset: till_offset.div_ceil(shard_size) * hasher.size() + till_offset, +// curr_offset: 0, +// hash_bytes: Vec::with_capacity(hasher.size()), +// hasher, +// shard_size, +// buf: Vec::new(), +// } +// } +// } -#[async_trait::async_trait] -impl ReadAt for StreamingBitrotReader { - async fn read_at(&mut self, offset: usize, length: usize) 
-> Result<(Vec<u8>, usize)> { - if offset % self.shard_size != 0 { - return Err(Error::new(DiskError::Unexpected)); - } - if self.buf.is_empty() { - self.curr_offset = offset; - let stream_offset = (offset / self.shard_size) * self.hasher.size() + offset; - let buf_len = self.till_offset - stream_offset; - let mut file = self.disk.read_file(&self.volume, &self.file_path).await?; - let mut buf = vec![0u8; buf_len]; - file.read_at(stream_offset, &mut buf).await?; - self.buf = buf; - } - if offset != self.curr_offset { - return Err(Error::new(DiskError::Unexpected)); - } +// #[async_trait::async_trait] +// impl ReadAt for StreamingBitrotReader { +// async fn read_at(&mut self, offset: usize, length: usize) -> Result<(Vec<u8>, usize)> { +// if offset % self.shard_size != 0 { +// return Err(Error::new(DiskError::Unexpected)); +// } +// if self.buf.is_empty() { +// self.curr_offset = offset; +// let stream_offset = (offset / self.shard_size) * self.hasher.size() + offset; +// let buf_len = self.till_offset - stream_offset; +// let mut file = self.disk.read_file(&self.volume, &self.file_path).await?; +// let mut buf = vec![0u8; buf_len]; +// file.read_at(stream_offset, &mut buf).await?; +// self.buf = buf; +// } +// if offset != self.curr_offset { +// return Err(Error::new(DiskError::Unexpected)); +// } - self.hash_bytes = self.buf.drain(0..self.hash_bytes.capacity()).collect(); - let buf = self.buf.drain(0..length).collect::<Vec<u8>>(); - self.hasher.reset(); - self.hasher.update(&buf); - let actual = self.hasher.clone().finalize(); - if actual != self.hash_bytes { - return Err(Error::new(DiskError::FileCorrupt)); - } +// self.hash_bytes = self.buf.drain(0..self.hash_bytes.capacity()).collect(); +// let buf = self.buf.drain(0..length).collect::<Vec<u8>>(); +// self.hasher.reset(); +// self.hasher.update(&buf); +// let actual = self.hasher.clone().finalize(); +// if actual != self.hash_bytes { +// return Err(Error::new(DiskError::FileCorrupt)); +// } - let readed_len = buf.len(); - self.curr_offset += readed_len; +// let readed_len = buf.len(); +// self.curr_offset += readed_len; - Ok((buf, readed_len)) - } -} +// Ok((buf, readed_len)) +// } +// } pub struct BitrotFileWriter { pub inner: FileWriter, @@ -535,8 +535,12 @@ pub fn new_bitrot_filewriter(inner: FileWriter, algo: BitrotAlgorithm, shard_siz #[derive(Debug)] struct BitrotFileReader { - pub inner: FileReader, - // till_offset: usize, + disk: Arc<Disk>, + data: Option<Vec<u8>>, + volume: String, + file_path: String, + reader: Option<FileReader>, + till_offset: usize, curr_offset: usize, hasher: Hasher, shard_size: usize, @@ -545,28 +549,41 @@ struct BitrotFileReader { read_buf: Vec<u8>, } -// fn ceil(a: usize, b: usize) -> usize { -// (a + b - 1) / b -// } +fn ceil(a: usize, b: usize) -> usize { + a.div_ceil(b) +} impl BitrotFileReader { - pub fn new(inner: FileReader, algo: BitrotAlgorithm, _till_offset: usize, shard_size: usize) -> Self { + pub fn new( + disk: Arc<Disk>, + data: Option<Vec<u8>>, + volume: String, + file_path: String, + algo: BitrotAlgorithm, + till_offset: usize, + shard_size: usize, + ) -> Self { let hasher = algo.new_hasher(); Self { - inner, - // till_offset: ceil(till_offset, shard_size) * hasher.size() + till_offset, + disk, + data, + volume, + file_path, + till_offset: ceil(till_offset, shard_size) * hasher.size() + till_offset, curr_offset: 0, hash_bytes: vec![0u8; hasher.size()], hasher, shard_size, // buf: Vec::new(), read_buf: Vec::new(), + reader: None, } } } #[async_trait::async_trait] impl ReadAt for BitrotFileReader { + // Read the shard at `offset` and verify its bitrot checksum before returning it. async fn read_at(&mut self, offset: usize,
length: usize) -> Result<(Vec, usize)> { if offset % self.shard_size != 0 { error!( @@ -578,53 +595,112 @@ impl ReadAt for BitrotFileReader { return Err(Error::new(DiskError::Unexpected)); } - let stream_offset = (offset / self.shard_size) * self.hasher.size() + offset; - let buf_len = self.hasher.size() + length; + if self.reader.is_none() { + self.curr_offset = offset; + let stream_offset = (offset / self.shard_size) * self.hasher.size() + offset; + + if let Some(data) = self.data.clone() { + self.reader = Some(FileReader::Buffer(BufferReader::new( + data, + stream_offset, + self.till_offset - stream_offset, + ))); + } else { + self.reader = Some( + self.disk + .read_file_stream(&self.volume, &self.file_path, stream_offset, self.till_offset - stream_offset) + .await?, + ); + } + } + + if offset != self.curr_offset { + error!("BitrotFileReader read_at offset != self.curr_offset, {} != {}", offset, self.curr_offset); + return Err(Error::new(DiskError::Unexpected)); + } + + let reader = self.reader.as_mut().unwrap(); + // let mut hash_buf = self.hash_bytes; + + self.hash_bytes.clear(); + self.hash_bytes.resize(self.hasher.size(), 0u8); + + reader.read_exact(&mut self.hash_bytes).await?; self.read_buf.clear(); - self.read_buf.resize(buf_len, 0u8); + self.read_buf.resize(length, 0u8); - self.inner.read_at(stream_offset, &mut self.read_buf).await?; - - let hash_bytes = &self.read_buf.as_slice()[0..self.hash_bytes.capacity()]; - - self.hash_bytes.clone_from_slice(hash_bytes); - let buf = self.read_buf.as_slice()[self.hash_bytes.capacity()..self.hash_bytes.capacity() + length].to_vec(); + reader.read_exact(&mut self.read_buf).await?; self.hasher.reset(); - self.hasher.update(&buf); + self.hasher.update(&self.read_buf); let actual = self.hasher.clone().finalize(); - if actual != self.hash_bytes { + error!( + "BitrotFileReader read_at actual != self.hash_bytes, {:?} != {:?}", + actual, self.hash_bytes + ); return Err(Error::new(DiskError::FileCorrupt)); } - let readed_len = buf.len(); + let readed_len = self.read_buf.len(); self.curr_offset += readed_len; - Ok((buf, readed_len)) + Ok((self.read_buf.clone(), readed_len)) + + // let stream_offset = (offset / self.shard_size) * self.hasher.size() + offset; + // let buf_len = self.hasher.size() + length; + + // self.read_buf.clear(); + // self.read_buf.resize(buf_len, 0u8); + + // self.inner.read_at(stream_offset, &mut self.read_buf).await?; + + // let hash_bytes = &self.read_buf.as_slice()[0..self.hash_bytes.capacity()]; + + // self.hash_bytes.clone_from_slice(hash_bytes); + // let buf = self.read_buf.as_slice()[self.hash_bytes.capacity()..self.hash_bytes.capacity() + length].to_vec(); + + // self.hasher.reset(); + // self.hasher.update(&buf); + // let actual = self.hasher.clone().finalize(); + + // if actual != self.hash_bytes { + // return Err(Error::new(DiskError::FileCorrupt)); + // } + + // let readed_len = buf.len(); + // self.curr_offset += readed_len; + + // Ok((buf, readed_len)) } } -pub fn new_bitrot_filereader(inner: FileReader, till_offset: usize, algo: BitrotAlgorithm, shard_size: usize) -> BitrotReader { - Box::new(BitrotFileReader::new(inner, algo, till_offset, shard_size)) +pub fn new_bitrot_filereader( + disk: Arc, + data: Option>, + volume: String, + file_path: String, + till_offset: usize, + algo: BitrotAlgorithm, + shard_size: usize, +) -> BitrotReader { + Box::new(BitrotFileReader::new(disk, data, volume, file_path, algo, till_offset, shard_size)) } #[cfg(test)] mod test { - use std::{collections::HashMap, fs}; + use 
std::collections::HashMap; use hex_simd::decode_to_vec; - use tempfile::TempDir; use crate::{ - bitrot::{new_bitrot_writer, BITROT_ALGORITHMS}, - disk::{endpoint::Endpoint, error::DiskError, new_disk, DiskAPI, DiskOption}, + disk::error::DiskError, error::{Error, Result}, store_api::BitrotAlgorithm, }; - use super::{bitrot_writer_sum, new_bitrot_reader}; + // use super::{bitrot_writer_sum, new_bitrot_reader}; #[test] fn bitrot_self_test() -> Result<()> { @@ -674,47 +750,47 @@ mod test { Ok(()) } - #[tokio::test] - async fn test_all_bitrot_algorithms() -> Result<()> { - for algo in BITROT_ALGORITHMS.keys() { - test_bitrot_reader_writer_algo(algo.clone()).await?; - } + // #[tokio::test] + // async fn test_all_bitrot_algorithms() -> Result<()> { + // for algo in BITROT_ALGORITHMS.keys() { + // test_bitrot_reader_writer_algo(algo.clone()).await?; + // } - Ok(()) - } + // Ok(()) + // } - async fn test_bitrot_reader_writer_algo(algo: BitrotAlgorithm) -> Result<()> { - let temp_dir = TempDir::new().unwrap().path().to_string_lossy().to_string(); - fs::create_dir_all(&temp_dir)?; - let volume = "testvol"; - let file_path = "testfile"; + // async fn test_bitrot_reader_writer_algo(algo: BitrotAlgorithm) -> Result<()> { + // let temp_dir = TempDir::new().unwrap().path().to_string_lossy().to_string(); + // fs::create_dir_all(&temp_dir)?; + // let volume = "testvol"; + // let file_path = "testfile"; - let ep = Endpoint::try_from(temp_dir.as_str())?; - let opt = DiskOption::default(); - let disk = new_disk(&ep, &opt).await?; - disk.make_volume(volume).await?; - let mut writer = new_bitrot_writer(disk.clone(), "", volume, file_path, 35, algo.clone(), 10).await?; + // let ep = Endpoint::try_from(temp_dir.as_str())?; + // let opt = DiskOption::default(); + // let disk = new_disk(&ep, &opt).await?; + // disk.make_volume(volume).await?; + // let mut writer = new_bitrot_writer(disk.clone(), "", volume, file_path, 35, algo.clone(), 10).await?; - writer.write(b"aaaaaaaaaa").await?; - writer.write(b"aaaaaaaaaa").await?; - writer.write(b"aaaaaaaaaa").await?; - writer.write(b"aaaaa").await?; + // writer.write(b"aaaaaaaaaa").await?; + // writer.write(b"aaaaaaaaaa").await?; + // writer.write(b"aaaaaaaaaa").await?; + // writer.write(b"aaaaa").await?; - let sum = bitrot_writer_sum(&writer); - writer.close().await?; + // let sum = bitrot_writer_sum(&writer); + // writer.close().await?; - let mut reader = new_bitrot_reader(disk, b"", volume, file_path, 35, algo, &sum, 10); - let read_len = 10; - let mut result: Vec; - (result, _) = reader.read_at(0, read_len).await?; - assert_eq!(result, b"aaaaaaaaaa"); - (result, _) = reader.read_at(10, read_len).await?; - assert_eq!(result, b"aaaaaaaaaa"); - (result, _) = reader.read_at(20, read_len).await?; - assert_eq!(result, b"aaaaaaaaaa"); - (result, _) = reader.read_at(30, read_len / 2).await?; - assert_eq!(result, b"aaaaa"); + // let mut reader = new_bitrot_reader(disk, b"", volume, file_path, 35, algo, &sum, 10); + // let read_len = 10; + // let mut result: Vec; + // (result, _) = reader.read_at(0, read_len).await?; + // assert_eq!(result, b"aaaaaaaaaa"); + // (result, _) = reader.read_at(10, read_len).await?; + // assert_eq!(result, b"aaaaaaaaaa"); + // (result, _) = reader.read_at(20, read_len).await?; + // assert_eq!(result, b"aaaaaaaaaa"); + // (result, _) = reader.read_at(30, read_len / 2).await?; + // assert_eq!(result, b"aaaaa"); - Ok(()) - } + // Ok(()) + // } } diff --git a/ecstore/src/disk/local.rs b/ecstore/src/disk/local.rs index 7f8bd1cc..a1f1e0c7 100644 --- 
a/ecstore/src/disk/local.rs +++ b/ecstore/src/disk/local.rs @@ -49,7 +49,8 @@ use common::defer; use path_absolutize::Absolutize; use std::collections::{HashMap, HashSet}; use std::fmt::Debug; -use std::io::Cursor; +use std::io::SeekFrom; +use std::os::unix::fs::MetadataExt; use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::Arc; use std::time::{Duration, SystemTime}; @@ -59,7 +60,7 @@ use std::{ }; use time::OffsetDateTime; use tokio::fs::{self, File}; -use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt, ErrorKind}; +use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWrite, AsyncWriteExt, ErrorKind}; use tokio::sync::mpsc::Sender; use tokio::sync::RwLock; use tracing::{error, info, warn}; @@ -735,13 +736,24 @@ impl LocalDisk { sum: &[u8], shard_size: usize, ) -> Result<()> { - let mut file = utils::fs::open_file(part_path, O_CREATE | O_WRONLY) + let file = utils::fs::open_file(part_path, O_CREATE | O_WRONLY) .await .map_err(os_err_to_file_err)?; - let mut data = Vec::new(); - let n = file.read_to_end(&mut data).await?; - bitrot_verify(&mut Cursor::new(data), n, part_size, algo, sum.to_vec(), shard_size) + // let mut data = Vec::new(); + // let n = file.read_to_end(&mut data).await?; + + let meta = file.metadata().await?; + + bitrot_verify( + FileReader::Local(LocalFileReader::new(file)), + meta.size() as usize, + part_size, + algo, + sum.to_vec(), + shard_size, + ) + .await } async fn scan_dir( @@ -1533,6 +1545,50 @@ impl DiskAPI for LocalDisk { Ok(FileReader::Local(LocalFileReader::new(f))) } + async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result { + let volume_dir = self.get_bucket_path(volume)?; + if !skip_access_checks(volume) { + if let Err(e) = utils::fs::access(&volume_dir).await { + return Err(convert_access_error(e, DiskError::VolumeAccessDenied)); + } + } + + let file_path = volume_dir.join(Path::new(&path)); + check_path_length(file_path.to_string_lossy().to_string().as_str())?; + + let mut f = self.open_file(file_path, O_RDONLY, volume_dir).await.map_err(|err| { + if let Some(e) = err.to_io_err() { + if os_is_not_exist(&e) { + Error::new(DiskError::FileNotFound) + } else if os_is_permission(&e) || is_sys_err_not_dir(&e) { + Error::new(DiskError::FileAccessDenied) + } else if is_sys_err_io(&e) { + Error::new(DiskError::FaultyDisk) + } else if is_sys_err_too_many_files(&e) { + Error::new(DiskError::TooManyOpenFiles) + } else { + Error::new(e) + } + } else { + err + } + })?; + + let meta = f.metadata().await?; + if meta.len() < (offset + length) as u64 { + error!( + "read_file_stream: file size is less than offset + length {} + {} = {}", + offset, + length, + meta.len() + ); + return Err(Error::new(DiskError::FileCorrupt)); + } + + f.seek(SeekFrom::Start(offset as u64)).await?; + + Ok(FileReader::Local(LocalFileReader::new(f))) + } #[tracing::instrument(level = "debug", skip(self))] async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result> { if !origvolume.is_empty() { diff --git a/ecstore/src/disk/mod.rs b/ecstore/src/disk/mod.rs index 0b7aba4c..b1d341d7 100644 --- a/ecstore/src/disk/mod.rs +++ b/ecstore/src/disk/mod.rs @@ -28,28 +28,24 @@ use crate::{ store_api::{FileInfo, ObjectInfo, RawFileInfo}, utils::path::SLASH_SEPARATOR, }; + use endpoint::Endpoint; use error::DiskError; use futures::StreamExt; use local::LocalDisk; use madmin::info_commands::DiskMetrics; -use protos::proto_gen::node_service::{ - node_service_client::NodeServiceClient, ReadAtRequest, ReadAtResponse, 
WriteRequest, WriteResponse, -}; +use protos::proto_gen::node_service::{node_service_client::NodeServiceClient, WriteRequest, WriteResponse}; use remote::RemoteDisk; use serde::{Deserialize, Serialize}; -use std::{ - any::Any, - cmp::Ordering, - fmt::Debug, - io::{Cursor, SeekFrom}, - path::PathBuf, - sync::Arc, -}; +use std::io::Read as _; +use std::pin::Pin; +use std::task::Poll; +use std::{any::Any, cmp::Ordering, fmt::Debug, io::Cursor, path::PathBuf, sync::Arc}; use time::OffsetDateTime; +use tokio::io::AsyncRead; use tokio::{ fs::File, - io::{AsyncReadExt, AsyncSeekExt, AsyncWrite, AsyncWriteExt}, + io::{AsyncWrite, AsyncWriteExt}, sync::mpsc::{self, Sender}, }; use tokio_stream::wrappers::ReceiverStream; @@ -206,6 +202,13 @@ impl DiskAPI for Disk { } } + async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result { + match self { + Disk::Local(local_disk) => local_disk.read_file_stream(volume, path, offset, length).await, + Disk::Remote(remote_disk) => remote_disk.read_file_stream(volume, path, offset, length).await, + } + } + async fn list_dir(&self, _origvolume: &str, volume: &str, _dir_path: &str, _count: i32) -> Result> { match self { Disk::Local(local_disk) => local_disk.list_dir(_origvolume, volume, _dir_path, _count).await, @@ -451,6 +454,7 @@ pub trait DiskAPI: Debug + Send + Sync + 'static { // 读目录下的所有文件、目录 async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result>; async fn read_file(&self, volume: &str, path: &str) -> Result; + async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result; async fn append_file(&self, volume: &str, path: &str) -> Result; async fn create_file(&self, origvolume: &str, volume: &str, path: &str, file_size: usize) -> Result; // ReadFileStream @@ -1411,186 +1415,340 @@ impl Writer for RemoteFileWriter { } } -#[async_trait::async_trait] -pub trait Reader { - async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result; - async fn seek(&mut self, offset: usize) -> Result<()>; - async fn read_exact(&mut self, buf: &mut [u8]) -> Result; -} +// #[async_trait::async_trait] +// pub trait Reader { +// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result; +// // async fn seek(&mut self, offset: usize) -> Result<()>; +// // async fn read_exact(&mut self, buf: &mut [u8]) -> Result; +// } #[derive(Debug)] pub enum FileReader { Local(LocalFileReader), - Remote(RemoteFileReader), + // Remote(RemoteFileReader), Buffer(BufferReader), + Http(HttpFileReader), } -#[async_trait::async_trait] -impl Reader for FileReader { - async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { - match self { - Self::Local(reader) => reader.read_at(offset, buf).await, - Self::Remote(reader) => reader.read_at(offset, buf).await, - Self::Buffer(reader) => reader.read_at(offset, buf).await, - } - } - async fn seek(&mut self, offset: usize) -> Result<()> { - match self { - Self::Local(reader) => reader.seek(offset).await, - Self::Remote(reader) => reader.seek(offset).await, - Self::Buffer(reader) => reader.seek(offset).await, - } - } - async fn read_exact(&mut self, buf: &mut [u8]) -> Result { - match self { - Self::Local(reader) => reader.read_exact(buf).await, - Self::Remote(reader) => reader.read_exact(buf).await, - Self::Buffer(reader) => reader.read_exact(buf).await, +impl AsyncRead for FileReader { + #[tracing::instrument(level = "debug", skip(self, buf))] + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut 
std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + match &mut *self { + Self::Local(reader) => Pin::new(&mut reader.inner).poll_read(cx, buf), + Self::Buffer(reader) => Pin::new(&mut reader.inner).poll_read(cx, buf), + Self::Http(reader) => Pin::new(reader).poll_read(cx, buf), } } } +// #[async_trait::async_trait] +// impl Reader for FileReader { +// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { +// match self { +// Self::Local(reader) => reader.read_at(offset, buf).await, +// Self::Remote(reader) => reader.read_at(offset, buf).await, +// Self::Buffer(reader) => reader.read_at(offset, buf).await, +// Self::Http(reader) => reader.read_at(offset, buf).await, +// } +// } +// // async fn seek(&mut self, offset: usize) -> Result<()> { +// // match self { +// // Self::Local(reader) => reader.seek(offset).await, +// // Self::Remote(reader) => reader.seek(offset).await, +// // Self::Buffer(reader) => reader.seek(offset).await, +// // } +// // } +// // async fn read_exact(&mut self, buf: &mut [u8]) -> Result { +// // match self { +// // Self::Local(reader) => reader.read_exact(buf).await, +// // Self::Remote(reader) => reader.read_exact(buf).await, +// // Self::Buffer(reader) => reader.read_exact(buf).await, +// // } +// // } +// } + #[derive(Debug)] pub struct BufferReader { pub inner: Cursor>, - pos: usize, + remaining: usize, } impl BufferReader { - pub fn new(inner: Vec) -> Self { + pub fn new(inner: Vec, offset: usize, read_length: usize) -> Self { + let mut cur = Cursor::new(inner); + cur.set_position(offset as u64); Self { - inner: Cursor::new(inner), - pos: 0, + inner: cur, + remaining: offset + read_length, } } } -#[async_trait::async_trait] -impl Reader for BufferReader { +impl AsyncRead for BufferReader { #[tracing::instrument(level = "debug", skip(self, buf))] - async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { - self.seek(offset).await?; - self.read_exact(buf).await - } - #[tracing::instrument(level = "debug", skip(self))] - async fn seek(&mut self, offset: usize) -> Result<()> { - if self.pos != offset { - self.inner.set_position(offset as u64); + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + match Pin::new(&mut self.inner).poll_read(cx, buf) { + Poll::Ready(Ok(_)) => { + if self.inner.position() as usize >= self.remaining { + self.remaining -= buf.filled().len(); + Poll::Ready(Ok(())) + } else { + Poll::Pending + } + } + Poll::Ready(Err(err)) => Poll::Ready(Err(err)), + Poll::Pending => Poll::Pending, } - - Ok(()) - } - #[tracing::instrument(level = "debug", skip(self))] - async fn read_exact(&mut self, buf: &mut [u8]) -> Result { - let bytes_read = self.inner.read_exact(buf).await?; - self.pos += buf.len(); - Ok(bytes_read) } } +// #[async_trait::async_trait] +// impl Reader for BufferReader { +// #[tracing::instrument(level = "debug", skip(self, buf))] +// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { +// if self.pos != offset { +// self.inner.set_position(offset as u64); +// } +// self.inner.read_exact(buf).await?; +// self.pos += buf.len(); +// Ok(buf.len()) +// } +// // #[tracing::instrument(level = "debug", skip(self))] +// // async fn seek(&mut self, offset: usize) -> Result<()> { +// // if self.pos != offset { +// // self.inner.set_position(offset as u64); +// // } + +// // Ok(()) +// // } +// // #[tracing::instrument(level = "debug", skip(self))] +// // async fn 
read_exact(&mut self, buf: &mut [u8]) -> Result { +// // let bytes_read = self.inner.read_exact(buf).await?; +// // self.pos += buf.len(); +// // Ok(bytes_read) +// // } +// } + #[derive(Debug)] pub struct LocalFileReader { pub inner: File, - pos: usize, + // pos: usize, } impl LocalFileReader { pub fn new(inner: File) -> Self { - Self { inner, pos: 0 } + Self { inner } } } -#[async_trait::async_trait] -impl Reader for LocalFileReader { - #[tracing::instrument(level = "debug", skip(self, buf))] - async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { - self.seek(offset).await?; - self.read_exact(buf).await - } +// #[async_trait::async_trait] +// impl Reader for LocalFileReader { +// #[tracing::instrument(level = "debug", skip(self, buf))] +// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { +// if self.pos != offset { +// self.inner.seek(SeekFrom::Start(offset as u64)).await?; +// self.pos = offset; +// } +// self.inner.read_exact(buf).await?; +// self.pos += buf.len(); +// Ok(buf.len()) +// } - #[tracing::instrument(level = "debug", skip(self))] - async fn seek(&mut self, offset: usize) -> Result<()> { - if self.pos != offset { - self.inner.seek(SeekFrom::Start(offset as u64)).await?; - self.pos = offset; - } +// // #[tracing::instrument(level = "debug", skip(self))] +// // async fn seek(&mut self, offset: usize) -> Result<()> { +// // if self.pos != offset { +// // self.inner.seek(SeekFrom::Start(offset as u64)).await?; +// // self.pos = offset; +// // } - Ok(()) - } +// // Ok(()) +// // } +// // #[tracing::instrument(level = "debug", skip(self, buf))] +// // async fn read_exact(&mut self, buf: &mut [u8]) -> Result { +// // let bytes_read = self.inner.read_exact(buf).await?; +// // self.pos += buf.len(); +// // Ok(bytes_read) +// // } +// } + +impl AsyncRead for LocalFileReader { #[tracing::instrument(level = "debug", skip(self, buf))] - async fn read_exact(&mut self, buf: &mut [u8]) -> Result { - let bytes_read = self.inner.read_exact(buf).await?; - self.pos += buf.len(); - Ok(bytes_read) + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + Pin::new(&mut self.inner).poll_read(cx, buf) } } +// #[derive(Debug)] +// pub struct RemoteFileReader { +// pub endpoint: Endpoint, +// pub volume: String, +// pub path: String, +// tx: Sender, +// resp_stream: Streaming, +// } + +// impl RemoteFileReader { +// pub async fn new(endpoint: Endpoint, volume: String, path: String, mut client: NodeClient) -> Result { +// let (tx, rx) = mpsc::channel(128); +// let in_stream = ReceiverStream::new(rx); + +// let response = client.read_at(in_stream).await.unwrap(); + +// let resp_stream = response.into_inner(); + +// Ok(Self { +// endpoint, +// volume, +// path, +// tx, +// resp_stream, +// }) +// } +// } + +// #[async_trait::async_trait] +// impl Reader for RemoteFileReader { +// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { +// let request = ReadAtRequest { +// disk: self.endpoint.to_string(), +// volume: self.volume.to_string(), +// path: self.path.to_string(), +// offset: offset.try_into().unwrap(), +// // length: length.try_into().unwrap(), +// length: buf.len().try_into().unwrap(), +// }; +// self.tx.send(request).await?; + +// if let Some(resp) = self.resp_stream.next().await { +// let resp = resp?; +// if resp.success { +// info!("read at stream success"); + +// buf.copy_from_slice(&resp.data); + +// Ok(resp.read_size.try_into().unwrap()) +// } else { 
+// return if let Some(err) = &resp.error { +// Err(proto_err_to_err(err)) +// } else { +// Err(Error::from_string("")) +// }; +// } +// } else { +// let error_info = "can not get response"; +// info!("read at stream failed: {}", error_info); +// Err(Error::from_string(error_info)) +// } +// } +// // async fn seek(&mut self, _offset: usize) -> Result<()> { +// // unimplemented!() +// // } +// // async fn read_exact(&mut self, _buf: &mut [u8]) -> Result { +// // unimplemented!() +// // } +// } + +// impl AsyncRead for RemoteFileReader { +// #[tracing::instrument(level = "debug", skip(self, buf))] +// fn poll_read( +// mut self: Pin<&mut Self>, +// cx: &mut std::task::Context<'_>, +// buf: &mut tokio::io::ReadBuf<'_>, +// ) -> std::task::Poll> { +// unimplemented!("poll_read") +// } +// } + #[derive(Debug)] -pub struct RemoteFileReader { - pub endpoint: Endpoint, - pub volume: String, - pub path: String, - tx: Sender, - resp_stream: Streaming, +pub struct HttpFileReader { + // client: reqwest::Client, + // url: String, + // disk: String, + // volume: String, + // path: String, + // offset: usize, + // length: usize, + inner: reqwest::blocking::Response, + // buf: Vec, + pos: usize, } -impl RemoteFileReader { - pub async fn new(endpoint: Endpoint, volume: String, path: String, mut client: NodeClient) -> Result { - let (tx, rx) = mpsc::channel(128); - let in_stream = ReceiverStream::new(rx); - - let response = client.read_at(in_stream).await.unwrap(); - - let resp_stream = response.into_inner(); - +impl HttpFileReader { + pub async fn new(url: &str, disk: &str, volume: &str, path: &str, offset: usize, length: usize) -> Result { + let client = reqwest::blocking::Client::new(); + let resp = client + .get(format!( + "{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}", + url, disk, volume, path, offset, length + )) + .send()?; Ok(Self { - endpoint, - volume, - path, - tx, - resp_stream, + // client: reqwest::Client::new(), + // url: url.to_string(), + // disk: disk.to_string(), + // volume: volume.to_string(), + // path: path.to_string(), + // offset, + // length, + inner: resp, + // buf: Vec::new(), + pos: 0, }) } + + // pub async fn get_response(&self) -> Result<&Response, std::io::Error> { + // if let Some(resp) = self.inner.get() { + // return Ok(resp); + // } else { + // let client = reqwest::Client::new(); + // let resp = client + // .get(&format!( + // "{}/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}", + // self.url, self.disk, self.volume, self.path, self.offset, self.length + // )) + // .send() + // .await + // .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + // self.inner.set(resp); + // Ok(self.inner.get().unwrap()) + // } + // } } -#[async_trait::async_trait] -impl Reader for RemoteFileReader { - async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { - let request = ReadAtRequest { - disk: self.endpoint.to_string(), - volume: self.volume.to_string(), - path: self.path.to_string(), - offset: offset.try_into().unwrap(), - // length: length.try_into().unwrap(), - length: buf.len().try_into().unwrap(), - }; - self.tx.send(request).await?; - - if let Some(resp) = self.resp_stream.next().await { - let resp = resp?; - if resp.success { - info!("read at stream success"); - - buf.copy_from_slice(&resp.data); - - Ok(resp.read_size.try_into().unwrap()) - } else { - return if let Some(err) = &resp.error { - Err(proto_err_to_err(err)) - } else { - Err(Error::from_string("")) - }; - } - } else { - let error_info = "can 
not get response"; - info!("read at stream failed: {}", error_info); - Err(Error::from_string(error_info)) - } - } - async fn seek(&mut self, _offset: usize) -> Result<()> { - unimplemented!() - } - async fn read_exact(&mut self, _buf: &mut [u8]) -> Result { - unimplemented!() +impl AsyncRead for HttpFileReader { + #[tracing::instrument(level = "debug", skip(self, buf))] + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + let buf = buf.initialize_unfilled(); + self.inner.read_exact(buf)?; + self.pos += buf.len(); + Poll::Ready(Ok(())) } } + +// impl Reader for HttpFileReader { +// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { +// if self.pos != offset { +// self.inner.seek(SeekFrom::Start(offset as u64))?; +// self.pos = offset; +// } +// let bytes_read = self.inner.read(buf)?; +// self.pos += bytes_read; +// Ok(bytes_read) +// } +// } diff --git a/ecstore/src/disk/remote.rs b/ecstore/src/disk/remote.rs index 68a5ab31..2fd2ca3f 100644 --- a/ecstore/src/disk/remote.rs +++ b/ecstore/src/disk/remote.rs @@ -23,10 +23,9 @@ use uuid::Uuid; use super::{ endpoint::Endpoint, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, - FileInfoVersions, FileReader, FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RemoteFileReader, RemoteFileWriter, - RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions, + FileInfoVersions, FileReader, FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RemoteFileWriter, RenameDataResp, + UpdateMetadataOpts, VolumeInfo, WalkDirOptions, }; -use crate::utils::proto_err_to_err; use crate::{ disk::error::DiskError, error::{Error, Result}, @@ -37,6 +36,7 @@ use crate::{ }, store_api::{FileInfo, RawFileInfo}, }; +use crate::{disk::HttpFileReader, utils::proto_err_to_err}; use crate::{disk::MetaCacheEntry, metacache::writer::MetacacheWriter}; use protos::proto_gen::node_service::RenamePartRequst; @@ -346,14 +346,21 @@ impl DiskAPI for RemoteDisk { async fn read_file(&self, volume: &str, path: &str) -> Result { info!("read_file"); - Ok(FileReader::Remote( - RemoteFileReader::new( - self.endpoint.clone(), - volume.to_string(), - path.to_string(), - node_service_time_out_client(&self.addr) - .await - .map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?, + Ok(FileReader::Http( + HttpFileReader::new(self.endpoint.grid_host().as_str(), self.endpoint.to_string().as_str(), volume, path, 0, 0) + .await?, + )) + } + + async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result { + Ok(FileReader::Http( + HttpFileReader::new( + self.endpoint.grid_host().as_str(), + self.endpoint.to_string().as_str(), + volume, + path, + offset, + length, ) .await?, )) diff --git a/ecstore/src/erasure.rs b/ecstore/src/erasure.rs index 4121ddf6..f068b109 100644 --- a/ecstore/src/erasure.rs +++ b/ecstore/src/erasure.rs @@ -419,6 +419,7 @@ impl Erasure { // num_shards * self.shard_size(self.block_size) } + // where erasure reading begins. 
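+    // (A sketch, assuming the bitrot framing used by BitrotFileReader above: each shard_size block is stored behind a hasher.size()-byte checksum, so logical offset o maps to stream offset (o / shard_size) * hash_len + o; e.g. with shard_size = 10 and 32-byte HighwayHash256 sums, logical offset 20 lands at stream offset 2 * 32 + 20 = 84.)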
pub fn shard_file_offset(&self, start_offset: usize, length: usize, total_length: usize) -> usize { let shard_size = self.shard_size(self.block_size); let shard_file_size = self.shard_file_size(total_length); @@ -528,6 +529,7 @@ impl ShardReader { pub async fn read(&mut self) -> Result<Vec<Option<Vec<u8>>>> { // let mut disks = self.readers; let reader_length = self.readers.len(); + // length of the shard block to read in this pass let mut read_length = self.shard_size; if self.offset + read_length > self.shard_file_size { read_length = self.shard_file_size - self.offset diff --git a/ecstore/src/set_disk.rs b/ecstore/src/set_disk.rs index c6db0f8e..0855fa94 100644 --- a/ecstore/src/set_disk.rs +++ b/ecstore/src/set_disk.rs @@ -1,6 +1,6 @@ use std::{ collections::{HashMap, HashSet}, - io::{Cursor, Write}, + io::Write, path::Path, sync::Arc, time::Duration, @@ -1855,20 +1855,23 @@ impl SetDisks { // debug!("read part_path {}", &part_path); if let Some(disk) = disk_op { - let filereader = { - if let Some(ref data) = files[idx].data { - FileReader::Buffer(BufferReader::new(data.clone())) - } else { - let disk = disk.clone(); - let part_path = - format!("{}/{}/part.{}", object, files[idx].data_dir.unwrap_or(Uuid::nil()), part_number); + // let filereader = { + // if let Some(ref data) = files[idx].data { + // FileReader::Buffer(BufferReader::new(data.clone())) + // } else { + // let disk = disk.clone(); + // let part_path = + // format!("{}/{}/part.{}", object, files[idx].data_dir.unwrap_or(Uuid::nil()), part_number); - disk.read_file(bucket, &part_path).await? - } - }; + // disk.read_file(bucket, &part_path).await? + // } + // }; let checksum_info = files[idx].erasure.get_checksum_info(part_number); let reader = new_bitrot_filereader( - filereader, + disk.clone(), + files[idx].data.clone(), + bucket.to_owned(), + format!("{}/{}/part.{}", object, files[idx].data_dir.unwrap_or(Uuid::nil()), part_number), till_offset, checksum_info.algorithm, erasure.shard_size(erasure.block_size), @@ -2411,18 +2414,21 @@ impl SetDisks { let mut prefer = vec![false; latest_disks.len()]; for (index, disk) in latest_disks.iter().enumerate() { if let (Some(disk), Some(metadata)) = (disk, &copy_parts_metadata[index]) { - let filereader = { - if let Some(ref data) = metadata.data { - FileReader::Buffer(BufferReader::new(data.clone())) - } else { - let disk = disk.clone(); - let part_path = format!("{}/{}/part.{}", object, src_data_dir, part.number); + // let filereader = { + // if let Some(ref data) = metadata.data { + // FileReader::Buffer(BufferReader::new(data.clone())) + // } else { + // let disk = disk.clone(); + // let part_path = format!("{}/{}/part.{}", object, src_data_dir, part.number); - disk.read_file(bucket, &part_path).await? 
+ // } + // }; let reader = new_bitrot_filereader( - filereader, + disk.clone(), + metadata.data.clone(), + bucket.to_owned(), + format!("{}/{}/part.{}", object, src_data_dir, part.number), till_offset, checksum_algo.clone(), erasure.shard_size(erasure.block_size), @@ -5239,13 +5245,15 @@ async fn disks_with_all_parts( let checksum_info = meta.erasure.get_checksum_info(meta.parts[0].number); let data_len = data.len(); let verify_err = match bitrot_verify( - &mut Cursor::new(data.to_vec()), + FileReader::Buffer(BufferReader::new(data.clone(), 0, data_len)), data_len, meta.erasure.shard_file_size(meta.size), checksum_info.algorithm, checksum_info.hash, meta.erasure.shard_size(meta.erasure.block_size), - ) { + ) + .await + { Ok(_) => None, Err(err) => Some(err), }; diff --git a/rustfs/src/admin/mod.rs b/rustfs/src/admin/mod.rs index 9c8f2403..4b398132 100644 --- a/rustfs/src/admin/mod.rs +++ b/rustfs/src/admin/mod.rs @@ -1,5 +1,6 @@ pub mod handlers; pub mod router; +mod rpc; pub mod utils; use common::error::Result; @@ -11,6 +12,7 @@ use handlers::{ }; use hyper::Method; use router::{AdminOperation, S3Router}; +use rpc::regist_rpc_route; use s3s::route::S3Route; const ADMIN_PREFIX: &str = "/rustfs/admin"; @@ -21,6 +23,7 @@ pub fn make_admin_route() -> Result { // 1 r.insert(Method::POST, "/", AdminOperation(&sts::AssumeRoleHandle {}))?; + regist_rpc_route(&mut r)?; regist_user_route(&mut r)?; r.insert( diff --git a/rustfs/src/admin/router.rs b/rustfs/src/admin/router.rs index 46a6bb9a..4fb605d6 100644 --- a/rustfs/src/admin/router.rs +++ b/rustfs/src/admin/router.rs @@ -14,6 +14,7 @@ use s3s::S3Request; use s3s::S3Response; use s3s::S3Result; +use super::rpc::RPC_PREFIX; use super::ADMIN_PREFIX; pub struct S3Router { @@ -63,7 +64,7 @@ where } } - uri.path().starts_with(ADMIN_PREFIX) + uri.path().starts_with(ADMIN_PREFIX) || uri.path().starts_with(RPC_PREFIX) } async fn call(&self, req: S3Request) -> S3Result> { @@ -81,6 +82,10 @@ where // check_access before call async fn check_access(&self, req: &mut S3Request) -> S3Result<()> { + // TODO: check access by req.credentials + if req.uri.path().starts_with(RPC_PREFIX) { + return Ok(()); + } match req.credentials { Some(_) => Ok(()), None => Err(s3_error!(AccessDenied, "Signature is required")), diff --git a/rustfs/src/admin/rpc.rs b/rustfs/src/admin/rpc.rs new file mode 100644 index 00000000..fc7b5652 --- /dev/null +++ b/rustfs/src/admin/rpc.rs @@ -0,0 +1,97 @@ +use super::router::AdminOperation; +use super::router::Operation; +use super::router::S3Router; +use crate::storage::ecfs::bytes_stream; +use common::error::Result; +use ecstore::disk::DiskAPI; +use ecstore::disk::FileReader; +use ecstore::store::find_local_disk; +use http::StatusCode; +use hyper::Method; +use matchit::Params; +use s3s::dto::StreamingBlob; +use s3s::s3_error; +use s3s::Body; +use s3s::S3Request; +use s3s::S3Response; +use s3s::S3Result; +use serde_urlencoded::from_bytes; +use tokio_util::io::ReaderStream; +use tracing::warn; + +pub const RPC_PREFIX: &str = "/rustfs/rpc"; + +pub fn regist_rpc_route(r: &mut S3Router) -> Result<()> { + r.insert( + Method::GET, + format!("{}{}", RPC_PREFIX, "/read_file_stream").as_str(), + AdminOperation(&ReadFile {}), + )?; + + Ok(()) +} + +// /rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}" +#[derive(Debug, Default, serde::Deserialize)] +pub struct ReadFileQuery { + disk: String, + volume: String, + path: String, + offset: usize, + length: usize, +} +pub struct ReadFile {} +#[async_trait::async_trait] +impl 
Operation for ReadFile {
+    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
+        warn!("handle ReadFile");
+
+        // Parse the URL query into a ReadFileQuery, falling back to defaults when no query is present.
+        let query = {
+            if let Some(query) = req.uri.query() {
+                let input: ReadFileQuery =
+                    from_bytes(query.as_bytes()).map_err(|e| s3_error!(InvalidArgument, "get query failed: {}", e))?;
+                input
+            } else {
+                ReadFileQuery::default()
+            }
+        };
+
+        let Some(disk) = find_local_disk(&query.disk).await else {
+            return Err(s3_error!(InvalidArgument, "disk not found"));
+        };
+
+        let file: FileReader = disk
+            .read_file_stream(&query.volume, &query.path, query.offset, query.length)
+            .await
+            .map_err(|e| s3_error!(InternalError, "read file err {}", e))?;
+
+        let s = bytes_stream(ReaderStream::new(file), query.length);
+
+        Ok(S3Response::new((StatusCode::OK, Body::from(StreamingBlob::wrap(s)))))
+
+        // let querys = req.uri.query().map(|q| {
+        //     let mut querys = HashMap::new();
+        //     for (k, v) in url::form_urlencoded::parse(q.as_bytes()) {
+        //         println!("{}={}", k, v);
+        //         querys.insert(k.to_string(), v.to_string());
+        //     }
+        //     querys
+        // });
+
+        // // TODO: file_path from root
+
+        // if let Some(file_path) = querys.and_then(|q| q.get("file_path").cloned()) {
+        //     let file = fs::OpenOptions::new()
+        //         .read(true)
+        //         .open(file_path)
+        //         .await
+        //         .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("open file err {}", e)))?;
+
+        //     let s = bytes_stream(ReaderStream::new(file), 0);
+
+        //     return Ok(S3Response::new((StatusCode::OK, Body::from(StreamingBlob::wrap(s)))));
+        // }
+
+        // Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::empty())))
+    }
+}
diff --git a/rustfs/src/grpc.rs b/rustfs/src/grpc.rs
index 0411d095..c25c0519 100644
--- a/rustfs/src/grpc.rs
+++ b/rustfs/src/grpc.rs
@@ -9,8 +9,7 @@ use ecstore::{
     admin_server_info::get_local_server_property,
     bucket::{metadata::load_bucket_metadata, metadata_sys},
     disk::{
-        DeleteOptions, DiskAPI, DiskInfoOptions, DiskStore, FileInfoVersions, ReadMultipleReq, ReadOptions, Reader,
-        UpdateMetadataOpts,
+        DeleteOptions, DiskAPI, DiskInfoOptions, DiskStore, FileInfoVersions, ReadMultipleReq, ReadOptions, UpdateMetadataOpts,
     },
     erasure::Writer,
     error::Error as EcsError,
@@ -694,103 +693,104 @@ impl Node for NodeService {
     }
 
     type ReadAtStream = ResponseStream;
-    async fn read_at(&self, request: Request<Streaming<ReadAtRequest>>) -> Result<Response<Self::ReadAtStream>, Status> {
+    async fn read_at(&self, _request: Request<Streaming<ReadAtRequest>>) -> Result<Response<Self::ReadAtStream>, Status> {
         info!("read_at");
+        // Remote reads are now served over HTTP via the read_file_stream RPC route above; this gRPC streaming path is retired.
+        unimplemented!("read_at");
 
-        let mut in_stream = request.into_inner();
-        let (tx, rx) = mpsc::channel(128);
+        // let mut in_stream = request.into_inner();
+        // let (tx, rx) = mpsc::channel(128);
 
-        tokio::spawn(async move {
-            let mut file_ref = None;
-            while let Some(result) = in_stream.next().await {
-                match result {
-                    Ok(v) => {
-                        match file_ref.as_ref() {
-                            Some(_) => (),
-                            None => {
-                                if let Some(disk) = find_local_disk(&v.disk).await {
-                                    match disk.read_file(&v.volume, &v.path).await {
-                                        Ok(file_reader) => file_ref = Some(file_reader),
-                                        Err(err) => {
-                                            tx.send(Ok(ReadAtResponse {
-                                                success: false,
-                                                data: Vec::new(),
-                                                error: Some(err_to_proto_err(&err, &format!("read file failed: {}", err))),
-                                                read_size: -1,
-                                            }))
-                                            .await
-                                            .expect("working rx");
-                                            break;
-                                        }
-                                    }
-                                } else {
-                                    tx.send(Ok(ReadAtResponse {
-                                        success: false,
-                                        data: Vec::new(),
-                                        error: Some(err_to_proto_err(
-                                            &EcsError::new(StorageError::InvalidArgument(
-                                                Default::default(),
-                                                Default::default(),
-                                                Default::default(),
-                                            )),
-                                            "can not find disk",
-                                        )),
-                                        read_size: -1,
-                                    }))
-                                    .await
-                                    .expect("working rx");
-                                    break;
-                                }
-                            }
-                        };
+        // tokio::spawn(async move {
+        //     let mut file_ref = None;
+        //     while let Some(result) = in_stream.next().await {
+        //         match result {
+        //             Ok(v) => {
+        //                 match file_ref.as_ref() {
+        //                     Some(_) => (),
+        //                     None => {
+        //                         if let Some(disk) = find_local_disk(&v.disk).await {
+        //                             match disk.read_file(&v.volume, &v.path).await {
+        //                                 Ok(file_reader) => file_ref = Some(file_reader),
+        //                                 Err(err) => {
+        //                                     tx.send(Ok(ReadAtResponse {
+        //                                         success: false,
+        //                                         data: Vec::new(),
+        //                                         error: Some(err_to_proto_err(&err, &format!("read file failed: {}", err))),
+        //                                         read_size: -1,
+        //                                     }))
+        //                                     .await
+        //                                     .expect("working rx");
+        //                                     break;
+        //                                 }
+        //                             }
+        //                         } else {
+        //                             tx.send(Ok(ReadAtResponse {
+        //                                 success: false,
+        //                                 data: Vec::new(),
+        //                                 error: Some(err_to_proto_err(
+        //                                     &EcsError::new(StorageError::InvalidArgument(
+        //                                         Default::default(),
+        //                                         Default::default(),
+        //                                         Default::default(),
+        //                                     )),
+        //                                     "can not find disk",
+        //                                 )),
+        //                                 read_size: -1,
+        //                             }))
+        //                             .await
+        //                             .expect("working rx");
+        //                             break;
+        //                         }
+        //                     }
+        //                 };
 
-                        let mut data = vec![0u8; v.length.try_into().unwrap()];
+        //                 let mut data = vec![0u8; v.length.try_into().unwrap()];
 
-                        match file_ref
-                            .as_mut()
-                            .unwrap()
-                            .read_at(v.offset.try_into().unwrap(), &mut data)
-                            .await
-                        {
-                            Ok(read_size) => tx.send(Ok(ReadAtResponse {
-                                success: true,
-                                data,
-                                read_size: read_size.try_into().unwrap(),
-                                error: None,
-                            })),
-                            Err(err) => tx.send(Ok(ReadAtResponse {
-                                success: false,
-                                data: Vec::new(),
-                                error: Some(err_to_proto_err(&err, &format!("read at failed: {}", err))),
-                                read_size: -1,
-                            })),
-                        }
-                        .await
-                        .unwrap();
-                    }
-                    Err(err) => {
-                        if let Some(io_err) = match_for_io_error(&err) {
-                            if io_err.kind() == ErrorKind::BrokenPipe {
-                                // here you can handle special case when client
-                                // disconnected in unexpected way
-                                eprintln!("\tclient disconnected: broken pipe");
-                                break;
-                            }
-                        }
+        //                 match file_ref
+        //                     .as_mut()
+        //                     .unwrap()
+        //                     .read_at(v.offset.try_into().unwrap(), &mut data)
+        //                     .await
+        //                 {
+        //                     Ok(read_size) => tx.send(Ok(ReadAtResponse {
+        //                         success: true,
+        //                         data,
+        //                         read_size: read_size.try_into().unwrap(),
+        //                         error: None,
+        //                     })),
+        //                     Err(err) => tx.send(Ok(ReadAtResponse {
+        //                         success: false,
+        //                         data: Vec::new(),
+        //                         error: Some(err_to_proto_err(&err, &format!("read at failed: {}", err))),
+        //                         read_size: -1,
+        //                     })),
+        //                 }
+        //                 .await
+        //                 .unwrap();
+        //             }
+        //             Err(err) => {
+        //                 if let Some(io_err) = match_for_io_error(&err) {
+        //                     if io_err.kind() == ErrorKind::BrokenPipe {
+        //                         // here you can handle special case when client
+        //                         // disconnected in unexpected way
+        //                         eprintln!("\tclient disconnected: broken pipe");
+        //                         break;
+        //                     }
+        //                 }
 
-                        match tx.send(Err(err)).await {
-                            Ok(_) => (),
-                            Err(_err) => break, // response was dropped
-                        }
-                    }
-                }
-            }
-            println!("\tstream ended");
-        });
+        //                 match tx.send(Err(err)).await {
+        //                     Ok(_) => (),
+        //                     Err(_err) => break, // response was dropped
+        //                 }
+        //             }
+        //         }
+        //     }
+        //     println!("\tstream ended");
+        // });
 
-        let out_stream = ReceiverStream::new(rx);
+        // let out_stream = ReceiverStream::new(rx);
 
-        Ok(tonic::Response::new(Box::pin(out_stream)))
+        // Ok(tonic::Response::new(Box::pin(out_stream)))
     }
 
     async fn list_dir(&self, request: Request<ListDirRequest>) -> Result<Response<ListDirResponse>, Status> {
diff --git a/scripts/run.sh b/scripts/run.sh
index bd114184..2b00ade0 100755
--- a/scripts/run.sh
+++ b/scripts/run.sh
@@ -6,8 +6,8 @@ fi
 
 current_dir=$(pwd)
 
-mkdir -p ./target/volume/test
-# mkdir -p ./target/volume/test{0..4}
+mkdir -p ./target/volume/test{0..4} if [ -z "$RUST_LOG" ]; then @@ -19,8 +19,8 @@ fi # export RUSTFS_STORAGE_CLASS_INLINE_BLOCK="512 KB" -# RUSTFS_VOLUMES="./target/volume/test{0...4}" -export RUSTFS_VOLUMES="./target/volume/test" +export RUSTFS_VOLUMES="./target/volume/test{0...4}" +# export RUSTFS_VOLUMES="./target/volume/test" export RUSTFS_ADDRESS="0.0.0.0:9000" export RUSTFS_CONSOLE_ENABLE=true export RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9002" From 70031effa76ae1931fe38b51e76f16381e70d3c9 Mon Sep 17 00:00:00 2001 From: weisd Date: Tue, 11 Mar 2025 09:12:20 +0800 Subject: [PATCH 2/5] use http for remote read/write --- .docker/Dockerfile.devenv | 2 +- .docker/Dockerfile.ubuntu22.04 | 2 +- Cargo.lock | 17 +- Cargo.toml | 1 - Makefile | 4 +- TODO.md | 38 + api/admin/Cargo.toml | 21 - api/admin/src/error.rs | 98 - api/admin/src/handlers.rs | 1 - api/admin/src/handlers/list_pools.rs | 83 - api/admin/src/lib.rs | 20 - api/admin/src/middlewares.rs | 0 .../generated/flatbuffers_generated/models.rs | 207 +- .../src/generated/proto_gen/node_service.rs | 3836 +++++++++++++---- docker-compose.yaml | 74 + ecstore/Cargo.toml | 1 + ecstore/src/bitrot.rs | 247 +- ecstore/src/disk/io.rs | 229 + ecstore/src/disk/local.rs | 46 +- ecstore/src/disk/mod.rs | 492 +-- ecstore/src/disk/remote.rs | 84 +- ecstore/src/set_disk.rs | 25 +- ecstore/src/store_init.rs | 10 +- ecstore/src/utils/os/linux.rs | 2 +- rustfs/Cargo.toml | 1 - rustfs/src/admin/rpc.rs | 65 +- rustfs/src/grpc.rs | 286 +- rustfs/src/main.rs | 4 +- 28 files changed, 4104 insertions(+), 1792 deletions(-) delete mode 100644 api/admin/Cargo.toml delete mode 100644 api/admin/src/error.rs delete mode 100644 api/admin/src/handlers.rs delete mode 100644 api/admin/src/handlers/list_pools.rs delete mode 100644 api/admin/src/lib.rs delete mode 100644 api/admin/src/middlewares.rs create mode 100644 docker-compose.yaml create mode 100644 ecstore/src/disk/io.rs diff --git a/.docker/Dockerfile.devenv b/.docker/Dockerfile.devenv index e95027d2..de2fcb49 100644 --- a/.docker/Dockerfile.devenv +++ b/.docker/Dockerfile.devenv @@ -4,7 +4,7 @@ ENV LANG C.UTF-8 RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list -RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev -y +RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y # install protoc RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v27.0/protoc-27.0-linux-x86_64.zip \ diff --git a/.docker/Dockerfile.ubuntu22.04 b/.docker/Dockerfile.ubuntu22.04 index 546b16b7..b955de8e 100644 --- a/.docker/Dockerfile.ubuntu22.04 +++ b/.docker/Dockerfile.ubuntu22.04 @@ -4,7 +4,7 @@ ENV LANG C.UTF-8 RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list -RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev -y +RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y # install protoc RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v27.0/protoc-27.0-linux-x86_64.zip \ diff --git a/Cargo.lock b/Cargo.lock index b6bba26a..b5722b5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,21 +17,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" -[[package]] -name = "admin" -version = "0.0.1" -dependencies = [ - "axum", - "ecstore", - "futures-util", - "hyper", - "mime", - "serde", - "serde_json", - "time", - "tower 0.5.2", -] - [[package]] name = "aead" version = "0.5.2" @@ -1960,6 +1945,7 @@ dependencies = [ "tracing-error", "transform-stream", "url", + "urlencoding", "uuid", "winapi", "workers", @@ -5309,7 +5295,6 @@ dependencies = [ name = "rustfs" version = "0.1.0" dependencies = [ - "admin", "async-trait", "atoi", "axum", diff --git a/Cargo.toml b/Cargo.toml index 01e4a072..a78ec0d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,6 @@ members = [ "common/common", # Shared utilities and data structures "common/lock", # Distributed locking implementation "common/protos", # Protocol buffer definitions - "api/admin", # Admin HTTP API endpoints "reader", # Object reading service "common/workers", # Worker thread pools and task scheduling "iam", # Identity and Access Management diff --git a/Makefile b/Makefile index b3b2e83a..94dd8853 100644 --- a/Makefile +++ b/Makefile @@ -37,9 +37,9 @@ probe-e2e: # in target/rockylinux9.3/release/rustfs BUILD_OS ?= rockylinux9.3 .PHONY: build -build: ROCKYLINUX_BUILD_IMAGE_NAME = $(BUILD_OS):v1 +build: ROCKYLINUX_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1 build: ROCKYLINUX_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build -build: BUILD_CMD = /root/.cargo/bin/cargo build --release --target-dir /root/s3-rustfs/target/$(BUILD_OS) +build: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS) build: $(DOCKER_CLI) build -t $(ROCKYLINUX_BUILD_IMAGE_NAME) -f $(DOCKERFILE_PATH)/Dockerfile.$(BUILD_OS) . $(DOCKER_CLI) run --rm --name $(ROCKYLINUX_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(ROCKYLINUX_BUILD_IMAGE_NAME) $(BUILD_CMD) diff --git a/TODO.md b/TODO.md index 2aa1ca64..597a00f4 100644 --- a/TODO.md +++ b/TODO.md @@ -57,3 +57,41 @@ - [ ] 对象压缩 - [ ] STS - [ ] 分层(阿里云、腾讯云、S3远程对接) + + + +scp ./target/ubuntu22.04/release/rustfs.zip root@8.130.183.154:~/ +scp ./target/ubuntu22.04/release/rustfs.zip root@8.130.177.182:~/ +scp ./target/ubuntu22.04/release/rustfs.zip root@8.130.91.189:~/ +scp ./target/ubuntu22.04/release/rustfs.zip root@8.130.182.114:~/ + +scp ./target/x86_64-unknown-linux-musl/release/rustfs root@8.130.183.154:~/ +scp ./target/x86_64-unknown-linux-musl/release/rustfs root@8.130.177.182:~/ +scp ./target/x86_64-unknown-linux-musl/release/rustfs root@8.130.91.189:~/ +scp ./target/x86_64-unknown-linux-musl/release/rustfs root@8.130.182.114:~/ + + + + + + 2025-03-11T06:18:50.011565Z DEBUG s3s::service: req: Request { method: PUT, uri: /rustfs/rpc/put_file_stream?disk=http://node2:9000/data/rustfs2&volume=.rustfs.sys/tmp&path=a45ade1a-e09b-4eb4-bac1-8b5f55f7d438/235da61f-a705-4f9a-aa21-7801d2eaf61d/part.1&append=false, version: HTTP/1.1, headers: {"accept": "*/*", "host": "node2:9000", "transfer-encoding": "chunked"}, body: Body { hyper: Body(Streaming) } } + at /Users/weisd/.cargo/git/checkouts/s3s-58426f2d17c34859/ab139f7/crates/s3s/src/service.rs:81 + in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00 + + 2025-03-11T06:18:50.011603Z DEBUG s3s::ops: parsing path-style request, decoded_uri_path: "/rustfs/rpc/put_file_stream" + at /Users/weisd/.cargo/git/checkouts/s3s-58426f2d17c34859/ab139f7/crates/s3s/src/ops/mod.rs:266 + in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00 + + 
2025-03-11T06:18:50.011651Z DEBUG s3s::ops: body_changed: false, decoded_content_length: None, has_multipart: false + at /Users/weisd/.cargo/git/checkouts/s3s-58426f2d17c34859/ab139f7/crates/s3s/src/ops/mod.rs:342 + in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00 + + 2025-03-11T06:18:50.011687Z WARN rustfs::admin::rpc: handle PutFile + at rustfs/src/admin/rpc.rs:120 + in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00 + + 2025-03-11T06:18:50.011716Z DEBUG s3s::ops: custom route returns error, err: S3Error(Inner { code: InvalidArgument, message: Some("get query failed1 Error(\"missing field `size`\")"), request_id: None, status_code: None, source: None, headers: None }) + at /Users/weisd/.cargo/git/checkouts/s3s-58426f2d17c34859/ab139f7/crates/s3s/src/ops/mod.rs:227 + in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00 + + 2025-03-11T06:18:50.011751Z DEBUG s3s::service: res: Response { status: 400, version: HTTP/1.1, headers: {"content-type": "application/xml"}, body: Body { once: b"InvalidArgumentget query failed1 Error("missing field `size`")" } } \ No newline at end of file diff --git a/api/admin/Cargo.toml b/api/admin/Cargo.toml deleted file mode 100644 index 7c86092c..00000000 --- a/api/admin/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "admin" -edition.workspace = true -license.workspace = true -repository.workspace = true -rust-version.workspace = true -version.workspace = true - -[lints] -workspace = true - -[dependencies] -axum.workspace = true -mime.workspace = true -serde.workspace = true -serde_json.workspace = true -ecstore = { path = "../../ecstore" } -time = { workspace = true, features = ["serde"] } -tower.workspace = true -futures-util = "0.3.31" -hyper.workspace = true diff --git a/api/admin/src/error.rs b/api/admin/src/error.rs deleted file mode 100644 index bbe24460..00000000 --- a/api/admin/src/error.rs +++ /dev/null @@ -1,98 +0,0 @@ -use axum::{ - body::Body, - http::{header::CONTENT_TYPE, HeaderValue, StatusCode}, - response::{IntoResponse, Response}, -}; -use mime::APPLICATION_JSON; -use serde::Serialize; - -#[derive(Serialize, Default)] -#[serde(rename_all = "PascalCase")] -pub struct ErrorResponse { - pub code: String, - pub message: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub key: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub bucket_name: Option, - pub resource: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub region: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub request_id: Option, - pub host_id: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub actual_object_size: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub range_requested: Option, -} - -impl IntoResponse for APIError { - fn into_response(self) -> Response { - let code = self.http_status_code; - let err_response = ErrorResponse::from(self); - let json_res = match serde_json::to_vec(&err_response) { - Ok(r) => r, - Err(e) => return (StatusCode::INTERNAL_SERVER_ERROR, format!("{e}")).into_response(), - }; - - Response::builder() - .status(code) - .header(CONTENT_TYPE, HeaderValue::from_static(APPLICATION_JSON.as_ref())) - .body(Body::from(json_res)) - .unwrap_or_else(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("{e}")).into_response()) - } -} - -#[derive(Default)] -pub struct APIError { - code: String, - description: String, - http_status_code: StatusCode, - object_size: Option, - range_requested: 
Option, -} - -pub enum ErrorCode { - ErrNotImplemented, - ErrServerNotInitialized, -} - -impl IntoResponse for ErrorCode { - fn into_response(self) -> Response { - APIError::from(self).into_response() - } -} - -impl From for APIError { - fn from(value: ErrorCode) -> Self { - use ErrorCode::*; - - match value { - ErrNotImplemented => APIError { - code: "NotImplemented".into(), - description: "A header you provided implies functionality that is not implemented.".into(), - http_status_code: StatusCode::NOT_IMPLEMENTED, - ..Default::default() - }, - ErrServerNotInitialized => APIError { - code: "ServerNotInitialized".into(), - description: "Server not initialized yet, please try again.".into(), - http_status_code: StatusCode::SERVICE_UNAVAILABLE, - ..Default::default() - }, - } - } -} - -impl From for ErrorResponse { - fn from(value: APIError) -> Self { - Self { - code: value.code, - message: value.description, - actual_object_size: value.object_size, - range_requested: value.range_requested, - ..Default::default() - } - } -} diff --git a/api/admin/src/handlers.rs b/api/admin/src/handlers.rs deleted file mode 100644 index fa5c33dd..00000000 --- a/api/admin/src/handlers.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod list_pools; diff --git a/api/admin/src/handlers/list_pools.rs b/api/admin/src/handlers/list_pools.rs deleted file mode 100644 index b80dd465..00000000 --- a/api/admin/src/handlers/list_pools.rs +++ /dev/null @@ -1,83 +0,0 @@ -use crate::error::ErrorCode; -use crate::Result as LocalResult; - -use axum::Json; -use ecstore::new_object_layer_fn; -use serde::Serialize; -use time::OffsetDateTime; - -#[derive(Serialize)] -pub struct PoolStatus { - id: i64, - cmdline: String, - #[serde(rename = "lastUpdate")] - #[serde(serialize_with = "time::serde::rfc3339::serialize")] - last_updat: OffsetDateTime, - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(rename = "decommissionInfo")] - decommission_info: Option, -} - -#[derive(Serialize)] -#[serde(rename_all = "camelCase")] -struct PoolDecommissionInfo { - #[serde(serialize_with = "time::serde::rfc3339::serialize")] - start_time: OffsetDateTime, - start_size: i64, - total_size: i64, - current_size: i64, - complete: bool, - failed: bool, - canceled: bool, - - #[serde(rename = "objectsDecommissioned")] - items_decommissioned: i64, - #[serde(rename = "objectsDecommissionedFailed")] - items_decommission_failed: i64, - #[serde(rename = "bytesDecommissioned")] - bytes_done: i64, - #[serde(rename = "bytesDecommissionedFailed")] - bytes_failed: i64, -} - -pub async fn handler() -> LocalResult>> { - // if ecstore::is_legacy().await { - // return Err(ErrorCode::ErrNotImplemented); - // } - // - // - - // todo 实用oncelock作为全局变量 - - let Some(store) = new_object_layer_fn() else { return Err(ErrorCode::ErrNotImplemented) }; - // todo, 调用pool.status()接口获取每个池的数据 - // - let mut result = Vec::new(); - for (idx, _pool) in store.pools.iter().enumerate() { - // 这里mock一下数据 - result.push(PoolStatus { - id: idx as _, - cmdline: "cmdline".into(), - last_updat: OffsetDateTime::now_utc(), - decommission_info: if idx % 2 == 0 { - Some(PoolDecommissionInfo { - start_time: OffsetDateTime::now_utc(), - start_size: 1, - total_size: 2, - current_size: 2, - complete: true, - failed: true, - canceled: true, - items_decommissioned: 1, - items_decommission_failed: 1, - bytes_done: 1, - bytes_failed: 1, - }) - } else { - None - }, - }) - } - - Ok(Json(result)) -} diff --git a/api/admin/src/lib.rs b/api/admin/src/lib.rs deleted file mode 100644 index a0477dfd..00000000 --- 
a/api/admin/src/lib.rs +++ /dev/null @@ -1,20 +0,0 @@ -pub mod error; -pub mod handlers; - -use axum::{extract::Request, response::Response, routing::get, BoxError, Router}; -use error::ErrorCode; -use handlers::list_pools; -use tower::Service; - -pub type Result = std::result::Result; - -const API_VERSION: &str = "/v3"; - -pub fn register_admin_router() -> impl Service, Future: Send> + Clone { - Router::new() - .nest( - "/rustfs/admin", - Router::new().nest(API_VERSION, Router::new().route("/pools/list", get(list_pools::handler))), - ) - .into_service() -} diff --git a/api/admin/src/middlewares.rs b/api/admin/src/middlewares.rs deleted file mode 100644 index e69de29b..00000000 diff --git a/common/protos/src/generated/flatbuffers_generated/models.rs b/common/protos/src/generated/flatbuffers_generated/models.rs index e4949fdc..aa1f6ae2 100644 --- a/common/protos/src/generated/flatbuffers_generated/models.rs +++ b/common/protos/src/generated/flatbuffers_generated/models.rs @@ -1,9 +1,10 @@ // automatically generated by the FlatBuffers compiler, do not modify + // @generated -use core::cmp::Ordering; use core::mem; +use core::cmp::Ordering; extern crate flatbuffers; use self::flatbuffers::{EndianScalar, Follow}; @@ -11,114 +12,112 @@ use self::flatbuffers::{EndianScalar, Follow}; #[allow(unused_imports, dead_code)] pub mod models { - use core::cmp::Ordering; - use core::mem; + use core::mem; + use core::cmp::Ordering; - extern crate flatbuffers; - use self::flatbuffers::{EndianScalar, Follow}; + extern crate flatbuffers; + use self::flatbuffers::{EndianScalar, Follow}; - pub enum PingBodyOffset {} - #[derive(Copy, Clone, PartialEq)] +pub enum PingBodyOffset {} +#[derive(Copy, Clone, PartialEq)] - pub struct PingBody<'a> { - pub _tab: flatbuffers::Table<'a>, +pub struct PingBody<'a> { + pub _tab: flatbuffers::Table<'a>, +} + +impl<'a> flatbuffers::Follow<'a> for PingBody<'a> { + type Inner = PingBody<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { _tab: flatbuffers::Table::new(buf, loc) } + } +} + +impl<'a> PingBody<'a> { + pub const VT_PAYLOAD: flatbuffers::VOffsetT = 4; + + pub const fn get_fully_qualified_name() -> &'static str { + "models.PingBody" + } + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + PingBody { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args PingBodyArgs<'args> + ) -> flatbuffers::WIPOffset> { + let mut builder = PingBodyBuilder::new(_fbb); + if let Some(x) = args.payload { builder.add_payload(x); } + builder.finish() + } + + + #[inline] + pub fn payload(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::>>(PingBody::VT_PAYLOAD, None)} + } +} + +impl flatbuffers::Verifiable for PingBody<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, pos: usize + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>>("payload", Self::VT_PAYLOAD, false)? 
+ .finish(); + Ok(()) + } +} +pub struct PingBodyArgs<'a> { + pub payload: Option>>, +} +impl<'a> Default for PingBodyArgs<'a> { + #[inline] + fn default() -> Self { + PingBodyArgs { + payload: None, } + } +} - impl<'a> flatbuffers::Follow<'a> for PingBody<'a> { - type Inner = PingBody<'a>; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - Self { - _tab: flatbuffers::Table::new(buf, loc), - } - } +pub struct PingBodyBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, +} +impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> PingBodyBuilder<'a, 'b, A> { + #[inline] + pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset>) { + self.fbb_.push_slot_always::>(PingBody::VT_PAYLOAD, payload); + } + #[inline] + pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> PingBodyBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + PingBodyBuilder { + fbb_: _fbb, + start_: start, } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } +} - impl<'a> PingBody<'a> { - pub const VT_PAYLOAD: flatbuffers::VOffsetT = 4; +impl core::fmt::Debug for PingBody<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("PingBody"); + ds.field("payload", &self.payload()); + ds.finish() + } +} +} // pub mod models - pub const fn get_fully_qualified_name() -> &'static str { - "models.PingBody" - } - - #[inline] - pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { - PingBody { _tab: table } - } - #[allow(unused_mut)] - pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>( - _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, - args: &'args PingBodyArgs<'args>, - ) -> flatbuffers::WIPOffset> { - let mut builder = PingBodyBuilder::new(_fbb); - if let Some(x) = args.payload { - builder.add_payload(x); - } - builder.finish() - } - - #[inline] - pub fn payload(&self) -> Option> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { - self._tab - .get::>>(PingBody::VT_PAYLOAD, None) - } - } - } - - impl flatbuffers::Verifiable for PingBody<'_> { - #[inline] - fn run_verifier(v: &mut flatbuffers::Verifier, pos: usize) -> Result<(), flatbuffers::InvalidFlatbuffer> { - use self::flatbuffers::Verifiable; - v.visit_table(pos)? - .visit_field::>>("payload", Self::VT_PAYLOAD, false)? 
- .finish(); - Ok(()) - } - } - pub struct PingBodyArgs<'a> { - pub payload: Option>>, - } - impl<'a> Default for PingBodyArgs<'a> { - #[inline] - fn default() -> Self { - PingBodyArgs { payload: None } - } - } - - pub struct PingBodyBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { - fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, - start_: flatbuffers::WIPOffset, - } - impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> PingBodyBuilder<'a, 'b, A> { - #[inline] - pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset>) { - self.fbb_ - .push_slot_always::>(PingBody::VT_PAYLOAD, payload); - } - #[inline] - pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> PingBodyBuilder<'a, 'b, A> { - let start = _fbb.start_table(); - PingBodyBuilder { - fbb_: _fbb, - start_: start, - } - } - #[inline] - pub fn finish(self) -> flatbuffers::WIPOffset> { - let o = self.fbb_.end_table(self.start_); - flatbuffers::WIPOffset::new(o.value()) - } - } - - impl core::fmt::Debug for PingBody<'_> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let mut ds = f.debug_struct("PingBody"); - ds.field("payload", &self.payload()); - ds.finish() - } - } -} // pub mod models diff --git a/common/protos/src/generated/proto_gen/node_service.rs b/common/protos/src/generated/proto_gen/node_service.rs index 000d5b48..88f7b3ab 100644 --- a/common/protos/src/generated/proto_gen/node_service.rs +++ b/common/protos/src/generated/proto_gen/node_service.rs @@ -622,7 +622,10 @@ pub struct GenerallyLockResponse { #[derive(Clone, PartialEq, ::prost::Message)] pub struct Mss { #[prost(map = "string, string", tag = "1")] - pub value: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + pub value: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, } #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct LocalStorageInfoRequest { @@ -786,7 +789,10 @@ pub struct DownloadProfileDataResponse { #[prost(bool, tag = "1")] pub success: bool, #[prost(map = "string, bytes", tag = "2")] - pub data: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::vec::Vec>, + pub data: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::vec::Vec, + >, #[prost(string, optional, tag = "3")] pub error_info: ::core::option::Option<::prost::alloc::string::String>, } @@ -1053,9 +1059,15 @@ pub struct LoadTransitionTierConfigResponse { } /// Generated client implementations. 
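Before the regenerated client below, a short usage sketch for orientation. The crate path, endpoint URL, and use of `PingRequest::default()` are illustrative assumptions, not taken from this patch; `connect` is the constructor tonic generates when its transport feature is enabled:

    use protos::proto_gen::node_service::node_service_client::NodeServiceClient;
    use protos::proto_gen::node_service::PingRequest;

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        // Endpoint is a placeholder; any node running the gRPC service works.
        let mut client = NodeServiceClient::connect("http://node2:9000").await?;
        // Prost messages implement Default, so an empty ping body is enough here.
        let resp = client.ping(PingRequest::default()).await?;
        println!("ping response: {:?}", resp.into_inner());
        Ok(())
    }

The churn in the hunk that follows is mechanical: rustfmt's layout is replaced by the generator's own wrapping, and the client API itself is unchanged.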
pub mod node_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::wildcard_imports, clippy::let_unit_value)] - use tonic::codegen::http::Uri; + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct NodeServiceClient { inner: tonic::client::Grpc, @@ -1086,16 +1098,22 @@ pub mod node_service_client { let inner = tonic::client::Grpc::with_origin(inner, origin); Self { inner } } - pub fn with_interceptor(inner: T, interceptor: F) -> NodeServiceClient> + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> NodeServiceClient> where F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< http::Request, - Response = http::Response<>::ResponseBody>, + Response = http::Response< + >::ResponseBody, + >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { NodeServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -1138,9 +1156,15 @@ pub mod node_service_client { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Ping"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Ping", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Ping")); @@ -1149,13 +1173,22 @@ pub mod node_service_client { pub async fn heal_bucket( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/HealBucket"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/HealBucket", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "HealBucket")); @@ -1164,13 +1197,22 @@ pub mod node_service_client { pub async fn list_bucket( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ListBucket"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ListBucket", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ListBucket")); @@ -1179,13 +1221,22 @@ pub mod node_service_client { pub async fn make_bucket( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, 
tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/MakeBucket"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/MakeBucket", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "MakeBucket")); @@ -1194,13 +1245,22 @@ pub mod node_service_client { pub async fn get_bucket_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetBucketInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetBucketInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetBucketInfo")); @@ -1209,13 +1269,22 @@ pub mod node_service_client { pub async fn delete_bucket( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteBucket"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteBucket", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeleteBucket")); @@ -1224,13 +1293,22 @@ pub mod node_service_client { pub async fn read_all( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadAll"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadAll", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadAll")); @@ -1239,13 +1317,22 @@ pub mod node_service_client { pub async fn write_all( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + 
format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/WriteAll"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/WriteAll", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "WriteAll")); @@ -1258,9 +1345,15 @@ pub mod node_service_client { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Delete"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Delete", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Delete")); @@ -1269,13 +1362,22 @@ pub mod node_service_client { pub async fn verify_file( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/VerifyFile"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/VerifyFile", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "VerifyFile")); @@ -1284,13 +1386,22 @@ pub mod node_service_client { pub async fn check_parts( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/CheckParts"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/CheckParts", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "CheckParts")); @@ -1299,13 +1410,22 @@ pub mod node_service_client { pub async fn rename_part( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RenamePart"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/RenamePart", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "RenamePart")); @@ -1314,13 +1434,22 @@ pub mod 
node_service_client { pub async fn rename_file( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RenameFile"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/RenameFile", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "RenameFile")); @@ -1333,9 +1462,15 @@ pub mod node_service_client { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Write"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Write", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Write")); @@ -1344,13 +1479,22 @@ pub mod node_service_client { pub async fn write_stream( &mut self, request: impl tonic::IntoStreamingRequest, - ) -> std::result::Result>, tonic::Status> { + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/WriteStream"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/WriteStream", + ); let mut req = request.into_streaming_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "WriteStream")); @@ -1360,13 +1504,22 @@ pub mod node_service_client { pub async fn read_at( &mut self, request: impl tonic::IntoStreamingRequest, - ) -> std::result::Result>, tonic::Status> { + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadAt"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadAt", + ); let mut req = request.into_streaming_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadAt")); @@ -1375,13 +1528,22 @@ pub mod node_service_client { pub async fn list_dir( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; 
let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ListDir"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ListDir", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ListDir")); @@ -1390,13 +1552,22 @@ pub mod node_service_client { pub async fn walk_dir( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result>, tonic::Status> { + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/WalkDir"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/WalkDir", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "WalkDir")); @@ -1405,13 +1576,22 @@ pub mod node_service_client { pub async fn rename_data( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RenameData"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/RenameData", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "RenameData")); @@ -1420,13 +1600,22 @@ pub mod node_service_client { pub async fn make_volumes( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/MakeVolumes"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/MakeVolumes", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "MakeVolumes")); @@ -1435,13 +1624,22 @@ pub mod node_service_client { pub async fn make_volume( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/MakeVolume"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/MakeVolume", + ); let mut req = 
request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "MakeVolume")); @@ -1450,13 +1648,22 @@ pub mod node_service_client { pub async fn list_volumes( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ListVolumes"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ListVolumes", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ListVolumes")); @@ -1465,13 +1672,22 @@ pub mod node_service_client { pub async fn stat_volume( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/StatVolume"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/StatVolume", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "StatVolume")); @@ -1480,13 +1696,22 @@ pub mod node_service_client { pub async fn delete_paths( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeletePaths"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeletePaths", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeletePaths")); @@ -1495,13 +1720,22 @@ pub mod node_service_client { pub async fn update_metadata( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/UpdateMetadata"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/UpdateMetadata", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "UpdateMetadata")); @@ -1510,13 +1744,22 @@ pub mod node_service_client { pub async fn write_metadata( &mut self, request: impl 
tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/WriteMetadata"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/WriteMetadata", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "WriteMetadata")); @@ -1525,13 +1768,22 @@ pub mod node_service_client { pub async fn read_version( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadVersion"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadVersion", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadVersion")); @@ -1544,9 +1796,15 @@ pub mod node_service_client { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadXL"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadXL", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadXL")); @@ -1555,13 +1813,22 @@ pub mod node_service_client { pub async fn delete_version( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteVersion"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteVersion", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeleteVersion")); @@ -1570,13 +1837,22 @@ pub mod node_service_client { pub async fn delete_versions( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = 
http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteVersions"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteVersions", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeleteVersions")); @@ -1585,13 +1861,22 @@ pub mod node_service_client { pub async fn read_multiple( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadMultiple"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReadMultiple", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadMultiple")); @@ -1600,13 +1885,22 @@ pub mod node_service_client { pub async fn delete_volume( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteVolume"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteVolume", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeleteVolume")); @@ -1615,13 +1909,22 @@ pub mod node_service_client { pub async fn disk_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DiskInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DiskInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DiskInfo")); @@ -1630,13 +1933,22 @@ pub mod node_service_client { pub async fn ns_scanner( &mut self, request: impl tonic::IntoStreamingRequest, - ) -> std::result::Result>, tonic::Status> { + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/NsScanner"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/NsScanner", + ); let mut req = request.into_streaming_request(); 
req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "NsScanner")); @@ -1645,13 +1957,22 @@ pub mod node_service_client { pub async fn lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Lock"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Lock", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Lock")); @@ -1660,13 +1981,22 @@ pub mod node_service_client { pub async fn un_lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/UnLock"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/UnLock", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "UnLock")); @@ -1675,13 +2005,22 @@ pub mod node_service_client { pub async fn r_lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RLock"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/RLock", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "RLock")); @@ -1690,13 +2029,22 @@ pub mod node_service_client { pub async fn r_un_lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RUnLock"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/RUnLock", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "RUnLock")); @@ -1705,13 +2053,22 @@ pub mod node_service_client { pub async fn force_un_lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + 
> { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ForceUnLock"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ForceUnLock", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ForceUnLock")); @@ -1720,13 +2077,22 @@ pub mod node_service_client { pub async fn refresh( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Refresh"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/Refresh", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Refresh")); @@ -1735,13 +2101,22 @@ pub mod node_service_client { pub async fn local_storage_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LocalStorageInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LocalStorageInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "LocalStorageInfo")); @@ -1750,13 +2125,22 @@ pub mod node_service_client { pub async fn server_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ServerInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ServerInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ServerInfo")); @@ -1765,13 +2149,22 @@ pub mod node_service_client { pub async fn get_cpus( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = 
tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetCpus"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetCpus", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetCpus")); @@ -1780,13 +2173,22 @@ pub mod node_service_client { pub async fn get_net_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetNetInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetNetInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetNetInfo")); @@ -1795,13 +2197,22 @@ pub mod node_service_client { pub async fn get_partitions( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetPartitions"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetPartitions", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetPartitions")); @@ -1810,13 +2221,22 @@ pub mod node_service_client { pub async fn get_os_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetOsInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetOsInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetOsInfo")); @@ -1825,13 +2245,22 @@ pub mod node_service_client { pub async fn get_se_linux_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetSELinuxInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetSELinuxInfo", + ); let mut req = 
request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetSELinuxInfo")); @@ -1840,13 +2269,22 @@ pub mod node_service_client { pub async fn get_sys_config( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetSysConfig"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetSysConfig", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetSysConfig")); @@ -1855,13 +2293,22 @@ pub mod node_service_client { pub async fn get_sys_errors( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetSysErrors"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetSysErrors", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetSysErrors")); @@ -1870,13 +2317,22 @@ pub mod node_service_client { pub async fn get_mem_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetMemInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetMemInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetMemInfo")); @@ -1885,13 +2341,22 @@ pub mod node_service_client { pub async fn get_metrics( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetMetrics"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetMetrics", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetMetrics")); @@ -1900,13 +2365,22 @@ pub mod node_service_client { pub async fn get_proc_info( &mut self, request: impl 
tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetProcInfo"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetProcInfo", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetProcInfo")); @@ -1915,13 +2389,22 @@ pub mod node_service_client { pub async fn start_profiling( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/StartProfiling"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/StartProfiling", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "StartProfiling")); @@ -1930,28 +2413,48 @@ pub mod node_service_client { pub async fn download_profile_data( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DownloadProfileData"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DownloadProfileData", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "DownloadProfileData")); + .insert( + GrpcMethod::new("node_service.NodeService", "DownloadProfileData"), + ); self.inner.unary(req, path, codec).await } pub async fn get_bucket_stats( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetBucketStats"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetBucketStats", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetBucketStats")); @@ -1960,13 +2463,22 @@ pub mod node_service_client { pub async fn get_sr_metrics( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + 
tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetSRMetrics"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetSRMetrics", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetSRMetrics")); @@ -1975,58 +2487,100 @@ pub mod node_service_client { pub async fn get_all_bucket_stats( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetAllBucketStats"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetAllBucketStats", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "GetAllBucketStats")); + .insert( + GrpcMethod::new("node_service.NodeService", "GetAllBucketStats"), + ); self.inner.unary(req, path, codec).await } pub async fn load_bucket_metadata( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadBucketMetadata"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadBucketMetadata", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "LoadBucketMetadata")); + .insert( + GrpcMethod::new("node_service.NodeService", "LoadBucketMetadata"), + ); self.inner.unary(req, path, codec).await } pub async fn delete_bucket_metadata( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteBucketMetadata"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteBucketMetadata", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "DeleteBucketMetadata")); + .insert( + GrpcMethod::new("node_service.NodeService", "DeleteBucketMetadata"), + ); self.inner.unary(req, path, codec).await } pub async fn delete_policy( &mut self, request: impl 
tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeletePolicy"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeletePolicy", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeletePolicy")); @@ -2035,13 +2589,22 @@ pub mod node_service_client { pub async fn load_policy( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadPolicy"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadPolicy", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "LoadPolicy")); @@ -2050,28 +2613,48 @@ pub mod node_service_client { pub async fn load_policy_mapping( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadPolicyMapping"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadPolicyMapping", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "LoadPolicyMapping")); + .insert( + GrpcMethod::new("node_service.NodeService", "LoadPolicyMapping"), + ); self.inner.unary(req, path, codec).await } pub async fn delete_user( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteUser"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteUser", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeleteUser")); @@ -2080,28 +2663,48 @@ pub mod node_service_client { pub async fn delete_service_account( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > 
{ self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteServiceAccount"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/DeleteServiceAccount", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "DeleteServiceAccount")); + .insert( + GrpcMethod::new("node_service.NodeService", "DeleteServiceAccount"), + ); self.inner.unary(req, path, codec).await } pub async fn load_user( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadUser"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadUser", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "LoadUser")); @@ -2110,28 +2713,48 @@ pub mod node_service_client { pub async fn load_service_account( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadServiceAccount"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadServiceAccount", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "LoadServiceAccount")); + .insert( + GrpcMethod::new("node_service.NodeService", "LoadServiceAccount"), + ); self.inner.unary(req, path, codec).await } pub async fn load_group( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadGroup"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadGroup", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "LoadGroup")); @@ -2140,16 +2763,30 @@ pub mod node_service_client { pub async fn reload_site_replication_config( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - 
.map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReloadSiteReplicationConfig"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReloadSiteReplicationConfig", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "ReloadSiteReplicationConfig")); + .insert( + GrpcMethod::new( + "node_service.NodeService", + "ReloadSiteReplicationConfig", + ), + ); self.inner.unary(req, path, codec).await } /// rpc VerifyBinary() returns () {}; @@ -2157,13 +2794,22 @@ pub mod node_service_client { pub async fn signal_service( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/SignalService"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/SignalService", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "SignalService")); @@ -2172,58 +2818,100 @@ pub mod node_service_client { pub async fn background_heal_status( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/BackgroundHealStatus"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/BackgroundHealStatus", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "BackgroundHealStatus")); + .insert( + GrpcMethod::new("node_service.NodeService", "BackgroundHealStatus"), + ); self.inner.unary(req, path, codec).await } pub async fn get_metacache_listing( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetMetacacheListing"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/GetMetacacheListing", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "GetMetacacheListing")); + .insert( + GrpcMethod::new("node_service.NodeService", "GetMetacacheListing"), + ); self.inner.unary(req, path, 
codec).await } pub async fn update_metacache_listing( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/UpdateMetacacheListing"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/UpdateMetacacheListing", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "UpdateMetacacheListing")); + .insert( + GrpcMethod::new("node_service.NodeService", "UpdateMetacacheListing"), + ); self.inner.unary(req, path, codec).await } pub async fn reload_pool_meta( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReloadPoolMeta"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/ReloadPoolMeta", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReloadPoolMeta")); @@ -2232,13 +2920,22 @@ pub mod node_service_client { pub async fn stop_rebalance( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/StopRebalance"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/StopRebalance", + ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "StopRebalance")); @@ -2247,38 +2944,69 @@ pub mod node_service_client { pub async fn load_rebalance_meta( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadRebalanceMeta"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadRebalanceMeta", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "LoadRebalanceMeta")); + .insert( + GrpcMethod::new("node_service.NodeService", "LoadRebalanceMeta"), + ); 
self.inner.unary(req, path, codec).await } pub async fn load_transition_tier_config( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await - .map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?; + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/LoadTransitionTierConfig"); + let path = http::uri::PathAndQuery::from_static( + "/node_service.NodeService/LoadTransitionTierConfig", + ); let mut req = request.into_request(); req.extensions_mut() - .insert(GrpcMethod::new("node_service.NodeService", "LoadTransitionTierConfig")); + .insert( + GrpcMethod::new( + "node_service.NodeService", + "LoadTransitionTierConfig", + ), + ); self.inner.unary(req, path, codec).await } } } /// Generated server implementations. pub mod node_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::wildcard_imports, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with NodeServiceServer. #[async_trait] @@ -2291,23 +3019,38 @@ pub mod node_service_server { async fn heal_bucket( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn list_bucket( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn make_bucket( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_bucket_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_bucket( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn read_all( &self, request: tonic::Request, @@ -2315,7 +3058,10 @@ pub mod node_service_server { async fn write_all( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete( &self, request: tonic::Request, @@ -2323,33 +3069,52 @@ pub mod node_service_server { async fn verify_file( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn check_parts( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn rename_part( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn rename_file( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn write( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the WriteStream method. 
- type WriteStreamStream: tonic::codegen::tokio_stream::Stream> + type WriteStreamStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + std::marker::Send + 'static; async fn write_stream( &self, request: tonic::Request>, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the ReadAt method. - type ReadAtStream: tonic::codegen::tokio_stream::Stream> + type ReadAtStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + std::marker::Send + 'static; /// rpc Append(AppendRequest) returns (AppendResponse) {}; @@ -2362,7 +3127,9 @@ pub mod node_service_server { request: tonic::Request, ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the WalkDir method. - type WalkDirStream: tonic::codegen::tokio_stream::Stream> + type WalkDirStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + std::marker::Send + 'static; async fn walk_dir( @@ -2372,39 +3139,66 @@ pub mod node_service_server { async fn rename_data( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn make_volumes( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn make_volume( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn list_volumes( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn stat_volume( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_paths( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn update_metadata( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn write_metadata( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn read_version( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn read_xl( &self, request: tonic::Request, @@ -2412,25 +3206,42 @@ pub mod node_service_server { async fn delete_version( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_versions( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn read_multiple( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_volume( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn disk_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, 
+ >; /// Server streaming response type for the NsScanner method. - type NsScannerStream: tonic::codegen::tokio_stream::Stream> + type NsScannerStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + std::marker::Send + 'static; async fn ns_scanner( @@ -2440,35 +3251,59 @@ pub mod node_service_server { async fn lock( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn un_lock( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn r_lock( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn r_un_lock( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn force_un_lock( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn refresh( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn local_storage_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn server_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_cpus( &self, request: tonic::Request, @@ -2476,137 +3311,236 @@ pub mod node_service_server { async fn get_net_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_partitions( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_os_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_se_linux_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_sys_config( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_sys_errors( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_mem_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_metrics( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_proc_info( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn start_profiling( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn download_profile_data( &self, request: tonic::Request, - ) -> std::result::Result, 
tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_bucket_stats( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_sr_metrics( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_all_bucket_stats( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_bucket_metadata( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_bucket_metadata( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_policy( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_policy( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_policy_mapping( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_user( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn delete_service_account( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_user( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_service_account( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_group( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn reload_site_replication_config( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// rpc VerifyBinary() returns () {}; /// rpc CommitBinary() returns () {}; async fn signal_service( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn background_heal_status( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_metacache_listing( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn update_metacache_listing( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn reload_pool_meta( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn stop_rebalance( &self, request: tonic::Request, - ) -> std::result::Result, 
tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_rebalance_meta( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn load_transition_tier_config( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } #[derive(Debug)] pub struct NodeServiceServer { @@ -2629,7 +3563,10 @@ pub mod node_service_server { max_encoding_message_size: None, } } - pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService where F: tonic::service::Interceptor, { @@ -2673,7 +3610,10 @@ pub mod node_service_server { type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; - fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { @@ -2681,12 +3621,21 @@ pub mod node_service_server { "/node_service.NodeService/Ping" => { #[allow(non_camel_case_types)] struct PingSvc(pub Arc); - impl tonic::server::UnaryService for PingSvc { + impl tonic::server::UnaryService + for PingSvc { type Response = super::PingResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::ping(&inner, request).await }; + let fut = async move { + ::ping(&inner, request).await + }; Box::pin(fut) } } @@ -2699,8 +3648,14 @@ pub mod node_service_server { let method = PingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2709,12 +3664,23 @@ pub mod node_service_server { "/node_service.NodeService/HealBucket" => { #[allow(non_camel_case_types)] struct HealBucketSvc(pub Arc); - impl tonic::server::UnaryService for HealBucketSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for HealBucketSvc { type Response = super::HealBucketResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::heal_bucket(&inner, request).await }; + let fut = async move { + ::heal_bucket(&inner, request).await + }; Box::pin(fut) } } @@ -2727,8 +3693,14 @@ pub mod node_service_server { let method = HealBucketSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, 
max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2737,12 +3709,23 @@ pub mod node_service_server { "/node_service.NodeService/ListBucket" => { #[allow(non_camel_case_types)] struct ListBucketSvc(pub Arc); - impl tonic::server::UnaryService for ListBucketSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ListBucketSvc { type Response = super::ListBucketResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::list_bucket(&inner, request).await }; + let fut = async move { + ::list_bucket(&inner, request).await + }; Box::pin(fut) } } @@ -2755,8 +3738,14 @@ pub mod node_service_server { let method = ListBucketSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2765,12 +3754,23 @@ pub mod node_service_server { "/node_service.NodeService/MakeBucket" => { #[allow(non_camel_case_types)] struct MakeBucketSvc(pub Arc); - impl tonic::server::UnaryService for MakeBucketSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for MakeBucketSvc { type Response = super::MakeBucketResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::make_bucket(&inner, request).await }; + let fut = async move { + ::make_bucket(&inner, request).await + }; Box::pin(fut) } } @@ -2783,8 +3783,14 @@ pub mod node_service_server { let method = MakeBucketSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2793,12 +3799,23 @@ pub mod node_service_server { "/node_service.NodeService/GetBucketInfo" => { #[allow(non_camel_case_types)] struct GetBucketInfoSvc(pub Arc); - impl tonic::server::UnaryService for GetBucketInfoSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetBucketInfoSvc { type Response = super::GetBucketInfoResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn 
call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_bucket_info(&inner, request).await }; + let fut = async move { + ::get_bucket_info(&inner, request).await + }; Box::pin(fut) } } @@ -2811,8 +3828,14 @@ pub mod node_service_server { let method = GetBucketInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2821,12 +3844,23 @@ pub mod node_service_server { "/node_service.NodeService/DeleteBucket" => { #[allow(non_camel_case_types)] struct DeleteBucketSvc(pub Arc); - impl tonic::server::UnaryService for DeleteBucketSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteBucketSvc { type Response = super::DeleteBucketResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_bucket(&inner, request).await }; + let fut = async move { + ::delete_bucket(&inner, request).await + }; Box::pin(fut) } } @@ -2839,8 +3873,14 @@ pub mod node_service_server { let method = DeleteBucketSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2849,12 +3889,23 @@ pub mod node_service_server { "/node_service.NodeService/ReadAll" => { #[allow(non_camel_case_types)] struct ReadAllSvc(pub Arc); - impl tonic::server::UnaryService for ReadAllSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ReadAllSvc { type Response = super::ReadAllResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::read_all(&inner, request).await }; + let fut = async move { + ::read_all(&inner, request).await + }; Box::pin(fut) } } @@ -2867,8 +3918,14 @@ pub mod node_service_server { let method = ReadAllSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); 
                    let res = grpc.unary(method, req).await;
                    Ok(res)
                };

[... the same pair of formatting-only hunks — the impl<T: NodeService> header, the BoxFuture type alias, the fn call signature, and the async move body each rewrapped one item per line, plus the .apply_compression_config / .apply_max_message_size_config chain broken one argument per line — repeats for every remaining NodeService handler: WriteAll, Delete, VerifyFile, CheckParts, RenamePart, RenameFile, Write, WriteStream, ReadAt, ListDir, WalkDir, RenameData, MakeVolumes, MakeVolume, ListVolumes, StatVolume, DeletePaths, UpdateMetadata, WriteMetadata, ReadVersion, ReadXL, DeleteVersion, DeleteVersions, ReadMultiple, DeleteVolume, DiskInfo, NsScanner, Lock, UnLock, RLock, RUnLock, ForceUnLock, Refresh, LocalStorageInfo, ServerInfo, GetCpus, GetNetInfo, GetPartitions, GetOsInfo, GetSELinuxInfo, GetSysConfig, GetSysErrors, GetMemInfo, GetMetrics, GetProcInfo, StartProfiling, DownloadProfileData, GetBucketStats, GetSRMetrics, GetAllBucketStats, and LoadBucketMetadata. WriteStream, ReadAt, and NsScanner implement tonic::server::StreamingService (taking tonic::Request<tonic::Streaming<...>>) and dispatch via grpc.streaming(method, req); WalkDir implements tonic::server::ServerStreamingService and dispatches via grpc.server_streaming(method, req); the lock handlers (Lock, UnLock, RLock, RUnLock, ForceUnLock, Refresh) all return super::GenerallyLockResponse. ...]

@@ -4309,12 +6241,24 @@ pub mod node_service_server {
             "/node_service.NodeService/DeleteBucketMetadata" => {
                 #[allow(non_camel_case_types)]
                 struct DeleteBucketMetadataSvc<T: NodeService>(pub Arc<T>);
-                impl<T: NodeService> tonic::server::UnaryService<super::DeleteBucketMetadataRequest> for DeleteBucketMetadataSvc<T> {
+                impl<
+                    T: NodeService,
+                > tonic::server::UnaryService<super::DeleteBucketMetadataRequest>
+                for
DeleteBucketMetadataSvc { type Response = super::DeleteBucketMetadataResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_bucket_metadata(&inner, request).await }; + let fut = async move { + ::delete_bucket_metadata(&inner, request) + .await + }; Box::pin(fut) } } @@ -4327,8 +6271,14 @@ pub mod node_service_server { let method = DeleteBucketMetadataSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4337,12 +6287,23 @@ pub mod node_service_server { "/node_service.NodeService/DeletePolicy" => { #[allow(non_camel_case_types)] struct DeletePolicySvc(pub Arc); - impl tonic::server::UnaryService for DeletePolicySvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeletePolicySvc { type Response = super::DeletePolicyResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_policy(&inner, request).await }; + let fut = async move { + ::delete_policy(&inner, request).await + }; Box::pin(fut) } } @@ -4355,8 +6316,14 @@ pub mod node_service_server { let method = DeletePolicySvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4365,12 +6332,23 @@ pub mod node_service_server { "/node_service.NodeService/LoadPolicy" => { #[allow(non_camel_case_types)] struct LoadPolicySvc(pub Arc); - impl tonic::server::UnaryService for LoadPolicySvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadPolicySvc { type Response = super::LoadPolicyResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_policy(&inner, request).await }; + let fut = async move { + ::load_policy(&inner, request).await + }; Box::pin(fut) } } @@ -4383,8 +6361,14 @@ pub mod node_service_server { let method = LoadPolicySvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, 
send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4393,12 +6377,24 @@ pub mod node_service_server { "/node_service.NodeService/LoadPolicyMapping" => { #[allow(non_camel_case_types)] struct LoadPolicyMappingSvc(pub Arc); - impl tonic::server::UnaryService for LoadPolicyMappingSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadPolicyMappingSvc { type Response = super::LoadPolicyMappingResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_policy_mapping(&inner, request).await }; + let fut = async move { + ::load_policy_mapping(&inner, request) + .await + }; Box::pin(fut) } } @@ -4411,8 +6407,14 @@ pub mod node_service_server { let method = LoadPolicyMappingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4421,12 +6423,23 @@ pub mod node_service_server { "/node_service.NodeService/DeleteUser" => { #[allow(non_camel_case_types)] struct DeleteUserSvc(pub Arc); - impl tonic::server::UnaryService for DeleteUserSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteUserSvc { type Response = super::DeleteUserResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_user(&inner, request).await }; + let fut = async move { + ::delete_user(&inner, request).await + }; Box::pin(fut) } } @@ -4439,8 +6452,14 @@ pub mod node_service_server { let method = DeleteUserSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4449,12 +6468,24 @@ pub mod node_service_server { "/node_service.NodeService/DeleteServiceAccount" => { #[allow(non_camel_case_types)] struct DeleteServiceAccountSvc(pub Arc); - impl tonic::server::UnaryService for DeleteServiceAccountSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for DeleteServiceAccountSvc { type Response = super::DeleteServiceAccountResponse; 
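+                    // Editor's note: every arm of this generated dispatch repeats one
+                    // pattern — a zero-sized shim struct implements
+                    // tonic::server::UnaryService for a single RPC and forwards to the
+                    // matching NodeService trait method. A hedged sketch of the server
+                    // side that plugs into it (MyNode is illustrative, not part of this
+                    // patch):
+                    //
+                    //   #[tonic::async_trait]
+                    //   impl NodeService for MyNode {
+                    //       async fn delete_service_account(
+                    //           &self,
+                    //           request: tonic::Request<DeleteServiceAccountRequest>,
+                    //       ) -> Result<tonic::Response<DeleteServiceAccountResponse>, tonic::Status> {
+                    //           // remove the account, then acknowledge
+                    //           Ok(tonic::Response::new(DeleteServiceAccountResponse::default()))
+                    //       }
+                    //       // ...one method per RPC routed in this match...
+                    //   }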
- type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::delete_service_account(&inner, request).await }; + let fut = async move { + ::delete_service_account(&inner, request) + .await + }; Box::pin(fut) } } @@ -4467,8 +6498,14 @@ pub mod node_service_server { let method = DeleteServiceAccountSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4477,12 +6514,23 @@ pub mod node_service_server { "/node_service.NodeService/LoadUser" => { #[allow(non_camel_case_types)] struct LoadUserSvc(pub Arc); - impl tonic::server::UnaryService for LoadUserSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadUserSvc { type Response = super::LoadUserResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_user(&inner, request).await }; + let fut = async move { + ::load_user(&inner, request).await + }; Box::pin(fut) } } @@ -4495,8 +6543,14 @@ pub mod node_service_server { let method = LoadUserSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4505,12 +6559,24 @@ pub mod node_service_server { "/node_service.NodeService/LoadServiceAccount" => { #[allow(non_camel_case_types)] struct LoadServiceAccountSvc(pub Arc); - impl tonic::server::UnaryService for LoadServiceAccountSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadServiceAccountSvc { type Response = super::LoadServiceAccountResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_service_account(&inner, request).await }; + let fut = async move { + ::load_service_account(&inner, request) + .await + }; Box::pin(fut) } } @@ -4523,8 +6589,14 @@ pub mod node_service_server { let method = LoadServiceAccountSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - 
.apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4533,12 +6605,23 @@ pub mod node_service_server { "/node_service.NodeService/LoadGroup" => { #[allow(non_camel_case_types)] struct LoadGroupSvc(pub Arc); - impl tonic::server::UnaryService for LoadGroupSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadGroupSvc { type Response = super::LoadGroupResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_group(&inner, request).await }; + let fut = async move { + ::load_group(&inner, request).await + }; Box::pin(fut) } } @@ -4551,8 +6634,14 @@ pub mod node_service_server { let method = LoadGroupSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4561,14 +6650,30 @@ pub mod node_service_server { "/node_service.NodeService/ReloadSiteReplicationConfig" => { #[allow(non_camel_case_types)] struct ReloadSiteReplicationConfigSvc(pub Arc); - impl tonic::server::UnaryService - for ReloadSiteReplicationConfigSvc - { + impl< + T: NodeService, + > tonic::server::UnaryService< + super::ReloadSiteReplicationConfigRequest, + > for ReloadSiteReplicationConfigSvc { type Response = super::ReloadSiteReplicationConfigResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::ReloadSiteReplicationConfigRequest, + >, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::reload_site_replication_config(&inner, request).await }; + let fut = async move { + ::reload_site_replication_config( + &inner, + request, + ) + .await + }; Box::pin(fut) } } @@ -4581,8 +6686,14 @@ pub mod node_service_server { let method = ReloadSiteReplicationConfigSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4591,12 +6702,23 @@ pub mod node_service_server { "/node_service.NodeService/SignalService" => { #[allow(non_camel_case_types)] struct SignalServiceSvc(pub Arc); - impl tonic::server::UnaryService for SignalServiceSvc { + impl< 
+ T: NodeService, + > tonic::server::UnaryService + for SignalServiceSvc { type Response = super::SignalServiceResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::signal_service(&inner, request).await }; + let fut = async move { + ::signal_service(&inner, request).await + }; Box::pin(fut) } } @@ -4609,8 +6731,14 @@ pub mod node_service_server { let method = SignalServiceSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4619,12 +6747,24 @@ pub mod node_service_server { "/node_service.NodeService/BackgroundHealStatus" => { #[allow(non_camel_case_types)] struct BackgroundHealStatusSvc(pub Arc); - impl tonic::server::UnaryService for BackgroundHealStatusSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for BackgroundHealStatusSvc { type Response = super::BackgroundHealStatusResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::background_heal_status(&inner, request).await }; + let fut = async move { + ::background_heal_status(&inner, request) + .await + }; Box::pin(fut) } } @@ -4637,8 +6777,14 @@ pub mod node_service_server { let method = BackgroundHealStatusSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4647,12 +6793,24 @@ pub mod node_service_server { "/node_service.NodeService/GetMetacacheListing" => { #[allow(non_camel_case_types)] struct GetMetacacheListingSvc(pub Arc); - impl tonic::server::UnaryService for GetMetacacheListingSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for GetMetacacheListingSvc { type Response = super::GetMetacacheListingResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::get_metacache_listing(&inner, request).await }; + let fut = async move { + ::get_metacache_listing(&inner, request) + .await + }; Box::pin(fut) } } @@ -4665,8 +6823,14 @@ pub mod node_service_server { let method = 
GetMetacacheListingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4675,12 +6839,27 @@ pub mod node_service_server { "/node_service.NodeService/UpdateMetacacheListing" => { #[allow(non_camel_case_types)] struct UpdateMetacacheListingSvc(pub Arc); - impl tonic::server::UnaryService for UpdateMetacacheListingSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for UpdateMetacacheListingSvc { type Response = super::UpdateMetacacheListingResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::update_metacache_listing(&inner, request).await }; + let fut = async move { + ::update_metacache_listing( + &inner, + request, + ) + .await + }; Box::pin(fut) } } @@ -4693,8 +6872,14 @@ pub mod node_service_server { let method = UpdateMetacacheListingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4703,12 +6888,23 @@ pub mod node_service_server { "/node_service.NodeService/ReloadPoolMeta" => { #[allow(non_camel_case_types)] struct ReloadPoolMetaSvc(pub Arc); - impl tonic::server::UnaryService for ReloadPoolMetaSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for ReloadPoolMetaSvc { type Response = super::ReloadPoolMetaResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::reload_pool_meta(&inner, request).await }; + let fut = async move { + ::reload_pool_meta(&inner, request).await + }; Box::pin(fut) } } @@ -4721,8 +6917,14 @@ pub mod node_service_server { let method = ReloadPoolMetaSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4731,12 +6933,23 @@ pub mod node_service_server { "/node_service.NodeService/StopRebalance" => { 
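+                    // Editor's note: the #[allow(non_camel_case_types)] below is emitted
+                    // unconditionally by tonic's codegen because each shim struct is named
+                    // after its RPC verbatim, and RPC names are not guaranteed to be
+                    // CamelCase; the struct never escapes this match arm.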
#[allow(non_camel_case_types)] struct StopRebalanceSvc(pub Arc); - impl tonic::server::UnaryService for StopRebalanceSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for StopRebalanceSvc { type Response = super::StopRebalanceResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::stop_rebalance(&inner, request).await }; + let fut = async move { + ::stop_rebalance(&inner, request).await + }; Box::pin(fut) } } @@ -4749,8 +6962,14 @@ pub mod node_service_server { let method = StopRebalanceSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4759,12 +6978,24 @@ pub mod node_service_server { "/node_service.NodeService/LoadRebalanceMeta" => { #[allow(non_camel_case_types)] struct LoadRebalanceMetaSvc(pub Arc); - impl tonic::server::UnaryService for LoadRebalanceMetaSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadRebalanceMetaSvc { type Response = super::LoadRebalanceMetaResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_rebalance_meta(&inner, request).await }; + let fut = async move { + ::load_rebalance_meta(&inner, request) + .await + }; Box::pin(fut) } } @@ -4777,8 +7008,14 @@ pub mod node_service_server { let method = LoadRebalanceMetaSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config(accept_compression_encodings, send_compression_encodings) - .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); let res = grpc.unary(method, req).await; Ok(res) }; @@ -4787,12 +7024,29 @@ pub mod node_service_server { "/node_service.NodeService/LoadTransitionTierConfig" => { #[allow(non_camel_case_types)] struct LoadTransitionTierConfigSvc(pub Arc); - impl tonic::server::UnaryService for LoadTransitionTierConfigSvc { + impl< + T: NodeService, + > tonic::server::UnaryService + for LoadTransitionTierConfigSvc { type Response = super::LoadTransitionTierConfigResponse; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::LoadTransitionTierConfigRequest, + >, + ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::load_transition_tier_config(&inner, request).await }; + let 
fut = async move {
+                        <T as NodeService>::load_transition_tier_config(
+                                &inner,
+                                request,
+                            )
+                            .await
+                    };
                     Box::pin(fut)
                 }
             }
@@ -4805,20 +7059,36 @@ pub mod node_service_server {
                     let method = LoadTransitionTierConfigSvc(inner);
                     let codec = tonic::codec::ProstCodec::default();
                     let mut grpc = tonic::server::Grpc::new(codec)
-                        .apply_compression_config(accept_compression_encodings, send_compression_encodings)
-                        .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size);
+                        .apply_compression_config(
+                            accept_compression_encodings,
+                            send_compression_encodings,
+                        )
+                        .apply_max_message_size_config(
+                            max_decoding_message_size,
+                            max_encoding_message_size,
+                        );
                     let res = grpc.unary(method, req).await;
                     Ok(res)
                 };
                 Box::pin(fut)
             }
-            _ => Box::pin(async move {
-                let mut response = http::Response::new(empty_body());
-                let headers = response.headers_mut();
-                headers.insert(tonic::Status::GRPC_STATUS, (tonic::Code::Unimplemented as i32).into());
-                headers.insert(http::header::CONTENT_TYPE, tonic::metadata::GRPC_CONTENT_TYPE);
-                Ok(response)
-            }),
+            _ => {
+                Box::pin(async move {
+                    let mut response = http::Response::new(empty_body());
+                    let headers = response.headers_mut();
+                    headers
+                        .insert(
+                            tonic::Status::GRPC_STATUS,
+                            (tonic::Code::Unimplemented as i32).into(),
+                        );
+                    headers
+                        .insert(
+                            http::header::CONTENT_TYPE,
+                            tonic::metadata::GRPC_CONTENT_TYPE,
+                        );
+                    Ok(response)
+                })
+            }
         }
     }
 }
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 00000000..5c68534b
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,74 @@
+version: '3.8'
+
+services:
+  node1:
+    image: rustfs:v1 # replace with your own image name and tag
+    container_name: node1
+    environment:
+      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
+      - RUSTFS_ADDRESS=0.0.0.0:9000
+      - RUSTFS_CONSOLE_ENABLE=true
+      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
+    platform: linux/amd64
+    ports:
+      - "9001:9000" # map host port 9001 to container port 9000
+    volumes:
+      - ..:/root/data # mount the current path into the container at /root/data
+    command: "/root/rustfs"
+    networks:
+      - my_network
+
+  node2:
+    image: rustfs:v1
+    container_name: node2
+    environment:
+      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
+      - RUSTFS_ADDRESS=0.0.0.0:9000
+      - RUSTFS_CONSOLE_ENABLE=true
+      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
+    platform: linux/amd64
+    ports:
+      - "9002:9000" # map host port 9002 to container port 9000
+    volumes:
+      - ..:/root/data
+    # run via bash so that `&&` and the {1..4} brace expansion actually work
+    command: ["bash", "-c", "mkdir -p /root/data/target/volume/test{1..4} && /root/data/target/ubuntu22.04/release/rustfs"]
+    networks:
+      - my_network
+
+  node3:
+    image: rustfs:v1
+    container_name: node3
+    environment:
+      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
+      - RUSTFS_ADDRESS=0.0.0.0:9000
+      - RUSTFS_CONSOLE_ENABLE=true
+      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
+    platform: linux/amd64
+    ports:
+      - "9003:9000" # map host port 9003 to container port 9000
+    volumes:
+      - ..:/root/data
+    # run via bash so that `&&` and the {1..4} brace expansion actually work
+    command: ["bash", "-c", "mkdir -p /root/data/target/volume/test{1..4} && /root/data/target/ubuntu22.04/release/rustfs"]
+    networks:
+      - my_network
+
+  node4:
+    image: rustfs:v1
+    container_name: node4
+    environment:
+      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
+      - RUSTFS_ADDRESS=0.0.0.0:9000
+      - RUSTFS_CONSOLE_ENABLE=true
+      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
+    platform: linux/amd64
+    ports:
+      - "9004:9000" # map host port 9004 to container port 9000
+    volumes:
+      - ..:/root/data
+    # run via bash so that `&&` and the {1..4} brace expansion actually work
+    command: ["bash", "-c", "mkdir -p /root/data/target/volume/test{1..4} && /root/data/target/ubuntu22.04/release/rustfs"]
+    networks:
+      - my_network
+
+networks:
+  my_network:
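+    # All four node containers join this user-defined bridge network so the
+    # node1..node4 service names referenced in RUSTFS_VOLUMES resolve
+    # container-to-container.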
driver: bridge \ No newline at end of file diff --git a/ecstore/Cargo.toml b/ecstore/Cargo.toml index 7ddc751d..ab3bd5aa 100644 --- a/ecstore/Cargo.toml +++ b/ecstore/Cargo.toml @@ -67,6 +67,7 @@ md-5.workspace = true madmin.workspace = true workers.workspace = true reqwest = { workspace = true } +urlencoding = "2.1.3" [target.'cfg(not(windows))'.dependencies] diff --git a/ecstore/src/bitrot.rs b/ecstore/src/bitrot.rs index 86a92ada..21ceb5b6 100644 --- a/ecstore/src/bitrot.rs +++ b/ecstore/src/bitrot.rs @@ -1,5 +1,9 @@ use crate::{ - disk::{error::DiskError, BufferReader, Disk, DiskAPI, DiskStore, FileReader, FileWriter}, + disk::{ + error::DiskError, + io::{FileReader, FileWriter}, + Disk, DiskAPI, + }, erasure::{ReadAt, Writer}, error::{Error, Result}, store_api::BitrotAlgorithm, @@ -9,13 +13,8 @@ use blake2::Digest as _; use highway::{HighwayHash, HighwayHasher, Key}; use lazy_static::lazy_static; use sha2::{digest::core_api::BlockSizeUser, Digest, Sha256}; -use std::{any::Any, collections::HashMap, sync::Arc}; -use tokio::{ - io::AsyncReadExt as _, - spawn, - sync::mpsc::{self, Sender}, - task::JoinHandle, -}; +use std::{any::Any, collections::HashMap, io::Cursor, sync::Arc}; +use tokio::io::{AsyncReadExt as _, AsyncWriteExt}; use tracing::{error, info}; lazy_static! { @@ -145,22 +144,22 @@ pub fn bitrot_algorithm_from_string(s: &str) -> BitrotAlgorithm { pub type BitrotWriter = Box; -pub async fn new_bitrot_writer( - disk: DiskStore, - orig_volume: &str, - volume: &str, - file_path: &str, - length: usize, - algo: BitrotAlgorithm, - shard_size: usize, -) -> Result { - if algo == BitrotAlgorithm::HighwayHash256S { - return Ok(Box::new( - StreamingBitrotWriter::new(disk, orig_volume, volume, file_path, length, algo, shard_size).await?, - )); - } - Ok(Box::new(WholeBitrotWriter::new(disk, volume, file_path, algo, shard_size))) -} +// pub async fn new_bitrot_writer( +// disk: DiskStore, +// orig_volume: &str, +// volume: &str, +// file_path: &str, +// length: usize, +// algo: BitrotAlgorithm, +// shard_size: usize, +// ) -> Result { +// if algo == BitrotAlgorithm::HighwayHash256S { +// return Ok(Box::new( +// StreamingBitrotWriter::new(disk, orig_volume, volume, file_path, length, algo, shard_size).await?, +// )); +// } +// Ok(Box::new(WholeBitrotWriter::new(disk, volume, file_path, algo, shard_size))) +// } pub type BitrotReader = Box; @@ -189,13 +188,13 @@ pub async fn close_bitrot_writers(writers: &mut [Option]) -> Resul Ok(()) } -pub fn bitrot_writer_sum(w: &BitrotWriter) -> Vec { - if let Some(w) = w.as_any().downcast_ref::() { - return w.hash.clone().finalize(); - } +// pub fn bitrot_writer_sum(w: &BitrotWriter) -> Vec { +// if let Some(w) = w.as_any().downcast_ref::() { +// return w.hash.clone().finalize(); +// } - Vec::new() -} +// Vec::new() +// } pub fn bitrot_shard_file_size(size: usize, shard_size: usize, algo: BitrotAlgorithm) -> usize { if algo != BitrotAlgorithm::HighwayHash256S { @@ -260,40 +259,40 @@ pub async fn bitrot_verify( Ok(()) } -pub struct WholeBitrotWriter { - disk: DiskStore, - volume: String, - file_path: String, - _shard_size: usize, - pub hash: Hasher, -} +// pub struct WholeBitrotWriter { +// disk: DiskStore, +// volume: String, +// file_path: String, +// _shard_size: usize, +// pub hash: Hasher, +// } -impl WholeBitrotWriter { - pub fn new(disk: DiskStore, volume: &str, file_path: &str, algo: BitrotAlgorithm, shard_size: usize) -> Self { - WholeBitrotWriter { - disk, - volume: volume.to_string(), - file_path: file_path.to_string(), - _shard_size: 
shard_size, - hash: algo.new_hasher(), - } - } -} +// impl WholeBitrotWriter { +// pub fn new(disk: DiskStore, volume: &str, file_path: &str, algo: BitrotAlgorithm, shard_size: usize) -> Self { +// WholeBitrotWriter { +// disk, +// volume: volume.to_string(), +// file_path: file_path.to_string(), +// _shard_size: shard_size, +// hash: algo.new_hasher(), +// } +// } +// } -#[async_trait::async_trait] -impl Writer for WholeBitrotWriter { - fn as_any(&self) -> &dyn Any { - self - } +// #[async_trait::async_trait] +// impl Writer for WholeBitrotWriter { +// fn as_any(&self) -> &dyn Any { +// self +// } - async fn write(&mut self, buf: &[u8]) -> Result<()> { - let mut file = self.disk.append_file(&self.volume, &self.file_path).await?; - let _ = file.write(buf).await?; - self.hash.update(buf); +// async fn write(&mut self, buf: &[u8]) -> Result<()> { +// let mut file = self.disk.append_file(&self.volume, &self.file_path).await?; +// let _ = file.write(buf).await?; +// self.hash.update(buf); - Ok(()) - } -} +// Ok(()) +// } +// } // #[derive(Debug)] // pub struct WholeBitrotReader { @@ -344,74 +343,74 @@ impl Writer for WholeBitrotWriter { // } // } -struct StreamingBitrotWriter { - hasher: Hasher, - tx: Sender>>, - task: Option>, -} +// struct StreamingBitrotWriter { +// hasher: Hasher, +// tx: Sender>>, +// task: Option>, +// } -impl StreamingBitrotWriter { - pub async fn new( - disk: DiskStore, - orig_volume: &str, - volume: &str, - file_path: &str, - length: usize, - algo: BitrotAlgorithm, - shard_size: usize, - ) -> Result { - let hasher = algo.new_hasher(); - let (tx, mut rx) = mpsc::channel::>>(10); +// impl StreamingBitrotWriter { +// pub async fn new( +// disk: DiskStore, +// orig_volume: &str, +// volume: &str, +// file_path: &str, +// length: usize, +// algo: BitrotAlgorithm, +// shard_size: usize, +// ) -> Result { +// let hasher = algo.new_hasher(); +// let (tx, mut rx) = mpsc::channel::>>(10); - let total_file_size = length.div_ceil(shard_size) * hasher.size() + length; - let mut writer = disk.create_file(orig_volume, volume, file_path, total_file_size).await?; +// let total_file_size = length.div_ceil(shard_size) * hasher.size() + length; +// let mut writer = disk.create_file(orig_volume, volume, file_path, total_file_size).await?; - let task = spawn(async move { - loop { - if let Some(Some(buf)) = rx.recv().await { - writer.write(&buf).await.unwrap(); - continue; - } +// let task = spawn(async move { +// loop { +// if let Some(Some(buf)) = rx.recv().await { +// writer.write(&buf).await.unwrap(); +// continue; +// } - break; - } - }); +// break; +// } +// }); - Ok(StreamingBitrotWriter { - hasher, - tx, - task: Some(task), - }) - } -} +// Ok(StreamingBitrotWriter { +// hasher, +// tx, +// task: Some(task), +// }) +// } +// } -#[async_trait::async_trait] -impl Writer for StreamingBitrotWriter { - fn as_any(&self) -> &dyn Any { - self - } +// #[async_trait::async_trait] +// impl Writer for StreamingBitrotWriter { +// fn as_any(&self) -> &dyn Any { +// self +// } - async fn write(&mut self, buf: &[u8]) -> Result<()> { - if buf.is_empty() { - return Ok(()); - } - self.hasher.reset(); - self.hasher.update(buf); - let hash_bytes = self.hasher.clone().finalize(); - let _ = self.tx.send(Some(hash_bytes)).await?; - let _ = self.tx.send(Some(buf.to_vec())).await?; +// async fn write(&mut self, buf: &[u8]) -> Result<()> { +// if buf.is_empty() { +// return Ok(()); +// } +// self.hasher.reset(); +// self.hasher.update(buf); +// let hash_bytes = self.hasher.clone().finalize(); +// let _ = 
self.tx.send(Some(hash_bytes)).await?;
+//         let _ = self.tx.send(Some(buf.to_vec())).await?;

-        Ok(())
-    }
+//         Ok(())
+//     }

-    async fn close(&mut self) -> Result<()> {
-        let _ = self.tx.send(None).await?;
-        if let Some(task) = self.task.take() {
-            let _ = task.await; // wait for the task to finish
-        }
-        Ok(())
-    }
-}
+//     async fn close(&mut self) -> Result<()> {
+//         let _ = self.tx.send(None).await?;
+//         if let Some(task) = self.task.take() {
+//             let _ = task.await; // wait for the task to finish
+//         }
+//         Ok(())
+//     }
+// }

 // #[derive(Debug)]
 // struct StreamingBitrotReader {
@@ -522,8 +521,8 @@ impl Writer for BitrotFileWriter {
         self.hasher.reset();
         self.hasher.update(buf);
         let hash_bytes = self.hasher.clone().finalize();
-        let _ = self.inner.write(&hash_bytes).await?;
-        let _ = self.inner.write(buf).await?;
+        let _ = self.inner.write_all(&hash_bytes).await?;
+        let _ = self.inner.write_all(buf).await?;

         Ok(())
     }
@@ -600,11 +599,7 @@ impl ReadAt for BitrotFileReader {
         let stream_offset = (offset / self.shard_size) * self.hasher.size() + offset;

         if let Some(data) = self.data.clone() {
-            self.reader = Some(FileReader::Buffer(BufferReader::new(
-                data,
-                stream_offset,
-                self.till_offset - stream_offset,
-            )));
+            self.reader = Some(FileReader::Buffer(Cursor::new(data)));
         } else {
             self.reader = Some(
                 self.disk
diff --git a/ecstore/src/disk/io.rs b/ecstore/src/disk/io.rs
new file mode 100644
index 00000000..981c6ddd
--- /dev/null
+++ b/ecstore/src/disk/io.rs
@@ -0,0 +1,229 @@
+use crate::error::Result;
+use futures::TryStreamExt;
+use std::io::Cursor;
+use std::pin::Pin;
+use std::task::Poll;
+use tokio::fs::File;
+use tokio::io::{AsyncRead, AsyncWrite};
+use tokio_util::io::ReaderStream;
+use tokio_util::io::StreamReader;
+use tracing::error;
+use tracing::warn;
+
+#[derive(Debug)]
+pub enum FileReader {
+    Local(File),
+    // Remote(RemoteFileReader),
+    Buffer(Cursor<Vec<u8>>),
+    Http(HttpFileReader),
+}
+
+impl AsyncRead for FileReader {
+    #[tracing::instrument(level = "debug", skip(self, buf))]
+    fn poll_read(
+        mut self: Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+        buf: &mut tokio::io::ReadBuf<'_>,
+    ) -> std::task::Poll<std::io::Result<()>> {
+        match &mut *self {
+            Self::Local(reader) => Pin::new(reader).poll_read(cx, buf),
+            Self::Buffer(reader) => Pin::new(reader).poll_read(cx, buf),
+            Self::Http(reader) => Pin::new(reader).poll_read(cx, buf),
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct HttpFileReader {
+    // client: reqwest::Client,
+    // url: String,
+    // disk: String,
+    // volume: String,
+    // path: String,
+    // offset: usize,
+    // length: usize,
+    inner: tokio::io::DuplexStream,
+    // buf: Vec<u8>,
+    // pos: usize,
+}
+
+impl HttpFileReader {
+    pub fn new(url: &str, disk: &str, volume: &str, path: &str, offset: usize, length: usize) -> Result<Self> {
+        warn!("http read start {}", path);
+        let url = url.to_owned();
+        let disk = disk.to_owned();
+        let volume = volume.to_owned();
+        let path = path.to_owned();
+
+        // let (reader, mut writer) = tokio::io::simplex(1024);
+        let (reader, mut writer) = tokio::io::duplex(1024 * 1024 * 10);
+
+        tokio::spawn(async move {
+            let client = reqwest::Client::new();
+            let resp = match client
+                .get(format!(
+                    "{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}",
+                    url,
+                    urlencoding::encode(&disk),
+                    urlencoding::encode(&volume),
+                    urlencoding::encode(&path),
+                    offset,
+                    length
+                ))
+                .send()
+                .await
+                .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
+            {
+                Ok(resp) => resp,
+                Err(err) => {
+                    warn!("http file reader error: {}", err);
+                    return;
+                }
+            };
+
+            let mut rd = StreamReader::new(
resp.bytes_stream() + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)), + ); + + if let Err(err) = tokio::io::copy(&mut rd, &mut writer).await { + error!("http file reader copy error: {}", err); + }; + }); + Ok(Self { + // client: reqwest::Client::new(), + // url: url.to_string(), + // disk: disk.to_string(), + // volume: volume.to_string(), + // path: path.to_string(), + // offset, + // length, + inner: reader, + // buf: Vec::new(), + // pos: 0, + }) + } +} + +impl AsyncRead for HttpFileReader { + #[tracing::instrument(level = "debug", skip(self, buf))] + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + Pin::new(&mut self.inner).poll_read(cx, buf) + } +} + +#[derive(Debug)] +pub enum FileWriter { + Local(File), + Http(HttpFileWriter), + Buffer(Cursor>), +} + +impl AsyncWrite for FileWriter { + #[tracing::instrument(level = "debug", skip(self, buf))] + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> Poll> { + match &mut *self { + Self::Local(writer) => Pin::new(writer).poll_write(cx, buf), + Self::Buffer(writer) => Pin::new(writer).poll_write(cx, buf), + Self::Http(writer) => Pin::new(writer).poll_write(cx, buf), + } + } + + #[tracing::instrument(level = "debug", skip(self))] + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { + match &mut *self { + Self::Local(writer) => Pin::new(writer).poll_flush(cx), + Self::Buffer(writer) => Pin::new(writer).poll_flush(cx), + Self::Http(writer) => Pin::new(writer).poll_flush(cx), + } + } + + #[tracing::instrument(level = "debug", skip(self))] + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { + match &mut *self { + Self::Local(writer) => Pin::new(writer).poll_shutdown(cx), + Self::Buffer(writer) => Pin::new(writer).poll_shutdown(cx), + Self::Http(writer) => Pin::new(writer).poll_shutdown(cx), + } + } +} + +#[derive(Debug)] +pub struct HttpFileWriter { + wd: tokio::io::WriteHalf, +} + +impl HttpFileWriter { + pub fn new(url: &str, disk: &str, volume: &str, path: &str, size: usize, append: bool) -> Result { + let (rd, wd) = tokio::io::simplex(1024 * 1024 * 10); + + let body = reqwest::Body::wrap_stream(ReaderStream::new(rd)); + + let url = url.to_owned(); + let disk = disk.to_owned(); + let volume = volume.to_owned(); + let path = path.to_owned(); + + tokio::spawn(async move { + let client = reqwest::Client::new(); + if let Err(err) = client + .put(format!( + "{}/rustfs/rpc/put_file_stream?disk={}&volume={}&path={}&append={}&size={}", + url, + urlencoding::encode(&disk), + urlencoding::encode(&volume), + urlencoding::encode(&path), + append, + size + )) + .body(body) + .send() + .await + { + error!("HttpFileWriter put file err: {:?}", err); + // return; + } + + // TODO: handle response + + // debug!("http write done {}", path); + }); + + Ok(Self { + wd, + // client: reqwest::Client::new(), + // url: url.to_string(), + // disk: disk.to_string(), + // volume: volume.to_string(), + }) + } +} + +impl AsyncWrite for HttpFileWriter { + #[tracing::instrument(level = "debug", skip(self, buf))] + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.wd).poll_write(cx, buf) + } + + #[tracing::instrument(level = "debug", skip(self))] + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { + Pin::new(&mut self.wd).poll_flush(cx) + } + + 
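+    // Editor's note: HttpFileReader and HttpFileWriter both bridge async I/O to
+    // an HTTP request by spawning a task that owns the reqwest call and piping
+    // bytes through an in-memory channel (tokio::io::duplex for the reader,
+    // tokio::io::simplex for the writer). One consequence worth flagging:
+    // poll_shutdown only closes the local write half, so a failed PUT surfaces
+    // only in the spawned task's error log. A hedged usage sketch (URL, disk,
+    // volume, path and size are illustrative):
+    //
+    //   use tokio::io::AsyncWriteExt;
+    //   let mut w = HttpFileWriter::new("http://node2:9000", "/data/disk1", "bucket", "obj/part.1", 11, false)?;
+    //   w.write_all(b"shard bytes").await?;
+    //   w.shutdown().await?; // closes the pipe; the upload finishes in the background task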
#[tracing::instrument(level = "debug", skip(self))] + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { + Pin::new(&mut self.wd).poll_shutdown(cx) + } +} diff --git a/ecstore/src/disk/local.rs b/ecstore/src/disk/local.rs index a1f1e0c7..013be96a 100644 --- a/ecstore/src/disk/local.rs +++ b/ecstore/src/disk/local.rs @@ -17,7 +17,7 @@ use crate::disk::error::{ is_sys_err_not_dir, map_err_not_exists, os_err_to_file_err, }; use crate::disk::os::{check_path_length, is_empty_dir}; -use crate::disk::{LocalFileReader, LocalFileWriter, STORAGE_FORMAT_FILE}; +use crate::disk::STORAGE_FORMAT_FILE; use crate::error::{Error, Result}; use crate::file_meta::{get_file_info, read_xl_meta_no_data, FileInfoOpts}; use crate::global::{GLOBAL_IsErasureSD, GLOBAL_RootDiskThreshold}; @@ -745,15 +745,7 @@ impl LocalDisk { let meta = file.metadata().await?; - bitrot_verify( - FileReader::Local(LocalFileReader::new(file)), - meta.size() as usize, - part_size, - algo, - sum.to_vec(), - shard_size, - ) - .await + bitrot_verify(FileReader::Local(file), meta.size() as usize, part_size, algo, sum.to_vec(), shard_size).await } async fn scan_dir( @@ -1297,6 +1289,7 @@ impl DiskAPI for LocalDisk { Ok(resp) } + #[tracing::instrument(level = "debug", skip(self))] async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Vec) -> Result<()> { let src_volume_dir = self.get_bucket_path(src_volume)?; let dst_volume_dir = self.get_bucket_path(dst_volume)?; @@ -1311,12 +1304,18 @@ impl DiskAPI for LocalDisk { let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR); if !src_is_dir && dst_is_dir || src_is_dir && !dst_is_dir { + warn!( + "rename_part src and dst must be both dir or file src_is_dir:{}, dst_is_dir:{}", + src_is_dir, dst_is_dir + ); return Err(Error::from(DiskError::FileAccessDenied)); } let src_file_path = src_volume_dir.join(Path::new(src_path)); let dst_file_path = dst_volume_dir.join(Path::new(dst_path)); + warn!("rename_part src_file_path:{:?}, dst_file_path:{:?}", &src_file_path, &dst_file_path); + check_path_length(src_file_path.to_string_lossy().as_ref())?; check_path_length(dst_file_path.to_string_lossy().as_ref())?; @@ -1337,12 +1336,14 @@ impl DiskAPI for LocalDisk { if let Some(meta) = meta_op { if !meta.is_dir() { + warn!("rename_part src is not dir {:?}", &src_file_path); return Err(Error::new(DiskError::FileAccessDenied)); } } if let Err(e) = utils::fs::remove(&dst_file_path).await { if is_sys_err_not_empty(&e) || is_sys_err_not_dir(&e) { + warn!("rename_part remove dst failed {:?} err {:?}", &dst_file_path, e); return Err(Error::new(DiskError::FileAccessDenied)); } else if is_sys_err_io(&e) { return Err(Error::new(DiskError::FaultyDisk)); @@ -1355,6 +1356,7 @@ impl DiskAPI for LocalDisk { if let Err(err) = os::rename_all(&src_file_path, &dst_file_path, &dst_volume_dir).await { if let Some(e) = err.to_io_err() { if is_sys_err_not_empty(&e) || is_sys_err_not_dir(&e) { + warn!("rename_part rename all failed {:?} err {:?}", &dst_file_path, e); return Err(Error::new(DiskError::FileAccessDenied)); } @@ -1467,8 +1469,10 @@ impl DiskAPI for LocalDisk { Ok(()) } - // TODO: use io.reader + #[tracing::instrument(level = "debug", skip(self))] async fn create_file(&self, origvolume: &str, volume: &str, path: &str, _file_size: usize) -> Result { + warn!("disk create_file: origvolume: {}, volume: {}, path: {}", origvolume, volume, path); + if !origvolume.is_empty() { let origvolume_dir = self.get_bucket_path(origvolume)?; if 
!skip_access_checks(origvolume) { @@ -1491,12 +1495,16 @@ impl DiskAPI for LocalDisk { .await .map_err(os_err_to_file_err)?; - Ok(FileWriter::Local(LocalFileWriter::new(f))) + Ok(FileWriter::Local(f)) // Ok(()) } + + #[tracing::instrument(level = "debug", skip(self))] // async fn append_file(&self, volume: &str, path: &str, mut r: DuplexStream) -> Result { async fn append_file(&self, volume: &str, path: &str) -> Result { + warn!("disk append_file: volume: {}, path: {}", volume, path); + let volume_dir = self.get_bucket_path(volume)?; if !skip_access_checks(volume) { if let Err(e) = utils::fs::access(&volume_dir).await { @@ -1509,11 +1517,13 @@ impl DiskAPI for LocalDisk { let f = self.open_file(file_path, O_CREATE | O_APPEND | O_WRONLY, volume_dir).await?; - Ok(FileWriter::Local(LocalFileWriter::new(f))) + Ok(FileWriter::Local(f)) } // TODO: io verifier + #[tracing::instrument(level = "debug", skip(self))] async fn read_file(&self, volume: &str, path: &str) -> Result { + warn!("disk read_file: volume: {}, path: {}", volume, path); let volume_dir = self.get_bucket_path(volume)?; if !skip_access_checks(volume) { if let Err(e) = utils::fs::access(&volume_dir).await { @@ -1542,10 +1552,16 @@ impl DiskAPI for LocalDisk { } })?; - Ok(FileReader::Local(LocalFileReader::new(f))) + Ok(FileReader::Local(f)) } + #[tracing::instrument(level = "debug", skip(self))] async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result { + warn!( + "disk read_file_stream: volume: {}, path: {}, offset: {}, length: {}", + volume, path, offset, length + ); + let volume_dir = self.get_bucket_path(volume)?; if !skip_access_checks(volume) { if let Err(e) = utils::fs::access(&volume_dir).await { @@ -1587,7 +1603,7 @@ impl DiskAPI for LocalDisk { f.seek(SeekFrom::Start(offset as u64)).await?; - Ok(FileReader::Local(LocalFileReader::new(f))) + Ok(FileReader::Local(f)) } #[tracing::instrument(level = "debug", skip(self))] async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result> { diff --git a/ecstore/src/disk/mod.rs b/ecstore/src/disk/mod.rs index b1d341d7..dcf719f2 100644 --- a/ecstore/src/disk/mod.rs +++ b/ecstore/src/disk/mod.rs @@ -1,6 +1,7 @@ pub mod endpoint; pub mod error; pub mod format; +pub mod io; pub mod local; pub mod os; pub mod remote; @@ -14,10 +15,8 @@ pub const FORMAT_CONFIG_FILE: &str = "format.json"; pub const STORAGE_FORMAT_FILE: &str = "xl.meta"; pub const STORAGE_FORMAT_FILE_BACKUP: &str = "xl.meta.bkp"; -use crate::utils::proto_err_to_err; use crate::{ bucket::{metadata_sys::get_versioning_config, versioning::VersioningApi}, - erasure::Writer, error::{Error, Result}, file_meta::{merge_file_meta_versions, FileMeta, FileMetaShallowVersion, VersionType}, heal::{ @@ -28,28 +27,16 @@ use crate::{ store_api::{FileInfo, ObjectInfo, RawFileInfo}, utils::path::SLASH_SEPARATOR, }; - use endpoint::Endpoint; use error::DiskError; -use futures::StreamExt; +use io::{FileReader, FileWriter}; use local::LocalDisk; use madmin::info_commands::DiskMetrics; -use protos::proto_gen::node_service::{node_service_client::NodeServiceClient, WriteRequest, WriteResponse}; use remote::RemoteDisk; use serde::{Deserialize, Serialize}; -use std::io::Read as _; -use std::pin::Pin; -use std::task::Poll; -use std::{any::Any, cmp::Ordering, fmt::Debug, io::Cursor, path::PathBuf, sync::Arc}; +use std::{cmp::Ordering, fmt::Debug, path::PathBuf, sync::Arc}; use time::OffsetDateTime; -use tokio::io::AsyncRead; -use tokio::{ - fs::File, - io::{AsyncWrite, 
AsyncWriteExt}, - sync::mpsc::{self, Sender}, -}; -use tokio_stream::wrappers::ReceiverStream; -use tonic::{service::interceptor::InterceptedService, transport::Channel, Request, Status, Streaming}; +use tokio::{io::AsyncWrite, sync::mpsc::Sender}; use tracing::info; use tracing::warn; use uuid::Uuid; @@ -1256,164 +1243,142 @@ pub struct ReadOptions { // } // } -#[derive(Debug)] -pub enum FileWriter { - Local(LocalFileWriter), - Remote(RemoteFileWriter), - Buffer(BufferWriter), -} +// #[derive(Debug)] +// pub struct BufferWriter { +// pub inner: Vec, +// } -#[async_trait::async_trait] -impl Writer for FileWriter { - fn as_any(&self) -> &dyn Any { - self - } +// impl BufferWriter { +// pub fn new(inner: Vec) -> Self { +// Self { inner } +// } +// #[allow(clippy::should_implement_trait)] +// pub fn as_ref(&self) -> &[u8] { +// self.inner.as_ref() +// } +// } - async fn write(&mut self, buf: &[u8]) -> Result<()> { - match self { - Self::Local(writer) => writer.write(buf).await, - Self::Remote(writter) => writter.write(buf).await, - Self::Buffer(writer) => writer.write(buf).await, - } - } -} +// #[async_trait::async_trait] +// impl Writer for BufferWriter { +// fn as_any(&self) -> &dyn Any { +// self +// } -#[derive(Debug)] -pub struct BufferWriter { - pub inner: Vec, -} +// async fn write(&mut self, buf: &[u8]) -> Result<()> { +// let _ = self.inner.write(buf).await?; +// self.inner.flush().await?; -impl BufferWriter { - pub fn new(inner: Vec) -> Self { - Self { inner } - } - #[allow(clippy::should_implement_trait)] - pub fn as_ref(&self) -> &[u8] { - self.inner.as_ref() - } -} +// Ok(()) +// } +// } -#[async_trait::async_trait] -impl Writer for BufferWriter { - fn as_any(&self) -> &dyn Any { - self - } +// #[derive(Debug)] +// pub struct LocalFileWriter { +// pub inner: File, +// } - async fn write(&mut self, buf: &[u8]) -> Result<()> { - let _ = self.inner.write(buf).await?; - self.inner.flush().await?; +// impl LocalFileWriter { +// pub fn new(inner: File) -> Self { +// Self { inner } +// } +// } - Ok(()) - } -} +// #[async_trait::async_trait] +// impl Writer for LocalFileWriter { +// fn as_any(&self) -> &dyn Any { +// self +// } -#[derive(Debug)] -pub struct LocalFileWriter { - pub inner: File, -} +// async fn write(&mut self, buf: &[u8]) -> Result<()> { +// let _ = self.inner.write(buf).await?; +// self.inner.flush().await?; -impl LocalFileWriter { - pub fn new(inner: File) -> Self { - Self { inner } - } -} +// Ok(()) +// } +// } -#[async_trait::async_trait] -impl Writer for LocalFileWriter { - fn as_any(&self) -> &dyn Any { - self - } +// type NodeClient = NodeServiceClient< +// InterceptedService) -> Result, Status> + Send + Sync + 'static>>, +// >; - async fn write(&mut self, buf: &[u8]) -> Result<()> { - let _ = self.inner.write(buf).await?; - self.inner.flush().await?; +// #[derive(Debug)] +// pub struct RemoteFileWriter { +// pub endpoint: Endpoint, +// pub volume: String, +// pub path: String, +// pub is_append: bool, +// tx: Sender, +// resp_stream: Streaming, +// } - Ok(()) - } -} +// impl RemoteFileWriter { +// pub async fn new(endpoint: Endpoint, volume: String, path: String, is_append: bool, mut client: NodeClient) -> Result { +// let (tx, rx) = mpsc::channel(128); +// let in_stream = ReceiverStream::new(rx); -type NodeClient = NodeServiceClient< - InterceptedService) -> Result, Status> + Send + Sync + 'static>>, ->; +// let response = client.write_stream(in_stream).await.unwrap(); -#[derive(Debug)] -pub struct RemoteFileWriter { - pub endpoint: Endpoint, - pub volume: 
String, - pub path: String, - pub is_append: bool, - tx: Sender, - resp_stream: Streaming, -} +// let resp_stream = response.into_inner(); -impl RemoteFileWriter { - pub async fn new(endpoint: Endpoint, volume: String, path: String, is_append: bool, mut client: NodeClient) -> Result { - let (tx, rx) = mpsc::channel(128); - let in_stream = ReceiverStream::new(rx); +// Ok(Self { +// endpoint, +// volume, +// path, +// is_append, +// tx, +// resp_stream, +// }) +// } +// } - let response = client.write_stream(in_stream).await.unwrap(); +// #[async_trait::async_trait] +// impl Writer for RemoteFileWriter { +// fn as_any(&self) -> &dyn Any { +// self +// } - let resp_stream = response.into_inner(); +// async fn write(&mut self, buf: &[u8]) -> Result<()> { +// let request = WriteRequest { +// disk: self.endpoint.to_string(), +// volume: self.volume.to_string(), +// path: self.path.to_string(), +// is_append: self.is_append, +// data: buf.to_vec(), +// }; +// self.tx.send(request).await?; - Ok(Self { - endpoint, - volume, - path, - is_append, - tx, - resp_stream, - }) - } -} +// if let Some(resp) = self.resp_stream.next().await { +// // match resp { +// // Ok(resp) => { +// // if resp.success { +// // info!("write stream success"); +// // } else { +// // info!("write stream failed: {}", resp.error_info.unwrap_or("".to_string())); +// // } +// // } +// // Err(_err) => { -#[async_trait::async_trait] -impl Writer for RemoteFileWriter { - fn as_any(&self) -> &dyn Any { - self - } +// // } +// // } +// let resp = resp?; +// if resp.success { +// info!("write stream success"); +// } else { +// return if let Some(err) = &resp.error { +// Err(proto_err_to_err(err)) +// } else { +// Err(Error::from_string("")) +// }; +// } +// } else { +// let error_info = "can not get response"; +// info!("write stream failed: {}", error_info); +// return Err(Error::from_string(error_info)); +// } - async fn write(&mut self, buf: &[u8]) -> Result<()> { - let request = WriteRequest { - disk: self.endpoint.to_string(), - volume: self.volume.to_string(), - path: self.path.to_string(), - is_append: self.is_append, - data: buf.to_vec(), - }; - self.tx.send(request).await?; - - if let Some(resp) = self.resp_stream.next().await { - // match resp { - // Ok(resp) => { - // if resp.success { - // info!("write stream success"); - // } else { - // info!("write stream failed: {}", resp.error_info.unwrap_or("".to_string())); - // } - // } - // Err(_err) => { - - // } - // } - let resp = resp?; - if resp.success { - info!("write stream success"); - } else { - return if let Some(err) = &resp.error { - Err(proto_err_to_err(err)) - } else { - Err(Error::from_string("")) - }; - } - } else { - let error_info = "can not get response"; - info!("write stream failed: {}", error_info); - return Err(Error::from_string(error_info)); - } - - Ok(()) - } -} +// Ok(()) +// } +// } // #[async_trait::async_trait] // pub trait Reader { @@ -1422,29 +1387,6 @@ impl Writer for RemoteFileWriter { // // async fn read_exact(&mut self, buf: &mut [u8]) -> Result; // } -#[derive(Debug)] -pub enum FileReader { - Local(LocalFileReader), - // Remote(RemoteFileReader), - Buffer(BufferReader), - Http(HttpFileReader), -} - -impl AsyncRead for FileReader { - #[tracing::instrument(level = "debug", skip(self, buf))] - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> std::task::Poll> { - match &mut *self { - Self::Local(reader) => Pin::new(&mut reader.inner).poll_read(cx, buf), - Self::Buffer(reader) => 
Pin::new(&mut reader.inner).poll_read(cx, buf), - Self::Http(reader) => Pin::new(reader).poll_read(cx, buf), - } - } -} - // #[async_trait::async_trait] // impl Reader for FileReader { // async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { @@ -1471,44 +1413,44 @@ impl AsyncRead for FileReader { // // } // } -#[derive(Debug)] -pub struct BufferReader { - pub inner: Cursor>, - remaining: usize, -} +// #[derive(Debug)] +// pub struct BufferReader { +// pub inner: Cursor>, +// remaining: usize, +// } -impl BufferReader { - pub fn new(inner: Vec, offset: usize, read_length: usize) -> Self { - let mut cur = Cursor::new(inner); - cur.set_position(offset as u64); - Self { - inner: cur, - remaining: offset + read_length, - } - } -} +// impl BufferReader { +// pub fn new(inner: Vec, offset: usize, read_length: usize) -> Self { +// let mut cur = Cursor::new(inner); +// cur.set_position(offset as u64); +// Self { +// inner: cur, +// remaining: offset + read_length, +// } +// } +// } -impl AsyncRead for BufferReader { - #[tracing::instrument(level = "debug", skip(self, buf))] - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> std::task::Poll> { - match Pin::new(&mut self.inner).poll_read(cx, buf) { - Poll::Ready(Ok(_)) => { - if self.inner.position() as usize >= self.remaining { - self.remaining -= buf.filled().len(); - Poll::Ready(Ok(())) - } else { - Poll::Pending - } - } - Poll::Ready(Err(err)) => Poll::Ready(Err(err)), - Poll::Pending => Poll::Pending, - } - } -} +// impl AsyncRead for BufferReader { +// #[tracing::instrument(level = "debug", skip(self, buf))] +// fn poll_read( +// mut self: Pin<&mut Self>, +// cx: &mut std::task::Context<'_>, +// buf: &mut tokio::io::ReadBuf<'_>, +// ) -> std::task::Poll> { +// match Pin::new(&mut self.inner).poll_read(cx, buf) { +// Poll::Ready(Ok(_)) => { +// if self.inner.position() as usize >= self.remaining { +// self.remaining -= buf.filled().len(); +// Poll::Ready(Ok(())) +// } else { +// Poll::Pending +// } +// } +// Poll::Ready(Err(err)) => Poll::Ready(Err(err)), +// Poll::Pending => Poll::Pending, +// } +// } +// } // #[async_trait::async_trait] // impl Reader for BufferReader { @@ -1537,17 +1479,17 @@ impl AsyncRead for BufferReader { // // } // } -#[derive(Debug)] -pub struct LocalFileReader { - pub inner: File, - // pos: usize, -} +// #[derive(Debug)] +// pub struct LocalFileReader { +// pub inner: File, +// // pos: usize, +// } -impl LocalFileReader { - pub fn new(inner: File) -> Self { - Self { inner } - } -} +// impl LocalFileReader { +// pub fn new(inner: File) -> Self { +// Self { inner } +// } +// } // #[async_trait::async_trait] // impl Reader for LocalFileReader { @@ -1579,16 +1521,16 @@ impl LocalFileReader { // // } // } -impl AsyncRead for LocalFileReader { - #[tracing::instrument(level = "debug", skip(self, buf))] - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> std::task::Poll> { - Pin::new(&mut self.inner).poll_read(cx, buf) - } -} +// impl AsyncRead for LocalFileReader { +// #[tracing::instrument(level = "debug", skip(self, buf))] +// fn poll_read( +// mut self: Pin<&mut Self>, +// cx: &mut std::task::Context<'_>, +// buf: &mut tokio::io::ReadBuf<'_>, +// ) -> std::task::Poll> { +// Pin::new(&mut self.inner).poll_read(cx, buf) +// } +// } // #[derive(Debug)] // pub struct RemoteFileReader { @@ -1670,85 +1612,3 @@ impl AsyncRead for LocalFileReader { // 
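One motivation for the Cursor-based replacement that later hunks adopt: the hand-rolled BufferReader::poll_read above can return Poll::Pending without registering a waker, so a task reading from a purely in-memory buffer may never be woken again. Cursor plus AsyncReadExt::take expresses the same offset/length window without custom polling; a minimal sketch, with read_slice as an illustrative name:

use std::io::Cursor;
use tokio::io::AsyncReadExt;

// Read `length` bytes starting at `offset` from an in-memory buffer.
async fn read_slice(data: Vec<u8>, offset: u64, length: u64) -> std::io::Result<Vec<u8>> {
    let mut cur = Cursor::new(data);
    cur.set_position(offset); // same role as BufferReader::new's offset
    let mut out = Vec::with_capacity(length as usize);
    cur.take(length).read_to_end(&mut out).await?; // in-memory reads never return Pending
    Ok(out)
}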
unimplemented!("poll_read") // } // } - -#[derive(Debug)] -pub struct HttpFileReader { - // client: reqwest::Client, - // url: String, - // disk: String, - // volume: String, - // path: String, - // offset: usize, - // length: usize, - inner: reqwest::blocking::Response, - // buf: Vec, - pos: usize, -} - -impl HttpFileReader { - pub async fn new(url: &str, disk: &str, volume: &str, path: &str, offset: usize, length: usize) -> Result { - let client = reqwest::blocking::Client::new(); - let resp = client - .get(format!( - "{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}", - url, disk, volume, path, offset, length - )) - .send()?; - Ok(Self { - // client: reqwest::Client::new(), - // url: url.to_string(), - // disk: disk.to_string(), - // volume: volume.to_string(), - // path: path.to_string(), - // offset, - // length, - inner: resp, - // buf: Vec::new(), - pos: 0, - }) - } - - // pub async fn get_response(&self) -> Result<&Response, std::io::Error> { - // if let Some(resp) = self.inner.get() { - // return Ok(resp); - // } else { - // let client = reqwest::Client::new(); - // let resp = client - // .get(&format!( - // "{}/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}", - // self.url, self.disk, self.volume, self.path, self.offset, self.length - // )) - // .send() - // .await - // .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; - // self.inner.set(resp); - // Ok(self.inner.get().unwrap()) - // } - // } -} - -impl AsyncRead for HttpFileReader { - #[tracing::instrument(level = "debug", skip(self, buf))] - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> std::task::Poll> { - let buf = buf.initialize_unfilled(); - self.inner.read_exact(buf)?; - self.pos += buf.len(); - Poll::Ready(Ok(())) - } -} - -// impl Reader for HttpFileReader { -// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { -// if self.pos != offset { -// self.inner.seek(SeekFrom::Start(offset as u64))?; -// self.pos = offset; -// } -// let bytes_read = self.inner.read(buf)?; -// self.pos += bytes_read; -// Ok(bytes_read) -// } -// } diff --git a/ecstore/src/disk/remote.rs b/ecstore/src/disk/remote.rs index 2fd2ca3f..8528ac1e 100644 --- a/ecstore/src/disk/remote.rs +++ b/ecstore/src/disk/remote.rs @@ -22,8 +22,8 @@ use tracing::info; use uuid::Uuid; use super::{ - endpoint::Endpoint, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, - FileInfoVersions, FileReader, FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RemoteFileWriter, RenameDataResp, + endpoint::Endpoint, io::HttpFileReader, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, + DiskOption, FileInfoVersions, FileReader, FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions, }; use crate::{ @@ -36,7 +36,7 @@ use crate::{ }, store_api::{FileInfo, RawFileInfo}, }; -use crate::{disk::HttpFileReader, utils::proto_err_to_err}; +use crate::{disk::io::HttpFileWriter, utils::proto_err_to_err}; use crate::{disk::MetaCacheEntry, metacache::writer::MetacacheWriter}; use protos::proto_gen::node_service::RenamePartRequst; @@ -286,6 +286,7 @@ impl DiskAPI for RemoteDisk { Ok(()) } + #[tracing::instrument(level = "debug", skip(self))] async fn rename_file(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str) -> Result<()> { info!("rename_file"); let mut client = 
node_service_time_out_client(&self.addr) @@ -312,58 +313,55 @@ impl DiskAPI for RemoteDisk { Ok(()) } - async fn create_file(&self, _origvolume: &str, volume: &str, path: &str, _file_size: usize) -> Result { + #[tracing::instrument(level = "debug", skip(self))] + async fn create_file(&self, _origvolume: &str, volume: &str, path: &str, file_size: usize) -> Result { info!("create_file"); - Ok(FileWriter::Remote( - RemoteFileWriter::new( - self.endpoint.clone(), - volume.to_string(), - path.to_string(), - false, - node_service_time_out_client(&self.addr) - .await - .map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?, - ) - .await?, - )) + Ok(FileWriter::Http(HttpFileWriter::new( + self.endpoint.grid_host().as_str(), + self.endpoint.to_string().as_str(), + volume, + path, + file_size, + false, + )?)) } + #[tracing::instrument(level = "debug", skip(self))] async fn append_file(&self, volume: &str, path: &str) -> Result { info!("append_file"); - Ok(FileWriter::Remote( - RemoteFileWriter::new( - self.endpoint.clone(), - volume.to_string(), - path.to_string(), - true, - node_service_time_out_client(&self.addr) - .await - .map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?, - ) - .await?, - )) + Ok(FileWriter::Http(HttpFileWriter::new( + self.endpoint.grid_host().as_str(), + self.endpoint.to_string().as_str(), + volume, + path, + 0, + true, + )?)) } + #[tracing::instrument(level = "debug", skip(self))] async fn read_file(&self, volume: &str, path: &str) -> Result { info!("read_file"); - Ok(FileReader::Http( - HttpFileReader::new(self.endpoint.grid_host().as_str(), self.endpoint.to_string().as_str(), volume, path, 0, 0) - .await?, - )) + Ok(FileReader::Http(HttpFileReader::new( + self.endpoint.grid_host().as_str(), + self.endpoint.to_string().as_str(), + volume, + path, + 0, + 0, + )?)) } + #[tracing::instrument(level = "debug", skip(self))] async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result { - Ok(FileReader::Http( - HttpFileReader::new( - self.endpoint.grid_host().as_str(), - self.endpoint.to_string().as_str(), - volume, - path, - offset, - length, - ) - .await?, - )) + Ok(FileReader::Http(HttpFileReader::new( + self.endpoint.grid_host().as_str(), + self.endpoint.to_string().as_str(), + volume, + path, + offset, + length, + )?)) } async fn list_dir(&self, _origvolume: &str, volume: &str, _dir_path: &str, _count: i32) -> Result> { diff --git a/ecstore/src/set_disk.rs b/ecstore/src/set_disk.rs index 0855fa94..d1ef1de4 100644 --- a/ecstore/src/set_disk.rs +++ b/ecstore/src/set_disk.rs @@ -1,6 +1,6 @@ use std::{ collections::{HashMap, HashSet}, - io::Write, + io::{Cursor, Write}, path::Path, sync::Arc, time::Duration, @@ -14,10 +14,10 @@ use crate::{ endpoint::Endpoint, error::{is_all_not_found, DiskError}, format::FormatV3, - new_disk, BufferReader, BufferWriter, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskOption, - DiskStore, FileInfoVersions, FileReader, FileWriter, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, - ReadMultipleReq, ReadMultipleResp, ReadOptions, UpdateMetadataOpts, RUSTFS_META_BUCKET, RUSTFS_META_MULTIPART_BUCKET, - RUSTFS_META_TMP_BUCKET, + io::{FileReader, FileWriter}, + new_disk, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskOption, DiskStore, FileInfoVersions, + MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, ReadMultipleReq, ReadMultipleResp, ReadOptions, + UpdateMetadataOpts, RUSTFS_META_BUCKET, 
RUSTFS_META_MULTIPART_BUCKET, RUSTFS_META_TMP_BUCKET, }, erasure::Erasure, error::{Error, Result}, @@ -1361,7 +1361,7 @@ impl SetDisks { for (i, opdisk) in disks.iter().enumerate() { if let Some(disk) = opdisk { if disk.is_online().await && disk.get_disk_location().set_idx.is_some() { - info!("Disk {:?} is online", disk); + info!("Disk {:?} is online", disk.to_string()); continue; } @@ -2452,7 +2452,7 @@ impl SetDisks { if let Some(disk) = disk { let filewriter = { if is_inline_buffer { - FileWriter::Buffer(BufferWriter::new(Vec::new())) + FileWriter::Buffer(Cursor::new(Vec::new())) } else { let disk = disk.clone(); let part_path = format!("{}/{}/part.{}", tmp_id, dst_data_dir, part.number); @@ -2501,7 +2501,7 @@ impl SetDisks { if let Some(ref writer) = writers[index] { if let Some(w) = writer.as_any().downcast_ref::() { if let FileWriter::Buffer(buffer_writer) = w.writer() { - parts_metadata[index].data = Some(buffer_writer.as_ref().to_vec()); + parts_metadata[index].data = Some(buffer_writer.clone().into_inner()); } } } @@ -3744,7 +3744,7 @@ impl ObjectIO for SetDisks { if let Some(disk) = disk_op { let filewriter = { if is_inline_buffer { - FileWriter::Buffer(BufferWriter::new(Vec::new())) + FileWriter::Buffer(Cursor::new(Vec::new())) } else { let disk = disk.clone(); @@ -3760,6 +3760,8 @@ impl ObjectIO for SetDisks { } } + warn!("put_object data.content_length {}", data.content_length); + // TODO: etag from header let mut etag_stream = EtagReader::new(&mut data.stream, None, None); @@ -3789,7 +3791,7 @@ impl ObjectIO for SetDisks { if let Some(ref writer) = writers[i] { if let Some(w) = writer.as_any().downcast_ref::() { if let FileWriter::Buffer(buffer_writer) = w.writer() { - fi.data = Some(buffer_writer.as_ref().to_vec()); + fi.data = Some(buffer_writer.clone().into_inner()); } } } @@ -4294,6 +4296,7 @@ impl StorageAPI for SetDisks { unimplemented!() } + #[tracing::instrument(level = "debug", skip(self, data, opts))] async fn put_object_part( &self, bucket: &str, @@ -5245,7 +5248,7 @@ async fn disks_with_all_parts( let checksum_info = meta.erasure.get_checksum_info(meta.parts[0].number); let data_len = data.len(); let verify_err = match bitrot_verify( - FileReader::Buffer(BufferReader::new(data.clone(), 0, data_len)), + FileReader::Buffer(Cursor::new(data.clone())), data_len, meta.erasure.shard_file_size(meta.size), checksum_info.algorithm, diff --git a/ecstore/src/store_init.rs b/ecstore/src/store_init.rs index 3c44cc0f..00f93d11 100644 --- a/ecstore/src/store_init.rs +++ b/ecstore/src/store_init.rs @@ -53,6 +53,7 @@ pub async fn connect_load_init_formats( set_drive_count: usize, deployment_id: Option, ) -> Result { + warn!("connect_load_init_formats first_disk: {}", first_disk); let (formats, errs) = load_format_erasure_all(disks, false).await; debug!("load_format_erasure_all errs {:?}", &errs); @@ -63,12 +64,13 @@ pub async fn connect_load_init_formats( if first_disk && DiskError::should_init_erasure_disks(&errs) { // UnformattedDisk, not format file create + warn!("first_disk && should_init_erasure_disks"); // new format and save let fms = init_format_erasure(disks, set_count, set_drive_count, deployment_id); let errs = save_format_file_all(disks, &fms).await; - debug!("save_format_file_all errs {:?}", &errs); + warn!("save_format_file_all errs {:?}", &errs); // TODO: check quorum // reduceWriteQuorumErrs(&errs)?; @@ -77,6 +79,12 @@ pub async fn connect_load_init_formats( return Ok(fm); } + warn!( + "first_disk: {}, should_init_erasure_disks: {}", + first_disk, + 
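Swapping BufferWriter for Cursor<Vec<u8>> above works because tokio provides AsyncWrite implementations for cursors over byte buffers, so the inline path needs no bespoke writer type. A minimal sketch of staging data in memory and recovering it afterwards (inline_write is an illustrative name):

use std::io::Cursor;
use tokio::io::AsyncWriteExt;

// Buffer writes in memory; into_inner() hands the bytes back, as the
// is_inline_buffer branches do before inlining data into xl.meta.
async fn inline_write(data: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut buf = Cursor::new(Vec::new());
    buf.write_all(data).await?;
    buf.shutdown().await?; // a no-op for a cursor, but keeps the writer contract uniform
    Ok(buf.into_inner())
}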
DiskError::should_init_erasure_disks(&errs) + ); + let unformatted = DiskError::quorum_unformatted_disks(&errs); if unformatted && !first_disk { return Err(Error::new(ErasureError::NotFirstDisk)); diff --git a/ecstore/src/utils/os/linux.rs b/ecstore/src/utils/os/linux.rs index bd8782bc..27b73f86 100644 --- a/ecstore/src/utils/os/linux.rs +++ b/ecstore/src/utils/os/linux.rs @@ -99,7 +99,7 @@ fn get_fs_type(fs_type: FsType) -> &'static str { match fs_type { statfs::TMPFS_MAGIC => "TMPFS", statfs::MSDOS_SUPER_MAGIC => "MSDOS", - statfs::XFS_SUPER_MAGIC => "XFS", + // statfs::XFS_SUPER_MAGIC => "XFS", statfs::NFS_SUPER_MAGIC => "NFS", statfs::EXT4_SUPER_MAGIC => "EXT4", statfs::ECRYPTFS_SUPER_MAGIC => "ecryptfs", diff --git a/rustfs/Cargo.toml b/rustfs/Cargo.toml index e27c5957..404257dc 100644 --- a/rustfs/Cargo.toml +++ b/rustfs/Cargo.toml @@ -61,7 +61,6 @@ tracing-subscriber.workspace = true transform-stream.workspace = true uuid = "1.15.1" url.workspace = true -admin = { path = "../api/admin" } axum.workspace = true matchit = "0.8.6" shadow-rs.workspace = true diff --git a/rustfs/src/admin/rpc.rs b/rustfs/src/admin/rpc.rs index fc7b5652..9d2fda5b 100644 --- a/rustfs/src/admin/rpc.rs +++ b/rustfs/src/admin/rpc.rs @@ -3,9 +3,10 @@ use super::router::Operation; use super::router::S3Router; use crate::storage::ecfs::bytes_stream; use common::error::Result; +use ecstore::disk::io::FileReader; use ecstore::disk::DiskAPI; -use ecstore::disk::FileReader; use ecstore::store::find_local_disk; +use futures::TryStreamExt; use http::StatusCode; use hyper::Method; use matchit::Params; @@ -17,6 +18,7 @@ use s3s::S3Response; use s3s::S3Result; use serde_urlencoded::from_bytes; use tokio_util::io::ReaderStream; +use tokio_util::io::StreamReader; use tracing::warn; pub const RPC_PREFIX: &str = "/rustfs/rpc"; @@ -28,6 +30,12 @@ pub fn regist_rpc_route(r: &mut S3Router) -> Result<()> { AdminOperation(&ReadFile {}), )?; + r.insert( + Method::PUT, + format!("{}{}", RPC_PREFIX, "/put_file_stream").as_str(), + AdminOperation(&PutFile {}), + )?; + Ok(()) } @@ -49,7 +57,7 @@ impl Operation for ReadFile { let query = { if let Some(query) = req.uri.query() { let input: ReadFileQuery = - from_bytes(query.as_bytes()).map_err(|_e| s3_error!(InvalidArgument, "get query failed1"))?; + from_bytes(query.as_bytes()).map_err(|e| s3_error!(InvalidArgument, "get query failed1 {:?}", e))?; input } else { ReadFileQuery::default() @@ -95,3 +103,56 @@ impl Operation for ReadFile { // Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::empty()))) } } + +// /rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}" +#[derive(Debug, Default, serde::Deserialize)] +pub struct PutFileQuery { + disk: String, + volume: String, + path: String, + append: bool, + size: usize, +} +pub struct PutFile {} +#[async_trait::async_trait] +impl Operation for PutFile { + async fn call(&self, req: S3Request, _params: Params<'_, '_>) -> S3Result> { + warn!("handle PutFile"); + + let query = { + if let Some(query) = req.uri.query() { + let input: PutFileQuery = + from_bytes(query.as_bytes()).map_err(|e| s3_error!(InvalidArgument, "get query failed1 {:?}", e))?; + input + } else { + PutFileQuery::default() + } + }; + + let Some(disk) = find_local_disk(&query.disk).await else { + return Err(s3_error!(InvalidArgument, "disk not found")); + }; + + let mut file = if query.append { + disk.append_file(&query.volume, &query.path) + .await + .map_err(|e| s3_error!(InternalError, "append file err {}", e))? 
+ } else { + disk.create_file("", &query.volume, &query.path, query.size) + .await + .map_err(|e| s3_error!(InternalError, "read file err {}", e))? + }; + + let mut body = StreamReader::new( + req.input + .into_stream() + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)), + ); + + tokio::io::copy(&mut body, &mut file) + .await + .map_err(|e| s3_error!(InternalError, "copy err {}", e))?; + + Ok(S3Response::new((StatusCode::OK, Body::empty()))) + } +} diff --git a/rustfs/src/grpc.rs b/rustfs/src/grpc.rs index c25c0519..1aeb64b2 100644 --- a/rustfs/src/grpc.rs +++ b/rustfs/src/grpc.rs @@ -1,9 +1,4 @@ -use std::{ - collections::HashMap, - error::Error, - io::{Cursor, ErrorKind}, - pin::Pin, -}; +use std::{collections::HashMap, io::Cursor, pin::Pin}; use ecstore::{ admin_server_info::get_local_server_property, @@ -11,7 +6,6 @@ use ecstore::{ disk::{ DeleteOptions, DiskAPI, DiskInfoOptions, DiskStore, FileInfoVersions, ReadMultipleReq, ReadOptions, UpdateMetadataOpts, }, - erasure::Writer, error::Error as EcsError, heal::{ data_usage_cache::DataUsageCache, @@ -50,25 +44,25 @@ use tracing::{debug, error, info}; type ResponseStream = Pin> + Send>>; -fn match_for_io_error(err_status: &Status) -> Option<&std::io::Error> { - let mut err: &(dyn Error + 'static) = err_status; +// fn match_for_io_error(err_status: &Status) -> Option<&std::io::Error> { +// let mut err: &(dyn Error + 'static) = err_status; - loop { - if let Some(io_err) = err.downcast_ref::() { - return Some(io_err); - } +// loop { +// if let Some(io_err) = err.downcast_ref::() { +// return Some(io_err); +// } - // h2::Error do not expose std::io::Error with `source()` - // https://github.com/hyperium/h2/pull/462 - if let Some(h2_err) = err.downcast_ref::() { - if let Some(io_err) = h2_err.get_io() { - return Some(io_err); - } - } +// // h2::Error do not expose std::io::Error with `source()` +// // https://github.com/hyperium/h2/pull/462 +// if let Some(h2_err) = err.downcast_ref::() { +// if let Some(io_err) = h2_err.get_io() { +// return Some(io_err); +// } +// } - err = err.source()?; - } -} +// err = err.source()?; +// } +// } #[derive(Debug)] pub struct NodeService { @@ -558,138 +552,144 @@ impl Node for NodeService { } } - async fn write(&self, request: Request) -> Result, Status> { - let request = request.into_inner(); - if let Some(disk) = self.find_disk(&request.disk).await { - let file_writer = if request.is_append { - disk.append_file(&request.volume, &request.path).await - } else { - disk.create_file("", &request.volume, &request.path, 0).await - }; + async fn write(&self, _request: Request) -> Result, Status> { + unimplemented!("write"); + // let request = request.into_inner(); + // if let Some(disk) = self.find_disk(&request.disk).await { + // let file_writer = if request.is_append { + // disk.append_file(&request.volume, &request.path).await + // } else { + // disk.create_file("", &request.volume, &request.path, 0).await + // }; - match file_writer { - Ok(mut file_writer) => match file_writer.write(&request.data).await { - Ok(_) => Ok(tonic::Response::new(WriteResponse { - success: true, - error: None, - })), - Err(err) => Ok(tonic::Response::new(WriteResponse { - success: false, - error: Some(err_to_proto_err(&err, &format!("write failed: {}", err))), - })), - }, - Err(err) => Ok(tonic::Response::new(WriteResponse { - success: false, - error: Some(err_to_proto_err(&err, &format!("get writer failed: {}", err))), - })), - } - } else { - Ok(tonic::Response::new(WriteResponse { - success: false, - error: 
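The PutFile handler above is the receiving end of the new HTTP write path: the request body becomes an AsyncRead via StreamReader and is pumped into the disk writer with tokio::io::copy. The same bridge in isolation, as a sketch under the assumption that the chunk stream yields bytes::Bytes (pump is an illustrative name):

use bytes::Bytes;
use futures::{Stream, TryStreamExt};
use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio_util::io::StreamReader;

// Adapt a stream of byte chunks into an AsyncRead, then copy it into any writer.
async fn pump<S, E, W>(stream: S, writer: &mut W) -> std::io::Result<u64>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin,
    E: Into<Box<dyn std::error::Error + Send + Sync>>,
    W: AsyncWrite + Unpin,
{
    let mut reader = StreamReader::new(stream.map_err(std::io::Error::other));
    let n = tokio::io::copy(&mut reader, writer).await?; // copy flushes on EOF
    writer.shutdown().await?; // finalize; for the HTTP writer this ends the upload body
    Ok(n)
}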
Some(err_to_proto_err( - &EcsError::new(StorageError::InvalidArgument(Default::default(), Default::default(), Default::default())), - "can not find disk", - )), - })) - } + // match file_writer { + // Ok(mut file_writer) => match file_writer.write(&request.data).await { + // Ok(_) => Ok(tonic::Response::new(WriteResponse { + // success: true, + // error: None, + // })), + // Err(err) => Ok(tonic::Response::new(WriteResponse { + // success: false, + // error: Some(err_to_proto_err(&err, &format!("write failed: {}", err))), + // })), + // }, + // Err(err) => Ok(tonic::Response::new(WriteResponse { + // success: false, + // error: Some(err_to_proto_err(&err, &format!("get writer failed: {}", err))), + // })), + // } + // } else { + // Ok(tonic::Response::new(WriteResponse { + // success: false, + // error: Some(err_to_proto_err( + // &EcsError::new(StorageError::InvalidArgument(Default::default(), Default::default(), Default::default())), + // "can not find disk", + // )), + // })) + // } } type WriteStreamStream = ResponseStream; - async fn write_stream(&self, request: Request>) -> Result, Status> { + async fn write_stream( + &self, + _request: Request>, + ) -> Result, Status> { info!("write_stream"); - let mut in_stream = request.into_inner(); - let (tx, rx) = mpsc::channel(128); + unimplemented!("write_stream"); - tokio::spawn(async move { - let mut file_ref = None; - while let Some(result) = in_stream.next().await { - match result { - // Ok(v) => tx - // .send(Ok(EchoResponse { message: v.message })) - // .await - // .expect("working rx"), - Ok(v) => { - match file_ref.as_ref() { - Some(_) => (), - None => { - if let Some(disk) = find_local_disk(&v.disk).await { - let file_writer = if v.is_append { - disk.append_file(&v.volume, &v.path).await - } else { - disk.create_file("", &v.volume, &v.path, 0).await - }; + // let mut in_stream = request.into_inner(); + // let (tx, rx) = mpsc::channel(128); - match file_writer { - Ok(file_writer) => file_ref = Some(file_writer), - Err(err) => { - tx.send(Ok(WriteResponse { - success: false, - error: Some(err_to_proto_err( - &err, - &format!("get get file writer failed: {}", err), - )), - })) - .await - .expect("working rx"); - break; - } - } - } else { - tx.send(Ok(WriteResponse { - success: false, - error: Some(err_to_proto_err( - &EcsError::new(StorageError::InvalidArgument( - Default::default(), - Default::default(), - Default::default(), - )), - "can not find disk", - )), - })) - .await - .expect("working rx"); - break; - } - } - }; + // tokio::spawn(async move { + // let mut file_ref = None; + // while let Some(result) = in_stream.next().await { + // match result { + // // Ok(v) => tx + // // .send(Ok(EchoResponse { message: v.message })) + // // .await + // // .expect("working rx"), + // Ok(v) => { + // match file_ref.as_ref() { + // Some(_) => (), + // None => { + // if let Some(disk) = find_local_disk(&v.disk).await { + // let file_writer = if v.is_append { + // disk.append_file(&v.volume, &v.path).await + // } else { + // disk.create_file("", &v.volume, &v.path, 0).await + // }; - match file_ref.as_mut().unwrap().write(&v.data).await { - Ok(_) => tx.send(Ok(WriteResponse { - success: true, - error: None, - })), - Err(err) => tx.send(Ok(WriteResponse { - success: false, - error: Some(err_to_proto_err(&err, &format!("write failed: {}", err))), - })), - } - .await - .unwrap(); - } - Err(err) => { - if let Some(io_err) = match_for_io_error(&err) { - if io_err.kind() == ErrorKind::BrokenPipe { - // here you can handle special case when client - // 
disconnected in unexpected way - eprintln!("\tclient disconnected: broken pipe"); - break; - } - } + // match file_writer { + // Ok(file_writer) => file_ref = Some(file_writer), + // Err(err) => { + // tx.send(Ok(WriteResponse { + // success: false, + // error: Some(err_to_proto_err( + // &err, + // &format!("get get file writer failed: {}", err), + // )), + // })) + // .await + // .expect("working rx"); + // break; + // } + // } + // } else { + // tx.send(Ok(WriteResponse { + // success: false, + // error: Some(err_to_proto_err( + // &EcsError::new(StorageError::InvalidArgument( + // Default::default(), + // Default::default(), + // Default::default(), + // )), + // "can not find disk", + // )), + // })) + // .await + // .expect("working rx"); + // break; + // } + // } + // }; - match tx.send(Err(err)).await { - Ok(_) => (), - Err(_err) => break, // response was dropped - } - } - } - } - println!("\tstream ended"); - }); + // match file_ref.as_mut().unwrap().write(&v.data).await { + // Ok(_) => tx.send(Ok(WriteResponse { + // success: true, + // error: None, + // })), + // Err(err) => tx.send(Ok(WriteResponse { + // success: false, + // error: Some(err_to_proto_err(&err, &format!("write failed: {}", err))), + // })), + // } + // .await + // .unwrap(); + // } + // Err(err) => { + // if let Some(io_err) = match_for_io_error(&err) { + // if io_err.kind() == ErrorKind::BrokenPipe { + // // here you can handle special case when client + // // disconnected in unexpected way + // eprintln!("\tclient disconnected: broken pipe"); + // break; + // } + // } - let out_stream = ReceiverStream::new(rx); + // match tx.send(Err(err)).await { + // Ok(_) => (), + // Err(_err) => break, // response was dropped + // } + // } + // } + // } + // println!("\tstream ended"); + // }); - Ok(tonic::Response::new(Box::pin(out_stream))) + // let out_stream = ReceiverStream::new(rx); + + // Ok(tonic::Response::new(Box::pin(out_stream))) } type ReadAtStream = ResponseStream; diff --git a/rustfs/src/main.rs b/rustfs/src/main.rs index 002e4f57..d3e5b718 100644 --- a/rustfs/src/main.rs +++ b/rustfs/src/main.rs @@ -152,8 +152,8 @@ async fn run(opt: config::Opt) -> Result<()> { for (i, eps) in endpoint_pools.as_ref().iter().enumerate() { info!( - "created endpoints {}, set_count:{}, drives_per_set: {}, cmd: {:?}", - i, eps.set_count, eps.drives_per_set, eps.cmd_line + "created endpoints {}, set_count:{}, drives_per_set: {}, cmd: {:?}, \n{:?}", + i, eps.set_count, eps.drives_per_set, eps.cmd_line, eps ); } From 17d7c869ac5bfb07eb5c04e749cd451fb7b70f87 Mon Sep 17 00:00:00 2001 From: weisd Date: Wed, 12 Mar 2025 10:44:38 +0800 Subject: [PATCH 3/5] r/w io as async --- ecstore/src/bitrot.rs | 77 +++++++++++---- ecstore/src/disk/io.rs | 196 +++++++++---------------------------- ecstore/src/disk/local.rs | 18 ++-- ecstore/src/disk/mod.rs | 9 +- ecstore/src/disk/remote.rs | 29 +++--- ecstore/src/erasure.rs | 4 +- ecstore/src/set_disk.rs | 93 +++++++++++------- ecstore/src/store_api.rs | 2 +- rustfs/src/admin/rpc.rs | 40 +------- 9 files changed, 198 insertions(+), 270 deletions(-) diff --git a/ecstore/src/bitrot.rs b/ecstore/src/bitrot.rs index 21ceb5b6..b69ef859 100644 --- a/ecstore/src/bitrot.rs +++ b/ecstore/src/bitrot.rs @@ -1,9 +1,5 @@ use crate::{ - disk::{ - error::DiskError, - io::{FileReader, FileWriter}, - Disk, DiskAPI, - }, + disk::{error::DiskError, Disk, DiskAPI, FileReader, FileWriter}, erasure::{ReadAt, Writer}, error::{Error, Result}, store_api::BitrotAlgorithm, @@ -488,23 +484,45 @@ pub async fn bitrot_verify( // 
} pub struct BitrotFileWriter { - pub inner: FileWriter, + inner: Option, hasher: Hasher, _shard_size: usize, + inline: bool, + inline_data: Vec, } impl BitrotFileWriter { - pub fn new(inner: FileWriter, algo: BitrotAlgorithm, _shard_size: usize) -> Self { + pub async fn new( + disk: Arc, + volume: &str, + path: &str, + inline: bool, + algo: BitrotAlgorithm, + _shard_size: usize, + ) -> Result { + let inner = if !inline { + Some(disk.create_file("", volume, path, 0).await?) + } else { + None + }; + let hasher = algo.new_hasher(); - Self { + + Ok(Self { inner, + inline, + inline_data: Vec::new(), hasher, _shard_size, - } + }) } - pub fn writer(&self) -> &FileWriter { - &self.inner + // pub fn writer(&self) -> &FileWriter { + // &self.inner + // } + + pub fn inline_data(&self) -> &[u8] { + &self.inline_data } } @@ -521,18 +539,43 @@ impl Writer for BitrotFileWriter { self.hasher.reset(); self.hasher.update(buf); let hash_bytes = self.hasher.clone().finalize(); - let _ = self.inner.write_all(&hash_bytes).await?; - let _ = self.inner.write_all(buf).await?; + + if let Some(f) = self.inner.as_mut() { + f.write_all(&hash_bytes).await?; + f.write_all(buf).await?; + } else { + self.inline_data.extend_from_slice(&hash_bytes); + self.inline_data.extend_from_slice(buf); + } + + Ok(()) + } + async fn close(&mut self) -> Result<()> { + if self.inline { + return Ok(()); + } + + if let Some(f) = self.inner.as_mut() { + f.shutdown().await?; + } Ok(()) } } -pub fn new_bitrot_filewriter(inner: FileWriter, algo: BitrotAlgorithm, shard_size: usize) -> BitrotWriter { - Box::new(BitrotFileWriter::new(inner, algo, shard_size)) +pub async fn new_bitrot_filewriter( + disk: Arc, + volume: &str, + path: &str, + inline: bool, + algo: BitrotAlgorithm, + shard_size: usize, +) -> Result { + let w = BitrotFileWriter::new(disk, volume, path, inline, algo, shard_size).await?; + + Ok(Box::new(w)) } -#[derive(Debug)] struct BitrotFileReader { disk: Arc, data: Option>, @@ -599,7 +642,7 @@ impl ReadAt for BitrotFileReader { let stream_offset = (offset / self.shard_size) * self.hasher.size() + offset; if let Some(data) = self.data.clone() { - self.reader = Some(FileReader::Buffer(Cursor::new(data))); + self.reader = Some(Box::new(Cursor::new(data))); } else { self.reader = Some( self.disk diff --git a/ecstore/src/disk/io.rs b/ecstore/src/disk/io.rs index 981c6ddd..c0360050 100644 --- a/ecstore/src/disk/io.rs +++ b/ecstore/src/disk/io.rs @@ -1,169 +1,27 @@ use crate::error::Result; use futures::TryStreamExt; -use std::io::Cursor; use std::pin::Pin; use std::task::Poll; -use tokio::fs::File; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::AsyncWrite; +use tokio::sync::oneshot; use tokio_util::io::ReaderStream; use tokio_util::io::StreamReader; use tracing::error; use tracing::warn; -#[derive(Debug)] -pub enum FileReader { - Local(File), - // Remote(RemoteFileReader), - Buffer(Cursor>), - Http(HttpFileReader), -} - -impl AsyncRead for FileReader { - #[tracing::instrument(level = "debug", skip(self, buf))] - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> std::task::Poll> { - match &mut *self { - Self::Local(reader) => Pin::new(reader).poll_read(cx, buf), - Self::Buffer(reader) => Pin::new(reader).poll_read(cx, buf), - Self::Http(reader) => Pin::new(reader).poll_read(cx, buf), - } - } -} - -#[derive(Debug)] -pub struct HttpFileReader { - // client: reqwest::Client, - // url: String, - // disk: String, - // volume: String, - // path: String, - // offset: 
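Each BitrotFileWriter::write above emits the digest of the shard immediately followed by the shard bytes, so the on-disk (or inlined) layout is hash||data per shard and every shard can be verified independently on read. The framing in isolation, with the hashing abstracted behind a closure since BitrotAlgorithm::new_hasher's concrete type is not shown here (digest_of is an assumption):

use tokio::io::{AsyncWrite, AsyncWriteExt};

// Write one shard in bitrot framing: checksum first, payload second.
async fn write_shard<W>(
    w: &mut W,
    digest_of: impl Fn(&[u8]) -> Vec<u8>, // stands in for the configured hasher
    shard: &[u8],
) -> std::io::Result<()>
where
    W: AsyncWrite + Unpin,
{
    let digest = digest_of(shard);
    w.write_all(&digest).await?;
    w.write_all(shard).await?;
    Ok(())
}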
usize, - // length: usize, - inner: tokio::io::DuplexStream, - // buf: Vec, - // pos: usize, -} - -impl HttpFileReader { - pub fn new(url: &str, disk: &str, volume: &str, path: &str, offset: usize, length: usize) -> Result { - warn!("http read start {}", path); - let url = url.to_owned(); - let disk = disk.to_owned(); - let volume = volume.to_owned(); - let path = path.to_owned(); - - // let (reader, mut writer) = tokio::io::simplex(1024); - let (reader, mut writer) = tokio::io::duplex(1024 * 1024 * 10); - - tokio::spawn(async move { - let client = reqwest::Client::new(); - let resp = match client - .get(format!( - "{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}", - url, - urlencoding::encode(&disk), - urlencoding::encode(&volume), - urlencoding::encode(&path), - offset, - length - )) - .send() - .await - .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) - { - Ok(resp) => resp, - Err(err) => { - warn!("http file reader error: {}", err); - return; - } - }; - - let mut rd = StreamReader::new( - resp.bytes_stream() - .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)), - ); - - if let Err(err) = tokio::io::copy(&mut rd, &mut writer).await { - error!("http file reader copy error: {}", err); - }; - }); - Ok(Self { - // client: reqwest::Client::new(), - // url: url.to_string(), - // disk: disk.to_string(), - // volume: volume.to_string(), - // path: path.to_string(), - // offset, - // length, - inner: reader, - // buf: Vec::new(), - // pos: 0, - }) - } -} - -impl AsyncRead for HttpFileReader { - #[tracing::instrument(level = "debug", skip(self, buf))] - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> std::task::Poll> { - Pin::new(&mut self.inner).poll_read(cx, buf) - } -} - -#[derive(Debug)] -pub enum FileWriter { - Local(File), - Http(HttpFileWriter), - Buffer(Cursor>), -} - -impl AsyncWrite for FileWriter { - #[tracing::instrument(level = "debug", skip(self, buf))] - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &[u8], - ) -> Poll> { - match &mut *self { - Self::Local(writer) => Pin::new(writer).poll_write(cx, buf), - Self::Buffer(writer) => Pin::new(writer).poll_write(cx, buf), - Self::Http(writer) => Pin::new(writer).poll_write(cx, buf), - } - } - - #[tracing::instrument(level = "debug", skip(self))] - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { - match &mut *self { - Self::Local(writer) => Pin::new(writer).poll_flush(cx), - Self::Buffer(writer) => Pin::new(writer).poll_flush(cx), - Self::Http(writer) => Pin::new(writer).poll_flush(cx), - } - } - - #[tracing::instrument(level = "debug", skip(self))] - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { - match &mut *self { - Self::Local(writer) => Pin::new(writer).poll_shutdown(cx), - Self::Buffer(writer) => Pin::new(writer).poll_shutdown(cx), - Self::Http(writer) => Pin::new(writer).poll_shutdown(cx), - } - } -} +use super::FileReader; #[derive(Debug)] pub struct HttpFileWriter { wd: tokio::io::WriteHalf, + err_rx: oneshot::Receiver, } impl HttpFileWriter { pub fn new(url: &str, disk: &str, volume: &str, path: &str, size: usize, append: bool) -> Result { - let (rd, wd) = tokio::io::simplex(1024 * 1024 * 10); + let (rd, wd) = tokio::io::simplex(4096); + + let (err_tx, err_rx) = oneshot::channel::(); let body = reqwest::Body::wrap_stream(ReaderStream::new(rd)); @@ -187,18 +45,22 @@ impl HttpFileWriter { 
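The oneshot channel created above is HttpFileWriter's error back-channel: the spawned upload task owns the sender, and the writer polls the receiver on each write (as the poll_write change below shows) so a failed request surfaces as an io::Error instead of vanishing inside the background task. The check in isolation, as a sketch:

use tokio::sync::oneshot;

// Returns Err only if the background task has reported a failure.
fn background_error(rx: &mut oneshot::Receiver<std::io::Error>) -> std::io::Result<()> {
    use tokio::sync::oneshot::error::TryRecvError;
    match rx.try_recv() {
        Ok(err) => Err(err),                 // the upload failed
        Err(TryRecvError::Empty) => Ok(()),  // still in flight
        Err(TryRecvError::Closed) => Ok(()), // sender dropped: task finished without error
    }
}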
.body(body) .send() .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) { error!("HttpFileWriter put file err: {:?}", err); + + if let Err(er) = err_tx.send(err) { + error!("HttpFileWriter tx.send err: {:?}", er); + } // return; } - // TODO: handle response - - // debug!("http write done {}", path); + // error!("http write done {}", path); }); Ok(Self { wd, + err_rx, // client: reqwest::Client::new(), // url: url.to_string(), // disk: disk.to_string(), @@ -214,6 +76,10 @@ impl AsyncWrite for HttpFileWriter { cx: &mut std::task::Context<'_>, buf: &[u8], ) -> Poll> { + if let Ok(err) = self.as_mut().err_rx.try_recv() { + return Poll::Ready(Err(err)); + } + Pin::new(&mut self.wd).poll_write(cx, buf) } @@ -227,3 +93,29 @@ impl AsyncWrite for HttpFileWriter { Pin::new(&mut self.wd).poll_shutdown(cx) } } + +pub async fn new_http_reader( + url: &str, + disk: &str, + volume: &str, + path: &str, + offset: usize, + length: usize, +) -> Result { + let resp = reqwest::Client::new() + .get(format!( + "{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}", + url, + urlencoding::encode(disk), + urlencoding::encode(volume), + urlencoding::encode(path), + offset, + length + )) + .send() + .await?; + + let inner = StreamReader::new(resp.bytes_stream().map_err(std::io::Error::other)); + + Ok(Box::new(inner)) +} diff --git a/ecstore/src/disk/local.rs b/ecstore/src/disk/local.rs index 013be96a..4690450c 100644 --- a/ecstore/src/disk/local.rs +++ b/ecstore/src/disk/local.rs @@ -745,7 +745,7 @@ impl LocalDisk { let meta = file.metadata().await?; - bitrot_verify(FileReader::Local(file), meta.size() as usize, part_size, algo, sum.to_vec(), shard_size).await + bitrot_verify(Box::new(file), meta.size() as usize, part_size, algo, sum.to_vec(), shard_size).await } async fn scan_dir( @@ -1314,7 +1314,7 @@ impl DiskAPI for LocalDisk { let src_file_path = src_volume_dir.join(Path::new(src_path)); let dst_file_path = dst_volume_dir.join(Path::new(dst_path)); - warn!("rename_part src_file_path:{:?}, dst_file_path:{:?}", &src_file_path, &dst_file_path); + // warn!("rename_part src_file_path:{:?}, dst_file_path:{:?}", &src_file_path, &dst_file_path); check_path_length(src_file_path.to_string_lossy().as_ref())?; check_path_length(dst_file_path.to_string_lossy().as_ref())?; @@ -1471,7 +1471,7 @@ impl DiskAPI for LocalDisk { #[tracing::instrument(level = "debug", skip(self))] async fn create_file(&self, origvolume: &str, volume: &str, path: &str, _file_size: usize) -> Result { - warn!("disk create_file: origvolume: {}, volume: {}, path: {}", origvolume, volume, path); + // warn!("disk create_file: origvolume: {}, volume: {}, path: {}", origvolume, volume, path); if !origvolume.is_empty() { let origvolume_dir = self.get_bucket_path(origvolume)?; @@ -1495,7 +1495,7 @@ impl DiskAPI for LocalDisk { .await .map_err(os_err_to_file_err)?; - Ok(FileWriter::Local(f)) + Ok(Box::new(f)) // Ok(()) } @@ -1517,7 +1517,7 @@ impl DiskAPI for LocalDisk { let f = self.open_file(file_path, O_CREATE | O_APPEND | O_WRONLY, volume_dir).await?; - Ok(FileWriter::Local(f)) + Ok(Box::new(f)) } // TODO: io verifier @@ -1552,7 +1552,7 @@ impl DiskAPI for LocalDisk { } })?; - Ok(FileReader::Local(f)) + Ok(Box::new(f)) } #[tracing::instrument(level = "debug", skip(self))] @@ -1603,7 +1603,7 @@ impl DiskAPI for LocalDisk { f.seek(SeekFrom::Start(offset as u64)).await?; - Ok(FileReader::Local(f)) + Ok(Box::new(f)) } #[tracing::instrument(level = "debug", skip(self))] async fn list_dir(&self, origvolume: &str, 
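Shrinking the pipe from a 10 MiB duplex to simplex(4096) trades buffering for backpressure: once 4 KiB is in flight, a write parks until the background request drains the pipe, so a slow network throttles the producer instead of ballooning memory. A self-contained demonstration of that bounded-pipe behavior:

use tokio::io::{simplex, AsyncReadExt, AsyncWriteExt};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let (mut rd, mut wr) = simplex(4096); // 4 KiB in-memory pipe
    let reader = tokio::spawn(async move {
        let mut sink = Vec::new();
        rd.read_to_end(&mut sink).await.map(|_| sink)
    });
    wr.write_all(&vec![0u8; 16 * 1024]).await?; // 16 KiB flows through a 4 KiB buffer
    wr.shutdown().await?; // signal EOF so read_to_end completes
    assert_eq!(reader.await.unwrap()?.len(), 16 * 1024);
    Ok(())
}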
volume: &str, dir_path: &str, count: i32) -> Result> { @@ -2291,6 +2291,9 @@ impl DiskAPI for LocalDisk { self.scanning.fetch_add(1, Ordering::SeqCst); defer!(|| { self.scanning.fetch_sub(1, Ordering::SeqCst) }); + // must come before metadata_sys + let Some(store) = new_object_layer_fn() else { return Err(Error::msg("errServerNotInitialized")) }; + // Check if the current bucket has replication configuration if let Ok((rcfg, _)) = metadata_sys::get_replication_config(&cache.info.name).await { if has_active_rules(&rcfg, "", true) { @@ -2298,7 +2301,6 @@ impl DiskAPI for LocalDisk { } } - let Some(store) = new_object_layer_fn() else { return Err(Error::msg("errServerNotInitialized")) }; let loc = self.get_disk_location(); let disks = store.get_disks(loc.pool_idx.unwrap(), loc.disk_idx.unwrap()).await?; let disk = Arc::new(LocalDisk::new(&self.endpoint(), false).await?); diff --git a/ecstore/src/disk/mod.rs index dcf719f2..a708977e 100644 --- a/ecstore/src/disk/mod.rs +++ b/ecstore/src/disk/mod.rs @@ -29,14 +29,16 @@ use crate::{ }; use endpoint::Endpoint; use error::DiskError; -use io::{FileReader, FileWriter}; use local::LocalDisk; use madmin::info_commands::DiskMetrics; use remote::RemoteDisk; use serde::{Deserialize, Serialize}; use std::{cmp::Ordering, fmt::Debug, path::PathBuf, sync::Arc}; use time::OffsetDateTime; -use tokio::{io::AsyncWrite, sync::mpsc::Sender}; +use tokio::{ + io::{AsyncRead, AsyncWrite}, + sync::mpsc::Sender, +}; use tracing::info; use tracing::warn; use uuid::Uuid; @@ -372,6 +374,9 @@ pub async fn new_disk(ep: &endpoint::Endpoint, opt: &DiskOption) -> Result; +pub type FileWriter = Box; + #[async_trait::async_trait] pub trait DiskAPI: Debug + Send + Sync + 'static { fn to_string(&self) -> String; diff --git a/ecstore/src/disk/remote.rs index 8528ac1e..17c4c992 100644 --- a/ecstore/src/disk/remote.rs +++ b/ecstore/src/disk/remote.rs @@ -22,9 +22,9 @@ use tracing::info; use uuid::Uuid; use super::{ - endpoint::Endpoint, io::HttpFileReader, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, - DiskOption, FileInfoVersions, FileReader, FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, - UpdateMetadataOpts, VolumeInfo, WalkDirOptions, + endpoint::Endpoint, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, + FileInfoVersions, FileReader, FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, + VolumeInfo, WalkDirOptions, }; use crate::{ disk::error::DiskError, @@ -36,7 +36,10 @@ use crate::{ }, store_api::{FileInfo, RawFileInfo}, }; -use crate::{disk::io::HttpFileWriter, utils::proto_err_to_err}; +use crate::{ + disk::io::{new_http_reader, HttpFileWriter}, + utils::proto_err_to_err, +}; use crate::{disk::MetaCacheEntry, metacache::writer::MetacacheWriter}; use protos::proto_gen::node_service::RenamePartRequst; @@ -316,7 +319,7 @@ impl DiskAPI for RemoteDisk { #[tracing::instrument(level = "debug", skip(self))] async fn create_file(&self, _origvolume: &str, volume: &str, path: &str, file_size: usize) -> Result { info!("create_file"); - Ok(FileWriter::Http(HttpFileWriter::new( + Ok(Box::new(HttpFileWriter::new( self.endpoint.grid_host().as_str(), self.endpoint.to_string().as_str(), volume, @@ -329,7 +332,7 @@ impl DiskAPI for RemoteDisk { #[tracing::instrument(level = "debug", skip(self))] async fn append_file(&self, volume: &str, path: &str) -> Result { info!("append_file"); -
Ok(FileWriter::Http(HttpFileWriter::new( + Ok(Box::new(HttpFileWriter::new( self.endpoint.grid_host().as_str(), self.endpoint.to_string().as_str(), volume, @@ -342,26 +345,20 @@ impl DiskAPI for RemoteDisk { #[tracing::instrument(level = "debug", skip(self))] async fn read_file(&self, volume: &str, path: &str) -> Result { info!("read_file"); - Ok(FileReader::Http(HttpFileReader::new( - self.endpoint.grid_host().as_str(), - self.endpoint.to_string().as_str(), - volume, - path, - 0, - 0, - )?)) + Ok(new_http_reader(self.endpoint.grid_host().as_str(), self.endpoint.to_string().as_str(), volume, path, 0, 0).await?) } #[tracing::instrument(level = "debug", skip(self))] async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result { - Ok(FileReader::Http(HttpFileReader::new( + Ok(new_http_reader( self.endpoint.grid_host().as_str(), self.endpoint.to_string().as_str(), volume, path, offset, length, - )?)) + ) + .await?) } async fn list_dir(&self, _origvolume: &str, volume: &str, _dir_path: &str, _count: i32) -> Result> { diff --git a/ecstore/src/erasure.rs index f068b109..0056d4bf 100644 --- a/ecstore/src/erasure.rs +++ b/ecstore/src/erasure.rs @@ -6,7 +6,6 @@ use futures::future::join_all; use futures::{pin_mut, Stream, StreamExt}; use reed_solomon_erasure::galois_8::ReedSolomon; use std::any::Any; -use std::fmt::Debug; use std::io::ErrorKind; use tokio::io::DuplexStream; use tokio::io::{AsyncReadExt, AsyncWriteExt}; @@ -500,11 +499,10 @@ pub trait Writer { } #[async_trait::async_trait] -pub trait ReadAt: Debug { +pub trait ReadAt { async fn read_at(&mut self, offset: usize, length: usize) -> Result<(Vec, usize)>; } -#[derive(Debug)] pub struct ShardReader { readers: Vec>, // disks data_block_count: usize, // total shard count diff --git a/ecstore/src/set_disk.rs index d1ef1de4..e91907bc 100644 --- a/ecstore/src/set_disk.rs +++ b/ecstore/src/set_disk.rs @@ -14,7 +14,6 @@ use crate::{ endpoint::Endpoint, error::{is_all_not_found, DiskError}, format::FormatV3, - io::{FileReader, FileWriter}, new_disk, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskOption, DiskStore, FileInfoVersions, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, ReadMultipleReq, ReadMultipleResp, ReadOptions, UpdateMetadataOpts, RUSTFS_META_BUCKET, RUSTFS_META_MULTIPART_BUCKET, RUSTFS_META_TMP_BUCKET, }, erasure::Erasure, error::{Error, Result}, @@ -636,7 +635,7 @@ impl SetDisks { } fn get_upload_id_dir(bucket: &str, object: &str, upload_id: &str) -> String { - warn!("get_upload_id_dir upload_id {:?}", upload_id); + // warn!("get_upload_id_dir upload_id {:?}", upload_id); let upload_uuid = base64_decode(upload_id.as_bytes()) .and_then(|v| { @@ -2450,21 +2449,25 @@ impl SetDisks { for disk in out_dated_disks.iter() { if let Some(disk) = disk { - let filewriter = { - if is_inline_buffer { - FileWriter::Buffer(Cursor::new(Vec::new())) - } else { - let disk = disk.clone(); - let part_path = format!("{}/{}/part.{}", tmp_id, dst_data_dir, part.number); - disk.create_file("", RUSTFS_META_TMP_BUCKET, &part_path, 0).await? - } - }; + // let filewriter = { + // if is_inline_buffer { + // Box::new(Cursor::new(Vec::new())) + // } else { + // let disk = disk.clone(); + // let part_path = format!("{}/{}/part.{}", tmp_id, dst_data_dir, part.number); + // disk.create_file("", RUSTFS_META_TMP_BUCKET, &part_path, 0).await?
+ // } + // }; let writer = new_bitrot_filewriter( - filewriter, + disk.clone(), + RUSTFS_META_TMP_BUCKET, + format!("{}/{}/part.{}", tmp_id, dst_data_dir, part.number).as_str(), + is_inline_buffer, DEFAULT_BITROT_ALGO, erasure.shard_size(erasure.block_size), - ); + ) + .await?; writers.push(Some(writer)); } else { @@ -2500,9 +2503,7 @@ impl SetDisks { if is_inline_buffer { if let Some(ref writer) = writers[index] { if let Some(w) = writer.as_any().downcast_ref::() { - if let FileWriter::Buffer(buffer_writer) = w.writer() { - parts_metadata[index].data = Some(buffer_writer.clone().into_inner()); - } + parts_metadata[index].data = Some(w.inline_data().to_vec()); } } parts_metadata[index].set_inline_data(); @@ -3742,17 +3743,25 @@ impl ObjectIO for SetDisks { for disk_op in shuffle_disks.iter() { if let Some(disk) = disk_op { - let filewriter = { - if is_inline_buffer { - FileWriter::Buffer(Cursor::new(Vec::new())) - } else { - let disk = disk.clone(); + // let filewriter = { + // if is_inline_buffer { + // Box::new(Cursor::new(Vec::new())) + // } else { + // let disk = disk.clone(); - disk.create_file("", RUSTFS_META_TMP_BUCKET, &tmp_object, 0).await? - } - }; + // disk.create_file("", RUSTFS_META_TMP_BUCKET, &tmp_object, 0).await? + // } + // }; - let writer = new_bitrot_filewriter(filewriter, DEFAULT_BITROT_ALGO, erasure.shard_size(erasure.block_size)); + let writer = new_bitrot_filewriter( + disk.clone(), + RUSTFS_META_TMP_BUCKET, + &tmp_object, + is_inline_buffer, + DEFAULT_BITROT_ALGO, + erasure.shard_size(erasure.block_size), + ) + .await?; writers.push(Some(writer)); } else { @@ -3760,8 +3769,6 @@ impl ObjectIO for SetDisks { } } - warn!("put_object data.content_length {}", data.content_length); - // TODO: etag from header let mut etag_stream = EtagReader::new(&mut data.stream, None, None); @@ -3769,6 +3776,10 @@ impl ObjectIO for SetDisks { .encode(&mut etag_stream, &mut writers, data.content_length, write_quorum) .await?; // TODO: 出错,删除临时目录 + if let Err(err) = close_bitrot_writers(&mut writers).await { + error!("close_bitrot_writers err {:?}", err); + } + let etag = etag_stream.etag(); //TODO: userDefined @@ -3790,9 +3801,7 @@ impl ObjectIO for SetDisks { if is_inline_buffer { if let Some(ref writer) = writers[i] { if let Some(w) = writer.as_any().downcast_ref::() { - if let FileWriter::Buffer(buffer_writer) = w.writer() { - fi.data = Some(buffer_writer.clone().into_inner()); - } + fi.data = Some(w.inline_data().to_vec()); } } } @@ -4089,7 +4098,7 @@ impl StorageAPI for SetDisks { for errs in results.into_iter().flatten() { // TODO: handle err reduceWriteQuorumErrs - for err in errs.iter() { + for err in errs.iter().flatten() { warn!("result err {:?}", err); } } @@ -4327,10 +4336,18 @@ impl StorageAPI for SetDisks { for disk in disks.iter() { if let Some(disk) = disk { // let writer = disk.append_file(RUSTFS_META_TMP_BUCKET, &tmp_part_path).await?; - let filewriter = disk - .create_file("", RUSTFS_META_TMP_BUCKET, &tmp_part_path, data.content_length) - .await?; - let writer = new_bitrot_filewriter(filewriter, DEFAULT_BITROT_ALGO, erasure.shard_size(erasure.block_size)); + // let filewriter = disk + // .create_file("", RUSTFS_META_TMP_BUCKET, &tmp_part_path, data.content_length) + // .await?; + let writer = new_bitrot_filewriter( + disk.clone(), + RUSTFS_META_TMP_BUCKET, + &tmp_part_path, + false, + DEFAULT_BITROT_ALGO, + erasure.shard_size(erasure.block_size), + ) + .await?; writers.push(Some(writer)); } else { writers.push(None); @@ -4345,6 +4362,10 @@ impl StorageAPI for SetDisks 
{ .encode(&mut etag_stream, &mut writers, data.content_length, write_quorum) .await?; + if let Err(err) = close_bitrot_writers(&mut writers).await { + error!("close_bitrot_writers err {:?}", err); + } + let mut etag = etag_stream.etag(); if let Some(ref tag) = opts.preserve_etag { @@ -5248,7 +5269,7 @@ async fn disks_with_all_parts( let checksum_info = meta.erasure.get_checksum_info(meta.parts[0].number); let data_len = data.len(); let verify_err = match bitrot_verify( - FileReader::Buffer(Cursor::new(data.clone())), + Box::new(Cursor::new(data.clone())), data_len, meta.erasure.shard_file_size(meta.size), checksum_info.algorithm, diff --git a/ecstore/src/store_api.rs b/ecstore/src/store_api.rs index c67d1781..091ee1e8 100644 --- a/ecstore/src/store_api.rs +++ b/ecstore/src/store_api.rs @@ -19,7 +19,7 @@ use time::OffsetDateTime; use uuid::Uuid; pub const ERASURE_ALGORITHM: &str = "rs-vandermonde"; -pub const BLOCK_SIZE_V2: usize = 1048576; // 1M +pub const BLOCK_SIZE_V2: usize = 1024 * 1024; // 1M pub const RESERVED_METADATA_PREFIX: &str = "X-Rustfs-Internal-"; pub const RESERVED_METADATA_PREFIX_LOWER: &str = "X-Rustfs-Internal-"; pub const RUSTFS_HEALING: &str = "X-Rustfs-Internal-healing"; diff --git a/rustfs/src/admin/rpc.rs b/rustfs/src/admin/rpc.rs index 9d2fda5b..42bbb0ac 100644 --- a/rustfs/src/admin/rpc.rs +++ b/rustfs/src/admin/rpc.rs @@ -3,7 +3,6 @@ use super::router::Operation; use super::router::S3Router; use crate::storage::ecfs::bytes_stream; use common::error::Result; -use ecstore::disk::io::FileReader; use ecstore::disk::DiskAPI; use ecstore::store::find_local_disk; use futures::TryStreamExt; @@ -19,7 +18,6 @@ use s3s::S3Result; use serde_urlencoded::from_bytes; use tokio_util::io::ReaderStream; use tokio_util::io::StreamReader; -use tracing::warn; pub const RPC_PREFIX: &str = "/rustfs/rpc"; @@ -52,8 +50,6 @@ pub struct ReadFile {} #[async_trait::async_trait] impl Operation for ReadFile { async fn call(&self, req: S3Request, _params: Params<'_, '_>) -> S3Result> { - warn!("handle ReadFile"); - let query = { if let Some(query) = req.uri.query() { let input: ReadFileQuery = @@ -68,39 +64,15 @@ impl Operation for ReadFile { return Err(s3_error!(InvalidArgument, "disk not found")); }; - let file: FileReader = disk + let file = disk .read_file_stream(&query.volume, &query.path, query.offset, query.length) .await .map_err(|e| s3_error!(InternalError, "read file err {}", e))?; - let s = bytes_stream(ReaderStream::new(file), query.length); - - Ok(S3Response::new((StatusCode::OK, Body::from(StreamingBlob::wrap(s))))) - - // let querys = req.uri.query().map(|q| { - // let mut querys = HashMap::new(); - // for (k, v) in url::form_urlencoded::parse(q.as_bytes()) { - // println!("{}={}", k, v); - // querys.insert(k.to_string(), v.to_string()); - // } - // querys - // }); - - // // TODO: file_path from root - - // if let Some(file_path) = querys.and_then(|q| q.get("file_path").cloned()) { - // let file = fs::OpenOptions::new() - // .read(true) - // .open(file_path) - // .await - // .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("open file err {}", e)))?; - - // let s = bytes_stream(ReaderStream::new(file), 0); - - // return Ok(S3Response::new((StatusCode::OK, Body::from(StreamingBlob::wrap(s))))); - // } - - // Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::empty()))) + Ok(S3Response::new(( + StatusCode::OK, + Body::from(StreamingBlob::wrap(bytes_stream(ReaderStream::new(file), query.length))), + ))) } } @@ -117,8 +89,6 @@ pub struct PutFile {} 
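On the serving side above, the boxed reader from read_file_stream is turned back into a byte stream for the HTTP response body; bytes_stream, from this crate's storage helpers, additionally caps the stream at query.length. The core adapter without that cap, as a sketch:

use tokio::io::AsyncRead;
use tokio_util::io::ReaderStream;

// Any AsyncRead becomes a Stream<Item = io::Result<Bytes>> usable as a body;
// 64 KiB chunks are a reasonable default for object reads.
fn into_body<R: AsyncRead>(file: R) -> ReaderStream<R> {
    ReaderStream::with_capacity(file, 64 * 1024)
}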
 #[async_trait::async_trait]
 impl Operation for PutFile {
     async fn call(&self, req: S3Request, _params: Params<'_, '_>) -> S3Result> {
-        warn!("handle PutFile");
-
         let query = {
             if let Some(query) = req.uri.query() {
                 let input: PutFileQuery =

From 01cf4c663dab6b0ff468cf8766e295bbd2ed8604 Mon Sep 17 00:00:00 2001
From: weisd
Date: Fri, 14 Mar 2025 00:35:27 +0800
Subject: [PATCH 4/5] optimize network I/O

Replace the stream-based I/O plumbing with plain AsyncRead/AsyncWrite:
the `reader` crate is removed, `FileReader`/`FileWriter` plus the HTTP
reader/writer and `EtagReader` move into `ecstore::io`, and remote disk
I/O now streams through a 1 MiB duplex buffer. Noisy per-request logs
are downgraded or commented out, and a new Dockerfile plus a reworked
docker-compose bring up a four-node test cluster.

---
 .cargo/config.toml                       |   4 +-
 .gitignore                               |   1 +
 Cargo.lock                               |  17 -
 Cargo.toml                               |   2 -
 Dockerfile                               |  17 +
 docker-compose.yaml                      |  90 +++--
 ecstore/Cargo.toml                       |   1 -
 ecstore/src/bitrot.rs                    |   3 +-
 ecstore/src/bucket/metadata.rs           |   6 +-
 ecstore/src/cache_value/metacache_set.rs |   2 +-
 ecstore/src/config/common.rs             |  22 +-
 ecstore/src/disk/io.rs                   | 121 ------
 ecstore/src/disk/local.rs                |  22 +-
 ecstore/src/disk/mod.rs                  | 434 +-------------------
 ecstore/src/disk/os.rs                   |  16 +-
 ecstore/src/disk/remote.rs               |  70 ++--
 ecstore/src/erasure.rs                   |  39 +-
 ecstore/src/heal/data_scanner.rs         |   4 +-
 ecstore/src/heal/data_usage.rs           |   5 +-
 ecstore/src/heal/data_usage_cache.rs     |   4 +-
 ecstore/src/io.rs                        | 325 ++++++---------
 ecstore/src/metacache/writer.rs          |  15 +-
 ecstore/src/peer_rest_client.rs          |   2 +-
 ecstore/src/pools.rs                     |   2 +-
 ecstore/src/set_disk.rs                  |  99 ++---
 ecstore/src/sets.rs                      |   4 +-
 ecstore/src/store_api.rs                 |  40 +-
 iam/src/store/object.rs                  |   2 +-
 reader/Cargo.toml                        |  24 --
 reader/src/error.rs                      |  12 -
 reader/src/hasher.rs                     | 170 --------
 reader/src/lib.rs                        |   7 -
 reader/src/reader.rs                     | 493 -----------------------
 reader/src/readme.md                     |   5 -
 rustfs/src/admin/rpc.rs                  |   6 +-
 rustfs/src/main.rs                       |   8 +-
 rustfs/src/storage/ecfs.rs               |  18 +-
 37 files changed, 420 insertions(+), 1692 deletions(-)
 create mode 100644 Dockerfile
 delete mode 100644 ecstore/src/disk/io.rs
 delete mode 100644 reader/Cargo.toml
 delete mode 100644 reader/src/error.rs
 delete mode 100644 reader/src/hasher.rs
 delete mode 100644 reader/src/lib.rs
 delete mode 100644 reader/src/reader.rs
 delete mode 100644 reader/src/readme.md

diff --git a/.cargo/config.toml b/.cargo/config.toml
index 624d2724..52d967b7 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -1,2 +1,4 @@
 [target.x86_64-unknown-linux-gnu]
-rustflags = ["-Clink-arg=-fuse-ld=lld"]
+rustflags = [
+    "-C", "link-arg=-fuse-ld=bfd"
+]
diff --git a/.gitignore b/.gitignore
index 45147d58..83b9ef43 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@
 .vscode
 /test
 /logs
+/data
 .devcontainer
 rustfs/static/*
 vendor
diff --git a/Cargo.lock b/Cargo.lock
index b5722b5c..a7d6a71a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1921,7 +1921,6 @@ dependencies = [
  "pin-project-lite",
  "protos",
  "rand 0.8.5",
- "reader",
  "reed-solomon-erasure",
  "regex",
  "reqwest",
@@ -4981,22 +4980,6 @@ version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "20675572f6f24e9e76ef639bc5552774ed45f1c30e2951e1e99c59888861c539"

-[[package]]
-name = "reader"
-version = "0.0.1"
-dependencies = [
- "bytes",
- "futures",
- "hex-simd",
- "md-5",
- "pin-project-lite",
- "s3s",
- "sha2 0.11.0-pre.4",
- "thiserror 2.0.11",
- "tokio",
- "tracing",
-]
-
 [[package]]
 name = "redox_syscall"
 version = "0.2.16"
diff --git a/Cargo.toml b/Cargo.toml
index a78ec0d7..72c912bd 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,7 +7,6 @@ members = [
     "common/common",   # Shared utilities and data structures
     "common/lock",     # Distributed locking implementation
     "common/protos",   # Protocol buffer definitions
-    "reader",          # Object reading service
     "common/workers",  # Worker thread pools and task scheduling
     "iam",             # Identity and Access Management
     "crypto",          # Cryptography and security features
@@ -43,7 +42,6 @@ flatbuffers = "24.12.23"
 futures = "0.3.31"
 futures-util = "0.3.31"
 common = { path = "./common/common" }
-reader = { path = "./reader" }
 hex = "0.4.3"
 hyper = "1.6.0"
 hyper-util = { version = "0.1.10", features = [
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..035a2c08
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,17 @@
+FROM alpine:latest
+
+# RUN apk add --no-cache
+
+WORKDIR /app
+
+RUN mkdir -p /data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
+
+COPY ./target/x86_64-unknown-linux-musl/release/rustfs /app/rustfs
+
+RUN chmod +x /app/rustfs
+
+EXPOSE 9000
+EXPOSE 9001
+
+
+CMD ["/app/rustfs"]
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 5c68534b..bee2f4a0 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -1,74 +1,82 @@
-version: '3.8'
-
 services:
-  node1:
+  node0:
     image: rustfs:v1 # 替换为你的镜像名称和标签
-    container_name: node1
+    container_name: node0
+    hostname: node0
     environment:
-      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
+      - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
       - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
     platform: linux/amd64
     ports:
-      - "9001:9000" # 映射宿主机的 9001 端口到容器的 9000 端口
+      - "9000:9000" # map host port 9000 to container port 9000
+      - "8000:9001" # map host port 8000 to container port 9001
     volumes:
-      - ..:/root/data # 将当前路径挂载到容器内的 /root/data
-    command: "/root/rustfs"
-    networks:
-      - my_network
+      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
+      # - ./data/node0:/data # mount a host data directory into the container
+    command: "/app/rustfs"
+
+  node1:
+    image: rustfs:v1
+    container_name: node1
+    hostname: node1
+    environment:
+      - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
+      - RUSTFS_ADDRESS=0.0.0.0:9000
+      - RUSTFS_CONSOLE_ENABLE=true
+      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
+    platform: linux/amd64
+    ports:
+      - "9001:9000" # map host port 9001 to container port 9000
+    volumes:
+      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
+      # - ./data/node1:/data
+    command: "/app/rustfs"
 
   node2:
     image: rustfs:v1
     container_name: node2
+    hostname: node2
     environment:
-      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
+      - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
       - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
     platform: linux/amd64
     ports:
-      - "9002:9000" # 映射宿主机的 9002 端口到容器的 9000 端口
+      - "9002:9000" # map host port 9002 to container port 9000
     volumes:
-      - ..:/root/data
-    command: "mkdir -p /root/data/target/volume/test{1..4} && /root/data/target/ubuntu22.04/release/rustfs"
-    networks:
-      - my_network
+      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
+      # - ./data/node2:/data
+    command: "/app/rustfs"
 
   node3:
    image: rustfs:v1
     container_name: node3
+    hostname: node3
     environment:
-      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
+      - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
       - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
     platform: linux/amd64
     ports:
-      - "9003:9000" # 映射宿主机的 9003 端口到容器的 9000 端口
+      - "9003:9000" # map host port 9003 to container port 9000
     volumes:
-      - ..:/root/data
-    command: "mkdir -p /root/data/target/volume/test{1..4} && /root/data/target/ubuntu22.04/release/rustfs"
-    networks:
-      - my_network
+      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
+      # - ./data/node3:/data
+    command: "/app/rustfs"
 
-  node4:
-    image: rustfs:v1
-    container_name: node4
-    environment:
-      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
-      - RUSTFS_ADDRESS=0.0.0.0:9000
-      - RUSTFS_CONSOLE_ENABLE=true
-      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
-    platform: linux/amd64
-    ports:
-      - "9004:9000" # 映射宿主机的 9004 端口到容器的 9000 端口
-    volumes:
-      - ..:/root/data
-    command: "mkdir -p /root/data/target/volume/test{1..4} && /root/data/target/ubuntu22.04/release/rustfs"
-    networks:
-      - my_network
-networks:
-  my_network:
-    driver: bridge
\ No newline at end of file
diff --git a/ecstore/Cargo.toml b/ecstore/Cargo.toml
index ab3bd5aa..f7357e91 100644
--- a/ecstore/Cargo.toml
+++ b/ecstore/Cargo.toml
@@ -17,7 +17,6 @@ blake2 = "0.10.6"
 bytes.workspace = true
 common.workspace = true
 chrono.workspace = true
-reader.workspace = true
 glob = "0.3.2"
 thiserror.workspace = true
 flatbuffers.workspace = true
diff --git a/ecstore/src/bitrot.rs b/ecstore/src/bitrot.rs
index b69ef859..849e54b9 100644
--- a/ecstore/src/bitrot.rs
+++ b/ecstore/src/bitrot.rs
@@ -1,7 +1,8 @@
 use crate::{
-    disk::{error::DiskError, Disk, DiskAPI, FileReader, FileWriter},
+    disk::{error::DiskError, Disk, DiskAPI},
     erasure::{ReadAt, Writer},
     error::{Error, Result},
+    io::{FileReader, FileWriter},
     store_api::BitrotAlgorithm,
 };
 use blake2::Blake2b512;
diff --git a/ecstore/src/bucket/metadata.rs b/ecstore/src/bucket/metadata.rs
index 02d25e8f..fc97224e 100644
--- a/ecstore/src/bucket/metadata.rs
+++ b/ecstore/src/bucket/metadata.rs
@@ -14,7 +14,7 @@ use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
 use std::sync::Arc;
 use time::OffsetDateTime;
-use tracing::{error, info};
+use tracing::error;
 
 use crate::config::common::{read_config, save_config};
 use crate::error::{Error, Result};
@@ -311,7 +311,7 @@ impl BucketMetadata {
 
         buf.extend_from_slice(&data);
 
-        save_config(store, self.save_file_path().as_str(), &buf).await?;
+        save_config(store, self.save_file_path().as_str(), buf).await?;
 
         Ok(())
     }
@@ -367,7 +367,7 @@ pub async fn load_bucket_metadata_parse(api: Arc, bucket: &str, parse:
                 return Err(err);
             }
 
-            info!("bucketmeta {} not found with err {:?}, start to init ", bucket, &err);
+            // info!("bucketmeta {} not found with err {:?}, start to init ", bucket, &err);
 
             BucketMetadata::new(bucket)
         }
diff --git a/ecstore/src/cache_value/metacache_set.rs b/ecstore/src/cache_value/metacache_set.rs
index 263b7fa0..401561b6 100644
--- a/ecstore/src/cache_value/metacache_set.rs
+++ b/ecstore/src/cache_value/metacache_set.rs
@@ -164,7 +164,7 @@ pub async fn list_path_raw(mut rx: B_Receiver, opts: ListPathRawOptions) -
             let entry = match r.peek().await {
                 Ok(res) => {
                     if let Some(entry) = res {
-                        info!("read entry disk: {}, name: {}", i, entry.name);
+                        // info!("read entry disk: {}, name: {}", i, entry.name);
                         entry
                     } else {
                         // eof
diff --git a/ecstore/src/config/common.rs b/ecstore/src/config/common.rs
index 14c386cd..837f577a 100644
--- a/ecstore/src/config/common.rs
+++ b/ecstore/src/config/common.rs
@@ -1,6 +1,3 @@
-use std::collections::HashSet;
-use std::sync::Arc;
-
 use super::error::{is_err_config_not_found, ConfigError};
 use super::{storageclass, Config, GLOBAL_StorageClass, KVS};
 use crate::disk::RUSTFS_META_BUCKET;
@@ -10,8 +7,9 @@ use crate::store_err::is_err_object_not_found;
 use crate::utils::path::SLASH_SEPARATOR;
 use http::HeaderMap;
 use lazy_static::lazy_static;
-use s3s::dto::StreamingBlob;
-use s3s::Body;
+use std::collections::HashSet;
+use std::io::Cursor;
+use std::sync::Arc;
 use tracing::{error, warn};
 
 pub const CONFIG_PREFIX: &str = "config";
@@ -59,7 +57,7 @@ pub async fn
read_config_with_metadata( Ok((data, rd.object_info)) } -pub async fn save_config(api: Arc, file: &str, data: &[u8]) -> Result<()> { +pub async fn save_config(api: Arc, file: &str, data: Vec) -> Result<()> { save_config_with_opts( api, file, @@ -96,14 +94,10 @@ pub async fn delete_config(api: Arc, file: &str) -> Result<()> } } -async fn save_config_with_opts(api: Arc, file: &str, data: &[u8], opts: &ObjectOptions) -> Result<()> { +async fn save_config_with_opts(api: Arc, file: &str, data: Vec, opts: &ObjectOptions) -> Result<()> { + let size = data.len(); let _ = api - .put_object( - RUSTFS_META_BUCKET, - file, - &mut PutObjReader::new(StreamingBlob::from(Body::from(data.to_vec())), data.len()), - opts, - ) + .put_object(RUSTFS_META_BUCKET, file, &mut PutObjReader::new(Box::new(Cursor::new(data)), size), opts) .await?; Ok(()) } @@ -174,7 +168,7 @@ async fn save_server_config(api: Arc, cfg: &Config) -> Result< let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR, CONFIG_FILE); - save_config(api, &config_file, data.as_slice()).await + save_config(api, &config_file, data).await } pub async fn lookup_configs(cfg: &mut Config, api: Arc) { diff --git a/ecstore/src/disk/io.rs b/ecstore/src/disk/io.rs deleted file mode 100644 index c0360050..00000000 --- a/ecstore/src/disk/io.rs +++ /dev/null @@ -1,121 +0,0 @@ -use crate::error::Result; -use futures::TryStreamExt; -use std::pin::Pin; -use std::task::Poll; -use tokio::io::AsyncWrite; -use tokio::sync::oneshot; -use tokio_util::io::ReaderStream; -use tokio_util::io::StreamReader; -use tracing::error; -use tracing::warn; - -use super::FileReader; - -#[derive(Debug)] -pub struct HttpFileWriter { - wd: tokio::io::WriteHalf, - err_rx: oneshot::Receiver, -} - -impl HttpFileWriter { - pub fn new(url: &str, disk: &str, volume: &str, path: &str, size: usize, append: bool) -> Result { - let (rd, wd) = tokio::io::simplex(4096); - - let (err_tx, err_rx) = oneshot::channel::(); - - let body = reqwest::Body::wrap_stream(ReaderStream::new(rd)); - - let url = url.to_owned(); - let disk = disk.to_owned(); - let volume = volume.to_owned(); - let path = path.to_owned(); - - tokio::spawn(async move { - let client = reqwest::Client::new(); - if let Err(err) = client - .put(format!( - "{}/rustfs/rpc/put_file_stream?disk={}&volume={}&path={}&append={}&size={}", - url, - urlencoding::encode(&disk), - urlencoding::encode(&volume), - urlencoding::encode(&path), - append, - size - )) - .body(body) - .send() - .await - .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) - { - error!("HttpFileWriter put file err: {:?}", err); - - if let Err(er) = err_tx.send(err) { - error!("HttpFileWriter tx.send err: {:?}", er); - } - // return; - } - - // error!("http write done {}", path); - }); - - Ok(Self { - wd, - err_rx, - // client: reqwest::Client::new(), - // url: url.to_string(), - // disk: disk.to_string(), - // volume: volume.to_string(), - }) - } -} - -impl AsyncWrite for HttpFileWriter { - #[tracing::instrument(level = "debug", skip(self, buf))] - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &[u8], - ) -> Poll> { - if let Ok(err) = self.as_mut().err_rx.try_recv() { - return Poll::Ready(Err(err)); - } - - Pin::new(&mut self.wd).poll_write(cx, buf) - } - - #[tracing::instrument(level = "debug", skip(self))] - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { - Pin::new(&mut self.wd).poll_flush(cx) - } - - #[tracing::instrument(level = "debug", skip(self))] - fn poll_shutdown(mut 
self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> {
-        Pin::new(&mut self.wd).poll_shutdown(cx)
-    }
-}
-
-pub async fn new_http_reader(
-    url: &str,
-    disk: &str,
-    volume: &str,
-    path: &str,
-    offset: usize,
-    length: usize,
-) -> Result {
-    let resp = reqwest::Client::new()
-        .get(format!(
-            "{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}",
-            url,
-            urlencoding::encode(disk),
-            urlencoding::encode(volume),
-            urlencoding::encode(path),
-            offset,
-            length
-        ))
-        .send()
-        .await?;
-
-    let inner = StreamReader::new(resp.bytes_stream().map_err(std::io::Error::other));
-
-    Ok(Box::new(inner))
-}
diff --git a/ecstore/src/disk/local.rs b/ecstore/src/disk/local.rs
index 4690450c..69b1717c 100644
--- a/ecstore/src/disk/local.rs
+++ b/ecstore/src/disk/local.rs
@@ -5,9 +5,9 @@ use super::error::{
 use super::os::{is_root_disk, rename_all};
 use super::{endpoint::Endpoint, error::DiskError, format::FormatV3};
 use super::{
-    os, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskMetrics, FileInfoVersions,
-    FileReader, FileWriter, Info, MetaCacheEntry, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp,
-    UpdateMetadataOpts, VolumeInfo, WalkDirOptions, BUCKET_META_PREFIX, RUSTFS_META_BUCKET, STORAGE_FORMAT_FILE_BACKUP,
+    os, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskMetrics, FileInfoVersions, Info,
+    MetaCacheEntry, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo,
+    WalkDirOptions, BUCKET_META_PREFIX, RUSTFS_META_BUCKET, STORAGE_FORMAT_FILE_BACKUP,
 };
 use crate::bitrot::bitrot_verify;
 use crate::bucket::metadata_sys::{self};
@@ -27,6 +27,7 @@ use crate::heal::data_usage_cache::{DataUsageCache, DataUsageEntry};
 use crate::heal::error::{ERR_IGNORE_FILE_CONTRIB, ERR_SKIP_FILE};
 use crate::heal::heal_commands::{HealScanMode, HealingTracker};
 use crate::heal::heal_ops::HEALING_TRACKER_FILENAME;
+use crate::io::{FileReader, FileWriter};
 use crate::metacache::writer::MetacacheWriter;
 use crate::new_object_layer_fn;
 use crate::set_disk::{
@@ -326,7 +327,7 @@ impl LocalDisk {
             }
         }
 
-        // FIXME: 先清空回收站吧,有时间再添加判断逻辑
+        // TODO: optimize. FIXME: for now just empty the trash; add the check logic later
 
         if let Err(err) = {
             if trash_path.is_dir() {
@@ -1523,7 +1524,7 @@ impl DiskAPI for LocalDisk {
     // TODO: io verifier
     #[tracing::instrument(level = "debug", skip(self))]
     async fn read_file(&self, volume: &str, path: &str) -> Result {
-        warn!("disk read_file: volume: {}, path: {}", volume, path);
+        // warn!("disk read_file: volume: {}, path: {}", volume, path);
 
         let volume_dir = self.get_bucket_path(volume)?;
         if !skip_access_checks(volume) {
             if let Err(e) = utils::fs::access(&volume_dir).await {
@@ -1557,10 +1558,10 @@
 
     #[tracing::instrument(level = "debug", skip(self))]
     async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result {
-        warn!(
-            "disk read_file_stream: volume: {}, path: {}, offset: {}, length: {}",
-            volume, path, offset, length
-        );
+        // warn!(
+        //     "disk read_file_stream: volume: {}, path: {}, offset: {}, length: {}",
+        //     volume, path, offset, length
+        // );
 
         let volume_dir = self.get_bucket_path(volume)?;
         if !skip_access_checks(volume) {
@@ -1748,7 +1749,7 @@ impl DiskAPI for LocalDisk {
                     return Err(os_err_to_file_err(e));
                 }
 
-                info!("read xl.meta failed, dst_file_path: {:?}, err: {:?}", dst_file_path, e);
+                // info!("read xl.meta failed, dst_file_path: {:?}, err: {:?}", dst_file_path, e);
                 None
             }
         };
@@ -2247,7
+2248,6 @@ impl DiskAPI for LocalDisk { } async fn delete_volume(&self, volume: &str) -> Result<()> { - info!("delete_volume, volume: {}", volume); let p = self.get_bucket_path(volume)?; // TODO: 不能用递归删除,如果目录下面有文件,返回errVolumeNotEmpty diff --git a/ecstore/src/disk/mod.rs b/ecstore/src/disk/mod.rs index a708977e..8d737e92 100644 --- a/ecstore/src/disk/mod.rs +++ b/ecstore/src/disk/mod.rs @@ -1,7 +1,6 @@ pub mod endpoint; pub mod error; pub mod format; -pub mod io; pub mod local; pub mod os; pub mod remote; @@ -24,6 +23,7 @@ use crate::{ data_usage_cache::{DataUsageCache, DataUsageEntry}, heal_commands::{HealScanMode, HealingTracker}, }, + io::{FileReader, FileWriter}, store_api::{FileInfo, ObjectInfo, RawFileInfo}, utils::path::SLASH_SEPARATOR, }; @@ -35,11 +35,7 @@ use remote::RemoteDisk; use serde::{Deserialize, Serialize}; use std::{cmp::Ordering, fmt::Debug, path::PathBuf, sync::Arc}; use time::OffsetDateTime; -use tokio::{ - io::{AsyncRead, AsyncWrite}, - sync::mpsc::Sender, -}; -use tracing::info; +use tokio::{io::AsyncWrite, sync::mpsc::Sender}; use tracing::warn; use uuid::Uuid; @@ -328,7 +324,6 @@ impl DiskAPI for Disk { } async fn delete_volume(&self, volume: &str) -> Result<()> { - info!("delete_volume, volume: {}", volume); match self { Disk::Local(local_disk) => local_disk.delete_volume(volume).await, Disk::Remote(remote_disk) => remote_disk.delete_volume(volume).await, @@ -349,7 +344,6 @@ impl DiskAPI for Disk { scan_mode: HealScanMode, we_sleep: ShouldSleepFn, ) -> Result { - info!("ns_scanner"); match self { Disk::Local(local_disk) => local_disk.ns_scanner(cache, updates, scan_mode, we_sleep).await, Disk::Remote(remote_disk) => remote_disk.ns_scanner(cache, updates, scan_mode, we_sleep).await, @@ -374,9 +368,6 @@ pub async fn new_disk(ep: &endpoint::Endpoint, opt: &DiskOption) -> Result; -pub type FileWriter = Box; - #[async_trait::async_trait] pub trait DiskAPI: Debug + Send + Sync + 'static { fn to_string(&self) -> String; @@ -1184,20 +1175,6 @@ pub struct ReadMultipleResp { pub mod_time: Option, } -// impl Default for ReadMultipleResp { -// fn default() -> Self { -// Self { -// bucket: String::new(), -// prefix: String::new(), -// file: String::new(), -// exists: false, -// error: String::new(), -// data: Vec::new(), -// mod_time: OffsetDateTime::UNIX_EPOCH, -// } -// } -// } - #[derive(Debug, Deserialize, Serialize)] pub struct VolumeInfo { pub name: String, @@ -1210,410 +1187,3 @@ pub struct ReadOptions { pub read_data: bool, pub healing: bool, } - -// pub struct FileWriter { -// pub inner: Pin>, -// } - -// impl AsyncWrite for FileWriter { -// fn poll_write( -// mut self: Pin<&mut Self>, -// cx: &mut std::task::Context<'_>, -// buf: &[u8], -// ) -> std::task::Poll> { -// Pin::new(&mut self.inner).poll_write(cx, buf) -// } - -// fn poll_flush( -// mut self: Pin<&mut Self>, -// cx: &mut std::task::Context<'_>, -// ) -> std::task::Poll> { -// Pin::new(&mut self.inner).poll_flush(cx) -// } - -// fn poll_shutdown( -// mut self: Pin<&mut Self>, -// cx: &mut std::task::Context<'_>, -// ) -> std::task::Poll> { -// Pin::new(&mut self.inner).poll_shutdown(cx) -// } -// } - -// impl FileWriter { -// pub fn new(inner: W) -> Self -// where -// W: AsyncWrite + Send + Sync + 'static, -// { -// Self { inner: Box::pin(inner) } -// } -// } - -// #[derive(Debug)] -// pub struct BufferWriter { -// pub inner: Vec, -// } - -// impl BufferWriter { -// pub fn new(inner: Vec) -> Self { -// Self { inner } -// } -// #[allow(clippy::should_implement_trait)] -// pub fn as_ref(&self) -> &[u8] { -// 
self.inner.as_ref() -// } -// } - -// #[async_trait::async_trait] -// impl Writer for BufferWriter { -// fn as_any(&self) -> &dyn Any { -// self -// } - -// async fn write(&mut self, buf: &[u8]) -> Result<()> { -// let _ = self.inner.write(buf).await?; -// self.inner.flush().await?; - -// Ok(()) -// } -// } - -// #[derive(Debug)] -// pub struct LocalFileWriter { -// pub inner: File, -// } - -// impl LocalFileWriter { -// pub fn new(inner: File) -> Self { -// Self { inner } -// } -// } - -// #[async_trait::async_trait] -// impl Writer for LocalFileWriter { -// fn as_any(&self) -> &dyn Any { -// self -// } - -// async fn write(&mut self, buf: &[u8]) -> Result<()> { -// let _ = self.inner.write(buf).await?; -// self.inner.flush().await?; - -// Ok(()) -// } -// } - -// type NodeClient = NodeServiceClient< -// InterceptedService) -> Result, Status> + Send + Sync + 'static>>, -// >; - -// #[derive(Debug)] -// pub struct RemoteFileWriter { -// pub endpoint: Endpoint, -// pub volume: String, -// pub path: String, -// pub is_append: bool, -// tx: Sender, -// resp_stream: Streaming, -// } - -// impl RemoteFileWriter { -// pub async fn new(endpoint: Endpoint, volume: String, path: String, is_append: bool, mut client: NodeClient) -> Result { -// let (tx, rx) = mpsc::channel(128); -// let in_stream = ReceiverStream::new(rx); - -// let response = client.write_stream(in_stream).await.unwrap(); - -// let resp_stream = response.into_inner(); - -// Ok(Self { -// endpoint, -// volume, -// path, -// is_append, -// tx, -// resp_stream, -// }) -// } -// } - -// #[async_trait::async_trait] -// impl Writer for RemoteFileWriter { -// fn as_any(&self) -> &dyn Any { -// self -// } - -// async fn write(&mut self, buf: &[u8]) -> Result<()> { -// let request = WriteRequest { -// disk: self.endpoint.to_string(), -// volume: self.volume.to_string(), -// path: self.path.to_string(), -// is_append: self.is_append, -// data: buf.to_vec(), -// }; -// self.tx.send(request).await?; - -// if let Some(resp) = self.resp_stream.next().await { -// // match resp { -// // Ok(resp) => { -// // if resp.success { -// // info!("write stream success"); -// // } else { -// // info!("write stream failed: {}", resp.error_info.unwrap_or("".to_string())); -// // } -// // } -// // Err(_err) => { - -// // } -// // } -// let resp = resp?; -// if resp.success { -// info!("write stream success"); -// } else { -// return if let Some(err) = &resp.error { -// Err(proto_err_to_err(err)) -// } else { -// Err(Error::from_string("")) -// }; -// } -// } else { -// let error_info = "can not get response"; -// info!("write stream failed: {}", error_info); -// return Err(Error::from_string(error_info)); -// } - -// Ok(()) -// } -// } - -// #[async_trait::async_trait] -// pub trait Reader { -// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result; -// // async fn seek(&mut self, offset: usize) -> Result<()>; -// // async fn read_exact(&mut self, buf: &mut [u8]) -> Result; -// } - -// #[async_trait::async_trait] -// impl Reader for FileReader { -// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { -// match self { -// Self::Local(reader) => reader.read_at(offset, buf).await, -// Self::Remote(reader) => reader.read_at(offset, buf).await, -// Self::Buffer(reader) => reader.read_at(offset, buf).await, -// Self::Http(reader) => reader.read_at(offset, buf).await, -// } -// } -// // async fn seek(&mut self, offset: usize) -> Result<()> { -// // match self { -// // Self::Local(reader) => reader.seek(offset).await, -// // 
Self::Remote(reader) => reader.seek(offset).await, -// // Self::Buffer(reader) => reader.seek(offset).await, -// // } -// // } -// // async fn read_exact(&mut self, buf: &mut [u8]) -> Result { -// // match self { -// // Self::Local(reader) => reader.read_exact(buf).await, -// // Self::Remote(reader) => reader.read_exact(buf).await, -// // Self::Buffer(reader) => reader.read_exact(buf).await, -// // } -// // } -// } - -// #[derive(Debug)] -// pub struct BufferReader { -// pub inner: Cursor>, -// remaining: usize, -// } - -// impl BufferReader { -// pub fn new(inner: Vec, offset: usize, read_length: usize) -> Self { -// let mut cur = Cursor::new(inner); -// cur.set_position(offset as u64); -// Self { -// inner: cur, -// remaining: offset + read_length, -// } -// } -// } - -// impl AsyncRead for BufferReader { -// #[tracing::instrument(level = "debug", skip(self, buf))] -// fn poll_read( -// mut self: Pin<&mut Self>, -// cx: &mut std::task::Context<'_>, -// buf: &mut tokio::io::ReadBuf<'_>, -// ) -> std::task::Poll> { -// match Pin::new(&mut self.inner).poll_read(cx, buf) { -// Poll::Ready(Ok(_)) => { -// if self.inner.position() as usize >= self.remaining { -// self.remaining -= buf.filled().len(); -// Poll::Ready(Ok(())) -// } else { -// Poll::Pending -// } -// } -// Poll::Ready(Err(err)) => Poll::Ready(Err(err)), -// Poll::Pending => Poll::Pending, -// } -// } -// } - -// #[async_trait::async_trait] -// impl Reader for BufferReader { -// #[tracing::instrument(level = "debug", skip(self, buf))] -// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { -// if self.pos != offset { -// self.inner.set_position(offset as u64); -// } -// self.inner.read_exact(buf).await?; -// self.pos += buf.len(); -// Ok(buf.len()) -// } -// // #[tracing::instrument(level = "debug", skip(self))] -// // async fn seek(&mut self, offset: usize) -> Result<()> { -// // if self.pos != offset { -// // self.inner.set_position(offset as u64); -// // } - -// // Ok(()) -// // } -// // #[tracing::instrument(level = "debug", skip(self))] -// // async fn read_exact(&mut self, buf: &mut [u8]) -> Result { -// // let bytes_read = self.inner.read_exact(buf).await?; -// // self.pos += buf.len(); -// // Ok(bytes_read) -// // } -// } - -// #[derive(Debug)] -// pub struct LocalFileReader { -// pub inner: File, -// // pos: usize, -// } - -// impl LocalFileReader { -// pub fn new(inner: File) -> Self { -// Self { inner } -// } -// } - -// #[async_trait::async_trait] -// impl Reader for LocalFileReader { -// #[tracing::instrument(level = "debug", skip(self, buf))] -// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { -// if self.pos != offset { -// self.inner.seek(SeekFrom::Start(offset as u64)).await?; -// self.pos = offset; -// } -// self.inner.read_exact(buf).await?; -// self.pos += buf.len(); -// Ok(buf.len()) -// } - -// // #[tracing::instrument(level = "debug", skip(self))] -// // async fn seek(&mut self, offset: usize) -> Result<()> { -// // if self.pos != offset { -// // self.inner.seek(SeekFrom::Start(offset as u64)).await?; -// // self.pos = offset; -// // } - -// // Ok(()) -// // } -// // #[tracing::instrument(level = "debug", skip(self, buf))] -// // async fn read_exact(&mut self, buf: &mut [u8]) -> Result { -// // let bytes_read = self.inner.read_exact(buf).await?; -// // self.pos += buf.len(); -// // Ok(bytes_read) -// // } -// } - -// impl AsyncRead for LocalFileReader { -// #[tracing::instrument(level = "debug", skip(self, buf))] -// fn poll_read( -// mut self: Pin<&mut Self>, -// 
cx: &mut std::task::Context<'_>, -// buf: &mut tokio::io::ReadBuf<'_>, -// ) -> std::task::Poll> { -// Pin::new(&mut self.inner).poll_read(cx, buf) -// } -// } - -// #[derive(Debug)] -// pub struct RemoteFileReader { -// pub endpoint: Endpoint, -// pub volume: String, -// pub path: String, -// tx: Sender, -// resp_stream: Streaming, -// } - -// impl RemoteFileReader { -// pub async fn new(endpoint: Endpoint, volume: String, path: String, mut client: NodeClient) -> Result { -// let (tx, rx) = mpsc::channel(128); -// let in_stream = ReceiverStream::new(rx); - -// let response = client.read_at(in_stream).await.unwrap(); - -// let resp_stream = response.into_inner(); - -// Ok(Self { -// endpoint, -// volume, -// path, -// tx, -// resp_stream, -// }) -// } -// } - -// #[async_trait::async_trait] -// impl Reader for RemoteFileReader { -// async fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> Result { -// let request = ReadAtRequest { -// disk: self.endpoint.to_string(), -// volume: self.volume.to_string(), -// path: self.path.to_string(), -// offset: offset.try_into().unwrap(), -// // length: length.try_into().unwrap(), -// length: buf.len().try_into().unwrap(), -// }; -// self.tx.send(request).await?; - -// if let Some(resp) = self.resp_stream.next().await { -// let resp = resp?; -// if resp.success { -// info!("read at stream success"); - -// buf.copy_from_slice(&resp.data); - -// Ok(resp.read_size.try_into().unwrap()) -// } else { -// return if let Some(err) = &resp.error { -// Err(proto_err_to_err(err)) -// } else { -// Err(Error::from_string("")) -// }; -// } -// } else { -// let error_info = "can not get response"; -// info!("read at stream failed: {}", error_info); -// Err(Error::from_string(error_info)) -// } -// } -// // async fn seek(&mut self, _offset: usize) -> Result<()> { -// // unimplemented!() -// // } -// // async fn read_exact(&mut self, _buf: &mut [u8]) -> Result { -// // unimplemented!() -// // } -// } - -// impl AsyncRead for RemoteFileReader { -// #[tracing::instrument(level = "debug", skip(self, buf))] -// fn poll_read( -// mut self: Pin<&mut Self>, -// cx: &mut std::task::Context<'_>, -// buf: &mut tokio::io::ReadBuf<'_>, -// ) -> std::task::Poll> { -// unimplemented!("poll_read") -// } -// } diff --git a/ecstore/src/disk/os.rs b/ecstore/src/disk/os.rs index fd5e043d..175052cb 100644 --- a/ecstore/src/disk/os.rs +++ b/ecstore/src/disk/os.rs @@ -141,13 +141,15 @@ pub async fn reliable_rename( } // need remove dst path if let Err(err) = utils::fs::remove_all(dst_file_path.as_ref()).await { - info!( - "reliable_rename rm dst failed. src_file_path: {:?}, dst_file_path: {:?}, base_dir: {:?}, err: {:?}", - src_file_path.as_ref(), - dst_file_path.as_ref(), - base_dir.as_ref(), - err - ); + if err.kind() != io::ErrorKind::NotFound { + info!( + "reliable_rename rm dst failed. 
src_file_path: {:?}, dst_file_path: {:?}, base_dir: {:?}, err: {:?}", + src_file_path.as_ref(), + dst_file_path.as_ref(), + base_dir.as_ref(), + err + ); + } } let mut i = 0; loop { diff --git a/ecstore/src/disk/remote.rs b/ecstore/src/disk/remote.rs index 17c4c992..43f02832 100644 --- a/ecstore/src/disk/remote.rs +++ b/ecstore/src/disk/remote.rs @@ -23,8 +23,8 @@ use uuid::Uuid; use super::{ endpoint::Endpoint, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, - FileInfoVersions, FileReader, FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, - VolumeInfo, WalkDirOptions, + FileInfoVersions, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, + WalkDirOptions, }; use crate::{ disk::error::DiskError, @@ -36,11 +36,11 @@ use crate::{ }, store_api::{FileInfo, RawFileInfo}, }; +use crate::{disk::MetaCacheEntry, metacache::writer::MetacacheWriter}; use crate::{ - disk::io::{new_http_reader, HttpFileWriter}, + io::{FileReader, FileWriter, HttpFileReader, HttpFileWriter}, utils::proto_err_to_err, }; -use crate::{disk::MetaCacheEntry, metacache::writer::MetacacheWriter}; use protos::proto_gen::node_service::RenamePartRequst; #[derive(Debug)] @@ -135,7 +135,7 @@ impl DiskAPI for RemoteDisk { } async fn read_all(&self, volume: &str, path: &str) -> Result> { - info!("read_all"); + info!("read_all {}/{}", volume, path); let mut client = node_service_time_out_client(&self.addr) .await .map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?; @@ -147,8 +147,6 @@ impl DiskAPI for RemoteDisk { let response = client.read_all(request).await?.into_inner(); - info!("read_all success"); - if !response.success { return Err(Error::new(DiskError::FileNotFound)); } @@ -182,7 +180,7 @@ impl DiskAPI for RemoteDisk { } async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()> { - info!("delete"); + info!("delete {}/{}/{}", self.endpoint.to_string(), volume, path); let options = serde_json::to_string(&opt)?; let mut client = node_service_time_out_client(&self.addr) .await @@ -264,7 +262,7 @@ impl DiskAPI for RemoteDisk { } async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Vec) -> Result<()> { - info!("rename_part"); + info!("rename_part {}/{}", src_volume, src_path); let mut client = node_service_time_out_client(&self.addr) .await .map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?; @@ -318,7 +316,7 @@ impl DiskAPI for RemoteDisk { #[tracing::instrument(level = "debug", skip(self))] async fn create_file(&self, _origvolume: &str, volume: &str, path: &str, file_size: usize) -> Result { - info!("create_file"); + info!("create_file {}/{}/{}", self.endpoint.to_string(), volume, path); Ok(Box::new(HttpFileWriter::new( self.endpoint.grid_host().as_str(), self.endpoint.to_string().as_str(), @@ -331,7 +329,7 @@ impl DiskAPI for RemoteDisk { #[tracing::instrument(level = "debug", skip(self))] async fn append_file(&self, volume: &str, path: &str) -> Result { - info!("append_file"); + info!("append_file {}/{}", volume, path); Ok(Box::new(HttpFileWriter::new( self.endpoint.grid_host().as_str(), self.endpoint.to_string().as_str(), @@ -344,25 +342,31 @@ impl DiskAPI for RemoteDisk { #[tracing::instrument(level = "debug", skip(self))] async fn read_file(&self, volume: &str, path: &str) -> Result { - info!("read_file"); - Ok(new_http_reader(self.endpoint.grid_host().as_str(), 
self.endpoint.to_string().as_str(), volume, path, 0, 0).await?) + info!("read_file {}/{}", volume, path); + Ok(Box::new( + HttpFileReader::new(self.endpoint.grid_host().as_str(), self.endpoint.to_string().as_str(), volume, path, 0, 0) + .await?, + )) } #[tracing::instrument(level = "debug", skip(self))] async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result { - Ok(new_http_reader( - self.endpoint.grid_host().as_str(), - self.endpoint.to_string().as_str(), - volume, - path, - offset, - length, - ) - .await?) + info!("read_file_stream {}/{}/{}", self.endpoint.to_string(), volume, path); + Ok(Box::new( + HttpFileReader::new( + self.endpoint.grid_host().as_str(), + self.endpoint.to_string().as_str(), + volume, + path, + offset, + length, + ) + .await?, + )) } async fn list_dir(&self, _origvolume: &str, volume: &str, _dir_path: &str, _count: i32) -> Result> { - info!("list_dir"); + info!("list_dir {}/{}", volume, _dir_path); let mut client = node_service_time_out_client(&self.addr) .await .map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?; @@ -386,7 +390,8 @@ impl DiskAPI for RemoteDisk { // FIXME: TODO: use writer async fn walk_dir(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()> { - info!("walk_dir"); + let now = std::time::SystemTime::now(); + info!("walk_dir {}/{}/{:?}", self.endpoint.to_string(), opts.bucket, opts.filter_prefix); let mut wr = wr; let mut out = MetacacheWriter::new(&mut wr); let mut buf = Vec::new(); @@ -415,6 +420,12 @@ impl DiskAPI for RemoteDisk { } } + info!( + "walk_dir {}/{:?} done {:?}", + opts.bucket, + opts.filter_prefix, + now.elapsed().unwrap_or_default() + ); Ok(()) } @@ -426,7 +437,7 @@ impl DiskAPI for RemoteDisk { dst_volume: &str, dst_path: &str, ) -> Result { - info!("rename_data"); + info!("rename_data {}/{}/{}/{}", self.addr, self.endpoint.to_string(), dst_volume, dst_path); let file_info = serde_json::to_string(&fi)?; let mut client = node_service_time_out_client(&self.addr) .await @@ -608,7 +619,7 @@ impl DiskAPI for RemoteDisk { } async fn write_metadata(&self, _org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()> { - info!("write_metadata"); + info!("write_metadata {}/{}", volume, path); let file_info = serde_json::to_string(&fi)?; let mut client = node_service_time_out_client(&self.addr) .await @@ -670,7 +681,7 @@ impl DiskAPI for RemoteDisk { } async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result { - info!("read_xl"); + info!("read_xl {}/{}/{}", self.endpoint.to_string(), volume, path); let mut client = node_service_time_out_client(&self.addr) .await .map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?; @@ -779,7 +790,7 @@ impl DiskAPI for RemoteDisk { } async fn read_multiple(&self, req: ReadMultipleReq) -> Result> { - info!("read_multiple"); + info!("read_multiple {}/{}/{}", self.endpoint.to_string(), req.bucket, req.prefix); let read_multiple_req = serde_json::to_string(&req)?; let mut client = node_service_time_out_client(&self.addr) .await @@ -809,7 +820,7 @@ impl DiskAPI for RemoteDisk { } async fn delete_volume(&self, volume: &str) -> Result<()> { - info!("delete_volume"); + info!("delete_volume {}/{}", self.endpoint.to_string(), volume); let mut client = node_service_time_out_client(&self.addr) .await .map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?; @@ -832,7 +843,6 @@ impl DiskAPI for RemoteDisk { } async fn disk_info(&self, opts: &DiskInfoOptions) -> Result { 
-        info!("delete_volume");
         let opts = serde_json::to_string(&opts)?;
         let mut client = node_service_time_out_client(&self.addr)
             .await
diff --git a/ecstore/src/erasure.rs b/ecstore/src/erasure.rs
index 0056d4bf..83d1d9ae 100644
--- a/ecstore/src/erasure.rs
+++ b/ecstore/src/erasure.rs
@@ -1,13 +1,11 @@
 use crate::bitrot::{BitrotReader, BitrotWriter};
-use crate::error::{Error, Result, StdError};
+use crate::error::{Error, Result};
 use crate::quorum::{object_op_ignored_errs, reduce_write_quorum_errs};
-use bytes::Bytes;
 use futures::future::join_all;
-use futures::{pin_mut, Stream, StreamExt};
 use reed_solomon_erasure::galois_8::ReedSolomon;
 use std::any::Any;
 use std::io::ErrorKind;
-use tokio::io::DuplexStream;
+use tokio::io::{AsyncRead, AsyncWrite};
 use tokio::io::{AsyncReadExt, AsyncWriteExt};
 use tracing::warn;
 use tracing::{error, info};
@@ -49,22 +47,22 @@ impl Erasure {
         }
     }
 
-    #[tracing::instrument(level = "debug", skip(self, body, writers))]
+    #[tracing::instrument(level = "debug", skip(self, reader, writers))]
     pub async fn encode(
         &mut self,
-        body: S,
+        reader: &mut S,
         writers: &mut [Option],
         // block_size: usize,
         total_size: usize,
         write_quorum: usize,
     ) -> Result
     where
-        S: Stream> + Send + Sync,
+        S: AsyncRead + Unpin + Send + 'static,
     {
-        pin_mut!(body);
-        let mut reader = tokio_util::io::StreamReader::new(
-            body.map(|f| f.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))),
-        );
+        // pin_mut!(body);
+        // let mut reader = tokio_util::io::StreamReader::new(
+        //     body.map(|f| f.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))),
+        // );
 
         let mut total: usize = 0;
@@ -101,6 +99,7 @@ impl Erasure {
             let blocks = self.encode_data(&self.buf)?;
 
             let mut errs = Vec::new();
+            // TODO: write the shards concurrently
             for (i, w_op) in writers.iter_mut().enumerate() {
                 if let Some(w) = w_op {
                     match w.write(blocks[i].as_ref()).await {
@@ -204,14 +203,17 @@ impl Erasure {
         // Ok(total)
     }
 
-    pub async fn decode(
+    pub async fn decode(
         &self,
-        writer: &mut DuplexStream,
+        writer: &mut W,
         readers: Vec>,
         offset: usize,
         length: usize,
         total_length: usize,
-    ) -> (usize, Option) {
+    ) -> (usize, Option)
+    where
+        W: AsyncWriteExt + Send + Unpin + 'static,
+    {
         if length == 0 {
             return (0, None);
         }
@@ -281,14 +283,17 @@ impl Erasure {
         (bytes_writed, None)
     }
 
-    async fn write_data_blocks(
+    async fn write_data_blocks(
         &self,
-        writer: &mut DuplexStream,
+        writer: &mut W,
         bufs: Vec>>,
         data_blocks: usize,
         offset: usize,
         length: usize,
-    ) -> Result {
+    ) -> Result
+    where
+        W: AsyncWrite + Send + Unpin + 'static,
+    {
         if bufs.len() < data_blocks {
             return Err(Error::msg("read bufs not match data_blocks"));
         }
diff --git a/ecstore/src/heal/data_scanner.rs b/ecstore/src/heal/data_scanner.rs
index 05b883af..66d796ce 100644
--- a/ecstore/src/heal/data_scanner.rs
+++ b/ecstore/src/heal/data_scanner.rs
@@ -217,7 +217,7 @@ async fn run_data_scanner() {
                 globalScannerMetrics.write().await.set_cycle(Some(cycle_info.clone())).await;
                 let mut wr = Vec::new();
                 cycle_info.serialize(&mut Serializer::new(&mut wr)).unwrap();
-                let _ = save_config(store.clone(), &DATA_USAGE_BLOOM_NAME_PATH, &wr).await;
+                let _ = save_config(store.clone(), &DATA_USAGE_BLOOM_NAME_PATH, wr).await;
             }
             Err(err) => {
                 info!("ns_scanner failed: {:?}", err);
@@ -268,7 +268,7 @@ async fn save_background_heal_info(store: Arc, info: &BackgroundHealInf
         Ok(info) => info,
         Err(_) => return,
     };
-    let _ = save_config(store, &BACKGROUND_HEAL_INFO_PATH, &b).await;
+    let _ = save_config(store, &BACKGROUND_HEAL_INFO_PATH, b).await;
 }
 
 async fn
get_cycle_scan_mode(current_cycle: u64, bitrot_start_cycle: u64, bitrot_start_time: SystemTime) -> HealScanMode { diff --git a/ecstore/src/heal/data_usage.rs b/ecstore/src/heal/data_usage.rs index 5460de3c..ef569d6a 100644 --- a/ecstore/src/heal/data_usage.rs +++ b/ecstore/src/heal/data_usage.rs @@ -124,10 +124,11 @@ pub async fn store_data_usage_in_backend(mut rx: Receiver) { Some(data_usage_info) => { if let Ok(data) = serde_json::to_vec(&data_usage_info) { if attempts > 10 { - let _ = save_config(store.clone(), &format!("{}{}", *DATA_USAGE_OBJ_NAME_PATH, ".bkp"), &data).await; + let _ = + save_config(store.clone(), &format!("{}{}", *DATA_USAGE_OBJ_NAME_PATH, ".bkp"), data.clone()).await; attempts += 1; } - let _ = save_config(store.clone(), &DATA_USAGE_OBJ_NAME_PATH, &data).await; + let _ = save_config(store.clone(), &DATA_USAGE_OBJ_NAME_PATH, data).await; attempts += 1; } else { continue; diff --git a/ecstore/src/heal/data_usage_cache.rs b/ecstore/src/heal/data_usage_cache.rs index 2e459e2a..6b336790 100644 --- a/ecstore/src/heal/data_usage_cache.rs +++ b/ecstore/src/heal/data_usage_cache.rs @@ -458,9 +458,9 @@ impl DataUsageCache { let name_clone = name.clone(); tokio::spawn(async move { - let _ = save_config(store_clone, &format!("{}{}", &name_clone, ".bkp"), &buf_clone).await; + let _ = save_config(store_clone, &format!("{}{}", &name_clone, ".bkp"), buf_clone).await; }); - save_config(store, &name, &buf).await + save_config(store, &name, buf).await } pub fn replace(&mut self, path: &str, parent: &str, e: DataUsageEntry) { diff --git a/ecstore/src/io.rs b/ecstore/src/io.rs index 7c149345..764c8834 100644 --- a/ecstore/src/io.rs +++ b/ecstore/src/io.rs @@ -1,226 +1,153 @@ -use std::io::Read; -use std::io::Write; +use futures::TryStreamExt; +use md5::Digest; +use md5::Md5; use std::pin::Pin; -use std::task::{Context, Poll}; -use tokio::fs::File; -use tokio::io::{self, AsyncRead, AsyncWrite, ReadBuf}; +use std::task::Context; +use std::task::Poll; +use tokio::io::AsyncRead; +use tokio::io::AsyncWrite; +use tokio::io::ReadBuf; +use tokio::sync::oneshot; +use tokio_util::io::ReaderStream; +use tokio_util::io::StreamReader; +use tracing::error; +use tracing::warn; -pub enum Reader { - File(File), - Buffer(VecAsyncReader), +pub type FileReader = Box; +pub type FileWriter = Box; + +pub const READ_BUFFER_SIZE: usize = 1024 * 1024; + +#[derive(Debug)] +pub struct HttpFileWriter { + wd: tokio::io::DuplexStream, + err_rx: oneshot::Receiver, } -impl AsyncRead for Reader { - fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { - match self.get_mut() { - Reader::File(file) => Pin::new(file).poll_read(cx, buf), - Reader::Buffer(buffer) => Pin::new(buffer).poll_read(cx, buf), - } +impl HttpFileWriter { + pub fn new(url: &str, disk: &str, volume: &str, path: &str, size: usize, append: bool) -> std::io::Result { + let (rd, wd) = tokio::io::duplex(READ_BUFFER_SIZE); + + let (err_tx, err_rx) = oneshot::channel::(); + + let body = reqwest::Body::wrap_stream(ReaderStream::with_capacity(rd, READ_BUFFER_SIZE)); + + let url = url.to_owned(); + let disk = disk.to_owned(); + let volume = volume.to_owned(); + let path = path.to_owned(); + + tokio::spawn(async move { + let client = reqwest::Client::new(); + if let Err(err) = client + .put(format!( + "{}/rustfs/rpc/put_file_stream?disk={}&volume={}&path={}&append={}&size={}", + url, + urlencoding::encode(&disk), + urlencoding::encode(&volume), + urlencoding::encode(&path), + append, + size + )) + .body(body) + .send() + .await 
+ .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) + { + error!("HttpFileWriter put file err: {:?}", err); + + if let Err(er) = err_tx.send(err) { + error!("HttpFileWriter tx.send err: {:?}", er); + } + } + }); + + Ok(Self { wd, err_rx }) } } -#[derive(Default)] -pub enum Writer { - #[default] - NotUse, - File(File), - Buffer(VecAsyncWriter), -} - -impl AsyncWrite for Writer { - fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { - match self.get_mut() { - Writer::File(file) => Pin::new(file).poll_write(cx, buf), - Writer::Buffer(buff) => Pin::new(buff).poll_write(cx, buf), - Writer::NotUse => Poll::Ready(Ok(0)), +impl AsyncWrite for HttpFileWriter { + #[tracing::instrument(level = "debug", skip(self, buf))] + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> Poll> { + if let Ok(err) = self.as_mut().err_rx.try_recv() { + return Poll::Ready(Err(err)); } + + Pin::new(&mut self.wd).poll_write(cx, buf) } - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.get_mut() { - Writer::File(file) => Pin::new(file).poll_flush(cx), - Writer::Buffer(buff) => Pin::new(buff).poll_flush(cx), - Writer::NotUse => Poll::Ready(Ok(())), - } + #[tracing::instrument(level = "debug", skip(self))] + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { + Pin::new(&mut self.wd).poll_flush(cx) } - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.get_mut() { - Writer::File(file) => Pin::new(file).poll_shutdown(cx), - Writer::Buffer(buff) => Pin::new(buff).poll_shutdown(cx), - Writer::NotUse => Poll::Ready(Ok(())), - } + #[tracing::instrument(level = "debug", skip(self))] + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { + Pin::new(&mut self.wd).poll_shutdown(cx) } } -pub struct AsyncToSync { +pub struct HttpFileReader { + inner: FileReader, +} + +impl HttpFileReader { + pub async fn new(url: &str, disk: &str, volume: &str, path: &str, offset: usize, length: usize) -> std::io::Result { + let resp = reqwest::Client::new() + .get(format!( + "{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}", + url, + urlencoding::encode(disk), + urlencoding::encode(volume), + urlencoding::encode(path), + offset, + length + )) + .send() + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + + let inner = Box::new(StreamReader::new(resp.bytes_stream().map_err(std::io::Error::other))); + + Ok(Self { inner }) + } +} + +impl AsyncRead for HttpFileReader { + fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_read(cx, buf) + } +} + +pub struct EtagReader { inner: R, + md5: Md5, } -impl AsyncToSync { - pub fn new_reader(inner: R) -> Self { - Self { inner } +impl EtagReader { + pub fn new(inner: R) -> Self { + EtagReader { inner, md5: Md5::new() } } - fn read_async(&mut self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll> { - let mut read_buf = ReadBuf::new(buf); - // Poll the underlying AsyncRead to fill the ReadBuf - match Pin::new(&mut self.inner).poll_read(cx, &mut read_buf) { - Poll::Ready(Ok(())) => Poll::Ready(Ok(read_buf.filled().len())), - Poll::Ready(Err(e)) => Poll::Ready(Err(e)), - Poll::Pending => Poll::Pending, - } + + pub fn etag(self) -> String { + hex_simd::encode_to_string(self.md5.finalize(), hex_simd::AsciiCase::Lower) } } -impl AsyncToSync { - pub fn new_writer(inner: R) -> 
Self { - Self { inner } - } - // This function will perform a write using AsyncWrite - fn write_async(&mut self, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { - let result = Pin::new(&mut self.inner).poll_write(cx, buf); - match result { - Poll::Ready(Ok(n)) => Poll::Ready(Ok(n)), - Poll::Ready(Err(e)) => Poll::Ready(Err(e)), - Poll::Pending => Poll::Pending, - } - } +impl AsyncRead for EtagReader { + fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { + match Pin::new(&mut self.inner).poll_read(cx, buf) { + Poll::Ready(Ok(())) => { + let bytes = buf.filled(); + self.md5.update(bytes); - // This function will perform a flush using AsyncWrite - fn flush_async(&mut self, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_flush(cx) - } -} - -impl Read for AsyncToSync { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - let mut cx = std::task::Context::from_waker(futures::task::noop_waker_ref()); - loop { - match self.read_async(&mut cx, buf) { - Poll::Ready(Ok(n)) => return Ok(n), - Poll::Ready(Err(e)) => return Err(e), - Poll::Pending => { - // If Pending, we need to wait for the readiness. - // Here, we can use an arbitrary mechanism to yield control, - // this might be blocking until some readiness occurs can be complex. - // A full blocking implementation would require an async runtime to block on. - std::thread::sleep(std::time::Duration::from_millis(1)); // Replace with proper waiting if needed - } + Poll::Ready(Ok(())) } + other => other, } } } - -impl Write for AsyncToSync { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - let mut cx = std::task::Context::from_waker(futures::task::noop_waker_ref()); - loop { - match self.write_async(&mut cx, buf) { - Poll::Ready(Ok(n)) => return Ok(n), - Poll::Ready(Err(e)) => return Err(e), - Poll::Pending => { - // Here we are blocking and waiting for the async operation to complete. - std::thread::sleep(std::time::Duration::from_millis(1)); // Not efficient, see notes. - } - } - } - } - - fn flush(&mut self) -> std::io::Result<()> { - let mut cx = std::task::Context::from_waker(futures::task::noop_waker_ref()); - loop { - match self.flush_async(&mut cx) { - Poll::Ready(Ok(())) => return Ok(()), - Poll::Ready(Err(e)) => return Err(e), - Poll::Pending => { - // Again, blocking to wait for flush. - std::thread::sleep(std::time::Duration::from_millis(1)); // Not efficient, see notes. - } - } - } - } -} - -pub struct VecAsyncWriter { - buffer: Vec, -} - -impl VecAsyncWriter { - /// Create a new VecAsyncWriter with an empty Vec. - pub fn new(buffer: Vec) -> Self { - VecAsyncWriter { buffer } - } - - /// Retrieve the underlying buffer. 
- pub fn get_buffer(&self) -> &[u8] { - &self.buffer - } -} - -// Implementing AsyncWrite trait for VecAsyncWriter -impl AsyncWrite for VecAsyncWriter { - fn poll_write(self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &[u8]) -> Poll> { - let len = buf.len(); - - // Assume synchronous writing for simplicity - self.get_mut().buffer.extend_from_slice(buf); - - // Returning the length of written data - Poll::Ready(Ok(len)) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - // In this case, flushing is a no-op for a Vec - Poll::Ready(Ok(())) - } - - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - // Similar to flush, shutdown has no effect here - Poll::Ready(Ok(())) - } -} - -pub struct VecAsyncReader { - buffer: Vec, - position: usize, -} - -impl VecAsyncReader { - /// Create a new VecAsyncReader with the given Vec. - pub fn new(buffer: Vec) -> Self { - VecAsyncReader { buffer, position: 0 } - } - - /// Reset the reader position. - pub fn reset(&mut self) { - self.position = 0; - } -} - -// Implementing AsyncRead trait for VecAsyncReader -impl AsyncRead for VecAsyncReader { - fn poll_read(self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &mut ReadBuf) -> Poll> { - let this = self.get_mut(); - - // Check how many bytes are available to read - let len = this.buffer.len(); - let bytes_available = len - this.position; - - if bytes_available == 0 { - // If there's no more data to read, return ready with an Eof - return Poll::Ready(Ok(())); - } - - // Calculate how much we can read into the provided buffer - let to_read = std::cmp::min(bytes_available, buf.remaining()); - - // Write the data to the buf - buf.put_slice(&this.buffer[this.position..this.position + to_read]); - - // Update the position - this.position += to_read; - - // Indicate how many bytes were read - Poll::Ready(Ok(())) - } -} diff --git a/ecstore/src/metacache/writer.rs b/ecstore/src/metacache/writer.rs index c1bc1d98..bd1b576b 100644 --- a/ecstore/src/metacache/writer.rs +++ b/ecstore/src/metacache/writer.rs @@ -350,10 +350,9 @@ impl MetacacheReader { #[tokio::test] async fn test_writer() { - use crate::io::VecAsyncReader; - use crate::io::VecAsyncWriter; + use std::io::Cursor; - let mut f = VecAsyncWriter::new(Vec::new()); + let mut f = Cursor::new(Vec::new()); let mut w = MetacacheWriter::new(&mut f); @@ -373,16 +372,16 @@ async fn test_writer() { w.close().await.unwrap(); - let data = f.get_buffer().to_vec(); + let data = f.into_inner(); - let nf = VecAsyncReader::new(data); + let nf = Cursor::new(data); let mut r = MetacacheReader::new(nf); let nobjs = r.read_all().await.unwrap(); - for info in nobjs.iter() { - println!("new {:?}", &info); - } + // for info in nobjs.iter() { + // println!("new {:?}", &info); + // } assert_eq!(objs, nobjs) } diff --git a/ecstore/src/peer_rest_client.rs b/ecstore/src/peer_rest_client.rs index 8778af5c..71f2e3f2 100644 --- a/ecstore/src/peer_rest_client.rs +++ b/ecstore/src/peer_rest_client.rs @@ -51,7 +51,7 @@ impl PeerRestClient { let eps = eps.clone(); let hosts = eps.hosts_sorted(); - let mut remote = vec![None; hosts.len()]; + let mut remote = Vec::with_capacity(hosts.len()); let mut all = vec![None; hosts.len()]; for (i, hs_host) in hosts.iter().enumerate() { if let Some(host) = hs_host { diff --git a/ecstore/src/pools.rs b/ecstore/src/pools.rs index d6a43f83..466098ce 100644 --- a/ecstore/src/pools.rs +++ b/ecstore/src/pools.rs @@ -116,7 +116,7 @@ impl PoolMeta { data.write_all(&buf)?; for pool in pools { - save_config(pool, 
POOL_META_NAME, &data).await?; + save_config(pool, POOL_META_NAME, data.clone()).await?; } Ok(()) diff --git a/ecstore/src/set_disk.rs b/ecstore/src/set_disk.rs index e91907bc..157881b9 100644 --- a/ecstore/src/set_disk.rs +++ b/ecstore/src/set_disk.rs @@ -1,6 +1,7 @@ use std::{ collections::{HashMap, HashSet}, io::{Cursor, Write}, + mem::replace, path::Path, sync::Arc, time::Duration, @@ -34,6 +35,7 @@ use crate::{ }, heal_ops::BG_HEALING_UUID, }, + io::{EtagReader, READ_BUFFER_SIZE}, quorum::{object_op_ignored_errs, reduce_read_quorum_errs, reduce_write_quorum_errs, QuorumError}, store_api::{ BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, FileInfo, GetObjectReader, HTTPRangeSpec, @@ -66,6 +68,7 @@ use futures::future::join_all; use glob::Pattern; use http::HeaderMap; use lock::{ + // drwmutex::Options, drwmutex::Options, namespace_lock::{new_nslock, NsLockMap}, LockApi, @@ -76,14 +79,12 @@ use rand::{ thread_rng, {seq::SliceRandom, Rng}, }; -use reader::reader::EtagReader; -use s3s::{dto::StreamingBlob, Body}; use sha2::{Digest, Sha256}; use std::hash::Hash; use std::time::SystemTime; use time::OffsetDateTime; use tokio::{ - io::DuplexStream, + io::{empty, AsyncWrite}, sync::{broadcast, RwLock}, }; use tokio::{ @@ -1785,19 +1786,22 @@ impl SetDisks { skip( writer,disks,fi,files), fields(start_time=?time::OffsetDateTime::now_utc()) )] - async fn get_object_with_fileinfo( + async fn get_object_with_fileinfo( // &self, bucket: &str, object: &str, offset: usize, length: usize, - writer: &mut DuplexStream, + writer: &mut W, fi: FileInfo, files: Vec, disks: &[Option], set_index: usize, pool_index: usize, - ) -> Result<()> { + ) -> Result<()> + where + W: AsyncWrite + Send + Sync + Unpin + 'static, + { let (disks, files) = Self::shuffle_disks_and_parts_metadata_by_index(disks, &files, &fi); let total_size = fi.size; @@ -1854,17 +1858,6 @@ impl SetDisks { // debug!("read part_path {}", &part_path); if let Some(disk) = disk_op { - // let filereader = { - // if let Some(ref data) = files[idx].data { - // FileReader::Buffer(BufferReader::new(data.clone())) - // } else { - // let disk = disk.clone(); - // let part_path = - // format!("{}/{}/part.{}", object, files[idx].data_dir.unwrap_or(Uuid::nil()), part_number); - - // disk.read_file(bucket, &part_path).await? 
-            //     }
-            // };
 
             let checksum_info = files[idx].erasure.get_checksum_info(part_number);
             let reader = new_bitrot_filereader(
                 disk.clone(),
@@ -2223,10 +2216,10 @@ impl SetDisks {
         let mut outdate_disks = vec![None; disk_len];
         let mut disks_to_heal_count = 0;
-        info!(
-            "errs: {:?}, data_errs_by_disk: {:?}, lastest_meta: {:?}",
-            errs, data_errs_by_disk, lastest_meta
-        );
+        // info!(
+        //     "errs: {:?}, data_errs_by_disk: {:?}, lastest_meta: {:?}",
+        //     errs, data_errs_by_disk, lastest_meta
+        // );
         for index in 0..available_disks.len() {
             let (yes, reason) = should_heal_object_on_disk(
                 &errs[index],
@@ -2415,7 +2408,7 @@ impl SetDisks {
                 if let (Some(disk), Some(metadata)) = (disk, &copy_parts_metadata[index]) {
                     // let filereader = {
                     //     if let Some(ref data) = metadata.data {
-                    //         FileReader::Buffer(BufferReader::new(data.clone()))
+                    //         Box::new(BufferReader::new(data.clone()))
                     //     } else {
                     //         let disk = disk.clone();
                     //         let part_path = format!("{}/{}/part.{}", object, src_data_dir, part.number);
@@ -3614,7 +3607,7 @@ impl ObjectIO for SetDisks {
             }
 
             let reader = GetObjectReader {
-                stream: StreamingBlob::from(Body::from(Vec::new())),
+                stream: Box::new(Cursor::new(Vec::new())),
                 object_info,
             };
             return Ok(reader);
@@ -3622,10 +3615,9 @@ impl ObjectIO for SetDisks {
 
         // TODO: remote
 
-        let (rd, mut wd) = tokio::io::duplex(fi.erasure.block_size);
+        let (rd, wd) = tokio::io::duplex(READ_BUFFER_SIZE);
 
-        let (reader, offset, length) =
-            GetObjectReader::new(StreamingBlob::wrap(tokio_util::io::ReaderStream::new(rd)), range, &object_info, opts, &h)?;
+        let (reader, offset, length) = GetObjectReader::new(Box::new(rd), range, &object_info, opts, &h)?;
 
         // let disks = disks.clone();
         let bucket = bucket.to_owned();
@@ -3634,12 +3626,23 @@ impl ObjectIO for SetDisks {
         let pool_index = self.pool_index;
         tokio::spawn(async move {
             if let Err(e) = Self::get_object_with_fileinfo(
-                &bucket, &object, offset, length, &mut wd, fi, files, &disks, set_index, pool_index,
+                &bucket,
+                &object,
+                offset,
+                length,
+                &mut Box::new(wd),
+                fi,
+                files,
+                &disks,
+                set_index,
+                pool_index,
             )
             .await
             {
                 error!("get_object_with_fileinfo err {:?}", e);
             };
+
+            // error!("get_object_with_fileinfo end");
         });
 
         Ok(reader)
@@ -3769,8 +3772,10 @@ impl ObjectIO for SetDisks {
             }
         }
 
+        let stream = replace(&mut data.stream, Box::new(empty()));
+        let mut etag_stream = EtagReader::new(stream);
+
         // TODO: etag from header
-        let mut etag_stream = EtagReader::new(&mut data.stream, None, None);
 
         let w_size = erasure
             .encode(&mut etag_stream, &mut writers, data.content_length, write_quorum)
@@ -4356,7 +4361,8 @@ impl StorageAPI for SetDisks {
 
         let mut erasure = Erasure::new(fi.erasure.data_blocks, fi.erasure.parity_blocks, fi.erasure.block_size);
 
-        let mut etag_stream = EtagReader::new(&mut data.stream, None, None);
+        let stream = replace(&mut data.stream, Box::new(empty()));
+        let mut etag_stream = EtagReader::new(stream);
 
         let w_size = erasure
             .encode(&mut etag_stream, &mut writers, data.content_length, write_quorum)
@@ -4841,25 +4847,28 @@ impl StorageAPI for SetDisks {
             }
         }
 
+        // TODO: optimize cleanupMultipartPath
         for p in curr_fi.parts.iter() {
-            self.remove_part_meta(
-                bucket,
-                object,
-                upload_id,
-                curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(),
-                p.number,
-            )
-            .await?;
-
-            if !fi.parts.iter().any(|v| v.number == p.number) {
-                self.remove_object_part(
+            let _ = self
+                .remove_part_meta(
                     bucket,
                     object,
                     upload_id,
                     curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(),
                     p.number,
                 )
-                .await?;
+                .await;
+
+            if !fi.parts.iter().any(|v| v.number
== p.number) { + let _ = self + .remove_object_part( + bucket, + object, + upload_id, + curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(), + p.number, + ) + .await; } } @@ -5235,7 +5244,7 @@ async fn disks_with_all_parts( } } } - info!("meta_errs: {:?}, errs: {:?}", meta_errs, errs); + // info!("meta_errs: {:?}, errs: {:?}", meta_errs, errs); meta_errs.iter().enumerate().for_each(|(index, err)| { if err.is_some() { let part_err = conv_part_err_to_int(err); @@ -5245,7 +5254,7 @@ async fn disks_with_all_parts( } }); - info!("data_errs_by_part: {:?}, data_errs_by_disk: {:?}", data_errs_by_part, data_errs_by_disk); + // info!("data_errs_by_part: {:?}, data_errs_by_disk: {:?}", data_errs_by_part, data_errs_by_disk); for (index, disk) in online_disks.iter().enumerate() { if meta_errs[index].is_some() { continue; @@ -5332,7 +5341,7 @@ async fn disks_with_all_parts( } } } - info!("data_errs_by_part: {:?}, data_errs_by_disk: {:?}", data_errs_by_part, data_errs_by_disk); + // info!("data_errs_by_part: {:?}, data_errs_by_disk: {:?}", data_errs_by_part, data_errs_by_disk); for (part, disks) in data_errs_by_part.iter() { for (idx, disk) in disks.iter().enumerate() { if let Some(vec) = data_errs_by_disk.get_mut(&idx) { @@ -5340,7 +5349,7 @@ async fn disks_with_all_parts( } } } - info!("data_errs_by_part: {:?}, data_errs_by_disk: {:?}", data_errs_by_part, data_errs_by_disk); + // info!("data_errs_by_part: {:?}, data_errs_by_disk: {:?}", data_errs_by_part, data_errs_by_disk); for (i, disk) in online_disks.iter().enumerate() { if meta_errs[i].is_none() && disk.is_some() && !has_part_err(&data_errs_by_disk[&i]) { available_disks[i] = Some(disk.clone().unwrap()); diff --git a/ecstore/src/sets.rs b/ecstore/src/sets.rs index e9923856..149c0e62 100644 --- a/ecstore/src/sets.rs +++ b/ecstore/src/sets.rs @@ -138,7 +138,7 @@ impl Sets { if let Some(_disk_id) = has_disk_id { set_drive.push(disk); } else { - warn!("sets new set_drive {}-{} get_disk_id is none", i, j); + error!("sets new set_drive {}-{} get_disk_id is none", i, j); set_drive.push(None); } } @@ -207,7 +207,7 @@ impl Sets { }, _ = cloned_token.cancelled() => { - warn!("ctx cancelled"); + warn!("monitor_and_connect_endpoints ctx cancelled"); break; } } diff --git a/ecstore/src/store_api.rs b/ecstore/src/store_api.rs index 091ee1e8..69299616 100644 --- a/ecstore/src/store_api.rs +++ b/ecstore/src/store_api.rs @@ -1,4 +1,5 @@ use crate::heal::heal_ops::HealSequence; +use crate::io::FileReader; use crate::store_utils::clean_metadata; use crate::{ disk::DiskStore, @@ -7,15 +8,16 @@ use crate::{ utils::path::decode_dir_object, xhttp, }; -use futures::StreamExt; use http::{HeaderMap, HeaderValue}; use madmin::heal_commands::HealResultItem; use rmp_serde::Serializer; -use s3s::{dto::StreamingBlob, Body}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; +use std::fmt::Debug; +use std::io::Cursor; use std::sync::Arc; use time::OffsetDateTime; +use tokio::io::AsyncReadExt; use uuid::Uuid; pub const ERASURE_ALGORITHM: &str = "rs-vandermonde"; @@ -416,35 +418,42 @@ pub struct DeleteBucketOptions { pub srdelete_op: SRBucketDeleteOp, } -#[derive(Debug)] pub struct PutObjReader { - pub stream: StreamingBlob, + pub stream: FileReader, pub content_length: usize, } +impl Debug for PutObjReader { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("PutObjReader") + .field("content_length", &self.content_length) + .finish() + } +} + impl PutObjReader { - pub fn new(stream: StreamingBlob, content_length: 
usize) -> Self { + pub fn new(stream: FileReader, content_length: usize) -> Self { PutObjReader { stream, content_length } } pub fn from_vec(data: Vec) -> Self { let content_length = data.len(); PutObjReader { - stream: Body::from(data).into(), + stream: Box::new(Cursor::new(data)), content_length, } } } pub struct GetObjectReader { - pub stream: StreamingBlob, + pub stream: FileReader, pub object_info: ObjectInfo, } impl GetObjectReader { #[tracing::instrument(level = "debug", skip(reader))] pub fn new( - reader: StreamingBlob, + reader: FileReader, rs: Option, oi: &ObjectInfo, opts: &ObjectOptions, @@ -482,14 +491,15 @@ impl GetObjectReader { } pub async fn read_all(&mut self) -> Result> { let mut data = Vec::new(); + self.stream.read_to_end(&mut data).await?; - while let Some(x) = self.stream.next().await { - let buf = match x { - Ok(res) => res, - Err(e) => return Err(Error::msg(e.to_string())), - }; - data.extend_from_slice(buf.as_ref()); - } + // while let Some(x) = self.stream.next().await { + // let buf = match x { + // Ok(res) => res, + // Err(e) => return Err(Error::msg(e.to_string())), + // }; + // data.extend_from_slice(buf.as_ref()); + // } Ok(data) } diff --git a/iam/src/store/object.rs b/iam/src/store/object.rs index 8b11ae15..40c9e97a 100644 --- a/iam/src/store/object.rs +++ b/iam/src/store/object.rs @@ -370,7 +370,7 @@ impl Store for ObjectStore { let mut data = serde_json::to_vec(&item)?; data = Self::encrypt_data(&data)?; - save_config(self.object_api.clone(), path.as_ref(), &data).await + save_config(self.object_api.clone(), path.as_ref(), data).await } async fn delete_iam_config(&self, path: impl AsRef + Send) -> Result<()> { delete_config(self.object_api.clone(), path.as_ref()).await diff --git a/reader/Cargo.toml b/reader/Cargo.toml deleted file mode 100644 index 2e171e94..00000000 --- a/reader/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "reader" -edition.workspace = true -license.workspace = true -repository.workspace = true -rust-version.workspace = true -version.workspace = true - -[lints] -workspace = true - -[dependencies] -tracing.workspace = true -s3s.workspace = true -thiserror.workspace = true -bytes.workspace = true -pin-project-lite.workspace = true -hex-simd = "0.8.0" -md-5.workspace = true -sha2 = { version = "0.11.0-pre.4" } -futures.workspace = true - -[dev-dependencies] -tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } diff --git a/reader/src/error.rs b/reader/src/error.rs deleted file mode 100644 index 9c5017ee..00000000 --- a/reader/src/error.rs +++ /dev/null @@ -1,12 +0,0 @@ -#[derive(Debug, thiserror::Error, PartialEq, Eq)] -pub enum ReaderError { - #[error("stream input error {0}")] - StreamInput(String), - // - #[error("etag: expected ETag {0} does not match computed ETag {1}")] - VerifyError(String, String), - #[error("Bad checksum: Want {0} does not match calculated {1}")] - ChecksumMismatch(String, String), - #[error("Bad sha256: Expected {0} does not match calculated {1}")] - SHA256Mismatch(String, String), -} diff --git a/reader/src/hasher.rs b/reader/src/hasher.rs deleted file mode 100644 index 37f28509..00000000 --- a/reader/src/hasher.rs +++ /dev/null @@ -1,170 +0,0 @@ -use md5::{Digest as Md5Digest, Md5}; -use sha2::{ - digest::{Reset, Update}, - Digest, Sha256 as sha_sha256, -}; -pub trait Hasher { - fn write(&mut self, bytes: &[u8]); - fn reset(&mut self); - fn sum(&mut self) -> String; - fn size(&self) -> usize; - fn block_size(&self) -> usize; -} - -#[derive(Default)] -pub enum HashType { - 
-    #[default]
-    Undefined,
-    Uuid(Uuid),
-    Md5(MD5),
-    Sha256(Sha256),
-}
-
-impl Hasher for HashType {
-    fn write(&mut self, bytes: &[u8]) {
-        match self {
-            HashType::Md5(md5) => md5.write(bytes),
-            HashType::Sha256(sha256) => sha256.write(bytes),
-            HashType::Uuid(uuid) => uuid.write(bytes),
-            HashType::Undefined => (),
-        }
-    }
-
-    fn reset(&mut self) {
-        match self {
-            HashType::Md5(md5) => md5.reset(),
-            HashType::Sha256(sha256) => sha256.reset(),
-            HashType::Uuid(uuid) => uuid.reset(),
-            HashType::Undefined => (),
-        }
-    }
-
-    fn sum(&mut self) -> String {
-        match self {
-            HashType::Md5(md5) => md5.sum(),
-            HashType::Sha256(sha256) => sha256.sum(),
-            HashType::Uuid(uuid) => uuid.sum(),
-            HashType::Undefined => "".to_owned(),
-        }
-    }
-
-    fn size(&self) -> usize {
-        match self {
-            HashType::Md5(md5) => md5.size(),
-            HashType::Sha256(sha256) => sha256.size(),
-            HashType::Uuid(uuid) => uuid.size(),
-            HashType::Undefined => 0,
-        }
-    }
-
-    fn block_size(&self) -> usize {
-        match self {
-            HashType::Md5(md5) => md5.block_size(),
-            HashType::Sha256(sha256) => sha256.block_size(),
-            HashType::Uuid(uuid) => uuid.block_size(),
-            HashType::Undefined => 64,
-        }
-    }
-}
-
-pub struct Sha256 {
-    hasher: sha_sha256,
-}
-
-impl Sha256 {
-    pub fn new() -> Self {
-        Self {
-            hasher: sha_sha256::new(),
-        }
-    }
-}
-impl Default for Sha256 {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-impl Hasher for Sha256 {
-    fn write(&mut self, bytes: &[u8]) {
-        Update::update(&mut self.hasher, bytes);
-    }
-
-    fn reset(&mut self) {
-        Reset::reset(&mut self.hasher);
-    }
-
-    fn sum(&mut self) -> String {
-        hex_simd::encode_to_string(self.hasher.clone().finalize(), hex_simd::AsciiCase::Lower)
-    }
-
-    fn size(&self) -> usize {
-        32
-    }
-
-    fn block_size(&self) -> usize {
-        64
-    }
-}
-
-pub struct MD5 {
-    hasher: Md5,
-}
-
-impl MD5 {
-    pub fn new() -> Self {
-        Self { hasher: Md5::new() }
-    }
-}
-impl Default for MD5 {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-impl Hasher for MD5 {
-    fn write(&mut self, bytes: &[u8]) {
-        self.hasher.update(bytes);
-    }
-
-    fn reset(&mut self) {}
-
-    fn sum(&mut self) -> String {
-        hex_simd::encode_to_string(self.hasher.clone().finalize(), hex_simd::AsciiCase::Lower)
-    }
-
-    fn size(&self) -> usize {
-        32
-    }
-
-    fn block_size(&self) -> usize {
-        64
-    }
-}
-
-pub struct Uuid {
-    id: String,
-}
-
-impl Uuid {
-    pub fn new(id: String) -> Self {
-        Self { id }
-    }
-}
-
-impl Hasher for Uuid {
-    fn write(&mut self, _bytes: &[u8]) {}
-
-    fn reset(&mut self) {}
-
-    fn sum(&mut self) -> String {
-        self.id.clone()
-    }
-
-    fn size(&self) -> usize {
-        self.id.len()
-    }
-
-    fn block_size(&self) -> usize {
-        64
-    }
-}
diff --git a/reader/src/lib.rs b/reader/src/lib.rs
deleted file mode 100644
index 433caaa2..00000000
--- a/reader/src/lib.rs
+++ /dev/null
@@ -1,7 +0,0 @@
-pub mod error;
-pub mod hasher;
-pub mod reader;
-
-pub fn hex(data: impl AsRef<[u8]>) -> String {
-    hex_simd::encode_to_string(data, hex_simd::AsciiCase::Lower)
-}
diff --git a/reader/src/reader.rs b/reader/src/reader.rs
deleted file mode 100644
index 1758036c..00000000
--- a/reader/src/reader.rs
+++ /dev/null
@@ -1,493 +0,0 @@
-use bytes::Bytes;
-use s3s::StdError;
-use std::collections::VecDeque;
-
-use std::pin::Pin;
-use std::task::Poll;
-
-use crate::{
-    error::ReaderError,
-    hasher::{HashType, Uuid},
-};
-
-// use futures::stream::Stream;
-use super::hasher::{Hasher, Sha256, MD5};
-use futures::Stream;
-
-pin_project_lite::pin_project! {
-    #[derive(Default)]
-    pub struct EtagReader<S> {
-        #[pin]
-        inner: S,
-        md5: HashType,
-        checksum: Option<String>,
-        bytes_read: usize,
-    }
-}
-
-impl<S> EtagReader<S> {
-    pub fn new(inner: S, etag: Option<String>, force_md5: Option<String>) -> Self {
-        let md5 = {
-            if let Some(m) = force_md5 {
-                HashType::Uuid(Uuid::new(m))
-            } else {
-                HashType::Md5(MD5::new())
-            }
-        };
-        Self {
-            inner,
-            md5,
-            checksum: etag,
-            bytes_read: 0,
-        }
-    }
-
-    pub fn etag(&mut self) -> String {
-        self.md5.sum()
-    }
-}
-
-impl<S> Stream for EtagReader<S>
-where
-    S: Stream<Item = Result<Bytes, StdError>>,
-{
-    type Item = Result<Bytes, StdError>;
-
-    fn poll_next(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Option<Self::Item>> {
-        let this = self.project();
-        let poll = this.inner.poll_next(cx);
-
-        if let Poll::Ready(ref res) = poll {
-            match res {
-                Some(Ok(bytes)) => {
-                    *this.bytes_read += bytes.len();
-                    this.md5.write(bytes);
-                }
-                Some(Err(err)) => {
-                    return Poll::Ready(Some(Err(Box::new(ReaderError::StreamInput(err.to_string())))));
-                }
-                None => {
-                    if let Some(etag) = this.checksum {
-                        let got = this.md5.sum();
-                        if got.as_str() != etag.as_str() {
-                            return Poll::Ready(Some(Err(Box::new(ReaderError::VerifyError(etag.to_owned(), got)))));
-                        }
-                    }
-                }
-            }
-        }
-
-        poll
-    }
-}
-
-pin_project_lite::pin_project! {
-    #[derive(Default)]
-    pub struct HashReader<S> {
-        #[pin]
-        inner: S,
-        sha256: Option<Sha256>,
-        md5: Option<MD5>,
-        md5_hex: Option<String>,
-        sha256_hex: Option<String>,
-        size: usize,
-        actual_size: usize,
-        bytes_read: usize,
-    }
-}
-
-impl<S> HashReader<S> {
-    pub fn new(inner: S, size: usize, md5_hex: Option<String>, sha256_hex: Option<String>, actual_size: usize) -> Self {
-        let md5 = {
-            if md5_hex.is_some() {
-                Some(MD5::new())
-            } else {
-                None
-            }
-        };
-        let sha256 = {
-            if sha256_hex.is_some() {
-                Some(Sha256::new())
-            } else {
-                None
-            }
-        };
-        Self {
-            inner,
-            size,
-            actual_size,
-            md5_hex,
-            sha256_hex,
-            bytes_read: 0,
-            md5,
-            sha256,
-        }
-    }
-}
-
-impl<S> Stream for HashReader<S>
-where
-    S: Stream<Item = Result<Bytes, StdError>>,
-{
-    type Item = Result<Bytes, StdError>;
-
-    fn poll_next(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Option<Self::Item>> {
-        let this = self.project();
-        let poll = this.inner.poll_next(cx);
-
-        if let Poll::Ready(ref res) = poll {
-            match res {
-                Some(Ok(bytes)) => {
-                    *this.bytes_read += bytes.len();
-                    if let Some(sha) = this.sha256 {
-                        sha.write(bytes);
-                    }
-
-                    if let Some(md5) = this.md5 {
-                        md5.write(bytes);
-                    }
-                }
-                Some(Err(err)) => {
-                    return Poll::Ready(Some(Err(Box::new(ReaderError::StreamInput(err.to_string())))));
-                }
-                None => {
-                    if let Some(hash) = this.sha256 {
-                        if let Some(hex) = this.sha256_hex {
-                            let got = hash.sum();
-                            let src = hex.as_str();
-                            if src != got.as_str() {
-                                println!("sha256 err src:{},got:{}", src, got);
-                                return Poll::Ready(Some(Err(Box::new(ReaderError::SHA256Mismatch(src.to_string(), got)))));
-                            }
-                        }
-                    }
-
-                    if let Some(hash) = this.md5 {
-                        if let Some(hex) = this.md5_hex {
-                            let got = hash.sum();
-                            let src = hex.as_str();
-                            if src != got.as_str() {
-                                // TODO: ERR
-                                println!("md5 err src:{},got:{}", src, got);
-                                return Poll::Ready(Some(Err(Box::new(ReaderError::ChecksumMismatch(src.to_string(), got)))));
-                            }
-                        }
-                    }
-                }
-            }
-        }
-
-        // println!("poll {:?}", poll);

-        poll
-    }
-}
-
-pin_project_lite::pin_project! {
-    pub struct ChunkedStream<S> {
-        #[pin]
-        inner: S,
-        chuck_size: usize,
-        streams: VecDeque<Bytes>,
-        remaining: Vec<u8>,
-    }
-}
-
-impl<S> ChunkedStream<S> {
-    pub fn new(inner: S, chuck_size: usize) -> Self {
-        Self {
-            inner,
-            chuck_size,
-            streams: VecDeque::new(),
-            remaining: Vec::new(),
-        }
-    }
-}
-
-impl<S> Stream for ChunkedStream<S>
-where
-    S: Stream<Item = Result<Bytes, StdError>> + Send + Sync,
-    // E: std::error::Error + Send + Sync,
-{
-    type Item = Result<Bytes, StdError>;
-
-    fn poll_next(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Option<Self::Item>> {
-        let (items, op_items) = self.inner.size_hint();
-        let this = self.project();
-
-        if let Some(b) = this.streams.pop_front() {
-            return Poll::Ready(Some(Ok(b)));
-        }
-
-        let poll = this.inner.poll_next(cx);
-
-        match poll {
-            Poll::Ready(res_op) => match res_op {
-                Some(res) => match res {
-                    Ok(bytes) => {
-                        let chuck_size = *this.chuck_size;
-                        let mut bytes = bytes;
-
-                        // println!("get len {}", bytes.len());
-                        // there are leftover bytes from the previous poll
-                        if !this.remaining.is_empty() {
-                            let need_size = chuck_size - this.remaining.len();
-                            // the incoming data is large enough to top up the pending chunk
-                            if bytes.len() >= need_size {
-                                let add_bytes = bytes.split_to(need_size);
-                                this.remaining.extend_from_slice(&add_bytes);
-                                this.streams.push_back(Bytes::from(this.remaining.clone()));
-                                this.remaining.clear();
-                            } else {
-                                // not enough to fill a chunk; just append it
-                                let need_size = bytes.len();
-                                let add_bytes = bytes.split_to(need_size);
-                                this.remaining.extend_from_slice(&add_bytes);
-                            }
-                        }
-
-                        loop {
-                            if bytes.len() < chuck_size {
-                                break;
-                            }
-                            let chuck = bytes.split_to(chuck_size);
-                            this.streams.push_back(chuck);
-                        }
-
-                        if !bytes.is_empty() {
-                            this.remaining.extend_from_slice(&bytes);
-                        }
-
-                        if let Some(b) = this.streams.pop_front() {
-                            return Poll::Ready(Some(Ok(b)));
-                        }
-
-                        if items > 0 || op_items.is_some() {
-                            return Poll::Pending;
-                        }
-
-                        if !this.remaining.is_empty() {
-                            let b = this.remaining.clone();
-                            this.remaining.clear();
-                            return Poll::Ready(Some(Ok(Bytes::from(b))));
-                        }
-                        Poll::Ready(None)
-                    }
-                    Err(err) => Poll::Ready(Some(Err(err))),
-                },
-                None => {
-                    // println!("get empty");
-                    if let Some(b) = this.streams.pop_front() {
-                        return Poll::Ready(Some(Ok(b)));
-                    }
-                    if !this.remaining.is_empty() {
-                        let b = this.remaining.clone();
-                        this.remaining.clear();
-                        return Poll::Ready(Some(Ok(Bytes::from(b))));
-                    }
-                    Poll::Ready(None)
-                }
-            },
-            Poll::Pending => {
-                // println!("get Pending");
-                Poll::Pending
-            }
-        }
-
-        // if let Poll::Ready(Some(res)) = poll {
-        //     warn!("poll res ...");
-        //     match res {
-        //         Ok(bytes) => {
-        //             let chuck_size = *this.chuck_size;
-        //             let mut bytes = bytes;
-        //             if this.remaining.len() > 0 {
-        //                 let need_size = chuck_size - this.remaining.len();
-        //                 let add_bytes = bytes.split_to(need_size);
-        //                 this.remaining.extend_from_slice(&add_bytes);
-        //                 warn!("poll push_back remaining ...1");
-        //                 this.streams.push_back(Bytes::from(this.remaining.clone()));
-        //                 this.remaining.clear();
-        //             }
-
-        //             loop {
-        //                 if bytes.len() < chuck_size {
-        //                     break;
-        //                 }
-        //                 let chuck = bytes.split_to(chuck_size);
-        //                 warn!("poll push_back ...1");
-        //                 this.streams.push_back(chuck);
-        //             }
-
-        //             warn!("poll remaining extend_from_slice...1");
-        //             this.remaining.extend_from_slice(&bytes);
-        //         }
-        //         Err(err) => return Poll::Ready(Some(Err(err))),
-        //     }
-        // }
-
-        // if let Some(b) = this.streams.pop_front() {
-        //     warn!("poll pop_front ...");
-        //     return Poll::Ready(Some(Ok(b)));
-        // }
-
-        // if this.remaining.len() > 0 {
-        //     let b = this.remaining.clone();
-        //     this.remaining.clear();
-
-        //     warn!("poll remaining ...1");
-        //     return Poll::Ready(Some(Ok(Bytes::from(b))));
-        // }
-        // Poll::Pending
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let mut items = self.streams.len();
-        if !self.remaining.is_empty() {
-            items += 1;
-        }
-        (items, Some(items))
-    }
-}
-
-#[cfg(test)]
-mod test {
-
-    use super::*;
-    use futures::StreamExt;
-
-    #[tokio::test]
-    async fn test_etag_reader() {
-        let data1 = vec![1u8; 60]; // 65536
-        let data2 = vec![0u8; 32]; // 65536
-        let chunk1 = Bytes::from(data1);
-        let chunk2 = Bytes::from(data2);
-
-        let chunk_results: Vec<Result<Bytes, StdError>> = vec![Ok(chunk1), Ok(chunk2)];
-
-        let mut stream = futures::stream::iter(chunk_results);
-
-        let mut hash_reader = EtagReader::new(&mut stream, None, None);
-
-        // let chunk_size = 8;
-
-        // let mut chunked_stream = ChunkStream::new(&mut hash_reader, chunk_size);
-
-        loop {
-            match hash_reader.next().await {
-                Some(res) => match res {
-                    Ok(bytes) => {
-                        println!("bytes: {}, {:?}", bytes.len(), bytes);
-                    }
-                    Err(err) => {
-                        println!("err:{:?}", err);
-                        break;
-                    }
-                },
-                None => {
-                    println!("next none");
-                    break;
-                }
-            }
-        }
-
-        println!("etag:{}", hash_reader.etag());
-
-        // 9a7dfa2fcd7b69c89a30cfd3a9be11ab58cb6172628bd7e967fad1e187456d45
-        // println!("md5: {:?}", hash_reader.hex());
-    }
-
-    #[tokio::test]
-    async fn test_hash_reader() {
-        let data1 = vec![1u8; 60]; // 65536
-        let data2 = vec![0u8; 32]; // 65536
-        let size = data1.len() + data2.len();
-        let chunk1 = Bytes::from(data1);
-        let chunk2 = Bytes::from(data2);
-
-        let chunk_results: Vec<Result<Bytes, StdError>> = vec![Ok(chunk1), Ok(chunk2)];
-
-        let mut stream = futures::stream::iter(chunk_results);
-
-        let mut hash_reader = HashReader::new(
-            &mut stream,
-            size,
-            Some("d94c485610a7a00a574df55e45d3cc0c".to_string()),
-            Some("9a7dfa2fcd7b69c89a30cfd3a9be11ab58cb6172628bd7e967fad1e187456d45".to_string()),
-            0,
-        );
-
-        // let chunk_size = 8;
-
-        // let mut chunked_stream = ChunkStream::new(&mut hash_reader, chunk_size);
-
-        loop {
-            match hash_reader.next().await {
-                Some(res) => match res {
-                    Ok(bytes) => {
-                        println!("bytes: {}, {:?}", bytes.len(), bytes);
-                    }
-                    Err(err) => {
-                        println!("err:{:?}", err);
-                        break;
-                    }
-                },
-                None => {
-                    println!("next none");
-                    break;
-                }
-            }
-        }
-
-        // BUG: borrow of moved value: `md5_stream`
-
-        // 9a7dfa2fcd7b69c89a30cfd3a9be11ab58cb6172628bd7e967fad1e187456d45
-        // println!("md5: {:?}", hash_reader.hex());
-    }
-
-    #[tokio::test]
-    async fn test_chunked_stream() {
-        let data1 = vec![1u8; 60]; // 65536
-        let data2 = vec![0u8; 33]; // 65536
-        let data3 = vec![4u8; 5]; // 65536
-        let chunk1 = Bytes::from(data1);
-        let chunk2 = Bytes::from(data2);
-        let chunk3 = Bytes::from(data3);
-
-        let chunk_results: Vec<Result<Bytes, StdError>> = vec![Ok(chunk1), Ok(chunk2), Ok(chunk3)];
-
-        let mut stream = futures::stream::iter(chunk_results);
-        // let mut hash_reader = HashReader::new(
-        //     &mut stream,
-        //     size,
-        //     Some("d94c485610a7a00a574df55e45d3cc0c".to_string()),
-        //     Some("9a7dfa2fcd7b69c89a30cfd3a9be11ab58cb6172628bd7e967fad1e187456d45".to_string()),
-        //     0,
-        // );
-
-        let chunk_size = 8;
-
-        let mut etag_reader = EtagReader::new(&mut stream, None, None);
-
-        let mut chunked_stream = ChunkedStream::new(&mut etag_reader, chunk_size);
-
-        loop {
-            match chunked_stream.next().await {
-                Some(res) => match res {
-                    Ok(bytes) => {
-                        println!("bytes: {}, {:?}", bytes.len(), bytes);
-                    }
-                    Err(err) => {
-                        println!("err:{:?}", err);
-                        break;
-                    }
-                },
-                None => {
-                    println!("next none");
-                    break;
-                }
-            }
-        }
-
-        println!("etag:{}", etag_reader.etag());
-    }
-}
diff --git a/reader/src/readme.md b/reader/src/readme.md
deleted file mode 100644
index 516bf842..00000000
--- a/reader/src/readme.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Flow
-
-## Write path
-
-http::Body -> HashReader -> ...(other reader) -> ChuckedReader -> BitrotWriter -> FileWriter
diff --git a/rustfs/src/admin/rpc.rs b/rustfs/src/admin/rpc.rs
index 42bbb0ac..5fc85da8 100644
--- a/rustfs/src/admin/rpc.rs
+++ b/rustfs/src/admin/rpc.rs
@@ -4,6 +4,7 @@ use super::router::S3Router;
 use crate::storage::ecfs::bytes_stream;
 use common::error::Result;
 use ecstore::disk::DiskAPI;
+use ecstore::io::READ_BUFFER_SIZE;
 use ecstore::store::find_local_disk;
 use futures::TryStreamExt;
 use http::StatusCode;
@@ -71,7 +72,10 @@ impl Operation for ReadFile {

         Ok(S3Response::new((
             StatusCode::OK,
-            Body::from(StreamingBlob::wrap(bytes_stream(ReaderStream::new(file), query.length))),
+            Body::from(StreamingBlob::wrap(bytes_stream(
+                ReaderStream::with_capacity(file, READ_BUFFER_SIZE),
+                query.length,
+            ))),
         )))
     }
 }
diff --git a/rustfs/src/main.rs b/rustfs/src/main.rs
index d3e5b718..35187ba9 100644
--- a/rustfs/src/main.rs
+++ b/rustfs/src/main.rs
@@ -152,9 +152,13 @@ async fn run(opt: config::Opt) -> Result<()> {

     for (i, eps) in endpoint_pools.as_ref().iter().enumerate() {
         info!(
-            "created endpoints {}, set_count:{}, drives_per_set: {}, cmd: {:?}, \n{:?}",
-            i, eps.set_count, eps.drives_per_set, eps.cmd_line, eps
+            "created endpoints {}, set_count:{}, drives_per_set: {}, cmd: {:?}",
+            i, eps.set_count, eps.drives_per_set, eps.cmd_line
         );
+
+        for ep in eps.endpoints.as_ref().iter() {
+            info!(" - {}", ep);
+        }
     }

     set_global_addr(&opt.address).await;
diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs
index 95ef13cc..edb936dc 100644
--- a/rustfs/src/storage/ecfs.rs
+++ b/rustfs/src/storage/ecfs.rs
@@ -20,6 +20,7 @@ use ecstore::bucket::policy_sys::PolicySys;
 use ecstore::bucket::tagging::decode_tags;
 use ecstore::bucket::tagging::encode_tags;
 use ecstore::bucket::versioning_sys::BucketVersioningSys;
+use ecstore::io::READ_BUFFER_SIZE;
 use ecstore::new_object_layer_fn;
 use ecstore::store_api::BucketOptions;
 use ecstore::store_api::CompletePart;
@@ -51,6 +52,8 @@ use s3s::S3;
 use s3s::{S3Request, S3Response};
 use std::fmt::Debug;
 use std::str::FromStr;
+use tokio_util::io::ReaderStream;
+use tokio_util::io::StreamReader;
 use tracing::debug;
 use tracing::error;
 use tracing::info;
@@ -464,8 +467,13 @@ impl S3 for FS {
         };
         let last_modified = info.mod_time.map(Timestamp::from);

+        let body = Some(StreamingBlob::wrap(bytes_stream(
+            ReaderStream::with_capacity(reader.stream, READ_BUFFER_SIZE),
+            info.size,
+        )));
+
         let output = GetObjectOutput {
-            body: Some(reader.stream),
+            body,
             content_length: Some(info.size as i64),
             last_modified,
             content_type,
@@ -799,6 +807,10 @@ impl S3 for FS {
             }
         };

+        let body = Box::new(StreamReader::new(
+            body.map(|f| f.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))),
+        ));
+
         let mut reader = PutObjReader::new(body, content_length as usize);

         let Some(store) = new_object_layer_fn() else {
@@ -911,6 +923,10 @@ impl S3 for FS {
             }
         };

+        let body = Box::new(StreamReader::new(
+            body.map(|f| f.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))),
+        ));
+
         // mc cp step 4
         let mut data = PutObjReader::new(body, content_length as usize);
         let opts = ObjectOptions::default();

From ff4769ca1e236c42a9337fb2403723884b656ddb Mon Sep 17 00:00:00 2001
From: weisd
Date: Sun, 16 Mar 2025 00:16:32 +0800
Subject: [PATCH 5/5] update todo

---
 TODO.md | 40 +++++-----------------------------------
 1 file changed, 5 insertions(+), 35 deletions(-)

diff --git a/TODO.md b/TODO.md
index 597a00f4..3519bd92 100644
--- a/TODO.md
+++ b/TODO.md
@@ -60,38 +60,8 @@
-scp ./target/ubuntu22.04/release/rustfs.zip root@8.130.183.154:~/
-scp ./target/ubuntu22.04/release/rustfs.zip root@8.130.177.182:~/
-scp ./target/ubuntu22.04/release/rustfs.zip root@8.130.91.189:~/
-scp ./target/ubuntu22.04/release/rustfs.zip root@8.130.182.114:~/
-
-scp ./target/x86_64-unknown-linux-musl/release/rustfs root@8.130.183.154:~/
-scp ./target/x86_64-unknown-linux-musl/release/rustfs root@8.130.177.182:~/
-scp ./target/x86_64-unknown-linux-musl/release/rustfs root@8.130.91.189:~/
-scp ./target/x86_64-unknown-linux-musl/release/rustfs root@8.130.182.114:~/
-
-
-
-
-
-2025-03-11T06:18:50.011565Z DEBUG s3s::service: req: Request { method: PUT, uri: /rustfs/rpc/put_file_stream?disk=http://node2:9000/data/rustfs2&volume=.rustfs.sys/tmp&path=a45ade1a-e09b-4eb4-bac1-8b5f55f7d438/235da61f-a705-4f9a-aa21-7801d2eaf61d/part.1&append=false, version: HTTP/1.1, headers: {"accept": "*/*", "host": "node2:9000", "transfer-encoding": "chunked"}, body: Body { hyper: Body(Streaming) } }
-    at /Users/weisd/.cargo/git/checkouts/s3s-58426f2d17c34859/ab139f7/crates/s3s/src/service.rs:81
-    in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00
-
-2025-03-11T06:18:50.011603Z DEBUG s3s::ops: parsing path-style request, decoded_uri_path: "/rustfs/rpc/put_file_stream"
-    at /Users/weisd/.cargo/git/checkouts/s3s-58426f2d17c34859/ab139f7/crates/s3s/src/ops/mod.rs:266
-    in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00
-
-2025-03-11T06:18:50.011651Z DEBUG s3s::ops: body_changed: false, decoded_content_length: None, has_multipart: false
-    at /Users/weisd/.cargo/git/checkouts/s3s-58426f2d17c34859/ab139f7/crates/s3s/src/ops/mod.rs:342
-    in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00
-
-2025-03-11T06:18:50.011687Z WARN rustfs::admin::rpc: handle PutFile
-    at rustfs/src/admin/rpc.rs:120
-    in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00
-
-2025-03-11T06:18:50.011716Z DEBUG s3s::ops: custom route returns error, err: S3Error(Inner { code: InvalidArgument, message: Some("get query failed1 Error(\"missing field `size`\")"), request_id: None, status_code: None, source: None, headers: None })
-    at /Users/weisd/.cargo/git/checkouts/s3s-58426f2d17c34859/ab139f7/crates/s3s/src/ops/mod.rs:227
-    in s3s::service::call with start_time: 2025-03-11 6:18:50.011550933 +00:00:00
-
-2025-03-11T06:18:50.011751Z DEBUG s3s::service: res: Response { status: 400, version: HTTP/1.1, headers: {"content-type": "application/xml"}, body: Body { once: b"<Error><Code>InvalidArgument</Code><Message>get query failed1 Error(\"missing field `size`\")</Message></Error>" } }
+## Performance optimization
+- [ ] bitrot: implement AsyncRead/AsyncWrite
+- [ ] erasure: concurrent reads and writes
+- [ ] improve the delete path: handle it concurrently, move objects to a recycle bin first, and empty the recycle bin when space runs low
+- [ ] list_object: stream results through a reader
\ No newline at end of file
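
Note on the "bitrot: implement AsyncRead/AsyncWrite" item: patch 1 already moves EtagReader off the Stream adapter and onto the boxed AsyncRead (FileReader) path, and the bitrot readers can take the same shape. Below is a minimal, self-contained sketch of that pattern using the md-5 and tokio crates; the names are illustrative and this is not the ecstore implementation.

use md5::{Digest, Md5};
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};

// Hashes bytes as they pass through an inner AsyncRead; the ETag is
// available once the stream has been fully consumed.
struct EtagReader<R> {
    inner: R,
    md5: Md5,
}

impl<R: AsyncRead + Unpin> EtagReader<R> {
    fn new(inner: R) -> Self {
        Self { inner, md5: Md5::new() }
    }

    // Hex-encoded MD5 of everything read so far.
    fn etag(&self) -> String {
        self.md5.clone().finalize().iter().map(|b| format!("{b:02x}")).collect()
    }
}

impl<R: AsyncRead + Unpin> AsyncRead for EtagReader<R> {
    fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
        let this = self.get_mut();
        let before = buf.filled().len();
        let poll = Pin::new(&mut this.inner).poll_read(cx, buf);
        if let Poll::Ready(Ok(())) = &poll {
            // Hash only the bytes this poll appended to the buffer.
            this.md5.update(&buf.filled()[before..]);
        }
        poll
    }
}

#[tokio::main]
async fn main() -> io::Result<()> {
    let mut reader = EtagReader::new(std::io::Cursor::new(vec![1u8; 60]));
    let mut sink = Vec::new();
    reader.read_to_end(&mut sink).await?;
    println!("etag: {}", reader.etag());
    Ok(())
}

A bitrot reader would look much the same, except that it would verify a per-block checksum against the stored hash and surface a mismatch as io::ErrorKind::InvalidData rather than exposing a digest.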
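
The get_object change in patch 1 replaces the StreamingBlob plumbing with an in-memory pipe: the erasure decoder writes into one half of tokio::io::duplex on a spawned task while GetObjectReader hands the other half to the response. A reduced sketch of that hand-off; the 64 KiB buffer here is only a stand-in for ecstore::io::READ_BUFFER_SIZE, whose actual value is not shown in this series.

use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};

const READ_BUFFER_SIZE: usize = 64 * 1024; // assumed value

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let (mut rd, mut wd) = duplex(READ_BUFFER_SIZE);

    // Producer side: in the patch this is get_object_with_fileinfo
    // decoding erasure-coded parts on a spawned task.
    tokio::spawn(async move {
        wd.write_all(b"object bytes").await?;
        wd.shutdown().await // EOF for the reader; dropping wd would do the same
    });

    // Consumer side: GetObjectReader sees a plain AsyncRead.
    let mut body = Vec::new();
    rd.read_to_end(&mut body).await?;
    assert_eq!(body, b"object bytes");
    Ok(())
}

Because the duplex buffer is bounded, a slow consumer backpressures the decoder instead of letting decoded parts accumulate in memory.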
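
At the S3 boundary the HTTP body is still a byte stream, so ecfs.rs bridges both directions with tokio-util: StreamReader turns the request stream into the AsyncRead that PutObjReader now expects, and ReaderStream::with_capacity turns GetObjectReader's AsyncRead back into a response stream. A round-trip sketch with made-up chunk contents; the io::Error mapping mirrors what the patch does for s3s body errors.

use bytes::Bytes;
use futures::StreamExt;
use tokio::io::AsyncReadExt;
use tokio_util::io::{ReaderStream, StreamReader};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Stream -> AsyncRead: StreamReader wants Result<Bytes, io::Error> items,
    // hence the map_err on the s3s body in the patch.
    let chunks = futures::stream::iter(vec![
        Ok::<Bytes, std::io::Error>(Bytes::from_static(b"hello ")),
        Ok(Bytes::from_static(b"world")),
    ]);
    let mut reader = StreamReader::new(chunks);
    let mut buf = Vec::new();
    reader.read_to_end(&mut buf).await?;
    assert_eq!(buf, b"hello world");

    // AsyncRead -> Stream: a larger capacity means fewer, bigger chunks,
    // which is what ReaderStream::with_capacity(_, READ_BUFFER_SIZE) buys.
    let mut stream = ReaderStream::with_capacity(std::io::Cursor::new(buf), 64 * 1024);
    while let Some(chunk) = stream.next().await {
        println!("chunk of {} bytes", chunk?.len());
    }
    Ok(())
}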