diff --git a/Cargo.lock b/Cargo.lock index d3955d72..c8a44c21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -362,6 +362,15 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +[[package]] +name = "common" +version = "0.0.1" +dependencies = [ + "lazy_static", + "tokio", + "tracing-error", +] + [[package]] name = "cpufeatures" version = "0.2.12" @@ -441,11 +450,13 @@ dependencies = [ "base64-simd", "byteorder", "bytes", + "common", "crc32fast", "futures", "hex-simd", "http", "lazy_static", + "lock", "netif", "openssl", "path-absolutize", @@ -949,8 +960,20 @@ checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" name = "lock" version = "0.0.1" dependencies = [ - "ecstore", + "async-trait", + "backon", + "common", + "lazy_static", + "protos", + "rand", + "serde", + "serde_json", + "tokio", + "tonic", "tracing", + "tracing-error", + "url", + "uuid", ] [[package]] @@ -1582,6 +1605,7 @@ dependencies = [ "async-trait", "bytes", "clap", + "common", "ecstore", "flatbuffers", "futures", @@ -1591,6 +1615,7 @@ dependencies = [ "http-body", "hyper", "hyper-util", + "lock", "mime", "netif", "pin-project-lite", @@ -2299,6 +2324,18 @@ dependencies = [ "getrandom", "rand", "serde", + "uuid-macro-internal", +] + +[[package]] +name = "uuid-macro-internal" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee1cd046f83ea2c4e920d6ee9f7c3537ef928d75dce5d84a87c2c5d6b3999a3a" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index ae95a0ab..285272c0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [workspace] resolver = "2" -members = ["rustfs", "ecstore", "e2e_test", "common/lock", "common/protos"] +members = ["rustfs", "ecstore", "e2e_test", "common/common", "common/lock", "common/protos"] [workspace.package] edition = "2021" @@ -18,6 +18,7 @@ 
ecstore = { path = "./ecstore" } flatbuffers = "24.3.25" futures = "0.3.30" futures-util = "0.3.30" +common = { path = "./common/common" } hyper = "1.3.1" hyper-util = { version = "0.1.5", features = [ "tokio", @@ -26,6 +27,8 @@ hyper-util = { version = "0.1.5", features = [ ] } http = "1.1.0" http-body = "1.0.0" +lock = { path = "./common/lock" } +lazy_static = "1.5.0" mime = "0.3.17" netif = "0.1.6" pin-project-lite = "0.2" @@ -35,6 +38,7 @@ prost-build = "0.13.1" prost-types = "0.13.1" protobuf = "3.2" protos = { path = "./common/protos" } +rand = "0.8.5" s3s = { version = "0.10.1", default-features = true, features = ["tower"] } serde = { version = "1.0.203", features = ["derive"] } serde_json = "1.0.117" @@ -55,4 +59,6 @@ tower = { version = "0.4.13", features = ["timeout"] } tracing = "0.1.40" tracing-error = "0.2.0" tracing-subscriber = { version = "0.3.18", features = ["env-filter", "time"] } -transform-stream = "0.3.0" \ No newline at end of file +transform-stream = "0.3.0" +url = "2.5.2" +uuid = { version = "1.10.0", features = ["v4", "fast-rng", "macro-diagnostics"] } \ No newline at end of file diff --git a/common/common/Cargo.toml b/common/common/Cargo.toml new file mode 100644 index 00000000..2d8a7070 --- /dev/null +++ b/common/common/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "common" +version.workspace = true +edition.workspace = true + +[dependencies] +lazy_static.workspace = true +tokio.workspace = true +tracing-error.workspace = true \ No newline at end of file diff --git a/ecstore/src/error.rs b/common/common/src/error.rs similarity index 100% rename from ecstore/src/error.rs rename to common/common/src/error.rs diff --git a/common/common/src/globals.rs b/common/common/src/globals.rs new file mode 100644 index 00000000..0439b4d8 --- /dev/null +++ b/common/common/src/globals.rs @@ -0,0 +1,8 @@ +use lazy_static::lazy_static; +use tokio::sync::RwLock; + +lazy_static! 
{ + pub static ref GLOBAL_Local_Node_Name: RwLock = RwLock::new("".to_string()); + pub static ref GLOBAL_Rustfs_Host: RwLock = RwLock::new("".to_string()); + pub static ref GLOBAL_Rustfs_Port: RwLock = RwLock::new("9000".to_string()); +} diff --git a/common/common/src/lib.rs b/common/common/src/lib.rs new file mode 100644 index 00000000..b7a0605d --- /dev/null +++ b/common/common/src/lib.rs @@ -0,0 +1,2 @@ +pub mod error; +pub mod globals; diff --git a/common/lock/Cargo.toml b/common/lock/Cargo.toml index 767215f8..a057759c 100644 --- a/common/lock/Cargo.toml +++ b/common/lock/Cargo.toml @@ -4,5 +4,17 @@ version.workspace = true edition.workspace = true [dependencies] -ecstore.workspace = true -tracing.workspace = true \ No newline at end of file +async-trait.workspace = true +backon.workspace = true +common.workspace = true +lazy_static.workspace = true +protos.workspace = true +rand.workspace = true +serde.workspace = true +serde_json.workspace = true +tokio.workspace = true +tonic.workspace = true +tracing.workspace = true +tracing-error.workspace = true +url.workspace = true +uuid.workspace = true \ No newline at end of file diff --git a/common/lock/src/drwmutex.rs b/common/lock/src/drwmutex.rs new file mode 100644 index 00000000..086a488c --- /dev/null +++ b/common/lock/src/drwmutex.rs @@ -0,0 +1,347 @@ +use std::time::{Duration, Instant}; + +use tokio::{sync::mpsc::Sender, time::sleep}; +use tracing::{info, warn}; + +use crate::{lock_args::LockArgs, LockApi, Locker}; + +const DRW_MUTEX_REFRESH_INTERVAL: Duration = Duration::from_secs(10); +const LOCK_RETRY_MIN_INTERVAL: Duration = Duration::from_millis(250); + +#[derive(Debug)] +pub struct DRWMutex { + owner: String, + names: Vec, + write_locks: Vec, + read_locks: Vec, + cancel_refresh_sender: Option>, + // rng: ThreadRng, + lockers: Vec, + refresh_interval: Duration, + lock_retry_min_interval: Duration, +} + +#[derive(Debug, Default, Clone)] +pub struct Granted { + index: usize, + lock_uid: String, +} + 
+impl Granted { + fn is_locked(&self) -> bool { + is_locked(&self.lock_uid) + } +} + +fn is_locked(uid: &String) -> bool { + uid.len() > 0 +} + +#[derive(Debug, Clone)] +pub struct Options { + pub timeout: Duration, + pub retry_interval: Duration, +} + +impl DRWMutex { + pub fn new(owner: String, names: Vec, lockers: Vec) -> Self { + let mut names = names; + names.sort(); + Self { + owner, + names, + write_locks: Vec::with_capacity(lockers.len()), + read_locks: Vec::with_capacity(lockers.len()), + cancel_refresh_sender: None, + // rng: rand::thread_rng(), + lockers, + refresh_interval: DRW_MUTEX_REFRESH_INTERVAL, + lock_retry_min_interval: LOCK_RETRY_MIN_INTERVAL, + } + } +} + +impl DRWMutex { + pub async fn lock(&mut self, id: &String, source: &String) { + let is_read_lock = false; + let opts = Options { + timeout: Duration::from_secs(10), + retry_interval: Duration::from_millis(50), + }; + self.lock_blocking(id, source, is_read_lock, &opts).await; + } + + pub async fn get_lock(&mut self, id: &String, source: &String, opts: &Options) -> bool { + let is_read_lock = false; + self.lock_blocking(id, source, is_read_lock, opts).await + } + + pub async fn r_lock(&mut self, id: &String, source: &String) { + let is_read_lock = true; + let opts = Options { + timeout: Duration::from_secs(10), + retry_interval: Duration::from_millis(50), + }; + self.lock_blocking(id, source, is_read_lock, &opts).await; + } + + pub async fn get_r_lock(&mut self, id: &String, source: &String, opts: &Options) -> bool { + let is_read_lock = true; + self.lock_blocking(id, source, is_read_lock, opts).await + } + + pub async fn lock_blocking(&mut self, id: &String, source: &String, is_read_lock: bool, opts: &Options) -> bool { + let locker_len = self.lockers.len(); + let mut tolerance = locker_len / 2; + let mut quorum = locker_len - tolerance; + if !is_read_lock { + // In situations for write locks, as a special case + // to avoid split brains we make sure to acquire + // quorum + 1 when tolerance 
is exactly half of the + // total locker clients. + if quorum == tolerance { + quorum += 1; + } + } + info!("lockBlocking {}/{} for {:?}: lockType readLock({}), additional opts: {:?}, quorum: {}, tolerance: {}, lockClients: {}\n", id, source, self.names, is_read_lock, opts, quorum, tolerance, locker_len); + + tolerance = locker_len - quorum; + let mut attempt = 0; + let mut locks = Vec::with_capacity(self.lockers.len()); + + loop { + if self.inner_lock(&mut locks, id, source, is_read_lock, tolerance, quorum).await { + if is_read_lock { + self.read_locks = locks; + } else { + self.write_locks = locks; + } + + info!("lock_blocking {}/{} for {:?}: granted", id, source, self.names); + + return true; + } + + attempt += 1; + if attempt >= 10 { + break; + } + sleep(opts.retry_interval).await; + } + + false + } + + async fn inner_lock( + &mut self, + locks: &mut Vec, + id: &String, + source: &String, + is_read_lock: bool, + tolerance: usize, + quorum: usize, + ) -> bool { + locks.iter_mut().for_each(|lock| *lock = "".to_string()); + + let mut granteds = Vec::with_capacity(self.lockers.len()); + let args = LockArgs { + uid: id.to_string(), + resources: self.names.clone(), + owner: self.owner.clone(), + source: source.to_string(), + quorum, + }; + + for (index, locker) in self.lockers.iter_mut().enumerate() { + let mut granted = Granted { + index, + ..Default::default() + }; + + if is_read_lock { + match locker.rlock(&args).await { + Ok(locked) => { + if locked { + granted.lock_uid = id.to_string(); + } + } + Err(err) => { + warn!("Unable to call RLock failed with {} for {} at {:?}", err, args, locker); + } + } + } else { + match locker.lock(&args).await { + Ok(locked) => { + if locked { + granted.lock_uid = id.to_string(); + } + } + Err(err) => { + warn!("Unable to call Lock failed with {} for {} at {:?}", err, args, locker); + } + } + } + + granteds.push(granted); + } + + granteds.iter().for_each(|granted| { + locks[granted.index] = granted.lock_uid.clone(); + }); + + let 
quorum_locked = check_quorum_locked(locks, quorum); + if !quorum_locked { + info!("Unable to acquire lock in quorum, {}", args); + if !self.release_all(tolerance, locks, is_read_lock).await { + info!("Unable to release acquired locks, these locks will expire automatically {}", args); + } + } + + quorum_locked + } + + pub async fn un_lock(&mut self) { + if self.write_locks.is_empty() || !self.write_locks.iter().any(|w_lock| is_locked(w_lock)) { + panic!("Trying to un_lock() while no lock() is active") + } + + let tolerance = self.lockers.len() / 2; + let is_read_lock = false; + let mut locks = self.write_locks.clone(); + let start = Instant::now(); + loop { + if self.release_all(tolerance, &mut locks, is_read_lock).await { + return; + } + + sleep(self.lock_retry_min_interval).await; + if Instant::now().duration_since(start) > Duration::from_secs(30) { + return; + } + } + } + + pub async fn un_r_lock(&mut self) { + if self.write_locks.is_empty() || !self.write_locks.iter().any(|w_lock| is_locked(w_lock)) { + panic!("Trying to un_r_lock() while no r_lock() is active") + } + + let tolerance = self.lockers.len() / 2; + let is_read_lock = true; + let mut locks = self.write_locks.clone(); + let start = Instant::now(); + loop { + if self.release_all(tolerance, &mut locks, is_read_lock).await { + return; + } + + sleep(self.lock_retry_min_interval).await; + if Instant::now().duration_since(start) > Duration::from_secs(30) { + return; + } + } + } + + async fn release_all(&mut self, tolerance: usize, locks: &mut Vec, is_read_lock: bool) -> bool { + for (index, locker) in self.lockers.iter_mut().enumerate() { + if send_release(locker, &locks[index], &self.owner, &self.names, is_read_lock).await { + locks[index] = "".to_string(); + } + } + + check_failed_unlocks(&locks, tolerance) + } +} + +// async fn start_continuous_lock_refresh(lockers: &Vec<&mut LockApi>, id: &String, source: &String, quorum: usize, refresh_interval: Duration, mut cancel_refresh_receiver: Receiver) { +// 
let uid = id.to_string(); +// tokio::spawn(async move { +// let mut ticker = interval(refresh_interval); +// let args = LockArgs { +// uid, +// ..Default::default() +// }; + +// loop { +// select! { +// _ = ticker.tick() => { +// for (index, locker) in lockers.iter().enumerate() { + +// } +// }, +// _ = cancel_refresh_receiver.recv() => { +// return; +// } +// } +// } +// }); +// } + +fn check_failed_unlocks(locks: &Vec, tolerance: usize) -> bool { + let mut un_locks_failed = 0; + locks.iter().for_each(|lock| { + if is_locked(lock) { + un_locks_failed += 1; + } + }); + + if locks.len() - tolerance == tolerance { + return un_locks_failed >= tolerance; + } + + un_locks_failed > tolerance +} + +async fn send_release(locker: &mut LockApi, uid: &String, owner: &String, names: &Vec, is_read_lock: bool) -> bool { + if uid.is_empty() { + return false; + } + + let args = LockArgs { + uid: uid.to_string(), + owner: owner.clone(), + resources: names.clone(), + ..Default::default() + }; + + if is_read_lock { + match locker.runlock(&args).await { + Ok(locked) => { + if !locked { + warn!("Unable to release runlock, args: {}", args); + return false; + } + } + Err(err) => { + warn!("Unable to call RLock failed with {} for {} at {:?}", err, args, locker); + return false; + } + } + } else { + match locker.unlock(&args).await { + Ok(locked) => { + if !locked { + warn!("Unable to release unlock, args: {}", args); + return false; + } + } + Err(err) => { + warn!("Unable to call Lock failed with {} for {} at {:?}", err, args, locker); + return false; + } + } + } + + true +} + +fn check_quorum_locked(locks: &Vec, quorum: usize) -> bool { + let mut count = 0; + locks.iter().for_each(|lock| { + if is_locked(lock) { + count += 1; + } + }); + + count >= quorum +} diff --git a/common/lock/src/lib.rs b/common/lock/src/lib.rs index 6f48c64d..8e3cd730 100644 --- a/common/lock/src/lib.rs +++ b/common/lock/src/lib.rs @@ -1,2 +1,115 @@ -pub mod local_disk; +use std::sync::Arc; + +use 
async_trait::async_trait; +use common::error::Result; +use lazy_static::lazy_static; +use local_locker::LocalLocker; +use lock_args::LockArgs; +use remote_client::RemoteClinet; +use tokio::sync::RwLock; + +pub mod drwmutex; +pub mod local_locker; pub mod lock_args; +pub mod lrwmutex; +pub mod namespace_lock; +pub mod remote_client; + +lazy_static! { + pub static ref GLOBAL_LOCAL_SERVER: Arc>> = Arc::new(Box::new(RwLock::new(LocalLocker::new()))); +} + +type LockClient = dyn Locker; + +#[async_trait] +pub trait Locker { + async fn lock(&mut self, args: &LockArgs) -> Result; + async fn unlock(&mut self, args: &LockArgs) -> Result; + async fn rlock(&mut self, args: &LockArgs) -> Result; + async fn runlock(&mut self, args: &LockArgs) -> Result; + async fn refresh(&mut self, args: &LockArgs) -> Result; + async fn force_unlock(&mut self, args: &LockArgs) -> Result; + async fn close(&self); + async fn is_online(&self) -> bool; + async fn is_local(&self) -> bool; +} + +#[derive(Debug, Clone)] +pub enum LockApi { + Local, + Remote(RemoteClinet), +} + +#[async_trait] +impl Locker for LockApi { + async fn lock(&mut self, args: &LockArgs) -> Result { + match self { + LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.lock(args).await, + LockApi::Remote(r) => r.lock(args).await, + } + } + + async fn unlock(&mut self, args: &LockArgs) -> Result { + match self { + LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.unlock(args).await, + LockApi::Remote(r) => r.unlock(args).await, + } + } + + async fn rlock(&mut self, args: &LockArgs) -> Result { + match self { + LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.rlock(args).await, + LockApi::Remote(r) => r.rlock(args).await, + } + } + + async fn runlock(&mut self, args: &LockArgs) -> Result { + match self { + LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.runlock(args).await, + LockApi::Remote(r) => r.runlock(args).await, + } + } + + async fn refresh(&mut self, args: &LockArgs) -> Result { + match self { + LockApi::Local 
=> GLOBAL_LOCAL_SERVER.write().await.refresh(args).await, + LockApi::Remote(r) => r.refresh(args).await, + } + } + + async fn force_unlock(&mut self, args: &LockArgs) -> Result { + match self { + LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.force_unlock(args).await, + LockApi::Remote(r) => r.force_unlock(args).await, + } + } + + async fn close(&self) { + match self { + LockApi::Local => GLOBAL_LOCAL_SERVER.read().await.close().await, + LockApi::Remote(r) => r.close().await, + } + } + + async fn is_online(&self) -> bool { + match self { + LockApi::Local => GLOBAL_LOCAL_SERVER.read().await.is_online().await, + LockApi::Remote(r) => r.is_online().await, + } + } + + async fn is_local(&self) -> bool { + match self { + LockApi::Local => GLOBAL_LOCAL_SERVER.write().await.is_local().await, + LockApi::Remote(r) => r.is_local().await, + } + } +} + +pub fn new_lock_api(is_local: bool, url: Option) -> LockApi { + if is_local { + return LockApi::Local; + } + + LockApi::Remote(RemoteClinet::new(url.unwrap())) +} diff --git a/common/lock/src/local_disk.rs b/common/lock/src/local_locker.rs similarity index 75% rename from common/lock/src/local_disk.rs rename to common/lock/src/local_locker.rs index 26bbb9b4..2e47dcd2 100644 --- a/common/lock/src/local_disk.rs +++ b/common/lock/src/local_locker.rs @@ -1,7 +1,11 @@ -use ecstore::error::{Error, Result}; -use std::{collections::HashMap, time::{Duration, Instant}}; +use async_trait::async_trait; +use common::error::{Error, Result}; +use std::{ + collections::HashMap, + time::{Duration, Instant}, +}; -use crate::lock_args::LockArgs; +use crate::{lock_args::LockArgs, Locker}; const MAX_DELETE_LIST: usize = 1000; @@ -54,7 +58,7 @@ pub struct LocalLocker { } impl LocalLocker { - fn new() -> Self { + pub fn new() -> Self { LocalLocker::default() } } @@ -64,7 +68,55 @@ impl LocalLocker { resource.iter().fold(true, |acc, x| !self.lock_map.contains_key(x) && acc) } - pub fn lock(&mut self, args: LockArgs) -> Result { + pub fn 
stats(&self) -> LockStats { + let mut st = LockStats { + total: self.lock_map.len(), + ..Default::default() + }; + + self.lock_map.iter().for_each(|(_, value)| { + if value.len() > 0 { + if value[0].writer { + st.writes += 1; + } else { + st.reads += 1; + } + } + }); + + return st; + } + + fn dump_lock_map(&mut self) -> HashMap> { + let mut lock_copy = HashMap::new(); + self.lock_map.iter().for_each(|(key, value)| { + lock_copy.insert(key.to_string(), value.to_vec()); + }); + + return lock_copy; + } + + fn expire_old_locks(&mut self, interval: Duration) { + self.lock_map.iter_mut().for_each(|(_, lris)| { + lris.retain(|lri| { + if Instant::now().duration_since(lri.time_last_refresh) > interval { + let mut key = lri.uid.to_string(); + format_uuid(&mut key, &lri.idx); + self.lock_uid.remove(&key); + return false; + } + + true + }); + }); + + return; + } +} + +#[async_trait] +impl Locker for LocalLocker { + async fn lock(&mut self, args: &LockArgs) -> Result { if args.resources.len() > MAX_DELETE_LIST { return Err(Error::from_string(format!( "internal error: LocalLocker.lock called with more than {} resources", @@ -100,7 +152,7 @@ impl LocalLocker { Ok(true) } - pub fn unlock(&mut self, args: LockArgs) -> Result { + async fn unlock(&mut self, args: &LockArgs) -> Result { if args.resources.len() > MAX_DELETE_LIST { return Err(Error::from_string(format!( "internal error: LocalLocker.unlock called with more than {} resources", @@ -128,24 +180,24 @@ impl LocalLocker { reply |= true; return false; } - + true }); } if lris.len() == 0 { self.lock_map.remove(resource); } - }, + } None => { continue; } }; - }; + } Ok(reply) } - pub fn rlock(&mut self, args: LockArgs) -> Result { + async fn rlock(&mut self, args: &LockArgs) -> Result { if args.resources.len() != 1 { return Err(Error::from_string("internal error: localLocker.RLock called with more than one resource")); } @@ -166,17 +218,20 @@ impl LocalLocker { } else { return Ok(false); } - }, + } None => { - 
self.lock_map.insert(resource.to_string(), vec![LockRequesterInfo { - name: resource.to_string(), - writer: false, - source: args.source.to_string(), - owner: args.owner.to_string(), - uid: args.uid.to_string(), - quorum: args.quorum, - ..Default::default() - }]); + self.lock_map.insert( + resource.to_string(), + vec![LockRequesterInfo { + name: resource.to_string(), + writer: false, + source: args.source.to_string(), + owner: args.owner.to_string(), + uid: args.uid.to_string(), + quorum: args.quorum, + ..Default::default() + }], + ); } } let mut uuid = args.uid.to_string(); @@ -186,7 +241,7 @@ impl LocalLocker { Ok(true) } - pub fn runlock(&mut self, args: LockArgs) -> Result { + async fn runlock(&mut self, args: &LockArgs) -> Result { if args.resources.len() != 1 { return Err(Error::from_string("internal error: localLocker.RLock called with more than one resource")); } @@ -206,14 +261,14 @@ impl LocalLocker { reply |= true; return false; } - + true }); } if lris.len() == 0 { self.lock_map.remove(resource); } - }, + } None => { return Ok(reply || true); } @@ -222,64 +277,32 @@ impl LocalLocker { Ok(reply) } - pub fn stats(&self) -> LockStats { - let mut st = LockStats { - total: self.lock_map.len(), - ..Default::default() - }; + async fn close(&self) {} - self.lock_map.iter().for_each(|(_, value)| { - if value.len() > 0 { - if value[0].writer { - st.writes += 1; - } else { - st.reads += 1; - } - } - }); - - return st; - } - - pub fn dump_lock_map(&mut self) -> HashMap> { - let mut lock_copy = HashMap::new(); - self.lock_map.iter().for_each(|(key, value)| { - lock_copy.insert(key.to_string(), value.to_vec()); - }); - - return lock_copy; - } - - pub fn close(&self) { - - } - - pub fn is_online(&self) ->bool { + async fn is_online(&self) -> bool { true } - pub fn is_local(&self) -> bool { + async fn is_local(&self) -> bool { true } // TODO: need add timeout mechanism - pub fn force_unlock(&mut self, args: LockArgs) -> Result { + async fn force_unlock(&mut self, args: 
&LockArgs) -> Result { let mut reply = false; if args.uid.is_empty() { - args.resources.iter().for_each(|resource| { - match self.lock_map.get(resource) { - Some(lris) => { - lris.iter().for_each(|lri| { - let mut key = lri.uid.to_string(); - format_uuid(&mut key, &lri.idx); - self.lock_uid.remove(&key); - }); - if lris.len() == 0 { - self.lock_map.remove(resource); - } - }, - None => (), + args.resources.iter().for_each(|resource| match self.lock_map.get(resource) { + Some(lris) => { + lris.iter().for_each(|lri| { + let mut key = lri.uid.to_string(); + format_uuid(&mut key, &lri.idx); + self.lock_uid.remove(&key); + }); + if lris.len() == 0 { + self.lock_map.remove(resource); + } } + None => (), }); return Ok(true); @@ -291,32 +314,30 @@ impl LocalLocker { let mut map_id = args.uid.to_string(); format_uuid(&mut map_id, &idx); match self.lock_uid.get(&map_id) { - Some(resource) => { - match self.lock_map.get_mut(resource) { - Some(lris) => { - reply = true; - { - lris.retain(|lri| { - if lri.uid == args.uid && (args.owner.is_empty() || lri.owner == args.owner) { - let mut key = args.uid.to_string(); - format_uuid(&mut key, &lri.idx); - need_remove_map_id.push(key); - return false; - } - - true - }); - } - idx += 1; - if lris.len() == 0 { - need_remove_resource.push(resource.to_string()); - } - }, - None => { - need_remove_map_id.push(map_id); - idx += 1; - continue; + Some(resource) => match self.lock_map.get_mut(resource) { + Some(lris) => { + reply = true; + { + lris.retain(|lri| { + if lri.uid == args.uid && (args.owner.is_empty() || lri.owner == args.owner) { + let mut key = args.uid.to_string(); + format_uuid(&mut key, &lri.idx); + need_remove_map_id.push(key); + return false; + } + + true + }); } + idx += 1; + if lris.len() == 0 { + need_remove_resource.push(resource.to_string()); + } + } + None => { + need_remove_map_id.push(map_id); + idx += 1; + continue; } }, None => { @@ -335,7 +356,7 @@ impl LocalLocker { Ok(reply) } - pub fn refresh(&mut self, args: 
LockArgs) -> Result { + async fn refresh(&mut self, args: &LockArgs) -> Result { let mut idx = 0; let mut key = args.uid.to_string(); format_uuid(&mut key, &idx); @@ -344,9 +365,7 @@ impl LocalLocker { let mut resource = resource; loop { match self.lock_map.get_mut(resource) { - Some(lris) => { - - }, + Some(_lris) => {} None => { let mut key = args.uid.to_string(); format_uuid(&mut key, &0); @@ -363,29 +382,12 @@ impl LocalLocker { None => return Ok(true), }; } - }, + } None => { return Ok(false); } } } - - fn expire_old_locks(&mut self, interval: Duration) { - self.lock_map.iter_mut().for_each(|(_, lris)| { - lris.retain(|lri| { - if Instant::now().duration_since(lri.time_last_refresh) > interval { - let mut key = lri.uid.to_string(); - format_uuid(&mut key, &lri.idx); - self.lock_uid.remove(&key); - return false; - } - - true - }); - }); - - return; - } } fn format_uuid(s: &mut String, idx: &usize) { @@ -394,12 +396,13 @@ fn format_uuid(s: &mut String, idx: &usize) { #[cfg(test)] mod test { - use crate::lock_args::LockArgs; - use ecstore::error::Result; use super::LocalLocker; + use crate::{lock_args::LockArgs, Locker}; + use common::error::Result; + use tokio; - #[test] - fn test_lock_unlock() -> Result<()> { + #[tokio::test] + async fn test_lock_unlock() -> Result<()> { let mut local_locker = LocalLocker::new(); let args = LockArgs { uid: "1111".to_string(), @@ -408,11 +411,11 @@ mod test { source: "".to_string(), quorum: 3, }; - local_locker.lock(args.clone())?; + local_locker.lock(&args).await?; println!("lock local_locker: {:?} \n", local_locker); - local_locker.unlock(args)?; + local_locker.unlock(&args).await?; println!("unlock local_locker: {:?}", local_locker); Ok(()) diff --git a/common/lock/src/lock_args.rs b/common/lock/src/lock_args.rs index f3b7e61f..c41cf1b5 100644 --- a/common/lock/src/lock_args.rs +++ b/common/lock/src/lock_args.rs @@ -1,4 +1,8 @@ -#[derive(Clone, Debug, Default)] +use std::fmt::Display; + +use serde::{Deserialize, Serialize}; + 
+#[derive(Clone, Debug, Default, Serialize, Deserialize)] pub struct LockArgs { pub uid: String, pub resources: Vec, @@ -6,3 +10,13 @@ pub struct LockArgs { pub source: String, pub quorum: usize, } + +impl Display for LockArgs { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "LockArgs[ uid: {}, resources: {:?}, owner: {}, source:{}, quorum: {} ]", + self.uid, self.resources, self.owner, self.source, self.quorum + ) + } +} diff --git a/common/lock/src/lrwmutex.rs b/common/lock/src/lrwmutex.rs new file mode 100644 index 00000000..d99c959c --- /dev/null +++ b/common/lock/src/lrwmutex.rs @@ -0,0 +1,187 @@ +use std::time::{Duration, Instant}; + +use rand::Rng; +use tokio::{sync::RwLock, time::sleep}; + +#[derive(Debug, Default)] +pub struct LRWMutex { + id: RwLock, + source: RwLock, + is_write: RwLock, + refrence: RwLock, +} + +impl LRWMutex { + pub async fn lock(&self) -> bool { + let is_write = true; + let id = self.id.read().await.clone(); + let source = self.source.read().await.clone(); + let timeout = Duration::from_secs(10000); + let x = self + .look_loop( + &id, &source, &timeout, // big enough + is_write, + ) + .await; + x + } + + pub async fn get_lock(&self, id: &str, source: &str, timeout: &Duration) -> bool { + let is_write = true; + self.look_loop(id, source, timeout, is_write).await + } + + pub async fn r_lock(&self) -> bool { + let is_write: bool = false; + let id = self.id.read().await.clone(); + let source = self.source.read().await.clone(); + let timeout = Duration::from_secs(10000); + let x = self + .look_loop( + &id, &source, &timeout, // big enough + is_write, + ) + .await; + x + } + + pub async fn get_r_lock(&self, id: &str, source: &str, timeout: &Duration) -> bool { + let is_write = false; + self.look_loop(id, source, timeout, is_write).await + } + + async fn inner_lock(&self, id: &str, source: &str, is_write: bool) -> bool { + *self.id.write().await = id.to_string(); + *self.source.write().await = 
source.to_string(); + + let mut locked = false; + if is_write { + if *self.refrence.read().await == 0 && !*self.is_write.read().await { + *self.refrence.write().await = 1; + *self.is_write.write().await = true; + locked = true; + } + } else { + if !*self.is_write.read().await { + *self.refrence.write().await += 1; + locked = true; + } + } + + locked + } + + async fn look_loop(&self, id: &str, source: &str, timeout: &Duration, is_write: bool) -> bool { + let start = Instant::now(); + loop { + if self.inner_lock(id, source, is_write).await { + return true; + } else { + if Instant::now().duration_since(start) > *timeout { + return false; + } + let sleep_time: u64; + { + let mut rng = rand::thread_rng(); + sleep_time = rng.gen_range(10..=50); + } + sleep(Duration::from_millis(sleep_time)).await; + } + } + } + + pub async fn un_lock(&self) { + let is_write = true; + if !self.unlock(is_write).await { + panic!("Trying to un_lock() while no Lock() is active") + } + } + + pub async fn un_r_lock(&self) { + let is_write = false; + if !self.unlock(is_write).await { + panic!("Trying to un_r_lock() while no Lock() is active") + } + } + + async fn unlock(&self, is_write: bool) -> bool { + let mut unlocked = false; + if is_write { + if *self.is_write.read().await && *self.refrence.read().await == 1 { + *self.refrence.write().await = 0; + *self.is_write.write().await = false; + unlocked = true; + } + } else { + if !*self.is_write.read().await { + if *self.refrence.read().await > 0 { + *self.refrence.write().await -= 1; + unlocked = true; + } + } + } + + unlocked + } + + pub async fn force_un_lock(&self) { + *self.refrence.write().await = 0; + *self.is_write.write().await = false; + } +} + +#[cfg(test)] +mod test { + use std::{sync::Arc, time::Duration}; + + use common::error::Result; + use tokio::time::sleep; + + use crate::lrwmutex::LRWMutex; + + #[tokio::test] + async fn test_lock_unlock() -> Result<()> { + let l_rw_lock = LRWMutex::default(); + let id = "foo"; + let source = 
"dandan"; + let timeout = Duration::from_secs(5); + assert_eq!(true, l_rw_lock.get_lock(id, source, &timeout).await); + l_rw_lock.un_lock().await; + + l_rw_lock.lock().await; + + assert_eq!(false, l_rw_lock.get_r_lock(id, source, &timeout).await); + l_rw_lock.un_lock().await; + assert_eq!(true, l_rw_lock.get_r_lock(id, source, &timeout).await); + + Ok(()) + } + + #[tokio::test] + async fn multi_thread_test() -> Result<()> { + let l_rw_lock = Arc::new(LRWMutex::default()); + let id = "foo"; + let source = "dandan"; + + let one_fn = async { + let one = Arc::clone(&l_rw_lock); + let timeout = Duration::from_secs(1); + assert_eq!(true, one.get_lock(id, source, &timeout).await); + sleep(Duration::from_secs(5)).await; + l_rw_lock.un_lock().await; + }; + + let two_fn = async { + let two = Arc::clone(&l_rw_lock); + let timeout = Duration::from_secs(2); + assert_eq!(false, two.get_r_lock(id, source, &timeout).await); + sleep(Duration::from_secs(5)).await; + assert_eq!(true, two.get_r_lock(id, source, &timeout).await); + two.un_r_lock().await; + }; + + tokio::join!(one_fn, two_fn); + + Ok(()) + } +} diff --git a/common/lock/src/namespace_lock.rs b/common/lock/src/namespace_lock.rs new file mode 100644 index 00000000..6b8a43e4 --- /dev/null +++ b/common/lock/src/namespace_lock.rs @@ -0,0 +1,274 @@ +use std::{collections::HashMap, path::Path, sync::Arc, time::Duration}; + +use async_trait::async_trait; +use tokio::sync::RwLock; +use uuid::Uuid; + +use crate::{ + drwmutex::{DRWMutex, Options}, + lrwmutex::LRWMutex, + LockApi, +}; +use common::error::Result; + +pub type RWLockerImpl = Box; + +#[async_trait] +pub trait RWLocker { + async fn get_lock(&mut self, opts: &Options) -> Result; + async fn un_lock(&mut self) -> Result<()>; + async fn get_u_lock(&mut self, opts: &Options) -> Result; + async fn un_r_lock(&mut self) -> Result<()>; +} + +#[derive(Debug)] +struct NsLock { + reference: usize, + lock: LRWMutex, +} + +#[derive(Debug, Default)] +pub struct NsLockMap { + 
is_dist_erasure: bool, + lock_map: RwLock>, +} + +impl NsLockMap { + async fn lock( + &mut self, + volume: &String, + path: &String, + lock_source: &String, + ops_id: &String, + read_lock: bool, + timeout: Duration, + ) -> bool { + let resource = Path::new(volume).join(path).to_str().unwrap().to_string(); + let mut w_lock_map = self.lock_map.write().await; + let nslk = w_lock_map.entry(resource.clone()).or_insert(NsLock { + reference: 0, + lock: LRWMutex::default(), + }); + nslk.reference += 1; + + let locked: bool; + if read_lock { + locked = nslk.lock.get_r_lock(ops_id, lock_source, &timeout).await; + } else { + locked = nslk.lock.get_lock(ops_id, lock_source, &timeout).await; + } + + if !locked { + nslk.reference -= 1; + if nslk.reference == 0 { + w_lock_map.remove(&resource); + } + } + + return locked; + } + + async fn un_lock(&mut self, volume: &String, path: &String, read_lock: bool) { + let resource = Path::new(volume).join(path).to_str().unwrap().to_string(); + let mut w_lock_map = self.lock_map.write().await; + if let Some(nslk) = w_lock_map.get_mut(&resource) { + if read_lock { + nslk.lock.un_r_lock().await; + } else { + nslk.lock.un_lock().await; + } + + nslk.reference -= 0; + + if nslk.reference == 0 { + w_lock_map.remove(&resource); + } + } else { + return; + } + } +} + +pub async fn new_nslock( + ns: Arc>, + owner: String, + volume: String, + paths: Vec, + lockers: Vec, +) -> RWLockerImpl { + if ns.read().await.is_dist_erasure { + let names = paths + .iter() + .map(|path| Path::new(&volume).join(path).to_str().unwrap().to_string()) + .collect(); + return Box::new(DistLockInstance::new(owner, names, lockers)); + } + + Box::new(LocalLockInstance::new(ns, volume, paths)) +} + +struct DistLockInstance { + lock: Box, + ops_id: String, +} + +impl DistLockInstance { + fn new(owner: String, names: Vec, lockers: Vec) -> Self { + let ops_id = Uuid::new_v4().to_string(); + Self { + lock: Box::new(DRWMutex::new(owner, names, lockers)), + ops_id, + } + } +} + 
#[async_trait]
impl RWLocker for DistLockInstance {
    /// Acquire the distributed write lock for this instance's names.
    async fn get_lock(&mut self, opts: &Options) -> Result<bool> {
        let source = "".to_string();

        Ok(self.lock.get_lock(&self.ops_id, &source, opts).await)
    }

    /// Release the distributed write lock.
    async fn un_lock(&mut self) -> Result<()> {
        Ok(self.lock.un_lock().await)
    }

    /// Acquire the distributed *read* lock (trait method name kept for
    /// interface compatibility; it delegates to `get_r_lock`).
    async fn get_u_lock(&mut self, opts: &Options) -> Result<bool> {
        let source = "".to_string();

        Ok(self.lock.get_r_lock(&self.ops_id, &source, opts).await)
    }

    /// Release the distributed read lock.
    async fn un_r_lock(&mut self) -> Result<()> {
        Ok(self.lock.un_r_lock().await)
    }
}

/// Lock instance operating purely on the in-process `NsLockMap`.
struct LocalLockInstance {
    ns: Arc<RwLock<NsLockMap>>,
    volume: String,
    paths: Vec<String>,
    ops_id: String,
}

impl LocalLockInstance {
    fn new(ns: Arc<RwLock<NsLockMap>>, volume: String, paths: Vec<String>) -> Self {
        // Fresh operation id per instance so lock ownership can be traced.
        let ops_id = Uuid::new_v4().to_string();
        Self {
            ns,
            volume,
            paths,
            ops_id,
        }
    }
}

#[async_trait]
impl RWLocker for LocalLockInstance {
    /// Acquire a write lock on every path; on any failure, release the
    /// locks obtained so far and report `Ok(false)`.
    async fn get_lock(&mut self, opts: &Options) -> Result<bool> {
        let source = "".to_string();
        let read_lock = false;
        let mut success = vec![false; self.paths.len()];
        for (idx, path) in self.paths.iter().enumerate() {
            if !self
                .ns
                .write()
                .await
                .lock(&self.volume, path, &source, &self.ops_id, read_lock, opts.timeout)
                .await
            {
                // Roll back the paths that were already locked.
                for (i, x) in success.iter().enumerate() {
                    if *x {
                        self.ns.write().await.un_lock(&self.volume, &self.paths[i], read_lock).await;
                    }
                }

                return Ok(false);
            }

            success[idx] = true;
        }
        Ok(true)
    }

    /// Release the write lock on every path.
    async fn un_lock(&mut self) -> Result<()> {
        let read_lock = false;
        for path in self.paths.iter() {
            self.ns.write().await.un_lock(&self.volume, path, read_lock).await;
        }

        Ok(())
    }

    /// Acquire a read lock on every path, rolling back on failure.
    async fn get_u_lock(&mut self, opts: &Options) -> Result<bool> {
        let source = "".to_string();
        let read_lock = true;
        // BUG FIX: `Vec::with_capacity` creates an *empty* vector (len 0),
        // so `success[idx] = true` below panicked on the first iteration.
        // Pre-fill with `false`, exactly as `get_lock` does.
        let mut success = vec![false; self.paths.len()];
        for (idx, path) in self.paths.iter().enumerate() {
            if !self
                .ns
                .write()
                .await
                .lock(&self.volume, path, &source, &self.ops_id, read_lock, opts.timeout)
                .await
            {
                for
(i, x) in success.iter().enumerate() { + if *x { + self.ns.write().await.un_lock(&self.volume, &self.paths[i], read_lock).await; + } + } + + return Ok(false); + } + + success[idx] = true; + } + Ok(true) + } + + async fn un_r_lock(&mut self) -> Result<()> { + let read_lock = true; + for path in self.paths.iter() { + self.ns.write().await.un_lock(&self.volume, path, read_lock).await; + } + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use std::{sync::Arc, time::Duration}; + + use common::error::Result; + use tokio::sync::RwLock; + + use crate::{ + drwmutex::Options, + namespace_lock::{new_nslock, NsLockMap}, + }; + + #[tokio::test] + async fn test_local_instance() -> Result<()> { + let ns_lock_map = Arc::new(RwLock::new(NsLockMap::default())); + let mut ns = new_nslock( + Arc::clone(&ns_lock_map), + "local".to_string(), + "test".to_string(), + vec!["foo".to_string()], + Vec::new(), + ) + .await; + + let result = ns + .get_lock(&Options { + timeout: Duration::from_secs(5), + retry_interval: Duration::from_secs(1), + }) + .await?; + + assert_eq!(result, true); + Ok(()) + } +} diff --git a/common/lock/src/remote_client.rs b/common/lock/src/remote_client.rs new file mode 100644 index 00000000..09118761 --- /dev/null +++ b/common/lock/src/remote_client.rs @@ -0,0 +1,127 @@ +use async_trait::async_trait; +use common::error::{Error, Result}; +use protos::proto_gen::node_service::{node_service_client::NodeServiceClient, GenerallyLockRequest}; +use tonic::Request; +use tracing::info; + +use crate::{lock_args::LockArgs, Locker}; + +#[derive(Debug, Clone)] +pub struct RemoteClinet { + url: url::Url, +} + +impl RemoteClinet { + pub fn new(url: url::Url) -> Self { + Self { url } + } + + async fn get_client_v2(&self) -> Result> { + // Ok(NodeServiceClient::connect("http://220.181.1.138:9000").await?) + let addr = format!("{}://{}:{}", self.url.scheme(), self.url.host_str().unwrap(), self.url.port().unwrap()); + Ok(NodeServiceClient::connect(addr).await?) 
+ } +} + +#[async_trait] +impl Locker for RemoteClinet { + async fn lock(&mut self, args: &LockArgs) -> Result { + info!("remote lock"); + let args = serde_json::to_string(args)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(GenerallyLockRequest { args }); + + let response = client.lock(request).await?.into_inner(); + + if let Some(error_info) = response.error_info { + return Err(Error::from_string(error_info)); + } + + Ok(response.success) + } + + async fn unlock(&mut self, args: &LockArgs) -> Result { + info!("remote unlock"); + let args = serde_json::to_string(args)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(GenerallyLockRequest { args }); + + let response = client.un_lock(request).await?.into_inner(); + + if let Some(error_info) = response.error_info { + return Err(Error::from_string(error_info)); + } + + Ok(response.success) + } + + async fn rlock(&mut self, args: &LockArgs) -> Result { + info!("remote rlock"); + let args = serde_json::to_string(args)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(GenerallyLockRequest { args }); + + let response = client.r_lock(request).await?.into_inner(); + + if let Some(error_info) = response.error_info { + return Err(Error::from_string(error_info)); + } + + Ok(response.success) + } + + async fn runlock(&mut self, args: &LockArgs) -> Result { + info!("remote runlock"); + let args = serde_json::to_string(args)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(GenerallyLockRequest { args }); + + let response = client.r_un_lock(request).await?.into_inner(); + + if let Some(error_info) = response.error_info { + return Err(Error::from_string(error_info)); + } + + Ok(response.success) + } + + async fn force_unlock(&mut self, args: &LockArgs) -> Result { + info!("remote force_unlock"); + let args = serde_json::to_string(args)?; + let mut client = self.get_client_v2().await?; + let request = 
Request::new(GenerallyLockRequest { args }); + + let response = client.force_un_lock(request).await?.into_inner(); + + if let Some(error_info) = response.error_info { + return Err(Error::from_string(error_info)); + } + + Ok(response.success) + } + + async fn refresh(&mut self, args: &LockArgs) -> Result { + info!("remote refresh"); + let args = serde_json::to_string(args)?; + let mut client = self.get_client_v2().await?; + let request = Request::new(GenerallyLockRequest { args }); + + let response = client.refresh(request).await?.into_inner(); + + if let Some(error_info) = response.error_info { + return Err(Error::from_string(error_info)); + } + + Ok(response.success) + } + + async fn is_local(&self) -> bool { + false + } + + async fn close(&self) {} + + async fn is_online(&self) -> bool { + true + } +} diff --git a/common/protos/src/generated/flatbuffers_generated/models.rs b/common/protos/src/generated/flatbuffers_generated/models.rs index aa1f6ae2..e4949fdc 100644 --- a/common/protos/src/generated/flatbuffers_generated/models.rs +++ b/common/protos/src/generated/flatbuffers_generated/models.rs @@ -1,10 +1,9 @@ // automatically generated by the FlatBuffers compiler, do not modify - // @generated -use core::mem; use core::cmp::Ordering; +use core::mem; extern crate flatbuffers; use self::flatbuffers::{EndianScalar, Follow}; @@ -12,112 +11,114 @@ use self::flatbuffers::{EndianScalar, Follow}; #[allow(unused_imports, dead_code)] pub mod models { - use core::mem; - use core::cmp::Ordering; + use core::cmp::Ordering; + use core::mem; - extern crate flatbuffers; - use self::flatbuffers::{EndianScalar, Follow}; + extern crate flatbuffers; + use self::flatbuffers::{EndianScalar, Follow}; -pub enum PingBodyOffset {} -#[derive(Copy, Clone, PartialEq)] + pub enum PingBodyOffset {} + #[derive(Copy, Clone, PartialEq)] -pub struct PingBody<'a> { - pub _tab: flatbuffers::Table<'a>, -} - -impl<'a> flatbuffers::Follow<'a> for PingBody<'a> { - type Inner = PingBody<'a>; - 
#[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - Self { _tab: flatbuffers::Table::new(buf, loc) } - } -} - -impl<'a> PingBody<'a> { - pub const VT_PAYLOAD: flatbuffers::VOffsetT = 4; - - pub const fn get_fully_qualified_name() -> &'static str { - "models.PingBody" - } - - #[inline] - pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { - PingBody { _tab: table } - } - #[allow(unused_mut)] - pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>( - _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, - args: &'args PingBodyArgs<'args> - ) -> flatbuffers::WIPOffset> { - let mut builder = PingBodyBuilder::new(_fbb); - if let Some(x) = args.payload { builder.add_payload(x); } - builder.finish() - } - - - #[inline] - pub fn payload(&self) -> Option> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::>>(PingBody::VT_PAYLOAD, None)} - } -} - -impl flatbuffers::Verifiable for PingBody<'_> { - #[inline] - fn run_verifier( - v: &mut flatbuffers::Verifier, pos: usize - ) -> Result<(), flatbuffers::InvalidFlatbuffer> { - use self::flatbuffers::Verifiable; - v.visit_table(pos)? - .visit_field::>>("payload", Self::VT_PAYLOAD, false)? 
- .finish(); - Ok(()) - } -} -pub struct PingBodyArgs<'a> { - pub payload: Option>>, -} -impl<'a> Default for PingBodyArgs<'a> { - #[inline] - fn default() -> Self { - PingBodyArgs { - payload: None, + pub struct PingBody<'a> { + pub _tab: flatbuffers::Table<'a>, } - } -} -pub struct PingBodyBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { - fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, - start_: flatbuffers::WIPOffset, -} -impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> PingBodyBuilder<'a, 'b, A> { - #[inline] - pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset>) { - self.fbb_.push_slot_always::>(PingBody::VT_PAYLOAD, payload); - } - #[inline] - pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> PingBodyBuilder<'a, 'b, A> { - let start = _fbb.start_table(); - PingBodyBuilder { - fbb_: _fbb, - start_: start, + impl<'a> flatbuffers::Follow<'a> for PingBody<'a> { + type Inner = PingBody<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } } - } - #[inline] - pub fn finish(self) -> flatbuffers::WIPOffset> { - let o = self.fbb_.end_table(self.start_); - flatbuffers::WIPOffset::new(o.value()) - } -} -impl core::fmt::Debug for PingBody<'_> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let mut ds = f.debug_struct("PingBody"); - ds.field("payload", &self.payload()); - ds.finish() - } -} -} // pub mod models + impl<'a> PingBody<'a> { + pub const VT_PAYLOAD: flatbuffers::VOffsetT = 4; + pub const fn get_fully_qualified_name() -> &'static str { + "models.PingBody" + } + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + PingBody { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args PingBodyArgs<'args>, + ) -> 
flatbuffers::WIPOffset> { + let mut builder = PingBodyBuilder::new(_fbb); + if let Some(x) = args.payload { + builder.add_payload(x); + } + builder.finish() + } + + #[inline] + pub fn payload(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>(PingBody::VT_PAYLOAD, None) + } + } + } + + impl flatbuffers::Verifiable for PingBody<'_> { + #[inline] + fn run_verifier(v: &mut flatbuffers::Verifier, pos: usize) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>>("payload", Self::VT_PAYLOAD, false)? + .finish(); + Ok(()) + } + } + pub struct PingBodyArgs<'a> { + pub payload: Option>>, + } + impl<'a> Default for PingBodyArgs<'a> { + #[inline] + fn default() -> Self { + PingBodyArgs { payload: None } + } + } + + pub struct PingBodyBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> PingBodyBuilder<'a, 'b, A> { + #[inline] + pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset>) { + self.fbb_ + .push_slot_always::>(PingBody::VT_PAYLOAD, payload); + } + #[inline] + pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> PingBodyBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + PingBodyBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for PingBody<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("PingBody"); + ds.field("payload", &self.payload()); + ds.finish() + } + } +} // pub mod models diff --git a/common/protos/src/generated/proto_gen/node_service.rs 
b/common/protos/src/generated/proto_gen/node_service.rs index c2a7041f..53c124e9 100644 --- a/common/protos/src/generated/proto_gen/node_service.rs +++ b/common/protos/src/generated/proto_gen/node_service.rs @@ -473,11 +473,26 @@ pub struct DeleteVolumeResponse { #[prost(string, optional, tag = "2")] pub error_info: ::core::option::Option<::prost::alloc::string::String>, } +/// lock api have same argument type +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GenerallyLockRequest { + #[prost(string, tag = "1")] + pub args: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GenerallyLockResponse { + #[prost(bool, tag = "1")] + pub success: bool, + #[prost(string, optional, tag = "2")] + pub error_info: ::core::option::Option<::prost::alloc::string::String>, +} /// Generated client implementations. pub mod node_service_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; use tonic::codegen::http::Uri; + use tonic::codegen::*; #[derive(Debug, Clone)] pub struct NodeServiceClient { inner: tonic::client::Grpc, @@ -508,22 +523,15 @@ pub mod node_service_client { let inner = tonic::client::Grpc::with_origin(inner, origin); Self { inner } } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> NodeServiceClient> + pub fn with_interceptor(inner: T, interceptor: F) -> NodeServiceClient> where F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< http::Request, - Response = http::Response< - >::ResponseBody, - >, + Response = http::Response<>::ResponseBody>, >, - , - >>::Error: Into + Send + Sync, + >>::Error: Into + Send + Sync, { NodeServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -566,16 +574,9 @@ pub mod node_service_client { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - 
tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/Ping", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Ping"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Ping")); @@ -584,23 +585,13 @@ pub mod node_service_client { pub async fn list_bucket( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/ListBucket", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ListBucket"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ListBucket")); @@ -609,23 +600,13 @@ pub mod node_service_client { pub async fn make_bucket( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - 
"/node_service.NodeService/MakeBucket", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/MakeBucket"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "MakeBucket")); @@ -634,23 +615,13 @@ pub mod node_service_client { pub async fn get_bucket_info( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/GetBucketInfo", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/GetBucketInfo"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "GetBucketInfo")); @@ -659,23 +630,13 @@ pub mod node_service_client { pub async fn delete_bucket( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/DeleteBucket", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteBucket"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", 
"DeleteBucket")); @@ -684,23 +645,13 @@ pub mod node_service_client { pub async fn read_all( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/ReadAll", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadAll"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadAll")); @@ -709,23 +660,13 @@ pub mod node_service_client { pub async fn write_all( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/WriteAll", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/WriteAll"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "WriteAll")); @@ -738,16 +679,9 @@ pub mod node_service_client { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was 
not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/Delete", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Delete"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Delete")); @@ -756,23 +690,13 @@ pub mod node_service_client { pub async fn rename_file( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/RenameFile", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RenameFile"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "RenameFile")); @@ -785,16 +709,9 @@ pub mod node_service_client { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/Write", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Write"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "Write")); @@ -803,23 +720,13 @@ pub mod node_service_client { pub async fn write_stream( &mut self, 
request: impl tonic::IntoStreamingRequest, - ) -> std::result::Result< - tonic::Response>, - tonic::Status, - > { + ) -> std::result::Result>, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/WriteStream", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/WriteStream"); let mut req = request.into_streaming_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "WriteStream")); @@ -829,23 +736,13 @@ pub mod node_service_client { pub async fn read_at( &mut self, request: impl tonic::IntoStreamingRequest, - ) -> std::result::Result< - tonic::Response>, - tonic::Status, - > { + ) -> std::result::Result>, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/ReadAt", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadAt"); let mut req = request.into_streaming_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadAt")); @@ -854,23 +751,13 @@ pub mod node_service_client { pub async fn list_dir( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - 
format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/ListDir", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ListDir"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ListDir")); @@ -879,23 +766,13 @@ pub mod node_service_client { pub async fn walk_dir( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/WalkDir", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/WalkDir"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "WalkDir")); @@ -904,23 +781,13 @@ pub mod node_service_client { pub async fn rename_data( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - 
"/node_service.NodeService/RenameData", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RenameData"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "RenameData")); @@ -929,23 +796,13 @@ pub mod node_service_client { pub async fn make_volumes( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/MakeVolumes", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/MakeVolumes"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "MakeVolumes")); @@ -954,23 +811,13 @@ pub mod node_service_client { pub async fn make_volume( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/MakeVolume", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/MakeVolume"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "MakeVolume")); @@ -979,23 
+826,13 @@ pub mod node_service_client { pub async fn list_volumes( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/ListVolumes", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ListVolumes"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ListVolumes")); @@ -1004,23 +841,13 @@ pub mod node_service_client { pub async fn stat_volume( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/StatVolume", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/StatVolume"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "StatVolume")); @@ -1029,23 +856,13 @@ pub mod node_service_client { pub async fn write_metadata( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| 
{ - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/WriteMetadata", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/WriteMetadata"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "WriteMetadata")); @@ -1054,23 +871,13 @@ pub mod node_service_client { pub async fn read_version( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/ReadVersion", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadVersion"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadVersion")); @@ -1083,16 +890,9 @@ pub mod node_service_client { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/ReadXL", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadXL"); let 
mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadXL")); @@ -1101,23 +901,13 @@ pub mod node_service_client { pub async fn delete_versions( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/DeleteVersions", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteVersions"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeleteVersions")); @@ -1126,23 +916,13 @@ pub mod node_service_client { pub async fn read_multiple( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/ReadMultiple", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadMultiple"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "ReadMultiple")); @@ -1151,28 +931,108 @@ pub mod node_service_client { pub async fn delete_volume( &mut self, request: impl tonic::IntoRequest, - ) 
-> std::result::Result< - tonic::Response, - tonic::Status, - > { + ) -> std::result::Result, tonic::Status> { self.inner .ready() .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/node_service.NodeService/DeleteVolume", - ); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/DeleteVolume"); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("node_service.NodeService", "DeleteVolume")); self.inner.unary(req, path, codec).await } + pub async fn lock( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/Lock"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "Lock")); + self.inner.unary(req, path, codec).await + } + pub async fn un_lock( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/UnLock"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "UnLock")); + self.inner.unary(req, path, codec).await + } + pub async fn r_lock( + &mut self, + request: impl tonic::IntoRequest, + ) -> 
std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RLock"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "RLock")); + self.inner.unary(req, path, codec).await + } + pub async fn r_un_lock( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/RUnLock"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "RUnLock")); + self.inner.unary(req, path, codec).await + } + pub async fn force_un_lock( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ForceUnLock"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "ForceUnLock")); + self.inner.unary(req, path, codec).await + } + pub async fn refresh( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into())))?; + let codec = tonic::codec::ProstCodec::default(); + let path = 
http::uri::PathAndQuery::from_static("/node_service.NodeService/Refresh"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("node_service.NodeService", "Refresh")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -1190,31 +1050,19 @@ pub mod node_service_server { async fn list_bucket( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn make_bucket( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn get_bucket_info( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn delete_bucket( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn read_all( &self, request: tonic::Request, @@ -1222,10 +1070,7 @@ pub mod node_service_server { async fn write_all( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn delete( &self, request: tonic::Request, @@ -1233,31 +1078,21 @@ pub mod node_service_server { async fn rename_file( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn write( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the WriteStream method. 
- type WriteStreamStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result, - > + type WriteStreamStream: tonic::codegen::tokio_stream::Stream> + Send + 'static; async fn write_stream( &self, request: tonic::Request>, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the ReadAt method. - type ReadAtStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result, - > + type ReadAtStream: tonic::codegen::tokio_stream::Stream> + Send + 'static; /// rpc Append(AppendRequest) returns (AppendResponse) {}; @@ -1276,52 +1111,31 @@ pub mod node_service_server { async fn rename_data( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn make_volumes( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn make_volume( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn list_volumes( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn stat_volume( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn write_metadata( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn read_version( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn read_xl( &self, request: tonic::Request, @@ -1329,24 +1143,39 @@ pub mod node_service_server { async fn delete_versions( 
&self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn read_multiple( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn delete_volume( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; + async fn lock( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn un_lock( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn r_lock( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn r_un_lock( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn force_un_lock( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn refresh( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; } #[derive(Debug)] pub struct NodeServiceServer { @@ -1369,10 +1198,7 @@ pub mod node_service_server { max_encoding_message_size: None, } } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService where F: tonic::service::Interceptor, { @@ -1416,10 +1242,7 @@ pub mod node_service_server { type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { @@ -1427,21 +1250,12 @@ pub mod node_service_server { "/node_service.NodeService/Ping" => { #[allow(non_camel_case_types)] struct PingSvc(pub Arc); - impl tonic::server::UnaryService - for PingSvc { + impl 
tonic::server::UnaryService for PingSvc { type Response = super::PingResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::ping(&inner, request).await - }; + let fut = async move { ::ping(&inner, request).await }; Box::pin(fut) } } @@ -1454,14 +1268,8 @@ pub mod node_service_server { let method = PingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -1470,23 +1278,12 @@ pub mod node_service_server { "/node_service.NodeService/ListBucket" => { #[allow(non_camel_case_types)] struct ListBucketSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for ListBucketSvc { + impl tonic::server::UnaryService for ListBucketSvc { type Response = super::ListBucketResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::list_bucket(&inner, request).await - }; + let fut = async move { ::list_bucket(&inner, request).await }; Box::pin(fut) } } @@ -1499,14 +1296,8 @@ pub mod node_service_server { let method = ListBucketSvc(inner); let codec = 
tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -1515,23 +1306,12 @@ pub mod node_service_server { "/node_service.NodeService/MakeBucket" => { #[allow(non_camel_case_types)] struct MakeBucketSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for MakeBucketSvc { + impl tonic::server::UnaryService for MakeBucketSvc { type Response = super::MakeBucketResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::make_bucket(&inner, request).await - }; + let fut = async move { ::make_bucket(&inner, request).await }; Box::pin(fut) } } @@ -1544,14 +1324,8 @@ pub mod node_service_server { let method = MakeBucketSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -1560,23 +1334,12 @@ pub mod node_service_server { "/node_service.NodeService/GetBucketInfo" => { 
#[allow(non_camel_case_types)] struct GetBucketInfoSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for GetBucketInfoSvc { + impl tonic::server::UnaryService for GetBucketInfoSvc { type Response = super::GetBucketInfoResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::get_bucket_info(&inner, request).await - }; + let fut = async move { ::get_bucket_info(&inner, request).await }; Box::pin(fut) } } @@ -1589,14 +1352,8 @@ pub mod node_service_server { let method = GetBucketInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -1605,23 +1362,12 @@ pub mod node_service_server { "/node_service.NodeService/DeleteBucket" => { #[allow(non_camel_case_types)] struct DeleteBucketSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for DeleteBucketSvc { + impl tonic::server::UnaryService for DeleteBucketSvc { type Response = super::DeleteBucketResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::delete_bucket(&inner, request).await - }; + 
let fut = async move { ::delete_bucket(&inner, request).await }; Box::pin(fut) } } @@ -1634,14 +1380,8 @@ pub mod node_service_server { let method = DeleteBucketSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -1650,23 +1390,12 @@ pub mod node_service_server { "/node_service.NodeService/ReadAll" => { #[allow(non_camel_case_types)] struct ReadAllSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for ReadAllSvc { + impl tonic::server::UnaryService for ReadAllSvc { type Response = super::ReadAllResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::read_all(&inner, request).await - }; + let fut = async move { ::read_all(&inner, request).await }; Box::pin(fut) } } @@ -1679,14 +1408,8 @@ pub mod node_service_server { let method = ReadAllSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = 
grpc.unary(method, req).await; Ok(res) }; @@ -1695,23 +1418,12 @@ pub mod node_service_server { "/node_service.NodeService/WriteAll" => { #[allow(non_camel_case_types)] struct WriteAllSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for WriteAllSvc { + impl tonic::server::UnaryService for WriteAllSvc { type Response = super::WriteAllResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::write_all(&inner, request).await - }; + let fut = async move { ::write_all(&inner, request).await }; Box::pin(fut) } } @@ -1724,14 +1436,8 @@ pub mod node_service_server { let method = WriteAllSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -1740,23 +1446,12 @@ pub mod node_service_server { "/node_service.NodeService/Delete" => { #[allow(non_camel_case_types)] struct DeleteSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for DeleteSvc { + impl tonic::server::UnaryService for DeleteSvc { type Response = super::DeleteResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - 
let fut = async move { - ::delete(&inner, request).await - }; + let fut = async move { ::delete(&inner, request).await }; Box::pin(fut) } } @@ -1769,14 +1464,8 @@ pub mod node_service_server { let method = DeleteSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -1785,23 +1474,12 @@ pub mod node_service_server { "/node_service.NodeService/RenameFile" => { #[allow(non_camel_case_types)] struct RenameFileSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for RenameFileSvc { + impl tonic::server::UnaryService for RenameFileSvc { type Response = super::RenameFileResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::rename_file(&inner, request).await - }; + let fut = async move { ::rename_file(&inner, request).await }; Box::pin(fut) } } @@ -1814,14 +1492,8 @@ pub mod node_service_server { let method = RenameFileSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + 
.apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -1830,21 +1502,12 @@ pub mod node_service_server { "/node_service.NodeService/Write" => { #[allow(non_camel_case_types)] struct WriteSvc(pub Arc); - impl tonic::server::UnaryService - for WriteSvc { + impl tonic::server::UnaryService for WriteSvc { type Response = super::WriteResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::write(&inner, request).await - }; + let fut = async move { ::write(&inner, request).await }; Box::pin(fut) } } @@ -1857,14 +1520,8 @@ pub mod node_service_server { let method = WriteSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -1873,26 +1530,13 @@ pub mod node_service_server { "/node_service.NodeService/WriteStream" => { #[allow(non_camel_case_types)] struct WriteStreamSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::StreamingService - for WriteStreamSvc { + impl tonic::server::StreamingService for WriteStreamSvc { type Response = super::WriteResponse; type ResponseStream = T::WriteStreamStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - tonic::Streaming, - >, - ) -> Self::Future 
{ + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request>) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::write_stream(&inner, request).await - }; + let fut = async move { ::write_stream(&inner, request).await }; Box::pin(fut) } } @@ -1905,14 +1549,8 @@ pub mod node_service_server { let method = WriteStreamSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.streaming(method, req).await; Ok(res) }; @@ -1921,26 +1559,13 @@ pub mod node_service_server { "/node_service.NodeService/ReadAt" => { #[allow(non_camel_case_types)] struct ReadAtSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::StreamingService - for ReadAtSvc { + impl tonic::server::StreamingService for ReadAtSvc { type Response = super::ReadAtResponse; type ResponseStream = T::ReadAtStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - tonic::Streaming, - >, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request>) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::read_at(&inner, request).await - }; + let fut = async move { ::read_at(&inner, request).await }; Box::pin(fut) } } @@ -1953,14 +1578,8 @@ pub mod node_service_server { let method = ReadAtSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - 
.apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.streaming(method, req).await; Ok(res) }; @@ -1969,23 +1588,12 @@ pub mod node_service_server { "/node_service.NodeService/ListDir" => { #[allow(non_camel_case_types)] struct ListDirSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for ListDirSvc { + impl tonic::server::UnaryService for ListDirSvc { type Response = super::ListDirResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::list_dir(&inner, request).await - }; + let fut = async move { ::list_dir(&inner, request).await }; Box::pin(fut) } } @@ -1998,14 +1606,8 @@ pub mod node_service_server { let method = ListDirSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2014,23 +1616,12 @@ pub mod node_service_server { "/node_service.NodeService/WalkDir" => { #[allow(non_camel_case_types)] struct WalkDirSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for WalkDirSvc { + impl tonic::server::UnaryService for WalkDirSvc { type Response = super::WalkDirResponse; - 
type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::walk_dir(&inner, request).await - }; + let fut = async move { ::walk_dir(&inner, request).await }; Box::pin(fut) } } @@ -2043,14 +1634,8 @@ pub mod node_service_server { let method = WalkDirSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2059,23 +1644,12 @@ pub mod node_service_server { "/node_service.NodeService/RenameData" => { #[allow(non_camel_case_types)] struct RenameDataSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for RenameDataSvc { + impl tonic::server::UnaryService for RenameDataSvc { type Response = super::RenameDataResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::rename_data(&inner, request).await - }; + let fut = async move { ::rename_data(&inner, request).await }; Box::pin(fut) } } @@ -2088,14 +1662,8 @@ pub mod node_service_server { let method = RenameDataSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - 
.apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2104,23 +1672,12 @@ pub mod node_service_server { "/node_service.NodeService/MakeVolumes" => { #[allow(non_camel_case_types)] struct MakeVolumesSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for MakeVolumesSvc { + impl tonic::server::UnaryService for MakeVolumesSvc { type Response = super::MakeVolumesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::make_volumes(&inner, request).await - }; + let fut = async move { ::make_volumes(&inner, request).await }; Box::pin(fut) } } @@ -2133,14 +1690,8 @@ pub mod node_service_server { let method = MakeVolumesSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2149,23 +1700,12 @@ pub mod node_service_server { "/node_service.NodeService/MakeVolume" => { #[allow(non_camel_case_types)] struct MakeVolumeSvc(pub Arc); - impl< - T: NodeService, - > 
tonic::server::UnaryService - for MakeVolumeSvc { + impl tonic::server::UnaryService for MakeVolumeSvc { type Response = super::MakeVolumeResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::make_volume(&inner, request).await - }; + let fut = async move { ::make_volume(&inner, request).await }; Box::pin(fut) } } @@ -2178,14 +1718,8 @@ pub mod node_service_server { let method = MakeVolumeSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2194,23 +1728,12 @@ pub mod node_service_server { "/node_service.NodeService/ListVolumes" => { #[allow(non_camel_case_types)] struct ListVolumesSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for ListVolumesSvc { + impl tonic::server::UnaryService for ListVolumesSvc { type Response = super::ListVolumesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::list_volumes(&inner, request).await - }; + let fut = async move { ::list_volumes(&inner, request).await }; Box::pin(fut) } } @@ -2223,14 +1746,8 @@ pub mod 
node_service_server { let method = ListVolumesSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2239,23 +1756,12 @@ pub mod node_service_server { "/node_service.NodeService/StatVolume" => { #[allow(non_camel_case_types)] struct StatVolumeSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for StatVolumeSvc { + impl tonic::server::UnaryService for StatVolumeSvc { type Response = super::StatVolumeResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::stat_volume(&inner, request).await - }; + let fut = async move { ::stat_volume(&inner, request).await }; Box::pin(fut) } } @@ -2268,14 +1774,8 @@ pub mod node_service_server { let method = StatVolumeSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2284,23 +1784,12 @@ pub mod node_service_server { 
"/node_service.NodeService/WriteMetadata" => { #[allow(non_camel_case_types)] struct WriteMetadataSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for WriteMetadataSvc { + impl tonic::server::UnaryService for WriteMetadataSvc { type Response = super::WriteMetadataResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::write_metadata(&inner, request).await - }; + let fut = async move { ::write_metadata(&inner, request).await }; Box::pin(fut) } } @@ -2313,14 +1802,8 @@ pub mod node_service_server { let method = WriteMetadataSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2329,23 +1812,12 @@ pub mod node_service_server { "/node_service.NodeService/ReadVersion" => { #[allow(non_camel_case_types)] struct ReadVersionSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for ReadVersionSvc { + impl tonic::server::UnaryService for ReadVersionSvc { type Response = super::ReadVersionResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - 
::read_version(&inner, request).await - }; + let fut = async move { ::read_version(&inner, request).await }; Box::pin(fut) } } @@ -2358,14 +1830,8 @@ pub mod node_service_server { let method = ReadVersionSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2374,23 +1840,12 @@ pub mod node_service_server { "/node_service.NodeService/ReadXL" => { #[allow(non_camel_case_types)] struct ReadXLSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for ReadXLSvc { + impl tonic::server::UnaryService for ReadXLSvc { type Response = super::ReadXlResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::read_xl(&inner, request).await - }; + let fut = async move { ::read_xl(&inner, request).await }; Box::pin(fut) } } @@ -2403,14 +1858,8 @@ pub mod node_service_server { let method = ReadXLSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, 
max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2419,23 +1868,12 @@ pub mod node_service_server { "/node_service.NodeService/DeleteVersions" => { #[allow(non_camel_case_types)] struct DeleteVersionsSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for DeleteVersionsSvc { + impl tonic::server::UnaryService for DeleteVersionsSvc { type Response = super::DeleteVersionsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::delete_versions(&inner, request).await - }; + let fut = async move { ::delete_versions(&inner, request).await }; Box::pin(fut) } } @@ -2448,14 +1886,8 @@ pub mod node_service_server { let method = DeleteVersionsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2464,23 +1896,12 @@ pub mod node_service_server { "/node_service.NodeService/ReadMultiple" => { #[allow(non_camel_case_types)] struct ReadMultipleSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for ReadMultipleSvc { + impl tonic::server::UnaryService for ReadMultipleSvc { type Response = super::ReadMultipleResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = 
BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::read_multiple(&inner, request).await - }; + let fut = async move { ::read_multiple(&inner, request).await }; Box::pin(fut) } } @@ -2493,14 +1914,8 @@ pub mod node_service_server { let method = ReadMultipleSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; @@ -2509,23 +1924,12 @@ pub mod node_service_server { "/node_service.NodeService/DeleteVolume" => { #[allow(non_camel_case_types)] struct DeleteVolumeSvc(pub Arc); - impl< - T: NodeService, - > tonic::server::UnaryService - for DeleteVolumeSvc { + impl tonic::server::UnaryService for DeleteVolumeSvc { type Response = super::DeleteVolumeResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::delete_volume(&inner, request).await - }; + let fut = async move { ::delete_volume(&inner, request).await }; Box::pin(fut) } } @@ -2538,34 +1942,189 @@ pub mod node_service_server { let method = DeleteVolumeSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - 
max_decoding_message_size, - max_encoding_message_size, - ); + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } - _ => { - Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", tonic::Code::Unimplemented as i32) - .header( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ) - .body(empty_body()) - .unwrap(), - ) - }) + "/node_service.NodeService/Lock" => { + #[allow(non_camel_case_types)] + struct LockSvc(pub Arc); + impl tonic::server::UnaryService for LockSvc { + type Response = super::GenerallyLockResponse; + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { ::lock(&inner, request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = LockSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) } + "/node_service.NodeService/UnLock" => { + #[allow(non_camel_case_types)] + struct UnLockSvc(pub Arc); + impl tonic::server::UnaryService for UnLockSvc { + type Response = super::GenerallyLockResponse; + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> 
Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { ::un_lock(&inner, request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = UnLockSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/RLock" => { + #[allow(non_camel_case_types)] + struct RLockSvc(pub Arc); + impl tonic::server::UnaryService for RLockSvc { + type Response = super::GenerallyLockResponse; + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { ::r_lock(&inner, request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = RLockSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + 
"/node_service.NodeService/RUnLock" => { + #[allow(non_camel_case_types)] + struct RUnLockSvc(pub Arc); + impl tonic::server::UnaryService for RUnLockSvc { + type Response = super::GenerallyLockResponse; + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { ::r_un_lock(&inner, request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = RUnLockSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/ForceUnLock" => { + #[allow(non_camel_case_types)] + struct ForceUnLockSvc(pub Arc); + impl tonic::server::UnaryService for ForceUnLockSvc { + type Response = super::GenerallyLockResponse; + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { ::force_un_lock(&inner, request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ForceUnLockSvc(inner); + let codec = 
tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/node_service.NodeService/Refresh" => { + #[allow(non_camel_case_types)] + struct RefreshSvc(pub Arc); + impl tonic::server::UnaryService for RefreshSvc { + type Response = super::GenerallyLockResponse; + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { ::refresh(&inner, request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = RefreshSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config(accept_compression_encodings, send_compression_encodings) + .apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => Box::pin(async move { + Ok(http::Response::builder() + .status(200) + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header(http::header::CONTENT_TYPE, tonic::metadata::GRPC_CONTENT_TYPE) + .body(empty_body()) + .unwrap()) + }), } } } diff --git a/common/protos/src/node.proto b/common/protos/src/node.proto index c6f43499..d34fbca2 100644 --- a/common/protos/src/node.proto +++ b/common/protos/src/node.proto @@ -292,6 +292,16 @@ message DeleteVolumeResponse { optional string error_info = 
2; } +// lock api have same argument type +message GenerallyLockRequest { + string args = 1; +} + +message GenerallyLockResponse { + bool success = 1; + optional string error_info = 2; +} + /* -------------------------------------------------------------------- */ service NodeService { @@ -325,4 +335,13 @@ service NodeService { rpc DeleteVersions(DeleteVersionsRequest) returns (DeleteVersionsResponse) {}; rpc ReadMultiple(ReadMultipleRequest) returns (ReadMultipleResponse) {}; rpc DeleteVolume(DeleteVolumeRequest) returns (DeleteVolumeResponse) {}; + +/* -------------------------------lock service-------------------------- */ + + rpc Lock(GenerallyLockRequest) returns (GenerallyLockResponse) {}; + rpc UnLock(GenerallyLockRequest) returns (GenerallyLockResponse) {}; + rpc RLock(GenerallyLockRequest) returns (GenerallyLockResponse) {}; + rpc RUnLock(GenerallyLockRequest) returns (GenerallyLockResponse) {}; + rpc ForceUnLock(GenerallyLockRequest) returns (GenerallyLockResponse) {}; + rpc Refresh(GenerallyLockRequest) returns (GenerallyLockResponse) {}; } diff --git a/ecstore/Cargo.toml b/ecstore/Cargo.toml index ada0fc7a..58a88d46 100644 --- a/ecstore/Cargo.toml +++ b/ecstore/Cargo.toml @@ -9,22 +9,24 @@ rust-version.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +async-trait.workspace = true backon.workspace = true bytes.workspace = true thiserror.workspace = true futures.workspace = true -async-trait.workspace = true +common.workspace = true tracing.workspace = true serde.workspace = true time.workspace = true serde_json.workspace = true tracing-error.workspace = true http.workspace = true -url = "2.5.2" +url.workspace = true uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] } reed-solomon-erasure = "6.0.0" transform-stream = "0.3.0" -lazy_static = "1.5.0" +lazy_static.workspace = true +lock.workspace = true regex = "1.10.5" netif = "0.1.6" path-absolutize = "3.1.1" 
diff --git a/ecstore/src/bucket_meta.rs b/ecstore/src/bucket_meta.rs index aedeb9cf..6d280449 100644 --- a/ecstore/src/bucket_meta.rs +++ b/ecstore/src/bucket_meta.rs @@ -2,7 +2,7 @@ use rmp_serde::Serializer; use serde::{Deserialize, Serialize}; use time::OffsetDateTime; -use crate::error::Result; +use common::error::Result; use crate::disk::BUCKET_META_PREFIX; diff --git a/ecstore/src/chunk_stream.rs b/ecstore/src/chunk_stream.rs index 96930ef3..531448c7 100644 --- a/ecstore/src/chunk_stream.rs +++ b/ecstore/src/chunk_stream.rs @@ -1,5 +1,5 @@ -use crate::error::StdError; use bytes::Bytes; +use common::error::StdError; use futures::pin_mut; use futures::stream::{Stream, StreamExt}; use std::future::Future; diff --git a/ecstore/src/disk/endpoint.rs b/ecstore/src/disk/endpoint.rs index 78ae38a2..71c6c65f 100644 --- a/ecstore/src/disk/endpoint.rs +++ b/ecstore/src/disk/endpoint.rs @@ -1,5 +1,5 @@ -use crate::error::{Error, Result}; use crate::utils::net; +use common::error::{Error, Result}; use path_absolutize::Absolutize; use path_clean::PathClean; use std::{fmt::Display, path::Path}; diff --git a/ecstore/src/disk/error.rs b/ecstore/src/disk/error.rs index 0b5e3630..97dd81a6 100644 --- a/ecstore/src/disk/error.rs +++ b/ecstore/src/disk/error.rs @@ -1,4 +1,4 @@ -use crate::error::{Error, Result}; +use common::error::{Error, Result}; #[derive(Debug, thiserror::Error)] pub enum DiskError { diff --git a/ecstore/src/disk/format.rs b/ecstore/src/disk/format.rs index 0c37e086..a1719239 100644 --- a/ecstore/src/disk/format.rs +++ b/ecstore/src/disk/format.rs @@ -1,5 +1,5 @@ use super::error::DiskError; -use crate::error::{Error, Result}; +use common::error::{Error, Result}; use serde::{Deserialize, Serialize}; use serde_json::Error as JsonError; use uuid::Uuid; diff --git a/ecstore/src/disk/local.rs b/ecstore/src/disk/local.rs index e2c349cf..4f45d221 100644 --- a/ecstore/src/disk/local.rs +++ b/ecstore/src/disk/local.rs @@ -5,12 +5,12 @@ use super::{ }; use 
crate::disk::{LocalFileReader, LocalFileWriter, STORAGE_FORMAT_FILE}; use crate::{ - error::{Error, Result}, file_meta::FileMeta, store_api::{FileInfo, RawFileInfo}, utils, }; use bytes::Bytes; +use common::error::{Error, Result}; use path_absolutize::Absolutize; use std::{ fs::Metadata, diff --git a/ecstore/src/disk/mod.rs b/ecstore/src/disk/mod.rs index 1eb98097..78f07805 100644 --- a/ecstore/src/disk/mod.rs +++ b/ecstore/src/disk/mod.rs @@ -2,7 +2,7 @@ pub mod endpoint; pub mod error; pub mod format; mod local; -mod remote; +pub mod remote; pub const RUSTFS_META_BUCKET: &str = ".rustfs.sys"; pub const RUSTFS_META_MULTIPART_BUCKET: &str = ".rustfs.sys/multipart"; @@ -14,11 +14,11 @@ const STORAGE_FORMAT_FILE: &str = "xl.meta"; use crate::{ erasure::{ReadAt, Write}, - error::{Error, Result}, file_meta::FileMeta, store_api::{FileInfo, RawFileInfo}, }; use bytes::Bytes; +use common::error::{Error, Result}; use futures::StreamExt; use protos::proto_gen::node_service::{ node_service_client::NodeServiceClient, ReadAtRequest, ReadAtResponse, WriteRequest, WriteResponse, diff --git a/ecstore/src/disk/remote.rs b/ecstore/src/disk/remote.rs index 165df90f..8516e31f 100644 --- a/ecstore/src/disk/remote.rs +++ b/ecstore/src/disk/remote.rs @@ -1,6 +1,7 @@ use std::{path::PathBuf, sync::Arc, time::Duration}; use bytes::Bytes; +use common::error::{Error, Result}; use futures::lock::Mutex; use protos::{ node_service_time_out_client, @@ -12,7 +13,7 @@ use protos::{ }, DEFAULT_GRPC_SERVER_MESSAGE_LEN, }; -use tokio::{fs, sync::RwLock}; +use tokio::sync::RwLock; use tonic::{ transport::{Channel, Endpoint as tonic_Endpoint}, Request, @@ -23,7 +24,6 @@ use uuid::Uuid; use crate::{ disk::error::DiskError, - error::{Error, Result}, store_api::{FileInfo, RawFileInfo}, }; @@ -35,15 +35,16 @@ use super::{ #[derive(Debug)] pub struct RemoteDisk { - id: Mutex>, - channel: Arc>>, - url: url::Url, + pub id: Mutex>, + pub channel: Arc>>, + pub url: url::Url, pub root: PathBuf, } impl 
RemoteDisk { pub async fn new(ep: &Endpoint, _opt: &DiskOption) -> Result { - let root = fs::canonicalize(ep.url.path()).await?; + // let root = fs::canonicalize(ep.url.path()).await?; + let root = PathBuf::from(ep.url.path()); Ok(Self { channel: Arc::new(RwLock::new(None)), diff --git a/ecstore/src/disks_layout.rs b/ecstore/src/disks_layout.rs index b83c41e8..1d3f3e92 100644 --- a/ecstore/src/disks_layout.rs +++ b/ecstore/src/disks_layout.rs @@ -1,5 +1,5 @@ -use crate::error::{Error, Result}; use crate::utils::ellipses::*; +use common::error::{Error, Result}; use serde::Deserialize; use std::collections::HashSet; diff --git a/ecstore/src/endpoints.rs b/ecstore/src/endpoints.rs index 9af5b963..c6151659 100644 --- a/ecstore/src/endpoints.rs +++ b/ecstore/src/endpoints.rs @@ -1,9 +1,9 @@ use crate::{ disk::endpoint::{Endpoint, EndpointType}, disks_layout::DisksLayout, - error::{Error, Result}, utils::net, }; +use common::error::{Error, Result}; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, net::IpAddr, diff --git a/ecstore/src/erasure.rs b/ecstore/src/erasure.rs index 13cb5af1..af5a91d3 100644 --- a/ecstore/src/erasure.rs +++ b/ecstore/src/erasure.rs @@ -1,5 +1,5 @@ -use crate::error::{Error, Result, StdError}; use bytes::Bytes; +use common::error::{Error, Result, StdError}; use futures::future::join_all; use futures::{Stream, StreamExt}; use reed_solomon_erasure::galois_8::ReedSolomon; diff --git a/ecstore/src/lib.rs b/ecstore/src/lib.rs index 85e1c901..11980984 100644 --- a/ecstore/src/lib.rs +++ b/ecstore/src/lib.rs @@ -4,7 +4,6 @@ pub mod disk; pub mod disks_layout; pub mod endpoints; pub mod erasure; -pub mod error; mod file_meta; pub mod peer; pub mod set_disk; diff --git a/ecstore/src/peer.rs b/ecstore/src/peer.rs index 4588d335..644cc27d 100644 --- a/ecstore/src/peer.rs +++ b/ecstore/src/peer.rs @@ -1,4 +1,5 @@ use async_trait::async_trait; +use common::error::{Error, Result}; use futures::future::join_all; use 
protos::proto_gen::node_service::node_service_client::NodeServiceClient; use protos::proto_gen::node_service::{DeleteBucketRequest, GetBucketInfoRequest, ListBucketRequest, MakeBucketRequest}; @@ -15,7 +16,6 @@ use crate::store::all_local_disk; use crate::{ disk::{self, error::DiskError, VolumeInfo}, endpoints::{EndpointServerPools, Node}, - error::{Error, Result}, store_api::{BucketInfo, BucketOptions, MakeBucketOptions}, }; diff --git a/ecstore/src/sets.rs b/ecstore/src/sets.rs index 4d363595..69aefce4 100644 --- a/ecstore/src/sets.rs +++ b/ecstore/src/sets.rs @@ -1,7 +1,11 @@ -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; +use common::error::{Error, Result}; +use common::globals::GLOBAL_Local_Node_Name; use futures::future::join_all; use http::HeaderMap; +use lock::{namespace_lock::NsLockMap, new_lock_api, LockApi}; +use tokio::sync::RwLock; use uuid::Uuid; use crate::{ @@ -10,7 +14,6 @@ use crate::{ DiskStore, }, endpoints::PoolEndpoints, - error::{Error, Result}, set_disk::SetDisks, store::{GLOBAL_IsDistErasure, GLOBAL_LOCAL_DISK_SET_DRIVES}, store_api::{ @@ -26,6 +29,7 @@ pub struct Sets { // pub sets: Vec, // pub disk_set: Vec>>, // [set_count_idx][set_drive_count_idx] = disk_idx pub disk_set: Vec, // [set_count_idx][set_drive_count_idx] = disk_idx + pub lockers: Vec>, pub pool_idx: usize, pub endpoints: PoolEndpoints, pub format: FormatV3, @@ -46,6 +50,20 @@ impl Sets { let set_count = fm.erasure.sets.len(); let set_drive_count = fm.erasure.sets[0].len(); + let mut unique: Vec> = vec![vec![]; set_count]; + let mut lockers: Vec> = vec![vec![]; set_count]; + endpoints.endpoints.as_ref().iter().enumerate().for_each(|(idx, endpoint)| { + let set_idx = idx / set_drive_count; + if !unique[set_idx].contains(&endpoint.url.host_str().unwrap().to_string()) { + if endpoint.is_local { + unique[set_idx].push(endpoint.url.host_str().unwrap().to_string()); + lockers[set_idx].push(new_lock_api(true, None)); + } else { + 
lockers[set_idx].push(new_lock_api(false, Some(endpoint.url.clone()))); + } + } + }); + let mut disk_set = Vec::with_capacity(set_count); for i in 0..set_count { @@ -83,6 +101,9 @@ impl Sets { let set_disks = SetDisks { disks: set_drive, + lockers: lockers[i].clone(), + locker_owner: GLOBAL_Local_Node_Name.read().await.to_string(), + ns_mutex: Arc::new(RwLock::new(NsLockMap::default())), set_drive_count, parity_count: partiy_count, set_index: i, @@ -96,6 +117,7 @@ impl Sets { id: fm.id, // sets: todo!(), disk_set, + lockers, pool_idx, endpoints: endpoints.clone(), format: fm.clone(), diff --git a/ecstore/src/store.rs b/ecstore/src/store.rs index 41219bf7..58b924e3 100644 --- a/ecstore/src/store.rs +++ b/ecstore/src/store.rs @@ -1,10 +1,10 @@ use crate::{ bucket_meta::BucketMetadata, disk::{ - error::DiskError, new_disk, DeleteOptions, DiskOption, DiskStore, WalkDirOptions, BUCKET_META_PREFIX, RUSTFS_META_BUCKET, + endpoint::EndpointType, error::DiskError, new_disk, DeleteOptions, DiskOption, DiskStore, WalkDirOptions, + BUCKET_META_PREFIX, RUSTFS_META_BUCKET, }, endpoints::{EndpointServerPools, SetupType}, - error::{Error, Result}, peer::S3PeerSys, sets::Sets, store_api::{ @@ -15,6 +15,8 @@ use crate::{ store_init, utils, }; use backon::{ExponentialBuilder, Retryable}; +use common::error::{Error, Result}; +use common::globals::{GLOBAL_Local_Node_Name, GLOBAL_Rustfs_Host, GLOBAL_Rustfs_Port}; use futures::future::join_all; use http::HeaderMap; use s3s::{dto::StreamingBlob, Body}; @@ -170,6 +172,13 @@ impl ECStore { let mut local_disks = Vec::new(); + init_local_peer( + &endpoint_pools, + &GLOBAL_Rustfs_Host.read().await.to_string(), + &GLOBAL_Rustfs_Port.read().await.to_string(), + ) + .await; + info!("endpoint_pools: {:?}", endpoint_pools); for (i, pool_eps) in endpoint_pools.as_ref().iter().enumerate() { @@ -791,3 +800,24 @@ impl StorageAPI for ECStore { Ok(()) } } + +async fn init_local_peer(endpoint_pools: &EndpointServerPools, host: &String, port: &String) { + 
let mut peer_set = Vec::new(); + endpoint_pools.as_ref().iter().for_each(|endpoints| { + endpoints.endpoints.as_ref().iter().for_each(|endpoint| { + if endpoint.get_type() == EndpointType::Url && endpoint.is_local && endpoint.url.has_host() { + peer_set.push(endpoint.url.host_str().unwrap().to_string()); + } + }); + }); + + if peer_set.is_empty() { + if !host.is_empty() { + *GLOBAL_Local_Node_Name.write().await = format!("{}:{}", host, port); + } + + *GLOBAL_Local_Node_Name.write().await = format!("127.0.0.1:{}", port); + } + + *GLOBAL_Local_Node_Name.write().await = peer_set[0].clone(); +} diff --git a/ecstore/src/store_api.rs b/ecstore/src/store_api.rs index 2cbe8005..e6bf97fa 100644 --- a/ecstore/src/store_api.rs +++ b/ecstore/src/store_api.rs @@ -1,4 +1,4 @@ -use crate::error::{Error, Result}; +use common::error::{Error, Result}; use http::HeaderMap; use rmp_serde::Serializer; use s3s::dto::StreamingBlob; @@ -366,6 +366,7 @@ pub struct ObjectOptions { pub delete_prefix: bool, pub version_id: String, + pub no_lock: bool, } // impl Default for ObjectOptions { diff --git a/ecstore/src/store_init.rs b/ecstore/src/store_init.rs index 6e6eb2fc..32a09251 100644 --- a/ecstore/src/store_init.rs +++ b/ecstore/src/store_init.rs @@ -5,8 +5,8 @@ use crate::{ new_disk, DiskOption, DiskStore, FORMAT_CONFIG_FILE, RUSTFS_META_BUCKET, }, endpoints::Endpoints, - error::{Error, Result}, }; +use common::error::{Error, Result}; use futures::future::join_all; use std::{ collections::{hash_map::Entry, HashMap}, diff --git a/ecstore/src/utils/ellipses.rs b/ecstore/src/utils/ellipses.rs index 9badb80c..fcd10989 100644 --- a/ecstore/src/utils/ellipses.rs +++ b/ecstore/src/utils/ellipses.rs @@ -1,4 +1,4 @@ -use crate::error::{Error, Result}; +use common::error::{Error, Result}; use lazy_static::*; use regex::Regex; diff --git a/ecstore/src/utils/net.rs b/ecstore/src/utils/net.rs index 91f453e8..3f1f96df 100644 --- a/ecstore/src/utils/net.rs +++ b/ecstore/src/utils/net.rs @@ -1,4 +1,4 @@ 
-use crate::error::{Error, Result}; +use common::error::{Error, Result}; use lazy_static::lazy_static; use std::{ collections::HashSet, diff --git a/rustfs/Cargo.toml b/rustfs/Cargo.toml index 60a6e2ea..0fe06b4b 100644 --- a/rustfs/Cargo.toml +++ b/rustfs/Cargo.toml @@ -12,6 +12,7 @@ rust-version.workspace = true async-trait.workspace = true bytes.workspace = true clap.workspace = true +common.workspace = true ecstore.workspace = true flatbuffers.workspace = true futures.workspace = true @@ -21,6 +22,7 @@ hyper.workspace = true hyper-util.workspace = true http.workspace = true http-body.workspace = true +lock.workspace = true mime.workspace = true netif.workspace = true pin-project-lite.workspace = true diff --git a/rustfs/src/grpc.rs b/rustfs/src/grpc.rs index 8d849c06..3b3d8c7d 100644 --- a/rustfs/src/grpc.rs +++ b/rustfs/src/grpc.rs @@ -8,6 +8,7 @@ use ecstore::{ store_api::{BucketOptions, FileInfo, MakeBucketOptions}, }; use futures::{Stream, StreamExt}; +use lock::{lock_args::LockArgs, Locker, GLOBAL_LOCAL_SERVER}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status, Streaming}; @@ -18,13 +19,14 @@ use protos::{ proto_gen::node_service::{ node_service_server::{NodeService as Node, NodeServiceServer as NodeServer}, DeleteBucketRequest, DeleteBucketResponse, DeleteRequest, DeleteResponse, DeleteVersionsRequest, DeleteVersionsResponse, - DeleteVolumeRequest, DeleteVolumeResponse, GetBucketInfoRequest, GetBucketInfoResponse, ListBucketRequest, - ListBucketResponse, ListDirRequest, ListDirResponse, ListVolumesRequest, ListVolumesResponse, MakeBucketRequest, - MakeBucketResponse, MakeVolumeRequest, MakeVolumeResponse, MakeVolumesRequest, MakeVolumesResponse, PingRequest, - PingResponse, ReadAllRequest, ReadAllResponse, ReadAtRequest, ReadAtResponse, ReadMultipleRequest, ReadMultipleResponse, - ReadVersionRequest, ReadVersionResponse, ReadXlRequest, ReadXlResponse, RenameDataRequest, RenameDataResponse, - 
RenameFileRequst, RenameFileResponse, StatVolumeRequest, StatVolumeResponse, WalkDirRequest, WalkDirResponse, - WriteAllRequest, WriteAllResponse, WriteMetadataRequest, WriteMetadataResponse, WriteRequest, WriteResponse, + DeleteVolumeRequest, DeleteVolumeResponse, GenerallyLockRequest, GenerallyLockResponse, GetBucketInfoRequest, + GetBucketInfoResponse, ListBucketRequest, ListBucketResponse, ListDirRequest, ListDirResponse, ListVolumesRequest, + ListVolumesResponse, MakeBucketRequest, MakeBucketResponse, MakeVolumeRequest, MakeVolumeResponse, MakeVolumesRequest, + MakeVolumesResponse, PingRequest, PingResponse, ReadAllRequest, ReadAllResponse, ReadAtRequest, ReadAtResponse, + ReadMultipleRequest, ReadMultipleResponse, ReadVersionRequest, ReadVersionResponse, ReadXlRequest, ReadXlResponse, + RenameDataRequest, RenameDataResponse, RenameFileRequst, RenameFileResponse, StatVolumeRequest, StatVolumeResponse, + WalkDirRequest, WalkDirResponse, WriteAllRequest, WriteAllResponse, WriteMetadataRequest, WriteMetadataResponse, + WriteRequest, WriteResponse, }, }; @@ -55,11 +57,10 @@ fn match_for_io_error(err_status: &Status) -> Option<&std::io::Error> { #[derive(Debug)] struct NodeService { - pub local_peer: LocalPeerS3Client, + local_peer: LocalPeerS3Client, } pub fn make_server() -> NodeServer { - // let local_disks = all_local_disk().await; let local_peer = LocalPeerS3Client::new(None, None); NodeServer::new(NodeService { local_peer }) } @@ -67,20 +68,10 @@ pub fn make_server() -> NodeServer { impl NodeService { async fn find_disk(&self, disk_path: &String) -> Option { find_local_disk(disk_path).await - // let disk_path = match fs::canonicalize(disk_path).await { - // Ok(disk_path) => disk_path, - // Err(_) => return None, - // }; - // self.local_peer.local_disks.iter().find(|&x| x.path() == disk_path).cloned() } async fn all_disk(&self) -> Vec { all_local_disk_path().await - // self.local_peer - // .local_disks - // .iter() - // .map(|disk| 
disk.path().to_string_lossy().to_string()) - // .collect() } } @@ -982,4 +973,124 @@ impl Node for NodeService { })) } } + + async fn lock(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + match &serde_json::from_str::(&request.args) { + Ok(args) => match GLOBAL_LOCAL_SERVER.write().await.lock(args).await { + Ok(result) => Ok(tonic::Response::new(GenerallyLockResponse { + success: result, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(GenerallyLockResponse { + success: false, + error_info: Some(format!("can not lock, args: {}, err: {}", args, err.to_string())), + })), + }, + Err(err) => Ok(tonic::Response::new(GenerallyLockResponse { + success: false, + error_info: Some(format!("can not decode args, err: {}", err.to_string())), + })), + } + } + + async fn un_lock(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + match &serde_json::from_str::(&request.args) { + Ok(args) => match GLOBAL_LOCAL_SERVER.write().await.unlock(args).await { + Ok(result) => Ok(tonic::Response::new(GenerallyLockResponse { + success: result, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(GenerallyLockResponse { + success: false, + error_info: Some(format!("can not unlock, args: {}, err: {}", args, err.to_string())), + })), + }, + Err(err) => Ok(tonic::Response::new(GenerallyLockResponse { + success: false, + error_info: Some(format!("can not decode args, err: {}", err.to_string())), + })), + } + } + + async fn r_lock(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + match &serde_json::from_str::(&request.args) { + Ok(args) => match GLOBAL_LOCAL_SERVER.write().await.rlock(args).await { + Ok(result) => Ok(tonic::Response::new(GenerallyLockResponse { + success: result, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(GenerallyLockResponse { + success: false, + error_info: Some(format!("can not rlock, args: {}, err: {}", args, 
err.to_string())), + })), + }, + Err(err) => Ok(tonic::Response::new(GenerallyLockResponse { + success: false, + error_info: Some(format!("can not decode args, err: {}", err.to_string())), + })), + } + } + + async fn r_un_lock(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + match &serde_json::from_str::(&request.args) { + Ok(args) => match GLOBAL_LOCAL_SERVER.write().await.runlock(args).await { + Ok(result) => Ok(tonic::Response::new(GenerallyLockResponse { + success: result, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(GenerallyLockResponse { + success: false, + error_info: Some(format!("can not runlock, args: {}, err: {}", args, err.to_string())), + })), + }, + Err(err) => Ok(tonic::Response::new(GenerallyLockResponse { + success: false, + error_info: Some(format!("can not decode args, err: {}", err.to_string())), + })), + } + } + + async fn force_un_lock(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + match &serde_json::from_str::(&request.args) { + Ok(args) => match GLOBAL_LOCAL_SERVER.write().await.force_unlock(args).await { + Ok(result) => Ok(tonic::Response::new(GenerallyLockResponse { + success: result, + error_info: None, + })), + Err(err) => Ok(tonic::Response::new(GenerallyLockResponse { + success: false, + error_info: Some(format!("can not force_unlock, args: {}, err: {}", args, err.to_string())), + })), + }, + Err(err) => Ok(tonic::Response::new(GenerallyLockResponse { + success: false, + error_info: Some(format!("can not decode args, err: {}", err.to_string())), + })), + } + } + + async fn refresh(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + match &serde_json::from_str::(&request.args) { + Ok(args) => match GLOBAL_LOCAL_SERVER.write().await.refresh(args).await { + Ok(result) => Ok(tonic::Response::new(GenerallyLockResponse { + success: result, + error_info: None, + })), + Err(err) => 
Ok(tonic::Response::new(GenerallyLockResponse { + success: false, + error_info: Some(format!("can not refresh, args: {}, err: {}", args, err.to_string())), + })), + }, + Err(err) => Ok(tonic::Response::new(GenerallyLockResponse { + success: false, + error_info: Some(format!("can not decode args, err: {}", err.to_string())), + })), + } + } } diff --git a/rustfs/src/main.rs b/rustfs/src/main.rs index aa51bc44..c4bb74cb 100644 --- a/rustfs/src/main.rs +++ b/rustfs/src/main.rs @@ -4,9 +4,9 @@ mod service; mod storage; use clap::Parser; +use common::error::Result; use ecstore::{ endpoints::EndpointServerPools, - error::Result, store::{init_local_disks, update_erasure_type, ECStore}, }; use grpc::make_server; diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 6a03c51a..3209f7a5 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -25,7 +25,7 @@ use std::str::FromStr; use transform_stream::AsyncTryStream; use uuid::Uuid; -use ecstore::error::Result; +use common::error::Result; use tracing::debug; macro_rules! try_ {