mirror of https://github.com/rustfs/rustfs.git
@@ -1,10 +1,9 @@
// automatically generated by the FlatBuffers compiler, do not modify

// @generated

use core::mem;
use core::cmp::Ordering;
use core::mem;

extern crate flatbuffers;
use self::flatbuffers::{EndianScalar, Follow};
@@ -12,112 +11,114 @@ use self::flatbuffers::{EndianScalar, Follow};
#[allow(unused_imports, dead_code)]
pub mod models {

use core::mem;
use core::cmp::Ordering;
use core::cmp::Ordering;
use core::mem;

extern crate flatbuffers;
use self::flatbuffers::{EndianScalar, Follow};
extern crate flatbuffers;
use self::flatbuffers::{EndianScalar, Follow};

pub enum PingBodyOffset {}
#[derive(Copy, Clone, PartialEq)]
pub enum PingBodyOffset {}
#[derive(Copy, Clone, PartialEq)]

pub struct PingBody<'a> {
pub _tab: flatbuffers::Table<'a>,
}

impl<'a> flatbuffers::Follow<'a> for PingBody<'a> {
type Inner = PingBody<'a>;
#[inline]
unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self { _tab: flatbuffers::Table::new(buf, loc) }
}
}

impl<'a> PingBody<'a> {
pub const VT_PAYLOAD: flatbuffers::VOffsetT = 4;

pub const fn get_fully_qualified_name() -> &'static str {
"models.PingBody"
}

#[inline]
pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
PingBody { _tab: table }
}
#[allow(unused_mut)]
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>,
args: &'args PingBodyArgs<'args>
) -> flatbuffers::WIPOffset<PingBody<'bldr>> {
let mut builder = PingBodyBuilder::new(_fbb);
if let Some(x) = args.payload { builder.add_payload(x); }
builder.finish()
}

#[inline]
pub fn payload(&self) -> Option<flatbuffers::Vector<'a, u8>> {
// Safety:
// Created from valid Table for this object
// which contains a valid value in this slot
unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(PingBody::VT_PAYLOAD, None)}
}
}

impl flatbuffers::Verifiable for PingBody<'_> {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u8>>>("payload", Self::VT_PAYLOAD, false)?
.finish();
Ok(())
}
}
pub struct PingBodyArgs<'a> {
pub payload: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u8>>>,
}
impl<'a> Default for PingBodyArgs<'a> {
#[inline]
fn default() -> Self {
PingBodyArgs {
payload: None,
pub struct PingBody<'a> {
pub _tab: flatbuffers::Table<'a>,
}
}
}

pub struct PingBodyBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> PingBodyBuilder<'a, 'b, A> {
#[inline]
pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u8>>) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(PingBody::VT_PAYLOAD, payload);
}
#[inline]
pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> PingBodyBuilder<'a, 'b, A> {
let start = _fbb.start_table();
PingBodyBuilder {
fbb_: _fbb,
start_: start,
impl<'a> flatbuffers::Follow<'a> for PingBody<'a> {
type Inner = PingBody<'a>;
#[inline]
unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self {
_tab: flatbuffers::Table::new(buf, loc),
}
}
}
}
#[inline]
pub fn finish(self) -> flatbuffers::WIPOffset<PingBody<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}

impl core::fmt::Debug for PingBody<'_> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let mut ds = f.debug_struct("PingBody");
ds.field("payload", &self.payload());
ds.finish()
}
}
} // pub mod models
impl<'a> PingBody<'a> {
pub const VT_PAYLOAD: flatbuffers::VOffsetT = 4;

pub const fn get_fully_qualified_name() -> &'static str {
"models.PingBody"
}

#[inline]
pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
PingBody { _tab: table }
}
#[allow(unused_mut)]
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>,
args: &'args PingBodyArgs<'args>,
) -> flatbuffers::WIPOffset<PingBody<'bldr>> {
let mut builder = PingBodyBuilder::new(_fbb);
if let Some(x) = args.payload {
builder.add_payload(x);
}
builder.finish()
}

#[inline]
pub fn payload(&self) -> Option<flatbuffers::Vector<'a, u8>> {
// Safety:
// Created from valid Table for this object
// which contains a valid value in this slot
unsafe {
self._tab
.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(PingBody::VT_PAYLOAD, None)
}
}
}

impl flatbuffers::Verifiable for PingBody<'_> {
#[inline]
fn run_verifier(v: &mut flatbuffers::Verifier, pos: usize) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u8>>>("payload", Self::VT_PAYLOAD, false)?
.finish();
Ok(())
}
}
pub struct PingBodyArgs<'a> {
pub payload: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u8>>>,
}
impl<'a> Default for PingBodyArgs<'a> {
#[inline]
fn default() -> Self {
PingBodyArgs { payload: None }
}
}

pub struct PingBodyBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> PingBodyBuilder<'a, 'b, A> {
#[inline]
pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset<flatbuffers::Vector<'b, u8>>) {
self.fbb_
.push_slot_always::<flatbuffers::WIPOffset<_>>(PingBody::VT_PAYLOAD, payload);
}
#[inline]
pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> PingBodyBuilder<'a, 'b, A> {
let start = _fbb.start_table();
PingBodyBuilder {
fbb_: _fbb,
start_: start,
}
}
#[inline]
pub fn finish(self) -> flatbuffers::WIPOffset<PingBody<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}

impl core::fmt::Debug for PingBody<'_> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let mut ds = f.debug_struct("PingBody");
ds.field("payload", &self.payload());
ds.finish()
}
}
} // pub mod models
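
Below is a minimal, hypothetical round trip with the generated `models::PingBody` API shown in this diff. It is not part of the change; the `models` module path and the exact `flatbuffers` crate version (providing `root`, `finished_data`, and `create_vector`) are assumptions.

```rust
// Sketch: build a PingBody with the generated builder, then read it back.
use models::{PingBody, PingBodyArgs};

fn ping_roundtrip(bytes: &[u8]) -> Option<Vec<u8>> {
    let mut fbb = flatbuffers::FlatBufferBuilder::new();
    // Nested objects (the byte vector) must be created before the table that references them.
    let payload = fbb.create_vector(bytes);
    let body = PingBody::create(&mut fbb, &PingBodyArgs { payload: Some(payload) });
    fbb.finish(body, None);

    // `root` runs the generated Verifiable impl before handing out the table.
    let ping = flatbuffers::root::<PingBody>(fbb.finished_data()).ok()?;
    ping.payload().map(|v| v.bytes().to_vec())
}
```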
File diff suppressed because it is too large
@@ -1,5 +1,9 @@
use crate::{
disk::{DiskAPI, DiskStore, MetaCacheEntries, MetaCacheEntry, WalkDirOptions},
error::{Error, Result},
};
use futures::future::join_all;
use std::{future::Future, pin::Pin, sync::Arc};

use tokio::{
spawn,
sync::{
@@ -8,11 +12,7 @@ use tokio::{
RwLock,
},
};

use crate::{
disk::{DiskAPI, DiskStore, MetaCacheEntries, MetaCacheEntry, WalkDirOptions},
error::{Error, Result},
};
use tracing::info;

type AgreedFn = Box<dyn Fn(MetaCacheEntry) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
type PartialFn = Box<dyn Fn(MetaCacheEntries, &[Option<Error>]) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
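
For readers unfamiliar with these boxed async callback aliases, here is a small, self-contained sketch of how a callback with the same shape as `AgreedFn` is built and awaited; the names are illustrative, not project code.

```rust
use std::{future::Future, pin::Pin};

// Same shape as AgreedFn above: a callback that returns a boxed, pinned future.
type Callback<T> = Box<dyn Fn(T) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;

fn make_logger() -> Callback<String> {
    Box::new(|name: String| -> Pin<Box<dyn Future<Output = ()> + Send>> {
        Box::pin(async move {
            println!("agreed on entry: {}", name);
        })
    })
}

async fn run_callback() {
    let cb = make_logger();
    cb("bucket/object".to_string()).await;
}
```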
@@ -58,18 +58,20 @@ impl Clone for ListPathRawOptions {

pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -> Result<()> {
if opts.disks.is_empty() {
info!("list_path_raw 0 drives provided");
return Err(Error::from_string("list_path_raw: 0 drives provided"));
}

let mut readers = Vec::with_capacity(opts.disks.len());
let fds = Arc::new(RwLock::new(opts.fallback_disks.clone()));
let mut futures = Vec::with_capacity(opts.disks.len());
for disk in opts.disks.iter() {
let disk = disk.clone();
let opts_clone = opts.clone();
let fds_clone = fds.clone();
let (m_tx, m_rx) = mpsc::channel::<MetaCacheEntry>(100);
readers.push(m_rx);
spawn(async move {
futures.push(async move {
let mut need_fallback = false;
if disk.is_none() {
need_fallback = true;
@@ -136,21 +138,25 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
Err(_) => break,
}
}
drop(m_tx);
});
}

let _ = join_all(futures).await;

let errs: Vec<Option<Error>> = vec![None; readers.len()];
loop {
let mut current = MetaCacheEntry::default();
let (mut at_eof, mut has_err, mut agree) = (0, 0, 0);
if rx.try_recv().is_ok() {
info!("list_path_raw canceled");
return Err(Error::from_string("canceled"));
}
let mut top_entries: Vec<MetaCacheEntry> = Vec::with_capacity(readers.len());
let mut top_entries: Vec<MetaCacheEntry> = vec![MetaCacheEntry::default(); readers.len()];
// top_entries.clear();

for (i, r) in readers.iter_mut().enumerate() {
if errs[i].is_none() {
if errs[i].is_some() {
has_err += 1;
continue;
}
@@ -163,20 +169,20 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
};
// If no current, add it.
if current.name.is_empty() {
top_entries.insert(i, entry.clone());
top_entries[i] = entry.clone();
current = entry;
agree += 1;
continue;
}
// If exact match, we agree.
if let Ok((_, true)) = current.matches(&entry, true) {
top_entries.insert(i, entry);
top_entries[i] = entry;
agree += 1;
continue;
}
// If only the name matches we didn't agree, but add it for resolution.
if entry.name == current.name {
top_entries.insert(i, entry);
top_entries[i] = entry;
continue;
}
// We got different entries
@@ -185,9 +191,11 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
}
// We got a new, better current.
// Clear existing entries.
top_entries.clear();
for i in 0..top_entries.len() {
top_entries[i] = MetaCacheEntry::default();
}
agree += 1;
top_entries.insert(i, entry.clone());
top_entries[i] = entry.clone();
current = entry;
}
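
The hunks above replace `Vec::insert` with indexed assignment into a vector pre-filled with `MetaCacheEntry::default()`. A standalone sketch of why the two differ, with illustrative values rather than project types:

```rust
fn main() {
    // `insert` shifts everything at and after the index and grows the vector,
    // so repeated inserts at reader positions can misalign entries with readers.
    let mut inserted = vec![0u32; 3];
    inserted.insert(1, 7);
    assert_eq!(inserted, [0, 7, 0, 0]); // length grew to 4

    // Indexed assignment overwrites the slot in place, keeping one slot per reader.
    let mut slotted = vec![0u32; 3];
    slotted[1] = 7;
    assert_eq!(slotted, [0, 7, 0]);
}
```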

@@ -205,14 +213,16 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
}
_ => {}
});

info!("list_path_raw failed, err: {:?}", combined_err);
return Err(Error::from_string(combined_err.join(", ")));
}

// Break if all at EOF or error.
if at_eof + has_err == readers.len() && has_err > 0 {
if at_eof + has_err == readers.len() {
if let Some(finished_fn) = opts.finished.as_ref() {
finished_fn(&errs).await;
if has_err > 0 {
finished_fn(&errs).await;
}
}
break;
}
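
The earlier hunks also switch from detaching each per-disk reader with `spawn` to collecting the futures and awaiting them with `join_all` while entries flow through bounded mpsc channels. A self-contained sketch of that fan-in pattern, assuming only the `tokio` and `futures` crates; the names and values are placeholders:

```rust
use futures::future::join_all;
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let disks = vec!["disk-0", "disk-1", "disk-2"];
    let mut readers = Vec::with_capacity(disks.len());
    let mut futures = Vec::with_capacity(disks.len());

    for name in disks {
        let (tx, rx) = mpsc::channel::<String>(100);
        readers.push(rx);
        // Collected instead of spawned, so join_all below waits for every
        // producer to finish and drop its sender before the readers are drained.
        futures.push(async move {
            for i in 0..3 {
                let _ = tx.send(format!("{}/entry-{}", name, i)).await;
            }
            // tx is dropped here, which closes this reader's channel.
        });
    }

    let _ = join_all(futures).await;

    // Every sender is gone, so try_recv drains buffered entries and then stops.
    for r in readers.iter_mut() {
        while let Ok(entry) = r.try_recv() {
            println!("{}", entry);
        }
    }
}
```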

@@ -40,16 +40,15 @@ pub static DEFAULT_MONITOR_NEW_DISK_INTERVAL: Duration = Duration::from_secs(10)

pub async fn init_auto_heal() {
init_background_healing().await;
if let Ok(v) = env::var("_RUSTFS_AUTO_DRIVE_HEALING") {
if v == "on" {
info!("start monitor local disks and heal");
GLOBAL_BackgroundHealState
.push_heal_local_disks(&get_local_disks_to_heal().await)
.await;
spawn(async {
monitor_local_disks_and_heal().await;
});
}
let v = env::var("_RUSTFS_AUTO_DRIVE_HEALING").unwrap_or("on".to_string());
if v == "on" {
info!("start monitor local disks and heal");
GLOBAL_BackgroundHealState
.push_heal_local_disks(&get_local_disks_to_heal().await)
.await;
spawn(async {
monitor_local_disks_and_heal().await;
});
}
spawn(async {
GLOBAL_MRFState.heal_routine().await;
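
The old guard enabled auto drive healing only when `_RUSTFS_AUTO_DRIVE_HEALING` was explicitly set; the `unwrap_or` form makes an unset variable default to "on". A tiny sketch of the behavioral difference; the helper names are illustrative, not project code:

```rust
use std::env;

// New behavior: healing is enabled unless the operator sets the variable
// to something other than "on"; an unset variable defaults to enabled.
fn auto_heal_enabled() -> bool {
    env::var("_RUSTFS_AUTO_DRIVE_HEALING").unwrap_or_else(|_| "on".to_string()) == "on"
}

// Old behavior for comparison: an unset variable meant disabled.
fn auto_heal_enabled_old() -> bool {
    matches!(env::var("_RUSTFS_AUTO_DRIVE_HEALING"), Ok(v) if v == "on")
}
```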
@@ -73,12 +72,14 @@ pub async fn get_local_disks_to_heal() -> Vec<Endpoint> {
if let Some(disk) = disk {
if let Err(err) = disk.disk_info(&DiskInfoOptions::default()).await {
if let Some(DiskError::UnformattedDisk) = err.downcast_ref() {
info!("get_local_disks_to_heal, disk is unformatted: {}", err);
disks_to_heal.push(disk.endpoint());
}
}
let h = disk.healing().await;
if let Some(h) = h {
if !h.finished {
info!("get_local_disks_to_heal, disk healing not finished");
disks_to_heal.push(disk.endpoint());
}
}
@@ -104,8 +105,11 @@ async fn monitor_local_disks_and_heal() {
continue;
}

info!("heal local disks: {:?}", heal_disks);

let store = new_object_layer_fn().expect("errServerNotInitialized");
if let (_, Some(err)) = store.heal_format(false).await.expect("heal format failed") {
if let (result, Some(err)) = store.heal_format(false).await.expect("heal format failed") {
error!("heal local disk format error: {}", err);
if let Some(DiskError::NoHealRequired) = err.downcast_ref::<DiskError>() {
} else {
info!("heal format err: {}", err.to_string());

@@ -388,6 +388,7 @@ pub async fn load_healing_tracker(disk: &Option<DiskStore>) -> Result<HealingTra
)));
}
healing_tracker.id = disk_id;
healing_tracker.disk = Some(disk.clone());
Ok(healing_tracker)
} else {
Err(Error::from_string("loadHealingTracker: disk not have id"))
@@ -400,7 +401,10 @@ pub async fn load_healing_tracker(disk: &Option<DiskStore>) -> Result<HealingTra
pub async fn init_healing_tracker(disk: DiskStore, heal_id: &str) -> Result<HealingTracker> {
let disk_location = disk.get_disk_location();
Ok(HealingTracker {
id: disk.get_disk_id().await?.map_or("".to_string(), |id| id.to_string()),
id: disk
.get_disk_id()
.await
.map_or("".to_string(), |id| id.map_or("".to_string(), |id| id.to_string())),
heal_id: heal_id.to_string(),
path: disk.to_string(),
endpoint: disk.endpoint().to_string(),
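
The reworked `id` expression flattens a `Result` wrapping an `Option`, mapping both the error case and the missing-id case to an empty string instead of propagating with `?`. A standalone sketch of that flattening with stand-in types, not the project's:

```rust
// Stand-in for get_disk_id(): can yield Ok(Some(id)), Ok(None), or Err(_).
fn get_disk_id(raw: &str) -> Result<Option<u64>, String> {
    match raw {
        "err" => Err("io error".to_string()),
        "" => Ok(None),
        s => s.parse::<u64>().map(Some).map_err(|e| e.to_string()),
    }
}

// Same shape as the diff: error -> "", None -> "", Some(id) -> id.to_string().
fn id_string(raw: &str) -> String {
    get_disk_id(raw).map_or("".to_string(), |id| id.map_or("".to_string(), |id| id.to_string()))
}

fn main() {
    assert_eq!(id_string("42"), "42");
    assert_eq!(id_string(""), "");
    assert_eq!(id_string("err"), "");
}
```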

@@ -1258,12 +1258,20 @@ impl SetDisks {
Err(e) => {
warn!("connect_endpoint err {:?}", &e);
if ep.is_local && DiskError::UnformattedDisk.is(&e) {
// TODO: pushHealLocalDisks
GLOBAL_BackgroundHealState.push_heal_local_disks(&[ep.clone()]).await;
}
return;
}
};

if new_disk.is_local() {
if let Some(h) = new_disk.healing().await {
if !h.finished {
GLOBAL_BackgroundHealState.push_heal_local_disks(&[new_disk.endpoint()]).await;
}
}
}

let (set_idx, disk_idx) = match self.find_disk_index(&fm) {
Ok(res) => res,
Err(e) => {
@@ -3022,6 +3030,7 @@ impl SetDisks {
let mut ret_err = None;
for bucket in buckets.iter() {
if tracker.read().await.is_healed(bucket).await {
info!("bucket{} was healed", bucket);
continue;
}

@@ -3142,6 +3151,7 @@ impl SetDisks {
let bg_seq_clone = bg_seq.clone();
let send_clone = send.clone();
let heal_entry = Arc::new(move |bucket: String, entry: MetaCacheEntry| {
info!("heal entry, bucket: {}, entry: {:?}", bucket, entry);
let jt_clone = jt_clone.clone();
let self_clone = self_clone.clone();
let started = started_clone;
@@ -589,6 +589,7 @@ impl StorageAPI for Sets {
.await;
let (formats, errs) = load_format_erasure_all(&disks, true).await;
if let Err(err) = check_format_erasure_values(&formats, self.set_drive_count) {
info!("failed to check formats erasure values: {}", err);
return Ok((HealResultItem::default(), Some(err)));
}
let ref_format = match get_format_erasure_in_quorum(&formats) {
@@ -614,9 +615,10 @@ impl StorageAPI for Sets {
return Ok((res, Some(Error::new(DiskError::NoHealRequired))));
}

if !self.format.eq(&ref_format) {
return Ok((res, Some(Error::new(DiskError::CorruptedFormat))));
}
// if !self.format.eq(&ref_format) {
// info!("format ({:?}) not eq ref_format ({:?})", self.format, ref_format);
// return Ok((res, Some(Error::new(DiskError::CorruptedFormat))));
// }

let format_op_id = Uuid::new_v4().to_string();
let (new_format_sets, _) = new_heal_format_sets(&ref_format, self.set_count, self.set_drive_count, &formats, &errs);

@@ -1909,6 +1909,7 @@ impl StorageAPI for ECStore {
counts
}
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)> {
info!("heal_format");
let mut r = HealResultItem {
heal_item_type: HEAL_ITEM_METADATA.to_string(),
detail: "disk-format".to_string(),
@@ -1936,6 +1937,7 @@ impl StorageAPI for ECStore {
if count_no_heal == self.pools.len() {
return Ok((r, Some(Error::new(DiskError::NoHealRequired))));
}
info!("heal format success result: {:?}", r);
Ok((r, None))
}

@@ -16,8 +16,7 @@ use std::{
fmt::Debug,
};

use crate::config::error::ConfigError;
use tracing::{debug, warn};
use tracing::{debug, info, warn};
use uuid::Uuid;

pub async fn init_disks(eps: &Endpoints, opt: &DiskOption) -> (Vec<Option<DiskStore>>, Vec<Option<Error>>) {