ecstore update ec/disk/error

Author: weisd
Date: 2025-06-04 14:26:46 +08:00
Committed by: weisd
Parent: 7fe0cc74d2
Commit: 9384b831ec
102 changed files with 18806 additions and 4864 deletions

Cargo.lock (generated)

@@ -122,6 +122,12 @@ dependencies = [
"libc",
]
[[package]]
name = "anes"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "anstream"
version = "0.6.18"
@@ -1022,7 +1028,18 @@ checksum = "cc97b8f16f944bba54f0433f07e30be199b6dc2bd25937444bbad560bcea29bd"
dependencies = [
"alloc-no-stdlib",
"alloc-stdlib",
"brotli-decompressor",
"brotli-decompressor 4.0.3",
]
[[package]]
name = "brotli"
version = "8.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9991eea70ea4f293524138648e41ee89b0b2b12ddef3b255effa43c8056e0e0d"
dependencies = [
"alloc-no-stdlib",
"alloc-stdlib",
"brotli-decompressor 5.0.0",
]
[[package]]
@@ -1035,6 +1052,16 @@ dependencies = [
"alloc-stdlib",
]
[[package]]
name = "brotli-decompressor"
version = "5.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "874bb8112abecc98cbd6d81ea4fa7e94fb9449648c93cc89aa40c81c24d7de03"
dependencies = [
"alloc-no-stdlib",
"alloc-stdlib",
]
[[package]]
name = "bumpalo"
version = "3.17.0"
@@ -1144,6 +1171,12 @@ dependencies = [
"thiserror 2.0.12",
]
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cc"
version = "1.2.19"
@@ -1736,6 +1769,44 @@ dependencies = [
"crc",
]
[[package]]
name = "criterion"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
dependencies = [
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot",
"futures",
"is-terminal",
"itertools 0.10.5",
"num-traits",
"once_cell",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_derive",
"serde_json",
"tinytemplate",
"tokio",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
dependencies = [
"cast",
"itertools 0.10.5",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.15"
@@ -1745,6 +1816,25 @@ dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-queue"
version = "0.3.12"
@@ -3115,6 +3205,7 @@ dependencies = [
"madmin",
"protos",
"rmp-serde",
"rustfs-filemeta",
"serde",
"serde_json",
"tokio",
@@ -3163,6 +3254,9 @@ dependencies = [
"rmp",
"rmp-serde",
"rustfs-config",
"rustfs-filemeta",
"rustfs-rio",
"rustfs-utils",
"s3s",
"s3s-policy",
"serde",
@@ -4084,6 +4178,12 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc"
[[package]]
name = "hermit-abi"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f154ce46856750ed433c8649605bf7ed2de3bc35fd9d2a9f30cddd873c80cb08"
[[package]]
name = "hex"
version = "0.4.3"
@@ -4584,6 +4684,17 @@ dependencies = [
"serde",
]
[[package]]
name = "is-terminal"
version = "0.4.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
dependencies = [
"hermit-abi 0.5.1",
"libc",
"windows-sys 0.59.0",
]
[[package]]
name = "is_debug"
version = "1.1.0"
@@ -4596,6 +4707,15 @@ version = "1.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]]
name = "itertools"
version = "0.12.1"
@@ -5079,6 +5199,25 @@ dependencies = [
"hashbrown 0.12.3",
]
[[package]]
name = "lz4"
version = "1.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a20b523e860d03443e98350ceaac5e71c6ba89aea7d960769ec3ce37f4de5af4"
dependencies = [
"lz4-sys",
]
[[package]]
name = "lz4-sys"
version = "1.11.1+lz4-1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "lz4_flex"
version = "0.11.3"
@@ -5923,6 +6062,12 @@ version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "oorandom"
version = "11.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
[[package]]
name = "opaque-debug"
version = "0.3.1"
@@ -6188,7 +6333,7 @@ dependencies = [
"arrow-schema",
"arrow-select",
"base64 0.22.1",
"brotli",
"brotli 7.0.0",
"bytes",
"chrono",
"flate2",
@@ -6555,6 +6700,34 @@ version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
[[package]]
name = "plotters"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
[[package]]
name = "plotters-svg"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
dependencies = [
"plotters-backend",
]
[[package]]
name = "png"
version = "0.17.16"
@@ -7070,6 +7243,26 @@ version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20675572f6f24e9e76ef639bc5552774ed45f1c30e2951e1e99c59888861c539"
[[package]]
name = "rayon"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "rdkafka"
version = "0.37.0"
@@ -7578,6 +7771,25 @@ dependencies = [
"uuid",
]
[[package]]
name = "rustfs-filemeta"
version = "0.0.1"
dependencies = [
"byteorder",
"crc32fast",
"criterion",
"rmp",
"rmp-serde",
"rustfs-utils",
"serde",
"thiserror 2.0.12",
"time",
"tokio",
"tracing",
"uuid",
"xxhash-rust",
]
[[package]]
name = "rustfs-gui"
version = "0.0.1"
@@ -7630,17 +7842,55 @@ dependencies = [
"tracing-subscriber",
]
[[package]]
name = "rustfs-rio"
version = "0.0.1"
dependencies = [
"aes-gcm",
"async-trait",
"base64-simd",
"brotli 8.0.1",
"bytes",
"crc32fast",
"criterion",
"flate2",
"futures",
"hex-simd",
"http",
"lz4",
"md-5",
"pin-project-lite",
"rand 0.8.5",
"reqwest",
"rustfs-utils",
"snap",
"tokio",
"tokio-util",
"zstd",
]
[[package]]
name = "rustfs-utils"
version = "0.0.1"
dependencies = [
"blake3",
"highway",
"lazy_static",
"local-ip-address",
"md-5",
"netif",
"nix 0.30.1",
"rustfs-config",
"rustls 0.23.27",
"rustls-pemfile",
"rustls-pki-types",
"serde",
"sha2 0.10.9",
"tempfile",
"tokio",
"tracing",
"url",
"winapi",
]
[[package]]
@@ -8965,6 +9215,16 @@ dependencies = [
"zerovec",
]
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "tinyvec"
version = "1.9.0"

Cargo.toml

@@ -58,6 +58,8 @@ rustfs-event-notifier = { path = "crates/event-notifier", version = "0.0.1" }
rustfs-utils = { path = "crates/utils", version = "0.0.1" }
rustfs-rio = { path = "crates/rio", version = "0.0.1" }
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.1" }
rustfs-disk = { path = "crates/disk", version = "0.0.1" }
rustfs-error = { path = "crates/error", version = "0.0.1" }
workers = { path = "./common/workers", version = "0.0.1" }
tokio-tar = "0.3.1"
atoi = "2.0.0"

crates/disk/Cargo.toml (new file)

@@ -0,0 +1,33 @@
[package]
name = "rustfs-disk"
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
[dependencies]
url.workspace = true
rustfs-filemeta.workspace = true
rustfs-error.workspace = true
rustfs-rio.workspace = true
serde.workspace = true
serde_json.workspace = true
uuid.workspace = true
tracing.workspace = true
tokio.workspace = true
path-absolutize = "3.1.1"
rustfs-utils = {workspace = true, features =["net"]}
async-trait.workspace = true
time.workspace = true
rustfs-metacache.workspace = true
futures.workspace = true
madmin.workspace = true
protos.workspace = true
tonic.workspace = true
urlencoding = "2.1.3"
rmp-serde.workspace = true
http.workspace = true
[lints]
workspace = true

crates/disk/src/api.rs (new file)

@@ -0,0 +1,667 @@
use crate::{endpoint::Endpoint, local::LocalDisk, remote::RemoteDisk};
use madmin::DiskMetrics;
use rustfs_error::{Error, Result};
use rustfs_filemeta::{FileInfo, FileInfoVersions, RawFileInfo};
use serde::{Deserialize, Serialize};
use std::{fmt::Debug, path::PathBuf, sync::Arc};
use time::OffsetDateTime;
use tokio::io::{AsyncRead, AsyncWrite};
use uuid::Uuid;
pub const RUSTFS_META_BUCKET: &str = ".rustfs.sys";
pub const RUSTFS_META_MULTIPART_BUCKET: &str = ".rustfs.sys/multipart";
pub const RUSTFS_META_TMP_BUCKET: &str = ".rustfs.sys/tmp";
pub const RUSTFS_META_TMP_DELETED_BUCKET: &str = ".rustfs.sys/tmp/.trash";
pub const BUCKET_META_PREFIX: &str = "buckets";
pub const FORMAT_CONFIG_FILE: &str = "format.json";
pub const STORAGE_FORMAT_FILE: &str = "xl.meta";
pub const STORAGE_FORMAT_FILE_BACKUP: &str = "xl.meta.bkp";
pub type DiskStore = Arc<Disk>;
#[derive(Debug)]
pub enum Disk {
Local(LocalDisk),
Remote(RemoteDisk),
}
#[async_trait::async_trait]
impl DiskAPI for Disk {
#[tracing::instrument(skip(self))]
fn to_string(&self) -> String {
match self {
Disk::Local(local_disk) => local_disk.to_string(),
Disk::Remote(remote_disk) => remote_disk.to_string(),
}
}
#[tracing::instrument(skip(self))]
fn is_local(&self) -> bool {
match self {
Disk::Local(local_disk) => local_disk.is_local(),
Disk::Remote(remote_disk) => remote_disk.is_local(),
}
}
#[tracing::instrument(skip(self))]
fn host_name(&self) -> String {
match self {
Disk::Local(local_disk) => local_disk.host_name(),
Disk::Remote(remote_disk) => remote_disk.host_name(),
}
}
#[tracing::instrument(skip(self))]
async fn is_online(&self) -> bool {
match self {
Disk::Local(local_disk) => local_disk.is_online().await,
Disk::Remote(remote_disk) => remote_disk.is_online().await,
}
}
#[tracing::instrument(skip(self))]
fn endpoint(&self) -> Endpoint {
match self {
Disk::Local(local_disk) => local_disk.endpoint(),
Disk::Remote(remote_disk) => remote_disk.endpoint(),
}
}
#[tracing::instrument(skip(self))]
async fn close(&self) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.close().await,
Disk::Remote(remote_disk) => remote_disk.close().await,
}
}
#[tracing::instrument(skip(self))]
fn path(&self) -> PathBuf {
match self {
Disk::Local(local_disk) => local_disk.path(),
Disk::Remote(remote_disk) => remote_disk.path(),
}
}
#[tracing::instrument(skip(self))]
fn get_disk_location(&self) -> DiskLocation {
match self {
Disk::Local(local_disk) => local_disk.get_disk_location(),
Disk::Remote(remote_disk) => remote_disk.get_disk_location(),
}
}
#[tracing::instrument(skip(self))]
async fn get_disk_id(&self) -> Result<Option<Uuid>> {
match self {
Disk::Local(local_disk) => local_disk.get_disk_id().await,
Disk::Remote(remote_disk) => remote_disk.get_disk_id().await,
}
}
#[tracing::instrument(skip(self))]
async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.set_disk_id(id).await,
Disk::Remote(remote_disk) => remote_disk.set_disk_id(id).await,
}
}
#[tracing::instrument(skip(self))]
async fn read_all(&self, volume: &str, path: &str) -> Result<Vec<u8>> {
match self {
Disk::Local(local_disk) => local_disk.read_all(volume, path).await,
Disk::Remote(remote_disk) => remote_disk.read_all(volume, path).await,
}
}
#[tracing::instrument(skip(self))]
async fn write_all(&self, volume: &str, path: &str, data: Vec<u8>) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.write_all(volume, path, data).await,
Disk::Remote(remote_disk) => remote_disk.write_all(volume, path, data).await,
}
}
#[tracing::instrument(skip(self))]
async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.delete(volume, path, opt).await,
Disk::Remote(remote_disk) => remote_disk.delete(volume, path, opt).await,
}
}
#[tracing::instrument(skip(self))]
async fn verify_file(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
match self {
Disk::Local(local_disk) => local_disk.verify_file(volume, path, fi).await,
Disk::Remote(remote_disk) => remote_disk.verify_file(volume, path, fi).await,
}
}
#[tracing::instrument(skip(self))]
async fn check_parts(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
match self {
Disk::Local(local_disk) => local_disk.check_parts(volume, path, fi).await,
Disk::Remote(remote_disk) => remote_disk.check_parts(volume, path, fi).await,
}
}
#[tracing::instrument(skip(self))]
async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Vec<u8>) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.rename_part(src_volume, src_path, dst_volume, dst_path, meta).await,
Disk::Remote(remote_disk) => {
remote_disk
.rename_part(src_volume, src_path, dst_volume, dst_path, meta)
.await
}
}
}
#[tracing::instrument(skip(self))]
async fn rename_file(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.rename_file(src_volume, src_path, dst_volume, dst_path).await,
Disk::Remote(remote_disk) => remote_disk.rename_file(src_volume, src_path, dst_volume, dst_path).await,
}
}
#[tracing::instrument(skip(self))]
async fn create_file(&self, _origvolume: &str, volume: &str, path: &str, _file_size: usize) -> Result<Box<dyn AsyncWrite>> {
match self {
Disk::Local(local_disk) => local_disk.create_file(_origvolume, volume, path, _file_size).await,
Disk::Remote(remote_disk) => remote_disk.create_file(_origvolume, volume, path, _file_size).await,
}
}
#[tracing::instrument(skip(self))]
async fn append_file(&self, volume: &str, path: &str) -> Result<Box<dyn AsyncWrite>> {
match self {
Disk::Local(local_disk) => local_disk.append_file(volume, path).await,
Disk::Remote(remote_disk) => remote_disk.append_file(volume, path).await,
}
}
#[tracing::instrument(skip(self))]
async fn read_file(&self, volume: &str, path: &str) -> Result<Box<dyn AsyncRead>> {
match self {
Disk::Local(local_disk) => local_disk.read_file(volume, path).await,
Disk::Remote(remote_disk) => remote_disk.read_file(volume, path).await,
}
}
#[tracing::instrument(skip(self))]
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<Box<dyn AsyncRead>> {
match self {
Disk::Local(local_disk) => local_disk.read_file_stream(volume, path, offset, length).await,
Disk::Remote(remote_disk) => remote_disk.read_file_stream(volume, path, offset, length).await,
}
}
#[tracing::instrument(skip(self))]
async fn list_dir(&self, _origvolume: &str, volume: &str, _dir_path: &str, _count: i32) -> Result<Vec<String>> {
match self {
Disk::Local(local_disk) => local_disk.list_dir(_origvolume, volume, _dir_path, _count).await,
Disk::Remote(remote_disk) => remote_disk.list_dir(_origvolume, volume, _dir_path, _count).await,
}
}
#[tracing::instrument(skip(self, wr))]
async fn walk_dir<W: AsyncWrite + Unpin + Send + Sync + 'static>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.walk_dir(opts, wr).await,
Disk::Remote(remote_disk) => remote_disk.walk_dir(opts, wr).await,
}
}
#[tracing::instrument(skip(self))]
async fn rename_data(
&self,
src_volume: &str,
src_path: &str,
fi: FileInfo,
dst_volume: &str,
dst_path: &str,
) -> Result<RenameDataResp> {
match self {
Disk::Local(local_disk) => local_disk.rename_data(src_volume, src_path, fi, dst_volume, dst_path).await,
Disk::Remote(remote_disk) => remote_disk.rename_data(src_volume, src_path, fi, dst_volume, dst_path).await,
}
}
#[tracing::instrument(skip(self))]
async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.make_volumes(volumes).await,
Disk::Remote(remote_disk) => remote_disk.make_volumes(volumes).await,
}
}
#[tracing::instrument(skip(self))]
async fn make_volume(&self, volume: &str) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.make_volume(volume).await,
Disk::Remote(remote_disk) => remote_disk.make_volume(volume).await,
}
}
#[tracing::instrument(skip(self))]
async fn list_volumes(&self) -> Result<Vec<VolumeInfo>> {
match self {
Disk::Local(local_disk) => local_disk.list_volumes().await,
Disk::Remote(remote_disk) => remote_disk.list_volumes().await,
}
}
#[tracing::instrument(skip(self))]
async fn stat_volume(&self, volume: &str) -> Result<VolumeInfo> {
match self {
Disk::Local(local_disk) => local_disk.stat_volume(volume).await,
Disk::Remote(remote_disk) => remote_disk.stat_volume(volume).await,
}
}
#[tracing::instrument(skip(self))]
async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.delete_paths(volume, paths).await,
Disk::Remote(remote_disk) => remote_disk.delete_paths(volume, paths).await,
}
}
#[tracing::instrument(skip(self))]
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.update_metadata(volume, path, fi, opts).await,
Disk::Remote(remote_disk) => remote_disk.update_metadata(volume, path, fi, opts).await,
}
}
#[tracing::instrument(skip(self))]
async fn write_metadata(&self, _org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.write_metadata(_org_volume, volume, path, fi).await,
Disk::Remote(remote_disk) => remote_disk.write_metadata(_org_volume, volume, path, fi).await,
}
}
#[tracing::instrument(level = "debug", skip(self))]
async fn read_version(
&self,
_org_volume: &str,
volume: &str,
path: &str,
version_id: &str,
opts: &ReadOptions,
) -> Result<FileInfo> {
match self {
Disk::Local(local_disk) => local_disk.read_version(_org_volume, volume, path, version_id, opts).await,
Disk::Remote(remote_disk) => remote_disk.read_version(_org_volume, volume, path, version_id, opts).await,
}
}
#[tracing::instrument(skip(self))]
async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result<RawFileInfo> {
match self {
Disk::Local(local_disk) => local_disk.read_xl(volume, path, read_data).await,
Disk::Remote(remote_disk) => remote_disk.read_xl(volume, path, read_data).await,
}
}
#[tracing::instrument(skip(self))]
async fn delete_version(
&self,
volume: &str,
path: &str,
fi: FileInfo,
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.delete_version(volume, path, fi, force_del_marker, opts).await,
Disk::Remote(remote_disk) => remote_disk.delete_version(volume, path, fi, force_del_marker, opts).await,
}
}
#[tracing::instrument(skip(self))]
async fn delete_versions(
&self,
volume: &str,
versions: Vec<FileInfoVersions>,
opts: DeleteOptions,
) -> Result<Vec<Option<Error>>> {
match self {
Disk::Local(local_disk) => local_disk.delete_versions(volume, versions, opts).await,
Disk::Remote(remote_disk) => remote_disk.delete_versions(volume, versions, opts).await,
}
}
#[tracing::instrument(skip(self))]
async fn read_multiple(&self, req: ReadMultipleReq) -> Result<Vec<ReadMultipleResp>> {
match self {
Disk::Local(local_disk) => local_disk.read_multiple(req).await,
Disk::Remote(remote_disk) => remote_disk.read_multiple(req).await,
}
}
#[tracing::instrument(skip(self))]
async fn delete_volume(&self, volume: &str) -> Result<()> {
match self {
Disk::Local(local_disk) => local_disk.delete_volume(volume).await,
Disk::Remote(remote_disk) => remote_disk.delete_volume(volume).await,
}
}
#[tracing::instrument(skip(self))]
async fn disk_info(&self, opts: &DiskInfoOptions) -> Result<DiskInfo> {
match self {
Disk::Local(local_disk) => local_disk.disk_info(opts).await,
Disk::Remote(remote_disk) => remote_disk.disk_info(opts).await,
}
}
// #[tracing::instrument(skip(self, cache, we_sleep, scan_mode))]
// async fn ns_scanner(
// &self,
// cache: &DataUsageCache,
// updates: Sender<DataUsageEntry>,
// scan_mode: HealScanMode,
// we_sleep: ShouldSleepFn,
// ) -> Result<DataUsageCache> {
// match self {
// Disk::Local(local_disk) => local_disk.ns_scanner(cache, updates, scan_mode, we_sleep).await,
// Disk::Remote(remote_disk) => remote_disk.ns_scanner(cache, updates, scan_mode, we_sleep).await,
// }
// }
// #[tracing::instrument(skip(self))]
// async fn healing(&self) -> Option<HealingTracker> {
// match self {
// Disk::Local(local_disk) => local_disk.healing().await,
// Disk::Remote(remote_disk) => remote_disk.healing().await,
// }
// }
}
pub async fn new_disk(ep: &Endpoint, opt: &DiskOption) -> Result<DiskStore> {
if ep.is_local {
Ok(Arc::new(Disk::Local(LocalDisk::new(ep, opt.cleanup).await?)))
} else {
Ok(Arc::new(Disk::Remote(RemoteDisk::new(ep, opt).await?)))
}
}
#[async_trait::async_trait]
pub trait DiskAPI: Debug + Send + Sync + 'static {
fn to_string(&self) -> String;
async fn is_online(&self) -> bool;
fn is_local(&self) -> bool;
// LastConn
fn host_name(&self) -> String;
fn endpoint(&self) -> Endpoint;
async fn close(&self) -> Result<()>;
async fn get_disk_id(&self) -> Result<Option<Uuid>>;
async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()>;
fn path(&self) -> PathBuf;
fn get_disk_location(&self) -> DiskLocation;
// Healing
// DiskInfo
// NSScanner
// Volume operations.
async fn make_volume(&self, volume: &str) -> Result<()>;
async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()>;
async fn list_volumes(&self) -> Result<Vec<VolumeInfo>>;
async fn stat_volume(&self, volume: &str) -> Result<VolumeInfo>;
async fn delete_volume(&self, volume: &str) -> Result<()>;
// Concurrent read-while-write: MetaCacheEntry items are streamed into w as the directory is walked
async fn walk_dir<W: AsyncWrite + Unpin + Send + Sync + 'static>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()>;
// Metadata operations
async fn delete_version(
&self,
volume: &str,
path: &str,
fi: FileInfo,
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()>;
async fn delete_versions(
&self,
volume: &str,
versions: Vec<FileInfoVersions>,
opts: DeleteOptions,
) -> Result<Vec<Option<Error>>>;
async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()>;
async fn write_metadata(&self, org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()>;
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()>;
async fn read_version(
&self,
org_volume: &str,
volume: &str,
path: &str,
version_id: &str,
opts: &ReadOptions,
) -> Result<FileInfo>;
async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result<RawFileInfo>;
async fn rename_data(
&self,
src_volume: &str,
src_path: &str,
file_info: FileInfo,
dst_volume: &str,
dst_path: &str,
) -> Result<RenameDataResp>;
// File operations.
// Read all files and directories under the given directory
async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result<Vec<String>>;
async fn read_file(&self, volume: &str, path: &str) -> Result<Box<dyn AsyncRead>>;
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<Box<dyn AsyncRead>>;
async fn append_file(&self, volume: &str, path: &str) -> Result<Box<dyn AsyncWrite>>;
async fn create_file(&self, origvolume: &str, volume: &str, path: &str, file_size: usize) -> Result<Box<dyn AsyncWrite>>;
// ReadFileStream
async fn rename_file(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str) -> Result<()>;
async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Vec<u8>) -> Result<()>;
async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()>;
// VerifyFile
async fn verify_file(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp>;
// CheckParts
async fn check_parts(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp>;
// StatInfoFile
// ReadParts
async fn read_multiple(&self, req: ReadMultipleReq) -> Result<Vec<ReadMultipleResp>>;
// CleanAbandonedData
async fn write_all(&self, volume: &str, path: &str, data: Vec<u8>) -> Result<()>;
async fn read_all(&self, volume: &str, path: &str) -> Result<Vec<u8>>;
async fn disk_info(&self, opts: &DiskInfoOptions) -> Result<DiskInfo>;
// async fn ns_scanner(
// &self,
// cache: &DataUsageCache,
// updates: Sender<DataUsageEntry>,
// scan_mode: HealScanMode,
// we_sleep: ShouldSleepFn,
// ) -> Result<DataUsageCache>;
// async fn healing(&self) -> Option<HealingTracker>;
}
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct CheckPartsResp {
pub results: Vec<usize>,
}
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct UpdateMetadataOpts {
pub no_persistence: bool,
}
pub struct DiskLocation {
pub pool_idx: Option<usize>,
pub set_idx: Option<usize>,
pub disk_idx: Option<usize>,
}
impl DiskLocation {
pub fn valid(&self) -> bool {
self.pool_idx.is_some() && self.set_idx.is_some() && self.disk_idx.is_some()
}
}
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct DiskInfoOptions {
pub disk_id: String,
pub metrics: bool,
pub noop: bool,
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct DiskInfo {
pub total: u64,
pub free: u64,
pub used: u64,
pub used_inodes: u64,
pub free_inodes: u64,
pub major: u64,
pub minor: u64,
pub nr_requests: u64,
pub fs_type: String,
pub root_disk: bool,
pub healing: bool,
pub scanning: bool,
pub endpoint: String,
pub mount_path: String,
pub id: String,
pub rotational: bool,
pub metrics: DiskMetrics,
pub error: String,
}
#[derive(Clone, Debug, Default)]
pub struct Info {
pub total: u64,
pub free: u64,
pub used: u64,
pub files: u64,
pub ffree: u64,
pub fstype: String,
pub major: u64,
pub minor: u64,
pub name: String,
pub rotational: bool,
pub nrrequests: u64,
}
// #[derive(Debug, Default, Clone, Serialize, Deserialize)]
// pub struct FileInfoVersions {
// // Name of the volume.
// pub volume: String,
// // Name of the file.
// pub name: String,
// // Represents the latest mod time of the
// // latest version.
// pub latest_mod_time: Option<OffsetDateTime>,
// pub versions: Vec<FileInfo>,
// pub free_versions: Vec<FileInfo>,
// }
// impl FileInfoVersions {
// pub fn find_version_index(&self, v: &str) -> Option<usize> {
// if v.is_empty() {
// return None;
// }
// let vid = Uuid::parse_str(v).unwrap_or(Uuid::nil());
// self.versions.iter().position(|v| v.version_id == Some(vid))
// }
// }
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct WalkDirOptions {
// Bucket to scan
pub bucket: String,
// Directory inside the bucket.
pub base_dir: String,
// Do a full recursive scan.
pub recursive: bool,
// ReportNotFound will return errFileNotFound if all disks report that the BaseDir cannot be found.
pub report_notfound: bool,
// FilterPrefix will only return results with given prefix within folder.
// Should never contain a slash.
pub filter_prefix: Option<String>,
// ForwardTo will forward to the given object path.
pub forward_to: Option<String>,
// Limit the number of returned objects if > 0.
pub limit: i32,
// DiskID contains the disk ID of the disk.
// Leave empty to not check disk ID.
pub disk_id: String,
}
// move metacache to metacache.rs
#[derive(Clone, Debug, Default)]
pub struct DiskOption {
pub cleanup: bool,
pub health_check: bool,
}
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct RenameDataResp {
pub old_data_dir: Option<Uuid>,
pub sign: Option<Vec<u8>>,
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DeleteOptions {
pub recursive: bool,
pub immediate: bool,
pub undo_write: bool,
pub old_data_dir: Option<Uuid>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReadMultipleReq {
pub bucket: String,
pub prefix: String,
pub files: Vec<String>,
pub max_size: usize,
pub metadata_only: bool,
pub abort404: bool,
pub max_results: usize,
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ReadMultipleResp {
pub bucket: String,
pub prefix: String,
pub file: String,
pub exists: bool,
pub error: String,
pub data: Vec<u8>,
pub mod_time: Option<OffsetDateTime>,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct VolumeInfo {
pub name: String,
pub created: Option<OffsetDateTime>,
}
#[derive(Deserialize, Serialize, Debug, Default)]
pub struct ReadOptions {
pub incl_free_versions: bool,
pub read_data: bool,
pub healing: bool,
}
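Taken together, `new_disk` and the `DiskAPI` trait above give callers a single handle over local and remote drives. A minimal usage sketch follows; the module paths, the `tokio::main` setup, and the exact `Result` alias are assumptions not confirmed by this diff, while the calls themselves follow the signatures above:

```rust
use rustfs_disk::api::{new_disk, DiskAPI, DiskOption};
use rustfs_disk::endpoint::Endpoint;

#[tokio::main]
async fn main() -> rustfs_error::Result<()> {
    // A bare path parses as a local, path-style endpoint.
    let ep = Endpoint::try_from("/tmp/rustfs-disk0").expect("valid endpoint");
    let disk = new_disk(&ep, &DiskOption { cleanup: false, health_check: false }).await?;

    // Volumes map to top-level directories on the drive.
    disk.make_volume("testbucket").await?;
    disk.write_all("testbucket", "hello.txt", b"hello".to_vec()).await?;
    assert_eq!(disk.read_all("testbucket", "hello.txt").await?, b"hello");
    Ok(())
}
```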

crates/disk/src/endpoint.rs (new file)

@@ -0,0 +1,379 @@
use path_absolutize::Absolutize;
use rustfs_utils::{is_local_host, is_socket_addr};
use std::{fmt::Display, path::Path};
use url::{ParseError, Url};
/// enum for endpoint type.
#[derive(PartialEq, Eq, Debug)]
pub enum EndpointType {
/// path style endpoint type enum.
Path,
/// URL style endpoint type enum.
Url,
}
/// any type of endpoint.
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
pub struct Endpoint {
pub url: url::Url,
pub is_local: bool,
pub pool_idx: i32,
pub set_idx: i32,
pub disk_idx: i32,
}
impl Display for Endpoint {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if self.url.scheme() == "file" {
write!(f, "{}", self.get_file_path())
} else {
write!(f, "{}", self.url)
}
}
}
impl TryFrom<&str> for Endpoint {
/// The type returned in the event of a conversion error.
type Error = std::io::Error;
/// Performs the conversion.
fn try_from(value: &str) -> core::result::Result<Self, Self::Error> {
// check whether given path is not empty.
if ["", "/", "\\"].iter().any(|&v| v.eq(value)) {
return Err(std::io::Error::other("empty or root endpoint is not supported"));
}
let mut is_local = false;
let url = match Url::parse(value) {
#[allow(unused_mut)]
Ok(mut url) if url.has_host() => {
// URL style of endpoint.
// Valid URL style endpoint is
// - Scheme field must contain "http" or "https"
// - All fields should be empty except Host and Path.
if !((url.scheme() == "http" || url.scheme() == "https")
&& url.username().is_empty()
&& url.fragment().is_none()
&& url.query().is_none())
{
return Err(std::io::Error::other("invalid URL endpoint format"));
}
let path = url.path().to_string();
#[cfg(not(windows))]
let path = Path::new(&path).absolutize()?;
// On Windows, a leading slash separator causes problems if the
// command line already has C:/<export-folder>/ in it. The final resulting
// path on Windows might become C:/C:/, which prevents the rustfs server
// from starting properly in distributed mode.
// As a special case make sure to trim the separator.
#[cfg(windows)]
let path = Path::new(&path[1..]).absolutize()?;
if path.parent().is_none() || Path::new("").eq(&path) {
return Err(std::io::Error::other("empty or root path is not supported in URL endpoint"));
}
match path.to_str() {
Some(v) => url.set_path(v),
None => return Err(std::io::Error::other("invalid path")),
}
url
}
Ok(_) => {
// like d:/foo
is_local = true;
url_parse_from_file_path(value)?
}
Err(e) => match e {
ParseError::InvalidPort => {
return Err(std::io::Error::other(
"invalid URL endpoint format: port number must be between 1 to 65535",
))
}
ParseError::EmptyHost => return Err(std::io::Error::other("invalid URL endpoint format: empty host name")),
ParseError::RelativeUrlWithoutBase => {
// like /foo
is_local = true;
url_parse_from_file_path(value)?
}
_ => return Err(std::io::Error::other(format!("invalid URL endpoint format: {}", e))),
},
};
Ok(Endpoint {
url,
is_local,
pool_idx: -1,
set_idx: -1,
disk_idx: -1,
})
}
}
impl Endpoint {
/// returns type of endpoint.
pub fn get_type(&self) -> EndpointType {
if self.url.scheme() == "file" {
EndpointType::Path
} else {
EndpointType::Url
}
}
/// sets a specific pool number to this node
pub fn set_pool_index(&mut self, idx: usize) {
self.pool_idx = idx as i32
}
/// sets a specific set number to this node
pub fn set_set_index(&mut self, idx: usize) {
self.set_idx = idx as i32
}
/// sets a specific disk number to this node
pub fn set_disk_index(&mut self, idx: usize) {
self.disk_idx = idx as i32
}
/// resolves the host and updates if it is local or not.
pub fn update_is_local(&mut self, local_port: u16) -> std::io::Result<()> {
match (self.url.scheme(), self.url.host()) {
(v, Some(host)) if v != "file" => {
self.is_local = is_local_host(host, self.url.port().unwrap_or_default(), local_port)?;
}
_ => {}
}
Ok(())
}
/// returns the host to be used for grid connections.
pub fn grid_host(&self) -> String {
match (self.url.host(), self.url.port()) {
(Some(host), Some(port)) => format!("{}://{}:{}", self.url.scheme(), host, port),
(Some(host), None) => format!("{}://{}", self.url.scheme(), host),
_ => String::new(),
}
}
pub fn host_port(&self) -> String {
match (self.url.host(), self.url.port()) {
(Some(host), Some(port)) => format!("{}:{}", host, port),
(Some(host), None) => format!("{}", host),
_ => String::new(),
}
}
pub fn get_file_path(&self) -> &str {
let path = self.url.path();
#[cfg(windows)]
let path = &path[1..];
path
}
}
/// Parse a file path into a URL.
fn url_parse_from_file_path(value: &str) -> std::io::Result<url::Url> {
// Only check whether the arg is an IP address and ask for a scheme, since it is absent.
// localhost, example.com, or any FQDN cannot be disambiguated from a regular file path such as
// /mnt/export1, so we go ahead and start the rustfs server in FS mode in these cases.
let addr: Vec<&str> = value.splitn(2, '/').collect();
if is_socket_addr(addr[0]) {
return Err(std::io::Error::other("invalid URL endpoint format: missing scheme http or https"));
}
let file_path = match Path::new(value).absolutize() {
Ok(path) => path,
Err(err) => return Err(std::io::Error::other(format!("absolute path failed: {}", err))),
};
match Url::from_file_path(file_path) {
Ok(url) => Ok(url),
Err(_) => Err(std::io::Error::other("failed to convert the file path into a URL")),
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_new_endpoint() {
#[derive(Default)]
struct TestCase<'a> {
arg: &'a str,
expected_endpoint: Option<Endpoint>,
expected_type: Option<EndpointType>,
expected_err: Option<std::io::Error>,
}
let u2 = url::Url::parse("https://example.org/path").unwrap();
let u4 = url::Url::parse("http://192.168.253.200/path").unwrap();
let u6 = url::Url::parse("http://server:/path").unwrap();
let root_slash_foo = url::Url::from_file_path("/foo").unwrap();
let test_cases = [
TestCase {
arg: "/foo",
expected_endpoint: Some(Endpoint {
url: root_slash_foo,
is_local: true,
pool_idx: -1,
set_idx: -1,
disk_idx: -1,
}),
expected_type: Some(EndpointType::Path),
expected_err: None,
},
TestCase {
arg: "https://example.org/path",
expected_endpoint: Some(Endpoint {
url: u2,
is_local: false,
pool_idx: -1,
set_idx: -1,
disk_idx: -1,
}),
expected_type: Some(EndpointType::Url),
expected_err: None,
},
TestCase {
arg: "http://192.168.253.200/path",
expected_endpoint: Some(Endpoint {
url: u4,
is_local: false,
pool_idx: -1,
set_idx: -1,
disk_idx: -1,
}),
expected_type: Some(EndpointType::Url),
expected_err: None,
},
TestCase {
arg: "",
expected_endpoint: None,
expected_type: None,
expected_err: Some(std::io::Error::other("empty or root endpoint is not supported")),
},
TestCase {
arg: "/",
expected_endpoint: None,
expected_type: None,
expected_err: Some(std::io::Error::other("empty or root endpoint is not supported")),
},
TestCase {
arg: "\\",
expected_endpoint: None,
expected_type: None,
expected_err: Some(std::io::Error::other("empty or root endpoint is not supported")),
},
TestCase {
arg: "c://foo",
expected_endpoint: None,
expected_type: None,
expected_err: Some(std::io::Error::other("invalid URL endpoint format")),
},
TestCase {
arg: "ftp://foo",
expected_endpoint: None,
expected_type: None,
expected_err: Some(std::io::Error::other("invalid URL endpoint format")),
},
TestCase {
arg: "http://server/path?location",
expected_endpoint: None,
expected_type: None,
expected_err: Some(std::io::Error::other("invalid URL endpoint format")),
},
TestCase {
arg: "http://:/path",
expected_endpoint: None,
expected_type: None,
expected_err: Some(std::io::Error::other("invalid URL endpoint format: empty host name")),
},
TestCase {
arg: "http://:8080/path",
expected_endpoint: None,
expected_type: None,
expected_err: Some(std::io::Error::other("invalid URL endpoint format: empty host name")),
},
TestCase {
arg: "http://server:/path",
expected_endpoint: Some(Endpoint {
url: u6,
is_local: false,
pool_idx: -1,
set_idx: -1,
disk_idx: -1,
}),
expected_type: Some(EndpointType::Url),
expected_err: None,
},
TestCase {
arg: "https://93.184.216.34:808080/path",
expected_endpoint: None,
expected_type: None,
expected_err: Some(std::io::Error::other(
"invalid URL endpoint format: port number must be between 1 to 65535",
)),
},
TestCase {
arg: "http://server:8080//",
expected_endpoint: None,
expected_type: None,
expected_err: Some(std::io::Error::other("empty or root path is not supported in URL endpoint")),
},
TestCase {
arg: "http://server:8080/",
expected_endpoint: None,
expected_type: None,
expected_err: Some(std::io::Error::other("empty or root path is not supported in URL endpoint")),
},
TestCase {
arg: "192.168.1.210:9000",
expected_endpoint: None,
expected_type: None,
expected_err: Some(std::io::Error::other("invalid URL endpoint format: missing scheme http or https")),
},
];
for test_case in test_cases {
let ret = Endpoint::try_from(test_case.arg);
if test_case.expected_err.is_none() && ret.is_err() {
panic!("{}: error: expected = <nil>, got = {:?}", test_case.arg, ret);
}
if test_case.expected_err.is_some() && ret.is_ok() {
panic!("{}: error: expected = {:?}, got = <nil>", test_case.arg, test_case.expected_err);
}
match (test_case.expected_err, ret) {
(None, Err(e)) => panic!("{}: error: expected = <nil>, got = {}", test_case.arg, e),
(None, Ok(mut ep)) => {
let _ = ep.update_is_local(9000);
if test_case.expected_type != Some(ep.get_type()) {
panic!(
"{}: type: expected = {:?}, got = {:?}",
test_case.arg,
test_case.expected_type,
ep.get_type()
);
}
assert_eq!(test_case.expected_endpoint, Some(ep), "{}: endpoint", test_case.arg);
}
(Some(e), Ok(_)) => panic!("{}: error: expected = {}, got = <nil>", test_case.arg, e),
(Some(e), Err(e2)) => {
assert_eq!(e.to_string(), e2.to_string(), "{}: error: expected = {}, got = {}", test_case.arg, e, e2)
}
}
}
}
}
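The accessor methods on `Endpoint` are small but easy to misread; below is a sketch of what they yield for a URL-style endpoint (the module path is assumed; the values follow directly from the implementations above):

```rust
use rustfs_disk::endpoint::{Endpoint, EndpointType};

fn main() -> std::io::Result<()> {
    let mut ep = Endpoint::try_from("http://server:9000/data/disk1")?;
    ep.set_pool_index(0);
    ep.set_set_index(1);
    ep.set_disk_index(2);

    assert_eq!(ep.get_type(), EndpointType::Url);
    assert_eq!(ep.grid_host(), "http://server:9000");
    assert_eq!(ep.host_port(), "server:9000");
    assert_eq!(ep.get_file_path(), "/data/disk1"); // URL path, not a filesystem path
    Ok(())
}
```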

crates/disk/src/error.rs (new file)

@@ -0,0 +1,594 @@
use std::io::{self, ErrorKind};
use std::path::PathBuf;
use tracing::error;
use crate::quorum::CheckErrorFn;
use crate::utils::ERROR_TYPE_MASK;
use common::error::{Error, Result};
// DiskError == StorageErr
#[derive(Debug, thiserror::Error)]
pub enum DiskError {
#[error("maximum versions exceeded, please delete few versions to proceed")]
MaxVersionsExceeded,
#[error("unexpected error")]
Unexpected,
#[error("corrupted format")]
CorruptedFormat,
#[error("corrupted backend")]
CorruptedBackend,
#[error("unformatted disk error")]
UnformattedDisk,
#[error("inconsistent drive found")]
InconsistentDisk,
#[error("drive does not support O_DIRECT")]
UnsupportedDisk,
#[error("drive path full")]
DiskFull,
#[error("disk not a dir")]
DiskNotDir,
#[error("disk not found")]
DiskNotFound,
#[error("drive still did not complete the request")]
DiskOngoingReq,
#[error("drive is part of root drive, will not be used")]
DriveIsRoot,
#[error("remote drive is faulty")]
FaultyRemoteDisk,
#[error("drive is faulty")]
FaultyDisk,
#[error("drive access denied")]
DiskAccessDenied,
#[error("file not found")]
FileNotFound,
#[error("file version not found")]
FileVersionNotFound,
#[error("too many open files, please increase 'ulimit -n'")]
TooManyOpenFiles,
#[error("file name too long")]
FileNameTooLong,
#[error("volume already exists")]
VolumeExists,
#[error("not of regular file type")]
IsNotRegular,
#[error("path not found")]
PathNotFound,
#[error("volume not found")]
VolumeNotFound,
#[error("volume is not empty")]
VolumeNotEmpty,
#[error("volume access denied")]
VolumeAccessDenied,
#[error("disk access denied")]
FileAccessDenied,
#[error("file is corrupted")]
FileCorrupt,
#[error("bit-rot hash algorithm is invalid")]
BitrotHashAlgoInvalid,
#[error("Rename across devices not allowed, please fix your backend configuration")]
CrossDeviceLink,
#[error("less data available than what was requested")]
LessData,
#[error("more data was sent than what was advertised")]
MoreData,
#[error("outdated XL meta")]
OutdatedXLMeta,
#[error("part missing or corrupt")]
PartMissingOrCorrupt,
#[error("No healing is required")]
NoHealRequired,
}
impl DiskError {
/// Checks if the given array of errors contains fatal disk errors.
///
/// # Parameters
/// - `errs`: A slice of optional errors.
///
/// # Returns
/// If all errors are of the same fatal disk error type, returns the corresponding error.
/// Otherwise, returns Ok.
pub fn check_disk_fatal_errs(errs: &[Option<Error>]) -> Result<()> {
if DiskError::UnsupportedDisk.count_errs(errs) == errs.len() {
return Err(DiskError::UnsupportedDisk.into());
}
if DiskError::FileAccessDenied.count_errs(errs) == errs.len() {
return Err(DiskError::FileAccessDenied.into());
}
if DiskError::DiskNotDir.count_errs(errs) == errs.len() {
return Err(DiskError::DiskNotDir.into());
}
Ok(())
}
pub fn count_errs(&self, errs: &[Option<Error>]) -> usize {
errs.iter()
.filter(|&err| match err {
None => false,
Some(e) => self.is(e),
})
.count()
}
pub fn quorum_unformatted_disks(errs: &[Option<Error>]) -> bool {
DiskError::UnformattedDisk.count_errs(errs) > (errs.len() / 2)
}
pub fn should_init_erasure_disks(errs: &[Option<Error>]) -> bool {
DiskError::UnformattedDisk.count_errs(errs) == errs.len()
}
/// Check if the error is a disk error
pub fn is(&self, err: &Error) -> bool {
if let Some(e) = err.downcast_ref::<DiskError>() {
e == self
} else {
false
}
}
}
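// Sketch: when every disk reports the same fatal error, check_disk_fatal_errs
// surfaces it as the overall result (relies on the `From<DiskError> for Error`
// conversion implied by the `.into()` calls above):
//
// let errs: Vec<Option<Error>> = vec![
//     Some(DiskError::UnsupportedDisk.into()),
//     Some(DiskError::UnsupportedDisk.into()),
// ];
// assert_eq!(DiskError::UnsupportedDisk.count_errs(&errs), 2);
// assert!(DiskError::check_disk_fatal_errs(&errs).is_err());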
impl DiskError {
pub fn to_u32(&self) -> u32 {
match self {
DiskError::MaxVersionsExceeded => 0x01,
DiskError::Unexpected => 0x02,
DiskError::CorruptedFormat => 0x03,
DiskError::CorruptedBackend => 0x04,
DiskError::UnformattedDisk => 0x05,
DiskError::InconsistentDisk => 0x06,
DiskError::UnsupportedDisk => 0x07,
DiskError::DiskFull => 0x08,
DiskError::DiskNotDir => 0x09,
DiskError::DiskNotFound => 0x0A,
DiskError::DiskOngoingReq => 0x0B,
DiskError::DriveIsRoot => 0x0C,
DiskError::FaultyRemoteDisk => 0x0D,
DiskError::FaultyDisk => 0x0E,
DiskError::DiskAccessDenied => 0x0F,
DiskError::FileNotFound => 0x10,
DiskError::FileVersionNotFound => 0x11,
DiskError::TooManyOpenFiles => 0x12,
DiskError::FileNameTooLong => 0x13,
DiskError::VolumeExists => 0x14,
DiskError::IsNotRegular => 0x15,
DiskError::PathNotFound => 0x16,
DiskError::VolumeNotFound => 0x17,
DiskError::VolumeNotEmpty => 0x18,
DiskError::VolumeAccessDenied => 0x19,
DiskError::FileAccessDenied => 0x1A,
DiskError::FileCorrupt => 0x1B,
DiskError::BitrotHashAlgoInvalid => 0x1C,
DiskError::CrossDeviceLink => 0x1D,
DiskError::LessData => 0x1E,
DiskError::MoreData => 0x1F,
DiskError::OutdatedXLMeta => 0x20,
DiskError::PartMissingOrCorrupt => 0x21,
DiskError::NoHealRequired => 0x22,
}
}
pub fn from_u32(error: u32) -> Option<Self> {
match error & ERROR_TYPE_MASK {
0x01 => Some(DiskError::MaxVersionsExceeded),
0x02 => Some(DiskError::Unexpected),
0x03 => Some(DiskError::CorruptedFormat),
0x04 => Some(DiskError::CorruptedBackend),
0x05 => Some(DiskError::UnformattedDisk),
0x06 => Some(DiskError::InconsistentDisk),
0x07 => Some(DiskError::UnsupportedDisk),
0x08 => Some(DiskError::DiskFull),
0x09 => Some(DiskError::DiskNotDir),
0x0A => Some(DiskError::DiskNotFound),
0x0B => Some(DiskError::DiskOngoingReq),
0x0C => Some(DiskError::DriveIsRoot),
0x0D => Some(DiskError::FaultyRemoteDisk),
0x0E => Some(DiskError::FaultyDisk),
0x0F => Some(DiskError::DiskAccessDenied),
0x10 => Some(DiskError::FileNotFound),
0x11 => Some(DiskError::FileVersionNotFound),
0x12 => Some(DiskError::TooManyOpenFiles),
0x13 => Some(DiskError::FileNameTooLong),
0x14 => Some(DiskError::VolumeExists),
0x15 => Some(DiskError::IsNotRegular),
0x16 => Some(DiskError::PathNotFound),
0x17 => Some(DiskError::VolumeNotFound),
0x18 => Some(DiskError::VolumeNotEmpty),
0x19 => Some(DiskError::VolumeAccessDenied),
0x1A => Some(DiskError::FileAccessDenied),
0x1B => Some(DiskError::FileCorrupt),
0x1C => Some(DiskError::BitrotHashAlgoInvalid),
0x1D => Some(DiskError::CrossDeviceLink),
0x1E => Some(DiskError::LessData),
0x1F => Some(DiskError::MoreData),
0x20 => Some(DiskError::OutdatedXLMeta),
0x21 => Some(DiskError::PartMissingOrCorrupt),
0x22 => Some(DiskError::NoHealRequired),
_ => None,
}
}
}
impl PartialEq for DiskError {
fn eq(&self, other: &Self) -> bool {
core::mem::discriminant(self) == core::mem::discriminant(other)
}
}
impl CheckErrorFn for DiskError {
fn is(&self, e: &Error) -> bool {
self.is(e)
}
}
pub fn clone_disk_err(e: &DiskError) -> Error {
match e {
DiskError::MaxVersionsExceeded => Error::new(DiskError::MaxVersionsExceeded),
DiskError::Unexpected => Error::new(DiskError::Unexpected),
DiskError::CorruptedFormat => Error::new(DiskError::CorruptedFormat),
DiskError::CorruptedBackend => Error::new(DiskError::CorruptedBackend),
DiskError::UnformattedDisk => Error::new(DiskError::UnformattedDisk),
DiskError::InconsistentDisk => Error::new(DiskError::InconsistentDisk),
DiskError::UnsupportedDisk => Error::new(DiskError::UnsupportedDisk),
DiskError::DiskFull => Error::new(DiskError::DiskFull),
DiskError::DiskNotDir => Error::new(DiskError::DiskNotDir),
DiskError::DiskNotFound => Error::new(DiskError::DiskNotFound),
DiskError::DiskOngoingReq => Error::new(DiskError::DiskOngoingReq),
DiskError::DriveIsRoot => Error::new(DiskError::DriveIsRoot),
DiskError::FaultyRemoteDisk => Error::new(DiskError::FaultyRemoteDisk),
DiskError::FaultyDisk => Error::new(DiskError::FaultyDisk),
DiskError::DiskAccessDenied => Error::new(DiskError::DiskAccessDenied),
DiskError::FileNotFound => Error::new(DiskError::FileNotFound),
DiskError::FileVersionNotFound => Error::new(DiskError::FileVersionNotFound),
DiskError::TooManyOpenFiles => Error::new(DiskError::TooManyOpenFiles),
DiskError::FileNameTooLong => Error::new(DiskError::FileNameTooLong),
DiskError::VolumeExists => Error::new(DiskError::VolumeExists),
DiskError::IsNotRegular => Error::new(DiskError::IsNotRegular),
DiskError::PathNotFound => Error::new(DiskError::PathNotFound),
DiskError::VolumeNotFound => Error::new(DiskError::VolumeNotFound),
DiskError::VolumeNotEmpty => Error::new(DiskError::VolumeNotEmpty),
DiskError::VolumeAccessDenied => Error::new(DiskError::VolumeAccessDenied),
DiskError::FileAccessDenied => Error::new(DiskError::FileAccessDenied),
DiskError::FileCorrupt => Error::new(DiskError::FileCorrupt),
DiskError::BitrotHashAlgoInvalid => Error::new(DiskError::BitrotHashAlgoInvalid),
DiskError::CrossDeviceLink => Error::new(DiskError::CrossDeviceLink),
DiskError::LessData => Error::new(DiskError::LessData),
DiskError::MoreData => Error::new(DiskError::MoreData),
DiskError::OutdatedXLMeta => Error::new(DiskError::OutdatedXLMeta),
DiskError::PartMissingOrCorrupt => Error::new(DiskError::PartMissingOrCorrupt),
DiskError::NoHealRequired => Error::new(DiskError::NoHealRequired),
}
}
pub fn os_err_to_file_err(e: io::Error) -> Error {
match e.kind() {
io::ErrorKind::NotFound => Error::new(DiskError::FileNotFound),
io::ErrorKind::PermissionDenied => Error::new(DiskError::FileAccessDenied),
// io::ErrorKind::ConnectionRefused => todo!(),
// io::ErrorKind::ConnectionReset => todo!(),
// io::ErrorKind::HostUnreachable => todo!(),
// io::ErrorKind::NetworkUnreachable => todo!(),
// io::ErrorKind::ConnectionAborted => todo!(),
// io::ErrorKind::NotConnected => todo!(),
// io::ErrorKind::AddrInUse => todo!(),
// io::ErrorKind::AddrNotAvailable => todo!(),
// io::ErrorKind::NetworkDown => todo!(),
// io::ErrorKind::BrokenPipe => todo!(),
// io::ErrorKind::AlreadyExists => todo!(),
// io::ErrorKind::WouldBlock => todo!(),
// io::ErrorKind::NotADirectory => DiskError::FileNotFound,
// io::ErrorKind::IsADirectory => DiskError::FileNotFound,
// io::ErrorKind::DirectoryNotEmpty => DiskError::VolumeNotEmpty,
// io::ErrorKind::ReadOnlyFilesystem => todo!(),
// io::ErrorKind::FilesystemLoop => todo!(),
// io::ErrorKind::StaleNetworkFileHandle => todo!(),
// io::ErrorKind::InvalidInput => todo!(),
// io::ErrorKind::InvalidData => todo!(),
// io::ErrorKind::TimedOut => todo!(),
// io::ErrorKind::WriteZero => todo!(),
// io::ErrorKind::StorageFull => DiskError::DiskFull,
// io::ErrorKind::NotSeekable => todo!(),
// io::ErrorKind::FilesystemQuotaExceeded => todo!(),
// io::ErrorKind::FileTooLarge => todo!(),
// io::ErrorKind::ResourceBusy => todo!(),
// io::ErrorKind::ExecutableFileBusy => todo!(),
// io::ErrorKind::Deadlock => todo!(),
// io::ErrorKind::CrossesDevices => todo!(),
// io::ErrorKind::TooManyLinks =>DiskError::TooManyOpenFiles,
// io::ErrorKind::InvalidFilename => todo!(),
// io::ErrorKind::ArgumentListTooLong => todo!(),
// io::ErrorKind::Interrupted => todo!(),
// io::ErrorKind::Unsupported => todo!(),
// io::ErrorKind::UnexpectedEof => todo!(),
// io::ErrorKind::OutOfMemory => todo!(),
// io::ErrorKind::Other => todo!(),
// TODO: represent unsupported kinds as strings
_ => Error::new(e),
}
}
#[derive(Debug, thiserror::Error)]
pub struct FileAccessDeniedWithContext {
pub path: PathBuf,
#[source]
pub source: std::io::Error,
}
impl std::fmt::Display for FileAccessDeniedWithContext {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "访问文件 '{}' 被拒绝: {}", self.path.display(), self.source)
}
}
pub fn is_unformatted_disk(err: &Error) -> bool {
matches!(err.downcast_ref::<DiskError>(), Some(DiskError::UnformattedDisk))
}
pub fn is_err_file_not_found(err: &Error) -> bool {
if let Some(ioerr) = err.downcast_ref::<io::Error>() {
return ioerr.kind() == ErrorKind::NotFound;
}
matches!(err.downcast_ref::<DiskError>(), Some(DiskError::FileNotFound))
}
pub fn is_err_file_version_not_found(err: &Error) -> bool {
matches!(err.downcast_ref::<DiskError>(), Some(DiskError::FileVersionNotFound))
}
pub fn is_err_volume_not_found(err: &Error) -> bool {
matches!(err.downcast_ref::<DiskError>(), Some(DiskError::VolumeNotFound))
}
pub fn is_err_eof(err: &Error) -> bool {
if let Some(ioerr) = err.downcast_ref::<io::Error>() {
return ioerr.kind() == ErrorKind::UnexpectedEof;
}
false
}
pub fn is_sys_err_no_space(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 28; // ENOSPC: no space left on device
}
false
}
pub fn is_sys_err_invalid_arg(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 22; // EINVAL: invalid argument
}
false
}
// EIO: a low-level I/O error reported by the OS
pub fn is_sys_err_io(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 5; // EIO
}
false
}
pub fn is_sys_err_is_dir(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 21; // EISDIR: is a directory
}
false
}
pub fn is_sys_err_not_dir(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 20; // ENOTDIR: not a directory
}
false
}
pub fn is_sys_err_too_long(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 63; // ENAMETOOLONG (BSD/macOS value)
}
false
}
pub fn is_sys_err_too_many_symlinks(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 62; // ELOOP (BSD/macOS value): too many levels of symbolic links
}
false
}
pub fn is_sys_err_not_empty(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
if no == 66 {
return true; // ENOTEMPTY (BSD/macOS value)
}
if cfg!(target_os = "solaris") && no == 17 {
return true; // EEXIST, reported for non-empty directories on Solaris
}
if cfg!(target_os = "windows") && no == 145 {
return true; // ERROR_DIR_NOT_EMPTY
}
}
false
}
pub fn is_sys_err_path_not_found(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
if cfg!(target_os = "windows") {
if no == 3 {
return true; // ERROR_PATH_NOT_FOUND
}
} else if no == 2 {
return true; // ENOENT
}
}
false
}
pub fn is_sys_err_handle_invalid(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
if cfg!(target_os = "windows") {
if no == 6 {
return true; // ERROR_INVALID_HANDLE
}
} else {
return false;
}
}
false
}
pub fn is_sys_err_cross_device(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 18; // EXDEV: cross-device link
}
false
}
pub fn is_sys_err_too_many_files(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 23 || no == 24; // ENFILE / EMFILE: too many open files
}
false
}
// pub fn os_is_not_exist(e: &io::Error) -> bool {
// e.kind() == ErrorKind::NotFound
// }
pub fn os_is_permission(e: &io::Error) -> bool {
if e.kind() == ErrorKind::PermissionDenied {
return true;
}
if let Some(no) = e.raw_os_error() {
if no == 30 {
return true; // EROFS: read-only file system
}
}
false
}
// pub fn os_is_exist(e: &io::Error) -> bool {
// e.kind() == ErrorKind::AlreadyExists
// }
// // map_err_not_exists
// pub fn map_err_not_exists(e: io::Error) -> Error {
// if os_is_not_exist(&e) {
// return Error::new(DiskError::VolumeNotEmpty);
// } else if is_sys_err_io(&e) {
// return Error::new(DiskError::FaultyDisk);
// }
// Error::new(e)
// }
// pub fn convert_access_error(e: io::Error, per_err: DiskError) -> Error {
// if os_is_not_exist(&e) {
// return Error::new(DiskError::VolumeNotEmpty);
// } else if is_sys_err_io(&e) {
// return Error::new(DiskError::FaultyDisk);
// } else if os_is_permission(&e) {
// return Error::new(per_err);
// }
// Error::new(e)
// }
pub fn is_all_not_found(errs: &[Option<Error>]) -> bool {
for err in errs.iter() {
if let Some(err) = err {
if let Some(err) = err.downcast_ref::<DiskError>() {
match err {
DiskError::FileNotFound | DiskError::VolumeNotFound | DiskError::FileVersionNotFound => {
continue;
}
_ => return false,
}
}
}
return false;
}
!errs.is_empty()
}
pub fn is_all_volume_not_found(errs: &[Option<Error>]) -> bool {
DiskError::VolumeNotFound.count_errs(errs) == errs.len()
}
pub fn is_all_buckets_not_found(errs: &[Option<Error>]) -> bool {
if errs.is_empty() {
return false;
}
let mut not_found_count = 0;
for err in errs.iter().flatten() {
match err.downcast_ref() {
Some(DiskError::VolumeNotFound) | Some(DiskError::DiskNotFound) => {
not_found_count += 1;
}
_ => {}
}
}
errs.len() == not_found_count
}
pub fn is_err_os_not_exist(err: &Error) -> bool {
if let Some(os_err) = err.downcast_ref::<io::Error>() {
os_err.kind() == ErrorKind::NotFound
} else {
false
}
}
pub fn is_err_os_disk_full(err: &Error) -> bool {
if let Some(os_err) = err.downcast_ref::<io::Error>() {
is_sys_err_no_space(os_err)
} else if let Some(e) = err.downcast_ref::<DiskError>() {
e == &DiskError::DiskFull
} else {
false
}
}
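A short sketch tying the helpers above together: mapping an `io::Error` into the typed domain, classifying it, and the wire round-trip through `to_u32`/`from_u32`. Written as if placed inside this module; it assumes `ERROR_TYPE_MASK` preserves the low bits that the codes above occupy:

```rust
use std::io;

fn demo() {
    // io::Error -> typed DiskError wrapped in the crate Error
    let err = os_err_to_file_err(io::Error::new(io::ErrorKind::NotFound, "gone"));
    assert!(is_err_file_not_found(&err));

    // Quorum helpers operate over per-disk Option<Error> slots.
    let errs = vec![
        Some(Error::new(DiskError::UnformattedDisk)),
        Some(Error::new(DiskError::UnformattedDisk)),
        Some(Error::new(DiskError::DiskNotFound)),
    ];
    assert!(DiskError::quorum_unformatted_disks(&errs)); // 2 of 3 > half

    // Wire round-trip of the numeric error codes.
    let code = DiskError::FileCorrupt.to_u32();
    assert_eq!(DiskError::from_u32(code), Some(DiskError::FileCorrupt));
}
```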

crates/disk/src/format.rs (new file)

@@ -0,0 +1,273 @@
// use super::{error::DiskError, DiskInfo};
use rustfs_error::{Error, Result};
use serde::{Deserialize, Serialize};
use serde_json::Error as JsonError;
use uuid::Uuid;
use crate::api::DiskInfo;
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum FormatMetaVersion {
#[serde(rename = "1")]
V1,
#[serde(other)]
Unknown,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum FormatBackend {
#[serde(rename = "xl")]
Erasure,
#[serde(rename = "xl-single")]
ErasureSingle,
#[serde(other)]
Unknown,
}
/// Represents the V3 backend disk structure version
/// under `.rustfs.sys` and actual data namespace.
///
/// FormatErasureV3 - structure holds format config version '3'.
///
/// The V3 format adds "large bucket" support, where a bucket
/// can span multiple erasure sets.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct FormatErasureV3 {
/// Version of 'xl' format.
pub version: FormatErasureVersion,
/// This field carries the assigned disk uuid.
pub this: Uuid,
/// Sets carries the input disk order generated the first time
/// fresh disks were supplied. It is a two-dimensional array whose
/// second dimension lists the disks used per set.
pub sets: Vec<Vec<Uuid>>,
/// Distribution algorithm represents the hashing algorithm
/// to pick the right set index for an object.
#[serde(rename = "distributionAlgo")]
pub distribution_algo: DistributionAlgoVersion,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum FormatErasureVersion {
#[serde(rename = "1")]
V1,
#[serde(rename = "2")]
V2,
#[serde(rename = "3")]
V3,
#[serde(other)]
Unknown,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub enum DistributionAlgoVersion {
#[serde(rename = "CRCMOD")]
V1,
#[serde(rename = "SIPMOD")]
V2,
#[serde(rename = "SIPMOD+PARITY")]
V3,
}
/// format.json currently has the format:
///
/// ```json
/// {
/// "version": "1",
/// "format": "XXXXX",
/// "id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
/// "XXXXX": {
/// ...
/// }
/// }
/// ```
///
/// Ideally we will never have a situation where we will have to change the
/// fields of this struct and deal with related migration.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct FormatV3 {
/// Version of the format config.
pub version: FormatMetaVersion,
/// Format indicates the backend format type, supports two values 'xl' and 'xl-single'.
pub format: FormatBackend,
/// ID is the identifier for the rustfs deployment
pub id: Uuid,
#[serde(rename = "xl")]
pub erasure: FormatErasureV3,
/// DiskInfo is an extended type which returns current
/// disk usage per path.
#[serde(skip)]
pub disk_info: Option<DiskInfo>,
}
impl TryFrom<&[u8]> for FormatV3 {
type Error = JsonError;
fn try_from(data: &[u8]) -> core::result::Result<Self, JsonError> {
serde_json::from_slice(data)
}
}
impl TryFrom<&str> for FormatV3 {
type Error = JsonError;
fn try_from(data: &str) -> core::result::Result<Self, JsonError> {
serde_json::from_str(data)
}
}
impl FormatV3 {
/// Create a new format config with the given number of sets and set length.
pub fn new(num_sets: usize, set_len: usize) -> Self {
let format = if set_len == 1 {
FormatBackend::ErasureSingle
} else {
FormatBackend::Erasure
};
let erasure = FormatErasureV3 {
version: FormatErasureVersion::V3,
this: Uuid::nil(),
sets: (0..num_sets)
.map(|_| (0..set_len).map(|_| Uuid::new_v4()).collect())
.collect(),
distribution_algo: DistributionAlgoVersion::V3,
};
Self {
version: FormatMetaVersion::V1,
format,
id: Uuid::new_v4(),
erasure,
disk_info: None,
}
}
/// Returns the number of drives in the erasure set.
pub fn drives(&self) -> usize {
self.erasure.sets.iter().map(|v| v.len()).sum()
}
pub fn to_json(&self) -> Result<String> {
Ok(serde_json::to_string(self)?)
}
/// Returns the (i, j) position of the input `disk_id` against the
/// reference format, after successful validation:
/// - i is the set index
/// - j is the disk index within the current set
pub fn find_disk_index_by_disk_id(&self, disk_id: Uuid) -> Result<(usize, usize)> {
if disk_id == Uuid::nil() {
return Err(Error::DiskNotFound);
}
// Uuid::max() is the sentinel for an offline disk.
if disk_id == Uuid::max() {
return Err(Error::msg("disk offline"));
}
for (i, set) in self.erasure.sets.iter().enumerate() {
for (j, d) in set.iter().enumerate() {
if disk_id.eq(d) {
return Ok((i, j));
}
}
}
Err(Error::msg(format!("disk id not found {}", disk_id)))
}
pub fn check_other(&self, other: &FormatV3) -> Result<()> {
let mut tmp = other.clone();
let this = tmp.erasure.this;
tmp.erasure.this = Uuid::nil();
if self.erasure.sets.len() != other.erasure.sets.len() {
return Err(Error::msg(format!(
"Expected number of sets {}, got {}",
self.erasure.sets.len(),
other.erasure.sets.len()
)));
}
for i in 0..self.erasure.sets.len() {
if self.erasure.sets[i].len() != other.erasure.sets[i].len() {
return Err(Error::msg(format!(
"Each set should be of same size, expected {}, got {}",
self.erasure.sets[i].len(),
other.erasure.sets[i].len()
)));
}
for j in 0..self.erasure.sets[i].len() {
if self.erasure.sets[i][j] != other.erasure.sets[i][j] {
return Err(Error::msg(format!(
"UUIDs at position {}:{} do not match: expected {}, got {}",
i,
j,
self.erasure.sets[i][j],
other.erasure.sets[i][j],
)));
}
}
}
for i in 0..tmp.erasure.sets.len() {
for j in 0..tmp.erasure.sets[i].len() {
if this == tmp.erasure.sets[i][j] {
return Ok(());
}
}
}
Err(Error::msg(format!(
"DriveID {:?} not found in any drive sets {:?}",
this, other.erasure.sets
)))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_format_v1() {
let format = FormatV3::new(1, 4);
let str = serde_json::to_string(&format);
println!("{:?}", str);
let data = r#"
{
"version": "1",
"format": "xl",
"id": "321b3874-987d-4c15-8fa5-757c956b1243",
"xl": {
"version": "1",
"this": null,
"sets": [
[
"8ab9a908-f869-4f1f-8e42-eb067ffa7eb5",
"c26315da-05cf-4778-a9ea-b44ea09f58c5",
"fb87a891-18d3-44cf-a46f-bcc15093a038",
"356a925c-57b9-4313-88b3-053edf1104dc"
]
],
"distributionAlgo": "CRCMOD"
}
}"#;
let p = FormatV3::try_from(data);
println!("{:?}", p);
}
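// Added sketch: find_disk_index_by_disk_id reports the exact (set, disk)
// coordinates of a drive UUID, and rejects the nil sentinel.
#[test]
fn test_find_disk_index() {
let format = FormatV3::new(2, 4);
let id = format.erasure.sets[1][3];
assert_eq!(format.find_disk_index_by_disk_id(id).unwrap(), (1, 3));
assert!(format.find_disk_index_by_disk_id(Uuid::nil()).is_err());
}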
}

179
crates/disk/src/fs.rs Normal file

@@ -0,0 +1,179 @@
use std::{fs::Metadata, path::Path};
use tokio::{
fs::{self, File},
io,
};
#[cfg(not(windows))]
pub fn same_file(f1: &Metadata, f2: &Metadata) -> bool {
use std::os::unix::fs::MetadataExt;
if f1.dev() != f2.dev() {
return false;
}
if f1.ino() != f2.ino() {
return false;
}
if f1.size() != f2.size() {
return false;
}
if f1.permissions() != f2.permissions() {
return false;
}
if f1.mtime() != f2.mtime() {
return false;
}
true
}
#[cfg(windows)]
pub fn same_file(f1: &Metadata, f2: &Metadata) -> bool {
if f1.permissions() != f2.permissions() {
return false;
}
if f1.file_type() != f2.file_type() {
return false;
}
if f1.len() != f2.len() {
return false;
}
true
}
type FileMode = usize;
pub const O_RDONLY: FileMode = 0x00000;
pub const O_WRONLY: FileMode = 0x00001;
pub const O_RDWR: FileMode = 0x00002;
pub const O_CREATE: FileMode = 0x00040;
// pub const O_EXCL: FileMode = 0x00080;
// pub const O_NOCTTY: FileMode = 0x00100;
pub const O_TRUNC: FileMode = 0x00200;
// pub const O_NONBLOCK: FileMode = 0x00800;
pub const O_APPEND: FileMode = 0x00400;
// pub const O_SYNC: FileMode = 0x01000;
// pub const O_ASYNC: FileMode = 0x02000;
// pub const O_CLOEXEC: FileMode = 0x80000;
/// Maps the Go-style flag bits above onto tokio's `OpenOptions`
/// (read / write / append / truncate / create).
pub async fn open_file(path: impl AsRef<Path>, mode: FileMode) -> io::Result<File> {
let mut opts = fs::OpenOptions::new();
match mode & (O_RDONLY | O_WRONLY | O_RDWR) {
O_RDONLY => {
opts.read(true);
}
O_WRONLY => {
opts.write(true);
}
O_RDWR => {
opts.read(true);
opts.write(true);
}
_ => (),
};
if mode & O_CREATE != 0 {
opts.create(true);
}
if mode & O_APPEND != 0 {
opts.append(true);
}
if mode & O_TRUNC != 0 {
opts.truncate(true);
}
opts.open(path.as_ref()).await
}
pub async fn access(path: impl AsRef<Path>) -> io::Result<()> {
fs::metadata(path).await?;
Ok(())
}
pub fn access_std(path: impl AsRef<Path>) -> io::Result<()> {
std::fs::metadata(path)?;
Ok(())
}
pub async fn lstat(path: impl AsRef<Path>) -> io::Result<Metadata> {
// lstat semantics: stat the link itself, do not follow symlinks.
fs::symlink_metadata(path).await
}
pub fn lstat_std(path: impl AsRef<Path>) -> io::Result<Metadata> {
std::fs::symlink_metadata(path)
}
pub async fn make_dir_all(path: impl AsRef<Path>) -> io::Result<()> {
fs::create_dir_all(path.as_ref()).await
}
#[tracing::instrument(level = "debug", skip_all)]
pub async fn remove(path: impl AsRef<Path>) -> io::Result<()> {
let meta = fs::metadata(path.as_ref()).await?;
if meta.is_dir() {
fs::remove_dir(path.as_ref()).await
} else {
fs::remove_file(path.as_ref()).await
}
}
pub async fn remove_all(path: impl AsRef<Path>) -> io::Result<()> {
let meta = fs::metadata(path.as_ref()).await?;
if meta.is_dir() {
fs::remove_dir_all(path.as_ref()).await
} else {
fs::remove_file(path.as_ref()).await
}
}
#[tracing::instrument(level = "debug", skip_all)]
pub fn remove_std(path: impl AsRef<Path>) -> io::Result<()> {
let meta = std::fs::metadata(path.as_ref())?;
if meta.is_dir() {
std::fs::remove_dir(path.as_ref())
} else {
std::fs::remove_file(path.as_ref())
}
}
pub fn remove_all_std(path: impl AsRef<Path>) -> io::Result<()> {
let meta = std::fs::metadata(path.as_ref())?;
if meta.is_dir() {
std::fs::remove_dir_all(path.as_ref())
} else {
std::fs::remove_file(path.as_ref())
}
}
pub async fn mkdir(path: impl AsRef<Path>) -> io::Result<()> {
fs::create_dir(path.as_ref()).await
}
pub async fn rename(from: impl AsRef<Path>, to: impl AsRef<Path>) -> io::Result<()> {
fs::rename(from, to).await
}
pub fn rename_std(from: impl AsRef<Path>, to: impl AsRef<Path>) -> io::Result<()> {
std::fs::rename(from, to)
}
#[tracing::instrument(level = "debug", skip_all)]
pub async fn read_file(path: impl AsRef<Path>) -> io::Result<Vec<u8>> {
fs::read(path.as_ref()).await
}
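// Added sketch (not part of this commit): how the Go-style flag bits compose
// into an OpenOptions call. Assumes tokio's `macros` feature (for
// `#[tokio::test]`) is available as a dev-dependency in this crate.
#[cfg(test)]
mod open_file_tests {
use super::*;
#[tokio::test]
async fn open_file_create_write_truncate() {
let path = std::env::temp_dir().join("rustfs_open_file_probe.bin");
// O_WRONLY | O_CREATE | O_TRUNC mirrors Go's create-or-truncate open.
let _f = open_file(&path, O_WRONLY | O_CREATE | O_TRUNC).await.unwrap();
assert!(access(&path).await.is_ok());
let _ = remove(&path).await;
}
}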

12
crates/disk/src/lib.rs Normal file

@@ -0,0 +1,12 @@
pub mod endpoint;
// pub mod error;
pub mod format;
pub mod fs;
pub mod local;
// pub mod metacache;
pub mod api;
pub mod local_list;
pub mod os;
pub mod path;
pub mod remote;
pub mod utils;

2048
crates/disk/src/local.rs Normal file

File diff suppressed because it is too large

2364
crates/disk/src/local_bak.rs Normal file

File diff suppressed because it is too large


@@ -0,0 +1,535 @@
use crate::{
api::{DiskAPI, DiskStore, WalkDirOptions, STORAGE_FORMAT_FILE},
local::LocalDisk,
os::is_empty_dir,
path::{self, decode_dir_object, GLOBAL_DIR_SUFFIX_WITH_SLASH, SLASH_SEPARATOR},
};
use futures::future::join_all;
use rustfs_error::{Error, Result};
use rustfs_metacache::{MetaCacheEntries, MetaCacheEntry, MetacacheReader, MetacacheWriter};
use std::{collections::HashSet, future::Future, pin::Pin, sync::Arc};
use tokio::{io::AsyncWrite, spawn, sync::broadcast::Receiver as B_Receiver};
use tracing::{error, info, warn};
pub type AgreedFn = Box<dyn Fn(MetaCacheEntry) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
pub type PartialFn = Box<dyn Fn(MetaCacheEntries, &[Option<Error>]) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
type FinishedFn = Box<dyn Fn(&[Option<Error>]) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
#[derive(Default)]
pub struct ListPathRawOptions {
pub disks: Vec<Option<DiskStore>>,
pub fallback_disks: Vec<Option<DiskStore>>,
pub bucket: String,
pub path: String,
pub recursive: bool,
pub filter_prefix: Option<String>,
pub forward_to: Option<String>,
pub min_disks: usize,
pub report_not_found: bool,
pub per_disk_limit: i32,
pub agreed: Option<AgreedFn>,
pub partial: Option<PartialFn>,
pub finished: Option<FinishedFn>,
}
impl Clone for ListPathRawOptions {
fn clone(&self) -> Self {
Self {
disks: self.disks.clone(),
fallback_disks: self.fallback_disks.clone(),
bucket: self.bucket.clone(),
path: self.path.clone(),
recursive: self.recursive,
filter_prefix: self.filter_prefix.clone(),
forward_to: self.forward_to.clone(),
min_disks: self.min_disks,
report_not_found: self.report_not_found,
per_disk_limit: self.per_disk_limit,
// The agreed/partial/finished callbacks are not cloneable; they reset to None.
..Default::default()
}
}
}
pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -> Result<()> {
if opts.disks.is_empty() {
return Err(Error::msg("list_path_raw: 0 drives provided"));
}
let mut jobs: Vec<tokio::task::JoinHandle<std::result::Result<(), Error>>> = Vec::new();
let mut readers = Vec::with_capacity(opts.disks.len());
let fds = Arc::new(opts.fallback_disks.clone());
for disk in opts.disks.iter() {
let opdisk = disk.clone();
let opts_clone = opts.clone();
let fds_clone = fds.clone();
let (rd, mut wr) = tokio::io::duplex(64);
readers.push(MetacacheReader::new(rd));
jobs.push(spawn(async move {
let walk_opts = WalkDirOptions {
bucket: opts_clone.bucket.clone(),
base_dir: opts_clone.path.clone(),
recursive: opts_clone.recursive,
report_notfound: opts_clone.report_not_found,
filter_prefix: opts_clone.filter_prefix.clone(),
forward_to: opts_clone.forward_to.clone(),
limit: opts_clone.per_disk_limit,
..Default::default()
};
let mut need_fallback = false;
if let Some(disk) = opdisk {
match disk.walk_dir(walk_opts, &mut wr).await {
Ok(_res) => {}
Err(err) => {
error!("walk dir err {:?}", &err);
need_fallback = true;
}
}
} else {
need_fallback = true;
}
if need_fallback {
// Try the fallback disks in order; the primary walk failed (or the disk
// was absent), so the first fallback that completes the walk wins.
for disk in fds_clone.iter().flatten() {
match disk
.walk_dir(
WalkDirOptions {
bucket: opts_clone.bucket.clone(),
base_dir: opts_clone.path.clone(),
recursive: opts_clone.recursive,
report_notfound: opts_clone.report_not_found,
filter_prefix: opts_clone.filter_prefix.clone(),
forward_to: opts_clone.forward_to.clone(),
limit: opts_clone.per_disk_limit,
..Default::default()
},
&mut wr,
)
.await
{
Ok(_r) => break,
Err(err) => {
error!("walk dir fallback err {:?}", &err);
}
}
}
}
Ok(())
}));
}
let revjob = spawn(async move {
let mut errs: Vec<Option<Error>> = Vec::with_capacity(readers.len());
for _ in 0..readers.len() {
errs.push(None);
}
loop {
let mut current = MetaCacheEntry::default();
if rx.try_recv().is_ok() {
return Err(Error::msg("canceled"));
}
let mut top_entries: Vec<Option<MetaCacheEntry>> = vec![None; readers.len()];
let mut at_eof = 0;
let mut fnf = 0;
let mut vnf = 0;
let mut has_err = 0;
let mut agree = 0;
for (i, r) in readers.iter_mut().enumerate() {
if errs[i].is_some() {
has_err += 1;
continue;
}
let entry = match r.peek().await {
Ok(res) => {
if let Some(entry) = res {
entry
} else {
at_eof += 1;
continue;
}
}
Err(err) => {
if err == Error::FaultyDisk {
at_eof += 1;
continue;
} else if err == Error::FileNotFound {
at_eof += 1;
fnf += 1;
continue;
} else if err == Error::VolumeNotFound {
at_eof += 1;
fnf += 1;
vnf += 1;
continue;
} else {
has_err += 1;
errs[i] = Some(err);
continue;
}
}
};
// If no current, add it.
if current.name.is_empty() {
top_entries[i] = Some(entry.clone());
current = entry;
agree += 1;
continue;
}
// If exact match, we agree.
if let (_, true) = current.matches(Some(&entry), true) {
top_entries[i] = Some(entry);
agree += 1;
continue;
}
// If only the name matches we didn't agree, but add it for resolution.
if entry.name == current.name {
top_entries[i] = Some(entry);
continue;
}
// We got different entries
if entry.name > current.name {
continue;
}
for item in top_entries.iter_mut().take(i) {
*item = None;
}
agree = 1;
top_entries[i] = Some(entry.clone());
current = entry;
}
// saturating_sub avoids a usize underflow when min_disks exceeds the reader count.
if vnf > 0 && vnf >= readers.len().saturating_sub(opts.min_disks) {
return Err(Error::VolumeNotFound);
}
if fnf > 0 && fnf >= readers.len().saturating_sub(opts.min_disks) {
return Err(Error::FileNotFound);
}
if has_err > 0 && has_err > opts.disks.len().saturating_sub(opts.min_disks) {
if let Some(finished_fn) = opts.finished.as_ref() {
finished_fn(&errs).await;
}
let mut combined_err = Vec::new();
errs.iter().zip(opts.disks.iter()).for_each(|(err, disk)| match (err, disk) {
(Some(err), Some(disk)) => {
combined_err.push(format!("drive {} returned: {}", disk.to_string(), err));
}
(Some(err), None) => {
combined_err.push(err.to_string());
}
_ => {}
});
return Err(Error::msg(combined_err.join(", ")));
}
// Break if all at EOF or error.
if at_eof + has_err == readers.len() {
if has_err > 0 {
if let Some(finished_fn) = opts.finished.as_ref() {
finished_fn(&errs).await;
}
}
break;
}
if agree == readers.len() {
for r in readers.iter_mut() {
let _ = r.skip(1).await;
}
if let Some(agreed_fn) = opts.agreed.as_ref() {
agreed_fn(current).await;
}
continue;
}
for (i, r) in readers.iter_mut().enumerate() {
if top_entries[i].is_some() {
let _ = r.skip(1).await;
}
}
if let Some(partial_fn) = opts.partial.as_ref() {
partial_fn(MetaCacheEntries(top_entries), &errs).await;
}
}
Ok(())
});
jobs.push(revjob);
let results = join_all(jobs).await;
for result in results {
if let Err(err) = result {
error!("list_path_raw err {:?}", err);
}
}
Ok(())
}
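// Illustrative usage (added sketch, not part of the commit): drive the listing
// with an `agreed` callback. The bucket and prefix names here are hypothetical.
#[allow(dead_code)]
async fn list_path_raw_example(disks: Vec<Option<DiskStore>>) -> Result<()> {
let (_tx, rx) = tokio::sync::broadcast::channel::<bool>(1);
let agreed: AgreedFn = Box::new(|entry| {
Box::pin(async move {
println!("all drives agreed on {}", entry.name);
})
});
list_path_raw(
rx,
ListPathRawOptions {
disks,
bucket: "mybucket".to_owned(),
path: "prefix/".to_owned(),
recursive: true,
min_disks: 1,
agreed: Some(agreed),
..Default::default()
},
)
.await
}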
impl LocalDisk {
pub(crate) async fn scan_dir<W: AsyncWrite + Unpin>(
&self,
current: &mut String,
opts: &WalkDirOptions,
out: &mut MetacacheWriter<W>,
objs_returned: &mut i32,
) -> Result<()> {
let forward = {
opts.forward_to.as_ref().filter(|v| v.starts_with(&*current)).map(|v| {
let forward = v.trim_start_matches(&*current);
if let Some(idx) = forward.find('/') {
forward[..idx].to_owned()
} else {
forward.to_owned()
}
})
};
if opts.limit > 0 && *objs_returned >= opts.limit {
return Ok(());
}
let mut entries = match self.list_dir("", &opts.bucket, current, -1).await {
Ok(res) => res,
Err(e) => {
if e != Error::VolumeNotFound && e != Error::FileNotFound {
info!("scan list_dir {}, err {:?}", &current, &e);
}
if opts.report_notfound && (e == Error::VolumeNotFound || e == Error::FileNotFound) && current == &opts.base_dir {
return Err(Error::FileNotFound);
}
return Ok(());
}
};
if entries.is_empty() {
return Ok(());
}
let s = SLASH_SEPARATOR.chars().next().unwrap_or_default();
*current = current.trim_matches(s).to_owned();
let bucket = opts.bucket.as_str();
let mut dir_objes = HashSet::new();
// First pass: filter the raw entries in place.
for item in entries.iter_mut() {
let entry = item.clone();
// check limit
if opts.limit > 0 && *objs_returned >= opts.limit {
return Ok(());
}
// check prefix
if let Some(filter_prefix) = &opts.filter_prefix {
if !entry.starts_with(filter_prefix) {
*item = "".to_owned();
continue;
}
}
if let Some(forward) = &forward {
if &entry < forward {
*item = "".to_owned();
continue;
}
}
if entry.ends_with(SLASH_SEPARATOR) {
if entry.ends_with(GLOBAL_DIR_SUFFIX_WITH_SLASH) {
let entry = format!("{}{}", entry.as_str().trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH), SLASH_SEPARATOR);
dir_objes.insert(entry.clone());
*item = entry;
continue;
}
*item = entry.trim_end_matches(SLASH_SEPARATOR).to_owned();
continue;
}
*item = "".to_owned();
if entry.ends_with(STORAGE_FORMAT_FILE) {
// The storage format file lives directly here, so this directory is itself an object.
let metadata = self
.read_metadata(self.get_object_path(bucket, format!("{}/{}", &current, &entry).as_str())?)
.await?;
// Use strip_suffix so the suffix is removed exactly once.
let entry = entry.strip_suffix(STORAGE_FORMAT_FILE).unwrap_or_default().to_owned();
let name = entry.trim_end_matches(SLASH_SEPARATOR);
let name = decode_dir_object(format!("{}/{}", &current, &name).as_str());
out.write_obj(&MetaCacheEntry {
name,
metadata,
..Default::default()
})
.await?;
*objs_returned += 1;
return Ok(());
}
}
entries.sort();
let mut entries = entries.as_slice();
if let Some(forward) = &forward {
for (i, entry) in entries.iter().enumerate() {
if entry >= forward || forward.starts_with(entry.as_str()) {
entries = &entries[i..];
break;
}
}
}
let mut dir_stack: Vec<String> = Vec::with_capacity(5);
for entry in entries.iter() {
if opts.limit > 0 && *objs_returned >= opts.limit {
return Ok(());
}
if entry.is_empty() {
continue;
}
let name = path::path_join_buf(&[current, entry]);
if !dir_stack.is_empty() {
if let Some(pop) = dir_stack.pop() {
if pop < name {
// Emit the queued directory entry before any name that sorts after it.
out.write_obj(&MetaCacheEntry {
name: pop.clone(),
..Default::default()
})
.await?;
if opts.recursive {
let mut opts = opts.clone();
opts.filter_prefix = None;
if let Err(er) = Box::pin(self.scan_dir(&mut pop.clone(), &opts, out, objs_returned)).await {
error!("scan_dir err {:?}", er);
}
}
}
}
}
let mut meta = MetaCacheEntry {
name,
..Default::default()
};
let mut is_dir_obj = false;
if let Some(_dir) = dir_objes.get(entry) {
is_dir_obj = true;
meta.name
.truncate(meta.name.len() - meta.name.chars().last().unwrap().len_utf8());
meta.name.push_str(GLOBAL_DIR_SUFFIX_WITH_SLASH);
}
let fname = format!("{}/{}", &meta.name, STORAGE_FORMAT_FILE);
match self.read_metadata(self.get_object_path(&opts.bucket, fname.as_str())?).await {
Ok(res) => {
if is_dir_obj {
meta.name = meta.name.trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH).to_owned();
meta.name.push_str(SLASH_SEPARATOR);
}
meta.metadata = res;
out.write_obj(&meta).await?;
*objs_returned += 1;
}
Err(err) => {
if err == Error::FileNotFound || err == Error::IsNotRegular {
// NOT an object, append to stack (with slash)
// If dirObject, but no metadata (which is unexpected) we skip it.
if !is_dir_obj && !is_empty_dir(self.get_object_path(&opts.bucket, &meta.name)?).await {
meta.name.push_str(SLASH_SEPARATOR);
dir_stack.push(meta.name);
}
}
continue;
}
};
}
while let Some(dir) = dir_stack.pop() {
if opts.limit > 0 && *objs_returned >= opts.limit {
return Ok(());
}
out.write_obj(&MetaCacheEntry {
name: dir.clone(),
..Default::default()
})
.await?;
*objs_returned += 1;
if opts.recursive {
let mut dir = dir;
let mut opts = opts.clone();
opts.filter_prefix = None;
if let Err(er) = Box::pin(self.scan_dir(&mut dir, &opts, out, objs_returned)).await {
warn!("scan_dir err {:?}", &er);
}
}
}
Ok(())
}
}


@@ -0,0 +1,608 @@
use crate::bucket::metadata_sys::get_versioning_config;
use crate::bucket::versioning::VersioningApi;
use crate::store_api::ObjectInfo;
use crate::utils::path::SLASH_SEPARATOR;
use rustfs_error::{Error, Result};
use rustfs_filemeta::merge_file_meta_versions;
use rustfs_filemeta::FileInfo;
use rustfs_filemeta::FileInfoVersions;
use rustfs_filemeta::FileMeta;
use rustfs_filemeta::FileMetaShallowVersion;
use rustfs_filemeta::VersionType;
use serde::Deserialize;
use serde::Serialize;
use std::cmp::Ordering;
use time::OffsetDateTime;
use tracing::warn;
#[derive(Clone, Debug, Default)]
pub struct MetadataResolutionParams {
pub dir_quorum: usize,
pub obj_quorum: usize,
pub requested_versions: usize,
pub bucket: String,
pub strict: bool,
pub candidates: Vec<Vec<FileMetaShallowVersion>>,
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct MetaCacheEntry {
// name is the full name of the object including prefixes
pub name: String,
// Metadata. If none is present it is not an object but only a prefix.
// Entries without metadata will only be present in non-recursive scans.
pub metadata: Vec<u8>,
// cached contains the metadata if decoded.
pub cached: Option<FileMeta>,
// Indicates the entry can be reused and only one reference to metadata is expected.
pub reusable: bool,
}
impl MetaCacheEntry {
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
let mut wr = Vec::new();
rmp::encode::write_bool(&mut wr, true)?;
rmp::encode::write_str(&mut wr, &self.name)?;
rmp::encode::write_bin(&mut wr, &self.metadata)?;
Ok(wr)
}
pub fn is_dir(&self) -> bool {
self.metadata.is_empty() && self.name.ends_with('/')
}
pub fn is_in_dir(&self, dir: &str, separator: &str) -> bool {
if dir.is_empty() {
let idx = self.name.find(separator);
return idx.is_none() || idx.unwrap() == self.name.len() - separator.len();
}
let ext = self.name.trim_start_matches(dir);
if ext.len() != self.name.len() {
let idx = ext.find(separator);
return idx.is_none() || idx.unwrap() == ext.len() - separator.len();
}
false
}
pub fn is_object(&self) -> bool {
!self.metadata.is_empty()
}
pub fn is_object_dir(&self) -> bool {
!self.metadata.is_empty() && self.name.ends_with(SLASH_SEPARATOR)
}
pub fn is_latest_delete_marker(&mut self) -> bool {
if let Some(cached) = &self.cached {
if cached.versions.is_empty() {
return true;
}
return cached.versions[0].header.version_type == VersionType::Delete;
}
if !FileMeta::is_xl2_v1_format(&self.metadata) {
return false;
}
match FileMeta::check_xl2_v1(&self.metadata) {
Ok((meta, _, _)) => {
if !meta.is_empty() {
return FileMeta::is_latest_delete_marker(meta);
}
}
Err(_) => return true,
}
match self.xl_meta() {
Ok(res) => {
if res.versions.is_empty() {
return true;
}
res.versions[0].header.version_type == VersionType::Delete
}
Err(_) => true,
}
}
#[tracing::instrument(level = "debug", skip(self))]
pub fn to_fileinfo(&self, bucket: &str) -> Result<FileInfo> {
if self.is_dir() {
return Ok(FileInfo {
volume: bucket.to_owned(),
name: self.name.clone(),
..Default::default()
});
}
if let Some(fm) = &self.cached {
if fm.versions.is_empty() {
return Ok(FileInfo {
volume: bucket.to_owned(),
name: self.name.clone(),
deleted: true,
is_latest: true,
mod_time: Some(OffsetDateTime::UNIX_EPOCH),
..Default::default()
});
}
return fm.into_fileinfo(bucket, self.name.as_str(), "", false, false);
}
let mut fm = FileMeta::new();
fm.unmarshal_msg(&self.metadata)?;
fm.into_fileinfo(bucket, self.name.as_str(), "", false, false)
}
pub fn file_info_versions(&self, bucket: &str) -> Result<FileInfoVersions> {
if self.is_dir() {
return Ok(FileInfoVersions {
volume: bucket.to_string(),
name: self.name.clone(),
versions: vec![FileInfo {
volume: bucket.to_string(),
name: self.name.clone(),
..Default::default()
}],
..Default::default()
});
}
let mut fm = FileMeta::new();
fm.unmarshal_msg(&self.metadata)?;
fm.into_file_info_versions(bucket, self.name.as_str(), false)
}
pub fn matches(&self, other: Option<&MetaCacheEntry>, strict: bool) -> (Option<MetaCacheEntry>, bool) {
let Some(other) = other else {
return (None, false);
};
let mut prefer = None;
if self.name != other.name {
if self.name < other.name {
return (Some(self.clone()), false);
}
return (Some(other.clone()), false);
}
if other.is_dir() || self.is_dir() {
if self.is_dir() {
return (Some(self.clone()), other.is_dir() == self.is_dir());
}
return (Some(other.clone()), other.is_dir() == self.is_dir());
}
let self_vers = match &self.cached {
Some(file_meta) => file_meta.clone(),
None => match FileMeta::load(&self.metadata) {
Ok(meta) => meta,
Err(_) => {
return (None, false);
}
},
};
let other_vers = match &other.cached {
Some(file_meta) => file_meta.clone(),
None => match FileMeta::load(&other.metadata) {
Ok(meta) => meta,
Err(_) => {
return (None, false);
}
},
};
if self_vers.versions.len() != other_vers.versions.len() {
match self_vers.lastest_mod_time().cmp(&other_vers.lastest_mod_time()) {
Ordering::Greater => {
return (Some(self.clone()), false);
}
Ordering::Less => {
return (Some(other.clone()), false);
}
_ => {}
}
if self_vers.versions.len() > other_vers.versions.len() {
return (Some(self.clone()), false);
}
return (Some(other.clone()), false);
}
for (s_version, o_version) in self_vers.versions.iter().zip(other_vers.versions.iter()) {
if s_version.header != o_version.header {
if s_version.header.has_ec() != o_version.header.has_ec() {
// One version has EC and the other doesn't - may have been written later.
// Compare without considering EC.
let (mut a, mut b) = (s_version.header.clone(), o_version.header.clone());
(a.ec_n, a.ec_m, b.ec_n, b.ec_m) = (0, 0, 0, 0);
if a == b {
continue;
}
}
if !strict && s_version.header.matches_not_strict(&o_version.header) {
if prefer.is_none() {
if s_version.header.sorts_before(&o_version.header) {
prefer = Some(self.clone());
} else {
prefer = Some(other.clone());
}
}
continue;
}
if prefer.is_some() {
return (prefer, false);
}
if s_version.header.sorts_before(&o_version.header) {
return (Some(self.clone()), false);
}
return (Some(other.clone()), false);
}
}
if prefer.is_none() {
prefer = Some(self.clone());
}
(prefer, true)
}
pub fn xl_meta(&mut self) -> Result<FileMeta> {
if self.is_dir() {
return Err(Error::FileNotFound);
}
if let Some(meta) = &self.cached {
Ok(meta.clone())
} else {
if self.metadata.is_empty() {
return Err(Error::FileNotFound);
}
let meta = FileMeta::load(&self.metadata)?;
self.cached = Some(meta.clone());
Ok(meta)
}
}
}
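// Added sketch (illustration): empty metadata plus a trailing slash marks a
// prefix ("dir"), while any metadata marks an object; the names are hypothetical.
#[cfg(test)]
mod entry_shape_tests {
use super::*;
#[test]
fn dir_and_object_classification() {
let dir = MetaCacheEntry { name: "a/b/".to_owned(), ..Default::default() };
assert!(dir.is_dir());
assert!(dir.is_in_dir("a/", "/"));
let obj = MetaCacheEntry { name: "a/b".to_owned(), metadata: vec![1], ..Default::default() };
assert!(obj.is_object());
assert!(!obj.is_dir());
}
}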
#[derive(Debug, Default)]
pub struct MetaCacheEntries(pub Vec<Option<MetaCacheEntry>>);
impl MetaCacheEntries {
#[allow(clippy::should_implement_trait)]
pub fn as_ref(&self) -> &[Option<MetaCacheEntry>] {
&self.0
}
pub fn resolve(&self, mut params: MetadataResolutionParams) -> Option<MetaCacheEntry> {
if self.0.is_empty() {
warn!("decommission_pool: entries resolve empty");
return None;
}
let mut dir_exists = 0;
let mut selected = None;
params.candidates.clear();
let mut objs_agree = 0;
let mut objs_valid = 0;
for entry in self.0.iter().flatten() {
let mut entry = entry.clone();
warn!("decommission_pool: entries resolve entry {:?}", entry.name);
if entry.name.is_empty() {
continue;
}
if entry.is_dir() {
dir_exists += 1;
selected = Some(entry.clone());
warn!("decommission_pool: entries resolve entry dir {:?}", entry.name);
continue;
}
let xl = match entry.xl_meta() {
Ok(xl) => xl,
Err(e) => {
warn!("decommission_pool: entries resolve entry xl_meta {:?}", e);
continue;
}
};
objs_valid += 1;
params.candidates.push(xl.versions.clone());
if selected.is_none() {
selected = Some(entry.clone());
objs_agree = 1;
warn!("decommission_pool: entries resolve entry selected {:?}", entry.name);
continue;
}
if let (prefer, true) = entry.matches(selected.as_ref(), params.strict) {
selected = prefer;
objs_agree += 1;
warn!("decommission_pool: entries resolve entry prefer {:?}", entry.name);
continue;
}
}
let Some(selected) = selected else {
warn!("decommission_pool: entries resolve entry no selected");
return None;
};
if selected.is_dir() && dir_exists >= params.dir_quorum {
warn!("decommission_pool: entries resolve entry dir selected {:?}", selected.name);
return Some(selected);
}
// If we would never be able to reach read quorum.
if objs_valid < params.obj_quorum {
warn!(
"decommission_pool: entries resolve entry not enough objects {} < {}",
objs_valid, params.obj_quorum
);
return None;
}
if objs_agree == objs_valid {
warn!("decommission_pool: entries resolve entry all agree {} == {}", objs_agree, objs_valid);
return Some(selected);
}
let Some(cached) = selected.cached else {
warn!("decommission_pool: entries resolve entry no cached");
return None;
};
let versions = merge_file_meta_versions(params.obj_quorum, params.strict, params.requested_versions, &params.candidates);
if versions.is_empty() {
warn!("decommission_pool: entries resolve entry no versions");
return None;
}
let metadata = match cached.marshal_msg() {
Ok(meta) => meta,
Err(e) => {
warn!("decommission_pool: entries resolve entry marshal_msg {:?}", e);
return None;
}
};
// Merge if we have disagreement.
// Create a new merged result.
let new_selected = MetaCacheEntry {
name: selected.name.clone(),
cached: Some(FileMeta {
meta_ver: cached.meta_ver,
versions,
..Default::default()
}),
reusable: true,
metadata,
};
warn!("decommission_pool: entries resolve entry selected {:?}", new_selected.name);
Some(new_selected)
}
pub fn first_found(&self) -> (Option<MetaCacheEntry>, usize) {
(self.0.iter().find(|x| x.is_some()).cloned().unwrap_or_default(), self.0.len())
}
}
#[derive(Debug, Default)]
pub struct MetaCacheEntriesSortedResult {
pub entries: Option<MetaCacheEntriesSorted>,
pub err: Option<Error>,
}
// impl MetaCacheEntriesSortedResult {
// pub fn entriy_list(&self) -> Vec<&MetaCacheEntry> {
// if let Some(entries) = &self.entries {
// entries.entries()
// } else {
// Vec::new()
// }
// }
// }
#[derive(Debug, Default)]
pub struct MetaCacheEntriesSorted {
pub o: MetaCacheEntries,
pub list_id: Option<String>,
pub reuse: bool,
pub last_skipped_entry: Option<String>,
}
impl MetaCacheEntriesSorted {
pub fn entries(&self) -> Vec<&MetaCacheEntry> {
let entries: Vec<&MetaCacheEntry> = self.o.0.iter().flatten().collect();
entries
}
pub fn forward_past(&mut self, marker: Option<String>) {
if let Some(val) = marker {
// TODO: reuse
// Index against the raw vec (not a flattened view), otherwise `split_off`
// cuts at the wrong position whenever some slots are `None`.
if let Some(idx) = self.o.0.iter().position(|v| v.as_ref().is_some_and(|e| e.name > val)) {
self.o.0 = self.o.0.split_off(idx);
}
}
}
pub async fn file_infos(&self, bucket: &str, prefix: &str, delimiter: Option<String>) -> Vec<ObjectInfo> {
let vcfg = get_versioning_config(bucket).await.ok();
let mut objects = Vec::with_capacity(self.o.as_ref().len());
let mut prev_prefix = "";
for entry in self.o.as_ref().iter().flatten() {
if entry.is_object() {
if let Some(delimiter) = &delimiter {
if let Some(idx) = entry.name.trim_start_matches(prefix).find(delimiter) {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
continue;
}
}
if let Ok(fi) = entry.to_fileinfo(bucket) {
// TODO:VersionPurgeStatus
let versioned = vcfg.clone().map(|v| v.0.versioned(&entry.name)).unwrap_or_default();
objects.push(ObjectInfo::from_file_info(&fi, bucket, &entry.name, versioned));
}
continue;
}
if entry.is_dir() {
if let Some(delimiter) = &delimiter {
if let Some(idx) = entry.name.trim_start_matches(prefix).find(delimiter) {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
}
}
}
}
objects
}
pub async fn file_info_versions(
&self,
bucket: &str,
prefix: &str,
delimiter: Option<String>,
after_v: Option<String>,
) -> Vec<ObjectInfo> {
let vcfg = get_versioning_config(bucket).await.ok();
let mut objects = Vec::with_capacity(self.o.as_ref().len());
let mut prev_prefix = "";
let mut after_v = after_v;
for entry in self.o.as_ref().iter().flatten() {
if entry.is_object() {
if let Some(delimiter) = &delimiter {
if let Some(idx) = entry.name.trim_start_matches(prefix).find(delimiter) {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
continue;
}
}
let mut fiv = match entry.file_info_versions(bucket) {
Ok(res) => res,
Err(_err) => {
//
continue;
}
};
let fi_versions = 'c: {
if let Some(after_val) = &after_v {
if let Some(idx) = fiv.find_version_index(after_val) {
after_v = None;
break 'c fiv.versions.split_off(idx + 1);
}
after_v = None;
break 'c fiv.versions;
} else {
break 'c fiv.versions;
}
};
for fi in fi_versions.into_iter() {
// VersionPurgeStatus
let versioned = vcfg.clone().map(|v| v.0.versioned(&entry.name)).unwrap_or_default();
objects.push(ObjectInfo::from_file_info(&fi, bucket, &entry.name, versioned));
}
continue;
}
if entry.is_dir() {
if let Some(delimiter) = &delimiter {
if let Some(idx) = entry.name.trim_start_matches(prefix).find(delimiter) {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
}
}
}
}
objects
}
}

206
crates/disk/src/os.rs Normal file

@@ -0,0 +1,206 @@
use super::fs;
use rustfs_error::{to_access_error, Error, Result};
use rustfs_utils::os::same_disk;
use std::{
io,
path::{Component, Path},
};
pub fn check_path_length(path_name: &str) -> Result<()> {
// Apple OS X path length is limited to 1016
if cfg!(target_os = "macos") && path_name.len() > 1016 {
return Err(Error::FileNameTooLong);
}
// Disallow more than 1024 characters on windows, there
// are no known name_max limits on Windows.
if cfg!(target_os = "windows") && path_name.len() > 1024 {
return Err(Error::FileNameTooLong);
}
// On Unix we reject paths if they are just '.', '..' or '/'
let invalid_paths = [".", "..", "/"];
if invalid_paths.contains(&path_name) {
return Err(Error::FileAccessDenied);
}
// Check that no path segment exceeds 255 bytes, the NAME_MAX
// value on Unix platforms; see /usr/include/linux/limits.h.
let mut count = 0usize;
for c in path_name.chars() {
match c {
// '/' always terminates a segment; '\\' does so only on Windows.
'/' => count = 0,
'\\' if cfg!(target_os = "windows") => count = 0,
_ => {
count += 1;
if count > 255 {
return Err(Error::FileNameTooLong);
}
}
}
}
// Success.
Ok(())
}
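// Added sketch: the segment counter resets at every separator, so a long path
// of short segments passes while a single over-long segment fails.
#[cfg(test)]
mod check_path_length_tests {
use super::*;
#[test]
fn segment_limit() {
assert!(check_path_length(&"a".repeat(256)).is_err());
let two_segments = format!("{}/{}", "a".repeat(200), "b".repeat(200));
assert!(check_path_length(&two_segments).is_ok());
}
}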
pub fn is_root_disk(disk_path: &str, root_disk: &str) -> Result<bool> {
if cfg!(target_os = "windows") {
return Ok(false);
}
Ok(same_disk(disk_path, root_disk)?)
}
pub async fn make_dir_all(path: impl AsRef<Path>, base_dir: impl AsRef<Path>) -> Result<()> {
check_path_length(path.as_ref().to_string_lossy().to_string().as_str())?;
if let Err(e) = reliable_mkdir_all(path.as_ref(), base_dir.as_ref()).await {
return Err(to_access_error(e, Error::FileAccessDenied).into());
}
Ok(())
}
pub async fn is_empty_dir(path: impl AsRef<Path>) -> bool {
read_dir(path.as_ref(), 1).await.is_ok_and(|v| v.is_empty())
}
// read_dir with an entry limit; a count of zero (or negative) means unlimited.
pub async fn read_dir(path: impl AsRef<Path>, count: i32) -> std::io::Result<Vec<String>> {
let mut entries = tokio::fs::read_dir(path.as_ref()).await?;
let mut volumes = Vec::new();
let mut count = count;
while let Some(entry) = entries.next_entry().await? {
let name = entry.file_name().to_string_lossy().to_string();
if name.is_empty() || name == "." || name == ".." {
continue;
}
let file_type = entry.file_type().await?;
if file_type.is_file() {
volumes.push(name);
} else if file_type.is_dir() {
volumes.push(format!("{}{}", name, super::path::SLASH_SEPARATOR));
}
count -= 1;
if count == 0 {
break;
}
}
Ok(volumes)
}
#[tracing::instrument(level = "debug", skip_all)]
pub async fn rename_all(
src_file_path: impl AsRef<Path>,
dst_file_path: impl AsRef<Path>,
base_dir: impl AsRef<Path>,
) -> Result<()> {
reliable_rename(src_file_path, dst_file_path.as_ref(), base_dir)
.await
.map_err(|e| to_access_error(e, Error::FileAccessDenied))?;
Ok(())
}
pub async fn reliable_rename(
src_file_path: impl AsRef<Path>,
dst_file_path: impl AsRef<Path>,
base_dir: impl AsRef<Path>,
) -> io::Result<()> {
if let Some(parent) = dst_file_path.as_ref().parent() {
if !file_exists(parent) {
// info!("reliable_rename reliable_mkdir_all parent: {:?}", parent);
reliable_mkdir_all(parent, base_dir.as_ref()).await?;
}
}
let mut i = 0;
loop {
if let Err(e) = fs::rename_std(src_file_path.as_ref(), dst_file_path.as_ref()) {
if e.kind() == std::io::ErrorKind::NotFound && i == 0 {
i += 1;
continue;
}
// info!(
// "reliable_rename failed. src_file_path: {:?}, dst_file_path: {:?}, base_dir: {:?}, err: {:?}",
// src_file_path.as_ref(),
// dst_file_path.as_ref(),
// base_dir.as_ref(),
// e
// );
return Err(e);
}
break;
}
Ok(())
}
pub async fn reliable_mkdir_all(path: impl AsRef<Path>, base_dir: impl AsRef<Path>) -> io::Result<()> {
let mut i = 0;
let mut base_dir = base_dir.as_ref();
loop {
if let Err(e) = os_mkdir_all(path.as_ref(), base_dir).await {
if e.kind() == std::io::ErrorKind::NotFound && i == 0 {
i += 1;
if let Some(base_parent) = base_dir.parent() {
if let Some(c) = base_parent.components().next() {
if c != Component::RootDir {
base_dir = base_parent
}
}
}
continue;
}
return Err(e);
}
break;
}
Ok(())
}
pub async fn os_mkdir_all(dir_path: impl AsRef<Path>, base_dir: impl AsRef<Path>) -> io::Result<()> {
if !base_dir.as_ref().to_string_lossy().is_empty() && base_dir.as_ref().starts_with(dir_path.as_ref()) {
return Ok(());
}
if let Some(parent) = dir_path.as_ref().parent() {
// Recursion isn't needed here; create_dir_all handles the parent chain directly.
if let Err(e) = fs::make_dir_all(&parent).await {
if e.kind() == std::io::ErrorKind::AlreadyExists {
return Ok(());
}
return Err(e);
}
// Box::pin(os_mkdir_all(&parent, &base_dir)).await?;
}
if let Err(e) = fs::mkdir(dir_path.as_ref()).await {
if e.kind() == std::io::ErrorKind::AlreadyExists {
return Ok(());
}
return Err(e);
}
Ok(())
}
pub fn file_exists(path: impl AsRef<Path>) -> bool {
std::fs::metadata(path.as_ref()).is_ok()
}

308
crates/disk/src/path.rs Normal file

@@ -0,0 +1,308 @@
use std::path::Path;
use std::path::PathBuf;
pub const GLOBAL_DIR_SUFFIX: &str = "__XLDIR__";
pub const SLASH_SEPARATOR: &str = "/";
pub const GLOBAL_DIR_SUFFIX_WITH_SLASH: &str = "__XLDIR__/";
pub fn has_suffix(s: &str, suffix: &str) -> bool {
if cfg!(target_os = "windows") {
s.to_lowercase().ends_with(&suffix.to_lowercase())
} else {
s.ends_with(suffix)
}
}
pub fn encode_dir_object(object: &str) -> String {
if has_suffix(object, SLASH_SEPARATOR) {
format!("{}{}", object.trim_end_matches(SLASH_SEPARATOR), GLOBAL_DIR_SUFFIX)
} else {
object.to_string()
}
}
pub fn is_dir_object(object: &str) -> bool {
let obj = encode_dir_object(object);
obj.ends_with(GLOBAL_DIR_SUFFIX)
}
#[allow(dead_code)]
pub fn decode_dir_object(object: &str) -> String {
if has_suffix(object, GLOBAL_DIR_SUFFIX) {
format!("{}{}", object.trim_end_matches(GLOBAL_DIR_SUFFIX), SLASH_SEPARATOR)
} else {
object.to_string()
}
}
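// Added sketch: directory objects round-trip between the trailing-slash form
// and the __XLDIR__ marker stored on disk.
#[cfg(test)]
mod dir_object_tests {
use super::*;
#[test]
fn encode_decode_round_trip() {
assert_eq!(encode_dir_object("photos/"), "photos__XLDIR__");
assert!(is_dir_object("photos/"));
assert_eq!(decode_dir_object("photos__XLDIR__"), "photos/");
assert_eq!(encode_dir_object("photos"), "photos");
}
}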
pub fn retain_slash(s: &str) -> String {
if s.is_empty() {
return s.to_string();
}
if s.ends_with(SLASH_SEPARATOR) {
s.to_string()
} else {
format!("{}{}", s, SLASH_SEPARATOR)
}
}
pub fn strings_has_prefix_fold(s: &str, prefix: &str) -> bool {
s.len() >= prefix.len() && (s[..prefix.len()] == *prefix || s[..prefix.len()].eq_ignore_ascii_case(prefix))
}
pub fn has_prefix(s: &str, prefix: &str) -> bool {
if cfg!(target_os = "windows") {
return strings_has_prefix_fold(s, prefix);
}
s.starts_with(prefix)
}
pub fn path_join(elem: &[PathBuf]) -> PathBuf {
let mut joined_path = PathBuf::new();
for path in elem {
joined_path.push(path);
}
joined_path
}
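// Added sketch: path_join uses std PathBuf::push semantics, so an absolute
// element replaces everything joined before it.
#[cfg(test)]
mod path_join_tests {
use super::*;
#[test]
fn push_semantics() {
assert_eq!(path_join(&[PathBuf::from("a"), PathBuf::from("b")]), PathBuf::from("a/b"));
assert_eq!(path_join(&[PathBuf::from("a"), PathBuf::from("/b")]), PathBuf::from("/b"));
}
}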
pub fn path_join_buf(elements: &[&str]) -> String {
let trailing_slash = !elements.is_empty() && elements.last().unwrap().ends_with(SLASH_SEPARATOR);
let mut dst = String::new();
let mut added = 0;
for e in elements {
if added > 0 || !e.is_empty() {
if added > 0 {
dst.push_str(SLASH_SEPARATOR);
}
dst.push_str(e);
added += e.len();
}
}
let cpath = Path::new(&dst).components().collect::<PathBuf>();
let clean_path = cpath.to_string_lossy();
if trailing_slash {
return format!("{}{}", clean_path, SLASH_SEPARATOR);
}
clean_path.to_string()
}
pub fn path_to_bucket_object_with_base_path(base_path: &str, path: &str) -> (String, String) {
let path = path.trim_start_matches(base_path).trim_start_matches(SLASH_SEPARATOR);
if let Some(m) = path.find(SLASH_SEPARATOR) {
return (path[..m].to_string(), path[m + SLASH_SEPARATOR.len()..].to_string());
}
(path.to_string(), "".to_string())
}
pub fn path_to_bucket_object(s: &str) -> (String, String) {
path_to_bucket_object_with_base_path("", s)
}
pub fn base_dir_from_prefix(prefix: &str) -> String {
let mut base_dir = dir(prefix).to_owned();
if base_dir == "." || base_dir == "./" || base_dir == "/" {
base_dir = "".to_owned();
}
if !prefix.contains('/') {
base_dir = "".to_owned();
}
if !base_dir.is_empty() && !base_dir.ends_with(SLASH_SEPARATOR) {
base_dir.push_str(SLASH_SEPARATOR);
}
base_dir
}
pub struct LazyBuf {
s: String,
buf: Option<Vec<u8>>,
w: usize,
}
impl LazyBuf {
pub fn new(s: String) -> Self {
LazyBuf { s, buf: None, w: 0 }
}
pub fn index(&self, i: usize) -> u8 {
if let Some(ref buf) = self.buf {
buf[i]
} else {
self.s.as_bytes()[i]
}
}
pub fn append(&mut self, c: u8) {
if self.buf.is_none() {
if self.w < self.s.len() && self.s.as_bytes()[self.w] == c {
self.w += 1;
return;
}
let mut new_buf = vec![0; self.s.len()];
new_buf[..self.w].copy_from_slice(&self.s.as_bytes()[..self.w]);
self.buf = Some(new_buf);
}
if let Some(ref mut buf) = self.buf {
buf[self.w] = c;
self.w += 1;
}
}
pub fn string(&self) -> String {
if let Some(ref buf) = self.buf {
String::from_utf8(buf[..self.w].to_vec()).unwrap()
} else {
self.s[..self.w].to_string()
}
}
}
pub fn clean(path: &str) -> String {
if path.is_empty() {
return ".".to_string();
}
let rooted = path.starts_with('/');
let n = path.len();
let mut out = LazyBuf::new(path.to_string());
let mut r = 0;
let mut dotdot = 0;
if rooted {
out.append(b'/');
r = 1;
dotdot = 1;
}
while r < n {
match path.as_bytes()[r] {
b'/' => {
// Empty path element
r += 1;
}
b'.' if r + 1 == n || path.as_bytes()[r + 1] == b'/' => {
// . element
r += 1;
}
b'.' if path.as_bytes()[r + 1] == b'.' && (r + 2 == n || path.as_bytes()[r + 2] == b'/') => {
// .. element: remove to last /
r += 2;
if out.w > dotdot {
// Can backtrack
out.w -= 1;
while out.w > dotdot && out.index(out.w) != b'/' {
out.w -= 1;
}
} else if !rooted {
// Cannot backtrack but not rooted, so append .. element.
if out.w > 0 {
out.append(b'/');
}
out.append(b'.');
out.append(b'.');
dotdot = out.w;
}
}
_ => {
// Real path element.
// Add slash if needed
if (rooted && out.w != 1) || (!rooted && out.w != 0) {
out.append(b'/');
}
// Copy element
while r < n && path.as_bytes()[r] != b'/' {
out.append(path.as_bytes()[r]);
r += 1;
}
}
}
}
// Turn empty string into "."
if out.w == 0 {
return ".".to_string();
}
out.string()
}
pub fn split(path: &str) -> (&str, &str) {
// Find the last occurrence of the '/' character
if let Some(i) = path.rfind('/') {
// Return the directory (up to and including the last '/') and the file name
return (&path[..i + 1], &path[i + 1..]);
}
// If no '/' is found, return an empty string for the directory and the whole path as the file name
(path, "")
}
pub fn dir(path: &str) -> String {
let (a, _) = split(path);
clean(a)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_base_dir_from_prefix() {
let a = "da/";
println!("---- in {}", a);
let a = base_dir_from_prefix(a);
println!("---- out {}", a);
}
#[test]
fn test_clean() {
assert_eq!(clean(""), ".");
assert_eq!(clean("abc"), "abc");
assert_eq!(clean("abc/def"), "abc/def");
assert_eq!(clean("a/b/c"), "a/b/c");
assert_eq!(clean("."), ".");
assert_eq!(clean(".."), "..");
assert_eq!(clean("../.."), "../..");
assert_eq!(clean("../../abc"), "../../abc");
assert_eq!(clean("/abc"), "/abc");
assert_eq!(clean("/"), "/");
assert_eq!(clean("abc/"), "abc");
assert_eq!(clean("abc/def/"), "abc/def");
assert_eq!(clean("a/b/c/"), "a/b/c");
assert_eq!(clean("./"), ".");
assert_eq!(clean("../"), "..");
assert_eq!(clean("../../"), "../..");
assert_eq!(clean("/abc/"), "/abc");
assert_eq!(clean("abc//def//ghi"), "abc/def/ghi");
assert_eq!(clean("//abc"), "/abc");
assert_eq!(clean("///abc"), "/abc");
assert_eq!(clean("//abc//"), "/abc");
assert_eq!(clean("abc//"), "abc");
assert_eq!(clean("abc/./def"), "abc/def");
assert_eq!(clean("/./abc/def"), "/abc/def");
assert_eq!(clean("abc/."), "abc");
assert_eq!(clean("abc/./../def"), "def");
assert_eq!(clean("abc//./../def"), "def");
assert_eq!(clean("abc/../../././../def"), "../../def");
assert_eq!(clean("abc/def/ghi/../jkl"), "abc/def/jkl");
assert_eq!(clean("abc/def/../ghi/../jkl"), "abc/jkl");
assert_eq!(clean("abc/def/.."), "abc");
assert_eq!(clean("abc/def/../.."), ".");
assert_eq!(clean("/abc/def/../.."), "/");
assert_eq!(clean("abc/def/../../.."), "..");
assert_eq!(clean("/abc/def/../../.."), "/");
assert_eq!(clean("abc/def/../../../ghi/jkl/../../../mno"), "../../mno");
}
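// Added sketch: path_join_buf cleans doubled separators but preserves a
// trailing slash contributed by the last element.
#[test]
fn test_path_join_buf() {
assert_eq!(path_join_buf(&["a", "b", "c"]), "a/b/c");
assert_eq!(path_join_buf(&["a/", "b/"]), "a/b/");
assert_eq!(path_join_buf(&["a", "", "c"]), "a/c");
}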
}

908
crates/disk/src/remote.rs Normal file

@@ -0,0 +1,908 @@
use std::path::PathBuf;
use crate::api::CheckPartsResp;
use crate::api::DeleteOptions;
use crate::api::DiskAPI;
use crate::api::DiskInfo;
use crate::api::DiskInfoOptions;
use crate::api::DiskLocation;
use crate::api::DiskOption;
use crate::api::ReadMultipleReq;
use crate::api::ReadMultipleResp;
use crate::api::ReadOptions;
use crate::api::RenameDataResp;
use crate::api::UpdateMetadataOpts;
use crate::api::VolumeInfo;
use crate::api::WalkDirOptions;
use crate::endpoint::Endpoint;
use futures::StreamExt as _;
use http::HeaderMap;
use http::Method;
use protos::node_service_time_out_client;
use protos::proto_gen::node_service::CheckPartsRequest;
use protos::proto_gen::node_service::DeletePathsRequest;
use protos::proto_gen::node_service::DeleteRequest;
use protos::proto_gen::node_service::DeleteVersionRequest;
use protos::proto_gen::node_service::DeleteVersionsRequest;
use protos::proto_gen::node_service::DeleteVolumeRequest;
use protos::proto_gen::node_service::DiskInfoRequest;
use protos::proto_gen::node_service::ListDirRequest;
use protos::proto_gen::node_service::ListVolumesRequest;
use protos::proto_gen::node_service::MakeVolumeRequest;
use protos::proto_gen::node_service::MakeVolumesRequest;
use protos::proto_gen::node_service::ReadAllRequest;
use protos::proto_gen::node_service::ReadMultipleRequest;
use protos::proto_gen::node_service::ReadVersionRequest;
use protos::proto_gen::node_service::ReadXlRequest;
use protos::proto_gen::node_service::RenameDataRequest;
use protos::proto_gen::node_service::RenameFileRequst;
use protos::proto_gen::node_service::RenamePartRequst;
use protos::proto_gen::node_service::StatVolumeRequest;
use protos::proto_gen::node_service::UpdateMetadataRequest;
use protos::proto_gen::node_service::VerifyFileRequest;
use protos::proto_gen::node_service::WalkDirRequest;
use protos::proto_gen::node_service::WriteAllRequest;
use protos::proto_gen::node_service::WriteMetadataRequest;
use rmp_serde::Serializer;
use rustfs_error::Error;
use rustfs_error::Result;
use rustfs_filemeta::FileInfo;
use rustfs_filemeta::FileInfoVersions;
use rustfs_filemeta::RawFileInfo;
use rustfs_metacache::MetaCacheEntry;
use rustfs_metacache::MetacacheWriter;
use rustfs_rio::HttpReader;
use rustfs_rio::HttpWriter;
use serde::Serialize as _;
use tokio::io::AsyncRead;
use tokio::io::AsyncWrite;
use tokio::sync::Mutex;
use tonic::Request;
use tracing::info;
use uuid::Uuid;
#[derive(Debug)]
pub struct RemoteDisk {
pub id: Mutex<Option<Uuid>>,
pub addr: String,
pub url: url::Url,
pub root: PathBuf,
endpoint: Endpoint,
}
impl RemoteDisk {
pub async fn new(ep: &Endpoint, _opt: &DiskOption) -> Result<Self> {
// let root = fs::canonicalize(ep.url.path()).await?;
let root = PathBuf::from(ep.get_file_path());
let addr = format!("{}://{}:{}", ep.url.scheme(), ep.url.host_str().unwrap(), ep.url.port().unwrap());
Ok(Self {
id: Mutex::new(None),
addr,
url: ep.url.clone(),
root,
endpoint: ep.clone(),
})
}
}
// TODO: every API here still needs proper error handling
#[async_trait::async_trait]
impl DiskAPI for RemoteDisk {
#[tracing::instrument(skip(self))]
fn to_string(&self) -> String {
self.endpoint.to_string()
}
#[tracing::instrument(skip(self))]
fn is_local(&self) -> bool {
false
}
#[tracing::instrument(skip(self))]
fn host_name(&self) -> String {
self.endpoint.host_port()
}
#[tracing::instrument(skip(self))]
async fn is_online(&self) -> bool {
// TODO: track the connection state instead of dialing on every check
if (node_service_time_out_client(&self.addr).await).is_ok() {
return true;
}
false
}
#[tracing::instrument(skip(self))]
fn endpoint(&self) -> Endpoint {
self.endpoint.clone()
}
#[tracing::instrument(skip(self))]
async fn close(&self) -> Result<()> {
Ok(())
}
#[tracing::instrument(skip(self))]
fn path(&self) -> PathBuf {
self.root.clone()
}
#[tracing::instrument(skip(self))]
fn get_disk_location(&self) -> DiskLocation {
// A negative endpoint index means "unassigned"; map it to None.
let to_idx = |i| if i < 0 { None } else { Some(i as usize) };
DiskLocation {
pool_idx: to_idx(self.endpoint.pool_idx),
set_idx: to_idx(self.endpoint.set_idx),
disk_idx: to_idx(self.endpoint.disk_idx),
}
}
#[tracing::instrument(skip(self))]
async fn get_disk_id(&self) -> Result<Option<Uuid>> {
Ok(*self.id.lock().await)
}
#[tracing::instrument(skip(self))]
async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()> {
let mut lock = self.id.lock().await;
*lock = id;
Ok(())
}
#[tracing::instrument(skip(self))]
async fn read_all(&self, volume: &str, path: &str) -> Result<Vec<u8>> {
info!("read_all {}/{}", volume, path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(ReadAllRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
});
let response = client.read_all(request).await?.into_inner();
if !response.success {
return Err(Error::FileNotFound);
}
Ok(response.data)
}
#[tracing::instrument(skip(self))]
async fn write_all(&self, volume: &str, path: &str, data: Vec<u8>) -> Result<()> {
info!("write_all");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(WriteAllRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
data,
});
let response = client.write_all(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()> {
info!("delete {}/{}/{}", self.endpoint.to_string(), volume, path);
let options = serde_json::to_string(&opt)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
options,
});
let response = client.delete(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn verify_file(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
info!("verify_file");
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(VerifyFileRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info,
});
let response = client.verify_file(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let check_parts_resp = serde_json::from_str::<CheckPartsResp>(&response.check_parts_resp)?;
Ok(check_parts_resp)
}
#[tracing::instrument(skip(self))]
async fn check_parts(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
info!("check_parts");
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(CheckPartsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info,
});
let response = client.check_parts(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let check_parts_resp = serde_json::from_str::<CheckPartsResp>(&response.check_parts_resp)?;
Ok(check_parts_resp)
}
#[tracing::instrument(skip(self))]
async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Vec<u8>) -> Result<()> {
info!("rename_part {}/{}", src_volume, src_path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(RenamePartRequst {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
src_path: src_path.to_string(),
dst_volume: dst_volume.to_string(),
dst_path: dst_path.to_string(),
meta,
});
let response = client.rename_part(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(level = "debug", skip(self))]
async fn rename_file(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str) -> Result<()> {
info!("rename_file");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(RenameFileRequst {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
src_path: src_path.to_string(),
dst_volume: dst_volume.to_string(),
dst_path: dst_path.to_string(),
});
let response = client.rename_file(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(level = "debug", skip(self))]
async fn create_file(&self, _origvolume: &str, volume: &str, path: &str, file_size: usize) -> Result<Box<dyn AsyncWrite>> {
info!("create_file {}/{}/{}", self.endpoint.to_string(), volume, path);
let url = format!(
"{}/rustfs/rpc/put_file_stream?disk={}&volume={}&path={}&append={}&size={}",
self.endpoint.grid_host(),
urlencoding::encode(&self.endpoint.to_string()),
urlencoding::encode(volume),
urlencoding::encode(path),
false,
file_size
);
let wd = HttpWriter::new(url, Method::PUT, HeaderMap::new()).await?;
Ok(Box::new(wd))
}
#[tracing::instrument(level = "debug", skip(self))]
async fn append_file(&self, volume: &str, path: &str) -> Result<Box<dyn AsyncWrite>> {
info!("append_file {}/{}", volume, path);
let url = format!(
"{}/rustfs/rpc/put_file_stream?disk={}&volume={}&path={}&append={}&size={}",
self.endpoint.grid_host(),
urlencoding::encode(&self.endpoint.to_string()),
urlencoding::encode(volume),
urlencoding::encode(path),
true,
0
);
let wd = HttpWriter::new(url, Method::PUT, HeaderMap::new()).await?;
Ok(Box::new(wd))
}
#[tracing::instrument(level = "debug", skip(self))]
async fn read_file(&self, volume: &str, path: &str) -> Result<Box<dyn AsyncRead>> {
info!("read_file {}/{}", volume, path);
let url = format!(
"{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}",
self.endpoint.grid_host(),
urlencoding::encode(&self.endpoint.to_string()),
urlencoding::encode(volume),
urlencoding::encode(path),
0,
0
);
let rd = HttpReader::new(url, Method::GET, HeaderMap::new()).await?;
Ok(Box::new(rd))
}
#[tracing::instrument(level = "debug", skip(self))]
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<Box<dyn AsyncRead>> {
info!("read_file_stream {}/{}/{}", self.endpoint.to_string(), volume, path);
let url = format!(
"{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}",
self.endpoint.grid_host(),
urlencoding::encode(&self.endpoint.to_string()),
urlencoding::encode(volume),
urlencoding::encode(path),
offset,
length
);
let rd = HttpReader::new(url, Method::GET, HeaderMap::new()).await?;
Ok(Box::new(rd))
}
#[tracing::instrument(skip(self))]
async fn list_dir(&self, _origvolume: &str, volume: &str, _dir_path: &str, _count: i32) -> Result<Vec<String>> {
info!("list_dir {}/{}", volume, _dir_path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(ListDirRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
});
let response = client.list_dir(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(response.volumes)
}
// TODO: use the writer directly
#[tracing::instrument(skip(self, wr))]
async fn walk_dir<W: AsyncWrite + Unpin + Send>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()> {
let now = std::time::SystemTime::now();
info!("walk_dir {}/{}/{:?}", self.endpoint.to_string(), opts.bucket, opts.filter_prefix);
let mut wr = wr;
let mut out = MetacacheWriter::new(&mut wr);
let mut buf = Vec::new();
opts.serialize(&mut Serializer::new(&mut buf))?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(WalkDirRequest {
disk: self.endpoint.to_string(),
walk_dir_options: buf,
});
let mut response = client.walk_dir(request).await?.into_inner();
loop {
match response.next().await {
Some(Ok(resp)) => {
if !resp.success {
return Err(Error::msg(resp.error_info.unwrap_or_default()));
}
let entry = serde_json::from_str::<MetaCacheEntry>(&resp.meta_cache_entry)
.map_err(|_| Error::msg(format!("Unexpected response: {:?}", response)))?;
out.write_obj(&entry).await?;
}
None => break,
_ => return Err(Error::msg(format!("Unexpected response: {:?}", response))),
}
}
info!(
"walk_dir {}/{:?} done {:?}",
opts.bucket,
opts.filter_prefix,
now.elapsed().unwrap_or_default()
);
Ok(())
}
#[tracing::instrument(skip(self))]
async fn rename_data(
&self,
src_volume: &str,
src_path: &str,
fi: FileInfo,
dst_volume: &str,
dst_path: &str,
) -> Result<RenameDataResp> {
info!("rename_data {}/{}/{}/{}", self.addr, self.endpoint.to_string(), dst_volume, dst_path);
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(RenameDataRequest {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
src_path: src_path.to_string(),
file_info,
dst_volume: dst_volume.to_string(),
dst_path: dst_path.to_string(),
});
let response = client.rename_data(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let rename_data_resp = serde_json::from_str::<RenameDataResp>(&response.rename_data_resp)?;
Ok(rename_data_resp)
}
#[tracing::instrument(skip(self))]
async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()> {
info!("make_volumes");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(MakeVolumesRequest {
disk: self.endpoint.to_string(),
volumes: volumes.iter().map(|s| (*s).to_string()).collect(),
});
let response = client.make_volumes(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn make_volume(&self, volume: &str) -> Result<()> {
info!("make_volume");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(MakeVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
});
let response = client.make_volume(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn list_volumes(&self) -> Result<Vec<VolumeInfo>> {
info!("list_volumes");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(ListVolumesRequest {
disk: self.endpoint.to_string(),
});
let response = client.list_volumes(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let infos = response
.volume_infos
.into_iter()
.filter_map(|json_str| serde_json::from_str::<VolumeInfo>(&json_str).ok())
.collect();
Ok(infos)
}
#[tracing::instrument(skip(self))]
async fn stat_volume(&self, volume: &str) -> Result<VolumeInfo> {
info!("stat_volume");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(StatVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
});
let response = client.stat_volume(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let volume_info = serde_json::from_str::<VolumeInfo>(&response.volume_info)?;
Ok(volume_info)
}
#[tracing::instrument(skip(self))]
async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
info!("delete_paths");
let paths = paths.to_owned();
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeletePathsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
paths,
});
let response = client.delete_paths(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()> {
info!("update_metadata");
let file_info = serde_json::to_string(&fi)?;
let opts = serde_json::to_string(&opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(UpdateMetadataRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info,
opts,
});
let response = client.update_metadata(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn write_metadata(&self, _org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()> {
info!("write_metadata {}/{}", volume, path);
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(WriteMetadataRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info,
});
let response = client.write_metadata(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn read_version(
&self,
_org_volume: &str,
volume: &str,
path: &str,
version_id: &str,
opts: &ReadOptions,
) -> Result<FileInfo> {
info!("read_version");
let opts = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(ReadVersionRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
version_id: version_id.to_string(),
opts,
});
let response = client.read_version(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let file_info = serde_json::from_str::<FileInfo>(&response.file_info)?;
Ok(file_info)
}
#[tracing::instrument(level = "debug", skip(self))]
async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result<RawFileInfo> {
info!("read_xl {}/{}/{}", self.endpoint.to_string(), volume, path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(ReadXlRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
read_data,
});
let response = client.read_xl(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let raw_file_info = serde_json::from_str::<RawFileInfo>(&response.raw_file_info)?;
Ok(raw_file_info)
}
#[tracing::instrument(skip(self))]
async fn delete_version(
&self,
volume: &str,
path: &str,
fi: FileInfo,
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()> {
info!("delete_version");
let file_info = serde_json::to_string(&fi)?;
let opts = serde_json::to_string(&opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteVersionRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info,
force_del_marker,
opts,
});
let response = client.delete_version(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
// let raw_file_info = serde_json::from_str::<RawFileInfo>(&response.raw_file_info)?;
Ok(())
}
#[tracing::instrument(skip(self))]
async fn delete_versions(
&self,
volume: &str,
versions: Vec<FileInfoVersions>,
opts: DeleteOptions,
) -> Result<Vec<Option<Error>>> {
info!("delete_versions");
let opts = serde_json::to_string(&opts)?;
let mut versions_str = Vec::with_capacity(versions.len());
for file_info_versions in versions.iter() {
versions_str.push(serde_json::to_string(file_info_versions)?);
}
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteVersionsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
versions: versions_str,
opts,
});
let response = client.delete_versions(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let errors = response
.errors
.iter()
.map(|error| {
if error.is_empty() {
None
} else {
use std::str::FromStr;
Some(Error::from_str(error).unwrap_or(Error::msg(error)))
}
})
.collect();
Ok(errors)
}
#[tracing::instrument(skip(self))]
async fn read_multiple(&self, req: ReadMultipleReq) -> Result<Vec<ReadMultipleResp>> {
info!("read_multiple {}/{}/{}", self.endpoint.to_string(), req.bucket, req.prefix);
let read_multiple_req = serde_json::to_string(&req)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(ReadMultipleRequest {
disk: self.endpoint.to_string(),
read_multiple_req,
});
let response = client.read_multiple(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let read_multiple_resps = response
.read_multiple_resps
.into_iter()
.filter_map(|json_str| serde_json::from_str::<ReadMultipleResp>(&json_str).ok())
.collect();
Ok(read_multiple_resps)
}
#[tracing::instrument(skip(self))]
async fn delete_volume(&self, volume: &str) -> Result<()> {
info!("delete_volume {}/{}", self.endpoint.to_string(), volume);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
});
let response = client.delete_volume(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn disk_info(&self, opts: &DiskInfoOptions) -> Result<DiskInfo> {
let opts = serde_json::to_string(&opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(DiskInfoRequest {
disk: self.endpoint.to_string(),
opts,
});
let response = client.disk_info(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let disk_info = serde_json::from_str::<DiskInfo>(&response.disk_info)?;
Ok(disk_info)
}
// #[tracing::instrument(skip(self, cache, scan_mode, _we_sleep))]
// async fn ns_scanner(
// &self,
// cache: &DataUsageCache,
// updates: Sender<DataUsageEntry>,
// scan_mode: HealScanMode,
// _we_sleep: ShouldSleepFn,
// ) -> Result<DataUsageCache> {
// info!("ns_scanner");
// let cache = serde_json::to_string(cache)?;
// let mut client = node_service_time_out_client(&self.addr)
// .await
// .map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
// let (tx, rx) = mpsc::channel(10);
// let in_stream = ReceiverStream::new(rx);
// let mut response = client.ns_scanner(in_stream).await?.into_inner();
// let request = NsScannerRequest {
// disk: self.endpoint.to_string(),
// cache,
// scan_mode: scan_mode as u64,
// };
// tx.send(request)
// .await
// .map_err(|err| Error::msg(format!("can not send request, err: {}", err)))?;
// loop {
// match response.next().await {
// Some(Ok(resp)) => {
// if !resp.update.is_empty() {
// let data_usage_cache = serde_json::from_str::<DataUsageEntry>(&resp.update)?;
// let _ = updates.send(data_usage_cache).await;
// } else if !resp.data_usage_cache.is_empty() {
// let data_usage_cache = serde_json::from_str::<DataUsageCache>(&resp.data_usage_cache)?;
// return Ok(data_usage_cache);
// } else {
// return Err(Error::msg("scan was interrupted"));
// }
// }
// _ => return Err(Error::msg("scan was interrupted")),
// }
// }
// }
// #[tracing::instrument(skip(self))]
// async fn healing(&self) -> Option<HealingTracker> {
// None
// }
}
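
The methods above all repeat one shape: obtain a timed-out client, wrap the arguments in a `Request`, bail out when `response.success` is false, and JSON-decode the payload. A minimal sketch of that shared decode step, assuming only `serde`/`serde_json`; the helper name and the `String` error type are illustrative and not part of this commit:

use serde::de::DeserializeOwned;

/// Hypothetical helper mirroring the success/error/payload fields of the
/// generated response structs; none of these names come from `protos`.
fn decode_rpc_response<T: DeserializeOwned>(
    success: bool,
    error: Option<String>,
    payload: &str,
) -> Result<T, String> {
    if !success {
        // Propagate the server-side error string, defaulting to empty.
        return Err(error.unwrap_or_default());
    }
    // Successful responses carry a JSON-encoded body.
    serde_json::from_str(payload).map_err(|e| e.to_string())
}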


@@ -0,0 +1,862 @@
use std::path::PathBuf;
use super::{
endpoint::Endpoint, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption,
FileInfoVersions, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo,
WalkDirOptions,
};
use crate::heal::{
data_scanner::ShouldSleepFn,
data_usage_cache::{DataUsageCache, DataUsageEntry},
heal_commands::{HealScanMode, HealingTracker},
};
use crate::io::{FileReader, FileWriter, HttpFileReader, HttpFileWriter};
use crate::{disk::metacache::MetaCacheEntry, metacache::writer::MetacacheWriter};
use futures::lock::Mutex;
use protos::proto_gen::node_service::RenamePartRequst;
use protos::{
node_service_time_out_client,
proto_gen::node_service::{
CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, NsScannerRequest,
ReadAllRequest, ReadMultipleRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest, RenameFileRequst,
StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WalkDirRequest, WriteAllRequest, WriteMetadataRequest,
},
};
use rmp_serde::Serializer;
use rustfs_error::{Error, Result};
use rustfs_filemeta::{FileInfo, RawFileInfo};
use serde::Serialize;
use tokio::{
io::AsyncWrite,
sync::mpsc::{self, Sender},
};
use tokio_stream::{wrappers::ReceiverStream, StreamExt};
use tonic::Request;
use tracing::info;
use uuid::Uuid;
#[derive(Debug)]
pub struct RemoteDisk {
pub id: Mutex<Option<Uuid>>,
pub addr: String,
pub url: url::Url,
pub root: PathBuf,
endpoint: Endpoint,
}
impl RemoteDisk {
pub async fn new(ep: &Endpoint, _opt: &DiskOption) -> Result<Self> {
// let root = fs::canonicalize(ep.url.path()).await?;
let root = PathBuf::from(ep.get_file_path());
let addr = format!("{}://{}:{}", ep.url.scheme(), ep.url.host_str().expect("remote endpoint URL must have a host"), ep.url.port().expect("remote endpoint URL must have a port"));
Ok(Self {
id: Mutex::new(None),
addr,
url: ep.url.clone(),
root,
endpoint: ep.clone(),
})
}
}
// TODO: all APIs need to handle errors properly
#[async_trait::async_trait]
impl DiskAPI for RemoteDisk {
#[tracing::instrument(skip(self))]
fn to_string(&self) -> String {
self.endpoint.to_string()
}
#[tracing::instrument(skip(self))]
fn is_local(&self) -> bool {
false
}
#[tracing::instrument(skip(self))]
fn host_name(&self) -> String {
self.endpoint.host_port()
}
#[tracing::instrument(skip(self))]
async fn is_online(&self) -> bool {
// TODO: track connection status
node_service_time_out_client(&self.addr).await.is_ok()
}
#[tracing::instrument(skip(self))]
fn endpoint(&self) -> Endpoint {
self.endpoint.clone()
}
#[tracing::instrument(skip(self))]
async fn close(&self) -> Result<()> {
Ok(())
}
#[tracing::instrument(skip(self))]
fn path(&self) -> PathBuf {
self.root.clone()
}
#[tracing::instrument(skip(self))]
fn get_disk_location(&self) -> DiskLocation {
DiskLocation {
pool_idx: {
if self.endpoint.pool_idx < 0 {
None
} else {
Some(self.endpoint.pool_idx as usize)
}
},
set_idx: {
if self.endpoint.set_idx < 0 {
None
} else {
Some(self.endpoint.set_idx as usize)
}
},
disk_idx: {
if self.endpoint.disk_idx < 0 {
None
} else {
Some(self.endpoint.disk_idx as usize)
}
},
}
}
#[tracing::instrument(skip(self))]
async fn get_disk_id(&self) -> Result<Option<Uuid>> {
Ok(*self.id.lock().await)
}
#[tracing::instrument(skip(self))]
async fn set_disk_id(&self, id: Option<Uuid>) -> Result<()> {
let mut lock = self.id.lock().await;
*lock = id;
Ok(())
}
#[tracing::instrument(skip(self))]
async fn read_all(&self, volume: &str, path: &str) -> Result<Vec<u8>> {
info!("read_all {}/{}", volume, path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(ReadAllRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
});
let response = client.read_all(request).await?.into_inner();
if !response.success {
return Err(Error::FileNotFound);
}
Ok(response.data)
}
#[tracing::instrument(skip(self))]
async fn write_all(&self, volume: &str, path: &str, data: Vec<u8>) -> Result<()> {
info!("write_all");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(WriteAllRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
data,
});
let response = client.write_all(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()> {
info!("delete {}/{}/{}", self.endpoint.to_string(), volume, path);
let options = serde_json::to_string(&opt)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
options,
});
let response = client.delete(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn verify_file(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
info!("verify_file");
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(VerifyFileRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info,
});
let response = client.verify_file(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let check_parts_resp = serde_json::from_str::<CheckPartsResp>(&response.check_parts_resp)?;
Ok(check_parts_resp)
}
#[tracing::instrument(skip(self))]
async fn check_parts(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
info!("check_parts");
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(CheckPartsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info,
});
let response = client.check_parts(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let check_parts_resp = serde_json::from_str::<CheckPartsResp>(&response.check_parts_resp)?;
Ok(check_parts_resp)
}
#[tracing::instrument(skip(self))]
async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Vec<u8>) -> Result<()> {
info!("rename_part {}/{}", src_volume, src_path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(RenamePartRequst {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
src_path: src_path.to_string(),
dst_volume: dst_volume.to_string(),
dst_path: dst_path.to_string(),
meta,
});
let response = client.rename_part(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(level = "debug", skip(self))]
async fn rename_file(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str) -> Result<()> {
info!("rename_file");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(RenameFileRequst {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
src_path: src_path.to_string(),
dst_volume: dst_volume.to_string(),
dst_path: dst_path.to_string(),
});
let response = client.rename_file(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(level = "debug", skip(self))]
async fn create_file(&self, _origvolume: &str, volume: &str, path: &str, file_size: usize) -> Result<FileWriter> {
info!("create_file {}/{}/{}", self.endpoint.to_string(), volume, path);
Ok(Box::new(HttpFileWriter::new(
self.endpoint.grid_host().as_str(),
self.endpoint.to_string().as_str(),
volume,
path,
file_size,
false,
)?))
}
#[tracing::instrument(level = "debug", skip(self))]
async fn append_file(&self, volume: &str, path: &str) -> Result<FileWriter> {
info!("append_file {}/{}", volume, path);
Ok(Box::new(HttpFileWriter::new(
self.endpoint.grid_host().as_str(),
self.endpoint.to_string().as_str(),
volume,
path,
0,
true,
)?))
}
#[tracing::instrument(level = "debug", skip(self))]
async fn read_file(&self, volume: &str, path: &str) -> Result<FileReader> {
info!("read_file {}/{}", volume, path);
Ok(Box::new(
HttpFileReader::new(self.endpoint.grid_host().as_str(), self.endpoint.to_string().as_str(), volume, path, 0, 0)
.await?,
))
}
#[tracing::instrument(level = "debug", skip(self))]
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<FileReader> {
info!("read_file_stream {}/{}/{}", self.endpoint.to_string(), volume, path);
Ok(Box::new(
HttpFileReader::new(
self.endpoint.grid_host().as_str(),
self.endpoint.to_string().as_str(),
volume,
path,
offset,
length,
)
.await?,
))
}
#[tracing::instrument(skip(self))]
async fn list_dir(&self, _origvolume: &str, volume: &str, _dir_path: &str, _count: i32) -> Result<Vec<String>> {
info!("list_dir {}/{}", volume, _dir_path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(ListDirRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
});
let response = client.list_dir(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(response.volumes)
}
// TODO: use the writer directly
#[tracing::instrument(skip(self, wr))]
async fn walk_dir<W: AsyncWrite + Unpin + Send>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()> {
let now = std::time::SystemTime::now();
info!("walk_dir {}/{}/{:?}", self.endpoint.to_string(), opts.bucket, opts.filter_prefix);
let mut wr = wr;
let mut out = MetacacheWriter::new(&mut wr);
let mut buf = Vec::new();
opts.serialize(&mut Serializer::new(&mut buf))?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(WalkDirRequest {
disk: self.endpoint.to_string(),
walk_dir_options: buf,
});
let mut response = client.walk_dir(request).await?.into_inner();
loop {
match response.next().await {
Some(Ok(resp)) => {
if !resp.success {
return Err(Error::msg(resp.error_info.unwrap_or_default()));
}
let entry = serde_json::from_str::<MetaCacheEntry>(&resp.meta_cache_entry)
.map_err(|_| Error::msg(format!("Unexpected response: {:?}", response)))?;
out.write_obj(&entry).await?;
}
None => break,
_ => return Err(Error::msg(format!("Unexpected response: {:?}", response))),
}
}
info!(
"walk_dir {}/{:?} done {:?}",
opts.bucket,
opts.filter_prefix,
now.elapsed().unwrap_or_default()
);
Ok(())
}
#[tracing::instrument(skip(self))]
async fn rename_data(
&self,
src_volume: &str,
src_path: &str,
fi: FileInfo,
dst_volume: &str,
dst_path: &str,
) -> Result<RenameDataResp> {
info!("rename_data {}/{}/{}/{}", self.addr, self.endpoint.to_string(), dst_volume, dst_path);
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(RenameDataRequest {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
src_path: src_path.to_string(),
file_info,
dst_volume: dst_volume.to_string(),
dst_path: dst_path.to_string(),
});
let response = client.rename_data(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let rename_data_resp = serde_json::from_str::<RenameDataResp>(&response.rename_data_resp)?;
Ok(rename_data_resp)
}
#[tracing::instrument(skip(self))]
async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()> {
info!("make_volumes");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(MakeVolumesRequest {
disk: self.endpoint.to_string(),
volumes: volumes.iter().map(|s| (*s).to_string()).collect(),
});
let response = client.make_volumes(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn make_volume(&self, volume: &str) -> Result<()> {
info!("make_volume");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(MakeVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
});
let response = client.make_volume(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn list_volumes(&self) -> Result<Vec<VolumeInfo>> {
info!("list_volumes");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(ListVolumesRequest {
disk: self.endpoint.to_string(),
});
let response = client.list_volumes(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let infos = response
.volume_infos
.into_iter()
.filter_map(|json_str| serde_json::from_str::<VolumeInfo>(&json_str).ok())
.collect();
Ok(infos)
}
#[tracing::instrument(skip(self))]
async fn stat_volume(&self, volume: &str) -> Result<VolumeInfo> {
info!("stat_volume");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(StatVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
});
let response = client.stat_volume(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let volume_info = serde_json::from_str::<VolumeInfo>(&response.volume_info)?;
Ok(volume_info)
}
#[tracing::instrument(skip(self))]
async fn delete_paths(&self, volume: &str, paths: &[String]) -> Result<()> {
info!("delete_paths");
let paths = paths.to_owned();
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeletePathsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
paths,
});
let response = client.delete_paths(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn update_metadata(&self, volume: &str, path: &str, fi: FileInfo, opts: &UpdateMetadataOpts) -> Result<()> {
info!("update_metadata");
let file_info = serde_json::to_string(&fi)?;
let opts = serde_json::to_string(&opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(UpdateMetadataRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info,
opts,
});
let response = client.update_metadata(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn write_metadata(&self, _org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()> {
info!("write_metadata {}/{}", volume, path);
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(WriteMetadataRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info,
});
let response = client.write_metadata(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn read_version(
&self,
_org_volume: &str,
volume: &str,
path: &str,
version_id: &str,
opts: &ReadOptions,
) -> Result<FileInfo> {
info!("read_version");
let opts = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(ReadVersionRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
version_id: version_id.to_string(),
opts,
});
let response = client.read_version(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let file_info = serde_json::from_str::<FileInfo>(&response.file_info)?;
Ok(file_info)
}
#[tracing::instrument(level = "debug", skip(self))]
async fn read_xl(&self, volume: &str, path: &str, read_data: bool) -> Result<RawFileInfo> {
info!("read_xl {}/{}/{}", self.endpoint.to_string(), volume, path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(ReadXlRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
read_data,
});
let response = client.read_xl(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let raw_file_info = serde_json::from_str::<RawFileInfo>(&response.raw_file_info)?;
Ok(raw_file_info)
}
#[tracing::instrument(skip(self))]
async fn delete_version(
&self,
volume: &str,
path: &str,
fi: FileInfo,
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()> {
info!("delete_version");
let file_info = serde_json::to_string(&fi)?;
let opts = serde_json::to_string(&opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteVersionRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
path: path.to_string(),
file_info,
force_del_marker,
opts,
});
let response = client.delete_version(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
// let raw_file_info = serde_json::from_str::<RawFileInfo>(&response.raw_file_info)?;
Ok(())
}
#[tracing::instrument(skip(self))]
async fn delete_versions(
&self,
volume: &str,
versions: Vec<FileInfoVersions>,
opts: DeleteOptions,
) -> Result<Vec<Option<Error>>> {
info!("delete_versions");
let opts = serde_json::to_string(&opts)?;
let mut versions_str = Vec::with_capacity(versions.len());
for file_info_versions in versions.iter() {
versions_str.push(serde_json::to_string(file_info_versions)?);
}
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteVersionsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
versions: versions_str,
opts,
});
let response = client.delete_versions(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let errors = response
.errors
.iter()
.map(|error| {
if error.is_empty() {
None
} else {
use std::str::FromStr;
Some(Error::from_str(error).unwrap_or(Error::msg(error)))
}
})
.collect();
Ok(errors)
}
#[tracing::instrument(skip(self))]
async fn read_multiple(&self, req: ReadMultipleReq) -> Result<Vec<ReadMultipleResp>> {
info!("read_multiple {}/{}/{}", self.endpoint.to_string(), req.bucket, req.prefix);
let read_multiple_req = serde_json::to_string(&req)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(ReadMultipleRequest {
disk: self.endpoint.to_string(),
read_multiple_req,
});
let response = client.read_multiple(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let read_multiple_resps = response
.read_multiple_resps
.into_iter()
.filter_map(|json_str| serde_json::from_str::<ReadMultipleResp>(&json_str).ok())
.collect();
Ok(read_multiple_resps)
}
#[tracing::instrument(skip(self))]
async fn delete_volume(&self, volume: &str) -> Result<()> {
info!("delete_volume {}/{}", self.endpoint.to_string(), volume);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
});
let response = client.delete_volume(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
Ok(())
}
#[tracing::instrument(skip(self))]
async fn disk_info(&self, opts: &DiskInfoOptions) -> Result<DiskInfo> {
let opts = serde_json::to_string(&opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let request = Request::new(DiskInfoRequest {
disk: self.endpoint.to_string(),
opts,
});
let response = client.disk_info(request).await?.into_inner();
if !response.success {
return Err(response.error.unwrap_or_default().into());
}
let disk_info = serde_json::from_str::<DiskInfo>(&response.disk_info)?;
Ok(disk_info)
}
#[tracing::instrument(skip(self, cache, scan_mode, _we_sleep))]
async fn ns_scanner(
&self,
cache: &DataUsageCache,
updates: Sender<DataUsageEntry>,
scan_mode: HealScanMode,
_we_sleep: ShouldSleepFn,
) -> Result<DataUsageCache> {
info!("ns_scanner");
let cache = serde_json::to_string(cache)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::msg(format!("can not get client, err: {}", err)))?;
let (tx, rx) = mpsc::channel(10);
let in_stream = ReceiverStream::new(rx);
let mut response = client.ns_scanner(in_stream).await?.into_inner();
let request = NsScannerRequest {
disk: self.endpoint.to_string(),
cache,
scan_mode: scan_mode as u64,
};
tx.send(request)
.await
.map_err(|err| Error::msg(format!("can not send request, err: {}", err)))?;
loop {
match response.next().await {
Some(Ok(resp)) => {
if !resp.update.is_empty() {
let data_usage_cache = serde_json::from_str::<DataUsageEntry>(&resp.update)?;
let _ = updates.send(data_usage_cache).await;
} else if !resp.data_usage_cache.is_empty() {
let data_usage_cache = serde_json::from_str::<DataUsageCache>(&resp.data_usage_cache)?;
return Ok(data_usage_cache);
} else {
return Err(Error::msg("scan was interrupted"));
}
}
_ => return Err(Error::msg("scan was interrupted")),
}
}
}
#[tracing::instrument(skip(self))]
async fn healing(&self) -> Option<HealingTracker> {
None
}
}
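
The main behavioral change from the previous version of this file is that `create_file`, `append_file`, `read_file`, and `read_file_stream` now return the `FileWriter`/`FileReader` aliases backed by `HttpFileWriter`/`HttpFileReader` instead of hand-assembling `put_file_stream`/`read_file_stream` URLs. A hedged usage sketch, assuming the boxed writer/reader are `Unpin` `AsyncWrite`/`AsyncRead` values; the bucket and object names are illustrative:

use tokio::io::{AsyncReadExt, AsyncWriteExt};

// Illustrative round trip through a RemoteDisk; not part of this commit.
async fn roundtrip(disk: &RemoteDisk) -> rustfs_error::Result<Vec<u8>> {
    let mut w = disk.create_file("", "bucket-a", "obj/part.1", 4).await?;
    w.write_all(b"data").await?;
    w.shutdown().await?;

    let mut r = disk.read_file_stream("bucket-a", "obj/part.1", 0, 4).await?;
    let mut buf = Vec::new();
    r.read_to_end(&mut buf).await?;
    Ok(buf)
}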

35
crates/disk/src/utils.rs Normal file

@@ -0,0 +1,35 @@
use std::{fs::Metadata, path::Path};
use rustfs_error::{to_file_error, Error, Result};
pub async fn read_file_exists(path: impl AsRef<Path>) -> Result<(Vec<u8>, Option<Metadata>)> {
let p = path.as_ref();
let (data, meta) = match read_file_all(&p).await {
Ok((data, meta)) => (data, Some(meta)),
Err(e) => {
if e == Error::FileNotFound {
(Vec::new(), None)
} else {
return Err(e);
}
}
};
Ok((data, meta))
}
pub async fn read_file_all(path: impl AsRef<Path>) -> Result<(Vec<u8>, Metadata)> {
let p = path.as_ref();
let meta = read_file_metadata(p).await?;
let data = read_all(p).await?;
Ok((data, meta))
}
pub async fn read_file_metadata(p: impl AsRef<Path>) -> Result<Metadata> {
Ok(tokio::fs::metadata(&p).await.map_err(to_file_error)?)
}
pub async fn read_all(p: impl AsRef<Path>) -> Result<Vec<u8>> {
tokio::fs::read(&p).await.map_err(|e| to_file_error(e).into())
}
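
`read_file_exists` folds `Error::FileNotFound` into an empty result, so callers can treat a missing file as "no data" without matching on the error themselves. A small hedged example of a caller (the function name is illustrative):

use std::path::Path;

// Returns None when the file does not exist, Some(bytes) otherwise;
// any other I/O error still propagates. Illustrative, not part of
// this commit.
pub async fn load_optional(path: impl AsRef<Path>) -> Result<Option<Vec<u8>>> {
    let (data, meta) = read_file_exists(path).await?;
    Ok(meta.map(|_| data))
}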

22
crates/error/Cargo.toml Normal file

@@ -0,0 +1,22 @@
[package]
name = "rustfs-error"
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
[dependencies]
protos.workspace = true
rmp.workspace = true
rmp-serde.workspace = true
serde.workspace = true
serde_json.workspace = true
thiserror.workspace = true
time.workspace = true
tonic.workspace = true
uuid.workspace = true
[lints]
workspace = true


@@ -0,0 +1,27 @@
use crate::Error;
pub const CHECK_PART_UNKNOWN: usize = 0;
pub const CHECK_PART_SUCCESS: usize = 1;
pub const CHECK_PART_DISK_NOT_FOUND: usize = 2;
pub const CHECK_PART_VOLUME_NOT_FOUND: usize = 3;
pub const CHECK_PART_FILE_NOT_FOUND: usize = 4;
pub const CHECK_PART_FILE_CORRUPT: usize = 5;
pub fn conv_part_err_to_int(err: &Option<Error>) -> usize {
if let Some(err) = err {
match err {
Error::FileNotFound | Error::FileVersionNotFound => CHECK_PART_FILE_NOT_FOUND,
Error::FileCorrupt => CHECK_PART_FILE_CORRUPT,
Error::VolumeNotFound => CHECK_PART_VOLUME_NOT_FOUND,
Error::DiskNotFound => CHECK_PART_DISK_NOT_FOUND,
Error::Nil => CHECK_PART_SUCCESS,
_ => CHECK_PART_UNKNOWN,
}
} else {
CHECK_PART_SUCCESS
}
}
pub fn has_part_err(part_errs: &[usize]) -> bool {
part_errs.iter().any(|err| *err != CHECK_PART_SUCCESS)
}
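
A hedged sketch of how these pieces compose: map each per-part `Option<Error>` to one of the integer codes above, then ask whether any part needs healing (the `summarize` name is illustrative):

// Illustrative only: turn per-part verification results into codes
// and flag whether any part failed.
fn summarize(parts: &[Option<Error>]) -> (Vec<usize>, bool) {
    let codes: Vec<usize> = parts.iter().map(conv_part_err_to_int).collect();
    let needs_heal = has_part_err(&codes);
    (codes, needs_heal)
}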


@@ -0,0 +1,92 @@
use crate::Error;
pub fn to_file_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::NotFound => Error::FileNotFound.into(),
std::io::ErrorKind::PermissionDenied => Error::FileAccessDenied.into(),
std::io::ErrorKind::IsADirectory => Error::IsNotRegular.into(),
std::io::ErrorKind::NotADirectory => Error::FileAccessDenied.into(),
std::io::ErrorKind::DirectoryNotEmpty => Error::FileAccessDenied.into(),
std::io::ErrorKind::UnexpectedEof => Error::FaultyDisk.into(),
std::io::ErrorKind::TooManyLinks => Error::TooManyOpenFiles.into(),
std::io::ErrorKind::InvalidInput => Error::FileNotFound.into(),
std::io::ErrorKind::InvalidData => Error::FileCorrupt.into(),
std::io::ErrorKind::StorageFull => Error::DiskFull.into(),
_ => io_err,
}
}
pub fn to_volume_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::NotFound => Error::VolumeNotFound.into(),
std::io::ErrorKind::PermissionDenied => Error::DiskAccessDenied.into(),
std::io::ErrorKind::DirectoryNotEmpty => Error::VolumeNotEmpty.into(),
std::io::ErrorKind::NotADirectory => Error::IsNotRegular.into(),
std::io::ErrorKind::Other => {
let err = Error::from(io_err.to_string());
match err {
Error::FileNotFound => Error::VolumeNotFound.into(),
Error::FileAccessDenied => Error::DiskAccessDenied.into(),
_ => to_file_error(io_err),
}
}
_ => to_file_error(io_err),
}
}
pub fn to_disk_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::NotFound => Error::DiskNotFound.into(),
std::io::ErrorKind::PermissionDenied => Error::DiskAccessDenied.into(),
std::io::ErrorKind::Other => {
let err = Error::from(io_err.to_string());
match err {
Error::FileNotFound => Error::DiskNotFound.into(),
Error::VolumeNotFound => Error::DiskNotFound.into(),
Error::FileAccessDenied => Error::DiskAccessDenied.into(),
Error::VolumeAccessDenied => Error::DiskAccessDenied.into(),
_ => to_volume_error(io_err),
}
}
_ => to_volume_error(io_err),
}
}
// only errors from FileSystem operations
pub fn to_access_error(io_err: std::io::Error, per_err: Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::PermissionDenied => per_err.into(),
std::io::ErrorKind::NotADirectory => per_err.into(),
std::io::ErrorKind::NotFound => Error::VolumeNotFound.into(),
std::io::ErrorKind::UnexpectedEof => Error::FaultyDisk.into(),
std::io::ErrorKind::Other => {
let err = Error::from(io_err.to_string());
match err {
Error::DiskAccessDenied => per_err.into(),
Error::FileAccessDenied => per_err.into(),
Error::FileNotFound => Error::VolumeNotFound.into(),
_ => to_volume_error(io_err),
}
}
_ => to_volume_error(io_err),
}
}
pub fn to_unformatted_disk_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::NotFound => Error::UnformattedDisk.into(),
std::io::ErrorKind::PermissionDenied => Error::DiskAccessDenied.into(),
std::io::ErrorKind::Other => {
let err = Error::from(io_err.to_string());
match err {
Error::FileNotFound => Error::UnformattedDisk.into(),
Error::DiskNotFound => Error::UnformattedDisk.into(),
Error::VolumeNotFound => Error::UnformattedDisk.into(),
Error::FileAccessDenied => Error::DiskAccessDenied.into(),
Error::DiskAccessDenied => Error::DiskAccessDenied.into(),
_ => Error::CorruptedBackend.into(),
}
}
_ => Error::CorruptedBackend.into(),
}
}
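
Because `From<std::io::Error> for Error` re-parses stringified `Other` errors (see error.rs below), the same OS-level `NotFound` round-trips to a different variant depending on which layer mapped it. A hedged check of that behavior, assuming the conversions above are in scope:

// Illustrative round trip: the mapping layer decides what NotFound means.
fn layered_not_found() {
    let nf = || std::io::Error::from(std::io::ErrorKind::NotFound);
    assert_eq!(Error::from(to_file_error(nf())), Error::FileNotFound);
    assert_eq!(Error::from(to_volume_error(nf())), Error::VolumeNotFound);
    assert_eq!(Error::from(to_disk_error(nf())), Error::DiskNotFound);
}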

586
crates/error/src/error.rs Normal file

@@ -0,0 +1,586 @@
use std::hash::Hash;
use std::str::FromStr;
const ERROR_PREFIX: &str = "[RUSTFS error] ";
pub type Result<T> = core::result::Result<T, Error>;
#[derive(thiserror::Error, Default, Debug)]
pub enum Error {
#[default]
#[error("[RUSTFS error] Nil")]
Nil,
#[error("I/O error: {0}")]
IoError(std::io::Error),
#[error("[RUSTFS error] Erasure Read quorum not met")]
ErasureReadQuorum,
#[error("[RUSTFS error] Erasure Write quorum not met")]
ErasureWriteQuorum,
#[error("[RUSTFS error] Disk not found")]
DiskNotFound,
#[error("[RUSTFS error] Faulty disk")]
FaultyDisk,
#[error("[RUSTFS error] Faulty remote disk")]
FaultyRemoteDisk,
#[error("[RUSTFS error] Unsupported disk")]
UnsupportedDisk,
#[error("[RUSTFS error] Unformatted disk")]
UnformattedDisk,
#[error("[RUSTFS error] Corrupted backend")]
CorruptedBackend,
#[error("[RUSTFS error] Disk access denied")]
DiskAccessDenied,
#[error("[RUSTFS error] Disk ongoing request")]
DiskOngoingReq,
#[error("[RUSTFS error] Disk full")]
DiskFull,
#[error("[RUSTFS error] Volume not found")]
VolumeNotFound,
#[error("[RUSTFS error] Volume not empty")]
VolumeNotEmpty,
#[error("[RUSTFS error] Volume access denied")]
VolumeAccessDenied,
#[error("[RUSTFS error] Volume exists")]
VolumeExists,
#[error("[RUSTFS error] Disk not a directory")]
DiskNotDir,
#[error("[RUSTFS error] File not found")]
FileNotFound,
#[error("[RUSTFS error] File corrupt")]
FileCorrupt,
#[error("[RUSTFS error] File access denied")]
FileAccessDenied,
#[error("[RUSTFS error] Too many open files")]
TooManyOpenFiles,
#[error("[RUSTFS error] Is not a regular file")]
IsNotRegular,
#[error("[RUSTFS error] File version not found")]
FileVersionNotFound,
#[error("[RUSTFS error] Less data than expected")]
LessData,
#[error("[RUSTFS error] Short write")]
ShortWrite,
#[error("[RUSTFS error] Done for now")]
DoneForNow,
#[error("[RUSTFS error] Method not allowed")]
MethodNotAllowed,
#[error("[RUSTFS error] Inconsistent disk")]
InconsistentDisk,
#[error("[RUSTFS error] File name too long")]
FileNameTooLong,
#[error("[RUSTFS error] Scan ignore file contribution")]
ScanIgnoreFileContrib,
#[error("[RUSTFS error] Scan skip file")]
ScanSkipFile,
#[error("[RUSTFS error] Scan heal stop signaled")]
ScanHealStopSignal,
#[error("[RUSTFS error] Scan heal idle timeout")]
ScanHealIdleTimeout,
#[error("[RUSTFS error] Scan retry healing")]
ScanRetryHealing,
#[error("[RUSTFS error] {0}")]
Other(String),
}
// Generic From implementation removed to avoid conflicts with std::convert::From<T> for T
impl FromStr for Error {
type Err = Error;
fn from_str(s: &str) -> core::result::Result<Self, Self::Err> {
// Only strip prefix for non-IoError
let s = if s.starts_with("I/O error: ") {
s
} else {
s.strip_prefix(ERROR_PREFIX).unwrap_or(s)
};
match s {
"Nil" => Ok(Error::Nil),
"ErasureReadQuorum" => Ok(Error::ErasureReadQuorum),
"ErasureWriteQuorum" => Ok(Error::ErasureWriteQuorum),
"DiskNotFound" | "Disk not found" => Ok(Error::DiskNotFound),
"FaultyDisk" | "Faulty disk" => Ok(Error::FaultyDisk),
"FaultyRemoteDisk" | "Faulty remote disk" => Ok(Error::FaultyRemoteDisk),
"UnformattedDisk" | "Unformatted disk" => Ok(Error::UnformattedDisk),
"DiskAccessDenied" | "Disk access denied" => Ok(Error::DiskAccessDenied),
"DiskOngoingReq" | "Disk ongoing request" => Ok(Error::DiskOngoingReq),
"FileNotFound" | "File not found" => Ok(Error::FileNotFound),
"FileCorrupt" | "File corrupt" => Ok(Error::FileCorrupt),
"FileVersionNotFound" | "File version not found" => Ok(Error::FileVersionNotFound),
"LessData" | "Less data than expected" => Ok(Error::LessData),
"ShortWrite" | "Short write" => Ok(Error::ShortWrite),
"VolumeNotFound" | "Volume not found" => Ok(Error::VolumeNotFound),
"VolumeNotEmpty" | "Volume not empty" => Ok(Error::VolumeNotEmpty),
"VolumeExists" | "Volume exists" => Ok(Error::VolumeExists),
"VolumeAccessDenied" | "Volume access denied" => Ok(Error::VolumeAccessDenied),
"DiskNotDir" | "Disk not a directory" => Ok(Error::DiskNotDir),
"FileAccessDenied" | "File access denied" => Ok(Error::FileAccessDenied),
"TooManyOpenFiles" | "Too many open files" => Ok(Error::TooManyOpenFiles),
"IsNotRegular" | "Is not a regular file" => Ok(Error::IsNotRegular),
"CorruptedBackend" | "Corrupted backend" => Ok(Error::CorruptedBackend),
"UnsupportedDisk" | "Unsupported disk" => Ok(Error::UnsupportedDisk),
"InconsistentDisk" | "Inconsistent disk" => Ok(Error::InconsistentDisk),
"DiskFull" | "Disk full" => Ok(Error::DiskFull),
"FileNameTooLong" | "File name too long" => Ok(Error::FileNameTooLong),
"ScanIgnoreFileContrib" | "Scan ignore file contribution" => Ok(Error::ScanIgnoreFileContrib),
"ScanSkipFile" | "Scan skip file" => Ok(Error::ScanSkipFile),
"ScanHealStopSignal" | "Scan heal stop signaled" => Ok(Error::ScanHealStopSignal),
"ScanHealIdleTimeout" | "Scan heal idle timeout" => Ok(Error::ScanHealIdleTimeout),
"ScanRetryHealing" | "Scan retry healing" => Ok(Error::ScanRetryHealing),
s if s.starts_with("I/O error: ") => {
Ok(Error::IoError(std::io::Error::other(s.strip_prefix("I/O error: ").unwrap_or(""))))
}
"DoneForNow" | "Done for now" => Ok(Error::DoneForNow),
"MethodNotAllowed" | "Method not allowed" => Ok(Error::MethodNotAllowed),
str => Err(Error::IoError(std::io::Error::other(str.to_string()))),
}
}
}
impl From<Error> for std::io::Error {
fn from(err: Error) -> Self {
match err {
Error::IoError(e) => e,
e => std::io::Error::other(e),
}
}
}
impl From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Self {
match e.kind() {
// an Other-kind error may wrap a stringified Error; try to parse it back
std::io::ErrorKind::Other => Error::from(e.to_string()),
_ => Error::IoError(e),
}
}
}
impl From<String> for Error {
fn from(s: String) -> Self {
Error::from_str(&s).unwrap_or(Error::IoError(std::io::Error::other(s)))
}
}
impl From<&str> for Error {
fn from(s: &str) -> Self {
Error::from_str(s).unwrap_or(Error::IoError(std::io::Error::other(s)))
}
}
// Common error type conversions for ? operator
impl From<std::num::ParseIntError> for Error {
fn from(e: std::num::ParseIntError) -> Self {
Error::Other(format!("Parse int error: {}", e))
}
}
impl From<std::num::ParseFloatError> for Error {
fn from(e: std::num::ParseFloatError) -> Self {
Error::Other(format!("Parse float error: {}", e))
}
}
impl From<std::str::Utf8Error> for Error {
fn from(e: std::str::Utf8Error) -> Self {
Error::Other(format!("UTF-8 error: {}", e))
}
}
impl From<std::string::FromUtf8Error> for Error {
fn from(e: std::string::FromUtf8Error) -> Self {
Error::Other(format!("UTF-8 conversion error: {}", e))
}
}
impl From<std::fmt::Error> for Error {
fn from(e: std::fmt::Error) -> Self {
Error::Other(format!("Format error: {}", e))
}
}
impl From<Box<dyn std::error::Error + Send + Sync>> for Error {
fn from(e: Box<dyn std::error::Error + Send + Sync>) -> Self {
Error::Other(e.to_string())
}
}
impl From<time::error::ComponentRange> for Error {
fn from(e: time::error::ComponentRange) -> Self {
Error::Other(format!("Time component range error: {}", e))
}
}
impl From<rmp::decode::NumValueReadError<std::io::Error>> for Error {
fn from(e: rmp::decode::NumValueReadError<std::io::Error>) -> Self {
Error::Other(format!("NumValueReadError: {}", e))
}
}
impl From<rmp::encode::ValueWriteError> for Error {
fn from(e: rmp::encode::ValueWriteError) -> Self {
Error::Other(format!("ValueWriteError: {}", e))
}
}
impl From<rmp::decode::ValueReadError> for Error {
fn from(e: rmp::decode::ValueReadError) -> Self {
Error::Other(format!("ValueReadError: {}", e))
}
}
impl From<uuid::Error> for Error {
fn from(e: uuid::Error) -> Self {
Error::Other(format!("UUID error: {}", e))
}
}
impl From<rmp_serde::decode::Error> for Error {
fn from(e: rmp_serde::decode::Error) -> Self {
Error::Other(format!("rmp_serde::decode::Error: {}", e))
}
}
impl From<rmp_serde::encode::Error> for Error {
fn from(e: rmp_serde::encode::Error) -> Self {
Error::Other(format!("rmp_serde::encode::Error: {}", e))
}
}
impl From<serde::de::value::Error> for Error {
fn from(e: serde::de::value::Error) -> Self {
Error::Other(format!("serde::de::value::Error: {}", e))
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Self {
Error::Other(format!("serde_json::Error: {}", e))
}
}
impl From<std::collections::TryReserveError> for Error {
fn from(e: std::collections::TryReserveError) -> Self {
Error::Other(format!("TryReserveError: {}", e))
}
}
impl From<tonic::Status> for Error {
fn from(e: tonic::Status) -> Self {
Error::Other(format!("tonic::Status: {}", e.message()))
}
}
impl From<protos::proto_gen::node_service::Error> for Error {
fn from(e: protos::proto_gen::node_service::Error) -> Self {
Error::from_str(&e.error_info).unwrap_or(Error::Other(format!("Proto_Error: {}", e.error_info)))
}
}
impl From<Error> for protos::proto_gen::node_service::Error {
fn from(val: Error) -> Self {
protos::proto_gen::node_service::Error {
code: 0,
error_info: val.to_string(),
}
}
}
impl Hash for Error {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Error::IoError(err) => {
err.kind().hash(state);
err.to_string().hash(state);
}
e => e.to_string().hash(state),
}
}
}
impl Clone for Error {
fn clone(&self) -> Self {
match self {
Error::IoError(err) => Error::IoError(std::io::Error::new(err.kind(), err.to_string())),
Error::ErasureReadQuorum => Error::ErasureReadQuorum,
Error::ErasureWriteQuorum => Error::ErasureWriteQuorum,
Error::DiskNotFound => Error::DiskNotFound,
Error::FaultyDisk => Error::FaultyDisk,
Error::FaultyRemoteDisk => Error::FaultyRemoteDisk,
Error::UnformattedDisk => Error::UnformattedDisk,
Error::DiskAccessDenied => Error::DiskAccessDenied,
Error::DiskOngoingReq => Error::DiskOngoingReq,
Error::FileNotFound => Error::FileNotFound,
Error::FileCorrupt => Error::FileCorrupt,
Error::FileVersionNotFound => Error::FileVersionNotFound,
Error::LessData => Error::LessData,
Error::ShortWrite => Error::ShortWrite,
Error::VolumeNotFound => Error::VolumeNotFound,
Error::VolumeNotEmpty => Error::VolumeNotEmpty,
Error::VolumeAccessDenied => Error::VolumeAccessDenied,
Error::VolumeExists => Error::VolumeExists,
Error::DiskNotDir => Error::DiskNotDir,
Error::FileAccessDenied => Error::FileAccessDenied,
Error::TooManyOpenFiles => Error::TooManyOpenFiles,
Error::IsNotRegular => Error::IsNotRegular,
Error::CorruptedBackend => Error::CorruptedBackend,
Error::UnsupportedDisk => Error::UnsupportedDisk,
Error::DiskFull => Error::DiskFull,
Error::Nil => Error::Nil,
Error::DoneForNow => Error::DoneForNow,
Error::MethodNotAllowed => Error::MethodNotAllowed,
Error::InconsistentDisk => Error::InconsistentDisk,
Error::FileNameTooLong => Error::FileNameTooLong,
Error::ScanIgnoreFileContrib => Error::ScanIgnoreFileContrib,
Error::ScanSkipFile => Error::ScanSkipFile,
Error::ScanHealStopSignal => Error::ScanHealStopSignal,
Error::ScanHealIdleTimeout => Error::ScanHealIdleTimeout,
Error::ScanRetryHealing => Error::ScanRetryHealing,
Error::Other(msg) => Error::Other(msg.clone()),
}
}
}
impl PartialEq for Error {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(Error::IoError(e1), Error::IoError(e2)) => e1.kind() == e2.kind() && e1.to_string() == e2.to_string(),
(Error::ErasureReadQuorum, Error::ErasureReadQuorum) => true,
(Error::ErasureWriteQuorum, Error::ErasureWriteQuorum) => true,
(Error::DiskNotFound, Error::DiskNotFound) => true,
(Error::FaultyDisk, Error::FaultyDisk) => true,
(Error::FaultyRemoteDisk, Error::FaultyRemoteDisk) => true,
(Error::UnformattedDisk, Error::UnformattedDisk) => true,
(Error::DiskAccessDenied, Error::DiskAccessDenied) => true,
(Error::DiskOngoingReq, Error::DiskOngoingReq) => true,
(Error::FileNotFound, Error::FileNotFound) => true,
(Error::FileCorrupt, Error::FileCorrupt) => true,
(Error::FileVersionNotFound, Error::FileVersionNotFound) => true,
(Error::LessData, Error::LessData) => true,
(Error::ShortWrite, Error::ShortWrite) => true,
(Error::VolumeNotFound, Error::VolumeNotFound) => true,
(Error::VolumeNotEmpty, Error::VolumeNotEmpty) => true,
(Error::VolumeAccessDenied, Error::VolumeAccessDenied) => true,
(Error::VolumeExists, Error::VolumeExists) => true,
(Error::DiskNotDir, Error::DiskNotDir) => true,
(Error::FileAccessDenied, Error::FileAccessDenied) => true,
(Error::TooManyOpenFiles, Error::TooManyOpenFiles) => true,
(Error::IsNotRegular, Error::IsNotRegular) => true,
(Error::CorruptedBackend, Error::CorruptedBackend) => true,
(Error::UnsupportedDisk, Error::UnsupportedDisk) => true,
(Error::DiskFull, Error::DiskFull) => true,
(Error::Nil, Error::Nil) => true,
(Error::DoneForNow, Error::DoneForNow) => true,
(Error::MethodNotAllowed, Error::MethodNotAllowed) => true,
(Error::InconsistentDisk, Error::InconsistentDisk) => true,
(Error::FileNameTooLong, Error::FileNameTooLong) => true,
(Error::ScanIgnoreFileContrib, Error::ScanIgnoreFileContrib) => true,
(Error::ScanSkipFile, Error::ScanSkipFile) => true,
(Error::ScanHealStopSignal, Error::ScanHealStopSignal) => true,
(Error::ScanHealIdleTimeout, Error::ScanHealIdleTimeout) => true,
(Error::ScanRetryHealing, Error::ScanRetryHealing) => true,
(Error::Other(s1), Error::Other(s2)) => s1 == s2,
_ => false,
}
}
}
impl Eq for Error {}
impl Error {
/// Create an error from a message string (for backward compatibility)
pub fn msg<S: Into<String>>(message: S) -> Self {
Error::Other(message.into())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::io;
#[test]
fn test_display_and_debug() {
let e = Error::DiskNotFound;
assert_eq!(format!("{}", e), format!("{ERROR_PREFIX}Disk not found"));
assert_eq!(format!("{:?}", e), "DiskNotFound");
let io_err = Error::IoError(io::Error::other("fail"));
assert_eq!(format!("{}", io_err), "I/O error: fail");
}
#[test]
fn test_partial_eq_and_eq() {
assert_eq!(Error::DiskNotFound, Error::DiskNotFound);
assert_ne!(Error::DiskNotFound, Error::FaultyDisk);
let e1 = Error::IoError(io::Error::other("fail"));
let e2 = Error::IoError(io::Error::other("fail"));
assert_eq!(e1, e2);
let e3 = Error::IoError(io::Error::new(io::ErrorKind::NotFound, "fail"));
assert_ne!(e1, e3);
}
#[test]
fn test_clone() {
let e = Error::DiskAccessDenied;
let cloned = e.clone();
assert_eq!(e, cloned);
let io_err = Error::IoError(io::Error::other("fail"));
let cloned_io = io_err.clone();
assert_eq!(io_err, cloned_io);
}
#[test]
fn test_hash() {
let e1 = Error::DiskNotFound;
let e2 = Error::DiskNotFound;
let mut h1 = DefaultHasher::new();
let mut h2 = DefaultHasher::new();
e1.hash(&mut h1);
e2.hash(&mut h2);
assert_eq!(h1.finish(), h2.finish());
let io_err1 = Error::IoError(io::Error::other("fail"));
let io_err2 = Error::IoError(io::Error::other("fail"));
let mut h3 = DefaultHasher::new();
let mut h4 = DefaultHasher::new();
io_err1.hash(&mut h3);
io_err2.hash(&mut h4);
assert_eq!(h3.finish(), h4.finish());
}
#[test]
fn test_from_error_for_io_error() {
let e = Error::DiskNotFound;
let io_err: io::Error = e.into();
assert_eq!(io_err.kind(), io::ErrorKind::Other);
assert_eq!(io_err.to_string(), format!("{ERROR_PREFIX}Disk not found"));
assert_eq!(Error::from(io_err.to_string()), Error::DiskNotFound);
let orig = io::Error::other("fail");
let e2 = Error::IoError(orig.kind().into());
let io_err2: io::Error = e2.into();
assert_eq!(io_err2.kind(), io::ErrorKind::Other);
}
#[test]
fn test_from_io_error_for_error() {
let orig = io::Error::other("fail");
let e: Error = orig.into();
match e {
Error::IoError(ioe) => {
assert_eq!(ioe.kind(), io::ErrorKind::Other);
assert_eq!(ioe.to_string(), "fail");
}
_ => panic!("Expected IoError variant"),
}
}
#[test]
fn test_default() {
let e = Error::default();
assert_eq!(e, Error::Nil);
}
#[test]
fn test_from_str() {
use std::str::FromStr;
assert_eq!(Error::from_str("Nil"), Ok(Error::Nil));
assert_eq!(Error::from_str("DiskNotFound"), Ok(Error::DiskNotFound));
assert_eq!(Error::from_str("ErasureReadQuorum"), Ok(Error::ErasureReadQuorum));
assert_eq!(Error::from_str("I/O error: fail"), Ok(Error::IoError(io::Error::other("fail"))));
assert_eq!(Error::from_str(&format!("{ERROR_PREFIX}Disk not found")), Ok(Error::DiskNotFound));
assert_eq!(
Error::from_str("UnknownError"),
Err(Error::IoError(std::io::Error::other("UnknownError")))
);
}
#[test]
fn test_from_string() {
let e: Error = format!("{ERROR_PREFIX}Disk not found").parse().unwrap();
assert_eq!(e, Error::DiskNotFound);
let e2: Error = "I/O error: fail".to_string().parse().unwrap();
assert_eq!(e2, Error::IoError(std::io::Error::other("fail")));
}
#[test]
fn test_from_io_error() {
let e = Error::IoError(io::Error::other("fail"));
let io_err: io::Error = e.clone().into();
assert_eq!(io_err.to_string(), "fail");
let e2: Error = io::Error::other("fail").into();
assert_eq!(e2, Error::IoError(io::Error::other("fail")));
let result = Error::from(io::Error::other("fail"));
assert_eq!(result, Error::IoError(io::Error::other("fail")));
let io_err2: std::io::Error = Error::CorruptedBackend.into();
assert_eq!(io_err2.to_string(), "[RUSTFS error] Corrupted backend");
assert_eq!(Error::from(io_err2), Error::CorruptedBackend);
let io_err3: std::io::Error = Error::DiskNotFound.into();
assert_eq!(io_err3.to_string(), "[RUSTFS error] Disk not found");
assert_eq!(Error::from(io_err3), Error::DiskNotFound);
let io_err4: std::io::Error = Error::DiskAccessDenied.into();
assert_eq!(io_err4.to_string(), "[RUSTFS error] Disk access denied");
assert_eq!(Error::from(io_err4), Error::DiskAccessDenied);
}
#[test]
fn test_question_mark_operator() {
fn parse_number(s: &str) -> Result<i32> {
let num = s.parse::<i32>()?; // ParseIntError automatically converts to Error
Ok(num)
}
fn format_string() -> Result<String> {
use std::fmt::Write;
let mut s = String::new();
write!(&mut s, "test")?; // fmt::Error automatically converts to Error
Ok(s)
}
fn utf8_conversion() -> Result<String> {
let bytes = vec![0xFF, 0xFE]; // Invalid UTF-8
let s = String::from_utf8(bytes)?; // FromUtf8Error automatically converts to Error
Ok(s)
}
// Test successful case
assert_eq!(parse_number("42").unwrap(), 42);
// Test error conversion
let err = parse_number("not_a_number").unwrap_err();
assert!(matches!(err, Error::Other(_)));
assert!(err.to_string().contains("Parse int error"));
// Test format error conversion
assert_eq!(format_string().unwrap(), "test");
// Test UTF-8 error conversion
let err = utf8_conversion().unwrap_err();
assert!(matches!(err, Error::Other(_)));
assert!(err.to_string().contains("UTF-8 conversion error"));
}
}

View File

@@ -0,0 +1,11 @@
use crate::Error;
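// Disk-level failures that object operations tolerate when aggregating
// per-disk results, since the operation can still succeed on the remaining disks.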
pub static OBJECT_OP_IGNORED_ERRS: &[Error] = &[
Error::DiskNotFound,
Error::FaultyDisk,
Error::FaultyRemoteDisk,
Error::DiskAccessDenied,
Error::DiskOngoingReq,
Error::UnformattedDisk,
];
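// The base set of disk failures ignored when reducing per-disk errors.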
pub static BASE_IGNORED_ERRS: &[Error] = &[Error::DiskNotFound, Error::FaultyDisk, Error::FaultyRemoteDisk];

14
crates/error/src/lib.rs Normal file
View File

@@ -0,0 +1,14 @@
mod error;
pub use error::*;
mod reduce;
pub use reduce::*;
mod ignored;
pub use ignored::*;
mod convert;
pub use convert::*;
mod bitrot;
pub use bitrot::*;

138
crates/error/src/reduce.rs Normal file
View File

@@ -0,0 +1,138 @@
use crate::error::Error;
pub fn reduce_write_quorum_errs(errors: &[Option<Error>], ignored_errs: &[Error], quorum: usize) -> Option<Error> {
reduce_quorum_errs(errors, ignored_errs, quorum, Error::ErasureWriteQuorum)
}
pub fn reduce_read_quorum_errs(errors: &[Option<Error>], ignored_errs: &[Error], quorum: usize) -> Option<Error> {
reduce_quorum_errs(errors, ignored_errs, quorum, Error::ErasureReadQuorum)
}
pub fn reduce_quorum_errs(errors: &[Option<Error>], ignored_errs: &[Error], quorum: usize, quorum_err: Error) -> Option<Error> {
let (max_count, err) = reduce_errs(errors, ignored_errs);
if max_count >= quorum {
err
} else {
Some(quorum_err)
}
}
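/// Counts the non-ignored errors (treating `None` slots as `Error::Nil`) and
/// returns the highest count together with the winning error; ties prefer
/// `Error::Nil`, and a `Nil` winner is reported as `(count, None)`.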
pub fn reduce_errs(errors: &[Option<Error>], ignored_errs: &[Error]) -> (usize, Option<Error>) {
let err_counts =
errors
.iter()
.map(|e| e.as_ref().unwrap_or(&Error::Nil))
.fold(std::collections::HashMap::new(), |mut acc, e| {
if is_ignored_err(ignored_errs, e) {
return acc;
}
*acc.entry(e.clone()).or_insert(0) += 1;
acc
});
let (err, max_count) = err_counts
.into_iter()
.max_by(|(e1, c1), (e2, c2)| {
// Prefer Error::Nil if present in a tie
let count_cmp = c1.cmp(c2);
if count_cmp == std::cmp::Ordering::Equal {
match (e1, e2) {
(Error::Nil, _) => std::cmp::Ordering::Greater,
(_, Error::Nil) => std::cmp::Ordering::Less,
_ => format!("{e1:?}").cmp(&format!("{e2:?}")),
}
} else {
count_cmp
}
})
.unwrap_or((Error::Nil, 0));
(max_count, if err == Error::Nil { None } else { Some(err) })
}
pub fn is_ignored_err(ignored_errs: &[Error], err: &Error) -> bool {
ignored_errs.iter().any(|e| e == err)
}
pub fn count_errs(errors: &[Option<Error>], err: Error) -> usize {
errors
.iter()
.map(|e| e.as_ref().unwrap_or(&Error::Nil))
.filter(|&e| e == &err)
.count()
}
#[cfg(test)]
mod tests {
use super::*;
fn err_io(msg: &str) -> Error {
Error::IoError(std::io::Error::other(msg))
}
#[test]
fn test_reduce_errs_basic() {
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e1.clone()), Some(e2.clone()), None];
let ignored = vec![];
let (count, err) = reduce_errs(&errors, &ignored);
assert_eq!(count, 2);
assert_eq!(err, Some(e1));
}
#[test]
fn test_reduce_errs_ignored() {
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e2.clone()), Some(e1.clone()), Some(e2.clone()), None];
let ignored = vec![e2.clone()];
let (count, err) = reduce_errs(&errors, &ignored);
assert_eq!(count, 2);
assert_eq!(err, Some(e1));
}
#[test]
fn test_reduce_quorum_errs() {
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e1.clone()), Some(e2.clone()), None];
let ignored = vec![];
// quorum = 2, should return e1
let res = reduce_quorum_errs(&errors, &ignored, 2, Error::ErasureReadQuorum);
assert_eq!(res, Some(e1));
// quorum = 3, should return quorum error
let res = reduce_quorum_errs(&errors, &ignored, 3, Error::ErasureReadQuorum);
assert_eq!(res, Some(Error::ErasureReadQuorum));
}
#[test]
fn test_count_errs() {
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e2.clone()), Some(e1.clone()), None];
assert_eq!(count_errs(&errors, e1.clone()), 2);
assert_eq!(count_errs(&errors, e2.clone()), 1);
}
#[test]
fn test_is_ignored_err() {
let e1 = err_io("a");
let e2 = err_io("b");
let ignored = vec![e1.clone()];
assert!(is_ignored_err(&ignored, &e1));
assert!(!is_ignored_err(&ignored, &e2));
}
#[test]
fn test_reduce_errs_nil_tiebreak() {
// Error::Nil and another error have the same count, should prefer Nil
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e2.clone()), None, Some(e1.clone()), None]; // e1: 2, e2: 1, Nil: 2
let ignored = vec![];
let (count, err) = reduce_errs(&errors, &ignored);
assert_eq!(count, 2);
assert_eq!(err, None); // None means Error::Nil is preferred
}
}

View File

@@ -1,12 +1,15 @@
pub type Result<T> = core::result::Result<T, Error>;
#[derive(thiserror::Error, Debug, Clone)]
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("File not found")]
FileNotFound,
#[error("File version not found")]
FileVersionNotFound,
#[error("Volume not found")]
VolumeNotFound,
#[error("File corrupt")]
FileCorrupt,
@@ -16,8 +19,11 @@ pub enum Error {
#[error("Method not allowed")]
MethodNotAllowed,
#[error("Unexpected error")]
Unexpected,
#[error("I/O error: {0}")]
Io(String),
Io(std::io::Error),
#[error("rmp serde decode error: {0}")]
RmpSerdeDecode(String),
@@ -64,7 +70,8 @@ impl PartialEq for Error {
(Error::MethodNotAllowed, Error::MethodNotAllowed) => true,
(Error::FileNotFound, Error::FileNotFound) => true,
(Error::FileVersionNotFound, Error::FileVersionNotFound) => true,
(Error::Io(e1), Error::Io(e2)) => e1 == e2,
(Error::VolumeNotFound, Error::VolumeNotFound) => true,
(Error::Io(e1), Error::Io(e2)) => e1.kind() == e2.kind() && e1.to_string() == e2.to_string(),
(Error::RmpSerdeDecode(e1), Error::RmpSerdeDecode(e2)) => e1 == e2,
(Error::RmpSerdeEncode(e1), Error::RmpSerdeEncode(e2)) => e1 == e2,
(Error::RmpDecodeValueRead(e1), Error::RmpDecodeValueRead(e2)) => e1 == e2,
@@ -72,14 +79,39 @@ impl PartialEq for Error {
(Error::RmpDecodeNumValueRead(e1), Error::RmpDecodeNumValueRead(e2)) => e1 == e2,
(Error::TimeComponentRange(e1), Error::TimeComponentRange(e2)) => e1 == e2,
(Error::UuidParse(e1), Error::UuidParse(e2)) => e1 == e2,
(Error::Unexpected, Error::Unexpected) => true,
(a, b) => a.to_string() == b.to_string(),
}
}
}
impl Clone for Error {
fn clone(&self) -> Self {
match self {
Error::FileNotFound => Error::FileNotFound,
Error::FileVersionNotFound => Error::FileVersionNotFound,
Error::FileCorrupt => Error::FileCorrupt,
Error::DoneForNow => Error::DoneForNow,
Error::MethodNotAllowed => Error::MethodNotAllowed,
Error::VolumeNotFound => Error::VolumeNotFound,
Error::Io(e) => Error::Io(std::io::Error::new(e.kind(), e.to_string())),
Error::RmpSerdeDecode(s) => Error::RmpSerdeDecode(s.clone()),
Error::RmpSerdeEncode(s) => Error::RmpSerdeEncode(s.clone()),
Error::FromUtf8(s) => Error::FromUtf8(s.clone()),
Error::RmpDecodeValueRead(s) => Error::RmpDecodeValueRead(s.clone()),
Error::RmpEncodeValueWrite(s) => Error::RmpEncodeValueWrite(s.clone()),
Error::RmpDecodeNumValueRead(s) => Error::RmpDecodeNumValueRead(s.clone()),
Error::RmpDecodeMarkerRead(s) => Error::RmpDecodeMarkerRead(s.clone()),
Error::TimeComponentRange(s) => Error::TimeComponentRange(s.clone()),
Error::UuidParse(s) => Error::UuidParse(s.clone()),
Error::Unexpected => Error::Unexpected,
}
}
}
impl From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Self {
Error::Io(e.to_string())
Error::Io(e)
}
}

View File

@@ -7,6 +7,7 @@ mod metacache;
pub mod test_data;
pub use error::*;
pub use fileinfo::*;
pub use filemeta::*;
pub use filemeta_inline::*;

View File

@@ -732,7 +732,7 @@ impl<R: AsyncRead + Unpin> MetacacheReader<R> {
}
}
pub type UpdateFn<T> = Box<dyn Fn() -> Pin<Box<dyn Future<Output = Result<T>> + Send>> + Send + Sync + 'static>;
pub type UpdateFn<T> = Box<dyn Fn() -> Pin<Box<dyn Future<Output = std::io::Result<T>> + Send>> + Send + Sync + 'static>;
#[derive(Clone, Debug, Default)]
pub struct Opts {
@@ -763,7 +763,7 @@ impl<T: Clone + Debug + Send + 'static> Cache<T> {
}
#[allow(unsafe_code)]
pub async fn get(self: Arc<Self>) -> Result<T> {
pub async fn get(self: Arc<Self>) -> std::io::Result<T> {
let v_ptr = self.val.load(AtomicOrdering::SeqCst);
let v = if v_ptr.is_null() {
None
@@ -816,7 +816,7 @@ impl<T: Clone + Debug + Send + 'static> Cache<T> {
}
}
async fn update(&self) -> Result<()> {
async fn update(&self) -> std::io::Result<()> {
match (self.update_fn)().await {
Ok(val) => {
self.val.store(Box::into_raw(Box::new(val)), AtomicOrdering::SeqCst);

View File

@@ -181,7 +181,7 @@ pub async fn bitrot_verify<R: AsyncRead + Unpin + Send>(
let mut left = want_size;
if left != bitrot_shard_file_size(part_size, shard_size, algo.clone()) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "bitrot shard file size mismatch"));
return Err(std::io::Error::other("bitrot shard file size mismatch"));
}
while left > 0 {
@@ -197,7 +197,7 @@ pub async fn bitrot_verify<R: AsyncRead + Unpin + Send>(
let actual_hash = algo.hash_encode(&buf);
if actual_hash != hash_buf[0..n] {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "bitrot hash mismatch"));
return Err(std::io::Error::other("bitrot hash mismatch"));
}
left -= read;

View File

@@ -101,3 +101,11 @@ impl<R> Reader for crate::DecryptReader<R> where R: Reader {}
impl<R> Reader for crate::CompressReader<R> where R: Reader {}
impl<R> Reader for crate::DecompressReader<R> where R: Reader {}
impl Reader for tokio::fs::File {}
impl HashReaderDetector for tokio::fs::File {}
impl EtagResolvable for tokio::fs::File {}
impl Reader for tokio::io::DuplexStream {}
impl HashReaderDetector for tokio::io::DuplexStream {}
impl EtagResolvable for tokio::io::DuplexStream {}

View File

@@ -7,16 +7,31 @@ rust-version.workspace = true
version.workspace = true
[dependencies]
blake3 = { version = "1.8.2", optional = true }
highway = { workspace = true, optional = true }
lazy_static = { workspace = true, optional = true }
local-ip-address = { workspace = true, optional = true }
md-5 = { workspace = true, optional = true }
netif = { workspace = true, optional = true }
nix = { workspace = true, optional = true }
rustfs-config = { workspace = true }
rustls = { workspace = true, optional = true }
rustls-pemfile = { workspace = true, optional = true }
rustls-pki-types = { workspace = true, optional = true }
serde = { workspace = true, optional = true }
sha2 = { workspace = true, optional = true }
tempfile = { workspace = true, optional = true }
tokio = { workspace = true, optional = true, features = ["io-util", "macros"] }
tracing = { workspace = true }
url = { workspace = true, optional = true }
[dev-dependencies]
tempfile = { workspace = true }
[target.'cfg(windows)'.dependencies]
winapi = { workspace = true, optional = true, features = ["std", "fileapi", "minwindef", "ntdef", "winnt"] }
[lints]
workspace = true
@@ -24,6 +39,10 @@ workspace = true
default = ["ip"] # features that are enabled by default
ip = ["dep:local-ip-address"] # ip characteristics and their dependencies
tls = ["dep:rustls", "dep:rustls-pemfile", "dep:rustls-pki-types"] # tls characteristics and their dependencies
net = ["ip"] # empty network features
net = ["ip","dep:url", "dep:netif", "dep:lazy_static"] # empty network features
io = ["dep:tokio"]
path = []
hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde"]
os = ["dep:nix", "dep:tempfile", "winapi"] # operating system utilities
integration = [] # integration test features
full = ["ip", "tls", "net", "integration"] # all features
full = ["ip", "tls", "net", "io","hash", "os", "integration","path"] # all features

143
crates/utils/src/hash.rs Normal file
View File

@@ -0,0 +1,143 @@
use highway::{HighwayHash, HighwayHasher, Key};
use md5::{Digest, Md5};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
/// The fixed key for HighwayHash256. DO NOT change for compatibility.
const HIGHWAY_HASH256_KEY: [u64; 4] = [3, 4, 2, 1];
#[derive(Serialize, Deserialize, Debug, PartialEq, Default, Clone, Eq, Hash)]
/// Supported hash algorithms for bitrot protection.
pub enum HashAlgorithm {
/// SHA256 represents the SHA-256 hash function
SHA256,
/// HighwayHash256 represents the HighwayHash-256 hash function
HighwayHash256,
/// HighwayHash256S represents the streaming HighwayHash-256 hash function
#[default]
HighwayHash256S,
/// BLAKE2b512 represents the BLAKE2b-512 hash function
BLAKE2b512,
/// MD5 (128-bit)
Md5,
/// No hash (for testing or unprotected data)
None,
}
impl HashAlgorithm {
/// Hash the input data and return the hash result as Vec<u8>.
pub fn hash_encode(&self, data: &[u8]) -> Vec<u8> {
match self {
HashAlgorithm::Md5 => Md5::digest(data).to_vec(),
HashAlgorithm::HighwayHash256 => {
let mut hasher = HighwayHasher::new(Key(HIGHWAY_HASH256_KEY));
hasher.append(data);
hasher.finalize256().iter().flat_map(|&n| n.to_le_bytes()).collect()
}
HashAlgorithm::SHA256 => Sha256::digest(data).to_vec(),
HashAlgorithm::HighwayHash256S => {
let mut hasher = HighwayHasher::new(Key(HIGHWAY_HASH256_KEY));
hasher.append(data);
hasher.finalize256().iter().flat_map(|&n| n.to_le_bytes()).collect()
}
HashAlgorithm::BLAKE2b512 => blake3::hash(data).as_bytes().to_vec(),
HashAlgorithm::None => Vec::new(),
}
}
/// Return the output size in bytes for the hash algorithm.
pub fn size(&self) -> usize {
match self {
HashAlgorithm::SHA256 => 32,
HashAlgorithm::HighwayHash256 => 32,
HashAlgorithm::HighwayHash256S => 32,
HashAlgorithm::BLAKE2b512 => 32, // blake3 outputs 32 bytes by default
HashAlgorithm::Md5 => 16,
HashAlgorithm::None => 0,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_hash_algorithm_sizes() {
assert_eq!(HashAlgorithm::Md5.size(), 16);
assert_eq!(HashAlgorithm::HighwayHash256.size(), 32);
assert_eq!(HashAlgorithm::HighwayHash256S.size(), 32);
assert_eq!(HashAlgorithm::SHA256.size(), 32);
assert_eq!(HashAlgorithm::BLAKE2b512.size(), 32);
assert_eq!(HashAlgorithm::None.size(), 0);
}
#[test]
fn test_hash_encode_none() {
let data = b"test data";
let hash = HashAlgorithm::None.hash_encode(data);
assert_eq!(hash.len(), 0);
}
#[test]
fn test_hash_encode_md5() {
let data = b"test data";
let hash = HashAlgorithm::Md5.hash_encode(data);
assert_eq!(hash.len(), 16);
// MD5 should be deterministic
let hash2 = HashAlgorithm::Md5.hash_encode(data);
assert_eq!(hash, hash2);
}
#[test]
fn test_hash_encode_highway() {
let data = b"test data";
let hash = HashAlgorithm::HighwayHash256.hash_encode(data);
assert_eq!(hash.len(), 32);
// HighwayHash should be deterministic
let hash2 = HashAlgorithm::HighwayHash256.hash_encode(data);
assert_eq!(hash, hash2);
}
#[test]
fn test_hash_encode_sha256() {
let data = b"test data";
let hash = HashAlgorithm::SHA256.hash_encode(data);
assert_eq!(hash.len(), 32);
// SHA256 should be deterministic
let hash2 = HashAlgorithm::SHA256.hash_encode(data);
assert_eq!(hash, hash2);
}
#[test]
fn test_hash_encode_blake2b512() {
let data = b"test data";
let hash = HashAlgorithm::BLAKE2b512.hash_encode(data);
assert_eq!(hash.len(), 32); // blake3 outputs 32 bytes by default
// BLAKE2b512 should be deterministic
let hash2 = HashAlgorithm::BLAKE2b512.hash_encode(data);
assert_eq!(hash, hash2);
}
#[test]
fn test_different_data_different_hashes() {
let data1 = b"test data 1";
let data2 = b"test data 2";
let md5_hash1 = HashAlgorithm::Md5.hash_encode(data1);
let md5_hash2 = HashAlgorithm::Md5.hash_encode(data2);
assert_ne!(md5_hash1, md5_hash2);
let highway_hash1 = HashAlgorithm::HighwayHash256.hash_encode(data1);
let highway_hash2 = HashAlgorithm::HighwayHash256.hash_encode(data2);
assert_ne!(highway_hash1, highway_hash2);
let sha256_hash1 = HashAlgorithm::SHA256.hash_encode(data1);
let sha256_hash2 = HashAlgorithm::SHA256.hash_encode(data2);
assert_ne!(sha256_hash1, sha256_hash2);
let blake_hash1 = HashAlgorithm::BLAKE2b512.hash_encode(data1);
let blake_hash2 = HashAlgorithm::BLAKE2b512.hash_encode(data2);
assert_ne!(blake_hash1, blake_hash2);
}
}

231
crates/utils/src/io.rs Normal file
View File

@@ -0,0 +1,231 @@
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
/// Writes buf to the writer, returning the total bytes written; returns early
/// (with a short count) if the writer accepts zero bytes.
pub async fn write_all<W: AsyncWrite + Send + Sync + Unpin>(writer: &mut W, buf: &[u8]) -> std::io::Result<usize> {
let mut total = 0;
while total < buf.len() {
match writer.write(&buf[total..]).await {
Ok(0) => {
break;
}
Ok(n) => total += n,
Err(e) => return Err(e),
}
}
Ok(total)
}
/// Reads into buf until it is full or EOF is reached (in the spirit of Go's io.ReadFull).
/// A short read at EOF returns Ok(n) with the bytes read so far; EOF before any byte is an error.
#[allow(dead_code)]
pub async fn read_full<R: AsyncRead + Send + Sync + Unpin>(mut reader: R, mut buf: &mut [u8]) -> std::io::Result<usize> {
let mut total = 0;
while !buf.is_empty() {
let n = match reader.read(buf).await {
Ok(n) => n,
Err(e) => {
if total == 0 {
return Err(e);
}
return Err(std::io::Error::new(
std::io::ErrorKind::UnexpectedEof,
format!("read {} bytes, error: {}", total, e),
));
}
};
if n == 0 {
if total > 0 {
return Ok(total);
}
return Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "early EOF"));
}
buf = &mut buf[n..];
total += n;
}
Ok(total)
}
/// Encodes a u64 into buf and returns the number of bytes written.
/// Panics if buf is too small.
pub fn put_uvarint(buf: &mut [u8], x: u64) -> usize {
let mut i = 0;
let mut x = x;
while x >= 0x80 {
buf[i] = (x as u8) | 0x80;
x >>= 7;
i += 1;
}
buf[i] = x as u8;
i + 1
}
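// Worked example: 300 (binary 1_0101100) encodes as [0xAC, 0x02] — the low
// seven bits with the continuation bit (0x80) set, then the remaining bits.
/// Returns the number of bytes `put_uvarint` would write for `x`.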
pub fn put_uvarint_len(x: u64) -> usize {
let mut i = 0;
let mut x = x;
while x >= 0x80 {
x >>= 7;
i += 1;
}
i + 1
}
/// Decodes a u64 from buf and returns (value, number of bytes read).
/// If buf is too small, returns (0, 0).
/// If overflow, returns (0, -(n as isize)), where n is the number of bytes read.
pub fn uvarint(buf: &[u8]) -> (u64, isize) {
let mut x: u64 = 0;
let mut s: u32 = 0;
for (i, &b) in buf.iter().enumerate() {
if i == 10 {
// MaxVarintLen64 = 10
return (0, -((i + 1) as isize));
}
if b < 0x80 {
if i == 9 && b > 1 {
return (0, -((i + 1) as isize));
}
return (x | ((b as u64) << s), (i + 1) as isize);
}
x |= ((b & 0x7F) as u64) << s;
s += 7;
}
(0, 0)
}
#[cfg(test)]
mod tests {
use super::*;
use tokio::io::BufReader;
#[tokio::test]
async fn test_read_full_exact() {
// let data = b"abcdef";
let data = b"channel async callback test data!";
let mut reader = BufReader::new(&data[..]);
let size = data.len();
let mut total = 0;
let mut rev = vec![0u8; size];
let mut count = 0;
while total < size {
let mut buf = [0u8; 8];
let n = read_full(&mut reader, &mut buf).await.unwrap();
total += n;
rev[total - n..total].copy_from_slice(&buf[..n]);
count += 1;
println!("count: {}, total: {}, n: {}", count, total, n);
}
assert_eq!(total, size);
assert_eq!(&rev, data);
}
#[tokio::test]
async fn test_read_full_short() {
let data = b"abc";
let mut reader = BufReader::new(&data[..]);
let mut buf = [0u8; 6];
let n = read_full(&mut reader, &mut buf).await.unwrap();
assert_eq!(n, 3);
assert_eq!(&buf[..n], data);
}
#[tokio::test]
async fn test_read_full_1m() {
let size = 1024 * 1024;
let data = vec![42u8; size];
let mut reader = BufReader::new(&data[..]);
let mut buf = vec![0u8; size / 3];
read_full(&mut reader, &mut buf).await.unwrap();
assert_eq!(buf, data[..size / 3]);
}
#[test]
fn test_put_uvarint_and_uvarint_zero() {
let mut buf = [0u8; 16];
let n = put_uvarint(&mut buf, 0);
let (decoded, m) = uvarint(&buf[..n]);
assert_eq!(decoded, 0);
assert_eq!(m as usize, n);
}
#[test]
fn test_put_uvarint_and_uvarint_max() {
let mut buf = [0u8; 16];
let n = put_uvarint(&mut buf, u64::MAX);
let (decoded, m) = uvarint(&buf[..n]);
assert_eq!(decoded, u64::MAX);
assert_eq!(m as usize, n);
}
#[test]
fn test_put_uvarint_and_uvarint_various() {
let mut buf = [0u8; 16];
for &v in &[1u64, 127, 128, 255, 300, 16384, u32::MAX as u64] {
let n = put_uvarint(&mut buf, v);
let (decoded, m) = uvarint(&buf[..n]);
assert_eq!(decoded, v, "decode mismatch for {}", v);
assert_eq!(m as usize, n, "length mismatch for {}", v);
}
}
#[test]
fn test_uvarint_incomplete() {
let buf = [0x80u8, 0x80, 0x80];
let (v, n) = uvarint(&buf);
assert_eq!(v, 0);
assert_eq!(n, 0);
}
#[test]
fn test_uvarint_overflow_case() {
let buf = [0xFFu8; 11];
let (v, n) = uvarint(&buf);
assert_eq!(v, 0);
assert!(n < 0);
}
#[tokio::test]
async fn test_write_all_basic() {
let data = b"hello world!";
let mut buf = Vec::new();
let n = write_all(&mut buf, data).await.unwrap();
assert_eq!(n, data.len());
assert_eq!(&buf, data);
}
#[tokio::test]
async fn test_write_all_partial() {
struct PartialWriter {
inner: Vec<u8>,
max_write: usize,
}
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::AsyncWrite;
impl AsyncWrite for PartialWriter {
fn poll_write(mut self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &[u8]) -> Poll<std::io::Result<usize>> {
let n = buf.len().min(self.max_write);
self.inner.extend_from_slice(&buf[..n]);
Poll::Ready(Ok(n))
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
Poll::Ready(Ok(()))
}
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
Poll::Ready(Ok(()))
}
}
let data = b"abcdefghijklmnopqrstuvwxyz";
let mut writer = PartialWriter {
inner: Vec::new(),
max_write: 5,
};
let n = write_all(&mut writer, data).await.unwrap();
assert_eq!(n, data.len());
assert_eq!(&writer.inner, data);
}
}

View File

@@ -4,8 +4,26 @@ mod certs;
mod ip;
#[cfg(feature = "net")]
mod net;
#[cfg(feature = "net")]
pub use net::*;
#[cfg(feature = "io")]
mod io;
#[cfg(feature = "hash")]
mod hash;
#[cfg(feature = "os")]
pub mod os;
#[cfg(feature = "path")]
pub mod path;
#[cfg(feature = "tls")]
pub use certs::*;
#[cfg(feature = "hash")]
pub use hash::*;
#[cfg(feature = "io")]
pub use io::*;
#[cfg(feature = "ip")]
pub use ip::*;

View File

@@ -1 +1,499 @@
use lazy_static::lazy_static;
use std::{
collections::HashSet,
fmt::Display,
net::{IpAddr, Ipv6Addr, SocketAddr, TcpListener, ToSocketAddrs},
};
use url::Host;
lazy_static! {
static ref LOCAL_IPS: Vec<IpAddr> = must_get_local_ips().unwrap();
}
/// helper for validating if the provided arg is an ip address.
pub fn is_socket_addr(addr: &str) -> bool {
// TODO IPv6 zone information?
addr.parse::<SocketAddr>().is_ok() || addr.parse::<IpAddr>().is_ok()
}
/// checks if server_addr is valid and local host.
pub fn check_local_server_addr(server_addr: &str) -> std::io::Result<SocketAddr> {
let addr: Vec<SocketAddr> = match server_addr.to_socket_addrs() {
Ok(addr) => addr.collect(),
Err(err) => return Err(std::io::Error::other(err)),
};
// 0.0.0.0 is a wildcard address and refers to local network
// addresses. I.e, 0.0.0.0:9000 like ":9000" refers to port
// 9000 on localhost.
for a in addr {
if a.ip().is_unspecified() {
return Ok(a);
}
let host = match a {
SocketAddr::V4(a) => Host::<&str>::Ipv4(*a.ip()),
SocketAddr::V6(a) => Host::Ipv6(*a.ip()),
};
if is_local_host(host, 0, 0)? {
return Ok(a);
}
}
Err(std::io::Error::other("host in server address should be this server"))
}
/// checks if the given parameter correspond to one of
/// the local IP of the current machine
pub fn is_local_host(host: Host<&str>, port: u16, local_port: u16) -> std::io::Result<bool> {
let local_set: HashSet<IpAddr> = LOCAL_IPS.iter().copied().collect();
let is_local_host = match host {
Host::Domain(domain) => {
let ips = match (domain, 0).to_socket_addrs().map(|v| v.map(|v| v.ip()).collect::<Vec<_>>()) {
Ok(ips) => ips,
Err(err) => return Err(std::io::Error::other(err)),
};
ips.iter().any(|ip| local_set.contains(ip))
}
Host::Ipv4(ip) => local_set.contains(&IpAddr::V4(ip)),
Host::Ipv6(ip) => local_set.contains(&IpAddr::V6(ip)),
};
if port > 0 {
return Ok(is_local_host && port == local_port);
}
Ok(is_local_host)
}
/// returns IP address of given host.
pub fn get_host_ip(host: Host<&str>) -> std::io::Result<HashSet<IpAddr>> {
match host {
Host::Domain(domain) => match (domain, 0)
.to_socket_addrs()
.map(|v| v.map(|v| v.ip()).collect::<HashSet<_>>())
{
Ok(ips) => Ok(ips),
Err(err) => Err(std::io::Error::other(err)),
},
Host::Ipv4(ip) => {
let mut set = HashSet::with_capacity(1);
set.insert(IpAddr::V4(ip));
Ok(set)
}
Host::Ipv6(ip) => {
let mut set = HashSet::with_capacity(1);
set.insert(IpAddr::V6(ip));
Ok(set)
}
}
}
pub fn get_available_port() -> u16 {
TcpListener::bind("0.0.0.0:0").unwrap().local_addr().unwrap().port()
}
/// returns IPs of local interface
pub(crate) fn must_get_local_ips() -> std::io::Result<Vec<IpAddr>> {
match netif::up() {
Ok(up) => Ok(up.map(|x| x.address().to_owned()).collect()),
Err(err) => Err(std::io::Error::other(format!("Unable to get IP addresses of this host: {}", err))),
}
}
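/// A host/port pair parsed from a "host:port" string; `is_port_set` records
/// whether the source carried a non-zero port.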
#[derive(Debug, Clone)]
pub struct XHost {
pub name: String,
pub port: u16,
pub is_port_set: bool,
}
impl Display for XHost {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if !self.is_port_set {
write!(f, "{}", self.name)
} else if self.name.contains(':') {
write!(f, "[{}]:{}", self.name, self.port)
} else {
write!(f, "{}:{}", self.name, self.port)
}
}
}
impl TryFrom<String> for XHost {
type Error = std::io::Error;
fn try_from(value: String) -> std::result::Result<Self, Self::Error> {
if let Some(addr) = value.to_socket_addrs()?.next() {
Ok(Self {
name: addr.ip().to_string(),
port: addr.port(),
is_port_set: addr.port() > 0,
})
} else {
Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "value invalid"))
}
}
}
/// Parses the address string, handling the ":port" form for dual-stack binding,
/// and resolves the host name or IP address. If the port is 0, an available port is assigned.
pub fn parse_and_resolve_address(addr_str: &str) -> std::io::Result<SocketAddr> {
let resolved_addr: SocketAddr = if let Some(port_str) = addr_str.strip_prefix(':') {
// Handle the ":port" form for dual-stack binding
let port: u16 = port_str
.parse()
.map_err(|e| std::io::Error::other(format!("Invalid port format: {}, err:{:?}", addr_str, e)))?;
let final_port = if port == 0 {
get_available_port() // pick an ephemeral port
} else {
port
};
// Bind the IPv6 unspecified address [::]; on dual-stack systems this accepts both IPv4 and IPv6
SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), final_port)
} else {
// Use existing logic to handle regular address formats
let mut addr = check_local_server_addr(addr_str)?; // validates that the host refers to this server
if addr.port() == 0 {
addr.set_port(get_available_port());
}
addr
};
Ok(resolved_addr)
}
#[cfg(test)]
mod test {
use std::net::{Ipv4Addr, Ipv6Addr};
use super::*;
#[test]
fn test_is_socket_addr() {
let test_cases = [
// Valid IP addresses
("192.168.1.0", true),
("127.0.0.1", true),
("10.0.0.1", true),
("0.0.0.0", true),
("255.255.255.255", true),
// Valid IPv6 addresses
("2001:db8::1", true),
("::1", true),
("::", true),
("fe80::1", true),
// Valid socket addresses
("192.168.1.0:8080", true),
("127.0.0.1:9000", true),
("[2001:db8::1]:9000", true),
("[::1]:8080", true),
("0.0.0.0:0", true),
// Invalid addresses
("localhost", false),
("localhost:9000", false),
("example.com", false),
("example.com:8080", false),
("http://192.168.1.0", false),
("http://192.168.1.0:9000", false),
("256.256.256.256", false),
("192.168.1", false),
("192.168.1.0.1", false),
("", false),
(":", false),
(":::", false),
("invalid_ip", false),
];
for (addr, expected) in test_cases {
let result = is_socket_addr(addr);
assert_eq!(expected, result, "addr: '{}', expected: {}, got: {}", addr, expected, result);
}
}
#[test]
fn test_check_local_server_addr() {
// Test valid local addresses
let valid_cases = ["localhost:54321", "127.0.0.1:9000", "0.0.0.0:9000", "[::1]:8080", "::1:8080"];
for addr in valid_cases {
let result = check_local_server_addr(addr);
assert!(result.is_ok(), "Expected '{}' to be valid, but got error: {:?}", addr, result);
}
// Test invalid addresses
let invalid_cases = [
("localhost", "invalid socket address"),
("", "invalid socket address"),
("example.org:54321", "host in server address should be this server"),
("8.8.8.8:53", "host in server address should be this server"),
(":-10", "invalid port value"),
("invalid:port", "invalid port value"),
];
for (addr, expected_error_pattern) in invalid_cases {
let result = check_local_server_addr(addr);
assert!(result.is_err(), "Expected '{}' to be invalid, but it was accepted: {:?}", addr, result);
let error_msg = result.unwrap_err().to_string();
assert!(
error_msg.contains(expected_error_pattern) || error_msg.contains("invalid socket address"),
"Error message '{}' doesn't contain expected pattern '{}' for address '{}'",
error_msg,
expected_error_pattern,
addr
);
}
}
#[test]
fn test_is_local_host() {
// Test localhost domain
let localhost_host = Host::Domain("localhost");
assert!(is_local_host(localhost_host, 0, 0).unwrap());
// Test loopback IP addresses
let ipv4_loopback = Host::Ipv4(Ipv4Addr::new(127, 0, 0, 1));
assert!(is_local_host(ipv4_loopback, 0, 0).unwrap());
let ipv6_loopback = Host::Ipv6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
assert!(is_local_host(ipv6_loopback, 0, 0).unwrap());
// Test port matching
let localhost_with_port1 = Host::Domain("localhost");
assert!(is_local_host(localhost_with_port1, 8080, 8080).unwrap());
let localhost_with_port2 = Host::Domain("localhost");
assert!(!is_local_host(localhost_with_port2, 8080, 9000).unwrap());
// Test non-local host
let external_host = Host::Ipv4(Ipv4Addr::new(8, 8, 8, 8));
assert!(!is_local_host(external_host, 0, 0).unwrap());
// Test invalid domain should return error
let invalid_host = Host::Domain("invalid.nonexistent.domain.example");
assert!(is_local_host(invalid_host, 0, 0).is_err());
}
#[test]
fn test_get_host_ip() {
// Test IPv4 address
let ipv4_host = Host::Ipv4(Ipv4Addr::new(192, 168, 1, 1));
let ipv4_result = get_host_ip(ipv4_host).unwrap();
assert_eq!(ipv4_result.len(), 1);
assert!(ipv4_result.contains(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1))));
// Test IPv6 address
let ipv6_host = Host::Ipv6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1));
let ipv6_result = get_host_ip(ipv6_host).unwrap();
assert_eq!(ipv6_result.len(), 1);
assert!(ipv6_result.contains(&IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1))));
// Test localhost domain
let localhost_host = Host::Domain("localhost");
let localhost_result = get_host_ip(localhost_host).unwrap();
assert!(!localhost_result.is_empty());
// Should contain at least loopback address
assert!(
localhost_result.contains(&IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)))
|| localhost_result.contains(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)))
);
// Test invalid domain
let invalid_host = Host::Domain("invalid.nonexistent.domain.example");
assert!(get_host_ip(invalid_host).is_err());
}
#[test]
fn test_get_available_port() {
let port1 = get_available_port();
let port2 = get_available_port();
// Ports should be non-zero (the u16 type already caps them at 65535)
assert!(port1 > 0);
assert!(port2 > 0);
// Different calls should typically return different ports
assert_ne!(port1, port2);
}
#[test]
fn test_must_get_local_ips() {
let local_ips = must_get_local_ips().unwrap();
let local_set: HashSet<IpAddr> = local_ips.into_iter().collect();
// Should contain loopback addresses
assert!(local_set.contains(&IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))));
// Should not be empty
assert!(!local_set.is_empty());
// All IPs should be valid
for ip in &local_set {
match ip {
IpAddr::V4(_) | IpAddr::V6(_) => {} // Valid
}
}
}
#[test]
fn test_xhost_display() {
// Test without port
let host_no_port = XHost {
name: "example.com".to_string(),
port: 0,
is_port_set: false,
};
assert_eq!(host_no_port.to_string(), "example.com");
// Test with port (IPv4-like name)
let host_with_port = XHost {
name: "192.168.1.1".to_string(),
port: 8080,
is_port_set: true,
};
assert_eq!(host_with_port.to_string(), "192.168.1.1:8080");
// Test with port (IPv6-like name)
let host_ipv6_with_port = XHost {
name: "2001:db8::1".to_string(),
port: 9000,
is_port_set: true,
};
assert_eq!(host_ipv6_with_port.to_string(), "[2001:db8::1]:9000");
// Test domain name with port
let host_domain_with_port = XHost {
name: "example.com".to_string(),
port: 443,
is_port_set: true,
};
assert_eq!(host_domain_with_port.to_string(), "example.com:443");
}
#[test]
fn test_xhost_try_from() {
// Test valid IPv4 address with port
let result = XHost::try_from("192.168.1.1:8080".to_string()).unwrap();
assert_eq!(result.name, "192.168.1.1");
assert_eq!(result.port, 8080);
assert!(result.is_port_set);
// Test valid IPv4 address without port
let result = XHost::try_from("192.168.1.1:0".to_string()).unwrap();
assert_eq!(result.name, "192.168.1.1");
assert_eq!(result.port, 0);
assert!(!result.is_port_set);
// Test valid IPv6 address with port
let result = XHost::try_from("[2001:db8::1]:9000".to_string()).unwrap();
assert_eq!(result.name, "2001:db8::1");
assert_eq!(result.port, 9000);
assert!(result.is_port_set);
// Test localhost with port (localhost may resolve to either IPv4 or IPv6)
let result = XHost::try_from("localhost:3000".to_string()).unwrap();
// localhost can resolve to either 127.0.0.1 or ::1 depending on system configuration
assert!(result.name == "127.0.0.1" || result.name == "::1");
assert_eq!(result.port, 3000);
assert!(result.is_port_set);
// Test invalid format
let result = XHost::try_from("invalid_format".to_string());
assert!(result.is_err());
// Test empty string
let result = XHost::try_from("".to_string());
assert!(result.is_err());
}
#[test]
fn test_parse_and_resolve_address() {
// Test port-only format
let result = parse_and_resolve_address(":8080").unwrap();
assert_eq!(result.ip(), IpAddr::V6(Ipv6Addr::UNSPECIFIED));
assert_eq!(result.port(), 8080);
// Test port-only format with port 0 (should get available port)
let result = parse_and_resolve_address(":0").unwrap();
assert_eq!(result.ip(), IpAddr::V6(Ipv6Addr::UNSPECIFIED));
assert!(result.port() > 0);
// Test localhost with port
let result = parse_and_resolve_address("localhost:9000").unwrap();
assert_eq!(result.port(), 9000);
// Test localhost with port 0 (should get available port)
let result = parse_and_resolve_address("localhost:0").unwrap();
assert!(result.port() > 0);
// Test 0.0.0.0 with port
let result = parse_and_resolve_address("0.0.0.0:7000").unwrap();
assert_eq!(result.ip(), IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)));
assert_eq!(result.port(), 7000);
// Test invalid port format
let result = parse_and_resolve_address(":invalid_port");
assert!(result.is_err());
// Test invalid address
let result = parse_and_resolve_address("example.org:8080");
assert!(result.is_err());
}
#[test]
fn test_edge_cases() {
// Test empty string for is_socket_addr
assert!(!is_socket_addr(""));
// Test single colon for is_socket_addr
assert!(!is_socket_addr(":"));
// Test malformed IPv6 for is_socket_addr
assert!(!is_socket_addr("[::]"));
assert!(!is_socket_addr("[::1"));
// Test very long strings
let long_string = "a".repeat(1000);
assert!(!is_socket_addr(&long_string));
// Test unicode characters
assert!(!is_socket_addr("测试.example.com"));
// Test special characters
assert!(!is_socket_addr("test@example.com:8080"));
assert!(!is_socket_addr("http://example.com:8080"));
}
#[test]
fn test_boundary_values() {
// Test port boundaries
assert!(is_socket_addr("127.0.0.1:0"));
assert!(is_socket_addr("127.0.0.1:65535"));
assert!(!is_socket_addr("127.0.0.1:65536"));
// Test IPv4 boundaries
assert!(is_socket_addr("0.0.0.0"));
assert!(is_socket_addr("255.255.255.255"));
assert!(!is_socket_addr("256.0.0.0"));
assert!(!is_socket_addr("0.0.0.256"));
// Test XHost with boundary ports
let host_max_port = XHost {
name: "example.com".to_string(),
port: 65535,
is_port_set: true,
};
assert_eq!(host_max_port.to_string(), "example.com:65535");
let host_zero_port = XHost {
name: "example.com".to_string(),
port: 0,
is_port_set: true,
};
assert_eq!(host_zero_port.to_string(), "example.com:0");
}
}

View File

@@ -0,0 +1,185 @@
use nix::sys::stat::{self, stat};
use nix::sys::statfs::{self, statfs, FsType};
use std::fs::File;
use std::io::{self, BufRead, Error, ErrorKind};
use std::path::Path;
use super::{DiskInfo, IOStats};
/// Returns total and free bytes available in a directory, e.g. `/`.
pub fn get_info(p: impl AsRef<Path>) -> std::io::Result<DiskInfo> {
let stat_fs = statfs(p.as_ref())?;
let bsize = stat_fs.block_size() as u64;
let bfree = stat_fs.blocks_free() as u64;
let bavail = stat_fs.blocks_available() as u64;
let blocks = stat_fs.blocks() as u64;
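// f_bfree - f_bavail is the space reserved for privileged use; exclude it so
// `total` reflects what unprivileged callers can actually allocate.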
let reserved = match bfree.checked_sub(bavail) {
Some(reserved) => reserved,
None => {
return Err(Error::other(format!(
"detected f_bavail space ({}) > f_bfree space ({}), fs corruption at ({}). please run 'fsck'",
bavail,
bfree,
p.as_ref().display()
)))
}
};
let total = match blocks.checked_sub(reserved) {
Some(total) => total * bsize,
None => {
return Err(Error::other(format!(
"detected reserved space ({}) > blocks space ({}), fs corruption at ({}). please run 'fsck'",
reserved,
blocks,
p.as_ref().display()
)))
}
};
let free = bavail * bsize;
let used = match total.checked_sub(free) {
Some(used) => used,
None => {
return Err(Error::other(format!(
"detected free space ({}) > total drive space ({}), fs corruption at ({}). please run 'fsck'",
free,
total,
p.as_ref().display()
)))
}
};
let st = stat(p.as_ref())?;
Ok(DiskInfo {
total,
free,
used,
files: stat_fs.files(),
ffree: stat_fs.files_free(),
fstype: get_fs_type(stat_fs.filesystem_type()).to_string(),
major: stat::major(st.st_dev),
minor: stat::minor(st.st_dev),
..Default::default()
})
}
/// Returns the filesystem type of the underlying mounted filesystem
///
/// TODO The following mapping could not find the corresponding constant in `nix`:
///
/// "137d" => "EXT",
/// "4244" => "HFS",
/// "5346544e" => "NTFS",
/// "61756673" => "AUFS",
/// "ef51" => "EXT2OLD",
/// "2fc12fc1" => "zfs",
/// "ff534d42" => "cifs",
/// "53464846" => "wslfs",
fn get_fs_type(fs_type: FsType) -> &'static str {
match fs_type {
statfs::TMPFS_MAGIC => "TMPFS",
statfs::MSDOS_SUPER_MAGIC => "MSDOS",
// statfs::XFS_SUPER_MAGIC => "XFS",
statfs::NFS_SUPER_MAGIC => "NFS",
statfs::EXT4_SUPER_MAGIC => "EXT4",
statfs::ECRYPTFS_SUPER_MAGIC => "ecryptfs",
statfs::OVERLAYFS_SUPER_MAGIC => "overlayfs",
statfs::REISERFS_SUPER_MAGIC => "REISERFS",
_ => "UNKNOWN",
}
}
pub fn same_disk(disk1: &str, disk2: &str) -> std::io::Result<bool> {
let stat1 = stat(disk1)?;
let stat2 = stat(disk2)?;
Ok(stat1.st_dev == stat2.st_dev)
}
pub fn get_drive_stats(major: u32, minor: u32) -> std::io::Result<IOStats> {
read_drive_stats(&format!("/sys/dev/block/{}:{}/stat", major, minor))
}
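/// Parses a kernel block-device stat file: at least 11 whitespace-separated
/// counters (reads, merges, sectors and ticks for read/write, in-flight IOs,
/// total ticks, queue ticks), with four discard counters appended on newer kernels.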
fn read_drive_stats(stats_file: &str) -> std::io::Result<IOStats> {
let stats = read_stat(stats_file)?;
if stats.len() < 11 {
return Err(Error::new(
ErrorKind::InvalidData,
format!("found invalid format while reading {}", stats_file),
));
}
let mut io_stats = IOStats {
read_ios: stats[0],
read_merges: stats[1],
read_sectors: stats[2],
read_ticks: stats[3],
write_ios: stats[4],
write_merges: stats[5],
write_sectors: stats[6],
write_ticks: stats[7],
current_ios: stats[8],
total_ticks: stats[9],
req_ticks: stats[10],
..Default::default()
};
if stats.len() > 14 {
io_stats.discard_ios = stats[11];
io_stats.discard_merges = stats[12];
io_stats.discard_sectors = stats[13];
io_stats.discard_ticks = stats[14];
}
Ok(io_stats)
}
fn read_stat(file_name: &str) -> std::io::Result<Vec<u64>> {
// Open file
let path = Path::new(file_name);
let file = File::open(path)?;
// Create a BufReader
let reader = io::BufReader::new(file);
// Read first line
let mut stats = Vec::new();
if let Some(line) = reader.lines().next() {
let line = line?;
// Split line and parse as u64
// https://rust-lang.github.io/rust-clippy/master/index.html#trim_split_whitespace
for token in line.split_whitespace() {
let ui64: u64 = token
.parse()
.map_err(|e| Error::new(ErrorKind::InvalidData, format!("failed to parse '{}' as u64: {}", token, e)))?;
stats.push(ui64);
}
}
Ok(stats)
}
#[cfg(test)]
mod test {
use super::get_drive_stats;
#[ignore] // FIXME: failed in github actions
#[test]
fn test_stats() {
let major = 7;
let minor = 11;
let s = get_drive_stats(major, minor).unwrap();
println!("{:?}", s);
}
}

110
crates/utils/src/os/mod.rs Normal file
View File

@@ -0,0 +1,110 @@
#[cfg(target_os = "linux")]
mod linux;
#[cfg(all(unix, not(target_os = "linux")))]
mod unix;
#[cfg(target_os = "windows")]
mod windows;
#[cfg(target_os = "linux")]
pub use linux::{get_drive_stats, get_info, same_disk};
// pub use linux::same_disk;
#[cfg(all(unix, not(target_os = "linux")))]
pub use unix::{get_drive_stats, get_info, same_disk};
#[cfg(target_os = "windows")]
pub use windows::{get_drive_stats, get_info, same_disk};
#[derive(Debug, Default, PartialEq)]
pub struct IOStats {
pub read_ios: u64,
pub read_merges: u64,
pub read_sectors: u64,
pub read_ticks: u64,
pub write_ios: u64,
pub write_merges: u64,
pub write_sectors: u64,
pub write_ticks: u64,
pub current_ios: u64,
pub total_ticks: u64,
pub req_ticks: u64,
pub discard_ios: u64,
pub discard_merges: u64,
pub discard_sectors: u64,
pub discard_ticks: u64,
pub flush_ios: u64,
pub flush_ticks: u64,
}
#[derive(Debug, Default, PartialEq)]
pub struct DiskInfo {
pub total: u64,
pub free: u64,
pub used: u64,
pub files: u64,
pub ffree: u64,
pub fstype: String,
pub major: u64,
pub minor: u64,
pub name: String,
pub rotational: bool,
pub nrrequests: u64,
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
#[test]
fn test_get_info_valid_path() {
let temp_dir = tempfile::tempdir().unwrap();
let info = get_info(temp_dir.path()).unwrap();
println!("Disk Info: {:?}", info);
assert!(info.total > 0);
assert!(info.free > 0);
assert!(info.used > 0);
assert!(info.files > 0);
assert!(info.ffree > 0);
assert!(!info.fstype.is_empty());
}
#[test]
fn test_get_info_invalid_path() {
let invalid_path = PathBuf::from("/invalid/path");
let result = get_info(&invalid_path);
assert!(result.is_err());
}
#[test]
fn test_same_disk_same_path() {
let temp_dir = tempfile::tempdir().unwrap();
let path = temp_dir.path().to_str().unwrap();
let result = same_disk(path, path).unwrap();
assert!(result);
}
#[test]
fn test_same_disk_different_paths() {
let temp_dir1 = tempfile::tempdir().unwrap();
let temp_dir2 = tempfile::tempdir().unwrap();
let path1 = temp_dir1.path().to_str().unwrap();
let path2 = temp_dir2.path().to_str().unwrap();
let result = same_disk(path1, path2).unwrap();
// Since both temporary directories are created in the same file system,
// they should be on the same disk in most cases
println!("Path1: {}, Path2: {}, Same disk: {}", path1, path2, result);
// Test passes if the function doesn't panic - the actual result depends on test environment
}
#[test]
#[cfg(not(target_os = "linux"))] // on Linux this would read /sys/dev/block/0:0/stat, which does not exist
fn test_get_drive_stats_default() {
let stats = get_drive_stats(0, 0).unwrap();
assert_eq!(stats, IOStats::default());
}
}

View File

@@ -0,0 +1,72 @@
use super::{DiskInfo, IOStats};
use nix::sys::{stat::stat, statfs::statfs};
use std::io::Error;
use std::path::Path;
/// Returns total and free bytes available in a directory, e.g. `/`.
pub fn get_info(p: impl AsRef<Path>) -> std::io::Result<DiskInfo> {
let stat = statfs(p.as_ref())?;
let bsize = stat.block_size() as u64;
let bfree = stat.blocks_free() as u64;
let bavail = stat.blocks_available() as u64;
let blocks = stat.blocks() as u64;
let reserved = match bfree.checked_sub(bavail) {
Some(reserved) => reserved,
None => {
return Err(Error::other(format!(
"detected f_bavail space ({}) > f_bfree space ({}), fs corruption at ({}). please run 'fsck'",
bavail,
bfree,
p.as_ref().display()
)))
}
};
let total = match blocks.checked_sub(reserved) {
Some(total) => total * bsize,
None => {
return Err(Error::other(format!(
"detected reserved space ({}) > blocks space ({}), fs corruption at ({}). please run 'fsck'",
reserved,
blocks,
p.as_ref().display()
)))
}
};
let free = bavail * bsize;
let used = match total.checked_sub(free) {
Some(used) => used,
None => {
return Err(Error::other(format!(
"detected free space ({}) > total drive space ({}), fs corruption at ({}). please run 'fsck'",
free,
total,
p.as_ref().display()
)))
}
};
Ok(DiskInfo {
total,
free,
used,
files: stat.files(),
ffree: stat.files_free(),
fstype: stat.filesystem_type_name().to_string(),
..Default::default()
})
}
pub fn same_disk(disk1: &str, disk2: &str) -> std::io::Result<bool> {
let stat1 = stat(disk1)?;
let stat2 = stat(disk2)?;
Ok(stat1.st_dev == stat2.st_dev)
}
pub fn get_drive_stats(_major: u32, _minor: u32) -> std::io::Result<IOStats> {
Ok(IOStats::default())
}

View File

@@ -0,0 +1,142 @@
#![allow(unsafe_code)] // TODO: audit unsafe code
use super::{DiskInfo, IOStats};
use std::io::Error;
use std::mem;
use std::os::windows::ffi::OsStrExt;
use std::path::Path;
use winapi::shared::minwindef::{DWORD, MAX_PATH};
use winapi::shared::ntdef::ULARGE_INTEGER;
use winapi::um::fileapi::{GetDiskFreeSpaceExW, GetDiskFreeSpaceW, GetVolumeInformationW, GetVolumePathNameW};
use winapi::um::winnt::WCHAR;
/// Returns total and free bytes available in a directory, e.g. `C:\`.
pub fn get_info(p: impl AsRef<Path>) -> std::io::Result<DiskInfo> {
let path_wide: Vec<WCHAR> = p
.as_ref()
.canonicalize()?
.into_os_string()
.encode_wide()
.chain(std::iter::once(0)) // Null-terminate the string
.collect();
let mut lp_free_bytes_available: ULARGE_INTEGER = unsafe { mem::zeroed() };
let mut lp_total_number_of_bytes: ULARGE_INTEGER = unsafe { mem::zeroed() };
let mut lp_total_number_of_free_bytes: ULARGE_INTEGER = unsafe { mem::zeroed() };
let success = unsafe {
GetDiskFreeSpaceExW(
path_wide.as_ptr(),
&mut lp_free_bytes_available,
&mut lp_total_number_of_bytes,
&mut lp_total_number_of_free_bytes,
)
};
if success == 0 {
return Err(Error::last_os_error());
}
let total = unsafe { *lp_total_number_of_bytes.QuadPart() };
let free = unsafe { *lp_total_number_of_free_bytes.QuadPart() };
if free > total {
return Err(Error::other(format!(
"detected free space ({}) > total drive space ({}), fs corruption at ({}). please run 'fsck'",
free,
total,
p.as_ref().display()
)));
}
let mut lp_sectors_per_cluster: DWORD = 0;
let mut lp_bytes_per_sector: DWORD = 0;
let mut lp_number_of_free_clusters: DWORD = 0;
let mut lp_total_number_of_clusters: DWORD = 0;
let success = unsafe {
GetDiskFreeSpaceW(
path_wide.as_ptr(),
&mut lp_sectors_per_cluster,
&mut lp_bytes_per_sector,
&mut lp_number_of_free_clusters,
&mut lp_total_number_of_clusters,
)
};
if success == 0 {
return Err(Error::last_os_error());
}
Ok(DiskInfo {
total,
free,
used: total - free,
files: lp_total_number_of_clusters as u64,
ffree: lp_number_of_free_clusters as u64,
fstype: get_fs_type(&path_wide)?,
..Default::default()
})
}
/// Returns the leading volume name for the given path.
fn get_volume_name(v: &[WCHAR]) -> std::io::Result<[WCHAR; MAX_PATH]> {
let volume_name_size: DWORD = MAX_PATH as _;
let mut lp_volume_name_buffer: [WCHAR; MAX_PATH] = [0; MAX_PATH];
let success = unsafe { GetVolumePathNameW(v.as_ptr(), lp_volume_name_buffer.as_mut_ptr(), volume_name_size) };
if success == 0 {
return Err(Error::last_os_error());
}
// Return the buffer by value: the previous `Ok(lp_volume_name_buffer.as_ptr())`
// handed back a pointer into this stack frame, which dangles once we return.
Ok(lp_volume_name_buffer)
}
fn utf16_to_string(v: &[WCHAR]) -> String {
let len = v.iter().position(|&x| x == 0).unwrap_or(v.len());
String::from_utf16_lossy(&v[..len])
}
/// Returns the filesystem type of the underlying mounted filesystem
fn get_fs_type(p: &[WCHAR]) -> std::io::Result<String> {
let path = get_volume_name(p)?;
let volume_name_size: DWORD = MAX_PATH as _;
let n_file_system_name_size: DWORD = MAX_PATH as _;
let mut lp_volume_serial_number: DWORD = 0;
let mut lp_maximum_component_length: DWORD = 0;
let mut lp_file_system_flags: DWORD = 0;
let mut lp_volume_name_buffer: [WCHAR; MAX_PATH] = [0; MAX_PATH];
let mut lp_file_system_name_buffer: [WCHAR; MAX_PATH] = [0; MAX_PATH];
let success = unsafe {
GetVolumeInformationW(
path.as_ptr(),
lp_volume_name_buffer.as_mut_ptr(),
volume_name_size,
&mut lp_volume_serial_number,
&mut lp_maximum_component_length,
&mut lp_file_system_flags,
lp_file_system_name_buffer.as_mut_ptr(),
n_file_system_name_size,
)
};
if success == 0 {
return Err(Error::last_os_error());
}
Ok(utf16_to_string(&lp_file_system_name_buffer))
}
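// Not implemented on Windows yet: device identity is not compared, so distinct
// paths are always reported as different disks.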
pub fn same_disk(_disk1: &str, _disk2: &str) -> std::io::Result<bool> {
Ok(false)
}
pub fn get_drive_stats(_major: u32, _minor: u32) -> std::io::Result<IOStats> {
Ok(IOStats::default())
}

308
crates/utils/src/path.rs Normal file
View File

@@ -0,0 +1,308 @@
use std::path::Path;
use std::path::PathBuf;
pub const GLOBAL_DIR_SUFFIX: &str = "__XLDIR__";
pub const SLASH_SEPARATOR: &str = "/";
pub const GLOBAL_DIR_SUFFIX_WITH_SLASH: &str = "__XLDIR__/";
pub fn has_suffix(s: &str, suffix: &str) -> bool {
if cfg!(target_os = "windows") {
s.to_lowercase().ends_with(&suffix.to_lowercase())
} else {
s.ends_with(suffix)
}
}
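/// Encodes a directory object ("prefix/") by replacing the trailing slash with
/// the __XLDIR__ marker so it can be stored under a regular object key.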
pub fn encode_dir_object(object: &str) -> String {
if has_suffix(object, SLASH_SEPARATOR) {
format!("{}{}", object.trim_end_matches(SLASH_SEPARATOR), GLOBAL_DIR_SUFFIX)
} else {
object.to_string()
}
}
pub fn is_dir_object(object: &str) -> bool {
let obj = encode_dir_object(object);
obj.ends_with(GLOBAL_DIR_SUFFIX)
}
#[allow(dead_code)]
pub fn decode_dir_object(object: &str) -> String {
if has_suffix(object, GLOBAL_DIR_SUFFIX) {
format!("{}{}", object.trim_end_matches(GLOBAL_DIR_SUFFIX), SLASH_SEPARATOR)
} else {
object.to_string()
}
}
pub fn retain_slash(s: &str) -> String {
if s.is_empty() {
return s.to_string();
}
if s.ends_with(SLASH_SEPARATOR) {
s.to_string()
} else {
format!("{}{}", s, SLASH_SEPARATOR)
}
}
pub fn strings_has_prefix_fold(s: &str, prefix: &str) -> bool {
// ASCII-case-insensitive prefix check; `get` avoids panicking when the cut
// would fall inside a multi-byte character.
s.get(..prefix.len()).is_some_and(|p| p.eq_ignore_ascii_case(prefix))
}
pub fn has_prefix(s: &str, prefix: &str) -> bool {
if cfg!(target_os = "windows") {
return strings_has_prefix_fold(s, prefix);
}
s.starts_with(prefix)
}
pub fn path_join(elem: &[PathBuf]) -> PathBuf {
let mut joined_path = PathBuf::new();
for path in elem {
joined_path.push(path);
}
joined_path
}
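/// Joins the elements with "/" and lexically cleans the result, preserving a
/// trailing slash contributed by the last element.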
pub fn path_join_buf(elements: &[&str]) -> String {
let trailing_slash = !elements.is_empty() && elements.last().unwrap().ends_with(SLASH_SEPARATOR);
let mut dst = String::new();
let mut added = 0;
for e in elements {
if added > 0 || !e.is_empty() {
if added > 0 {
dst.push_str(SLASH_SEPARATOR);
}
dst.push_str(e);
added += e.len();
}
}
let cpath = Path::new(&dst).components().collect::<PathBuf>();
let clean_path = cpath.to_string_lossy();
if trailing_slash {
return format!("{}{}", clean_path, SLASH_SEPARATOR);
}
clean_path.to_string()
}
pub fn path_to_bucket_object_with_base_path(base_path: &str, path: &str) -> (String, String) {
let path = path.trim_start_matches(base_path).trim_start_matches(SLASH_SEPARATOR);
if let Some(m) = path.find(SLASH_SEPARATOR) {
return (path[..m].to_string(), path[m + SLASH_SEPARATOR.len()..].to_string());
}
(path.to_string(), "".to_string())
}
pub fn path_to_bucket_object(s: &str) -> (String, String) {
path_to_bucket_object_with_base_path("", s)
}
pub fn base_dir_from_prefix(prefix: &str) -> String {
let mut base_dir = dir(prefix).to_owned();
if base_dir == "." || base_dir == "./" || base_dir == "/" {
base_dir = "".to_owned();
}
if !prefix.contains('/') {
base_dir = "".to_owned();
}
if !base_dir.is_empty() && !base_dir.ends_with(SLASH_SEPARATOR) {
base_dir.push_str(SLASH_SEPARATOR);
}
base_dir
}
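A few illustrative cases for listing prefixes (values assumed):

assert_eq!(base_dir_from_prefix("da/"), "da/"); // already a directory
assert_eq!(base_dir_from_prefix("da/obj-prefix"), "da/"); // walk the parent directory
assert_eq!(base_dir_from_prefix("obj-prefix"), ""); // no slash: scan from the volume root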
pub struct LazyBuf {
s: String,
buf: Option<Vec<u8>>,
w: usize,
}
impl LazyBuf {
pub fn new(s: String) -> Self {
LazyBuf { s, buf: None, w: 0 }
}
pub fn index(&self, i: usize) -> u8 {
if let Some(ref buf) = self.buf {
buf[i]
} else {
self.s.as_bytes()[i]
}
}
pub fn append(&mut self, c: u8) {
if self.buf.is_none() {
if self.w < self.s.len() && self.s.as_bytes()[self.w] == c {
self.w += 1;
return;
}
let mut new_buf = vec![0; self.s.len()];
new_buf[..self.w].copy_from_slice(&self.s.as_bytes()[..self.w]);
self.buf = Some(new_buf);
}
if let Some(ref mut buf) = self.buf {
buf[self.w] = c;
self.w += 1;
}
}
pub fn string(&self) -> String {
if let Some(ref buf) = self.buf {
String::from_utf8(buf[..self.w].to_vec()).unwrap()
} else {
self.s[..self.w].to_string()
}
}
}
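LazyBuf mirrors Go's path.Clean buffer: nothing is allocated while the output stays a byte-for-byte prefix of the input, and the backing Vec is created only at the first divergent write. A hypothetical trace:

let mut b = LazyBuf::new("a/b".to_string());
b.append(b'a'); // matches the input, so only the cursor advances
b.append(b'/'); // still a prefix of "a/b"; no allocation
b.append(b'x'); // diverges from 'b': the Vec is allocated and written here
assert_eq!(b.string(), "a/x");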
pub fn clean(path: &str) -> String {
if path.is_empty() {
return ".".to_string();
}
let rooted = path.starts_with('/');
let n = path.len();
let mut out = LazyBuf::new(path.to_string());
let mut r = 0;
let mut dotdot = 0;
if rooted {
out.append(b'/');
r = 1;
dotdot = 1;
}
while r < n {
match path.as_bytes()[r] {
b'/' => {
// Empty path element
r += 1;
}
b'.' if r + 1 == n || path.as_bytes()[r + 1] == b'/' => {
// . element
r += 1;
}
b'.' if path.as_bytes()[r + 1] == b'.' && (r + 2 == n || path.as_bytes()[r + 2] == b'/') => {
// .. element: remove to last /
r += 2;
if out.w > dotdot {
// Can backtrack
out.w -= 1;
while out.w > dotdot && out.index(out.w) != b'/' {
out.w -= 1;
}
} else if !rooted {
// Cannot backtrack but not rooted, so append .. element.
if out.w > 0 {
out.append(b'/');
}
out.append(b'.');
out.append(b'.');
dotdot = out.w;
}
}
_ => {
// Real path element.
// Add slash if needed
if (rooted && out.w != 1) || (!rooted && out.w != 0) {
out.append(b'/');
}
// Copy element
while r < n && path.as_bytes()[r] != b'/' {
out.append(path.as_bytes()[r]);
r += 1;
}
}
}
}
// Turn empty string into "."
if out.w == 0 {
return ".".to_string();
}
out.string()
}
pub fn split(path: &str) -> (&str, &str) {
// Find the last occurrence of the '/' character
if let Some(i) = path.rfind('/') {
// Return the directory (up to and including the last '/') and the file name
return (&path[..i + 1], &path[i + 1..]);
}
// If no '/' is found, the directory is empty and the whole path is the file name
("", path)
}
pub fn dir(path: &str) -> String {
let (a, _) = split(path);
clean(a)
}
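Following Go's path semantics, split keeps the trailing slash on the directory half and dir cleans it; for example:

assert_eq!(split("a/b/c.txt"), ("a/b/", "c.txt"));
assert_eq!(split("c.txt"), ("", "c.txt")); // no slash: everything is the file name
assert_eq!(dir("a/b/c.txt"), "a/b");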
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_base_dir_from_prefix() {
assert_eq!(base_dir_from_prefix("da/"), "da/");
assert_eq!(base_dir_from_prefix("da/obj-prefix"), "da/");
assert_eq!(base_dir_from_prefix("obj-prefix"), "");
}
#[test]
fn test_clean() {
assert_eq!(clean(""), ".");
assert_eq!(clean("abc"), "abc");
assert_eq!(clean("abc/def"), "abc/def");
assert_eq!(clean("a/b/c"), "a/b/c");
assert_eq!(clean("."), ".");
assert_eq!(clean(".."), "..");
assert_eq!(clean("../.."), "../..");
assert_eq!(clean("../../abc"), "../../abc");
assert_eq!(clean("/abc"), "/abc");
assert_eq!(clean("/"), "/");
assert_eq!(clean("abc/"), "abc");
assert_eq!(clean("abc/def/"), "abc/def");
assert_eq!(clean("a/b/c/"), "a/b/c");
assert_eq!(clean("./"), ".");
assert_eq!(clean("../"), "..");
assert_eq!(clean("../../"), "../..");
assert_eq!(clean("/abc/"), "/abc");
assert_eq!(clean("abc//def//ghi"), "abc/def/ghi");
assert_eq!(clean("//abc"), "/abc");
assert_eq!(clean("///abc"), "/abc");
assert_eq!(clean("//abc//"), "/abc");
assert_eq!(clean("abc//"), "abc");
assert_eq!(clean("abc/./def"), "abc/def");
assert_eq!(clean("/./abc/def"), "/abc/def");
assert_eq!(clean("abc/."), "abc");
assert_eq!(clean("abc/./../def"), "def");
assert_eq!(clean("abc//./../def"), "def");
assert_eq!(clean("abc/../../././../def"), "../../def");
assert_eq!(clean("abc/def/ghi/../jkl"), "abc/def/jkl");
assert_eq!(clean("abc/def/../ghi/../jkl"), "abc/jkl");
assert_eq!(clean("abc/def/.."), "abc");
assert_eq!(clean("abc/def/../.."), ".");
assert_eq!(clean("/abc/def/../.."), "/");
assert_eq!(clean("abc/def/../../.."), "..");
assert_eq!(clean("/abc/def/../../.."), "/");
assert_eq!(clean("abc/def/../../../ghi/jkl/../../../mno"), "../../mno");
}
}

View File

@@ -27,4 +27,5 @@ tokio = { workspace = true }
tower.workspace = true
url.workspace = true
madmin.workspace = true
common.workspace = true
common.workspace = true
rustfs-filemeta.workspace = true

View File

@@ -1,7 +1,7 @@
#![cfg(test)]
use ecstore::disk::{MetaCacheEntry, VolumeInfo, WalkDirOptions};
use ecstore::metacache::writer::{MetacacheReader, MetacacheWriter};
use ecstore::disk::{VolumeInfo, WalkDirOptions};
use futures::future::join_all;
use protos::proto_gen::node_service::WalkDirRequest;
use protos::{
@@ -12,6 +12,7 @@ use protos::{
},
};
use rmp_serde::{Deserializer, Serializer};
use rustfs_filemeta::{MetaCacheEntry, MetacacheReader, MetacacheWriter};
use serde::{Deserialize, Serialize};
use std::{error::Error, io::Cursor};
use tokio::spawn;

View File

@@ -71,6 +71,9 @@ reqwest = { workspace = true }
urlencoding = "2.1.3"
smallvec = { workspace = true }
shadow-rs.workspace = true
rustfs-filemeta.workspace = true
rustfs-utils = { workspace = true, features = ["full"] }
rustfs-rio.workspace = true
[target.'cfg(not(windows))'.dependencies]
nix = { workspace = true }

View File

@@ -1,15 +1,15 @@
use crate::disk::error::{Error, Result};
use crate::{
disk::{error::DiskError, Disk, DiskAPI},
erasure::{ReadAt, Writer},
io::{FileReader, FileWriter},
store_api::BitrotAlgorithm,
};
use blake2::Blake2b512;
use blake2::Digest as _;
use bytes::Bytes;
use common::error::{Error, Result};
use highway::{HighwayHash, HighwayHasher, Key};
use lazy_static::lazy_static;
use rustfs_utils::HashAlgorithm;
use sha2::{digest::core_api::BlockSizeUser, Digest, Sha256};
use std::{any::Any, collections::HashMap, io::Cursor, sync::Arc};
use tokio::io::{AsyncReadExt as _, AsyncWriteExt};
@@ -576,7 +576,7 @@ pub async fn new_bitrot_filewriter(
volume: &str,
path: &str,
inline: bool,
algo: BitrotAlgorithm,
algo: HashAlgorithm,
shard_size: usize,
) -> Result<BitrotWriter> {
let w = BitrotFileWriter::new(disk, volume, path, inline, algo, shard_size).await?;

View File

@@ -1,6 +1,6 @@
use common::error::Error;
use crate::error::Error;
#[derive(Debug, thiserror::Error, PartialEq, Eq)]
#[derive(Debug, thiserror::Error)]
pub enum BucketMetadataError {
#[error("tagging not found")]
TaggingNotFound,
@@ -18,18 +18,55 @@ pub enum BucketMetadataError {
BucketReplicationConfigNotFound,
#[error("bucket remote target not found")]
BucketRemoteTargetNotFound,
#[error("Io error: {0}")]
Io(std::io::Error),
}
impl BucketMetadataError {
pub fn is(&self, err: &Error) -> bool {
if let Some(e) = err.downcast_ref::<BucketMetadataError>() {
e == self
} else {
false
pub fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
BucketMetadataError::Io(std::io::Error::other(error))
}
}
impl From<BucketMetadataError> for Error {
fn from(e: BucketMetadataError) -> Self {
Error::other(e)
}
}
impl From<Error> for BucketMetadataError {
fn from(e: Error) -> Self {
match e {
Error::Io(e) => e.into(),
_ => BucketMetadataError::other(e),
}
}
}
impl From<std::io::Error> for BucketMetadataError {
fn from(e: std::io::Error) -> Self {
e.downcast::<BucketMetadataError>()
.unwrap_or_else(|e| BucketMetadataError::other(e))
}
}
impl PartialEq for BucketMetadataError {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(BucketMetadataError::Io(e1), BucketMetadataError::Io(e2)) => {
e1.kind() == e2.kind() && e1.to_string() == e2.to_string()
}
(e1, e2) => e1.to_u32() == e2.to_u32(),
}
}
}
impl Eq for BucketMetadataError {}
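Because Io wraps a non-comparable std::io::Error, equality falls back to kind plus message for that variant and to the numeric code otherwise; a quick sketch:

let a = BucketMetadataError::other("boom");
let b = BucketMetadataError::other("boom");
assert_eq!(a, b); // same kind and message
assert_ne!(a, BucketMetadataError::TaggingNotFound); // different to_u32 codes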
impl BucketMetadataError {
pub fn to_u32(&self) -> u32 {
match self {
@@ -41,6 +78,7 @@ impl BucketMetadataError {
BucketMetadataError::BucketQuotaConfigNotFound => 0x06,
BucketMetadataError::BucketReplicationConfigNotFound => 0x07,
BucketMetadataError::BucketRemoteTargetNotFound => 0x08,
BucketMetadataError::Io(_) => 0x09,
}
}
@@ -54,6 +92,7 @@ impl BucketMetadataError {
0x06 => Some(BucketMetadataError::BucketQuotaConfigNotFound),
0x07 => Some(BucketMetadataError::BucketReplicationConfigNotFound),
0x08 => Some(BucketMetadataError::BucketRemoteTargetNotFound),
0x09 => Some(BucketMetadataError::Io(std::io::Error::other("Io error"))),
_ => None,
}
}

View File

@@ -17,8 +17,8 @@ use time::OffsetDateTime;
use tracing::error;
use crate::config::com::{read_config, save_config};
use crate::{config, new_object_layer_fn};
use common::error::{Error, Result};
use crate::error::{Error, Result};
use crate::new_object_layer_fn;
use crate::disk::BUCKET_META_PREFIX;
use crate::store::ECStore;
@@ -177,7 +177,7 @@ impl BucketMetadata {
pub fn check_header(buf: &[u8]) -> Result<()> {
if buf.len() <= 4 {
return Err(Error::msg("read_bucket_metadata: data invalid"));
return Err(Error::other("read_bucket_metadata: data invalid"));
}
let format = LittleEndian::read_u16(&buf[0..2]);
@@ -185,12 +185,12 @@ impl BucketMetadata {
match format {
BUCKET_METADATA_FORMAT => {}
_ => return Err(Error::msg("read_bucket_metadata: format invalid")),
_ => return Err(Error::other("read_bucket_metadata: format invalid")),
}
match version {
BUCKET_METADATA_VERSION => {}
_ => return Err(Error::msg("read_bucket_metadata: version invalid")),
_ => return Err(Error::other("read_bucket_metadata: version invalid")),
}
Ok(())
@@ -281,7 +281,7 @@ impl BucketMetadata {
self.tagging_config_xml = data;
self.tagging_config_updated_at = updated;
}
_ => return Err(Error::msg(format!("config file not found : {}", config_file))),
_ => return Err(Error::other(format!("config file not found: {}", config_file))),
}
Ok(updated)
@@ -292,7 +292,9 @@ impl BucketMetadata {
}
pub async fn save(&mut self) -> Result<()> {
let Some(store) = new_object_layer_fn() else { return Err(Error::msg("errServerNotInitialized")) };
let Some(store) = new_object_layer_fn() else {
return Err(Error::other("errServerNotInitialized"));
};
self.parse_all_configs(store.clone())?;
@@ -358,7 +360,7 @@ pub async fn load_bucket_metadata_parse(api: Arc<ECStore>, bucket: &str, parse:
let mut bm = match read_bucket_metadata(api.clone(), bucket).await {
Ok(res) => res,
Err(err) => {
if !config::error::is_err_config_not_found(&err) {
if err != Error::ConfigNotFound {
return Err(err);
}
@@ -382,7 +384,7 @@ pub async fn load_bucket_metadata_parse(api: Arc<ECStore>, bucket: &str, parse:
async fn read_bucket_metadata(api: Arc<ECStore>, bucket: &str) -> Result<BucketMetadata> {
if bucket.is_empty() {
error!("bucket name empty");
return Err(Error::msg("invalid argument"));
return Err(Error::other("invalid argument"));
}
let bm = BucketMetadata::new(bucket);
@@ -397,7 +399,7 @@ async fn read_bucket_metadata(api: Arc<ECStore>, bucket: &str) -> Result<BucketM
Ok(bm)
}
fn _write_time<S>(t: &OffsetDateTime, s: S) -> Result<S::Ok, S::Error>
fn _write_time<S>(t: &OffsetDateTime, s: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{

View File

@@ -6,14 +6,12 @@ use std::{collections::HashMap, sync::Arc};
use crate::bucket::error::BucketMetadataError;
use crate::bucket::metadata::{load_bucket_metadata_parse, BUCKET_LIFECYCLE_CONFIG};
use crate::bucket::utils::is_meta_bucketname;
use crate::config::error::ConfigError;
use crate::disk::error::DiskError;
use crate::error::{is_err_bucket_not_found, Error, Result};
use crate::global::{is_dist_erasure, is_erasure, new_object_layer_fn, GLOBAL_Endpoints};
use crate::heal::heal_commands::HealOpts;
use crate::store::ECStore;
use crate::utils::xml::deserialize;
use crate::{config, StorageAPI};
use common::error::{Error, Result};
use crate::StorageAPI;
use futures::future::join_all;
use policy::policy::BucketPolicy;
use s3s::dto::{
@@ -49,7 +47,7 @@ pub(super) fn get_bucket_metadata_sys() -> Result<Arc<RwLock<BucketMetadataSys>>
if let Some(sys) = GLOBAL_BucketMetadataSys.get() {
Ok(sys.clone())
} else {
Err(Error::msg("GLOBAL_BucketMetadataSys not init"))
Err(Error::other("GLOBAL_BucketMetadataSys not init"))
}
}
@@ -167,7 +165,7 @@ impl BucketMetadataSys {
if let Some(endpoints) = GLOBAL_Endpoints.get() {
endpoints.es_count() * 10
} else {
return Err(Error::msg("GLOBAL_Endpoints not init"));
return Err(Error::other("GLOBAL_Endpoints not init"));
}
};
@@ -245,14 +243,14 @@ impl BucketMetadataSys {
pub async fn get(&self, bucket: &str) -> Result<Arc<BucketMetadata>> {
if is_meta_bucketname(bucket) {
return Err(Error::new(ConfigError::NotFound));
return Err(Error::ConfigNotFound);
}
let map = self.metadata_map.read().await;
if let Some(bm) = map.get(bucket) {
Ok(bm.clone())
} else {
Err(Error::new(ConfigError::NotFound))
Err(Error::ConfigNotFound)
}
}
@@ -277,7 +275,7 @@ impl BucketMetadataSys {
let meta = match self.get_config_from_disk(bucket).await {
Ok(res) => res,
Err(err) => {
if !config::error::is_err_config_not_found(&err) {
if err != Error::ConfigNotFound {
return Err(err);
} else {
BucketMetadata::new(bucket)
@@ -301,16 +299,18 @@ impl BucketMetadataSys {
}
async fn update_and_parse(&mut self, bucket: &str, config_file: &str, data: Vec<u8>, parse: bool) -> Result<OffsetDateTime> {
let Some(store) = new_object_layer_fn() else { return Err(Error::msg("errServerNotInitialized")) };
let Some(store) = new_object_layer_fn() else {
return Err(Error::other("errServerNotInitialized"));
};
if is_meta_bucketname(bucket) {
return Err(Error::msg("errInvalidArgument"));
return Err(Error::other("errInvalidArgument"));
}
let mut bm = match load_bucket_metadata_parse(store, bucket, parse).await {
Ok(res) => res,
Err(err) => {
if !is_erasure().await && !is_dist_erasure().await && DiskError::VolumeNotFound.is(&err) {
if !is_erasure().await && !is_dist_erasure().await && is_err_bucket_not_found(&err) {
BucketMetadata::new(bucket)
} else {
return Err(err);
@@ -327,7 +327,7 @@ impl BucketMetadataSys {
async fn save(&self, bm: BucketMetadata) -> Result<()> {
if is_meta_bucketname(&bm.name) {
return Err(Error::msg("errInvalidArgument"));
return Err(Error::other("errInvalidArgument"));
}
let mut bm = bm;
@@ -341,7 +341,7 @@ impl BucketMetadataSys {
pub async fn get_config_from_disk(&self, bucket: &str) -> Result<BucketMetadata> {
if is_meta_bucketname(bucket) {
return Err(Error::msg("errInvalidArgument"));
return Err(Error::other("errInvalidArgument"));
}
load_bucket_metadata(self.api.clone(), bucket).await
@@ -360,7 +360,7 @@ impl BucketMetadataSys {
Ok(res) => res,
Err(err) => {
return if *self.initialized.read().await {
Err(Error::msg("errBucketMetadataNotInitialized"))
Err(Error::other("errBucketMetadataNotInitialized"))
} else {
Err(err)
}
@@ -381,7 +381,7 @@ impl BucketMetadataSys {
Ok((res, _)) => res,
Err(err) => {
warn!("get_versioning_config err {:?}", &err);
return if config::error::is_err_config_not_found(&err) {
return if err == Error::ConfigNotFound {
Ok((VersioningConfiguration::default(), OffsetDateTime::UNIX_EPOCH))
} else {
Err(err)
@@ -401,8 +401,8 @@ impl BucketMetadataSys {
Ok((res, _)) => res,
Err(err) => {
warn!("get_bucket_policy err {:?}", &err);
return if config::error::is_err_config_not_found(&err) {
Err(Error::new(BucketMetadataError::BucketPolicyNotFound))
return if err == Error::ConfigNotFound {
Err(BucketMetadataError::BucketPolicyNotFound.into())
} else {
Err(err)
};
@@ -412,7 +412,7 @@ impl BucketMetadataSys {
if let Some(config) = &bm.policy_config {
Ok((config.clone(), bm.policy_config_updated_at))
} else {
Err(Error::new(BucketMetadataError::BucketPolicyNotFound))
Err(BucketMetadataError::BucketPolicyNotFound.into())
}
}
@@ -421,8 +421,8 @@ impl BucketMetadataSys {
Ok((res, _)) => res,
Err(err) => {
warn!("get_tagging_config err {:?}", &err);
return if config::error::is_err_config_not_found(&err) {
Err(Error::new(BucketMetadataError::TaggingNotFound))
return if err == Error::ConfigNotFound {
Err(BucketMetadataError::TaggingNotFound.into())
} else {
Err(err)
};
@@ -432,7 +432,7 @@ impl BucketMetadataSys {
if let Some(config) = &bm.tagging_config {
Ok((config.clone(), bm.tagging_config_updated_at))
} else {
Err(Error::new(BucketMetadataError::TaggingNotFound))
Err(BucketMetadataError::TaggingNotFound.into())
}
}
@@ -441,8 +441,8 @@ impl BucketMetadataSys {
Ok((res, _)) => res,
Err(err) => {
warn!("get_object_lock_config err {:?}", &err);
return if config::error::is_err_config_not_found(&err) {
Err(Error::new(BucketMetadataError::BucketObjectLockConfigNotFound))
return if err == Error::ConfigNotFound {
Err(BucketMetadataError::BucketObjectLockConfigNotFound.into())
} else {
Err(err)
};
@@ -452,7 +452,7 @@ impl BucketMetadataSys {
if let Some(config) = &bm.object_lock_config {
Ok((config.clone(), bm.object_lock_config_updated_at))
} else {
Err(Error::new(BucketMetadataError::BucketObjectLockConfigNotFound))
Err(BucketMetadataError::BucketObjectLockConfigNotFound.into())
}
}
@@ -461,8 +461,8 @@ impl BucketMetadataSys {
Ok((res, _)) => res,
Err(err) => {
warn!("get_lifecycle_config err {:?}", &err);
return if config::error::is_err_config_not_found(&err) {
Err(Error::new(BucketMetadataError::BucketLifecycleNotFound))
return if err == Error::ConfigNotFound {
Err(BucketMetadataError::BucketLifecycleNotFound.into())
} else {
Err(err)
};
@@ -471,12 +471,12 @@ impl BucketMetadataSys {
if let Some(config) = &bm.lifecycle_config {
if config.rules.is_empty() {
Err(Error::new(BucketMetadataError::BucketLifecycleNotFound))
Err(BucketMetadataError::BucketLifecycleNotFound.into())
} else {
Ok((config.clone(), bm.lifecycle_config_updated_at))
}
} else {
Err(Error::new(BucketMetadataError::BucketLifecycleNotFound))
Err(BucketMetadataError::BucketLifecycleNotFound.into())
}
}
@@ -485,7 +485,7 @@ impl BucketMetadataSys {
Ok((bm, _)) => bm.notification_config.clone(),
Err(err) => {
warn!("get_notification_config err {:?}", &err);
if config::error::is_err_config_not_found(&err) {
if err == Error::ConfigNotFound {
None
} else {
return Err(err);
@@ -501,8 +501,8 @@ impl BucketMetadataSys {
Ok((res, _)) => res,
Err(err) => {
warn!("get_sse_config err {:?}", &err);
return if config::error::is_err_config_not_found(&err) {
Err(Error::new(BucketMetadataError::BucketSSEConfigNotFound))
return if err == Error::ConfigNotFound {
Err(BucketMetadataError::BucketSSEConfigNotFound.into())
} else {
Err(err)
};
@@ -512,7 +512,7 @@ impl BucketMetadataSys {
if let Some(config) = &bm.sse_config {
Ok((config.clone(), bm.encryption_config_updated_at))
} else {
Err(Error::new(BucketMetadataError::BucketSSEConfigNotFound))
Err(BucketMetadataError::BucketSSEConfigNotFound.into())
}
}
@@ -532,8 +532,8 @@ impl BucketMetadataSys {
Ok((res, _)) => res,
Err(err) => {
warn!("get_quota_config err {:?}", &err);
return if config::error::is_err_config_not_found(&err) {
Err(Error::new(BucketMetadataError::BucketQuotaConfigNotFound))
return if err == Error::ConfigNotFound {
Err(BucketMetadataError::BucketQuotaConfigNotFound.into())
} else {
Err(err)
};
@@ -543,7 +543,7 @@ impl BucketMetadataSys {
if let Some(config) = &bm.quota_config {
Ok((config.clone(), bm.quota_config_updated_at))
} else {
Err(Error::new(BucketMetadataError::BucketQuotaConfigNotFound))
Err(BucketMetadataError::BucketQuotaConfigNotFound.into())
}
}
@@ -552,8 +552,8 @@ impl BucketMetadataSys {
Ok(res) => res,
Err(err) => {
warn!("get_replication_config err {:?}", &err);
return if config::error::is_err_config_not_found(&err) {
Err(Error::new(BucketMetadataError::BucketReplicationConfigNotFound))
return if err == Error::ConfigNotFound {
Err(BucketMetadataError::BucketReplicationConfigNotFound.into())
} else {
Err(err)
};
@@ -567,7 +567,7 @@ impl BucketMetadataSys {
Ok((config.clone(), bm.replication_config_updated_at))
} else {
Err(Error::new(BucketMetadataError::BucketReplicationConfigNotFound))
Err(BucketMetadataError::BucketReplicationConfigNotFound.into())
}
}
@@ -576,8 +576,8 @@ impl BucketMetadataSys {
Ok(res) => res,
Err(err) => {
warn!("get_replication_config err {:?}", &err);
return if config::error::is_err_config_not_found(&err) {
Err(Error::new(BucketMetadataError::BucketRemoteTargetNotFound))
return if err == Error::ConfigNotFound {
Err(BucketMetadataError::BucketRemoteTargetNotFound.into())
} else {
Err(err)
};
@@ -591,7 +591,7 @@ impl BucketMetadataSys {
Ok(config.clone())
} else {
Err(Error::new(BucketMetadataError::BucketRemoteTargetNotFound))
Err(BucketMetadataError::BucketRemoteTargetNotFound.into())
}
}
}

View File

@@ -1,5 +1,5 @@
use super::{error::BucketMetadataError, metadata_sys::get_bucket_metadata_sys};
use common::error::Result;
use crate::error::Result;
use policy::policy::{BucketPolicy, BucketPolicyArgs};
use tracing::warn;
@@ -10,8 +10,9 @@ impl PolicySys {
match Self::get(args.bucket).await {
Ok(cfg) => return cfg.is_allowed(args),
Err(err) => {
if !BucketMetadataError::BucketPolicyNotFound.is(&err) {
warn!("config get err {:?}", err);
let berr: BucketMetadataError = err.into();
if berr != BucketMetadataError::BucketPolicyNotFound {
warn!("config get err {:?}", berr);
}
}
}

View File

@@ -1,4 +1,4 @@
use common::error::Result;
use crate::error::Result;
use rmp_serde::Serializer as rmpSerializer;
use serde::{Deserialize, Serialize};

View File

@@ -1,4 +1,4 @@
use common::error::Result;
use crate::error::Result;
use rmp_serde::Serializer as rmpSerializer;
use serde::{Deserialize, Serialize};
use std::time::Duration;

View File

@@ -1,5 +1,5 @@
use crate::disk::RUSTFS_META_BUCKET;
use common::error::{Error, Result};
use crate::error::{Error, Result};
pub fn is_meta_bucketname(name: &str) -> bool {
name.starts_with(RUSTFS_META_BUCKET)
@@ -13,60 +13,60 @@ lazy_static::lazy_static! {
static ref IP_ADDRESS: Regex = Regex::new(r"^(\d+\.){3}\d+$").unwrap();
}
pub fn check_bucket_name_common(bucket_name: &str, strict: bool) -> Result<(), Error> {
pub fn check_bucket_name_common(bucket_name: &str, strict: bool) -> Result<()> {
let bucket_name_trimmed = bucket_name.trim();
if bucket_name_trimmed.is_empty() {
return Err(Error::msg("Bucket name cannot be empty"));
return Err(Error::other("Bucket name cannot be empty"));
}
if bucket_name_trimmed.len() < 3 {
return Err(Error::msg("Bucket name cannot be shorter than 3 characters"));
return Err(Error::other("Bucket name cannot be shorter than 3 characters"));
}
if bucket_name_trimmed.len() > 63 {
return Err(Error::msg("Bucket name cannot be longer than 63 characters"));
return Err(Error::other("Bucket name cannot be longer than 63 characters"));
}
if bucket_name_trimmed == "rustfs" {
return Err(Error::msg("Bucket name cannot be rustfs"));
return Err(Error::other("Bucket name cannot be rustfs"));
}
if IP_ADDRESS.is_match(bucket_name_trimmed) {
return Err(Error::msg("Bucket name cannot be an IP address"));
return Err(Error::other("Bucket name cannot be an IP address"));
}
if bucket_name_trimmed.contains("..") || bucket_name_trimmed.contains(".-") || bucket_name_trimmed.contains("-.") {
return Err(Error::msg("Bucket name contains invalid characters"));
return Err(Error::other("Bucket name contains invalid characters"));
}
if strict {
if !VALID_BUCKET_NAME_STRICT.is_match(bucket_name_trimmed) {
return Err(Error::msg("Bucket name contains invalid characters"));
return Err(Error::other("Bucket name contains invalid characters"));
}
} else if !VALID_BUCKET_NAME.is_match(bucket_name_trimmed) {
return Err(Error::msg("Bucket name contains invalid characters"));
return Err(Error::other("Bucket name contains invalid characters"));
}
Ok(())
}
pub fn check_valid_bucket_name(bucket_name: &str) -> Result<(), Error> {
pub fn check_valid_bucket_name(bucket_name: &str) -> Result<()> {
check_bucket_name_common(bucket_name, false)
}
pub fn check_valid_bucket_name_strict(bucket_name: &str) -> Result<(), Error> {
pub fn check_valid_bucket_name_strict(bucket_name: &str) -> Result<()> {
check_bucket_name_common(bucket_name, true)
}
pub fn check_valid_object_name_prefix(object_name: &str) -> Result<(), Error> {
pub fn check_valid_object_name_prefix(object_name: &str) -> Result<()> {
if object_name.len() > 1024 {
return Err(Error::msg("Object name cannot be longer than 1024 characters"));
return Err(Error::other("Object name cannot be longer than 1024 characters"));
}
if !object_name.is_ascii() {
return Err(Error::msg("Object name with non-UTF-8 strings are not supported"));
return Err(Error::other("Object names with non-ASCII characters are not supported"));
}
Ok(())
}
pub fn check_valid_object_name(object_name: &str) -> Result<(), Error> {
pub fn check_valid_object_name(object_name: &str) -> Result<()> {
if object_name.trim().is_empty() {
return Err(Error::msg("Object name cannot be empty"));
return Err(Error::other("Object name cannot be empty"));
}
check_valid_object_name_prefix(object_name)
}
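A quick sanity sketch of the rules above, assuming the usual S3 name patterns behind VALID_BUCKET_NAME:

assert!(check_valid_bucket_name("my-bucket").is_ok());
assert!(check_valid_bucket_name("ab").is_err()); // shorter than 3 characters
assert!(check_valid_bucket_name("192.168.1.1").is_err()); // IP addresses are rejected
assert!(check_valid_object_name("a/b/c.txt").is_ok());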

View File

@@ -1,6 +1,6 @@
use super::{metadata_sys::get_bucket_metadata_sys, versioning::VersioningApi};
use crate::disk::RUSTFS_META_BUCKET;
use common::error::Result;
use crate::error::Result;
use s3s::dto::VersioningConfiguration;
use tracing::warn;

View File

@@ -1,17 +1,15 @@
use crate::disk::{DiskAPI, DiskStore, MetaCacheEntries, MetaCacheEntry, WalkDirOptions};
use crate::{
disk::error::{is_err_eof, is_err_file_not_found, is_err_volume_not_found, DiskError},
metacache::writer::MetacacheReader,
};
use common::error::{Error, Result};
use crate::disk::error::DiskError;
use crate::disk::{self, DiskAPI, DiskStore, WalkDirOptions};
use futures::future::join_all;
use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetacacheReader};
use std::{future::Future, pin::Pin, sync::Arc};
use tokio::{spawn, sync::broadcast::Receiver as B_Receiver};
use tracing::error;
pub type AgreedFn = Box<dyn Fn(MetaCacheEntry) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
pub type PartialFn = Box<dyn Fn(MetaCacheEntries, &[Option<Error>]) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
type FinishedFn = Box<dyn Fn(&[Option<Error>]) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
pub type PartialFn =
Box<dyn Fn(MetaCacheEntries, &[Option<DiskError>]) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
type FinishedFn = Box<dyn Fn(&[Option<DiskError>]) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
#[derive(Default)]
pub struct ListPathRawOptions {
@@ -51,13 +49,13 @@ impl Clone for ListPathRawOptions {
}
}
pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -> Result<()> {
pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -> disk::error::Result<()> {
// println!("list_path_raw {},{}", &opts.bucket, &opts.path);
if opts.disks.is_empty() {
return Err(Error::from_string("list_path_raw: 0 drives provided"));
return Err(DiskError::other("list_path_raw: 0 drives provided"));
}
let mut jobs: Vec<tokio::task::JoinHandle<std::result::Result<(), Error>>> = Vec::new();
let mut jobs: Vec<tokio::task::JoinHandle<std::result::Result<(), DiskError>>> = Vec::new();
let mut readers = Vec::with_capacity(opts.disks.len());
let fds = Arc::new(opts.fallback_disks.clone());
@@ -137,7 +135,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
}
let revjob = spawn(async move {
let mut errs: Vec<Option<Error>> = Vec::with_capacity(readers.len());
let mut errs: Vec<Option<DiskError>> = Vec::with_capacity(readers.len());
for _ in 0..readers.len() {
errs.push(None);
}
@@ -146,7 +144,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
let mut current = MetaCacheEntry::default();
if rx.try_recv().is_ok() {
return Err(Error::from_string("canceled"));
return Err(DiskError::other("canceled"));
}
let mut top_entries: Vec<Option<MetaCacheEntry>> = vec![None; readers.len()];
@@ -175,21 +173,21 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
}
}
Err(err) => {
if is_err_eof(&err) {
if err == rustfs_filemeta::Error::Unexpected {
at_eof += 1;
continue;
} else if is_err_file_not_found(&err) {
} else if err == rustfs_filemeta::Error::FileNotFound {
at_eof += 1;
fnf += 1;
continue;
} else if is_err_volume_not_found(&err) {
} else if err == rustfs_filemeta::Error::VolumeNotFound {
at_eof += 1;
fnf += 1;
vnf += 1;
continue;
} else {
has_err += 1;
errs[i] = Some(err);
errs[i] = Some(err.into());
continue;
}
}
@@ -230,11 +228,11 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
}
if vnf > 0 && vnf >= (readers.len() - opts.min_disks) {
return Err(Error::new(DiskError::VolumeNotFound));
return Err(DiskError::VolumeNotFound);
}
if fnf > 0 && fnf >= (readers.len() - opts.min_disks) {
return Err(Error::new(DiskError::FileNotFound));
return Err(DiskError::FileNotFound);
}
if has_err > 0 && has_err > opts.disks.len() - opts.min_disks {
@@ -252,7 +250,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
_ => {}
});
return Err(Error::from_string(combined_err.join(", ")));
return Err(DiskError::other(combined_err.join(", ")));
}
// Break if all at EOF or error.

View File

@@ -1,14 +1,11 @@
use super::error::{is_err_config_not_found, ConfigError};
use super::{storageclass, Config, GLOBAL_StorageClass};
use crate::disk::RUSTFS_META_BUCKET;
use crate::error::{Error, Result};
use crate::store_api::{ObjectInfo, ObjectOptions, PutObjReader, StorageAPI};
use crate::store_err::is_err_object_not_found;
use crate::utils::path::SLASH_SEPARATOR;
use common::error::{Error, Result};
use http::HeaderMap;
use lazy_static::lazy_static;
use std::collections::HashSet;
use std::io::Cursor;
use std::sync::Arc;
use tracing::{error, warn};
@@ -41,8 +38,8 @@ pub async fn read_config_with_metadata<S: StorageAPI>(
.get_object_reader(RUSTFS_META_BUCKET, file, None, h, opts)
.await
.map_err(|err| {
if is_err_object_not_found(&err) {
Error::new(ConfigError::NotFound)
if err == Error::FileNotFound || matches!(err, Error::ObjectNotFound(_, _)) {
Error::ConfigNotFound
} else {
err
}
@@ -51,7 +48,7 @@ pub async fn read_config_with_metadata<S: StorageAPI>(
let data = rd.read_all().await?;
if data.is_empty() {
return Err(Error::new(ConfigError::NotFound));
return Err(Error::ConfigNotFound);
}
Ok((data, rd.object_info))
@@ -85,8 +82,8 @@ pub async fn delete_config<S: StorageAPI>(api: Arc<S>, file: &str) -> Result<()>
{
Ok(_) => Ok(()),
Err(err) => {
if is_err_object_not_found(&err) {
Err(Error::new(ConfigError::NotFound))
if err == Error::FileNotFound || matches!(err, Error::ObjectNotFound(_, _)) {
Err(Error::ConfigNotFound)
} else {
Err(err)
}
@@ -95,9 +92,8 @@ pub async fn delete_config<S: StorageAPI>(api: Arc<S>, file: &str) -> Result<()>
}
pub async fn save_config_with_opts<S: StorageAPI>(api: Arc<S>, file: &str, data: Vec<u8>, opts: &ObjectOptions) -> Result<()> {
let size = data.len();
let _ = api
.put_object(RUSTFS_META_BUCKET, file, &mut PutObjReader::new(Box::new(Cursor::new(data)), size), opts)
.put_object(RUSTFS_META_BUCKET, file, &mut PutObjReader::from_vec(data), opts)
.await?;
Ok(())
}
@@ -119,7 +115,7 @@ pub async fn read_config_without_migrate<S: StorageAPI>(api: Arc<S>) -> Result<C
let data = match read_config(api.clone(), config_file.as_str()).await {
Ok(res) => res,
Err(err) => {
return if is_err_config_not_found(&err) {
return if err == Error::ConfigNotFound {
warn!("config not found, start to init");
let cfg = new_and_save_server_config(api).await?;
warn!("config init done");
@@ -141,7 +137,7 @@ async fn read_server_config<S: StorageAPI>(api: Arc<S>, data: &[u8]) -> Result<C
let cfg_data = match read_config(api.clone(), config_file.as_str()).await {
Ok(res) => res,
Err(err) => {
return if is_err_config_not_found(&err) {
return if err == Error::ConfigNotFound {
warn!("config not found init start");
let cfg = new_and_save_server_config(api).await?;
warn!("config not found init done");

View File

@@ -1,45 +0,0 @@
use crate::{disk, store_err::is_err_object_not_found};
use common::error::Error;
#[derive(Debug, PartialEq, thiserror::Error)]
pub enum ConfigError {
#[error("config not found")]
NotFound,
}
impl ConfigError {
/// Returns `true` if the config error is [`NotFound`].
///
/// [`NotFound`]: ConfigError::NotFound
#[must_use]
pub fn is_not_found(&self) -> bool {
matches!(self, Self::NotFound)
}
}
impl ConfigError {
pub fn to_u32(&self) -> u32 {
match self {
ConfigError::NotFound => 0x01,
}
}
pub fn from_u32(error: u32) -> Option<Self> {
match error {
0x01 => Some(Self::NotFound),
_ => None,
}
}
}
pub fn is_err_config_not_found(err: &Error) -> bool {
if let Some(e) = err.downcast_ref::<ConfigError>() {
ConfigError::is_not_found(e)
} else if let Some(e) = err.downcast_ref::<disk::error::DiskError>() {
matches!(e, disk::error::DiskError::FileNotFound)
} else if is_err_object_not_found(err) {
return true;
} else {
false
}
}

View File

@@ -1,7 +1,6 @@
use std::time::Duration;
use crate::error::{Error, Result};
use crate::utils::bool_flag::parse_bool;
use common::error::{Error, Result};
use std::time::Duration;
#[derive(Debug, Default)]
pub struct Config {
@@ -42,13 +41,13 @@ fn parse_bitrot_config(s: &str) -> Result<Duration> {
}
Err(_) => {
if !s.ends_with("m") {
return Err(Error::from_string("unknown format"));
return Err(Error::other("unknown format"));
}
match s.trim_end_matches('m').parse::<u64>() {
Ok(months) => {
if months < RUSTFS_BITROT_CYCLE_IN_MONTHS {
return Err(Error::from_string(format!(
return Err(Error::other(format!(
"minimum bitrot cycle is {} month(s)",
RUSTFS_BITROT_CYCLE_IN_MONTHS
)));
@@ -56,7 +55,7 @@ fn parse_bitrot_config(s: &str) -> Result<Duration> {
// convert months to seconds (30-day months)
Ok(Duration::from_secs(months * 30 * 24 * 60 * 60))
}
Err(err) => Err(err.into()),
Err(err) => Err(Error::other(err)),
}
}
}
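With the unit fix above, an "Nm" value parses as N thirty-day months; a sketch of the intended behavior (the function is module-private, so this would live in its tests), assuming RUSTFS_BITROT_CYCLE_IN_MONTHS is at least 1:

assert_eq!(parse_bitrot_config("3m").unwrap(), Duration::from_secs(3 * 30 * 24 * 60 * 60));
assert!(parse_bitrot_config("0m").is_err()); // below the minimum bitrot cycle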

View File

@@ -1,12 +1,11 @@
pub mod com;
pub mod error;
#[allow(dead_code)]
pub mod heal;
pub mod storageclass;
use crate::error::Result;
use crate::store::ECStore;
use com::{lookup_configs, read_config_without_migrate, STORAGE_CLASS_SUB_SYS};
use common::error::Result;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

View File

@@ -1,11 +1,9 @@
use std::env;
use crate::config::KV;
use common::error::{Error, Result};
use super::KVS;
use crate::config::KV;
use crate::error::{Error, Result};
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::env;
use tracing::warn;
// default_parity_count: default configuration; the number of parity drives is allocated based on the total disk count
@@ -199,7 +197,7 @@ pub fn lookup_config(kvs: &KVS, set_drive_count: usize) -> Result<Config> {
}
block.as_u64() as usize
} else {
return Err(Error::msg(format!("parse {} format failed", INLINE_BLOCK_ENV)));
return Err(Error::other(format!("parse {} format failed", INLINE_BLOCK_ENV)));
}
} else {
DEFAULT_INLINE_BLOCK
@@ -220,7 +218,7 @@ pub fn parse_storage_class(env: &str) -> Result<StorageClass> {
// only two elements allowed in the string - "scheme" and "number of parity drives"
if s.len() != 2 {
return Err(Error::msg(format!(
return Err(Error::other(format!(
"Invalid storage class format: {}. Expected 'Scheme:Number of parity drives'.",
env
)));
@@ -228,13 +226,13 @@ pub fn parse_storage_class(env: &str) -> Result<StorageClass> {
// only allowed scheme is "EC"
if s[0] != SCHEME_PREFIX {
return Err(Error::msg(format!("Unsupported scheme {}. Supported scheme is EC.", s[0])));
return Err(Error::other(format!("Unsupported scheme {}. Supported scheme is EC.", s[0])));
}
// Number of parity drives should be integer
let parity_drives: usize = match s[1].parse() {
Ok(num) => num,
Err(_) => return Err(Error::msg(format!("Failed to parse parity value: {}.", s[1]))),
Err(_) => return Err(Error::other(format!("Failed to parse parity value: {}.", s[1]))),
};
Ok(StorageClass { parity: parity_drives })
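For example, the canonical form is "scheme:parity", assuming SCHEME_PREFIX is "EC":

assert_eq!(parse_storage_class("EC:4").unwrap().parity, 4);
assert!(parse_storage_class("EC:4:extra").is_err()); // exactly two ':'-separated parts
assert!(parse_storage_class("XX:4").is_err()); // only the EC scheme is supported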
@@ -243,14 +241,14 @@ pub fn parse_storage_class(env: &str) -> Result<StorageClass> {
// ValidateParity validates standard storage class parity.
pub fn validate_parity(ss_parity: usize, set_drive_count: usize) -> Result<()> {
// if ss_parity > 0 && ss_parity < MIN_PARITY_DRIVES {
// return Err(Error::msg(format!(
// return Err(Error::other(format!(
// "parity {} should be greater than or equal to {}",
// ss_parity, MIN_PARITY_DRIVES
// )));
// }
if ss_parity > set_drive_count / 2 {
return Err(Error::msg(format!(
return Err(Error::other(format!(
"parity {} should be less than or equal to {}",
ss_parity,
set_drive_count / 2
@@ -263,7 +261,7 @@ pub fn validate_parity(ss_parity: usize, set_drive_count: usize) -> Result<()> {
// Validates the parity drives.
pub fn validate_parity_inner(ss_parity: usize, rrs_parity: usize, set_drive_count: usize) -> Result<()> {
// if ss_parity > 0 && ss_parity < MIN_PARITY_DRIVES {
// return Err(Error::msg(format!(
// return Err(Error::other(format!(
// "Standard storage class parity {} should be greater than or equal to {}",
// ss_parity, MIN_PARITY_DRIVES
// )));
@@ -272,7 +270,7 @@ pub fn validate_parity_inner(ss_parity: usize, rrs_parity: usize, set_drive_coun
// RRS parity drives should be greater than or equal to minParityDrives.
// Parity below minParityDrives is not supported.
// if rrs_parity > 0 && rrs_parity < MIN_PARITY_DRIVES {
// return Err(Error::msg(format!(
// return Err(Error::other(format!(
// "Reduced redundancy storage class parity {} should be greater than or equal to {}",
// rrs_parity, MIN_PARITY_DRIVES
// )));
@@ -280,7 +278,7 @@ pub fn validate_parity_inner(ss_parity: usize, rrs_parity: usize, set_drive_coun
if set_drive_count > 2 {
if ss_parity > set_drive_count / 2 {
return Err(Error::msg(format!(
return Err(Error::other(format!(
"Standard storage class parity {} should be less than or equal to {}",
ss_parity,
set_drive_count / 2
@@ -288,7 +286,7 @@ pub fn validate_parity_inner(ss_parity: usize, rrs_parity: usize, set_drive_coun
}
if rrs_parity > set_drive_count / 2 {
return Err(Error::msg(format!(
return Err(Error::other(format!(
"Reduced redundancy storage class parity {} should be less than or equal to {}",
rrs_parity,
set_drive_count / 2
@@ -297,7 +295,7 @@ pub fn validate_parity_inner(ss_parity: usize, rrs_parity: usize, set_drive_coun
}
if ss_parity > 0 && rrs_parity > 0 && ss_parity < rrs_parity {
return Err(Error::msg(format!("Standard storage class parity drives {} should be greater than or equal to Reduced redundancy storage class parity drives {}", ss_parity, rrs_parity)));
return Err(Error::other(format!("Standard storage class parity drives {} should be greater than or equal to Reduced redundancy storage class parity drives {}", ss_parity, rrs_parity)));
}
Ok(())
}

View File

@@ -1,6 +1,7 @@
use super::error::{Error, Result};
use crate::utils::net;
use common::error::{Error, Result};
use path_absolutize::Absolutize;
use rustfs_utils::is_local_host;
use std::{fmt::Display, path::Path};
use url::{ParseError, Url};
@@ -40,10 +41,10 @@ impl TryFrom<&str> for Endpoint {
type Error = Error;
/// Performs the conversion.
fn try_from(value: &str) -> Result<Self, Self::Error> {
fn try_from(value: &str) -> std::result::Result<Self, Self::Error> {
// check whether given path is not empty.
if ["", "/", "\\"].iter().any(|&v| v.eq(value)) {
return Err(Error::from_string("empty or root endpoint is not supported"));
return Err(Error::other("empty or root endpoint is not supported"));
}
let mut is_local = false;
@@ -59,7 +60,7 @@ impl TryFrom<&str> for Endpoint {
&& url.fragment().is_none()
&& url.query().is_none())
{
return Err(Error::from_string("invalid URL endpoint format"));
return Err(Error::other("invalid URL endpoint format"));
}
let path = url.path().to_string();
@@ -76,12 +77,12 @@ impl TryFrom<&str> for Endpoint {
let path = Path::new(&path[1..]).absolutize()?;
if path.parent().is_none() || Path::new("").eq(&path) {
return Err(Error::from_string("empty or root path is not supported in URL endpoint"));
return Err(Error::other("empty or root path is not supported in URL endpoint"));
}
match path.to_str() {
Some(v) => url.set_path(v),
None => return Err(Error::from_string("invalid path")),
None => return Err(Error::other("invalid path")),
}
url
@@ -93,15 +94,15 @@ impl TryFrom<&str> for Endpoint {
}
Err(e) => match e {
ParseError::InvalidPort => {
return Err(Error::from_string("invalid URL endpoint format: port number must be between 1 to 65535"))
return Err(Error::other("invalid URL endpoint format: port number must be between 1 to 65535"))
}
ParseError::EmptyHost => return Err(Error::from_string("invalid URL endpoint format: empty host name")),
ParseError::EmptyHost => return Err(Error::other("invalid URL endpoint format: empty host name")),
ParseError::RelativeUrlWithoutBase => {
// like /foo
is_local = true;
url_parse_from_file_path(value)?
}
_ => return Err(Error::from_string(format!("invalid URL endpoint format: {}", e))),
_ => return Err(Error::other(format!("invalid URL endpoint format: {}", e))),
},
};
@@ -144,7 +145,7 @@ impl Endpoint {
pub fn update_is_local(&mut self, local_port: u16) -> Result<()> {
match (self.url.scheme(), self.url.host()) {
(v, Some(host)) if v != "file" => {
self.is_local = net::is_local_host(host, self.url.port().unwrap_or_default(), local_port)?;
self.is_local = is_local_host(host, self.url.port().unwrap_or_default(), local_port)?;
}
_ => {}
}
@@ -186,17 +187,17 @@ fn url_parse_from_file_path(value: &str) -> Result<Url> {
// /mnt/export1. So we go ahead and start the rustfs server in FS modes in these cases.
let addr: Vec<&str> = value.splitn(2, '/').collect();
if net::is_socket_addr(addr[0]) {
return Err(Error::from_string("invalid URL endpoint format: missing scheme http or https"));
return Err(Error::other("invalid URL endpoint format: missing scheme http or https"));
}
let file_path = match Path::new(value).absolutize() {
Ok(path) => path,
Err(err) => return Err(Error::from_string(format!("absolute path failed: {}", err))),
Err(err) => return Err(Error::other(format!("absolute path failed: {}", err))),
};
match Url::from_file_path(file_path) {
Ok(url) => Ok(url),
Err(_) => Err(Error::from_string("Convert a file path into an URL failed")),
Err(_) => Err(Error::other("Convert a file path into an URL failed")),
}
}
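A brief sketch of the accepted endpoint forms, consistent with the test table below (values assumed):

assert!(Endpoint::try_from("http://server:9000/data").is_ok()); // URL with a non-root path
assert!(Endpoint::try_from("http://server:9000/").is_err()); // root path rejected
assert!(Endpoint::try_from("/mnt/disk1").is_ok()); // bare path becomes a local file:// endpoint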
@@ -260,49 +261,49 @@ mod test {
arg: "",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::from_string("empty or root endpoint is not supported")),
expected_err: Some(Error::other("empty or root endpoint is not supported")),
},
TestCase {
arg: "/",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::from_string("empty or root endpoint is not supported")),
expected_err: Some(Error::other("empty or root endpoint is not supported")),
},
TestCase {
arg: "\\",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::from_string("empty or root endpoint is not supported")),
expected_err: Some(Error::other("empty or root endpoint is not supported")),
},
TestCase {
arg: "c://foo",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::from_string("invalid URL endpoint format")),
expected_err: Some(Error::other("invalid URL endpoint format")),
},
TestCase {
arg: "ftp://foo",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::from_string("invalid URL endpoint format")),
expected_err: Some(Error::other("invalid URL endpoint format")),
},
TestCase {
arg: "http://server/path?location",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::from_string("invalid URL endpoint format")),
expected_err: Some(Error::other("invalid URL endpoint format")),
},
TestCase {
arg: "http://:/path",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::from_string("invalid URL endpoint format: empty host name")),
expected_err: Some(Error::other("invalid URL endpoint format: empty host name")),
},
TestCase {
arg: "http://:8080/path",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::from_string("invalid URL endpoint format: empty host name")),
expected_err: Some(Error::other("invalid URL endpoint format: empty host name")),
},
TestCase {
arg: "http://server:/path",
@@ -320,25 +321,25 @@ mod test {
arg: "https://93.184.216.34:808080/path",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::from_string("invalid URL endpoint format: port number must be between 1 to 65535")),
expected_err: Some(Error::other("invalid URL endpoint format: port number must be between 1 to 65535")),
},
TestCase {
arg: "http://server:8080//",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::from_string("empty or root path is not supported in URL endpoint")),
expected_err: Some(Error::other("empty or root path is not supported in URL endpoint")),
},
TestCase {
arg: "http://server:8080/",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::from_string("empty or root path is not supported in URL endpoint")),
expected_err: Some(Error::other("empty or root path is not supported in URL endpoint")),
},
TestCase {
arg: "192.168.1.210:9000",
expected_endpoint: None,
expected_type: None,
expected_err: Some(Error::from_string("invalid URL endpoint format: missing scheme http or https")),
expected_err: Some(Error::other("invalid URL endpoint format: missing scheme http or https")),
},
];

View File

@@ -1,11 +1,11 @@
use std::io::{self, ErrorKind};
// use crate::quorum::CheckErrorFn;
use std::hash::{Hash, Hasher};
use std::io::{self};
use std::path::PathBuf;
use tracing::error;
use crate::quorum::CheckErrorFn;
use crate::utils::ERROR_TYPE_MASK;
use common::error::{Error, Result};
pub type Error = DiskError;
pub type Result<T> = core::result::Result<T, Error>;
// DiskError == StorageErr
#[derive(Debug, thiserror::Error)]
@@ -91,6 +91,9 @@ pub enum DiskError {
#[error("file is corrupted")]
FileCorrupt,
#[error("short write")]
ShortWrite,
#[error("bit-rot hash algorithm is invalid")]
BitrotHashAlgoInvalid,
@@ -111,58 +114,238 @@ pub enum DiskError {
#[error("No healing is required")]
NoHealRequired,
#[error("method not allowed")]
MethodNotAllowed,
#[error("erasure write quorum")]
ErasureWriteQuorum,
#[error("erasure read quorum")]
ErasureReadQuorum,
#[error("io error: {0}")]
Io(io::Error),
}
impl DiskError {
/// Checks if the given array of errors contains fatal disk errors.
/// If all errors are of the same fatal disk error type, returns the corresponding error.
/// Otherwise, returns Ok.
///
/// # Parameters
/// - `errs`: A slice of optional errors.
///
/// # Returns
/// If all errors are of the same fatal disk error type, returns the corresponding error.
/// Otherwise, returns Ok.
pub fn check_disk_fatal_errs(errs: &[Option<Error>]) -> Result<()> {
if DiskError::UnsupportedDisk.count_errs(errs) == errs.len() {
return Err(DiskError::UnsupportedDisk.into());
pub fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
DiskError::Io(std::io::Error::other(error))
}
pub fn is_all_not_found(errs: &[Option<DiskError>]) -> bool {
for err in errs.iter() {
if let Some(err) = err {
if err == &DiskError::FileNotFound || err == &DiskError::FileVersionNotFound {
continue;
}
return false;
}
return false;
}
if DiskError::FileAccessDenied.count_errs(errs) == errs.len() {
return Err(DiskError::FileAccessDenied.into());
!errs.is_empty()
}
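is_all_not_found holds only when every slot carries a not-found error and the slice is non-empty; for instance:

let errs = vec![Some(DiskError::FileNotFound), Some(DiskError::FileVersionNotFound)];
assert!(DiskError::is_all_not_found(&errs));
// A healthy disk (None) or any other error breaks the property, as does an empty slice.
assert!(!DiskError::is_all_not_found(&[Some(DiskError::FileNotFound), None]));
assert!(!DiskError::is_all_not_found(&[]));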
pub fn is_err_object_not_found(err: &DiskError) -> bool {
matches!(err, &DiskError::FileNotFound) || matches!(err, &DiskError::VolumeNotFound)
}
pub fn is_err_version_not_found(err: &DiskError) -> bool {
matches!(err, &DiskError::FileVersionNotFound)
}
// /// If all errors are of the same fatal disk error type, returns the corresponding error.
// /// Otherwise, returns Ok.
// pub fn check_disk_fatal_errs(errs: &[Option<Error>]) -> Result<()> {
// if DiskError::UnsupportedDisk.count_errs(errs) == errs.len() {
// return Err(DiskError::UnsupportedDisk.into());
// }
// if DiskError::FileAccessDenied.count_errs(errs) == errs.len() {
// return Err(DiskError::FileAccessDenied.into());
// }
// if DiskError::DiskNotDir.count_errs(errs) == errs.len() {
// return Err(DiskError::DiskNotDir.into());
// }
// Ok(())
// }
// pub fn count_errs(&self, errs: &[Option<Error>]) -> usize {
// errs.iter()
// .filter(|&err| match err {
// None => false,
// Some(e) => self.is(e),
// })
// .count()
// }
// pub fn quorum_unformatted_disks(errs: &[Option<Error>]) -> bool {
// DiskError::UnformattedDisk.count_errs(errs) > (errs.len() / 2)
// }
// pub fn should_init_erasure_disks(errs: &[Option<Error>]) -> bool {
// DiskError::UnformattedDisk.count_errs(errs) == errs.len()
// }
// // Check if the error is a disk error
// pub fn is(&self, err: &DiskError) -> bool {
// if let Some(e) = err.downcast_ref::<DiskError>() {
// e == self
// } else {
// false
// }
// }
}
impl From<rustfs_filemeta::Error> for DiskError {
fn from(e: rustfs_filemeta::Error) -> Self {
match e {
rustfs_filemeta::Error::Io(e) => DiskError::other(e),
rustfs_filemeta::Error::FileNotFound => DiskError::FileNotFound,
rustfs_filemeta::Error::FileVersionNotFound => DiskError::FileVersionNotFound,
rustfs_filemeta::Error::FileCorrupt => DiskError::FileCorrupt,
rustfs_filemeta::Error::MethodNotAllowed => DiskError::MethodNotAllowed,
e => DiskError::other(e),
}
}
}
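Not-found and corruption cases map to dedicated disk variants; anything unmapped is wrapped as an opaque Io error (wire code 0x24). For example:

assert_eq!(DiskError::from(rustfs_filemeta::Error::FileNotFound), DiskError::FileNotFound);
assert_eq!(DiskError::from(rustfs_filemeta::Error::VolumeNotFound).to_u32(), 0x24); // falls through to other()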
if DiskError::DiskNotDir.count_errs(errs) == errs.len() {
return Err(DiskError::DiskNotDir.into());
impl From<std::io::Error> for DiskError {
fn from(e: std::io::Error) -> Self {
e.downcast::<DiskError>().unwrap_or_else(DiskError::Io)
}
}
impl From<DiskError> for std::io::Error {
fn from(e: DiskError) -> Self {
match e {
DiskError::Io(io_error) => io_error,
e => std::io::Error::other(e),
}
Ok(())
}
}
pub fn count_errs(&self, errs: &[Option<Error>]) -> usize {
errs.iter()
.filter(|&err| match err {
None => false,
Some(e) => self.is(e),
})
.count()
impl From<tonic::Status> for DiskError {
fn from(e: tonic::Status) -> Self {
DiskError::other(e.message().to_string())
}
}
pub fn quorum_unformatted_disks(errs: &[Option<Error>]) -> bool {
DiskError::UnformattedDisk.count_errs(errs) > (errs.len() / 2)
}
pub fn should_init_erasure_disks(errs: &[Option<Error>]) -> bool {
DiskError::UnformattedDisk.count_errs(errs) == errs.len()
}
/// Check if the error is a disk error
pub fn is(&self, err: &Error) -> bool {
if let Some(e) = err.downcast_ref::<DiskError>() {
e == self
impl From<protos::proto_gen::node_service::Error> for DiskError {
fn from(e: protos::proto_gen::node_service::Error) -> Self {
if let Some(err) = DiskError::from_u32(e.code) {
if matches!(err, DiskError::Io(_)) {
DiskError::other(e.error_info)
} else {
err
}
} else {
false
DiskError::other(e.error_info)
}
}
}
impl From<DiskError> for protos::proto_gen::node_service::Error {
fn from(e: DiskError) -> Self {
protos::proto_gen::node_service::Error {
code: e.to_u32(),
error_info: e.to_string(),
}
}
}
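Across the RPC boundary a variant travels as its to_u32 code plus display string; unit variants round-trip exactly, while Io is rebuilt from the transmitted message rather than the original io::Error. A sketch:

let wire: protos::proto_gen::node_service::Error = DiskError::VolumeNotFound.into();
assert_eq!(wire.code, DiskError::VolumeNotFound.to_u32());
assert_eq!(DiskError::from(wire), DiskError::VolumeNotFound);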
impl From<serde_json::Error> for DiskError {
fn from(e: serde_json::Error) -> Self {
DiskError::other(e)
}
}
impl From<rmp_serde::encode::Error> for DiskError {
fn from(e: rmp_serde::encode::Error) -> Self {
DiskError::other(e)
}
}
impl From<rmp::encode::ValueWriteError> for DiskError {
fn from(e: rmp::encode::ValueWriteError) -> Self {
DiskError::other(e)
}
}
impl From<rmp::decode::ValueReadError> for DiskError {
fn from(e: rmp::decode::ValueReadError) -> Self {
DiskError::other(e)
}
}
impl From<std::string::FromUtf8Error> for DiskError {
fn from(e: std::string::FromUtf8Error) -> Self {
DiskError::other(e)
}
}
impl From<rmp::decode::NumValueReadError> for DiskError {
fn from(e: rmp::decode::NumValueReadError) -> Self {
DiskError::other(e)
}
}
impl From<tokio::task::JoinError> for DiskError {
fn from(e: tokio::task::JoinError) -> Self {
DiskError::other(e)
}
}
impl Clone for DiskError {
fn clone(&self) -> Self {
match self {
DiskError::Io(io_error) => DiskError::Io(std::io::Error::new(io_error.kind(), io_error.to_string())),
DiskError::MaxVersionsExceeded => DiskError::MaxVersionsExceeded,
DiskError::Unexpected => DiskError::Unexpected,
DiskError::CorruptedFormat => DiskError::CorruptedFormat,
DiskError::CorruptedBackend => DiskError::CorruptedBackend,
DiskError::UnformattedDisk => DiskError::UnformattedDisk,
DiskError::InconsistentDisk => DiskError::InconsistentDisk,
DiskError::UnsupportedDisk => DiskError::UnsupportedDisk,
DiskError::DiskFull => DiskError::DiskFull,
DiskError::DiskNotDir => DiskError::DiskNotDir,
DiskError::DiskNotFound => DiskError::DiskNotFound,
DiskError::DiskOngoingReq => DiskError::DiskOngoingReq,
DiskError::DriveIsRoot => DiskError::DriveIsRoot,
DiskError::FaultyRemoteDisk => DiskError::FaultyRemoteDisk,
DiskError::FaultyDisk => DiskError::FaultyDisk,
DiskError::DiskAccessDenied => DiskError::DiskAccessDenied,
DiskError::FileNotFound => DiskError::FileNotFound,
DiskError::FileVersionNotFound => DiskError::FileVersionNotFound,
DiskError::TooManyOpenFiles => DiskError::TooManyOpenFiles,
DiskError::FileNameTooLong => DiskError::FileNameTooLong,
DiskError::VolumeExists => DiskError::VolumeExists,
DiskError::IsNotRegular => DiskError::IsNotRegular,
DiskError::PathNotFound => DiskError::PathNotFound,
DiskError::VolumeNotFound => DiskError::VolumeNotFound,
DiskError::VolumeNotEmpty => DiskError::VolumeNotEmpty,
DiskError::VolumeAccessDenied => DiskError::VolumeAccessDenied,
DiskError::FileAccessDenied => DiskError::FileAccessDenied,
DiskError::FileCorrupt => DiskError::FileCorrupt,
DiskError::BitrotHashAlgoInvalid => DiskError::BitrotHashAlgoInvalid,
DiskError::CrossDeviceLink => DiskError::CrossDeviceLink,
DiskError::LessData => DiskError::LessData,
DiskError::MoreData => DiskError::MoreData,
DiskError::OutdatedXLMeta => DiskError::OutdatedXLMeta,
DiskError::PartMissingOrCorrupt => DiskError::PartMissingOrCorrupt,
DiskError::NoHealRequired => DiskError::NoHealRequired,
DiskError::MethodNotAllowed => DiskError::MethodNotAllowed,
DiskError::ErasureWriteQuorum => DiskError::ErasureWriteQuorum,
DiskError::ErasureReadQuorum => DiskError::ErasureReadQuorum,
DiskError::ShortWrite => DiskError::ShortWrite,
}
}
}
@@ -204,11 +387,16 @@ impl DiskError {
DiskError::OutdatedXLMeta => 0x20,
DiskError::PartMissingOrCorrupt => 0x21,
DiskError::NoHealRequired => 0x22,
DiskError::MethodNotAllowed => 0x23,
DiskError::Io(_) => 0x24,
DiskError::ErasureWriteQuorum => 0x25,
DiskError::ErasureReadQuorum => 0x26,
DiskError::ShortWrite => 0x27,
}
}
pub fn from_u32(error: u32) -> Option<Self> {
match error & ERROR_TYPE_MASK {
match error {
0x01 => Some(DiskError::MaxVersionsExceeded),
0x02 => Some(DiskError::Unexpected),
0x03 => Some(DiskError::CorruptedFormat),
@@ -243,6 +431,11 @@ impl DiskError {
0x20 => Some(DiskError::OutdatedXLMeta),
0x21 => Some(DiskError::PartMissingOrCorrupt),
0x22 => Some(DiskError::NoHealRequired),
0x23 => Some(DiskError::MethodNotAllowed),
0x24 => Some(DiskError::Io(std::io::Error::other(String::new()))),
0x25 => Some(DiskError::ErasureWriteQuorum),
0x26 => Some(DiskError::ErasureReadQuorum),
0x27 => Some(DiskError::ShortWrite),
_ => None,
}
}
@@ -250,101 +443,116 @@ impl DiskError {
impl PartialEq for DiskError {
fn eq(&self, other: &Self) -> bool {
core::mem::discriminant(self) == core::mem::discriminant(other)
match (self, other) {
(DiskError::Io(e1), DiskError::Io(e2)) => e1.kind() == e2.kind() && e1.to_string() == e2.to_string(),
_ => self.to_u32() == other.to_u32(),
}
}
}
impl CheckErrorFn for DiskError {
fn is(&self, e: &Error) -> bool {
self.is(e)
impl Eq for DiskError {}
impl Hash for DiskError {
fn hash<H: Hasher>(&self, state: &mut H) {
match self {
DiskError::Io(e) => e.to_string().hash(state),
_ => self.to_u32().hash(state),
}
}
}
pub fn clone_disk_err(e: &DiskError) -> Error {
match e {
DiskError::MaxVersionsExceeded => Error::new(DiskError::MaxVersionsExceeded),
DiskError::Unexpected => Error::new(DiskError::Unexpected),
DiskError::CorruptedFormat => Error::new(DiskError::CorruptedFormat),
DiskError::CorruptedBackend => Error::new(DiskError::CorruptedBackend),
DiskError::UnformattedDisk => Error::new(DiskError::UnformattedDisk),
DiskError::InconsistentDisk => Error::new(DiskError::InconsistentDisk),
DiskError::UnsupportedDisk => Error::new(DiskError::UnsupportedDisk),
DiskError::DiskFull => Error::new(DiskError::DiskFull),
DiskError::DiskNotDir => Error::new(DiskError::DiskNotDir),
DiskError::DiskNotFound => Error::new(DiskError::DiskNotFound),
DiskError::DiskOngoingReq => Error::new(DiskError::DiskOngoingReq),
DiskError::DriveIsRoot => Error::new(DiskError::DriveIsRoot),
DiskError::FaultyRemoteDisk => Error::new(DiskError::FaultyRemoteDisk),
DiskError::FaultyDisk => Error::new(DiskError::FaultyDisk),
DiskError::DiskAccessDenied => Error::new(DiskError::DiskAccessDenied),
DiskError::FileNotFound => Error::new(DiskError::FileNotFound),
DiskError::FileVersionNotFound => Error::new(DiskError::FileVersionNotFound),
DiskError::TooManyOpenFiles => Error::new(DiskError::TooManyOpenFiles),
DiskError::FileNameTooLong => Error::new(DiskError::FileNameTooLong),
DiskError::VolumeExists => Error::new(DiskError::VolumeExists),
DiskError::IsNotRegular => Error::new(DiskError::IsNotRegular),
DiskError::PathNotFound => Error::new(DiskError::PathNotFound),
DiskError::VolumeNotFound => Error::new(DiskError::VolumeNotFound),
DiskError::VolumeNotEmpty => Error::new(DiskError::VolumeNotEmpty),
DiskError::VolumeAccessDenied => Error::new(DiskError::VolumeAccessDenied),
DiskError::FileAccessDenied => Error::new(DiskError::FileAccessDenied),
DiskError::FileCorrupt => Error::new(DiskError::FileCorrupt),
DiskError::BitrotHashAlgoInvalid => Error::new(DiskError::BitrotHashAlgoInvalid),
DiskError::CrossDeviceLink => Error::new(DiskError::CrossDeviceLink),
DiskError::LessData => Error::new(DiskError::LessData),
DiskError::MoreData => Error::new(DiskError::MoreData),
DiskError::OutdatedXLMeta => Error::new(DiskError::OutdatedXLMeta),
DiskError::PartMissingOrCorrupt => Error::new(DiskError::PartMissingOrCorrupt),
DiskError::NoHealRequired => Error::new(DiskError::NoHealRequired),
}
}
// impl CheckErrorFn for DiskError {
// fn is(&self, e: &DiskError) -> bool {
pub fn os_err_to_file_err(e: io::Error) -> Error {
match e.kind() {
ErrorKind::NotFound => Error::new(DiskError::FileNotFound),
ErrorKind::PermissionDenied => Error::new(DiskError::FileAccessDenied),
// io::ErrorKind::ConnectionRefused => todo!(),
// io::ErrorKind::ConnectionReset => todo!(),
// io::ErrorKind::HostUnreachable => todo!(),
// io::ErrorKind::NetworkUnreachable => todo!(),
// io::ErrorKind::ConnectionAborted => todo!(),
// io::ErrorKind::NotConnected => todo!(),
// io::ErrorKind::AddrInUse => todo!(),
// io::ErrorKind::AddrNotAvailable => todo!(),
// io::ErrorKind::NetworkDown => todo!(),
// io::ErrorKind::BrokenPipe => todo!(),
// io::ErrorKind::AlreadyExists => todo!(),
// io::ErrorKind::WouldBlock => todo!(),
// io::ErrorKind::NotADirectory => DiskError::FileNotFound,
// io::ErrorKind::IsADirectory => DiskError::FileNotFound,
// io::ErrorKind::DirectoryNotEmpty => DiskError::VolumeNotEmpty,
// io::ErrorKind::ReadOnlyFilesystem => todo!(),
// io::ErrorKind::FilesystemLoop => todo!(),
// io::ErrorKind::StaleNetworkFileHandle => todo!(),
// io::ErrorKind::InvalidInput => todo!(),
// io::ErrorKind::InvalidData => todo!(),
// io::ErrorKind::TimedOut => todo!(),
// io::ErrorKind::WriteZero => todo!(),
// io::ErrorKind::StorageFull => DiskError::DiskFull,
// io::ErrorKind::NotSeekable => todo!(),
// io::ErrorKind::FilesystemQuotaExceeded => todo!(),
// io::ErrorKind::FileTooLarge => todo!(),
// io::ErrorKind::ResourceBusy => todo!(),
// io::ErrorKind::ExecutableFileBusy => todo!(),
// io::ErrorKind::Deadlock => todo!(),
// io::ErrorKind::CrossesDevices => todo!(),
// io::ErrorKind::TooManyLinks =>DiskError::TooManyOpenFiles,
// io::ErrorKind::InvalidFilename => todo!(),
// io::ErrorKind::ArgumentListTooLong => todo!(),
// io::ErrorKind::Interrupted => todo!(),
// io::ErrorKind::Unsupported => todo!(),
// io::ErrorKind::UnexpectedEof => todo!(),
// io::ErrorKind::OutOfMemory => todo!(),
// io::ErrorKind::Other => todo!(),
// TODO: map the unsupported kinds to strings
_ => Error::new(e),
}
}
// }
// }
// pub fn clone_disk_err(e: &DiskError) -> Error {
// match e {
// DiskError::MaxVersionsExceeded => DiskError::MaxVersionsExceeded,
// DiskError::Unexpected => DiskError::Unexpected,
// DiskError::CorruptedFormat => DiskError::CorruptedFormat,
// DiskError::CorruptedBackend => DiskError::CorruptedBackend,
// DiskError::UnformattedDisk => DiskError::UnformattedDisk,
// DiskError::InconsistentDisk => DiskError::InconsistentDisk,
// DiskError::UnsupportedDisk => DiskError::UnsupportedDisk,
// DiskError::DiskFull => DiskError::DiskFull,
// DiskError::DiskNotDir => DiskError::DiskNotDir,
// DiskError::DiskNotFound => DiskError::DiskNotFound,
// DiskError::DiskOngoingReq => DiskError::DiskOngoingReq,
// DiskError::DriveIsRoot => DiskError::DriveIsRoot,
// DiskError::FaultyRemoteDisk => DiskError::FaultyRemoteDisk,
// DiskError::FaultyDisk => DiskError::FaultyDisk,
// DiskError::DiskAccessDenied => DiskError::DiskAccessDenied,
// DiskError::FileNotFound => DiskError::FileNotFound,
// DiskError::FileVersionNotFound => DiskError::FileVersionNotFound,
// DiskError::TooManyOpenFiles => DiskError::TooManyOpenFiles,
// DiskError::FileNameTooLong => DiskError::FileNameTooLong,
// DiskError::VolumeExists => DiskError::VolumeExists,
// DiskError::IsNotRegular => DiskError::IsNotRegular,
// DiskError::PathNotFound => DiskError::PathNotFound,
// DiskError::VolumeNotFound => DiskError::VolumeNotFound,
// DiskError::VolumeNotEmpty => DiskError::VolumeNotEmpty,
// DiskError::VolumeAccessDenied => DiskError::VolumeAccessDenied,
// DiskError::FileAccessDenied => DiskError::FileAccessDenied,
// DiskError::FileCorrupt => DiskError::FileCorrupt,
// DiskError::BitrotHashAlgoInvalid => DiskError::BitrotHashAlgoInvalid,
// DiskError::CrossDeviceLink => DiskError::CrossDeviceLink,
// DiskError::LessData => DiskError::LessData,
// DiskError::MoreData => DiskError::MoreData,
// DiskError::OutdatedXLMeta => DiskError::OutdatedXLMeta,
// DiskError::PartMissingOrCorrupt => DiskError::PartMissingOrCorrupt,
// DiskError::NoHealRequired => DiskError::NoHealRequired,
// DiskError::Other(s) => DiskError::Other(s.clone()),
// }
// }
// pub fn os_err_to_file_err(e: io::Error) -> Error {
// match e.kind() {
// ErrorKind::NotFound => Error::new(DiskError::FileNotFound),
// ErrorKind::PermissionDenied => Error::new(DiskError::FileAccessDenied),
// // io::ErrorKind::ConnectionRefused => todo!(),
// // io::ErrorKind::ConnectionReset => todo!(),
// // io::ErrorKind::HostUnreachable => todo!(),
// // io::ErrorKind::NetworkUnreachable => todo!(),
// // io::ErrorKind::ConnectionAborted => todo!(),
// // io::ErrorKind::NotConnected => todo!(),
// // io::ErrorKind::AddrInUse => todo!(),
// // io::ErrorKind::AddrNotAvailable => todo!(),
// // io::ErrorKind::NetworkDown => todo!(),
// // io::ErrorKind::BrokenPipe => todo!(),
// // io::ErrorKind::AlreadyExists => todo!(),
// // io::ErrorKind::WouldBlock => todo!(),
// // io::ErrorKind::NotADirectory => DiskError::FileNotFound,
// // io::ErrorKind::IsADirectory => DiskError::FileNotFound,
// // io::ErrorKind::DirectoryNotEmpty => DiskError::VolumeNotEmpty,
// // io::ErrorKind::ReadOnlyFilesystem => todo!(),
// // io::ErrorKind::FilesystemLoop => todo!(),
// // io::ErrorKind::StaleNetworkFileHandle => todo!(),
// // io::ErrorKind::InvalidInput => todo!(),
// // io::ErrorKind::InvalidData => todo!(),
// // io::ErrorKind::TimedOut => todo!(),
// // io::ErrorKind::WriteZero => todo!(),
// // io::ErrorKind::StorageFull => DiskError::DiskFull,
// // io::ErrorKind::NotSeekable => todo!(),
// // io::ErrorKind::FilesystemQuotaExceeded => todo!(),
// // io::ErrorKind::FileTooLarge => todo!(),
// // io::ErrorKind::ResourceBusy => todo!(),
// // io::ErrorKind::ExecutableFileBusy => todo!(),
// // io::ErrorKind::Deadlock => todo!(),
// // io::ErrorKind::CrossesDevices => todo!(),
// // io::ErrorKind::TooManyLinks =>DiskError::TooManyOpenFiles,
// // io::ErrorKind::InvalidFilename => todo!(),
// // io::ErrorKind::ArgumentListTooLong => todo!(),
// // io::ErrorKind::Interrupted => todo!(),
// // io::ErrorKind::Unsupported => todo!(),
// // io::ErrorKind::UnexpectedEof => todo!(),
// // io::ErrorKind::OutOfMemory => todo!(),
// // io::ErrorKind::Other => todo!(),
// // TODO: map the unsupported kinds to strings
// _ => Error::new(e),
// }
// }
#[derive(Debug, thiserror::Error)]
pub struct FileAccessDeniedWithContext {
@@ -359,235 +567,235 @@ impl std::fmt::Display for FileAccessDeniedWithContext {
}
}
pub fn is_unformatted_disk(err: &Error) -> bool {
matches!(err.downcast_ref::<DiskError>(), Some(DiskError::UnformattedDisk))
}
// pub fn is_unformatted_disk(err: &Error) -> bool {
// matches!(err.downcast_ref::<DiskError>(), Some(DiskError::UnformattedDisk))
// }
pub fn is_err_file_not_found(err: &Error) -> bool {
if let Some(ioerr) = err.downcast_ref::<io::Error>() {
return ioerr.kind() == ErrorKind::NotFound;
}
// pub fn is_err_file_not_found(err: &Error) -> bool {
// if let Some(ioerr) = err.downcast_ref::<io::Error>() {
// return ioerr.kind() == ErrorKind::NotFound;
// }
matches!(err.downcast_ref::<DiskError>(), Some(DiskError::FileNotFound))
}
// matches!(err.downcast_ref::<DiskError>(), Some(DiskError::FileNotFound))
// }
pub fn is_err_file_version_not_found(err: &Error) -> bool {
matches!(err.downcast_ref::<DiskError>(), Some(DiskError::FileVersionNotFound))
}
// pub fn is_err_file_version_not_found(err: &Error) -> bool {
// matches!(err.downcast_ref::<DiskError>(), Some(DiskError::FileVersionNotFound))
// }
pub fn is_err_volume_not_found(err: &Error) -> bool {
matches!(err.downcast_ref::<DiskError>(), Some(DiskError::VolumeNotFound))
}
// pub fn is_err_volume_not_found(err: &Error) -> bool {
// matches!(err.downcast_ref::<DiskError>(), Some(DiskError::VolumeNotFound))
// }
pub fn is_err_eof(err: &Error) -> bool {
if let Some(ioerr) = err.downcast_ref::<io::Error>() {
return ioerr.kind() == ErrorKind::UnexpectedEof;
}
false
}
// pub fn is_err_eof(err: &Error) -> bool {
// if let Some(ioerr) = err.downcast_ref::<io::Error>() {
// return ioerr.kind() == ErrorKind::UnexpectedEof;
// }
// false
// }
pub fn is_sys_err_no_space(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 28;
}
false
}
// pub fn is_sys_err_no_space(e: &io::Error) -> bool {
// if let Some(no) = e.raw_os_error() {
// return no == 28;
// }
// false
// }
pub fn is_sys_err_invalid_arg(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 22;
}
false
}
// pub fn is_sys_err_invalid_arg(e: &io::Error) -> bool {
// if let Some(no) = e.raw_os_error() {
// return no == 22;
// }
// false
// }
pub fn is_sys_err_io(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 5;
}
false
}
// pub fn is_sys_err_io(e: &io::Error) -> bool {
// if let Some(no) = e.raw_os_error() {
// return no == 5;
// }
// false
// }
pub fn is_sys_err_is_dir(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 21;
}
false
}
// pub fn is_sys_err_is_dir(e: &io::Error) -> bool {
// if let Some(no) = e.raw_os_error() {
// return no == 21;
// }
// false
// }
pub fn is_sys_err_not_dir(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 20;
}
false
}
// pub fn is_sys_err_not_dir(e: &io::Error) -> bool {
// if let Some(no) = e.raw_os_error() {
// return no == 20;
// }
// false
// }
pub fn is_sys_err_too_long(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 63;
}
false
}
// pub fn is_sys_err_too_long(e: &io::Error) -> bool {
// if let Some(no) = e.raw_os_error() {
// return no == 63;
// }
// false
// }
pub fn is_sys_err_too_many_symlinks(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 62;
}
false
}
// pub fn is_sys_err_too_many_symlinks(e: &io::Error) -> bool {
// if let Some(no) = e.raw_os_error() {
// return no == 62;
// }
// false
// }
pub fn is_sys_err_not_empty(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
if no == 66 {
return true;
}
// pub fn is_sys_err_not_empty(e: &io::Error) -> bool {
// if let Some(no) = e.raw_os_error() {
// if no == 66 {
// return true;
// }
if cfg!(target_os = "solaris") && no == 17 {
return true;
}
// if cfg!(target_os = "solaris") && no == 17 {
// return true;
// }
if cfg!(target_os = "windows") && no == 145 {
return true;
}
}
false
}
// if cfg!(target_os = "windows") && no == 145 {
// return true;
// }
// }
// false
// }
pub fn is_sys_err_path_not_found(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
if cfg!(target_os = "windows") {
if no == 3 {
return true;
}
} else if no == 2 {
return true;
}
}
false
}
// pub fn is_sys_err_path_not_found(e: &io::Error) -> bool {
// if let Some(no) = e.raw_os_error() {
// if cfg!(target_os = "windows") {
// if no == 3 {
// return true;
// }
// } else if no == 2 {
// return true;
// }
// }
// false
// }
pub fn is_sys_err_handle_invalid(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
if cfg!(target_os = "windows") {
if no == 6 {
return true;
}
} else {
return false;
}
}
false
}
// pub fn is_sys_err_handle_invalid(e: &io::Error) -> bool {
// if let Some(no) = e.raw_os_error() {
// if cfg!(target_os = "windows") {
// if no == 6 {
// return true;
// }
// } else {
// return false;
// }
// }
// false
// }
pub fn is_sys_err_cross_device(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 18;
}
false
}
// pub fn is_sys_err_cross_device(e: &io::Error) -> bool {
// if let Some(no) = e.raw_os_error() {
// return no == 18;
// }
// false
// }
pub fn is_sys_err_too_many_files(e: &io::Error) -> bool {
if let Some(no) = e.raw_os_error() {
return no == 23 || no == 24;
}
false
}
// pub fn is_sys_err_too_many_files(e: &io::Error) -> bool {
// if let Some(no) = e.raw_os_error() {
// return no == 23 || no == 24;
// }
// false
// }
pub fn os_is_not_exist(e: &io::Error) -> bool {
e.kind() == ErrorKind::NotFound
}
// pub fn os_is_not_exist(e: &io::Error) -> bool {
// e.kind() == ErrorKind::NotFound
// }
pub fn os_is_permission(e: &io::Error) -> bool {
if e.kind() == ErrorKind::PermissionDenied {
return true;
}
if let Some(no) = e.raw_os_error() {
if no == 30 {
return true;
}
}
// pub fn os_is_permission(e: &io::Error) -> bool {
// if e.kind() == ErrorKind::PermissionDenied {
// return true;
// }
// if let Some(no) = e.raw_os_error() {
// if no == 30 {
// return true;
// }
// }
false
}
// false
// }
pub fn os_is_exist(e: &io::Error) -> bool {
e.kind() == ErrorKind::AlreadyExists
}
// pub fn os_is_exist(e: &io::Error) -> bool {
// e.kind() == ErrorKind::AlreadyExists
// }
// map_err_not_exists
pub fn map_err_not_exists(e: io::Error) -> Error {
if os_is_not_exist(&e) {
return Error::new(DiskError::VolumeNotEmpty);
} else if is_sys_err_io(&e) {
return Error::new(DiskError::FaultyDisk);
}
// // map_err_not_exists
// pub fn map_err_not_exists(e: io::Error) -> Error {
// if os_is_not_exist(&e) {
// return Error::new(DiskError::VolumeNotEmpty);
// } else if is_sys_err_io(&e) {
// return Error::new(DiskError::FaultyDisk);
// }
Error::new(e)
}
// Error::new(e)
// }
pub fn convert_access_error(e: io::Error, per_err: DiskError) -> Error {
if os_is_not_exist(&e) {
return Error::new(DiskError::VolumeNotEmpty);
} else if is_sys_err_io(&e) {
return Error::new(DiskError::FaultyDisk);
} else if os_is_permission(&e) {
return Error::new(per_err);
}
// pub fn convert_access_error(e: io::Error, per_err: DiskError) -> Error {
// if os_is_not_exist(&e) {
// return Error::new(DiskError::VolumeNotEmpty);
// } else if is_sys_err_io(&e) {
// return Error::new(DiskError::FaultyDisk);
// } else if os_is_permission(&e) {
// return Error::new(per_err);
// }
Error::new(e)
}
// Error::new(e)
// }
pub fn is_all_not_found(errs: &[Option<Error>]) -> bool {
for err in errs.iter() {
if let Some(err) = err {
if let Some(err) = err.downcast_ref::<DiskError>() {
match err {
DiskError::FileNotFound | DiskError::VolumeNotFound | &DiskError::FileVersionNotFound => {
continue;
}
_ => return false,
}
}
}
return false;
}
// pub fn is_all_not_found(errs: &[Option<Error>]) -> bool {
// for err in errs.iter() {
// if let Some(err) = err {
// if let Some(err) = err.downcast_ref::<DiskError>() {
// match err {
// DiskError::FileNotFound | DiskError::VolumeNotFound | &DiskError::FileVersionNotFound => {
// continue;
// }
// _ => return false,
// }
// }
// }
// return false;
// }
!errs.is_empty()
}
// !errs.is_empty()
// }
pub fn is_all_volume_not_found(errs: &[Option<Error>]) -> bool {
DiskError::VolumeNotFound.count_errs(errs) == errs.len()
}
// pub fn is_all_volume_not_found(errs: &[Option<Error>]) -> bool {
// DiskError::VolumeNotFound.count_errs(errs) == errs.len()
// }
pub fn is_all_buckets_not_found(errs: &[Option<Error>]) -> bool {
if errs.is_empty() {
return false;
}
let mut not_found_count = 0;
for err in errs.iter().flatten() {
match err.downcast_ref() {
Some(DiskError::VolumeNotFound) | Some(DiskError::DiskNotFound) => {
not_found_count += 1;
}
_ => {}
}
}
errs.len() == not_found_count
}
// pub fn is_all_buckets_not_found(errs: &[Option<Error>]) -> bool {
// if errs.is_empty() {
// return false;
// }
// let mut not_found_count = 0;
// for err in errs.iter().flatten() {
// match err.downcast_ref() {
// Some(DiskError::VolumeNotFound) | Some(DiskError::DiskNotFound) => {
// not_found_count += 1;
// }
// _ => {}
// }
// }
// errs.len() == not_found_count
// }
pub fn is_err_os_not_exist(err: &Error) -> bool {
if let Some(os_err) = err.downcast_ref::<io::Error>() {
os_is_not_exist(os_err)
} else {
false
}
}
// pub fn is_err_os_not_exist(err: &Error) -> bool {
// if let Some(os_err) = err.downcast_ref::<io::Error>() {
// os_is_not_exist(os_err)
// } else {
// false
// }
// }
pub fn is_err_os_disk_full(err: &Error) -> bool {
if let Some(os_err) = err.downcast_ref::<io::Error>() {
is_sys_err_no_space(os_err)
} else if let Some(e) = err.downcast_ref::<DiskError>() {
e == &DiskError::DiskFull
} else {
false
}
}
// pub fn is_err_os_disk_full(err: &Error) -> bool {
// if let Some(os_err) = err.downcast_ref::<io::Error>() {
// is_sys_err_no_space(os_err)
// } else if let Some(e) = err.downcast_ref::<DiskError>() {
// e == &DiskError::DiskFull
// } else {
// false
// }
// }

View File

@@ -0,0 +1,440 @@
use super::error::DiskError;
pub fn to_file_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::NotFound => DiskError::FileNotFound.into(),
std::io::ErrorKind::PermissionDenied => DiskError::FileAccessDenied.into(),
std::io::ErrorKind::IsADirectory => DiskError::IsNotRegular.into(),
std::io::ErrorKind::NotADirectory => DiskError::FileAccessDenied.into(),
std::io::ErrorKind::DirectoryNotEmpty => DiskError::FileAccessDenied.into(),
std::io::ErrorKind::UnexpectedEof => DiskError::FaultyDisk.into(),
std::io::ErrorKind::TooManyLinks => DiskError::TooManyOpenFiles.into(),
std::io::ErrorKind::InvalidInput => DiskError::FileNotFound.into(),
std::io::ErrorKind::InvalidData => DiskError::FileCorrupt.into(),
std::io::ErrorKind::StorageFull => DiskError::DiskFull.into(),
_ => io_err,
}
}
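A usage sketch (read_xl_meta is an illustrative name, not part of this commit): raw io::Error values from file operations get normalized into the DiskError taxonomy at the disk API boundary.

fn read_xl_meta(path: &std::path::Path) -> std::io::Result<Vec<u8>> {
    // Any NotFound here surfaces as DiskError::FileNotFound, StorageFull
    // as DiskError::DiskFull, and so on per the table above.
    std::fs::read(path).map_err(to_file_error)
}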
pub fn to_volume_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::NotFound => DiskError::VolumeNotFound.into(),
std::io::ErrorKind::PermissionDenied => DiskError::DiskAccessDenied.into(),
std::io::ErrorKind::DirectoryNotEmpty => DiskError::VolumeNotEmpty.into(),
std::io::ErrorKind::NotADirectory => DiskError::IsNotRegular.into(),
std::io::ErrorKind::Other => match io_err.downcast::<DiskError>() {
Ok(err) => match err {
DiskError::FileNotFound => DiskError::VolumeNotFound.into(),
DiskError::FileAccessDenied => DiskError::DiskAccessDenied.into(),
err => err.into(),
},
Err(err) => to_file_error(err),
},
_ => to_file_error(io_err),
}
}
pub fn to_disk_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::NotFound => DiskError::DiskNotFound.into(),
std::io::ErrorKind::PermissionDenied => DiskError::DiskAccessDenied.into(),
std::io::ErrorKind::Other => match io_err.downcast::<DiskError>() {
Ok(err) => match err {
DiskError::FileNotFound => DiskError::DiskNotFound.into(),
DiskError::VolumeNotFound => DiskError::DiskNotFound.into(),
DiskError::FileAccessDenied => DiskError::DiskAccessDenied.into(),
DiskError::VolumeAccessDenied => DiskError::DiskAccessDenied.into(),
err => err.into(),
},
Err(err) => to_volume_error(err),
},
_ => to_volume_error(io_err),
}
}
// Only for errors originating from filesystem operations
pub fn to_access_error(io_err: std::io::Error, per_err: DiskError) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::PermissionDenied => per_err.into(),
std::io::ErrorKind::NotADirectory => per_err.into(),
std::io::ErrorKind::NotFound => DiskError::VolumeNotFound.into(),
std::io::ErrorKind::UnexpectedEof => DiskError::FaultyDisk.into(),
std::io::ErrorKind::Other => match io_err.downcast::<DiskError>() {
Ok(err) => match err {
DiskError::DiskAccessDenied => per_err.into(),
DiskError::FileAccessDenied => per_err.into(),
DiskError::FileNotFound => DiskError::VolumeNotFound.into(),
err => err.into(),
},
Err(err) => to_volume_error(err),
},
_ => to_volume_error(io_err),
}
}
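A sketch of how the per_err parameter is meant to be used (check_volume_access is hypothetical): the caller picks whichever access-denied variant fits its layer, and to_access_error substitutes it for PermissionDenied and NotADirectory.

fn check_volume_access(path: &std::path::Path) -> std::io::Result<std::fs::Metadata> {
    std::fs::metadata(path).map_err(|e| to_access_error(e, DiskError::VolumeAccessDenied))
}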
pub fn to_unformatted_disk_error(io_err: std::io::Error) -> std::io::Error {
match io_err.kind() {
std::io::ErrorKind::NotFound => DiskError::UnformattedDisk.into(),
std::io::ErrorKind::PermissionDenied => DiskError::DiskAccessDenied.into(),
std::io::ErrorKind::Other => match io_err.downcast::<DiskError>() {
Ok(err) => match err {
DiskError::FileNotFound => DiskError::UnformattedDisk.into(),
DiskError::DiskNotFound => DiskError::UnformattedDisk.into(),
DiskError::VolumeNotFound => DiskError::UnformattedDisk.into(),
DiskError::FileAccessDenied => DiskError::DiskAccessDenied.into(),
DiskError::DiskAccessDenied => DiskError::DiskAccessDenied.into(),
_ => DiskError::CorruptedBackend.into(),
},
Err(err) => to_disk_error(err),
},
_ => DiskError::CorruptedBackend.into(),
}
}
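These converters layer from fine to coarse: to_volume_error falls back to to_file_error, to_disk_error to to_volume_error, and to_unformatted_disk_error maps anything it cannot classify to CorruptedBackend. A hedged sketch of the intended call site (load_format and the format.json path are assumptions, not taken from this commit):

async fn load_format(disk_root: &std::path::Path) -> std::io::Result<Vec<u8>> {
    // A missing format file means the drive is merely unformatted,
    // not faulty, so the probe uses the most forgiving converter.
    tokio::fs::read(disk_root.join("format.json"))
        .await
        .map_err(to_unformatted_disk_error)
}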
#[cfg(test)]
mod tests {
use super::*;
use std::io::{Error as IoError, ErrorKind};
// Helper function to create IO errors with specific kinds
fn create_io_error(kind: ErrorKind) -> IoError {
IoError::new(kind, "test error")
}
// Helper function to create IO errors with DiskError as the source
fn create_io_error_with_disk_error(disk_error: DiskError) -> IoError {
IoError::other(disk_error)
}
// Helper function to check if an IoError contains a specific DiskError
fn contains_disk_error(io_error: IoError, expected: DiskError) -> bool {
if let Ok(disk_error) = io_error.downcast::<DiskError>() {
std::mem::discriminant(&disk_error) == std::mem::discriminant(&expected)
} else {
false
}
}
#[test]
fn test_to_file_error_basic_conversions() {
// Test NotFound -> FileNotFound
let result = to_file_error(create_io_error(ErrorKind::NotFound));
assert!(contains_disk_error(result, DiskError::FileNotFound));
// Test PermissionDenied -> FileAccessDenied
let result = to_file_error(create_io_error(ErrorKind::PermissionDenied));
assert!(contains_disk_error(result, DiskError::FileAccessDenied));
// Test IsADirectory -> IsNotRegular
let result = to_file_error(create_io_error(ErrorKind::IsADirectory));
assert!(contains_disk_error(result, DiskError::IsNotRegular));
// Test NotADirectory -> FileAccessDenied
let result = to_file_error(create_io_error(ErrorKind::NotADirectory));
assert!(contains_disk_error(result, DiskError::FileAccessDenied));
// Test DirectoryNotEmpty -> FileAccessDenied
let result = to_file_error(create_io_error(ErrorKind::DirectoryNotEmpty));
assert!(contains_disk_error(result, DiskError::FileAccessDenied));
// Test UnexpectedEof -> FaultyDisk
let result = to_file_error(create_io_error(ErrorKind::UnexpectedEof));
assert!(contains_disk_error(result, DiskError::FaultyDisk));
// Test TooManyLinks -> TooManyOpenFiles
#[cfg(unix)]
{
let result = to_file_error(create_io_error(ErrorKind::TooManyLinks));
assert!(contains_disk_error(result, DiskError::TooManyOpenFiles));
}
// Test InvalidInput -> FileNotFound
let result = to_file_error(create_io_error(ErrorKind::InvalidInput));
assert!(contains_disk_error(result, DiskError::FileNotFound));
// Test InvalidData -> FileCorrupt
let result = to_file_error(create_io_error(ErrorKind::InvalidData));
assert!(contains_disk_error(result, DiskError::FileCorrupt));
// Test StorageFull -> DiskFull
#[cfg(unix)]
{
let result = to_file_error(create_io_error(ErrorKind::StorageFull));
assert!(contains_disk_error(result, DiskError::DiskFull));
}
}
#[test]
fn test_to_file_error_passthrough_unknown() {
// Test that unknown error kinds are passed through unchanged
let original = create_io_error(ErrorKind::Interrupted);
let result = to_file_error(original);
assert_eq!(result.kind(), ErrorKind::Interrupted);
}
#[test]
fn test_to_volume_error_basic_conversions() {
// Test NotFound -> VolumeNotFound
let result = to_volume_error(create_io_error(ErrorKind::NotFound));
assert!(contains_disk_error(result, DiskError::VolumeNotFound));
// Test PermissionDenied -> DiskAccessDenied
let result = to_volume_error(create_io_error(ErrorKind::PermissionDenied));
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
// Test DirectoryNotEmpty -> VolumeNotEmpty
let result = to_volume_error(create_io_error(ErrorKind::DirectoryNotEmpty));
assert!(contains_disk_error(result, DiskError::VolumeNotEmpty));
// Test NotADirectory -> IsNotRegular
let result = to_volume_error(create_io_error(ErrorKind::NotADirectory));
assert!(contains_disk_error(result, DiskError::IsNotRegular));
}
#[test]
fn test_to_volume_error_other_with_disk_error() {
// Test Other error kind with FileNotFound DiskError -> VolumeNotFound
let io_error = create_io_error_with_disk_error(DiskError::FileNotFound);
let result = to_volume_error(io_error);
assert!(contains_disk_error(result, DiskError::VolumeNotFound));
// Test Other error kind with FileAccessDenied DiskError -> DiskAccessDenied
let io_error = create_io_error_with_disk_error(DiskError::FileAccessDenied);
let result = to_volume_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
// Test Other error kind with other DiskError -> passthrough
let io_error = create_io_error_with_disk_error(DiskError::DiskFull);
let result = to_volume_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskFull));
}
#[test]
fn test_to_volume_error_fallback_to_file_error() {
// Test fallback to to_file_error for unknown error kinds
let result = to_volume_error(create_io_error(ErrorKind::Interrupted));
assert_eq!(result.kind(), ErrorKind::Interrupted);
}
#[test]
fn test_to_disk_error_basic_conversions() {
// Test NotFound -> DiskNotFound
let result = to_disk_error(create_io_error(ErrorKind::NotFound));
assert!(contains_disk_error(result, DiskError::DiskNotFound));
// Test PermissionDenied -> DiskAccessDenied
let result = to_disk_error(create_io_error(ErrorKind::PermissionDenied));
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
}
#[test]
fn test_to_disk_error_other_with_disk_error() {
// Test Other error kind with FileNotFound DiskError -> DiskNotFound
let io_error = create_io_error_with_disk_error(DiskError::FileNotFound);
let result = to_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskNotFound));
// Test Other error kind with VolumeNotFound DiskError -> DiskNotFound
let io_error = create_io_error_with_disk_error(DiskError::VolumeNotFound);
let result = to_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskNotFound));
// Test Other error kind with FileAccessDenied DiskError -> DiskAccessDenied
let io_error = create_io_error_with_disk_error(DiskError::FileAccessDenied);
let result = to_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
// Test Other error kind with VolumeAccessDenied DiskError -> DiskAccessDenied
let io_error = create_io_error_with_disk_error(DiskError::VolumeAccessDenied);
let result = to_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
// Test Other error kind with other DiskError -> passthrough
let io_error = create_io_error_with_disk_error(DiskError::DiskFull);
let result = to_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskFull));
}
#[test]
fn test_to_disk_error_fallback_to_volume_error() {
// Test fallback to to_volume_error for unknown error kinds
let result = to_disk_error(create_io_error(ErrorKind::Interrupted));
assert_eq!(result.kind(), ErrorKind::Interrupted);
}
#[test]
fn test_to_access_error_basic_conversions() {
let permission_error = DiskError::FileAccessDenied;
// Test PermissionDenied -> specified permission error
let result = to_access_error(create_io_error(ErrorKind::PermissionDenied), permission_error);
assert!(contains_disk_error(result, DiskError::FileAccessDenied));
// Test NotADirectory -> specified permission error
let result = to_access_error(create_io_error(ErrorKind::NotADirectory), DiskError::FileAccessDenied);
assert!(contains_disk_error(result, DiskError::FileAccessDenied));
// Test NotFound -> VolumeNotFound
let result = to_access_error(create_io_error(ErrorKind::NotFound), DiskError::FileAccessDenied);
assert!(contains_disk_error(result, DiskError::VolumeNotFound));
// Test UnexpectedEof -> FaultyDisk
let result = to_access_error(create_io_error(ErrorKind::UnexpectedEof), DiskError::FileAccessDenied);
assert!(contains_disk_error(result, DiskError::FaultyDisk));
}
#[test]
fn test_to_access_error_other_with_disk_error() {
let permission_error = DiskError::VolumeAccessDenied;
// Test Other error kind with DiskAccessDenied -> specified permission error
let io_error = create_io_error_with_disk_error(DiskError::DiskAccessDenied);
let result = to_access_error(io_error, permission_error);
assert!(contains_disk_error(result, DiskError::VolumeAccessDenied));
// Test Other error kind with FileAccessDenied -> specified permission error
let io_error = create_io_error_with_disk_error(DiskError::FileAccessDenied);
let result = to_access_error(io_error, DiskError::VolumeAccessDenied);
assert!(contains_disk_error(result, DiskError::VolumeAccessDenied));
// Test Other error kind with FileNotFound -> VolumeNotFound
let io_error = create_io_error_with_disk_error(DiskError::FileNotFound);
let result = to_access_error(io_error, DiskError::VolumeAccessDenied);
assert!(contains_disk_error(result, DiskError::VolumeNotFound));
// Test Other error kind with other DiskError -> passthrough
let io_error = create_io_error_with_disk_error(DiskError::DiskFull);
let result = to_access_error(io_error, DiskError::VolumeAccessDenied);
assert!(contains_disk_error(result, DiskError::DiskFull));
}
#[test]
fn test_to_access_error_fallback_to_volume_error() {
let permission_error = DiskError::FileAccessDenied;
// Test fallback to to_volume_error for unknown error kinds
let result = to_access_error(create_io_error(ErrorKind::Interrupted), permission_error);
assert_eq!(result.kind(), ErrorKind::Interrupted);
}
#[test]
fn test_to_unformatted_disk_error_basic_conversions() {
// Test NotFound -> UnformattedDisk
let result = to_unformatted_disk_error(create_io_error(ErrorKind::NotFound));
assert!(contains_disk_error(result, DiskError::UnformattedDisk));
// Test PermissionDenied -> DiskAccessDenied
let result = to_unformatted_disk_error(create_io_error(ErrorKind::PermissionDenied));
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
}
#[test]
fn test_to_unformatted_disk_error_other_with_disk_error() {
// Test Other error kind with FileNotFound -> UnformattedDisk
let io_error = create_io_error_with_disk_error(DiskError::FileNotFound);
let result = to_unformatted_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::UnformattedDisk));
// Test Other error kind with DiskNotFound -> UnformattedDisk
let io_error = create_io_error_with_disk_error(DiskError::DiskNotFound);
let result = to_unformatted_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::UnformattedDisk));
// Test Other error kind with VolumeNotFound -> UnformattedDisk
let io_error = create_io_error_with_disk_error(DiskError::VolumeNotFound);
let result = to_unformatted_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::UnformattedDisk));
// Test Other error kind with FileAccessDenied -> DiskAccessDenied
let io_error = create_io_error_with_disk_error(DiskError::FileAccessDenied);
let result = to_unformatted_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
// Test Other error kind with DiskAccessDenied -> DiskAccessDenied
let io_error = create_io_error_with_disk_error(DiskError::DiskAccessDenied);
let result = to_unformatted_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::DiskAccessDenied));
// Test Other error kind with other DiskError -> CorruptedBackend
let io_error = create_io_error_with_disk_error(DiskError::DiskFull);
let result = to_unformatted_disk_error(io_error);
assert!(contains_disk_error(result, DiskError::CorruptedBackend));
}
#[test]
fn test_to_unformatted_disk_error_fallback_behavior() {
// Unmatched error kinds fall through to CorruptedBackend, so the
// result always downcasts to a DiskError
let result = to_unformatted_disk_error(create_io_error(ErrorKind::Interrupted));
assert!(result.downcast::<DiskError>().is_ok());
}
#[test]
fn test_error_chain_conversions() {
// Test complex error conversion chains
let original_error = create_io_error(ErrorKind::NotFound);
// Chain: NotFound -> FileNotFound (via to_file_error) -> VolumeNotFound (via to_volume_error)
let file_error = to_file_error(original_error);
let volume_error = to_volume_error(file_error);
assert!(contains_disk_error(volume_error, DiskError::VolumeNotFound));
}
#[test]
fn test_cross_platform_error_kinds() {
// Test error kinds that may not be available on all platforms
#[cfg(unix)]
{
let result = to_file_error(create_io_error(ErrorKind::TooManyLinks));
assert!(contains_disk_error(result, DiskError::TooManyOpenFiles));
}
#[cfg(unix)]
{
let result = to_file_error(create_io_error(ErrorKind::StorageFull));
assert!(contains_disk_error(result, DiskError::DiskFull));
}
}
#[test]
fn test_error_conversion_with_different_kinds() {
// Test multiple error kinds to ensure comprehensive coverage
let test_cases = vec![
(ErrorKind::NotFound, DiskError::FileNotFound),
(ErrorKind::PermissionDenied, DiskError::FileAccessDenied),
(ErrorKind::IsADirectory, DiskError::IsNotRegular),
(ErrorKind::InvalidData, DiskError::FileCorrupt),
];
for (kind, expected_disk_error) in test_cases {
let result = to_file_error(create_io_error(kind));
assert!(
contains_disk_error(result, expected_disk_error.clone()),
"Failed for ErrorKind::{:?} -> DiskError::{:?}",
kind,
expected_disk_error
);
}
}
#[test]
fn test_volume_error_conversion_chain() {
// Test volume error conversion with different input types
let test_cases = vec![
(ErrorKind::NotFound, DiskError::VolumeNotFound),
(ErrorKind::PermissionDenied, DiskError::DiskAccessDenied),
(ErrorKind::DirectoryNotEmpty, DiskError::VolumeNotEmpty),
];
for (kind, expected_disk_error) in test_cases {
let result = to_volume_error(create_io_error(kind));
assert!(
contains_disk_error(result, expected_disk_error.clone()),
"Failed for ErrorKind::{:?} -> DiskError::{:?}",
kind,
expected_disk_error
);
}
}
}

View File

@@ -0,0 +1,170 @@
use super::error::Error;
pub static OBJECT_OP_IGNORED_ERRS: &[Error] = &[
Error::DiskNotFound,
Error::FaultyDisk,
Error::FaultyRemoteDisk,
Error::DiskAccessDenied,
Error::DiskOngoingReq,
Error::UnformattedDisk,
];
pub static BUCKET_OP_IGNORED_ERRS: &[Error] = &[
Error::DiskNotFound,
Error::FaultyDisk,
Error::FaultyRemoteDisk,
Error::DiskAccessDenied,
Error::UnformattedDisk,
];
pub static BASE_IGNORED_ERRS: &[Error] = &[Error::DiskNotFound, Error::FaultyDisk, Error::FaultyRemoteDisk];
pub fn reduce_write_quorum_errs(errors: &[Option<Error>], ignored_errs: &[Error], quorum: usize) -> Option<Error> {
reduce_quorum_errs(errors, ignored_errs, quorum, Error::ErasureWriteQuorum)
}
pub fn reduce_read_quorum_errs(errors: &[Option<Error>], ignored_errs: &[Error], quorum: usize) -> Option<Error> {
reduce_quorum_errs(errors, ignored_errs, quorum, Error::ErasureReadQuorum)
}
pub fn reduce_quorum_errs(errors: &[Option<Error>], ignored_errs: &[Error], quorum: usize, quorum_err: Error) -> Option<Error> {
let (max_count, err) = reduce_errs(errors, ignored_errs);
if max_count >= quorum {
err
} else {
Some(quorum_err)
}
}
pub fn reduce_errs(errors: &[Option<Error>], ignored_errs: &[Error]) -> (usize, Option<Error>) {
let nil_error = Error::other("nil".to_string());
let err_counts =
errors
.iter()
.map(|e| e.as_ref().unwrap_or(&nil_error).clone())
.fold(std::collections::HashMap::new(), |mut acc, e| {
if is_ignored_err(ignored_errs, &e) {
return acc;
}
*acc.entry(e).or_insert(0) += 1;
acc
});
let (err, max_count) = err_counts
.into_iter()
.max_by(|(e1, c1), (e2, c2)| {
// Prefer the nil placeholder (i.e. no error) when counts tie
let count_cmp = c1.cmp(c2);
if count_cmp == std::cmp::Ordering::Equal {
match (e1.to_string().as_str(), e2.to_string().as_str()) {
("nil", _) => std::cmp::Ordering::Greater,
(_, "nil") => std::cmp::Ordering::Less,
(a, b) => a.cmp(&b),
}
} else {
count_cmp
}
})
.unwrap_or((nil_error.clone(), 0));
(max_count, if err == nil_error { None } else { Some(err) })
}
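A worked example of the reduction (editor's sketch; the counts assume Error's PartialEq matches Io errors by kind and message, as the tests below do): with four disks reporting [Io("a"), Io("a"), DiskNotFound, None] and DiskNotFound dropped via BASE_IGNORED_ERRS, the tally is Io("a"): 2 and nil: 1. A write quorum of 3 then fails and ErasureWriteQuorum is returned, while a quorum of 2 would return the agreed Io("a").

fn quorum_example() -> Option<Error> {
    let e = || Some(Error::Io(std::io::Error::other("a")));
    let errors = vec![e(), e(), Some(Error::DiskNotFound), None];
    reduce_write_quorum_errs(&errors, BASE_IGNORED_ERRS, 3)
}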
pub fn is_ignored_err(ignored_errs: &[Error], err: &Error) -> bool {
ignored_errs.iter().any(|e| e == err)
}
pub fn count_errs(errors: &[Option<Error>], err: &Error) -> usize {
errors.iter().filter(|&e| e.as_ref() == Some(err)).count()
}
pub fn is_all_buckets_not_found(errs: &[Option<Error>]) -> bool {
for err in errs.iter() {
if let Some(err) = err {
if err == &Error::DiskNotFound || err == &Error::VolumeNotFound {
continue;
}
return false;
}
return false;
}
!errs.is_empty()
}
#[cfg(test)]
mod tests {
use super::*;
fn err_io(msg: &str) -> Error {
Error::Io(std::io::Error::other(msg))
}
#[test]
fn test_reduce_errs_basic() {
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e1.clone()), Some(e2.clone()), None];
let ignored = vec![];
let (count, err) = reduce_errs(&errors, &ignored);
assert_eq!(count, 2);
assert_eq!(err, Some(e1));
}
#[test]
fn test_reduce_errs_ignored() {
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e2.clone()), Some(e1.clone()), Some(e2.clone()), None];
let ignored = vec![e2.clone()];
let (count, err) = reduce_errs(&errors, &ignored);
assert_eq!(count, 2);
assert_eq!(err, Some(e1));
}
#[test]
fn test_reduce_quorum_errs() {
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e1.clone()), Some(e2.clone()), None];
let ignored = vec![];
let quorum_err = Error::FaultyDisk;
// quorum = 2, should return e1
let res = reduce_quorum_errs(&errors, &ignored, 2, quorum_err.clone());
assert_eq!(res, Some(e1));
// quorum = 3, should return quorum error
let res = reduce_quorum_errs(&errors, &ignored, 3, quorum_err.clone());
assert_eq!(res, Some(quorum_err));
}
#[test]
fn test_count_errs() {
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e2.clone()), Some(e1.clone()), None];
assert_eq!(count_errs(&errors, &e1), 2);
assert_eq!(count_errs(&errors, &e2), 1);
}
#[test]
fn test_is_ignored_err() {
let e1 = err_io("a");
let e2 = err_io("b");
let ignored = vec![e1.clone()];
assert!(is_ignored_err(&ignored, &e1));
assert!(!is_ignored_err(&ignored, &e2));
}
#[test]
fn test_reduce_errs_nil_tiebreak() {
// The nil placeholder and e1 tie on count; the tie-break should prefer nil
let e1 = err_io("a");
let e2 = err_io("b");
let errors = vec![Some(e1.clone()), Some(e2.clone()), None, Some(e1.clone()), None]; // e1: 2, e2: 1, nil: 2
let ignored = vec![];
let (count, err) = reduce_errs(&errors, &ignored);
assert_eq!(count, 2);
assert_eq!(err, None); // None means the nil placeholder won the tie
}
}

View File

@@ -1,5 +1,5 @@
use super::error::{Error, Result};
use super::{error::DiskError, DiskInfo};
use common::error::{Error, Result};
use serde::{Deserialize, Serialize};
use serde_json::Error as JsonError;
use uuid::Uuid;
@@ -110,7 +110,7 @@ pub struct FormatV3 {
impl TryFrom<&[u8]> for FormatV3 {
type Error = JsonError;
fn try_from(data: &[u8]) -> Result<Self, JsonError> {
fn try_from(data: &[u8]) -> std::result::Result<Self, Self::Error> {
serde_json::from_slice(data)
}
}
@@ -118,7 +118,7 @@ impl TryFrom<&[u8]> for FormatV3 {
impl TryFrom<&str> for FormatV3 {
type Error = JsonError;
fn try_from(data: &str) -> Result<Self, JsonError> {
fn try_from(data: &str) -> std::result::Result<Self, Self::Error> {
serde_json::from_str(data)
}
}
@@ -155,7 +155,7 @@ impl FormatV3 {
self.erasure.sets.iter().map(|v| v.len()).sum()
}
pub fn to_json(&self) -> Result<String, JsonError> {
pub fn to_json(&self) -> std::result::Result<String, JsonError> {
serde_json::to_string(self)
}
@@ -169,7 +169,7 @@ impl FormatV3 {
return Err(Error::from(DiskError::DiskNotFound));
}
if disk_id == Uuid::max() {
return Err(Error::msg("disk offline"));
return Err(Error::other("disk offline"));
}
for (i, set) in self.erasure.sets.iter().enumerate() {
@@ -180,7 +180,7 @@ impl FormatV3 {
}
}
Err(Error::msg(format!("disk id not found {}", disk_id)))
Err(Error::other(format!("disk id not found {}", disk_id)))
}
pub fn check_other(&self, other: &FormatV3) -> Result<()> {
@@ -189,7 +189,7 @@ impl FormatV3 {
tmp.erasure.this = Uuid::nil();
if self.erasure.sets.len() != other.erasure.sets.len() {
return Err(Error::from_string(format!(
return Err(Error::other(format!(
"Expected number of sets {}, got {}",
self.erasure.sets.len(),
other.erasure.sets.len()
@@ -198,7 +198,7 @@ impl FormatV3 {
for i in 0..self.erasure.sets.len() {
if self.erasure.sets[i].len() != other.erasure.sets[i].len() {
return Err(Error::from_string(format!(
return Err(Error::other(format!(
"Each set should be of same size, expected {}, got {}",
self.erasure.sets[i].len(),
other.erasure.sets[i].len()
@@ -207,7 +207,7 @@ impl FormatV3 {
for j in 0..self.erasure.sets[i].len() {
if self.erasure.sets[i][j] != other.erasure.sets[i][j] {
return Err(Error::from_string(format!(
return Err(Error::other(format!(
"UUID on positions {}:{} do not match with, expected {:?} got {:?}: (%w)",
i,
j,
@@ -226,7 +226,7 @@ impl FormatV3 {
}
}
Err(Error::msg(format!(
Err(Error::other(format!(
"DriveID {:?} not found in any drive sets {:?}",
this, other.erasure.sets
)))

181
ecstore/src/disk/fs.rs Normal file
View File

@@ -0,0 +1,181 @@
use std::{fs::Metadata, path::Path};
use tokio::{
fs::{self, File},
io,
};
pub const SLASH_SEPARATOR: &str = "/";
#[cfg(not(windows))]
pub fn same_file(f1: &Metadata, f2: &Metadata) -> bool {
use std::os::unix::fs::MetadataExt;
if f1.dev() != f2.dev() {
return false;
}
if f1.ino() != f2.ino() {
return false;
}
if f1.size() != f2.size() {
return false;
}
if f1.permissions() != f2.permissions() {
return false;
}
if f1.mtime() != f2.mtime() {
return false;
}
true
}
#[cfg(windows)]
pub fn same_file(f1: &Metadata, f2: &Metadata) -> bool {
if f1.permissions() != f2.permissions() {
return false;
}
if f1.file_type() != f2.file_type() {
return false;
}
if f1.len() != f2.len() {
return false;
}
true
}
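A usage sketch (is_same is hypothetical): same_file answers whether two paths resolve to the same underlying file, using dev/inode on Unix and the weaker type/size/permissions heuristic on Windows, where inodes are not exposed through std Metadata.

fn is_same(p1: &std::path::Path, p2: &std::path::Path) -> std::io::Result<bool> {
    let (m1, m2) = (std::fs::metadata(p1)?, std::fs::metadata(p2)?);
    Ok(same_file(&m1, &m2))
}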
type FileMode = usize;
pub const O_RDONLY: FileMode = 0x00000;
pub const O_WRONLY: FileMode = 0x00001;
pub const O_RDWR: FileMode = 0x00002;
pub const O_CREATE: FileMode = 0x00040;
// pub const O_EXCL: FileMode = 0x00080;
// pub const O_NOCTTY: FileMode = 0x00100;
pub const O_TRUNC: FileMode = 0x00200;
// pub const O_NONBLOCK: FileMode = 0x00800;
pub const O_APPEND: FileMode = 0x00400;
// pub const O_SYNC: FileMode = 0x01000;
// pub const O_ASYNC: FileMode = 0x02000;
// pub const O_CLOEXEC: FileMode = 0x80000;
// read: bool,
// write: bool,
// append: bool,
// truncate: bool,
// create: bool,
// create_new: bool,
pub async fn open_file(path: impl AsRef<Path>, mode: FileMode) -> io::Result<File> {
let mut opts = fs::OpenOptions::new();
match mode & (O_RDONLY | O_WRONLY | O_RDWR) {
O_RDONLY => {
opts.read(true);
}
O_WRONLY => {
opts.write(true);
}
O_RDWR => {
opts.read(true);
opts.write(true);
}
_ => (),
};
if mode & O_CREATE != 0 {
opts.create(true);
}
if mode & O_APPEND != 0 {
opts.append(true);
}
if mode & O_TRUNC != 0 {
opts.truncate(true);
}
opts.open(path.as_ref()).await
}
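A usage sketch mirroring Go's os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, ...), which these flags transliterate:

async fn create_for_write(path: &std::path::Path) -> std::io::Result<File> {
    // Write-only, create if absent, truncate if present
    open_file(path, O_WRONLY | O_CREATE | O_TRUNC).await
}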
pub async fn access(path: impl AsRef<Path>) -> io::Result<()> {
fs::metadata(path).await?;
Ok(())
}
pub fn access_std(path: impl AsRef<Path>) -> io::Result<()> {
std::fs::metadata(path)?;
Ok(())
}
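// NOTE (editor's annotation): despite the name, lstat/lstat_std below use
// fs::metadata, which follows symlinks; POSIX lstat semantics would require
// fs::symlink_metadata instead.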
pub async fn lstat(path: impl AsRef<Path>) -> io::Result<Metadata> {
fs::metadata(path).await
}
pub fn lstat_std(path: impl AsRef<Path>) -> io::Result<Metadata> {
std::fs::metadata(path)
}
pub async fn make_dir_all(path: impl AsRef<Path>) -> io::Result<()> {
fs::create_dir_all(path.as_ref()).await
}
#[tracing::instrument(level = "debug", skip_all)]
pub async fn remove(path: impl AsRef<Path>) -> io::Result<()> {
let meta = fs::metadata(path.as_ref()).await?;
if meta.is_dir() {
fs::remove_dir(path.as_ref()).await
} else {
fs::remove_file(path.as_ref()).await
}
}
pub async fn remove_all(path: impl AsRef<Path>) -> io::Result<()> {
let meta = fs::metadata(path.as_ref()).await?;
if meta.is_dir() {
fs::remove_dir_all(path.as_ref()).await
} else {
fs::remove_file(path.as_ref()).await
}
}
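Usage sketch (delete_volume_dir and its force flag are illustrative): remove() mirrors Go's os.Remove and fails on a non-empty directory, which callers translate to DiskError::VolumeNotEmpty; remove_all() mirrors os.RemoveAll and deletes the whole tree.

async fn delete_volume_dir(path: &std::path::Path, force: bool) -> std::io::Result<()> {
    if force { remove_all(path).await } else { remove(path).await }
}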
#[tracing::instrument(level = "debug", skip_all)]
pub fn remove_std(path: impl AsRef<Path>) -> io::Result<()> {
let meta = std::fs::metadata(path.as_ref())?;
if meta.is_dir() {
std::fs::remove_dir(path.as_ref())
} else {
std::fs::remove_file(path.as_ref())
}
}
pub fn remove_all_std(path: impl AsRef<Path>) -> io::Result<()> {
let meta = std::fs::metadata(path.as_ref())?;
if meta.is_dir() {
std::fs::remove_dir_all(path.as_ref())
} else {
std::fs::remove_file(path.as_ref())
}
}
pub async fn mkdir(path: impl AsRef<Path>) -> io::Result<()> {
fs::create_dir(path.as_ref()).await
}
pub async fn rename(from: impl AsRef<Path>, to: impl AsRef<Path>) -> io::Result<()> {
fs::rename(from, to).await
}
pub fn rename_std(from: impl AsRef<Path>, to: impl AsRef<Path>) -> io::Result<()> {
std::fs::rename(from, to)
}
#[tracing::instrument(level = "debug", skip_all)]
pub async fn read_file(path: impl AsRef<Path>) -> io::Result<Vec<u8>> {
fs::read(path.as_ref()).await
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -3,31 +3,28 @@ use std::{
path::{Component, Path},
};
use crate::{
disk::error::{is_sys_err_not_dir, is_sys_err_path_not_found, os_is_not_exist},
utils::{self, os::same_disk},
};
use common::error::{Error, Result};
use super::error::Result;
use crate::disk::error_conv::to_file_error;
use tokio::fs;
use super::error::{os_err_to_file_err, os_is_exist, DiskError};
use super::error::DiskError;
pub fn check_path_length(path_name: &str) -> Result<()> {
// Apple OS X path length is limited to 1016
if cfg!(target_os = "macos") && path_name.len() > 1016 {
return Err(Error::new(DiskError::FileNameTooLong));
return Err(DiskError::FileNameTooLong);
}
// Disallow more than 1024 characters on windows, there
// are no known name_max limits on Windows.
if cfg!(target_os = "windows") && path_name.len() > 1024 {
return Err(Error::new(DiskError::FileNameTooLong));
return Err(DiskError::FileNameTooLong);
}
// On Unix we reject paths if they are just '.', '..' or '/'
let invalid_paths = [".", "..", "/"];
if invalid_paths.contains(&path_name) {
return Err(Error::new(DiskError::FileAccessDenied));
return Err(DiskError::FileAccessDenied);
}
// Reject any path segment longer than 255 bytes (Unix NAME_MAX)
@@ -40,7 +37,7 @@ pub fn check_path_length(path_name: &str) -> Result<()> {
_ => {
count += 1;
if count > 255 {
return Err(Error::new(DiskError::FileNameTooLong));
return Err(DiskError::FileNameTooLong);
}
}
}
@@ -55,19 +52,15 @@ pub fn is_root_disk(disk_path: &str, root_disk: &str) -> Result<bool> {
return Ok(false);
}
same_disk(disk_path, root_disk)
rustfs_utils::os::same_disk(disk_path, root_disk).map_err(|e| to_file_error(e).into())
}
pub async fn make_dir_all(path: impl AsRef<Path>, base_dir: impl AsRef<Path>) -> Result<()> {
check_path_length(path.as_ref().to_string_lossy().to_string().as_str())?;
if let Err(e) = reliable_mkdir_all(path.as_ref(), base_dir.as_ref()).await {
if is_sys_err_not_dir(&e) || is_sys_err_path_not_found(&e) {
return Err(Error::new(DiskError::FileAccessDenied));
}
return Err(os_err_to_file_err(e));
}
reliable_mkdir_all(path.as_ref(), base_dir.as_ref())
.await
.map_err(to_file_error)?;
Ok(())
}
@@ -77,7 +70,7 @@ pub async fn is_empty_dir(path: impl AsRef<Path>) -> bool {
}
// read_dir with a read-count limit; count == 0 means unlimited.
pub async fn read_dir(path: impl AsRef<Path>, count: i32) -> Result<Vec<String>> {
pub async fn read_dir(path: impl AsRef<Path>, count: i32) -> std::io::Result<Vec<String>> {
let mut entries = fs::read_dir(path.as_ref()).await?;
let mut volumes = Vec::new();
@@ -96,7 +89,7 @@ pub async fn read_dir(path: impl AsRef<Path>, count: i32) -> Result<Vec<String>>
if file_type.is_file() {
volumes.push(name);
} else if file_type.is_dir() {
volumes.push(format!("{}{}", name, utils::path::SLASH_SEPARATOR));
volumes.push(format!("{}{}", name, super::fs::SLASH_SEPARATOR));
}
count -= 1;
if count == 0 {
@@ -115,17 +108,7 @@ pub async fn rename_all(
) -> Result<()> {
reliable_rename(src_file_path, dst_file_path.as_ref(), base_dir)
.await
.map_err(|e| {
if is_sys_err_not_dir(&e) || !os_is_not_exist(&e) || is_sys_err_path_not_found(&e) {
Error::new(DiskError::FileAccessDenied)
} else if os_is_not_exist(&e) {
Error::new(DiskError::FileNotFound)
} else if os_is_exist(&e) {
Error::new(DiskError::IsNotRegular)
} else {
Error::new(e)
}
})?;
.map_err(to_file_error)?;
Ok(())
}
@@ -144,8 +127,8 @@ pub async fn reliable_rename(
let mut i = 0;
loop {
if let Err(e) = utils::fs::rename_std(src_file_path.as_ref(), dst_file_path.as_ref()) {
if os_is_not_exist(&e) && i == 0 {
if let Err(e) = super::fs::rename_std(src_file_path.as_ref(), dst_file_path.as_ref()) {
if e.kind() == io::ErrorKind::NotFound && i == 0 {
i += 1;
continue;
}
@@ -171,7 +154,7 @@ pub async fn reliable_mkdir_all(path: impl AsRef<Path>, base_dir: impl AsRef<Pat
let mut base_dir = base_dir.as_ref();
loop {
if let Err(e) = os_mkdir_all(path.as_ref(), base_dir).await {
if os_is_not_exist(&e) && i == 0 {
if e.kind() == io::ErrorKind::NotFound && i == 0 {
i += 1;
if let Some(base_parent) = base_dir.parent() {
@@ -200,8 +183,8 @@ pub async fn os_mkdir_all(dir_path: impl AsRef<Path>, base_dir: impl AsRef<Path>
if let Some(parent) = dir_path.as_ref().parent() {
// Recursion is not supported here; just call create_dir_all directly
if let Err(e) = utils::fs::make_dir_all(&parent).await {
if os_is_exist(&e) {
if let Err(e) = super::fs::make_dir_all(&parent).await {
if e.kind() == io::ErrorKind::AlreadyExists {
return Ok(());
}
@@ -210,8 +193,8 @@ pub async fn os_mkdir_all(dir_path: impl AsRef<Path>, base_dir: impl AsRef<Path>
// Box::pin(os_mkdir_all(&parent, &base_dir)).await?;
}
if let Err(e) = utils::fs::mkdir(dir_path.as_ref()).await {
if os_is_exist(&e) {
if let Err(e) = super::fs::mkdir(dir_path.as_ref()).await {
if e.kind() == io::ErrorKind::AlreadyExists {
return Ok(());
}

View File

@@ -1,6 +1,7 @@
use std::path::PathBuf;
use futures::lock::Mutex;
use http::{HeaderMap, Method};
use protos::{
node_service_time_out_client,
proto_gen::node_service::{
@@ -11,6 +12,8 @@ use protos::{
},
};
use rmp_serde::Serializer;
use rustfs_filemeta::{FileInfo, MetaCacheEntry, MetacacheWriter, RawFileInfo};
use rustfs_rio::{HttpReader, Reader};
use serde::Serialize;
use tokio::{
io::AsyncWrite,
@@ -21,26 +24,19 @@ use tonic::Request;
use tracing::info;
use uuid::Uuid;
use super::error::{Error, Result};
use super::{
endpoint::Endpoint, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption,
FileInfoVersions, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo,
WalkDirOptions,
};
use crate::{
disk::error::DiskError,
heal::{
data_scanner::ShouldSleepFn,
data_usage_cache::{DataUsageCache, DataUsageEntry},
heal_commands::{HealScanMode, HealingTracker},
},
store_api::{FileInfo, RawFileInfo},
use crate::heal::{
data_scanner::ShouldSleepFn,
data_usage_cache::{DataUsageCache, DataUsageEntry},
heal_commands::{HealScanMode, HealingTracker},
};
use crate::{disk::MetaCacheEntry, metacache::writer::MetacacheWriter};
use crate::{
io::{FileReader, FileWriter, HttpFileReader, HttpFileWriter},
utils::proto_err_to_err,
};
use common::error::{Error, Result};
use crate::io::{FileWriter, HttpFileWriter};
use protos::proto_gen::node_service::RenamePartRequst;
#[derive(Debug)]
@@ -150,7 +146,7 @@ impl DiskAPI for RemoteDisk {
info!("make_volume");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(MakeVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -159,11 +155,7 @@ impl DiskAPI for RemoteDisk {
let response = client.make_volume(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
Ok(())
@@ -174,7 +166,7 @@ impl DiskAPI for RemoteDisk {
info!("make_volumes");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(MakeVolumesRequest {
disk: self.endpoint.to_string(),
volumes: volumes.iter().map(|s| (*s).to_string()).collect(),
@@ -183,11 +175,7 @@ impl DiskAPI for RemoteDisk {
let response = client.make_volumes(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
Ok(())
@@ -198,7 +186,7 @@ impl DiskAPI for RemoteDisk {
info!("list_volumes");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(ListVolumesRequest {
disk: self.endpoint.to_string(),
});
@@ -206,11 +194,7 @@ impl DiskAPI for RemoteDisk {
let response = client.list_volumes(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
let infos = response
@@ -227,7 +211,7 @@ impl DiskAPI for RemoteDisk {
info!("stat_volume");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(StatVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -236,11 +220,7 @@ impl DiskAPI for RemoteDisk {
let response = client.stat_volume(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
let volume_info = serde_json::from_str::<VolumeInfo>(&response.volume_info)?;
@@ -253,7 +233,7 @@ impl DiskAPI for RemoteDisk {
info!("delete_volume {}/{}", self.endpoint.to_string(), volume);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -262,11 +242,7 @@ impl DiskAPI for RemoteDisk {
let response = client.delete_volume(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
Ok(())
@@ -283,7 +259,7 @@ impl DiskAPI for RemoteDisk {
opts.serialize(&mut Serializer::new(&mut buf))?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(WalkDirRequest {
disk: self.endpoint.to_string(),
walk_dir_options: buf,
@@ -294,14 +270,14 @@ impl DiskAPI for RemoteDisk {
match response.next().await {
Some(Ok(resp)) => {
if !resp.success {
return Err(Error::from_string(resp.error_info.unwrap_or("".to_string())));
return Err(Error::other(resp.error_info.unwrap_or_default()));
}
let entry = serde_json::from_str::<MetaCacheEntry>(&resp.meta_cache_entry)
.map_err(|_| Error::from_string(format!("Unexpected response: {:?}", response)))?;
.map_err(|_| Error::other(format!("Unexpected response: {:?}", response)))?;
out.write_obj(&entry).await?;
}
None => break,
_ => return Err(Error::from_string(format!("Unexpected response: {:?}", response))),
_ => return Err(Error::other(format!("Unexpected response: {:?}", response))),
}
}
@@ -329,7 +305,7 @@ impl DiskAPI for RemoteDisk {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteVersionRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -342,11 +318,7 @@ impl DiskAPI for RemoteDisk {
let response = client.delete_version(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
// let raw_file_info = serde_json::from_str::<RawFileInfo>(&response.raw_file_info)?;
@@ -369,7 +341,7 @@ impl DiskAPI for RemoteDisk {
}
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteVersionsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -377,13 +349,10 @@ impl DiskAPI for RemoteDisk {
opts,
});
// TODO: use Error not string
let response = client.delete_versions(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
let errors = response
.errors
@@ -392,7 +361,7 @@ impl DiskAPI for RemoteDisk {
if error.is_empty() {
None
} else {
Some(Error::from_string(error))
Some(Error::other(error.to_string()))
}
})
.collect();
@@ -406,7 +375,7 @@ impl DiskAPI for RemoteDisk {
let paths = paths.to_owned();
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeletePathsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -416,11 +385,7 @@ impl DiskAPI for RemoteDisk {
let response = client.delete_paths(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
Ok(())
@@ -432,7 +397,7 @@ impl DiskAPI for RemoteDisk {
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(WriteMetadataRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -443,11 +408,7 @@ impl DiskAPI for RemoteDisk {
let response = client.write_metadata(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
Ok(())
@@ -461,7 +422,7 @@ impl DiskAPI for RemoteDisk {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(UpdateMetadataRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -473,11 +434,7 @@ impl DiskAPI for RemoteDisk {
let response = client.update_metadata(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
Ok(())
@@ -496,7 +453,7 @@ impl DiskAPI for RemoteDisk {
let opts = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(ReadVersionRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -508,11 +465,7 @@ impl DiskAPI for RemoteDisk {
let response = client.read_version(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
let file_info = serde_json::from_str::<FileInfo>(&response.file_info)?;
@@ -525,7 +478,7 @@ impl DiskAPI for RemoteDisk {
info!("read_xl {}/{}/{}", self.endpoint.to_string(), volume, path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(ReadXlRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -536,11 +489,7 @@ impl DiskAPI for RemoteDisk {
let response = client.read_xl(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
let raw_file_info = serde_json::from_str::<RawFileInfo>(&response.raw_file_info)?;
@@ -561,7 +510,7 @@ impl DiskAPI for RemoteDisk {
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(RenameDataRequest {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
@@ -574,11 +523,7 @@ impl DiskAPI for RemoteDisk {
let response = client.rename_data(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
let rename_data_resp = serde_json::from_str::<RenameDataResp>(&response.rename_data_resp)?;
@@ -591,7 +536,7 @@ impl DiskAPI for RemoteDisk {
info!("list_dir {}/{}", volume, _dir_path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(ListDirRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -600,39 +545,43 @@ impl DiskAPI for RemoteDisk {
let response = client.list_dir(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
Ok(response.volumes)
}
#[tracing::instrument(level = "debug", skip(self))]
async fn read_file(&self, volume: &str, path: &str) -> Result<FileReader> {
async fn read_file(&self, volume: &str, path: &str) -> Result<Box<dyn Reader>> {
info!("read_file {}/{}", volume, path);
Ok(Box::new(
HttpFileReader::new(self.endpoint.grid_host().as_str(), self.endpoint.to_string().as_str(), volume, path, 0, 0)
.await?,
))
let url = format!(
"{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}",
self.endpoint.grid_host(),
urlencoding::encode(self.endpoint.to_string().as_str()),
urlencoding::encode(volume),
urlencoding::encode(path),
0,
0
);
Ok(Box::new(HttpReader::new(url, Method::GET, HeaderMap::new()).await?))
}
#[tracing::instrument(level = "debug", skip(self))]
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<FileReader> {
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<Box<dyn Reader>> {
info!("read_file_stream {}/{}/{}", self.endpoint.to_string(), volume, path);
Ok(Box::new(
HttpFileReader::new(
self.endpoint.grid_host().as_str(),
self.endpoint.to_string().as_str(),
volume,
path,
offset,
length,
)
.await?,
))
let url = format!(
"{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}",
self.endpoint.grid_host(),
urlencoding::encode(self.endpoint.to_string().as_str()),
urlencoding::encode(volume),
urlencoding::encode(path),
offset,
length
);
Ok(Box::new(HttpReader::new(url, Method::GET, HeaderMap::new()).await?))
}
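// Editor's sketch (not part of this commit): how the query string above composes
// with percent-encoding; the host and paths here are hypothetical.
#[cfg(test)]
mod read_file_stream_url_sketch {
    #[test]
    fn url_is_percent_encoded() {
        let grid_host = "http://127.0.0.1:9000"; // assumed endpoint.grid_host() value
        let url = format!(
            "{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}",
            grid_host,
            urlencoding::encode("/data/disk1"),
            urlencoding::encode("bucket"),
            urlencoding::encode("dir/a b.txt"),
            1024,
            4096
        );
        // Reserved characters in path segments must not leak into the query string.
        assert!(url.contains("path=dir%2Fa%20b.txt"));
    }
}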
#[tracing::instrument(level = "debug", skip(self))]
@@ -666,7 +615,7 @@ impl DiskAPI for RemoteDisk {
info!("rename_file");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(RenameFileRequst {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
@@ -678,11 +627,7 @@ impl DiskAPI for RemoteDisk {
let response = client.rename_file(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
Ok(())
@@ -693,7 +638,7 @@ impl DiskAPI for RemoteDisk {
info!("rename_part {}/{}", src_volume, src_path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(RenamePartRequst {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
@@ -706,11 +651,7 @@ impl DiskAPI for RemoteDisk {
let response = client.rename_part(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
Ok(())
@@ -722,7 +663,7 @@ impl DiskAPI for RemoteDisk {
let options = serde_json::to_string(&opt)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -733,11 +674,7 @@ impl DiskAPI for RemoteDisk {
let response = client.delete(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
Ok(())
@@ -749,7 +686,7 @@ impl DiskAPI for RemoteDisk {
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(VerifyFileRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -760,11 +697,7 @@ impl DiskAPI for RemoteDisk {
let response = client.verify_file(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
let check_parts_resp = serde_json::from_str::<CheckPartsResp>(&response.check_parts_resp)?;
@@ -778,7 +711,7 @@ impl DiskAPI for RemoteDisk {
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(CheckPartsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -789,11 +722,7 @@ impl DiskAPI for RemoteDisk {
let response = client.check_parts(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
let check_parts_resp = serde_json::from_str::<CheckPartsResp>(&response.check_parts_resp)?;
@@ -807,7 +736,7 @@ impl DiskAPI for RemoteDisk {
let read_multiple_req = serde_json::to_string(&req)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(ReadMultipleRequest {
disk: self.endpoint.to_string(),
read_multiple_req,
@@ -816,11 +745,7 @@ impl DiskAPI for RemoteDisk {
let response = client.read_multiple(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
let read_multiple_resps = response
@@ -837,7 +762,7 @@ impl DiskAPI for RemoteDisk {
info!("write_all");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(WriteAllRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -848,11 +773,7 @@ impl DiskAPI for RemoteDisk {
let response = client.write_all(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
Ok(())
@@ -863,7 +784,7 @@ impl DiskAPI for RemoteDisk {
info!("read_all {}/{}", volume, path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(ReadAllRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -873,7 +794,7 @@ impl DiskAPI for RemoteDisk {
let response = client.read_all(request).await?.into_inner();
if !response.success {
return Err(Error::new(DiskError::FileNotFound));
return Err(response.error.unwrap_or_default().into());
}
Ok(response.data)
@@ -884,7 +805,7 @@ impl DiskAPI for RemoteDisk {
let opts = serde_json::to_string(&opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(DiskInfoRequest {
disk: self.endpoint.to_string(),
opts,
@@ -893,11 +814,7 @@ impl DiskAPI for RemoteDisk {
let response = client.disk_info(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
} else {
Err(Error::from_string(""))
};
return Err(response.error.unwrap_or_default().into());
}
let disk_info = serde_json::from_str::<DiskInfo>(&response.disk_info)?;
@@ -917,7 +834,7 @@ impl DiskAPI for RemoteDisk {
let cache = serde_json::to_string(cache)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let (tx, rx) = mpsc::channel(10);
let in_stream = ReceiverStream::new(rx);
@@ -927,7 +844,9 @@ impl DiskAPI for RemoteDisk {
cache,
scan_mode: scan_mode as u64,
};
tx.send(request).await?;
tx.send(request)
.await
.map_err(|err| Error::other(format!("can not send request, err: {}", err)))?;
loop {
match response.next().await {
@@ -939,10 +858,10 @@ impl DiskAPI for RemoteDisk {
let data_usage_cache = serde_json::from_str::<DataUsageCache>(&resp.data_usage_cache)?;
return Ok(data_usage_cache);
} else {
return Err(Error::from_string("scan was interrupted"));
return Err(Error::other("scan was interrupted"));
}
}
_ => return Err(Error::from_string("scan was interrupted")),
_ => return Err(Error::other("scan was interrupted")),
}
}
}
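// Editor's sketch (not part of this commit): the mpsc + ReceiverStream pairing
// above is the usual way to feed a client-streaming tonic RPC; a stripped-down
// version with a String payload, names illustrative.
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;

async fn request_stream_demo() {
    let (tx, rx) = mpsc::channel::<String>(10);
    // The receiver half becomes the request stream handed to the generated client.
    let in_stream = ReceiverStream::new(rx);
    // The sender half stays available to push follow-up messages later.
    tx.send("initial request".to_string()).await.ok();
    drop(tx); // dropping the sender ends the stream
    let _ = in_stream; // passed to the generated client in the real code
}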

View File

@@ -1,9 +1,8 @@
use crate::bitrot::{BitrotReader, BitrotWriter};
use crate::error::clone_err;
use crate::disk::error::{Error, Result};
use crate::disk::error_reduce::{reduce_write_quorum_errs, OBJECT_OP_IGNORED_ERRS};
use crate::io::Etag;
use crate::quorum::{object_op_ignored_errs, reduce_write_quorum_errs};
use bytes::{Bytes, BytesMut};
use common::error::{Error, Result};
use futures::future::join_all;
use reed_solomon_erasure::galois_8::ReedSolomon;
use smallvec::SmallVec;
@@ -91,7 +90,7 @@ impl Erasure {
if let ErrorKind::UnexpectedEof = e.kind() {
break;
} else {
return Err(Error::new(e));
return Err(e.into());
}
}
};
@@ -115,7 +114,7 @@ impl Erasure {
if let Some(w) = w_op {
w.write(blocks_inner[i_inner].clone()).await.err()
} else {
Some(Error::new(DiskError::DiskNotFound))
Some(DiskError::DiskNotFound)
}
}
});
@@ -128,7 +127,7 @@ impl Erasure {
continue;
}
if let Some(err) = reduce_write_quorum_errs(&errs, object_op_ignored_errs().as_ref(), write_quorum) {
if let Some(err) = reduce_write_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, write_quorum) {
warn!("Erasure encode errs {:?}", &errs);
return Err(err);
}
@@ -210,7 +209,7 @@ impl Erasure {
if bytes_writed != length {
// debug!("bytes_writed != length: {} != {} ", bytes_writed, length);
return (bytes_writed, Some(Error::msg("erasure decode less data")));
return (bytes_writed, Some(Error::other("erasure decode less data")));
}
(bytes_writed, None)
@@ -228,7 +227,7 @@ impl Erasure {
W: AsyncWrite + Send + Unpin + 'static,
{
if bufs.len() < data_blocks {
return Err(Error::msg("read bufs not match data_blocks"));
return Err(Error::other("read bufs not match data_blocks"));
}
let data_len: usize = bufs
@@ -238,7 +237,7 @@ impl Erasure {
.map(|v| v.as_ref().unwrap().len())
.sum();
if data_len < length {
return Err(Error::msg(format!("write_data_blocks data_len < length {} < {}", data_len, length)));
return Err(Error::other(format!("write_data_blocks data_len < length {} < {}", data_len, length)));
}
let mut offset = offset;
@@ -304,7 +303,7 @@ impl Erasure {
// Only run erasure coding when the parity count is greater than 0
if self.parity_shards > 0 {
self.encoder.as_ref().unwrap().encode(data_slices)?;
self.encoder.as_ref().unwrap().encode(data_slices).map_err(Error::other)?;
}
}
@@ -321,7 +320,7 @@ impl Erasure {
pub fn decode_data(&self, shards: &mut [Option<Vec<u8>>]) -> Result<()> {
if self.parity_shards > 0 {
self.encoder.as_ref().unwrap().reconstruct(shards)?;
self.encoder.as_ref().unwrap().reconstruct(shards).map_err(Error::other)?;
}
Ok(())
@@ -382,7 +381,7 @@ impl Erasure {
total_length
);
if writers.len() != self.parity_shards + self.data_shards {
return Err(Error::from_string("invalid argument"));
return Err(Error::other("invalid argument"));
}
let mut reader = ShardReader::new(readers, self, 0, total_length);
@@ -397,12 +396,12 @@ impl Erasure {
let mut bufs = reader.read().await?;
if self.parity_shards > 0 {
self.encoder.as_ref().unwrap().reconstruct(&mut bufs)?;
self.encoder.as_ref().unwrap().reconstruct(&mut bufs).map_err(Error::other)?;
}
let shards = bufs.into_iter().flatten().map(Bytes::from).collect::<Vec<_>>();
if shards.len() != self.parity_shards + self.data_shards {
return Err(Error::from_string("can not reconstruct data"));
return Err(Error::other("can not reconstruct data"));
}
for (i, w) in writers.iter_mut().enumerate() {
@@ -419,7 +418,7 @@ impl Erasure {
}
}
if !errs.is_empty() {
return Err(clone_err(&errs[0]));
return Err(errs[0].clone().into());
}
Ok(())
@@ -494,7 +493,7 @@ impl ShardReader {
if let Some(disk) = disk {
disk.read_at(offset, read_length).await
} else {
Err(Error::new(DiskError::DiskNotFound))
Err(DiskError::DiskNotFound)
}
});
}
@@ -517,7 +516,7 @@ impl ShardReader {
warn!("ec decode read ress {:?}", &ress);
warn!("ec decode read errors {:?}", &errors);
return Err(Error::msg("shard reader read failed"));
return Err(Error::other("shard reader read failed"));
}
self.offset += self.shard_size;

View File

@@ -0,0 +1,263 @@
use super::Erasure;
use crate::disk::error::Error;
use crate::disk::error_reduce::reduce_errs;
use futures::future::join_all;
use pin_project_lite::pin_project;
use rustfs_rio::BitrotReader;
use std::io;
use std::io::ErrorKind;
use tokio::io::AsyncWriteExt;
use tracing::error;
pin_project! {
pub(crate) struct ParallelReader {
#[pin]
readers: Vec<Option<BitrotReader>>,
offset: usize,
shard_size: usize,
shard_file_size: usize,
data_shards: usize,
total_shards: usize,
}
}
impl ParallelReader {
// Disk errors must be handled before the readers are passed in, so that enough usable BitrotReaders are available
pub fn new(readers: Vec<Option<BitrotReader>>, e: Erasure, offset: usize, total_length: usize) -> Self {
let shard_size = e.shard_size();
let shard_file_size = e.shard_file_size(total_length);
let offset = (offset / e.block_size) * shard_size;
// Ensure that offset does not exceed shard_file_size
ParallelReader {
readers,
offset,
shard_size,
shard_file_size,
data_shards: e.data_shards,
total_shards: e.data_shards + e.parity_shards,
}
}
}
impl ParallelReader {
pub async fn read(&mut self) -> (Vec<Option<Vec<u8>>>, Vec<Option<Error>>) {
// if self.readers.len() != self.total_shards {
// return Err(io::Error::new(ErrorKind::InvalidInput, "Invalid number of readers"));
// }
let shard_size = if self.offset + self.shard_size > self.shard_file_size {
self.shard_file_size - self.offset
} else {
self.shard_size
};
if shard_size == 0 {
return (vec![None; self.readers.len()], vec![None; self.readers.len()]);
}
// Read all shards concurrently
let read_futs: Vec<_> = self
.readers
.iter_mut()
.enumerate()
.map(|(i, opt_reader)| {
if let Some(reader) = opt_reader.as_mut() {
let mut buf = vec![0u8; shard_size];
// `i` and `buf` must be moved into the async block
Some(async move {
match reader.read(&mut buf).await {
Ok(n) => {
buf.truncate(n);
(i, Ok(buf))
}
Err(e) => (i, Err(Error::from(e))),
}
})
} else {
None
}
})
.collect();
// Filter out the `None`s, then await the rest with join_all
let mut results = join_all(read_futs.into_iter().flatten()).await;
let mut shards: Vec<Option<Vec<u8>>> = vec![None; self.readers.len()];
let mut errs = vec![None; self.readers.len()];
for (i, shard) in results.drain(..) {
match shard {
Ok(data) => {
if !data.is_empty() {
shards[i] = Some(data);
}
}
Err(e) => {
error!("Error reading shard {}: {}", i, e);
errs[i] = Some(e);
}
}
}
self.offset += shard_size;
(shards, errs)
}
pub fn can_decode(&self, shards: &[Option<Vec<u8>>]) -> bool {
shards.iter().filter(|s| s.is_some()).count() >= self.data_shards
}
}
/// 获取数据块总长度
fn get_data_block_len(shards: &[Option<Vec<u8>>], data_blocks: usize) -> usize {
let mut size = 0;
for shard in shards.iter().take(data_blocks).flatten() {
size += shard.len();
}
size
}
/// 将编码块中的数据块写入目标,支持 offset 和 length
async fn write_data_blocks<W>(
writer: &mut W,
en_blocks: &[Option<Vec<u8>>],
data_blocks: usize,
mut offset: usize,
length: usize,
) -> std::io::Result<usize>
where
W: tokio::io::AsyncWrite + Send + Sync + Unpin,
{
if get_data_block_len(en_blocks, data_blocks) < length {
return Err(io::Error::new(ErrorKind::UnexpectedEof, "Not enough data blocks to write"));
}
let mut total_written = 0;
let mut write_left = length;
for block_op in &en_blocks[..data_blocks] {
if block_op.is_none() {
return Err(io::Error::new(ErrorKind::UnexpectedEof, "Missing data block"));
}
let block = block_op.as_ref().unwrap();
if offset >= block.len() {
offset -= block.len();
continue;
}
let block_slice = &block[offset..];
offset = 0;
if write_left < block.len() {
writer.write_all(&block_slice[..write_left]).await?;
total_written += write_left;
break;
}
let n = block_slice.len();
writer.write_all(block_slice).await?;
write_left -= n;
total_written += n;
}
Ok(total_written)
}
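// Editor's sketch (not part of this commit) of the offset/length semantics above,
// assuming tokio's AsyncWrite impl for `std::io::Cursor<Vec<u8>>`.
#[cfg(test)]
mod write_data_blocks_sketch {
    use super::write_data_blocks;

    #[tokio::test]
    async fn honors_offset_and_length() {
        // Two data blocks of 4 bytes each; parity blocks are not consulted here.
        let blocks = vec![Some(b"abcd".to_vec()), Some(b"efgh".to_vec())];
        let mut out = std::io::Cursor::new(Vec::new());
        // Skip 2 bytes into the first block, then copy 4 bytes: "cd" + "ef".
        let n = write_data_blocks(&mut out, &blocks, 2, 2, 4).await.unwrap();
        assert_eq!(n, 4);
        assert_eq!(out.into_inner(), b"cdef".to_vec());
    }
}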
impl Erasure {
pub async fn decode<W>(
&self,
writer: &mut W,
readers: Vec<Option<BitrotReader>>,
offset: usize,
length: usize,
total_length: usize,
) -> (usize, Option<std::io::Error>)
where
W: tokio::io::AsyncWrite + Send + Sync + Unpin + 'static,
{
if readers.len() != self.data_shards + self.parity_shards {
return (0, Some(io::Error::new(ErrorKind::InvalidInput, "Invalid number of readers")));
}
if offset + length > total_length {
return (0, Some(io::Error::new(ErrorKind::InvalidInput, "offset + length exceeds total length")));
}
let mut ret_err = None;
if length == 0 {
return (0, ret_err);
}
let mut written = 0;
let mut reader = ParallelReader::new(readers, self.clone(), offset, total_length);
let start = offset / self.block_size;
let end = (offset + length) / self.block_size;
for i in start..=end {
let (block_offset, block_length) = if start == end {
(offset % self.block_size, length)
} else if i == start {
(offset % self.block_size, self.block_size - (offset % self.block_size))
} else if i == end {
(0, (offset + length) % self.block_size)
} else {
(0, self.block_size)
};
if block_length == 0 {
break;
}
let (mut shards, errs) = reader.read().await;
if ret_err.is_none() {
if let (_, Some(err)) = reduce_errs(&errs, &[]) {
if err == Error::FileNotFound || err == Error::FileCorrupt {
ret_err = Some(err.into());
}
}
}
if !reader.can_decode(&shards) {
ret_err = Some(Error::ErasureReadQuorum.into());
break;
}
// Decode the shards
if let Err(e) = self.decode_data(&mut shards) {
ret_err = Some(e);
break;
}
let n = match write_data_blocks(writer, &shards, self.data_shards, block_offset, block_length).await {
Ok(n) => n,
Err(e) => {
ret_err = Some(e);
break;
}
};
written += n;
}
if written < length {
ret_err = Some(Error::LessData.into());
}
(written, ret_err)
}
}
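// Editor's sketch (not part of this commit): the per-block (offset, length)
// slicing used in the loop above, pulled out as a hypothetical helper.
fn block_range(i: usize, start: usize, end: usize, offset: usize, length: usize, block_size: usize) -> (usize, usize) {
    if start == end {
        (offset % block_size, length) // read begins and ends inside one block
    } else if i == start {
        (offset % block_size, block_size - (offset % block_size)) // tail of the first block
    } else if i == end {
        (0, (offset + length) % block_size) // head of the last block
    } else {
        (0, block_size) // whole middle block
    }
}
// Example: offset=10, length=20, block_size=8 spans blocks 1..=3 and yields
// (2,6), (0,8), (0,6) — 6+8+6 = 20 bytes in total.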

View File

@@ -0,0 +1,139 @@
use bytes::Bytes;
use rustfs_rio::BitrotWriter;
use rustfs_rio::Reader;
// use std::io::Cursor;
// use std::mem;
use super::Erasure;
use crate::disk::error::Error;
use crate::disk::error_reduce::count_errs;
use crate::disk::error_reduce::{reduce_write_quorum_errs, OBJECT_OP_IGNORED_ERRS};
use std::sync::Arc;
use std::vec;
use tokio::sync::mpsc;
pub(crate) struct MultiWriter<'a> {
writers: &'a mut [Option<BitrotWriter>],
write_quorum: usize,
errs: Vec<Option<Error>>,
}
impl<'a> MultiWriter<'a> {
pub fn new(writers: &'a mut [Option<BitrotWriter>], write_quorum: usize) -> Self {
let length = writers.len();
MultiWriter {
writers,
write_quorum,
errs: vec![None; length],
}
}
#[allow(clippy::needless_range_loop)]
pub async fn write(&mut self, data: Vec<Bytes>) -> std::io::Result<()> {
for i in 0..self.writers.len() {
if self.errs[i].is_some() {
continue; // Skip if we already have an error for this writer
}
let writer_opt = &mut self.writers[i];
let shard = &data[i];
if let Some(writer) = writer_opt {
match writer.write(shard).await {
Ok(n) => {
if n < shard.len() {
self.errs[i] = Some(Error::ShortWrite);
self.writers[i] = None; // Mark as failed
} else {
self.errs[i] = None;
}
}
Err(e) => {
self.errs[i] = Some(Error::from(e));
}
}
} else {
self.errs[i] = Some(Error::DiskNotFound);
}
}
let nil_count = self.errs.iter().filter(|&e| e.is_none()).count();
if nil_count > self.write_quorum {
return Ok(());
}
if let Some(write_err) = reduce_write_quorum_errs(&self.errs, OBJECT_OP_IGNORED_ERRS, self.write_quorum) {
return Err(std::io::Error::other(format!(
"Failed to write data: {} (offline-disks={}/{})",
write_err,
count_errs(&self.errs, &Error::DiskNotFound),
self.writers.len()
)));
}
Err(std::io::Error::other(format!(
"Failed to write data: (offline-disks={}/{})",
count_errs(&self.errs, &Error::DiskNotFound),
self.writers.len()
)))
}
}
impl Erasure {
pub async fn encode<R>(
self: Arc<Self>,
mut reader: R,
writers: &mut [Option<BitrotWriter>],
quorum: usize,
) -> std::io::Result<(R, usize)>
where
R: Reader + Send + Sync + Unpin + 'static,
{
let (tx, mut rx) = mpsc::channel::<Vec<Bytes>>(8);
let task = tokio::spawn(async move {
let block_size = self.block_size;
let mut total = 0;
loop {
let mut buf = vec![0u8; block_size];
match rustfs_utils::read_full(&mut reader, &mut buf).await {
Ok(n) if n > 0 => {
total += n;
let res = self.encode_data(&buf[..n])?;
if let Err(err) = tx.send(res).await {
return Err(std::io::Error::other(format!("Failed to send encoded data: {}", err)));
}
}
Ok(_) => break,
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
break;
}
Err(e) => {
return Err(e);
}
}
buf.clear();
}
Ok((reader, total))
});
let mut writers = MultiWriter::new(writers, quorum);
while let Some(block) = rx.recv().await {
if block.is_empty() {
break;
}
writers.write(block).await?;
}
let (reader, total) = task.await??;
Ok((reader, total))
}
}

View File

@@ -0,0 +1,433 @@
use bytes::{Bytes, BytesMut};
use reed_solomon_erasure::galois_8::ReedSolomon;
// use rustfs_rio::Reader;
use smallvec::SmallVec;
use std::io;
use std::io::ErrorKind;
use tracing::error;
use tracing::warn;
use uuid::Uuid;
/// Erasure coding utility for data reliability using Reed-Solomon codes.
///
/// This struct provides encoding and decoding of data into data and parity shards.
/// It supports splitting data into multiple shards, generating parity for fault tolerance,
/// and reconstructing lost shards.
///
/// # Fields
/// - `data_shards`: Number of data shards.
/// - `parity_shards`: Number of parity shards.
/// - `encoder`: Optional ReedSolomon encoder instance.
/// - `block_size`: Block size for each shard.
/// - `_id`: Unique identifier for the erasure instance.
/// - `_buf`: Internal buffer for block operations.
///
/// # Example
/// ```
/// use erasure_coding::Erasure;
/// let erasure = Erasure::new(4, 2, 8);
/// let data = b"hello world";
/// let shards = erasure.encode_data(data).unwrap();
/// // Simulate loss and recovery...
/// ```
#[derive(Default, Clone)]
pub struct Erasure {
pub data_shards: usize,
pub parity_shards: usize,
encoder: Option<ReedSolomon>,
pub block_size: usize,
_id: Uuid,
_buf: Vec<u8>,
}
impl Erasure {
/// Create a new Erasure instance.
///
/// # Arguments
/// * `data_shards` - Number of data shards.
/// * `parity_shards` - Number of parity shards.
/// * `block_size` - Block size for each shard.
pub fn new(data_shards: usize, parity_shards: usize, block_size: usize) -> Self {
let encoder = if parity_shards > 0 {
Some(ReedSolomon::new(data_shards, parity_shards).unwrap())
} else {
None
};
Erasure {
data_shards,
parity_shards,
block_size,
encoder,
_id: Uuid::new_v4(),
_buf: vec![0u8; block_size],
}
}
/// Encode data into data and parity shards.
///
/// # Arguments
/// * `data` - The input data to encode.
///
/// # Returns
/// A vector of encoded shards as `Bytes`.
#[tracing::instrument(level = "info", skip_all, fields(data_len=data.len()))]
pub fn encode_data(&self, data: &[u8]) -> io::Result<Vec<Bytes>> {
// let shard_size = self.shard_size();
// let total_size = shard_size * self.total_shard_count();
// Size of each data shard (ceiling division over data_shards)
let per_shard_size = data.len().div_ceil(self.data_shards);
// Total buffer size needed for all shards
let need_total_size = per_shard_size * self.total_shard_count();
// Create a new buffer with the required total length for all shards
let mut data_buffer = BytesMut::with_capacity(need_total_size);
// Copy source data
data_buffer.extend_from_slice(data);
data_buffer.resize(need_total_size, 0u8);
{
// EC encode, the result will be written into data_buffer
let data_slices: SmallVec<[&mut [u8]; 16]> = data_buffer.chunks_exact_mut(per_shard_size).collect();
// Only do EC if parity_shards > 0
if self.parity_shards > 0 {
if let Some(encoder) = self.encoder.as_ref() {
encoder.encode(data_slices).map_err(|e| {
error!("encode data error: {:?}", e);
io::Error::new(ErrorKind::Other, format!("encode data error {:?}", e))
})?;
} else {
warn!("parity_shards > 0, but encoder is None");
}
}
}
// Zero-copy split, all shards reference data_buffer
let mut data_buffer = data_buffer.freeze();
let mut shards = Vec::with_capacity(self.total_shard_count());
for _ in 0..self.total_shard_count() {
let shard = data_buffer.split_to(per_shard_size);
shards.push(shard);
}
Ok(shards)
}
/// Decode and reconstruct missing shards in-place.
///
/// # Arguments
/// * `shards` - Mutable slice of optional shard data. Missing shards should be `None`.
///
/// # Returns
/// Ok if reconstruction succeeds, error otherwise.
pub fn decode_data(&self, shards: &mut [Option<Vec<u8>>]) -> io::Result<()> {
if self.parity_shards > 0 {
if let Some(encoder) = self.encoder.as_ref() {
encoder.reconstruct(shards).map_err(|e| {
error!("decode data error: {:?}", e);
io::Error::new(ErrorKind::Other, format!("decode data error {:?}", e))
})?;
} else {
warn!("parity_shards > 0, but encoder is None");
}
}
Ok(())
}
/// Get the total number of shards (data + parity).
pub fn total_shard_count(&self) -> usize {
self.data_shards + self.parity_shards
}
// /// Calculate the shard size and total size for a given data size.
// // Returns (shard_size, total_size) for the given data size
// fn need_size(&self, data_size: usize) -> (usize, usize) {
// let shard_size = self.shard_size(data_size);
// (shard_size, shard_size * (self.total_shard_count()))
// }
/// Calculate the size of each shard.
pub fn shard_size(&self) -> usize {
self.block_size.div_ceil(self.data_shards)
}
/// Calculate the total erasure file size for a given original size.
// Returns the final erasure size from the original size
pub fn shard_file_size(&self, total_length: usize) -> usize {
if total_length == 0 {
return 0;
}
let num_shards = total_length / self.block_size;
let last_block_size = total_length % self.block_size;
let last_shard_size = last_block_size.div_ceil(self.data_shards);
num_shards * self.shard_size() + last_shard_size
}
/// Calculate the offset in the erasure file where reading begins.
// Returns the offset in the erasure file where reading begins
pub fn shard_file_offset(&self, start_offset: usize, length: usize, total_length: usize) -> usize {
let shard_size = self.shard_size();
let shard_file_size = self.shard_file_size(total_length);
let end_shard = (start_offset + length) / self.block_size;
let mut till_offset = end_shard * shard_size + shard_size;
if till_offset > shard_file_size {
till_offset = shard_file_size;
}
till_offset
}
/// Encode all data from a rustfs_rio::Reader in blocks, calling an async callback for each encoded block.
/// This method is async and returns the total number of bytes read once all blocks are processed.
///
/// # Arguments
/// * `reader` - A rustfs_rio::Reader to read data from.
/// * `mut on_block` - Async callback: FnMut(Result<Vec<Bytes>, std::io::Error>) -> Future<Output=Result<(), E>> + Send
///
/// # Returns
/// Result<total_bytes_read, E> after all data has been processed, or the callback's error.
pub async fn encode_stream_callback_async<F, Fut, E, R>(
self: std::sync::Arc<Self>,
reader: &mut R,
mut on_block: F,
) -> Result<usize, E>
where
R: rustfs_rio::Reader + Send + Sync + Unpin,
F: FnMut(std::io::Result<Vec<Bytes>>) -> Fut + Send,
Fut: std::future::Future<Output = Result<(), E>> + Send,
{
let block_size = self.block_size;
let mut total = 0;
loop {
let mut buf = vec![0u8; block_size];
match rustfs_utils::read_full(&mut *reader, &mut buf).await {
Ok(n) if n > 0 => {
total += n;
let res = self.encode_data(&buf[..n]);
on_block(res).await?
}
Ok(_) => break,
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
break;
}
Err(e) => {
on_block(Err(e)).await?;
break;
}
}
buf.clear();
}
Ok(total)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_shard_file_size_cases() {
let erasure = Erasure::new(4, 2, 8);
// Case 1: total_length == 0
assert_eq!(erasure.shard_file_size(0), 0);
// Case 2: total_length < block_size
assert_eq!(erasure.shard_file_size(5), 2); // 5 div_ceil 4 = 2
// Case 3: total_length == block_size
assert_eq!(erasure.shard_file_size(8), 2);
// Case 4: total_length > block_size, not aligned
assert_eq!(erasure.shard_file_size(13), 4); // 8/8=1, last=5, 5 div_ceil 4=2, 1*2+2=4
// Case 5: total_length > block_size, aligned
assert_eq!(erasure.shard_file_size(16), 4); // 16/8=2, last=0, 2*2+0=4
assert_eq!(erasure.shard_file_size(1248739), 312185); // 1248739/8=156092, last=3, 3 div_ceil 4=1, 156092*2+1=312185
assert_eq!(erasure.shard_file_size(43), 11); // 43/8=5, last=3, 3 div_ceil 4=1, 5*2+1=11
}
#[test]
fn test_encode_decode_roundtrip() {
let data_shards = 4;
let parity_shards = 2;
let block_size = 8;
let erasure = Erasure::new(data_shards, parity_shards, block_size);
// let data = b"hello erasure coding!";
let data = b"channel async callback test data!";
let shards = erasure.encode_data(data).unwrap();
// Simulate the loss of one shard
let mut shards_opt: Vec<Option<Vec<u8>>> = shards.iter().map(|b| Some(b.to_vec())).collect();
shards_opt[2] = None;
// Decode
erasure.decode_data(&mut shards_opt).unwrap();
// Recover original data
let mut recovered = Vec::new();
for shard in shards_opt.iter().take(data_shards) {
recovered.extend_from_slice(shard.as_ref().unwrap());
}
recovered.truncate(data.len());
assert_eq!(&recovered, data);
}
#[test]
fn test_encode_all_zero_data() {
let data_shards = 3;
let parity_shards = 2;
let block_size = 6;
let erasure = Erasure::new(data_shards, parity_shards, block_size);
let data = vec![0u8; block_size];
let shards = erasure.encode_data(&data).unwrap();
assert_eq!(shards.len(), data_shards + parity_shards);
let total_len: usize = shards.iter().map(|b| b.len()).sum();
assert_eq!(total_len, erasure.shard_size() * (data_shards + parity_shards));
}
#[test]
fn test_shard_size_and_file_size() {
let erasure = Erasure::new(4, 2, 8);
assert_eq!(erasure.shard_file_size(33), 9);
assert_eq!(erasure.shard_file_size(0), 0);
}
#[test]
fn test_shard_file_offset() {
let erasure = Erasure::new(4, 2, 8);
let offset = erasure.shard_file_offset(0, 16, 32);
assert!(offset > 0);
}
#[test]
fn test_encode_decode_large_1m() {
// Test encoding and decoding 1MB data, simulating the loss of 2 shards
let data_shards = 6;
let parity_shards = 3;
let block_size = 128 * 1024; // 128KB
let erasure = Erasure::new(data_shards, parity_shards, block_size);
let data = vec![0x5Au8; 1024 * 1024]; // 1MB fixed content
let shards = erasure.encode_data(&data).unwrap();
// Simulate the loss of 2 shards
let mut shards_opt: Vec<Option<Vec<u8>>> = shards.iter().map(|b| Some(b.to_vec())).collect();
shards_opt[1] = None;
shards_opt[7] = None;
// Decode
erasure.decode_data(&mut shards_opt).unwrap();
// Recover original data
let mut recovered = Vec::new();
for shard in shards_opt.iter().take(data_shards) {
recovered.extend_from_slice(shard.as_ref().unwrap());
}
recovered.truncate(data.len());
assert_eq!(&recovered, &data);
}
#[tokio::test]
async fn test_encode_stream_callback_async_error_propagation() {
use std::sync::Arc;
use tokio::io::BufReader;
use tokio::sync::mpsc;
let data_shards = 3;
let parity_shards = 3;
let block_size = 8;
let erasure = Arc::new(Erasure::new(data_shards, parity_shards, block_size));
let data = b"async stream callback error propagation!123";
let mut rio_reader = BufReader::new(&data[..]);
let (tx, mut rx) = mpsc::channel::<Vec<Bytes>>(8);
let erasure_clone = erasure.clone();
let mut call_count = 0;
let handle = tokio::spawn(async move {
let result = erasure_clone
.encode_stream_callback_async::<_, _, &'static str, _>(&mut rio_reader, move |res| {
let tx = tx.clone();
call_count += 1;
async move {
if call_count == 2 {
Err("user error")
} else {
let shards = res.unwrap();
tx.send(shards).await.unwrap();
Ok(())
}
}
})
.await;
assert!(result.is_err());
assert_eq!(result.unwrap_err(), "user error");
});
let mut all_blocks = Vec::new();
while let Some(block) = rx.recv().await {
println!("Received block: {:?}", block[0].len());
all_blocks.push(block);
}
handle.await.unwrap();
// Only the first block was processed
assert_eq!(all_blocks.len(), 1);
// Repair the first block with decode_data and verify it
let block = &all_blocks[0];
let mut shards_opt: Vec<Option<Vec<u8>>> = block.iter().map(|b| Some(b.to_vec())).collect();
// Simulate the loss of one shard
shards_opt[0] = None;
erasure.decode_data(&mut shards_opt).unwrap();
let mut recovered = Vec::new();
for shard in shards_opt.iter().take(data_shards) {
recovered.extend_from_slice(shard.as_ref().unwrap());
}
// Recover only the first block's original data
let block_data_len = std::cmp::min(block_size, data.len());
recovered.truncate(block_data_len);
assert_eq!(&recovered, &data[..block_data_len]);
}
#[tokio::test]
async fn test_encode_stream_callback_async_channel_decode() {
use std::sync::Arc;
use tokio::io::BufReader;
use tokio::sync::mpsc;
let data_shards = 4;
let parity_shards = 2;
let block_size = 8;
let erasure = Arc::new(Erasure::new(data_shards, parity_shards, block_size));
let data = b"channel async callback test data!";
let mut rio_reader = BufReader::new(&data[..]);
let (tx, mut rx) = mpsc::channel::<Vec<Bytes>>(8);
let erasure_clone = erasure.clone();
let handle = tokio::spawn(async move {
erasure_clone
.encode_stream_callback_async::<_, _, (), _>(&mut rio_reader, move |res| {
let tx = tx.clone();
async move {
let shards = res.unwrap();
tx.send(shards).await.unwrap();
Ok(())
}
})
.await
.unwrap();
});
let mut all_blocks = Vec::new();
while let Some(block) = rx.recv().await {
all_blocks.push(block);
}
handle.await.unwrap();
// For each block, simulate the loss of one shard and recover it
let mut recovered = Vec::new();
for block in &all_blocks {
let mut shards_opt: Vec<Option<Vec<u8>>> = block.iter().map(|b| Some(b.to_vec())).collect();
// Simulate the loss of one shard
shards_opt[0] = None;
erasure.decode_data(&mut shards_opt).unwrap();
for shard in shards_opt.iter().take(data_shards) {
recovered.extend_from_slice(shard.as_ref().unwrap());
}
}
recovered.truncate(data.len());
assert_eq!(&recovered, data);
}
}

View File

@@ -0,0 +1,56 @@
use super::decode::ParallelReader;
use crate::disk::error::{Error, Result};
use crate::erasure_coding::encode::MultiWriter;
use bytes::Bytes;
use rustfs_rio::BitrotReader;
use rustfs_rio::BitrotWriter;
use tracing::info;
impl super::Erasure {
pub async fn heal(
&self,
writers: &mut [Option<BitrotWriter>],
readers: Vec<Option<BitrotReader>>,
total_length: usize,
_prefer: &[bool],
) -> Result<()> {
info!(
"Erasure heal, writers len: {}, readers len: {}, total_length: {}",
writers.len(),
readers.len(),
total_length
);
if writers.len() != self.parity_shards + self.data_shards {
return Err(Error::other("invalid argument"));
}
let mut reader = ParallelReader::new(readers, self.clone(), 0, total_length);
let start_block = 0;
let mut end_block = total_length / self.block_size;
if total_length % self.block_size != 0 {
end_block += 1;
}
for _ in start_block..end_block {
let (mut shards, errs) = reader.read().await;
if errs.iter().filter(|e| e.is_none()).count() < self.data_shards {
return Err(Error::other(format!("can not reconstruct data: not enough data shards {:?}", errs)));
}
if self.parity_shards > 0 {
self.decode_data(&mut shards)?;
}
let shards = shards
.into_iter()
.map(|s| Bytes::from(s.unwrap_or_default()))
.collect::<Vec<_>>();
let mut writers = MultiWriter::new(writers, self.data_shards);
writers.write(shards).await?;
}
Ok(())
}
}

View File

@@ -0,0 +1,6 @@
pub mod decode;
pub mod encode;
pub mod erasure;
pub mod heal;
pub use erasure::Erasure;

View File

@@ -1,122 +1,874 @@
use crate::disk::error::{clone_disk_err, DiskError};
use common::error::Error;
use std::io;
// use tracing_error::{SpanTrace, SpanTraceStatus};
use rustfs_utils::path::decode_dir_object;
// pub type StdError = Box<dyn std::error::Error + Send + Sync + 'static>;
use crate::disk::error::DiskError;
// pub type Result<T = (), E = Error> = std::result::Result<T, E>;
pub type Error = StorageError;
pub type Result<T> = core::result::Result<T, Error>;
// #[derive(Debug)]
// pub struct Error {
// inner: Box<dyn std::error::Error + Send + Sync + 'static>,
// span_trace: SpanTrace,
// }
#[derive(Debug, thiserror::Error)]
pub enum StorageError {
#[error("Faulty disk")]
FaultyDisk,
// impl Error {
// /// Create a new error from a `std::error::Error`.
// #[must_use]
// #[track_caller]
// pub fn new<T: std::error::Error + Send + Sync + 'static>(source: T) -> Self {
// Self::from_std_error(source.into())
// }
#[error("Disk full")]
DiskFull,
// /// Create a new error from a `std::error::Error`.
// #[must_use]
// #[track_caller]
// pub fn from_std_error(inner: StdError) -> Self {
// Self {
// inner,
// span_trace: SpanTrace::capture(),
// }
// }
#[error("Volume not found")]
VolumeNotFound,
// /// Create a new error from a string.
// #[must_use]
// #[track_caller]
// pub fn from_string(s: impl Into<String>) -> Self {
// Self::msg(s)
// }
#[error("Volume exists")]
VolumeExists,
// /// Create a new error from a string.
// #[must_use]
// #[track_caller]
// pub fn msg(s: impl Into<String>) -> Self {
// Self::from_std_error(s.into().into())
// }
#[error("File not found")]
FileNotFound,
// /// Returns `true` if the inner type is the same as `T`.
// #[inline]
// pub fn is<T: std::error::Error + 'static>(&self) -> bool {
// self.inner.is::<T>()
// }
#[error("File version not found")]
FileVersionNotFound,
// /// Returns some reference to the inner value if it is of type `T`, or
// /// `None` if it isn't.
// #[inline]
// pub fn downcast_ref<T: std::error::Error + 'static>(&self) -> Option<&T> {
// self.inner.downcast_ref()
// }
#[error("File name too long")]
FileNameTooLong,
// /// Returns some mutable reference to the inner value if it is of type `T`, or
// /// `None` if it isn't.
// #[inline]
// pub fn downcast_mut<T: std::error::Error + 'static>(&mut self) -> Option<&mut T> {
// self.inner.downcast_mut()
// }
#[error("File access denied")]
FileAccessDenied,
// pub fn to_io_err(&self) -> Option<io::Error> {
// self.downcast_ref::<io::Error>()
// .map(|e| io::Error::new(e.kind(), e.to_string()))
// }
// }
#[error("File is corrupted")]
FileCorrupt,
// impl<T: std::error::Error + Send + Sync + 'static> From<T> for Error {
// fn from(e: T) -> Self {
// Self::new(e)
// }
// }
#[error("Not a regular file")]
IsNotRegular,
// impl std::fmt::Display for Error {
// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// write!(f, "{}", self.inner)?;
#[error("Volume not empty")]
VolumeNotEmpty,
// if self.span_trace.status() != SpanTraceStatus::EMPTY {
// write!(f, "\nspan_trace:\n{}", self.span_trace)?;
// }
#[error("Volume access denied")]
VolumeAccessDenied,
// Ok(())
// }
// }
#[error("Corrupted format")]
CorruptedFormat,
// impl Clone for Error {
// fn clone(&self) -> Self {
// if let Some(e) = self.downcast_ref::<DiskError>() {
// clone_disk_err(e)
// } else if let Some(e) = self.downcast_ref::<io::Error>() {
// if let Some(code) = e.raw_os_error() {
// Error::new(io::Error::from_raw_os_error(code))
// } else {
// Error::new(io::Error::new(e.kind(), e.to_string()))
// }
// } else {
// // TODO: handle other error types more precisely
// Error::msg(self.to_string())
// }
// }
// }
#[error("Corrupted backend")]
CorruptedBackend,
pub fn clone_err(e: &Error) -> Error {
if let Some(e) = e.downcast_ref::<DiskError>() {
clone_disk_err(e)
} else if let Some(e) = e.downcast_ref::<io::Error>() {
if let Some(code) = e.raw_os_error() {
Error::new(io::Error::from_raw_os_error(code))
} else {
Error::new(io::Error::new(e.kind(), e.to_string()))
}
} else {
// TODO: handle other error types more precisely
Error::msg(e.to_string())
#[error("Unformatted disk")]
UnformattedDisk,
#[error("Disk not found")]
DiskNotFound,
#[error("Drive is root")]
DriveIsRoot,
#[error("Faulty remote disk")]
FaultyRemoteDisk,
#[error("Disk access denied")]
DiskAccessDenied,
#[error("Unexpected error")]
Unexpected,
#[error("Too many open files")]
TooManyOpenFiles,
#[error("No heal required")]
NoHealRequired,
#[error("Config not found")]
ConfigNotFound,
#[error("not implemented")]
NotImplemented,
#[error("Invalid arguments provided for {0}/{1}-{2}")]
InvalidArgument(String, String, String),
#[error("method not allowed")]
MethodNotAllowed,
#[error("Bucket not found: {0}")]
BucketNotFound(String),
#[error("Bucket not empty: {0}")]
BucketNotEmpty(String),
#[error("Bucket name invalid: {0}")]
BucketNameInvalid(String),
#[error("Object name invalid: {0}/{1}")]
ObjectNameInvalid(String, String),
#[error("Bucket exists: {0}")]
BucketExists(String),
#[error("Storage reached its minimum free drive threshold.")]
StorageFull,
#[error("Please reduce your request rate")]
SlowDown,
#[error("Prefix access is denied:{0}/{1}")]
PrefixAccessDenied(String, String),
#[error("Invalid UploadID KeyCombination: {0}/{1}")]
InvalidUploadIDKeyCombination(String, String),
#[error("Malformed UploadID: {0}")]
MalformedUploadID(String),
#[error("Object name too long: {0}/{1}")]
ObjectNameTooLong(String, String),
#[error("Object name contains forward slash as prefix: {0}/{1}")]
ObjectNamePrefixAsSlash(String, String),
#[error("Object not found: {0}/{1}")]
ObjectNotFound(String, String),
#[error("Version not found: {0}/{1}-{2}")]
VersionNotFound(String, String, String),
#[error("Invalid upload id: {0}/{1}-{2}")]
InvalidUploadID(String, String, String),
#[error("Specified part could not be found. PartNumber {0}, Expected {1}, got {2}")]
InvalidPart(usize, String, String),
#[error("Invalid version id: {0}/{1}-{2}")]
InvalidVersionID(String, String, String),
#[error("invalid data movement operation, source and destination pool are the same for : {0}/{1}-{2}")]
DataMovementOverwriteErr(String, String, String),
#[error("Object exists on :{0} as directory {1}")]
ObjectExistsAsDirectory(String, String),
// #[error("Storage resources are insufficient for the read operation")]
// InsufficientReadQuorum,
// #[error("Storage resources are insufficient for the write operation")]
// InsufficientWriteQuorum,
#[error("Decommission not started")]
DecommissionNotStarted,
#[error("Decommission already running")]
DecommissionAlreadyRunning,
#[error("DoneForNow")]
DoneForNow,
#[error("erasure read quorum")]
ErasureReadQuorum,
#[error("erasure write quorum")]
ErasureWriteQuorum,
#[error("not first disk")]
NotFirstDisk,
#[error("first disk wiat")]
FirstDiskWait,
#[error("Io error: {0}")]
Io(std::io::Error),
}
impl StorageError {
pub fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
StorageError::Io(std::io::Error::other(error))
}
}
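// Editor's sketch (not part of this commit): `other` mirrors `std::io::Error::other`,
// so arbitrary messages land in the `Io` variant.
#[cfg(test)]
mod other_sketch {
    use super::StorageError;

    #[test]
    fn other_wraps_into_io() {
        let err = StorageError::other("scan was interrupted");
        assert!(matches!(err, StorageError::Io(_)));
        assert_eq!(err.to_string(), "Io error: scan was interrupted");
    }
}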
impl From<DiskError> for StorageError {
fn from(e: DiskError) -> Self {
match e {
DiskError::Io(io_error) => StorageError::Io(io_error),
// DiskError::MaxVersionsExceeded => todo!(),
DiskError::Unexpected => StorageError::Unexpected,
DiskError::CorruptedFormat => StorageError::CorruptedFormat,
DiskError::CorruptedBackend => StorageError::CorruptedBackend,
DiskError::UnformattedDisk => StorageError::UnformattedDisk,
// DiskError::InconsistentDisk => StorageError::InconsistentDisk,
// DiskError::UnsupportedDisk => StorageError::UnsupportedDisk,
DiskError::DiskFull => StorageError::DiskFull,
// DiskError::DiskNotDir => StorageError::DiskNotDir,
DiskError::DiskNotFound => StorageError::DiskNotFound,
// DiskError::DiskOngoingReq => StorageError::DiskOngoingReq,
DiskError::DriveIsRoot => StorageError::DriveIsRoot,
DiskError::FaultyRemoteDisk => StorageError::FaultyRemoteDisk,
DiskError::FaultyDisk => StorageError::FaultyDisk,
DiskError::DiskAccessDenied => StorageError::DiskAccessDenied,
DiskError::FileNotFound => StorageError::FileNotFound,
DiskError::FileVersionNotFound => StorageError::FileVersionNotFound,
DiskError::TooManyOpenFiles => StorageError::TooManyOpenFiles,
DiskError::FileNameTooLong => StorageError::FileNameTooLong,
DiskError::VolumeExists => StorageError::VolumeExists,
DiskError::IsNotRegular => StorageError::IsNotRegular,
// DiskError::PathNotFound => StorageError::PathNotFound,
DiskError::VolumeNotFound => StorageError::VolumeNotFound,
DiskError::VolumeNotEmpty => StorageError::VolumeNotEmpty,
DiskError::VolumeAccessDenied => StorageError::VolumeAccessDenied,
DiskError::FileAccessDenied => StorageError::FileAccessDenied,
DiskError::FileCorrupt => StorageError::FileCorrupt,
// DiskError::BitrotHashAlgoInvalid => StorageError::BitrotHashAlgoInvalid,
// DiskError::CrossDeviceLink => StorageError::CrossDeviceLink,
// DiskError::LessData => StorageError::LessData,
// DiskError::MoreData => StorageError::MoreData,
// DiskError::OutdatedXLMeta => StorageError::OutdatedXLMeta,
// DiskError::PartMissingOrCorrupt => StorageError::PartMissingOrCorrupt,
DiskError::NoHealRequired => StorageError::NoHealRequired,
DiskError::MethodNotAllowed => StorageError::MethodNotAllowed,
DiskError::ErasureReadQuorum => StorageError::ErasureReadQuorum,
DiskError::ErasureWriteQuorum => StorageError::ErasureWriteQuorum,
_ => StorageError::Io(std::io::Error::other(e)),
}
}
}
impl From<StorageError> for DiskError {
fn from(e: StorageError) -> DiskError {
match e {
StorageError::Io(io_error) => io_error.into(),
StorageError::Unexpected => DiskError::Unexpected,
StorageError::FileNotFound => DiskError::FileNotFound,
StorageError::FileVersionNotFound => DiskError::FileVersionNotFound,
StorageError::FileCorrupt => DiskError::FileCorrupt,
StorageError::MethodNotAllowed => DiskError::MethodNotAllowed,
StorageError::StorageFull => DiskError::DiskFull,
StorageError::SlowDown => DiskError::TooManyOpenFiles,
StorageError::ErasureReadQuorum => DiskError::ErasureReadQuorum,
StorageError::ErasureWriteQuorum => DiskError::ErasureWriteQuorum,
StorageError::TooManyOpenFiles => DiskError::TooManyOpenFiles,
StorageError::NoHealRequired => DiskError::NoHealRequired,
StorageError::CorruptedFormat => DiskError::CorruptedFormat,
StorageError::CorruptedBackend => DiskError::CorruptedBackend,
StorageError::UnformattedDisk => DiskError::UnformattedDisk,
StorageError::DiskNotFound => DiskError::DiskNotFound,
StorageError::FaultyDisk => DiskError::FaultyDisk,
StorageError::DiskFull => DiskError::DiskFull,
StorageError::VolumeNotFound => DiskError::VolumeNotFound,
StorageError::VolumeExists => DiskError::VolumeExists,
StorageError::FileNameTooLong => DiskError::FileNameTooLong,
_ => DiskError::other(e),
}
}
}
impl From<std::io::Error> for StorageError {
fn from(e: std::io::Error) -> Self {
match e.downcast::<StorageError>() {
Ok(storage_error) => storage_error,
Err(io_error) => match io_error.downcast::<DiskError>() {
Ok(disk_error) => disk_error.into(),
Err(io_error) => StorageError::Io(io_error),
},
}
}
}
impl From<StorageError> for std::io::Error {
fn from(e: StorageError) -> Self {
match e {
StorageError::Io(io_error) => io_error,
e => std::io::Error::other(e),
}
}
}
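// Editor's sketch (not part of this commit): the pair of conversions above is
// meant to round-trip, recovering the typed variant via `io::Error::downcast`.
#[cfg(test)]
mod io_roundtrip_sketch {
    use super::StorageError;

    #[test]
    fn variant_survives_io_roundtrip() {
        let io_err: std::io::Error = StorageError::FileNotFound.into();
        let back: StorageError = io_err.into();
        assert_eq!(back, StorageError::FileNotFound);
    }
}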
impl From<rustfs_filemeta::Error> for StorageError {
fn from(e: rustfs_filemeta::Error) -> Self {
match e {
rustfs_filemeta::Error::DoneForNow => StorageError::DoneForNow,
rustfs_filemeta::Error::MethodNotAllowed => StorageError::MethodNotAllowed,
rustfs_filemeta::Error::VolumeNotFound => StorageError::VolumeNotFound,
rustfs_filemeta::Error::FileNotFound => StorageError::FileNotFound,
rustfs_filemeta::Error::FileVersionNotFound => StorageError::FileVersionNotFound,
rustfs_filemeta::Error::FileCorrupt => StorageError::FileCorrupt,
rustfs_filemeta::Error::Unexpected => StorageError::Unexpected,
rustfs_filemeta::Error::Io(io_error) => io_error.into(),
_ => StorageError::Io(std::io::Error::other(e)),
}
}
}
impl From<StorageError> for rustfs_filemeta::Error {
fn from(e: StorageError) -> rustfs_filemeta::Error {
match e {
StorageError::Unexpected => rustfs_filemeta::Error::Unexpected,
StorageError::FileNotFound => rustfs_filemeta::Error::FileNotFound,
StorageError::FileVersionNotFound => rustfs_filemeta::Error::FileVersionNotFound,
StorageError::FileCorrupt => rustfs_filemeta::Error::FileCorrupt,
StorageError::DoneForNow => rustfs_filemeta::Error::DoneForNow,
StorageError::MethodNotAllowed => rustfs_filemeta::Error::MethodNotAllowed,
StorageError::VolumeNotFound => rustfs_filemeta::Error::VolumeNotFound,
StorageError::Io(io_error) => io_error.into(),
_ => rustfs_filemeta::Error::other(e),
}
}
}
impl PartialEq for StorageError {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(StorageError::Io(e1), StorageError::Io(e2)) => e1.kind() == e2.kind() && e1.to_string() == e2.to_string(),
(e1, e2) => e1.to_u32() == e2.to_u32(),
}
}
}
impl Clone for StorageError {
fn clone(&self) -> Self {
match self {
StorageError::Io(e) => StorageError::Io(std::io::Error::new(e.kind(), e.to_string())),
StorageError::FaultyDisk => StorageError::FaultyDisk,
StorageError::DiskFull => StorageError::DiskFull,
StorageError::VolumeNotFound => StorageError::VolumeNotFound,
StorageError::VolumeExists => StorageError::VolumeExists,
StorageError::FileNotFound => StorageError::FileNotFound,
StorageError::FileVersionNotFound => StorageError::FileVersionNotFound,
StorageError::FileNameTooLong => StorageError::FileNameTooLong,
StorageError::FileAccessDenied => StorageError::FileAccessDenied,
StorageError::FileCorrupt => StorageError::FileCorrupt,
StorageError::IsNotRegular => StorageError::IsNotRegular,
StorageError::VolumeNotEmpty => StorageError::VolumeNotEmpty,
StorageError::VolumeAccessDenied => StorageError::VolumeAccessDenied,
StorageError::CorruptedFormat => StorageError::CorruptedFormat,
StorageError::CorruptedBackend => StorageError::CorruptedBackend,
StorageError::UnformattedDisk => StorageError::UnformattedDisk,
StorageError::DiskNotFound => StorageError::DiskNotFound,
StorageError::DriveIsRoot => StorageError::DriveIsRoot,
StorageError::FaultyRemoteDisk => StorageError::FaultyRemoteDisk,
StorageError::DiskAccessDenied => StorageError::DiskAccessDenied,
StorageError::Unexpected => StorageError::Unexpected,
StorageError::ConfigNotFound => StorageError::ConfigNotFound,
StorageError::NotImplemented => StorageError::NotImplemented,
StorageError::InvalidArgument(a, b, c) => StorageError::InvalidArgument(a.clone(), b.clone(), c.clone()),
StorageError::MethodNotAllowed => StorageError::MethodNotAllowed,
StorageError::BucketNotFound(a) => StorageError::BucketNotFound(a.clone()),
StorageError::BucketNotEmpty(a) => StorageError::BucketNotEmpty(a.clone()),
StorageError::BucketNameInvalid(a) => StorageError::BucketNameInvalid(a.clone()),
StorageError::ObjectNameInvalid(a, b) => StorageError::ObjectNameInvalid(a.clone(), b.clone()),
StorageError::BucketExists(a) => StorageError::BucketExists(a.clone()),
StorageError::StorageFull => StorageError::StorageFull,
StorageError::SlowDown => StorageError::SlowDown,
StorageError::PrefixAccessDenied(a, b) => StorageError::PrefixAccessDenied(a.clone(), b.clone()),
StorageError::InvalidUploadIDKeyCombination(a, b) => {
StorageError::InvalidUploadIDKeyCombination(a.clone(), b.clone())
}
StorageError::MalformedUploadID(a) => StorageError::MalformedUploadID(a.clone()),
StorageError::ObjectNameTooLong(a, b) => StorageError::ObjectNameTooLong(a.clone(), b.clone()),
StorageError::ObjectNamePrefixAsSlash(a, b) => StorageError::ObjectNamePrefixAsSlash(a.clone(), b.clone()),
StorageError::ObjectNotFound(a, b) => StorageError::ObjectNotFound(a.clone(), b.clone()),
StorageError::VersionNotFound(a, b, c) => StorageError::VersionNotFound(a.clone(), b.clone(), c.clone()),
StorageError::InvalidUploadID(a, b, c) => StorageError::InvalidUploadID(a.clone(), b.clone(), c.clone()),
StorageError::InvalidVersionID(a, b, c) => StorageError::InvalidVersionID(a.clone(), b.clone(), c.clone()),
StorageError::DataMovementOverwriteErr(a, b, c) => {
StorageError::DataMovementOverwriteErr(a.clone(), b.clone(), c.clone())
}
StorageError::ObjectExistsAsDirectory(a, b) => StorageError::ObjectExistsAsDirectory(a.clone(), b.clone()),
// StorageError::InsufficientReadQuorum => StorageError::InsufficientReadQuorum,
// StorageError::InsufficientWriteQuorum => StorageError::InsufficientWriteQuorum,
StorageError::DecommissionNotStarted => StorageError::DecommissionNotStarted,
StorageError::DecommissionAlreadyRunning => StorageError::DecommissionAlreadyRunning,
StorageError::DoneForNow => StorageError::DoneForNow,
StorageError::InvalidPart(a, b, c) => StorageError::InvalidPart(a.clone(), b.clone(), c.clone()),
StorageError::ErasureReadQuorum => StorageError::ErasureReadQuorum,
StorageError::ErasureWriteQuorum => StorageError::ErasureWriteQuorum,
StorageError::NotFirstDisk => StorageError::NotFirstDisk,
StorageError::FirstDiskWait => StorageError::FirstDiskWait,
StorageError::TooManyOpenFiles => StorageError::TooManyOpenFiles,
StorageError::NoHealRequired => StorageError::NoHealRequired,
}
}
}
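// The numeric codes below act as a stable identifier per variant (used for
// equality above, and usable for cross-process transport). 0x2B/0x2C stay
// reserved for the retired InsufficientReadQuorum/InsufficientWriteQuorum
// variants.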
impl StorageError {
pub fn to_u32(&self) -> u32 {
match self {
StorageError::Io(_) => 0x01,
StorageError::FaultyDisk => 0x02,
StorageError::DiskFull => 0x03,
StorageError::VolumeNotFound => 0x04,
StorageError::VolumeExists => 0x05,
StorageError::FileNotFound => 0x06,
StorageError::FileVersionNotFound => 0x07,
StorageError::FileNameTooLong => 0x08,
StorageError::FileAccessDenied => 0x09,
StorageError::FileCorrupt => 0x0A,
StorageError::IsNotRegular => 0x0B,
StorageError::VolumeNotEmpty => 0x0C,
StorageError::VolumeAccessDenied => 0x0D,
StorageError::CorruptedFormat => 0x0E,
StorageError::CorruptedBackend => 0x0F,
StorageError::UnformattedDisk => 0x10,
StorageError::DiskNotFound => 0x11,
StorageError::DriveIsRoot => 0x12,
StorageError::FaultyRemoteDisk => 0x13,
StorageError::DiskAccessDenied => 0x14,
StorageError::Unexpected => 0x15,
StorageError::NotImplemented => 0x16,
StorageError::InvalidArgument(_, _, _) => 0x17,
StorageError::MethodNotAllowed => 0x18,
StorageError::BucketNotFound(_) => 0x19,
StorageError::BucketNotEmpty(_) => 0x1A,
StorageError::BucketNameInvalid(_) => 0x1B,
StorageError::ObjectNameInvalid(_, _) => 0x1C,
StorageError::BucketExists(_) => 0x1D,
StorageError::StorageFull => 0x1E,
StorageError::SlowDown => 0x1F,
StorageError::PrefixAccessDenied(_, _) => 0x20,
StorageError::InvalidUploadIDKeyCombination(_, _) => 0x21,
StorageError::MalformedUploadID(_) => 0x22,
StorageError::ObjectNameTooLong(_, _) => 0x23,
StorageError::ObjectNamePrefixAsSlash(_, _) => 0x24,
StorageError::ObjectNotFound(_, _) => 0x25,
StorageError::VersionNotFound(_, _, _) => 0x26,
StorageError::InvalidUploadID(_, _, _) => 0x27,
StorageError::InvalidVersionID(_, _, _) => 0x28,
StorageError::DataMovementOverwriteErr(_, _, _) => 0x29,
StorageError::ObjectExistsAsDirectory(_, _) => 0x2A,
// StorageError::InsufficientReadQuorum => 0x2B,
// StorageError::InsufficientWriteQuorum => 0x2C,
StorageError::DecommissionNotStarted => 0x2D,
StorageError::InvalidPart(_, _, _) => 0x2E,
StorageError::DoneForNow => 0x2F,
StorageError::DecommissionAlreadyRunning => 0x30,
StorageError::ErasureReadQuorum => 0x31,
StorageError::ErasureWriteQuorum => 0x32,
StorageError::NotFirstDisk => 0x33,
StorageError::FirstDiskWait => 0x34,
StorageError::ConfigNotFound => 0x35,
StorageError::TooManyOpenFiles => 0x36,
StorageError::NoHealRequired => 0x37,
}
}
pub fn from_u32(error: u32) -> Option<Self> {
match error {
0x01 => Some(StorageError::Io(std::io::Error::other("Io error"))),
0x02 => Some(StorageError::FaultyDisk),
0x03 => Some(StorageError::DiskFull),
0x04 => Some(StorageError::VolumeNotFound),
0x05 => Some(StorageError::VolumeExists),
0x06 => Some(StorageError::FileNotFound),
0x07 => Some(StorageError::FileVersionNotFound),
0x08 => Some(StorageError::FileNameTooLong),
0x09 => Some(StorageError::FileAccessDenied),
0x0A => Some(StorageError::FileCorrupt),
0x0B => Some(StorageError::IsNotRegular),
0x0C => Some(StorageError::VolumeNotEmpty),
0x0D => Some(StorageError::VolumeAccessDenied),
0x0E => Some(StorageError::CorruptedFormat),
0x0F => Some(StorageError::CorruptedBackend),
0x10 => Some(StorageError::UnformattedDisk),
0x11 => Some(StorageError::DiskNotFound),
0x12 => Some(StorageError::DriveIsRoot),
0x13 => Some(StorageError::FaultyRemoteDisk),
0x14 => Some(StorageError::DiskAccessDenied),
0x15 => Some(StorageError::Unexpected),
0x16 => Some(StorageError::NotImplemented),
0x17 => Some(StorageError::InvalidArgument(Default::default(), Default::default(), Default::default())),
0x18 => Some(StorageError::MethodNotAllowed),
0x19 => Some(StorageError::BucketNotFound(Default::default())),
0x1A => Some(StorageError::BucketNotEmpty(Default::default())),
0x1B => Some(StorageError::BucketNameInvalid(Default::default())),
0x1C => Some(StorageError::ObjectNameInvalid(Default::default(), Default::default())),
0x1D => Some(StorageError::BucketExists(Default::default())),
0x1E => Some(StorageError::StorageFull),
0x1F => Some(StorageError::SlowDown),
0x20 => Some(StorageError::PrefixAccessDenied(Default::default(), Default::default())),
0x21 => Some(StorageError::InvalidUploadIDKeyCombination(Default::default(), Default::default())),
0x22 => Some(StorageError::MalformedUploadID(Default::default())),
0x23 => Some(StorageError::ObjectNameTooLong(Default::default(), Default::default())),
0x24 => Some(StorageError::ObjectNamePrefixAsSlash(Default::default(), Default::default())),
0x25 => Some(StorageError::ObjectNotFound(Default::default(), Default::default())),
0x26 => Some(StorageError::VersionNotFound(Default::default(), Default::default(), Default::default())),
0x27 => Some(StorageError::InvalidUploadID(Default::default(), Default::default(), Default::default())),
0x28 => Some(StorageError::InvalidVersionID(Default::default(), Default::default(), Default::default())),
0x29 => Some(StorageError::DataMovementOverwriteErr(
Default::default(),
Default::default(),
Default::default(),
)),
0x2A => Some(StorageError::ObjectExistsAsDirectory(Default::default(), Default::default())),
// 0x2B => Some(StorageError::InsufficientReadQuorum),
// 0x2C => Some(StorageError::InsufficientWriteQuorum),
0x2D => Some(StorageError::DecommissionNotStarted),
0x2E => Some(StorageError::InvalidPart(Default::default(), Default::default(), Default::default())),
0x2F => Some(StorageError::DoneForNow),
0x30 => Some(StorageError::DecommissionAlreadyRunning),
0x31 => Some(StorageError::ErasureReadQuorum),
0x32 => Some(StorageError::ErasureWriteQuorum),
0x33 => Some(StorageError::NotFirstDisk),
0x34 => Some(StorageError::FirstDiskWait),
0x35 => Some(StorageError::ConfigNotFound),
0x36 => Some(StorageError::TooManyOpenFiles),
0x37 => Some(StorageError::NoHealRequired),
_ => None,
}
}
}
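// A minimal sketch (illustrative only, not part of this module's API) of
// carrying a StorageError across a process boundary using the codes above.
// Note that `from_u32` is lossy: parameterized variants come back with
// `Default::default()` fields, so only the variant kind survives the trip.
fn encode_storage_error(err: &StorageError) -> u32 {
err.to_u32()
}
fn decode_storage_error(code: u32) -> StorageError {
StorageError::from_u32(code)
.unwrap_or_else(|| StorageError::Io(std::io::Error::other(format!("unknown storage error code {code:#x}"))))
}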
impl From<tokio::task::JoinError> for StorageError {
fn from(e: tokio::task::JoinError) -> Self {
StorageError::other(e)
}
}
impl From<serde_json::Error> for StorageError {
fn from(e: serde_json::Error) -> Self {
StorageError::other(e)
}
}
impl From<rmp_serde::encode::Error> for Error {
fn from(e: rmp_serde::encode::Error) -> Self {
Error::other(e)
}
}
impl From<rmp::encode::ValueWriteError> for Error {
fn from(e: rmp::encode::ValueWriteError) -> Self {
Error::other(e)
}
}
impl From<rmp::decode::ValueReadError> for Error {
fn from(e: rmp::decode::ValueReadError) -> Self {
Error::other(e)
}
}
impl From<std::string::FromUtf8Error> for Error {
fn from(e: std::string::FromUtf8Error) -> Self {
Error::other(e)
}
}
impl From<rmp::decode::NumValueReadError> for Error {
fn from(e: rmp::decode::NumValueReadError) -> Self {
Error::other(e)
}
}
impl From<rmp_serde::decode::Error> for Error {
fn from(e: rmp_serde::decode::Error) -> Self {
Error::other(e)
}
}
impl From<s3s::xml::SerError> for Error {
fn from(e: s3s::xml::SerError) -> Self {
Error::other(e)
}
}
impl From<s3s::xml::DeError> for Error {
fn from(e: s3s::xml::DeError) -> Self {
Error::other(e)
}
}
impl From<tonic::Status> for Error {
fn from(e: tonic::Status) -> Self {
Error::other(e.to_string())
}
}
impl From<uuid::Error> for Error {
fn from(e: uuid::Error) -> Self {
Error::other(e)
}
}
impl From<time::error::ComponentRange> for Error {
fn from(e: time::error::ComponentRange) -> Self {
Error::other(e)
}
}
pub fn is_err_object_not_found(err: &Error) -> bool {
matches!(err, &Error::FileNotFound) || matches!(err, &Error::ObjectNotFound(_, _))
}
pub fn is_err_version_not_found(err: &Error) -> bool {
matches!(err, &Error::FileVersionNotFound) || matches!(err, &Error::VersionNotFound(_, _, _))
}
pub fn is_err_bucket_exists(err: &Error) -> bool {
matches!(err, &StorageError::BucketExists(_))
}
pub fn is_err_read_quorum(err: &Error) -> bool {
matches!(err, &StorageError::ErasureReadQuorum)
}
pub fn is_err_invalid_upload_id(err: &Error) -> bool {
matches!(err, &StorageError::InvalidUploadID(_, _, _))
}
pub fn is_err_bucket_not_found(err: &Error) -> bool {
matches!(err, &StorageError::VolumeNotFound)
|| matches!(err, &StorageError::DiskNotFound)
|| matches!(err, &StorageError::BucketNotFound(_))
}
pub fn is_err_data_movement_overwrite(err: &Error) -> bool {
matches!(err, &StorageError::DataMovementOverwriteErr(_, _, _))
}
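// The `is_all_*` aggregate checks below treat `None` as success: one healthy
// response (or any unrelated error) means the result is not "all not found".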
pub fn is_all_not_found(errs: &[Option<Error>]) -> bool {
for err in errs.iter() {
if let Some(err) = err {
if is_err_object_not_found(err) || is_err_version_not_found(err) || is_err_bucket_not_found(err) {
continue;
}
return false;
}
return false;
}
!errs.is_empty()
}
pub fn is_all_volume_not_found(errs: &[Option<Error>]) -> bool {
for err in errs.iter() {
if let Some(err) = err {
if is_err_bucket_not_found(err) {
continue;
}
return false;
}
return false;
}
!errs.is_empty()
}
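// Maps low-level storage errors to object-scoped API errors. `params` is
// positional: params[0] is the bucket, params[1] the object key (run through
// `decode_dir_object`), params[2] the version id. A hedged usage sketch:
//
// let err = to_object_err(StorageError::FileNotFound, vec!["mybucket", "photo.png"]);
// assert_eq!(err, StorageError::ObjectNotFound("mybucket".into(), "photo.png".into()));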
pub fn to_object_err(err: Error, params: Vec<&str>) -> Error {
match err {
StorageError::DiskFull => StorageError::StorageFull,
StorageError::FileNotFound => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
StorageError::ObjectNotFound(bucket, object)
}
StorageError::FileVersionNotFound => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
let version = params.get(2).cloned().unwrap_or_default().to_owned();
StorageError::VersionNotFound(bucket, object, version)
}
StorageError::TooManyOpenFiles => StorageError::SlowDown,
StorageError::FileNameTooLong => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
StorageError::ObjectNameInvalid(bucket, object)
}
StorageError::VolumeExists => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
StorageError::BucketExists(bucket)
}
StorageError::IsNotRegular => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
StorageError::ObjectExistsAsDirectory(bucket, object)
}
StorageError::VolumeNotFound => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
StorageError::BucketNotFound(bucket)
}
StorageError::VolumeNotEmpty => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
StorageError::BucketNotEmpty(bucket)
}
StorageError::FileAccessDenied => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
StorageError::PrefixAccessDenied(bucket, object)
}
_ => err,
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::{Error as IoError, ErrorKind};
#[test]
fn test_storage_error_to_u32() {
// Test Io error uses 0x01
let io_error = StorageError::Io(IoError::other("test"));
assert_eq!(io_error.to_u32(), 0x01);
// Test other errors have correct codes
assert_eq!(StorageError::FaultyDisk.to_u32(), 0x02);
assert_eq!(StorageError::DiskFull.to_u32(), 0x03);
assert_eq!(StorageError::VolumeNotFound.to_u32(), 0x04);
assert_eq!(StorageError::VolumeExists.to_u32(), 0x05);
assert_eq!(StorageError::FileNotFound.to_u32(), 0x06);
assert_eq!(StorageError::DecommissionAlreadyRunning.to_u32(), 0x30);
}
#[test]
fn test_storage_error_from_u32() {
// Test Io error conversion
assert!(matches!(StorageError::from_u32(0x01), Some(StorageError::Io(_))));
// Test other error conversions
assert!(matches!(StorageError::from_u32(0x02), Some(StorageError::FaultyDisk)));
assert!(matches!(StorageError::from_u32(0x03), Some(StorageError::DiskFull)));
assert!(matches!(StorageError::from_u32(0x04), Some(StorageError::VolumeNotFound)));
assert!(matches!(StorageError::from_u32(0x30), Some(StorageError::DecommissionAlreadyRunning)));
// Test invalid code returns None
assert!(StorageError::from_u32(0xFF).is_none());
}
#[test]
fn test_storage_error_partial_eq() {
// Test IO error comparison
let io1 = StorageError::Io(IoError::new(ErrorKind::NotFound, "file not found"));
let io2 = StorageError::Io(IoError::new(ErrorKind::NotFound, "file not found"));
let io3 = StorageError::Io(IoError::new(ErrorKind::PermissionDenied, "access denied"));
assert_eq!(io1, io2);
assert_ne!(io1, io3);
// Test non-IO error comparison
let bucket1 = StorageError::BucketExists("test".to_string());
let bucket2 = StorageError::BucketExists("different".to_string());
assert_eq!(bucket1, bucket2); // Same error type, different parameters
let disk_error = StorageError::DiskFull;
assert_ne!(bucket1, disk_error);
}
#[test]
fn test_storage_error_from_disk_error() {
// Test conversion from DiskError
let disk_io = DiskError::Io(IoError::other("disk io error"));
let storage_error: StorageError = disk_io.into();
assert!(matches!(storage_error, StorageError::Io(_)));
let disk_full = DiskError::DiskFull;
let storage_error: StorageError = disk_full.into();
assert_eq!(storage_error, StorageError::DiskFull);
let file_not_found = DiskError::FileNotFound;
let storage_error: StorageError = file_not_found.into();
assert_eq!(storage_error, StorageError::FileNotFound);
}
#[test]
fn test_storage_error_from_io_error() {
// Test direct IO error conversion
let io_error = IoError::new(ErrorKind::NotFound, "test error");
let storage_error: StorageError = io_error.into();
assert!(matches!(storage_error, StorageError::Io(_)));
// Test IO error containing DiskError
let disk_error = DiskError::DiskFull;
let io_with_disk_error = IoError::other(disk_error);
let storage_error: StorageError = io_with_disk_error.into();
assert_eq!(storage_error, StorageError::DiskFull);
// Test IO error containing StorageError
let original_storage_error = StorageError::BucketNotFound("test".to_string());
let io_with_storage_error = IoError::other(original_storage_error.clone());
let recovered_storage_error: StorageError = io_with_storage_error.into();
assert_eq!(recovered_storage_error, original_storage_error);
}
#[test]
fn test_storage_error_to_io_error() {
// Test conversion to IO error
let storage_error = StorageError::DiskFull;
let io_error: IoError = storage_error.into();
assert_eq!(io_error.kind(), ErrorKind::Other);
// Test IO error round trip
let original_io = IoError::new(ErrorKind::PermissionDenied, "access denied");
let storage_error = StorageError::Io(original_io);
let converted_io: IoError = storage_error.into();
assert_eq!(converted_io.kind(), ErrorKind::PermissionDenied);
}
#[test]
fn test_bucket_and_object_errors() {
let bucket_not_found = StorageError::BucketNotFound("mybucket".to_string());
let object_not_found = StorageError::ObjectNotFound("mybucket".to_string(), "myobject".to_string());
let version_not_found = StorageError::VersionNotFound("mybucket".to_string(), "myobject".to_string(), "v1".to_string());
// Test different error codes
assert_ne!(bucket_not_found.to_u32(), object_not_found.to_u32());
assert_ne!(object_not_found.to_u32(), version_not_found.to_u32());
// Test error messages contain expected information
assert!(bucket_not_found.to_string().contains("mybucket"));
assert!(object_not_found.to_string().contains("mybucket"));
assert!(object_not_found.to_string().contains("myobject"));
assert!(version_not_found.to_string().contains("v1"));
}
#[test]
fn test_upload_id_errors() {
let invalid_upload = StorageError::InvalidUploadID("bucket".to_string(), "object".to_string(), "uploadid".to_string());
let malformed_upload = StorageError::MalformedUploadID("badid".to_string());
assert_ne!(invalid_upload.to_u32(), malformed_upload.to_u32());
assert!(invalid_upload.to_string().contains("uploadid"));
assert!(malformed_upload.to_string().contains("badid"));
}
#[test]
fn test_round_trip_conversion() {
// Test that to_u32 and from_u32 are consistent for all variants
let test_errors = vec![
StorageError::FaultyDisk,
StorageError::DiskFull,
StorageError::VolumeNotFound,
StorageError::BucketExists("test".to_string()),
StorageError::ObjectNotFound("bucket".to_string(), "object".to_string()),
StorageError::DecommissionAlreadyRunning,
];
for original_error in test_errors {
let code = original_error.to_u32();
if let Some(recovered_error) = StorageError::from_u32(code) {
// For errors with parameters, we only check the variant type
assert_eq!(std::mem::discriminant(&original_error), std::mem::discriminant(&recovered_error));
} else {
panic!("Failed to recover error from code: {:#x}", code);
}
}
}
}

View File

@@ -1,7 +1,7 @@
use crate::disk::FileInfoVersions;
use crate::file_meta_inline::InlineData;
use crate::store_api::RawFileInfo;
use crate::store_err::StorageError;
use crate::error::StorageError;
use crate::{
disk::error::DiskError,
store_api::{ErasureInfo, FileInfo, ObjectPartInfo, ERASURE_ALGORITHM},

View File

@@ -16,6 +16,7 @@ use super::{
heal_commands::HealOpts,
heal_ops::{new_bg_heal_sequence, HealSequence},
};
use crate::error::{Error, Result};
use crate::global::GLOBAL_MRFState;
use crate::heal::error::ERR_RETRY_HEALING;
use crate::heal::heal_commands::{HealScanMode, HEAL_ITEM_BUCKET};
@@ -35,7 +36,6 @@ use crate::{
store_api::{BucketInfo, BucketOptions, StorageAPI},
utils::path::{path_join, SLASH_SEPARATOR},
};
use common::error::{Error, Result};
pub static DEFAULT_MONITOR_NEW_DISK_INTERVAL: Duration = Duration::from_secs(10);
@@ -72,7 +72,7 @@ pub async fn get_local_disks_to_heal() -> Vec<Endpoint> {
for (_, disk) in GLOBAL_LOCAL_DISK_MAP.read().await.iter() {
if let Some(disk) = disk {
if let Err(err) = disk.disk_info(&DiskInfoOptions::default()).await {
if let Some(DiskError::UnformattedDisk) = err.downcast_ref() {
if err == DiskError::UnformattedDisk {
info!("get_local_disks_to_heal, disk is unformatted: {}", err);
disks_to_heal.push(disk.endpoint());
}
@@ -111,7 +111,7 @@ async fn monitor_local_disks_and_heal() {
let store = new_object_layer_fn().expect("errServerNotInitialized");
if let (_result, Some(err)) = store.heal_format(false).await.expect("heal format failed") {
error!("heal local disk format error: {}", err);
if let Some(DiskError::NoHealRequired) = err.downcast_ref::<DiskError>() {
if err == Error::NoHealRequired {
} else {
info!("heal format err: {}", err.to_string());
interval.reset();
@@ -146,7 +146,7 @@ async fn heal_fresh_disk(endpoint: &Endpoint) -> Result<()> {
let disk = match get_disk_via_endpoint(endpoint).await {
Some(disk) => disk,
None => {
return Err(Error::from_string(format!(
return Err(Error::other(format!(
"Unexpected error disk must be initialized by now after formatting: {}",
endpoint
)))
@@ -154,13 +154,13 @@ async fn heal_fresh_disk(endpoint: &Endpoint) -> Result<()> {
};
if let Err(err) = disk.disk_info(&DiskInfoOptions::default()).await {
match err.downcast_ref() {
Some(DiskError::DriveIsRoot) => {
match err {
DiskError::DriveIsRoot => {
return Ok(());
}
Some(DiskError::UnformattedDisk) => {}
DiskError::UnformattedDisk => {}
_ => {
return Err(err);
return Err(err.into());
}
}
}
@@ -168,8 +168,8 @@ async fn heal_fresh_disk(endpoint: &Endpoint) -> Result<()> {
let mut tracker = match load_healing_tracker(&Some(disk.clone())).await {
Ok(tracker) => tracker,
Err(err) => {
match err.downcast_ref() {
Some(DiskError::FileNotFound) => {
match err {
DiskError::FileNotFound => {
return Ok(());
}
_ => {
@@ -189,7 +189,9 @@ async fn heal_fresh_disk(endpoint: &Endpoint) -> Result<()> {
endpoint.to_string()
);
let Some(store) = new_object_layer_fn() else { return Err(Error::msg("errServerNotInitialized")) };
let Some(store) = new_object_layer_fn() else {
return Err(Error::other("errServerNotInitialized"));
};
let mut buckets = store.list_bucket(&BucketOptions::default()).await?;
buckets.push(BucketInfo {
@@ -238,7 +240,7 @@ async fn heal_fresh_disk(endpoint: &Endpoint) -> Result<()> {
if let Err(err) = tracker_w.update().await {
info!("update tracker failed: {}", err.to_string());
}
return Err(Error::from_string(ERR_RETRY_HEALING));
return Err(Error::other(ERR_RETRY_HEALING));
}
if tracker_w.items_failed > 0 {
@@ -272,7 +274,9 @@ async fn heal_fresh_disk(endpoint: &Endpoint) -> Result<()> {
error!("delete tracker failed: {}", err.to_string());
}
}
let Some(store) = new_object_layer_fn() else { return Err(Error::msg("errServerNotInitialized")) };
let Some(store) = new_object_layer_fn() else {
return Err(Error::other("errServerNotInitialized"));
};
let disks = store.get_disks(pool_idx, set_idx).await?;
for disk in disks.into_iter() {
if disk.is_none() {
@@ -281,8 +285,8 @@ async fn heal_fresh_disk(endpoint: &Endpoint) -> Result<()> {
let mut tracker = match load_healing_tracker(&disk).await {
Ok(tracker) => tracker,
Err(err) => {
match err.downcast_ref() {
Some(DiskError::FileNotFound) => {}
match err {
DiskError::FileNotFound => {}
_ => {
info!("Unable to load healing tracker on '{:?}': {}, re-initializing..", disk, err.to_string());
}
@@ -362,7 +366,7 @@ impl HealRoutine {
Some(task) => {
info!("got task: {:?}", task);
if task.bucket == NOP_HEAL {
d_err = Some(Error::from_string("skip file"));
d_err = Some(Error::other("skip file"));
} else if task.bucket == SLASH_SEPARATOR {
match heal_disk_format(task.opts).await {
Ok((res, err)) => {
@@ -426,7 +430,9 @@ impl HealRoutine {
// }
async fn heal_disk_format(opts: HealOpts) -> Result<(HealResultItem, Option<Error>)> {
let Some(store) = new_object_layer_fn() else { return Err(Error::msg("errServerNotInitialized")) };
let Some(store) = new_object_layer_fn() else {
return Err(Error::other("errServerNotInitialized"));
};
let (res, err) = store.heal_format(opts.dry_run).await?;
// return any error, ignore error returned when disks have

View File

@@ -20,6 +20,7 @@ use super::{
};
use crate::{
bucket::{versioning::VersioningApi, versioning_sys::BucketVersioningSys},
disk,
heal::data_usage::DATA_USAGE_ROOT,
};
use crate::{
@@ -28,7 +29,7 @@ use crate::{
com::{read_config, save_config},
heal::Config,
},
disk::{error::DiskError, DiskInfoOptions, DiskStore, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams},
disk::{DiskInfoOptions, DiskStore},
global::{GLOBAL_BackgroundHealState, GLOBAL_IsErasure, GLOBAL_IsErasureSD},
heal::{
data_usage::BACKGROUND_HEAL_INFO_PATH,
@@ -42,16 +43,17 @@ use crate::{
store::ECStore,
utils::path::{path_join, path_to_bucket_object, path_to_bucket_object_with_base_path, SLASH_SEPARATOR},
};
use crate::{disk::local::LocalDisk, heal::data_scanner_metric::current_path_updater};
use crate::{
disk::DiskAPI,
store_api::{FileInfo, ObjectInfo},
disk::error::DiskError,
error::{Error, Result},
};
use crate::{disk::local::LocalDisk, heal::data_scanner_metric::current_path_updater};
use crate::{disk::DiskAPI, store_api::ObjectInfo};
use chrono::{DateTime, Utc};
use common::error::{Error, Result};
use lazy_static::lazy_static;
use rand::Rng;
use rmp_serde::{Deserializer, Serializer};
use rustfs_filemeta::{FileInfo, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams};
use s3s::dto::{BucketLifecycleConfiguration, ExpirationStatus, LifecycleRule, ReplicationConfiguration, ReplicationRuleStatus};
use serde::{Deserialize, Serialize};
use tokio::{
@@ -460,7 +462,7 @@ impl CurrentScannerCycle {
Deserialize::deserialize(&mut Deserializer::new(&buf[..])).expect("Deserialization failed");
self.cycle_completed = u;
}
name => return Err(Error::msg(format!("not support field name {}", name))),
name => return Err(Error::other(format!("not support field name {}", name))),
}
}
@@ -540,7 +542,12 @@ impl ScannerItem {
if self.lifecycle.is_none() {
for info in fives.iter() {
object_infos.push(info.to_object_info(&self.bucket, &self.object_path().to_string_lossy(), versioned));
object_infos.push(ObjectInfo::from_file_info(
info,
&self.bucket,
&self.object_path().to_string_lossy(),
versioned,
));
}
return Ok(object_infos);
}
@@ -594,7 +601,7 @@ struct CachedFolder {
}
pub type GetSizeFn =
Box<dyn Fn(&ScannerItem) -> Pin<Box<dyn Future<Output = Result<SizeSummary>> + Send>> + Send + Sync + 'static>;
Box<dyn Fn(&ScannerItem) -> Pin<Box<dyn Future<Output = std::io::Result<SizeSummary>> + Send>> + Send + Sync + 'static>;
pub type UpdateCurrentPathFn = Arc<dyn Fn(&str) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync + 'static>;
pub type ShouldSleepFn = Option<Arc<dyn Fn() -> bool + Send + Sync + 'static>>;
@@ -929,7 +936,7 @@ impl FolderScanner {
}
})
})),
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<Error>]| {
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<DiskError>]| {
Box::pin({
let update_current_path_partial = update_current_path_partial.clone();
// let tx_partial = tx_partial.clone();
@@ -973,8 +980,8 @@ impl FolderScanner {
)
.await
{
match err.downcast_ref() {
Some(DiskError::FileNotFound) | Some(DiskError::FileVersionNotFound) => {}
match err {
Error::FileNotFound | Error::FileVersionNotFound => {}
_ => {
info!("{}", err.to_string());
}
@@ -1018,7 +1025,7 @@ impl FolderScanner {
}
})
})),
finished: Some(Box::new(move |_: &[Option<Error>]| {
finished: Some(Box::new(move |_: &[Option<DiskError>]| {
Box::pin({
let tx_finished = tx_finished.clone();
async move {
@@ -1077,7 +1084,7 @@ impl FolderScanner {
if !into.compacted {
self.new_cache.reduce_children_of(
&this_hash,
DATA_SCANNER_COMPACT_AT_CHILDREN.try_into()?,
DATA_SCANNER_COMPACT_AT_CHILDREN as usize,
self.new_cache.info.name != folder.name,
);
}
@@ -1234,9 +1241,9 @@ pub async fn scan_data_folder(
get_size_fn: GetSizeFn,
heal_scan_mode: HealScanMode,
should_sleep: ShouldSleepFn,
) -> Result<DataUsageCache> {
) -> disk::error::Result<DataUsageCache> {
if cache.info.name.is_empty() || cache.info.name == DATA_USAGE_ROOT {
return Err(Error::from_string("internal error: root scan attempted"));
return Err(DiskError::other("internal error: root scan attempted"));
}
let base_path = drive.to_string();

View File

@@ -1,16 +1,13 @@
use crate::error::{Error, Result};
use crate::{
bucket::metadata_sys::get_replication_config,
config::{
com::{read_config, save_config},
error::is_err_config_not_found,
},
config::com::{read_config, save_config},
disk::{BUCKET_META_PREFIX, RUSTFS_META_BUCKET},
error::to_object_err,
new_object_layer_fn,
store::ECStore,
store_err::to_object_err,
utils::path::SLASH_SEPARATOR,
};
use common::error::Result;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, sync::Arc, time::SystemTime};
@@ -146,7 +143,7 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa
Ok(data) => data,
Err(e) => {
error!("Failed to read data usage info from backend: {}", e);
if is_err_config_not_found(&e) {
if e == Error::ConfigNotFound {
return Ok(DataUsageInfo::default());
}

View File

@@ -1,11 +1,10 @@
use crate::config::com::save_config;
use crate::disk::error::DiskError;
use crate::disk::{BUCKET_META_PREFIX, RUSTFS_META_BUCKET};
use crate::error::{Error, Result};
use crate::new_object_layer_fn;
use crate::set_disk::SetDisks;
use crate::store_api::{BucketInfo, ObjectIO, ObjectOptions};
use bytesize::ByteSize;
use common::error::{Error, Result};
use http::HeaderMap;
use path_clean::PathClean;
use rand::Rng;
@@ -402,8 +401,8 @@ impl DataUsageCache {
}
Err(err) => {
// warn!("Failed to load data usage cache from backend: {}", &err);
match err.downcast_ref::<DiskError>() {
Some(DiskError::FileNotFound) | Some(DiskError::VolumeNotFound) => {
match err {
Error::FileNotFound | Error::VolumeNotFound => {
match store
.get_object_reader(
RUSTFS_META_BUCKET,
@@ -423,8 +422,8 @@ impl DataUsageCache {
}
break;
}
Err(_) => match err.downcast_ref::<DiskError>() {
Some(DiskError::FileNotFound) | Some(DiskError::VolumeNotFound) => {
Err(_) => match err {
Error::FileNotFound | Error::VolumeNotFound => {
break;
}
_ => {}
@@ -448,7 +447,9 @@ impl DataUsageCache {
}
pub async fn save(&self, name: &str) -> Result<()> {
let Some(store) = new_object_layer_fn() else { return Err(Error::msg("errServerNotInitialized")) };
let Some(store) = new_object_layer_fn() else {
return Err(Error::other("errServerNotInitialized"));
};
let buf = self.marshal_msg()?;
let buf_clone = buf.clone();
@@ -460,7 +461,8 @@ impl DataUsageCache {
tokio::spawn(async move {
let _ = save_config(store_clone, &format!("{}{}", &name_clone, ".bkp"), buf_clone).await;
});
save_config(store, &name, buf).await
save_config(store, &name, buf).await?;
Ok(())
}
pub fn replace(&mut self, path: &str, parent: &str, e: DataUsageEntry) {

View File

@@ -6,15 +6,15 @@ use std::{
use crate::{
config::storageclass::{RRS, STANDARD},
disk::{DeleteOptions, DiskAPI, DiskStore, BUCKET_META_PREFIX, RUSTFS_META_BUCKET},
disk::{error::DiskError, DeleteOptions, DiskAPI, DiskStore, BUCKET_META_PREFIX, RUSTFS_META_BUCKET},
global::GLOBAL_BackgroundHealState,
heal::heal_ops::HEALING_TRACKER_FILENAME,
new_object_layer_fn,
store_api::{BucketInfo, StorageAPI},
utils::fs::read_file,
};
use crate::{disk, error::Result};
use chrono::{DateTime, Utc};
use common::error::{Error, Result};
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;
@@ -124,12 +124,12 @@ pub struct HealingTracker {
}
impl HealingTracker {
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
serde_json::to_vec(self).map_err(|err| Error::from_string(err.to_string()))
pub fn marshal_msg(&self) -> disk::error::Result<Vec<u8>> {
Ok(serde_json::to_vec(self)?)
}
pub fn unmarshal_msg(data: &[u8]) -> Result<Self> {
serde_json::from_slice::<HealingTracker>(data).map_err(|err| Error::from_string(err.to_string()))
pub fn unmarshal_msg(data: &[u8]) -> disk::error::Result<Self> {
Ok(serde_json::from_slice::<HealingTracker>(data)?)
}
pub async fn reset_healing(&mut self) {
@@ -195,10 +195,10 @@ impl HealingTracker {
}
}
pub async fn update(&mut self) -> Result<()> {
pub async fn update(&mut self) -> disk::error::Result<()> {
if let Some(disk) = &self.disk {
if healing(disk.path().to_string_lossy().as_ref()).await?.is_none() {
return Err(Error::from_string(format!("healingTracker: drive {} is not marked as healing", self.id)));
return Err(DiskError::other(format!("healingTracker: drive {} is not marked as healing", self.id)));
}
let _ = self.mu.write().await;
if self.id.is_empty() || self.pool_index.is_none() || self.set_index.is_none() || self.disk_index.is_none() {
@@ -213,12 +213,16 @@ impl HealingTracker {
self.save().await
}
pub async fn save(&mut self) -> Result<()> {
pub async fn save(&mut self) -> disk::error::Result<()> {
let _ = self.mu.write().await;
if self.pool_index.is_none() || self.set_index.is_none() || self.disk_index.is_none() {
let Some(store) = new_object_layer_fn() else { return Err(Error::msg("errServerNotInitialized")) };
let Some(store) = new_object_layer_fn() else {
return Err(DiskError::other("errServerNotInitialized"));
};
(self.pool_index, self.set_index, self.disk_index) = store.get_pool_and_set(&self.id).await?;
// TODO: check error type
(self.pool_index, self.set_index, self.disk_index) =
store.get_pool_and_set(&self.id).await.map_err(|_| DiskError::DiskNotFound)?;
}
self.last_update = Some(SystemTime::now());
@@ -229,9 +233,8 @@ impl HealingTracker {
if let Some(disk) = &self.disk {
let file_path = Path::new(BUCKET_META_PREFIX).join(HEALING_TRACKER_FILENAME);
return disk
.write_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap(), htracker_bytes)
.await;
disk.write_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap(), htracker_bytes)
.await?;
}
Ok(())
}
@@ -239,17 +242,16 @@ impl HealingTracker {
pub async fn delete(&self) -> Result<()> {
if let Some(disk) = &self.disk {
let file_path = Path::new(BUCKET_META_PREFIX).join(HEALING_TRACKER_FILENAME);
return disk
.delete(
RUSTFS_META_BUCKET,
file_path.to_str().unwrap(),
DeleteOptions {
recursive: false,
immediate: false,
..Default::default()
},
)
.await;
disk.delete(
RUSTFS_META_BUCKET,
file_path.to_str().unwrap(),
DeleteOptions {
recursive: false,
immediate: false,
..Default::default()
},
)
.await?;
}
Ok(())
@@ -372,7 +374,7 @@ impl Clone for HealingTracker {
}
}
pub async fn load_healing_tracker(disk: &Option<DiskStore>) -> Result<HealingTracker> {
pub async fn load_healing_tracker(disk: &Option<DiskStore>) -> disk::error::Result<HealingTracker> {
if let Some(disk) = disk {
let disk_id = disk.get_disk_id().await?;
if let Some(disk_id) = disk_id {
@@ -381,7 +383,7 @@ pub async fn load_healing_tracker(disk: &Option<DiskStore>) -> Result<HealingTra
let data = disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap()).await?;
let mut healing_tracker = HealingTracker::unmarshal_msg(&data)?;
if healing_tracker.id != disk_id && !healing_tracker.id.is_empty() {
return Err(Error::from_string(format!(
return Err(DiskError::other(format!(
"loadHealingTracker: drive id mismatch expected {}, got {}",
healing_tracker.id, disk_id
)));
@@ -390,14 +392,14 @@ pub async fn load_healing_tracker(disk: &Option<DiskStore>) -> Result<HealingTra
healing_tracker.disk = Some(disk.clone());
Ok(healing_tracker)
} else {
Err(Error::from_string("loadHealingTracker: disk not have id"))
Err(DiskError::other("loadHealingTracker: disk not have id"))
}
} else {
Err(Error::from_string("loadHealingTracker: nil drive given"))
Err(DiskError::other("loadHealingTracker: nil drive given"))
}
}
pub async fn init_healing_tracker(disk: DiskStore, heal_id: &str) -> Result<HealingTracker> {
pub async fn init_healing_tracker(disk: DiskStore, heal_id: &str) -> disk::error::Result<HealingTracker> {
let disk_location = disk.get_disk_location();
Ok(HealingTracker {
id: disk
@@ -416,7 +418,7 @@ pub async fn init_healing_tracker(disk: DiskStore, heal_id: &str) -> Result<Heal
})
}
pub async fn healing(derive_path: &str) -> Result<Option<HealingTracker>> {
pub async fn healing(derive_path: &str) -> disk::error::Result<Option<HealingTracker>> {
let healing_file = Path::new(derive_path)
.join(RUSTFS_META_BUCKET)
.join(BUCKET_META_PREFIX)

View File

@@ -4,6 +4,7 @@ use super::{
error::ERR_SKIP_FILE,
heal_commands::{HealOpts, HealScanMode, HealStopSuccess, HealingTracker, HEAL_ITEM_BUCKET_METADATA},
};
use crate::error::{Error, Result};
use crate::store_api::StorageAPI;
use crate::{
config::com::CONFIG_PREFIX,
@@ -12,7 +13,7 @@ use crate::{
heal::{error::ERR_HEAL_STOP_SIGNALLED, heal_commands::DRIVE_STATE_OK},
};
use crate::{
disk::{endpoint::Endpoint, MetaCacheEntry},
disk::endpoint::Endpoint,
endpoints::Endpoints,
global::GLOBAL_IsDistErasure,
heal::heal_commands::{HealStartSuccess, HEAL_UNKNOWN_SCAN},
@@ -24,10 +25,10 @@ use crate::{
utils::path::path_join,
};
use chrono::Utc;
use common::error::{Error, Result};
use futures::join;
use lazy_static::lazy_static;
use madmin::heal_commands::{HealDriveInfo, HealItemType, HealResultItem};
use rustfs_filemeta::MetaCacheEntry;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
@@ -285,10 +286,10 @@ impl HealSequence {
}
_ = self.is_done() => {
return Err(Error::from_string("stopped"));
return Err(Error::other("stopped"));
}
_ = interval_timer.tick() => {
return Err(Error::from_string("timeout"));
return Err(Error::other("timeout"));
}
}
} else {
@@ -412,7 +413,9 @@ impl HealSequence {
async fn heal_rustfs_sys_meta(h: Arc<HealSequence>, meta_prefix: &str) -> Result<()> {
info!("heal_rustfs_sys_meta, h: {:?}", h);
let Some(store) = new_object_layer_fn() else { return Err(Error::msg("errServerNotInitialized")) };
let Some(store) = new_object_layer_fn() else {
return Err(Error::other("errServerNotInitialized"));
};
let setting = h.setting;
store
.heal_objects(RUSTFS_META_BUCKET, meta_prefix, &setting, h.clone(), true)
@@ -450,7 +453,9 @@ impl HealSequence {
}
(hs.object.clone(), hs.setting)
};
let Some(store) = new_object_layer_fn() else { return Err(Error::msg("errServerNotInitialized")) };
let Some(store) = new_object_layer_fn() else {
return Err(Error::other("errServerNotInitialized"));
};
store.heal_objects(bucket, &object, &setting, hs.clone(), false).await
}
@@ -464,7 +469,7 @@ impl HealSequence {
info!("heal_object");
if hs.is_quitting().await {
info!("heal_object hs is quitting");
return Err(Error::from_string(ERR_HEAL_STOP_SIGNALLED));
return Err(Error::other(ERR_HEAL_STOP_SIGNALLED));
}
info!("will queue task");
@@ -491,7 +496,7 @@ impl HealSequence {
_scan_mode: HealScanMode,
) -> Result<()> {
if hs.is_quitting().await {
return Err(Error::from_string(ERR_HEAL_STOP_SIGNALLED));
return Err(Error::other(ERR_HEAL_STOP_SIGNALLED));
}
hs.queue_heal_task(
@@ -615,7 +620,7 @@ impl AllHealState {
Some(h) => {
if client_token != h.client_token {
info!("err heal invalid client token");
return Err(Error::from_string("err heal invalid client token"));
return Err(Error::other("err heal invalid client token"));
}
let num_items = h.current_status.read().await.items.len();
let mut last_result_index = *h.last_sent_result_index.read().await;
@@ -634,7 +639,7 @@ impl AllHealState {
Err(e) => {
h.current_status.write().await.items.clear();
info!("json encode err, e: {}", e);
Err(Error::msg(e.to_string()))
Err(Error::other(e.to_string()))
}
}
}
@@ -644,7 +649,7 @@ impl AllHealState {
})
.map_err(|e| {
info!("json encode err, e: {}", e);
Error::msg(e.to_string())
Error::other(e.to_string())
}),
}
}
@@ -779,7 +784,7 @@ impl AllHealState {
self.stop_heal_sequence(path_s).await?;
} else if let Some(hs) = self.get_heal_sequence(path_s).await {
if !hs.has_ended().await {
return Err(Error::from_string(format!("Heal is already running on the given path (use force-start option to stop and start afresh). The heal was started by IP {} at {:?}, token is {}", heal_sequence.client_address, heal_sequence.start_time, heal_sequence.client_token)));
return Err(Error::other(format!("Heal is already running on the given path (use force-start option to stop and start afresh). The heal was started by IP {} at {:?}, token is {}", heal_sequence.client_address, heal_sequence.start_time, heal_sequence.client_token)));
}
}
@@ -787,7 +792,7 @@ impl AllHealState {
for (k, v) in self.heal_seq_map.read().await.iter() {
if (has_prefix(k, path_s) || has_prefix(path_s, k)) && !v.has_ended().await {
return Err(Error::from_string(format!(
return Err(Error::other(format!(
"The provided heal sequence path overlaps with an existing heal path: {}",
k
)));

View File

@@ -19,7 +19,7 @@ use tokio_util::io::StreamReader;
use tracing::error;
use tracing::warn;
pub type FileReader = Box<dyn AsyncRead + Send + Sync + Unpin>;
// pub type FileReader = Box<dyn AsyncRead + Send + Sync + Unpin>;
pub type FileWriter = Box<dyn AsyncWrite + Send + Sync + Unpin>;
pub const READ_BUFFER_SIZE: usize = 1024 * 1024;
@@ -93,37 +93,37 @@ impl AsyncWrite for HttpFileWriter {
}
}
pub struct HttpFileReader {
inner: FileReader,
}
// pub struct HttpFileReader {
// inner: FileReader,
// }
impl HttpFileReader {
pub async fn new(url: &str, disk: &str, volume: &str, path: &str, offset: usize, length: usize) -> io::Result<Self> {
let resp = reqwest::Client::new()
.get(format!(
"{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}",
url,
urlencoding::encode(disk),
urlencoding::encode(volume),
urlencoding::encode(path),
offset,
length
))
.send()
.await
.map_err(io::Error::other)?;
// impl HttpFileReader {
// pub async fn new(url: &str, disk: &str, volume: &str, path: &str, offset: usize, length: usize) -> io::Result<Self> {
// let resp = reqwest::Client::new()
// .get(format!(
// "{}/rustfs/rpc/read_file_stream?disk={}&volume={}&path={}&offset={}&length={}",
// url,
// urlencoding::encode(disk),
// urlencoding::encode(volume),
// urlencoding::encode(path),
// offset,
// length
// ))
// .send()
// .await
// .map_err(io::Error::other)?;
let inner = Box::new(StreamReader::new(resp.bytes_stream().map_err(io::Error::other)));
// let inner = Box::new(StreamReader::new(resp.bytes_stream().map_err(io::Error::other)));
Ok(Self { inner })
}
}
// Ok(Self { inner })
// }
// }
impl AsyncRead for HttpFileReader {
fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.inner).poll_read(cx, buf)
}
}
// impl AsyncRead for HttpFileReader {
// fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
// Pin::new(&mut self.inner).poll_read(cx, buf)
// }
// }
#[async_trait]
pub trait Etag {
@@ -277,65 +277,65 @@ mod tests {
assert!(writer.is_ok(), "HttpFileWriter creation should succeed even with invalid URL");
}
#[tokio::test]
async fn test_http_file_reader_creation() {
// Test creation without actually making HTTP requests
// We'll test the URL construction logic by checking the error messages
let result =
HttpFileReader::new("http://invalid-server:9999", "test-disk", "test-volume", "test-file.txt", 0, 1024).await;
// #[tokio::test]
// async fn test_http_file_reader_creation() {
// // Test creation without actually making HTTP requests
// // We'll test the URL construction logic by checking the error messages
// let result =
// HttpFileReader::new("http://invalid-server:9999", "test-disk", "test-volume", "test-file.txt", 0, 1024).await;
// May succeed or fail depending on network conditions, but should not panic
// The important thing is that the URL construction logic works
assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
}
// // May succeed or fail depending on network conditions, but should not panic
// // The important thing is that the URL construction logic works
// assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
// }
#[tokio::test]
async fn test_http_file_reader_with_offset_and_length() {
let result = HttpFileReader::new(
"http://invalid-server:9999",
"test-disk",
"test-volume",
"test-file.txt",
100, // offset
500, // length
)
.await;
// #[tokio::test]
// async fn test_http_file_reader_with_offset_and_length() {
// let result = HttpFileReader::new(
// "http://invalid-server:9999",
// "test-disk",
// "test-volume",
// "test-file.txt",
// 100, // offset
// 500, // length
// )
// .await;
// May succeed or fail, but this tests parameter handling
assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
}
// // May succeed or fail, but this tests parameter handling
// assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
// }
#[tokio::test]
async fn test_http_file_reader_zero_length() {
let result = HttpFileReader::new(
"http://invalid-server:9999",
"test-disk",
"test-volume",
"test-file.txt",
0,
0, // zero length
)
.await;
// #[tokio::test]
// async fn test_http_file_reader_zero_length() {
// let result = HttpFileReader::new(
// "http://invalid-server:9999",
// "test-disk",
// "test-volume",
// "test-file.txt",
// 0,
// 0, // zero length
// )
// .await;
// May succeed or fail, but this tests zero length handling
assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
}
// // May succeed or fail, but this tests zero length handling
// assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
// }
#[tokio::test]
async fn test_http_file_reader_with_special_characters() {
let result = HttpFileReader::new(
"http://invalid-server:9999",
"test disk with spaces",
"test/volume",
"test file with spaces & symbols.txt",
0,
1024,
)
.await;
// #[tokio::test]
// async fn test_http_file_reader_with_special_characters() {
// let result = HttpFileReader::new(
// "http://invalid-server:9999",
// "test disk with spaces",
// "test/volume",
// "test file with spaces & symbols.txt",
// 0,
// 1024,
// )
// .await;
// May succeed or fail, but this tests URL encoding
assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
}
// // May succeed or fail, but this tests URL encoding
// assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
// }
#[tokio::test]
async fn test_etag_reader_creation() {
@@ -439,17 +439,17 @@ mod tests {
assert_eq!(etag.len(), 32, "MD5 hash should be 32 characters");
}
#[tokio::test]
async fn test_file_reader_and_writer_types() {
// Test that the type aliases are correctly defined
let _reader: FileReader = Box::new(Cursor::new(b"test"));
let (_writer_tx, writer_rx) = tokio::io::duplex(1024);
let _writer: FileWriter = Box::new(writer_rx);
// #[tokio::test]
// async fn test_file_reader_and_writer_types() {
// // Test that the type aliases are correctly defined
// let _reader: FileReader = Box::new(Cursor::new(b"test"));
// let (_writer_tx, writer_rx) = tokio::io::duplex(1024);
// let _writer: FileWriter = Box::new(writer_rx);
// If this compiles, the types are correctly defined
// This is a placeholder test - remove meaningless assertion
// assert!(true);
}
// // If this compiles, the types are correctly defined
// // This is a placeholder test - remove meaningless assertion
// // assert!(true);
// }
#[tokio::test]
async fn test_etag_trait_implementation() {
@@ -503,45 +503,45 @@ mod tests {
assert_eq!(result1, "d41d8cd98f00b204e9800998ecf8427e");
}
#[tokio::test]
async fn test_edge_case_parameters() {
// Test HttpFileWriter with edge case parameters
let writer = HttpFileWriter::new(
"http://localhost:8080",
"", // empty disk
"", // empty volume
"", // empty path
0, // zero size
false,
);
assert!(writer.is_ok(), "HttpFileWriter should handle empty parameters");
// #[tokio::test]
// async fn test_edge_case_parameters() {
// // Test HttpFileWriter with edge case parameters
// let writer = HttpFileWriter::new(
// "http://localhost:8080",
// "", // empty disk
// "", // empty volume
// "", // empty path
// 0, // zero size
// false,
// );
// assert!(writer.is_ok(), "HttpFileWriter should handle empty parameters");
// Test HttpFileReader with edge case parameters
let result = HttpFileReader::new(
"http://invalid:9999",
"", // empty disk
"", // empty volume
"", // empty path
0, // zero offset
0, // zero length
)
.await;
// May succeed or fail, but parameters should be handled
assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
}
// // Test HttpFileReader with edge case parameters
// let result = HttpFileReader::new(
// "http://invalid:9999",
// "", // empty disk
// "", // empty volume
// "", // empty path
// 0, // zero offset
// 0, // zero length
// )
// .await;
// // May succeed or fail, but parameters should be handled
// assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
// }
#[tokio::test]
async fn test_url_encoding_edge_cases() {
// Test with characters that need URL encoding
let special_chars = "test file with spaces & symbols + % # ? = @ ! $ ( ) [ ] { } | \\ / : ; , . < > \" '";
// #[tokio::test]
// async fn test_url_encoding_edge_cases() {
// // Test with characters that need URL encoding
// let special_chars = "test file with spaces & symbols + % # ? = @ ! $ ( ) [ ] { } | \\ / : ; , . < > \" '";
let writer = HttpFileWriter::new("http://localhost:8080", special_chars, special_chars, special_chars, 1024, false);
assert!(writer.is_ok(), "HttpFileWriter should handle special characters");
// let writer = HttpFileWriter::new("http://localhost:8080", special_chars, special_chars, special_chars, 1024, false);
// assert!(writer.is_ok(), "HttpFileWriter should handle special characters");
let result = HttpFileReader::new("http://invalid:9999", special_chars, special_chars, special_chars, 0, 1024).await;
// May succeed or fail, but URL encoding should work
assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
}
// let result = HttpFileReader::new("http://invalid:9999", special_chars, special_chars, special_chars, 0, 1024).await;
// // May succeed or fail, but URL encoding should work
// assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
// }
#[tokio::test]
async fn test_etag_reader_with_binary_data() {

View File

@@ -1,5 +1,5 @@
pub mod admin_server_info;
pub mod bitrot;
// pub mod bitrot;
pub mod bucket;
pub mod cache_value;
mod chunk_stream;
@@ -7,26 +7,25 @@ pub mod config;
pub mod disk;
pub mod disks_layout;
pub mod endpoints;
pub mod erasure;
pub mod erasure_coding;
pub mod error;
mod file_meta;
pub mod file_meta_inline;
// mod file_meta;
// pub mod file_meta_inline;
pub mod global;
pub mod heal;
pub mod io;
pub mod metacache;
// pub mod metacache;
pub mod metrics_realtime;
pub mod notification_sys;
pub mod peer;
pub mod peer_rest_client;
pub mod pools;
mod quorum;
// mod quorum;
pub mod rebalance;
pub mod set_disk;
mod sets;
pub mod store;
pub mod store_api;
pub mod store_err;
mod store_init;
pub mod store_list_objects;
mod store_utils;

View File

@@ -1,9 +1,9 @@
use crate::admin_server_info::get_commit_id;
use crate::error::{Error, Result};
use crate::global::{get_global_endpoints, GLOBAL_BOOT_TIME};
use crate::peer_rest_client::PeerRestClient;
use crate::StorageAPI;
use crate::{endpoints::EndpointServerPools, new_object_layer_fn};
use common::error::{Error, Result};
use futures::future::join_all;
use lazy_static::lazy_static;
use madmin::{ItemState, ServerProperties};
@@ -18,7 +18,7 @@ lazy_static! {
pub async fn new_global_notification_sys(eps: EndpointServerPools) -> Result<()> {
let _ = GLOBAL_NotificationSys
.set(NotificationSys::new(eps).await)
.map_err(|_| Error::msg("init notification_sys fail"));
.map_err(|_| Error::other("init notification_sys fail"));
Ok(())
}

View File

@@ -1,22 +1,19 @@
use crate::disk::error::is_all_buckets_not_found;
use crate::disk::error::{Error, Result};
use crate::disk::error_reduce::{is_all_buckets_not_found, reduce_write_quorum_errs, BUCKET_OP_IGNORED_ERRS};
use crate::disk::{DiskAPI, DiskStore};
use crate::error::clone_err;
use crate::global::GLOBAL_LOCAL_DISK_MAP;
use crate::heal::heal_commands::{
HealOpts, DRIVE_STATE_CORRUPT, DRIVE_STATE_MISSING, DRIVE_STATE_OFFLINE, DRIVE_STATE_OK, HEAL_ITEM_BUCKET,
};
use crate::heal::heal_ops::RUSTFS_RESERVED_BUCKET;
use crate::quorum::{bucket_op_ignored_errs, reduce_write_quorum_errs};
use crate::store::all_local_disk;
use crate::utils::proto_err_to_err;
use crate::utils::wildcard::is_rustfs_meta_bucket_name;
use crate::{
disk::{self, error::DiskError, VolumeInfo},
disk::{self, VolumeInfo},
endpoints::{EndpointServerPools, Node},
store_api::{BucketInfo, BucketOptions, DeleteBucketOptions, MakeBucketOptions},
};
use async_trait::async_trait;
use common::error::{Error, Result};
use futures::future::join_all;
use madmin::heal_commands::{HealDriveInfo, HealResultItem};
use protos::node_service_time_out_client;
@@ -90,12 +87,12 @@ impl S3PeerSys {
for (i, client) in self.clients.iter().enumerate() {
if let Some(v) = client.get_pools() {
if v.contains(&pool_idx) {
per_pool_errs.push(errs[i].as_ref().map(clone_err));
per_pool_errs.push(errs[i].clone());
}
}
}
let qu = per_pool_errs.len() / 2;
pool_errs.push(reduce_write_quorum_errs(&per_pool_errs, &bucket_op_ignored_errs(), qu));
pool_errs.push(reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, qu));
}
if !opts.recreate {
@@ -125,12 +122,12 @@ impl S3PeerSys {
for (i, client) in self.clients.iter().enumerate() {
if let Some(v) = client.get_pools() {
if v.contains(&pool_idx) {
per_pool_errs.push(errs[i].as_ref().map(clone_err));
per_pool_errs.push(errs[i].clone());
}
}
}
let qu = per_pool_errs.len() / 2;
if let Some(pool_err) = reduce_write_quorum_errs(&per_pool_errs, &bucket_op_ignored_errs(), qu) {
if let Some(pool_err) = reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, qu) {
return Err(pool_err);
}
}
@@ -140,7 +137,7 @@ impl S3PeerSys {
return Ok(heal_bucket_results.read().await[i].clone());
}
}
Err(DiskError::VolumeNotFound.into())
Err(Error::VolumeNotFound.into())
}
pub async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> {
@@ -286,9 +283,7 @@ impl S3PeerSys {
}
}
ress.iter()
.find_map(|op| op.clone())
.ok_or(Error::new(DiskError::VolumeNotFound))
ress.iter().find_map(|op| op.clone()).ok_or(Error::VolumeNotFound)
}
pub fn get_pools(&self) -> Option<Vec<usize>> {
@@ -378,7 +373,7 @@ impl PeerS3Client for LocalPeerS3Client {
match disk.make_volume(bucket).await {
Ok(_) => Ok(()),
Err(e) => {
if opts.force_create && DiskError::VolumeExists.is(&e) {
if opts.force_create && matches!(e, Error::VolumeExists) {
return Ok(());
}
@@ -441,7 +436,7 @@ impl PeerS3Client for LocalPeerS3Client {
..Default::default()
})
})
.ok_or(Error::new(DiskError::VolumeNotFound))
.ok_or(Error::VolumeNotFound)
}
async fn delete_bucket(&self, bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> {
@@ -462,7 +457,7 @@ impl PeerS3Client for LocalPeerS3Client {
match res {
Ok(_) => errs.push(None),
Err(e) => {
if DiskError::VolumeNotEmpty.is(&e) {
if matches!(e, Error::VolumeNotEmpty) {
recreate = true;
}
errs.push(Some(e))
@@ -479,7 +474,7 @@ impl PeerS3Client for LocalPeerS3Client {
}
if recreate {
return Err(Error::new(DiskError::VolumeNotEmpty));
return Err(Error::VolumeNotEmpty);
}
// TODO: reduceWriteQuorumErrs
@@ -512,17 +507,17 @@ impl PeerS3Client for RemotePeerS3Client {
let options: String = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(HealBucketRequest {
bucket: bucket.to_string(),
options,
});
let response = client.heal_bucket(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
return if let Some(err) = response.error {
Err(err.into())
} else {
Err(Error::from_string(""))
Err(Error::other(""))
};
}
@@ -538,14 +533,14 @@ impl PeerS3Client for RemotePeerS3Client {
let options = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(ListBucketRequest { options });
let response = client.list_bucket(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
return if let Some(err) = response.error {
Err(err.into())
} else {
Err(Error::from_string(""))
Err(Error::other(""))
};
}
let bucket_infos = response
@@ -560,7 +555,7 @@ impl PeerS3Client for RemotePeerS3Client {
let options = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(MakeBucketRequest {
name: bucket.to_string(),
options,
@@ -569,10 +564,10 @@ impl PeerS3Client for RemotePeerS3Client {
// TODO: deal with error
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
return if let Some(err) = response.error {
Err(err.into())
} else {
Err(Error::from_string(""))
Err(Error::other(""))
};
}
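
The recurring swap of proto_err_to_err(err) for err.into() in these hunks implies the generated proto error type now converts into the crate error via From. A sketch of that idea, with ProtoError as a stand-in for the generated type (the real identifier and its fields are not shown in this diff):

    // Hypothetical: ProtoError stands in for the gRPC-generated error
    // message, assumed to implement Display.
    impl From<ProtoError> for Error {
        fn from(e: ProtoError) -> Self {
            Error::other(e.to_string())
        }
    }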
@@ -582,17 +577,17 @@ impl PeerS3Client for RemotePeerS3Client {
let options = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(GetBucketInfoRequest {
bucket: bucket.to_string(),
options,
});
let response = client.get_bucket_info(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
return if let Some(err) = response.error {
Err(err.into())
} else {
Err(Error::from_string(""))
Err(Error::other(""))
};
}
let bucket_info = serde_json::from_str::<BucketInfo>(&response.bucket_info)?;
@@ -603,17 +598,17 @@ impl PeerS3Client for RemotePeerS3Client {
async fn delete_bucket(&self, bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::from_string(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
let request = Request::new(DeleteBucketRequest {
bucket: bucket.to_string(),
});
let response = client.delete_bucket(request).await?.into_inner();
if !response.success {
return if let Some(err) = &response.error {
Err(proto_err_to_err(err))
return if let Some(err) = response.error {
Err(err.into())
} else {
Err(Error::from_string(""))
Err(Error::other(""))
};
}
@@ -624,18 +619,18 @@ impl PeerS3Client for RemotePeerS3Client {
// Check whether the bucket name is valid
fn check_bucket_name(bucket_name: &str, strict: bool) -> Result<()> {
if bucket_name.trim().is_empty() {
return Err(Error::msg("Bucket name cannot be empty"));
return Err(Error::other("Bucket name cannot be empty"));
}
if bucket_name.len() < 3 {
return Err(Error::msg("Bucket name cannot be shorter than 3 characters"));
return Err(Error::other("Bucket name cannot be shorter than 3 characters"));
}
if bucket_name.len() > 63 {
return Err(Error::msg("Bucket name cannot be longer than 63 characters"));
return Err(Error::other("Bucket name cannot be longer than 63 characters"));
}
let ip_address_regex = Regex::new(r"^(\d+\.){3}\d+$").unwrap();
if ip_address_regex.is_match(bucket_name) {
return Err(Error::msg("Bucket name cannot be an IP address"));
return Err(Error::other("Bucket name cannot be an IP address"));
}
let valid_bucket_name_regex = if strict {
@@ -645,12 +640,12 @@ fn check_bucket_name(bucket_name: &str, strict: bool) -> Result<()> {
};
if !valid_bucket_name_regex.is_match(bucket_name) {
return Err(Error::msg("Bucket name contains invalid characters"));
return Err(Error::other("Bucket name contains invalid characters"));
}
// Reject names containing "..", ".-", or "-."
if bucket_name.contains("..") || bucket_name.contains(".-") || bucket_name.contains("-.") {
return Err(Error::msg("Bucket name contains invalid characters"));
return Err(Error::other("Bucket name contains invalid characters"));
}
Ok(())
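
A quick usage sketch of check_bucket_name, exercising the branches visible above (assuming the elided strict regex accepts plain lowercase names):

    assert!(check_bucket_name("my-bucket", true).is_ok());
    assert!(check_bucket_name("ab", true).is_err());          // shorter than 3 characters
    assert!(check_bucket_name("192.168.0.1", true).is_err()); // IP addresses are rejected
    assert!(check_bucket_name("bad..name", true).is_err());   // ".." is not allowed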
@@ -695,7 +690,7 @@ pub async fn heal_bucket_local(bucket: &str, opts: &HealOpts) -> Result<HealResu
None => {
bs_clone.write().await[index] = DRIVE_STATE_OFFLINE.to_string();
as_clone.write().await[index] = DRIVE_STATE_OFFLINE.to_string();
return Some(Error::new(DiskError::DiskNotFound));
return Some(Error::DiskNotFound);
}
};
bs_clone.write().await[index] = DRIVE_STATE_OK.to_string();
@@ -707,13 +702,13 @@ pub async fn heal_bucket_local(bucket: &str, opts: &HealOpts) -> Result<HealResu
match disk.stat_volume(&bucket).await {
Ok(_) => None,
Err(err) => match err.downcast_ref() {
Some(DiskError::DiskNotFound) => {
Err(err) => match err {
Error::DiskNotFound => {
bs_clone.write().await[index] = DRIVE_STATE_OFFLINE.to_string();
as_clone.write().await[index] = DRIVE_STATE_OFFLINE.to_string();
Some(err)
}
Some(DiskError::VolumeNotFound) => {
Error::VolumeNotFound => {
bs_clone.write().await[index] = DRIVE_STATE_MISSING.to_string();
as_clone.write().await[index] = DRIVE_STATE_MISSING.to_string();
Some(err)
@@ -761,7 +756,7 @@ pub async fn heal_bucket_local(bucket: &str, opts: &HealOpts) -> Result<HealResu
let _ = disk.delete_volume(&bucket).await;
None
}
None => Some(Error::new(DiskError::DiskNotFound)),
None => Some(Error::DiskNotFound),
}
});
}
@@ -776,7 +771,7 @@ pub async fn heal_bucket_local(bucket: &str, opts: &HealOpts) -> Result<HealResu
let bucket = bucket.to_string();
let bs_clone = before_state.clone();
let as_clone = after_state.clone();
let errs_clone = errs.iter().map(|e| e.as_ref().map(clone_err)).collect::<Vec<_>>();
let errs_clone = errs.iter().map(|e| e.as_ref().map(|e| e.clone())).collect::<Vec<_>>();
futures.push(async move {
if bs_clone.read().await[idx] == DRIVE_STATE_MISSING {
info!("bucket not find, will recreate");
@@ -790,7 +785,7 @@ pub async fn heal_bucket_local(bucket: &str, opts: &HealOpts) -> Result<HealResu
}
}
}
errs_clone[idx].as_ref().map(clone_err)
errs_clone[idx].as_ref().map(|e| e.clone())
});
}
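
The through-line of this file's changes is replacing boxed errors plus downcast_ref with a concrete, cloneable enum that can be matched directly (matches!(e, Error::VolumeExists) above). A minimal sketch of that shape using thiserror; the actual enum is presumably defined elsewhere in the tree and is not shown in these hunks:

    use thiserror::Error as ThisError;

    // Hypothetical shape of a directly matchable, cloneable error enum.
    #[derive(Debug, Clone, PartialEq, Eq, ThisError)]
    pub enum Error {
        #[error("volume not found")]
        VolumeNotFound,
        #[error("volume not empty")]
        VolumeNotEmpty,
        #[error("disk not found")]
        DiskNotFound,
        #[error("{0}")]
        Other(String),
    }

    impl Error {
        // Mirrors the Error::other(..) constructor used throughout the diff.
        pub fn other<S: ToString>(msg: S) -> Self {
            Error::Other(msg.to_string())
        }
    }

Deriving Clone is what lets the clone_err helper calls above collapse to plain e.clone().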

View File

@@ -1,3 +1,4 @@
use crate::error::{Error, Result};
use crate::{
endpoints::EndpointServerPools,
global::is_dist_erasure,
@@ -5,7 +6,6 @@ use crate::{
metrics_realtime::{CollectMetricsOpts, MetricType},
utils::net::XHost,
};
use common::error::{Error, Result};
use madmin::{
health::{Cpus, MemInfo, OsInfo, Partitions, ProcInfo, SysConfig, SysErrors, SysService},
metrics::RealtimeMetrics,
@@ -76,15 +76,15 @@ impl PeerRestClient {
pub async fn local_storage_info(&self) -> Result<madmin::StorageInfo> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LocalStorageInfoRequest { metrics: true });
let response = client.local_storage_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.storage_info;
@@ -97,15 +97,15 @@ impl PeerRestClient {
pub async fn server_info(&self) -> Result<ServerProperties> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(ServerInfoRequest { metrics: true });
let response = client.server_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.server_properties;
@@ -118,15 +118,15 @@ impl PeerRestClient {
pub async fn get_cpus(&self) -> Result<Cpus> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetCpusRequest {});
let response = client.get_cpus(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.cpus;
@@ -139,15 +139,15 @@ impl PeerRestClient {
pub async fn get_net_info(&self) -> Result<NetInfo> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetNetInfoRequest {});
let response = client.get_net_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.net_info;
@@ -160,15 +160,15 @@ impl PeerRestClient {
pub async fn get_partitions(&self) -> Result<Partitions> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetPartitionsRequest {});
let response = client.get_partitions(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.partitions;
@@ -181,15 +181,15 @@ impl PeerRestClient {
pub async fn get_os_info(&self) -> Result<OsInfo> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetOsInfoRequest {});
let response = client.get_os_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.os_info;
@@ -202,15 +202,15 @@ impl PeerRestClient {
pub async fn get_se_linux_info(&self) -> Result<SysService> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetSeLinuxInfoRequest {});
let response = client.get_se_linux_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.sys_services;
@@ -223,15 +223,15 @@ impl PeerRestClient {
pub async fn get_sys_config(&self) -> Result<SysConfig> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetSysConfigRequest {});
let response = client.get_sys_config(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.sys_config;
@@ -244,15 +244,15 @@ impl PeerRestClient {
pub async fn get_sys_errors(&self) -> Result<SysErrors> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetSysErrorsRequest {});
let response = client.get_sys_errors(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.sys_errors;
@@ -265,15 +265,15 @@ impl PeerRestClient {
pub async fn get_mem_info(&self) -> Result<MemInfo> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetMemInfoRequest {});
let response = client.get_mem_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.mem_info;
@@ -286,7 +286,7 @@ impl PeerRestClient {
pub async fn get_metrics(&self, t: MetricType, opts: &CollectMetricsOpts) -> Result<RealtimeMetrics> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let mut buf_t = Vec::new();
t.serialize(&mut Serializer::new(&mut buf_t))?;
let mut buf_o = Vec::new();
@@ -299,9 +299,9 @@ impl PeerRestClient {
let response = client.get_metrics(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.realtime_metrics;
@@ -314,15 +314,15 @@ impl PeerRestClient {
pub async fn get_proc_info(&self) -> Result<ProcInfo> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(GetProcInfoRequest {});
let response = client.get_proc_info(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.proc_info;
@@ -335,7 +335,7 @@ impl PeerRestClient {
pub async fn start_profiling(&self, profiler: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(StartProfilingRequest {
profiler: profiler.to_string(),
});
@@ -343,9 +343,9 @@ impl PeerRestClient {
let response = client.start_profiling(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -369,7 +369,7 @@ impl PeerRestClient {
pub async fn load_bucket_metadata(&self, bucket: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadBucketMetadataRequest {
bucket: bucket.to_string(),
});
@@ -377,9 +377,9 @@ impl PeerRestClient {
let response = client.load_bucket_metadata(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -387,7 +387,7 @@ impl PeerRestClient {
pub async fn delete_bucket_metadata(&self, bucket: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(DeleteBucketMetadataRequest {
bucket: bucket.to_string(),
});
@@ -395,9 +395,9 @@ impl PeerRestClient {
let response = client.delete_bucket_metadata(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -405,7 +405,7 @@ impl PeerRestClient {
pub async fn delete_policy(&self, policy: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(DeletePolicyRequest {
policy_name: policy.to_string(),
});
@@ -413,9 +413,9 @@ impl PeerRestClient {
let response = client.delete_policy(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -423,7 +423,7 @@ impl PeerRestClient {
pub async fn load_policy(&self, policy: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadPolicyRequest {
policy_name: policy.to_string(),
});
@@ -431,9 +431,9 @@ impl PeerRestClient {
let response = client.load_policy(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -441,7 +441,7 @@ impl PeerRestClient {
pub async fn load_policy_mapping(&self, user_or_group: &str, user_type: u64, is_group: bool) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadPolicyMappingRequest {
user_or_group: user_or_group.to_string(),
user_type,
@@ -451,9 +451,9 @@ impl PeerRestClient {
let response = client.load_policy_mapping(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -461,7 +461,7 @@ impl PeerRestClient {
pub async fn delete_user(&self, access_key: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(DeleteUserRequest {
access_key: access_key.to_string(),
});
@@ -469,9 +469,9 @@ impl PeerRestClient {
let response = client.delete_user(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -479,7 +479,7 @@ impl PeerRestClient {
pub async fn delete_service_account(&self, access_key: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(DeleteServiceAccountRequest {
access_key: access_key.to_string(),
});
@@ -487,9 +487,9 @@ impl PeerRestClient {
let response = client.delete_service_account(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -497,7 +497,7 @@ impl PeerRestClient {
pub async fn load_user(&self, access_key: &str, temp: bool) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadUserRequest {
access_key: access_key.to_string(),
temp,
@@ -506,9 +506,9 @@ impl PeerRestClient {
let response = client.load_user(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -516,7 +516,7 @@ impl PeerRestClient {
pub async fn load_service_account(&self, access_key: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadServiceAccountRequest {
access_key: access_key.to_string(),
});
@@ -524,9 +524,9 @@ impl PeerRestClient {
let response = client.load_service_account(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -534,7 +534,7 @@ impl PeerRestClient {
pub async fn load_group(&self, group: &str) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadGroupRequest {
group: group.to_string(),
});
@@ -542,9 +542,9 @@ impl PeerRestClient {
let response = client.load_group(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -552,15 +552,15 @@ impl PeerRestClient {
pub async fn reload_site_replication_config(&self) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(ReloadSiteReplicationConfigRequest {});
let response = client.reload_site_replication_config(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -568,7 +568,7 @@ impl PeerRestClient {
pub async fn signal_service(&self, sig: u64, sub_sys: &str, dry_run: bool, _exec_at: SystemTime) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let mut vars = HashMap::new();
vars.insert(PEER_RESTSIGNAL.to_string(), sig.to_string());
vars.insert(PEER_RESTSUB_SYS.to_string(), sub_sys.to_string());
@@ -580,9 +580,9 @@ impl PeerRestClient {
let response = client.signal_service(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
}
@@ -590,15 +590,15 @@ impl PeerRestClient {
pub async fn background_heal_status(&self) -> Result<BgHealState> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(BackgroundHealStatusRequest {});
let response = client.background_heal_status(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
let data = response.bg_heal_state;
@@ -611,29 +611,29 @@ impl PeerRestClient {
pub async fn get_metacache_listing(&self) -> Result<()> {
let _client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
todo!()
}
pub async fn update_metacache_listing(&self) -> Result<()> {
let _client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
todo!()
}
pub async fn reload_pool_meta(&self) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(ReloadPoolMetaRequest {});
let response = client.reload_pool_meta(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
@@ -642,15 +642,15 @@ impl PeerRestClient {
pub async fn stop_rebalance(&self) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(StopRebalanceRequest {});
let response = client.stop_rebalance(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
@@ -659,7 +659,7 @@ impl PeerRestClient {
pub async fn load_rebalance_meta(&self, start_rebalance: bool) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadRebalanceMetaRequest { start_rebalance });
let response = client.load_rebalance_meta(request).await?.into_inner();
@@ -667,9 +667,9 @@ impl PeerRestClient {
warn!("load_rebalance_meta response {:?}", response);
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
@@ -678,15 +678,15 @@ impl PeerRestClient {
pub async fn load_transition_tier_config(&self) -> Result<()> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::msg(err.to_string()))?;
.map_err(|err| Error::other(err.to_string()))?;
let request = Request::new(LoadTransitionTierConfigRequest {});
let response = client.load_transition_tier_config(request).await?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::msg(msg));
return Err(Error::other(msg));
}
return Err(Error::msg(""));
return Err(Error::other(""));
}
Ok(())
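
Every wrapper above repeats the same success/error_info check; a small helper could collapse it. A sketch assuming only the Error::other constructor already used in these hunks (the helper itself is not part of the commit):

    // Hypothetical helper: map a response's (success, error_info) pair to
    // Result, matching the inlined checks above. A missing message becomes
    // the same empty-string error as Err(Error::other("")).
    fn check_response(success: bool, error_info: Option<String>) -> Result<()> {
        if success {
            Ok(())
        } else {
            Err(Error::other(error_info.unwrap_or_default()))
        }
    }

Each method body would then shrink to check_response(response.success, response.error_info)?.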

View File

@@ -1,9 +1,13 @@
use crate::bucket::versioning_sys::BucketVersioningSys;
use crate::cache_value::metacache_set::{list_path_raw, ListPathRawOptions};
use crate::config::com::{read_config, save_config, CONFIG_PREFIX};
use crate::config::error::ConfigError;
use crate::disk::error::is_err_volume_not_found;
use crate::disk::{MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, BUCKET_META_PREFIX, RUSTFS_META_BUCKET};
use crate::disk::error::DiskError;
use crate::disk::{BUCKET_META_PREFIX, RUSTFS_META_BUCKET};
use crate::error::{
is_err_bucket_exists, is_err_bucket_not_found, is_err_data_movement_overwrite, is_err_object_not_found,
is_err_version_not_found, StorageError,
};
use crate::error::{Error, Result};
use crate::heal::data_usage::DATA_USAGE_CACHE_NAME;
use crate::heal::heal_commands::HealOpts;
use crate::new_object_layer_fn;
@@ -12,18 +16,16 @@ use crate::set_disk::SetDisks;
use crate::store_api::{
BucketOptions, CompletePart, GetObjectReader, MakeBucketOptions, ObjectIO, ObjectOptions, PutObjReader, StorageAPI,
};
use crate::store_err::{
is_err_bucket_exists, is_err_data_movement_overwrite, is_err_object_not_found, is_err_version_not_found, StorageError,
};
use crate::utils::path::{encode_dir_object, path_join, SLASH_SEPARATOR};
use crate::{sets::Sets, store::ECStore};
use ::workers::workers::Workers;
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use common::defer;
use common::error::{Error, Result};
use futures::future::BoxFuture;
use http::HeaderMap;
use rmp_serde::{Deserializer, Serializer};
use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams};
use rustfs_rio::{HashReader, Reader};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::Display;
@@ -106,12 +108,12 @@ impl PoolMeta {
if data.is_empty() {
return Ok(());
} else if data.len() <= 4 {
return Err(Error::from_string("poolMeta: no data"));
return Err(Error::other("poolMeta: no data"));
}
data
}
Err(err) => {
if let Some(ConfigError::NotFound) = err.downcast_ref::<ConfigError>() {
if err == Error::ConfigNotFound {
return Ok(());
}
return Err(err);
@@ -119,11 +121,11 @@ impl PoolMeta {
};
let format = LittleEndian::read_u16(&data[0..2]);
if format != POOL_META_FORMAT {
return Err(Error::msg(format!("PoolMeta: unknown format: {}", format)));
return Err(Error::other(format!("PoolMeta: unknown format: {}", format)));
}
let version = LittleEndian::read_u16(&data[2..4]);
if version != POOL_META_VERSION {
return Err(Error::msg(format!("PoolMeta: unknown version: {}", version)));
return Err(Error::other(format!("PoolMeta: unknown version: {}", version)));
}
let mut buf = Deserializer::new(Cursor::new(&data[4..]));
@@ -131,7 +133,7 @@ impl PoolMeta {
*self = meta;
if self.version != POOL_META_VERSION {
return Err(Error::msg(format!("unexpected PoolMeta version: {}", self.version)));
return Err(Error::other(format!("unexpected PoolMeta version: {}", self.version)));
}
Ok(())
}
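
The load path above expects a 4-byte header, format as a little-endian u16 followed by version as a little-endian u16, ahead of the MessagePack payload. A sketch of the matching write side, assuming save mirrors what load checks (byteorder is already imported in this file):

    use byteorder::{LittleEndian, WriteBytesExt};

    // Hypothetical encoder: [format: u16 LE][version: u16 LE][msgpack payload].
    fn encode_pool_meta(payload: &[u8]) -> std::io::Result<Vec<u8>> {
        let mut out = Vec::with_capacity(4 + payload.len());
        out.write_u16::<LittleEndian>(POOL_META_FORMAT)?;
        out.write_u16::<LittleEndian>(POOL_META_VERSION)?;
        out.extend_from_slice(payload);
        Ok(out)
    }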
@@ -230,7 +232,7 @@ impl PoolMeta {
if let Some(pool) = self.pools.get_mut(idx) {
if let Some(ref info) = pool.decommission {
if !info.complete && !info.failed && !info.canceled {
return Err(Error::new(StorageError::DecommissionAlreadyRunning));
return Err(StorageError::DecommissionAlreadyRunning);
}
}
@@ -318,7 +320,7 @@ impl PoolMeta {
pub async fn update_after(&mut self, idx: usize, pools: Vec<Arc<Sets>>, duration: Duration) -> Result<bool> {
if !self.pools.get(idx).is_some_and(|v| v.decommission.is_some()) {
return Err(Error::msg("InvalidArgument"));
return Err(Error::other("InvalidArgument"));
}
let now = OffsetDateTime::now_utc();
@@ -377,7 +379,7 @@ impl PoolMeta {
pi.position + 1,
k
);
// return Err(Error::msg(format!(
// return Err(Error::other(format!(
// "pool({}) = {} is decommissioned, please remove from server command line",
// pi.position + 1,
// k
@@ -590,22 +592,22 @@ impl ECStore {
used: total - free,
})
} else {
Err(Error::msg("InvalidArgument"))
Err(Error::other("InvalidArgument"))
}
}
#[tracing::instrument(skip(self))]
pub async fn decommission_cancel(&self, idx: usize) -> Result<()> {
if self.single_pool() {
return Err(Error::msg("InvalidArgument"));
return Err(Error::other("InvalidArgument"));
}
let Some(has_canceler) = self.decommission_cancelers.get(idx) else {
return Err(Error::msg("InvalidArgument"));
return Err(Error::other("InvalidArgument"));
};
if has_canceler.is_none() {
return Err(Error::new(StorageError::DecommissionNotStarted));
return Err(StorageError::DecommissionNotStarted);
}
let mut lock = self.pool_meta.write().await;
@@ -638,11 +640,11 @@ impl ECStore {
pub async fn decommission(&self, rx: B_Receiver<bool>, indices: Vec<usize>) -> Result<()> {
warn!("decommission: {:?}", indices);
if indices.is_empty() {
return Err(Error::msg("errInvalidArgument"));
return Err(Error::other("InvalidArgument"));
}
if self.single_pool() {
return Err(Error::msg("errInvalidArgument"));
return Err(Error::other("InvalidArgument"));
}
self.start_decommission(indices.clone()).await?;
@@ -880,7 +882,7 @@ impl ECStore {
pool: Arc<Sets>,
bi: DecomBucketInfo,
) -> Result<()> {
let wk = Workers::new(pool.disk_set.len() * 2).map_err(|v| Error::from_string(v))?;
let wk = Workers::new(pool.disk_set.len() * 2).map_err(|v| Error::other(v))?;
// let mut vc = None;
// replication
@@ -942,7 +944,7 @@ impl ECStore {
}
Err(err) => {
error!("decommission_pool: list_objects_to_decommission {} err {:?}", set_id, &err);
if is_err_volume_not_found(&err) {
if is_err_bucket_not_found(&err) {
warn!("decommission_pool: list_objects_to_decommission {} volume not found", set_id);
break;
}
@@ -1008,7 +1010,7 @@ impl ECStore {
#[tracing::instrument(skip(self))]
pub async fn decommission_failed(&self, idx: usize) -> Result<()> {
if self.single_pool() {
return Err(Error::msg("errInvalidArgument"));
return Err(Error::other("errInvalidArgument"));
}
let mut pool_meta = self.pool_meta.write().await;
@@ -1028,7 +1030,7 @@ impl ECStore {
#[tracing::instrument(skip(self))]
pub async fn complete_decommission(&self, idx: usize) -> Result<()> {
if self.single_pool() {
return Err(Error::msg("errInvalidArgument"));
return Err(Error::other("errInvalidArgument"));
}
let mut pool_meta = self.pool_meta.write().await;
@@ -1102,11 +1104,11 @@ impl ECStore {
#[tracing::instrument(skip(self))]
pub async fn start_decommission(&self, indices: Vec<usize>) -> Result<()> {
if indices.is_empty() {
return Err(Error::msg("errInvalidArgument"));
return Err(Error::other("errInvalidArgument"));
}
if self.single_pool() {
return Err(Error::msg("errInvalidArgument"));
return Err(Error::other("errInvalidArgument"));
}
let decom_buckets = self.get_buckets_to_decommission().await?;
@@ -1220,9 +1222,7 @@ impl ECStore {
reader.read_exact(&mut chunk).await?;
// Read one part from the reader per iteration and upload it
let rd = Box::new(Cursor::new(chunk));
let mut data = PutObjReader::new(rd, part.size);
let mut data = PutObjReader::from_vec(chunk);
let pi = match self
.put_object_part(
@@ -1232,7 +1232,7 @@ impl ECStore {
part.number,
&mut data,
&ObjectOptions {
preserve_etag: part.e_tag.clone(),
preserve_etag: Some(part.etag.clone()),
..Default::default()
},
)
@@ -1249,7 +1249,7 @@ impl ECStore {
parts[i] = CompletePart {
part_num: pi.part_num,
e_tag: pi.etag,
etag: pi.etag,
};
}
@@ -1275,7 +1275,10 @@ impl ECStore {
return Ok(());
}
let mut data = PutObjReader::new(rd.stream, object_info.size);
let mut data = PutObjReader::new(
HashReader::new(rd.stream, object_info.size as i64, object_info.size as i64, None, false)?,
object_info.size,
);
if let Err(err) = self
.put_object(
@@ -1318,7 +1321,7 @@ impl SetDisks {
) -> Result<()> {
let (disks, _) = self.get_online_disks_with_healing(false).await;
if disks.is_empty() {
return Err(Error::msg("errNoDiskAvailable"));
return Err(Error::other("errNoDiskAvailable"));
}
let listing_quorum = self.set_drive_count.div_ceil(2);
@@ -1341,7 +1344,7 @@ impl SetDisks {
recursice: true,
min_disks: listing_quorum,
agreed: Some(Box::new(move |entry: MetaCacheEntry| Box::pin(cb1(entry)))),
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<Error>]| {
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<DiskError>]| {
let resolver = resolver.clone();
let cb_func = cb_func.clone();
match entries.resolve(resolver) {
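
Stepping back from the hunks above: this file's upload paths now buffer whole parts through PutObjReader::from_vec, and streamed objects are wrapped in rustfs_rio::HashReader first. A usage sketch with the argument meanings inferred from the call sites (the parameters are not documented in this diff):

    // Inferred shape: HashReader::new(stream, size, actual_size, md5, strict).
    let hrd = HashReader::new(
        rd.stream,               // boxed async stream from GetObjectReader
        object_info.size as i64, // expected byte count
        object_info.size as i64, // actual size (no transformation applied)
        None,                    // no precomputed checksum to verify
        false,                   // flag assumed; every call site here passes false
    )?;
    let mut data = PutObjReader::new(hrd, object_info.size);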

View File

@@ -4,19 +4,20 @@ use std::time::SystemTime;
use crate::cache_value::metacache_set::{list_path_raw, ListPathRawOptions};
use crate::config::com::{read_config_with_metadata, save_config_with_opts};
use crate::config::error::is_err_config_not_found;
use crate::disk::{MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams};
use crate::disk::error::DiskError;
use crate::error::{is_err_data_movement_overwrite, is_err_object_not_found, is_err_version_not_found};
use crate::error::{Error, Result};
use crate::global::get_global_endpoints;
use crate::pools::ListCallback;
use crate::set_disk::SetDisks;
use crate::store::ECStore;
use crate::store_api::{CompletePart, FileInfo, GetObjectReader, ObjectIO, ObjectOptions, PutObjReader};
use crate::store_err::{is_err_data_movement_overwrite, is_err_object_not_found, is_err_version_not_found};
use crate::store_api::{CompletePart, GetObjectReader, ObjectIO, ObjectOptions, PutObjReader};
use crate::utils::path::encode_dir_object;
use crate::StorageAPI;
use common::defer;
use common::error::{Error, Result};
use http::HeaderMap;
use rustfs_filemeta::{FileInfo, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams};
use rustfs_rio::HashReader;
use serde::{Deserialize, Serialize};
use tokio::io::AsyncReadExt;
use tokio::sync::broadcast::{self, Receiver as B_Receiver};
@@ -167,17 +168,17 @@ impl RebalanceMeta {
return Ok(());
}
if data.len() <= 4 {
return Err(Error::msg("rebalanceMeta: no data"));
return Err(Error::other("rebalanceMeta: no data"));
}
// Read header
match u16::from_le_bytes([data[0], data[1]]) {
REBAL_META_FMT => {}
fmt => return Err(Error::msg(format!("rebalanceMeta: unknown format: {}", fmt))),
fmt => return Err(Error::other(format!("rebalanceMeta: unknown format: {}", fmt))),
}
match u16::from_le_bytes([data[2], data[3]]) {
REBAL_META_VER => {}
ver => return Err(Error::msg(format!("rebalanceMeta: unknown version: {}", ver))),
ver => return Err(Error::other(format!("rebalanceMeta: unknown version: {}", ver))),
}
let meta: Self = rmp_serde::from_read(Cursor::new(&data[4..]))?;
@@ -238,7 +239,7 @@ impl ECStore {
}
}
Err(err) => {
if !is_err_config_not_found(&err) {
if err != Error::ConfigNotFound {
error!("rebalanceMeta: load rebalance meta err {:?}", &err);
return Err(err);
}
@@ -866,8 +867,7 @@ impl ECStore {
reader.read_exact(&mut chunk).await?;
// Read one part from the reader per iteration and upload it
let rd = Box::new(Cursor::new(chunk));
let mut data = PutObjReader::new(rd, part.size);
let mut data = PutObjReader::from_vec(chunk);
let pi = match self
.put_object_part(
@@ -877,7 +877,7 @@ impl ECStore {
part.number,
&mut data,
&ObjectOptions {
preserve_etag: part.e_tag.clone(),
preserve_etag: Some(part.etag.clone()),
..Default::default()
},
)
@@ -892,7 +892,7 @@ impl ECStore {
parts[i] = CompletePart {
part_num: pi.part_num,
e_tag: pi.etag,
etag: pi.etag,
};
}
@@ -917,7 +917,8 @@ impl ECStore {
return Ok(());
}
let mut data = PutObjReader::new(rd.stream, object_info.size);
let hrd = HashReader::new(rd.stream, object_info.size as i64, object_info.size as i64, None, false)?;
let mut data = PutObjReader::new(hrd, object_info.size);
if let Err(err) = self
.put_object(
@@ -956,7 +957,7 @@ impl ECStore {
let pool = self.pools[pool_index].clone();
let wk = Workers::new(pool.disk_set.len() * 2).map_err(|v| Error::from_string(v))?;
let wk = Workers::new(pool.disk_set.len() * 2).map_err(|v| Error::other(v))?;
for (set_idx, set) in pool.disk_set.iter().enumerate() {
wk.clone().take().await;
@@ -1054,7 +1055,7 @@ impl SetDisks {
// Placeholder for actual object listing logic
let (disks, _) = self.get_online_disks_with_healing(false).await;
if disks.is_empty() {
return Err(Error::msg("errNoDiskAvailable"));
return Err(Error::other("errNoDiskAvailable"));
}
let listing_quorum = self.set_drive_count.div_ceil(2);
@@ -1075,7 +1076,7 @@ impl SetDisks {
recursice: true,
min_disks: listing_quorum,
agreed: Some(Box::new(move |entry: MetaCacheEntry| Box::pin(cb1(entry)))),
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<Error>]| {
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<DiskError>]| {
// let cb = cb.clone();
let resolver = resolver.clone();
let cb = cb.clone();

File diff suppressed because it is too large

View File

@@ -1,13 +1,16 @@
#![allow(clippy::map_entry)]
use std::{collections::HashMap, sync::Arc};
use crate::disk::error_reduce::count_errs;
use crate::error::{Error, Result};
use crate::{
disk::{
error::{is_unformatted_disk, DiskError},
error::DiskError,
format::{DistributionAlgoVersion, FormatV3},
new_disk, DiskAPI, DiskInfo, DiskOption, DiskStore,
},
endpoints::{Endpoints, PoolEndpoints},
error::StorageError,
global::{is_dist_erasure, GLOBAL_LOCAL_DISK_SET_DRIVES},
heal::heal_commands::{
HealOpts, DRIVE_STATE_CORRUPT, DRIVE_STATE_MISSING, DRIVE_STATE_OFFLINE, DRIVE_STATE_OK, HEAL_ITEM_METADATA,
@@ -18,11 +21,9 @@ use crate::{
ListMultipartsInfo, ListObjectVersionsInfo, ListObjectsV2Info, MakeBucketOptions, MultipartInfo, MultipartUploadResult,
ObjectIO, ObjectInfo, ObjectOptions, ObjectToDelete, PartInfo, PutObjReader, StorageAPI,
},
store_err::StorageError,
store_init::{check_format_erasure_values, get_format_erasure_in_quorum, load_format_erasure_all, save_format_file},
utils::{hash, path::path_join_buf},
};
use common::error::{Error, Result};
use common::globals::GLOBAL_Local_Node_Name;
use futures::future::join_all;
use http::HeaderMap;
@@ -122,7 +123,7 @@ impl Sets {
}
let has_disk_id = disk.as_ref().unwrap().get_disk_id().await.unwrap_or_else(|err| {
if is_unformatted_disk(&err) {
if err == DiskError::UnformattedDisk {
error!("get_disk_id err {:?}", err);
} else {
warn!("get_disk_id err {:?}", err);
@@ -452,11 +453,11 @@ impl StorageAPI for Sets {
return dst_set.put_object(dst_bucket, dst_object, put_object_reader, &put_opts).await;
}
Err(Error::new(StorageError::InvalidArgument(
Err(StorageError::InvalidArgument(
src_bucket.to_owned(),
src_object.to_owned(),
"put_object_reader2 is none".to_owned(),
)))
))
}
#[tracing::instrument(skip(self))]
@@ -705,9 +706,9 @@ impl StorageAPI for Sets {
res.before.drives.push(v.clone());
res.after.drives.push(v.clone());
}
if DiskError::UnformattedDisk.count_errs(&errs) == 0 {
if count_errs(&errs, &DiskError::UnformattedDisk) == 0 {
info!("disk formats success, NoHealRequired, errs: {:?}", errs);
return Ok((res, Some(Error::new(DiskError::NoHealRequired))));
return Ok((res, Some(StorageError::NoHealRequired)));
}
// if !self.format.eq(&ref_format) {
@@ -807,7 +808,7 @@ async fn _close_storage_disks(disks: &[Option<DiskStore>]) {
async fn init_storage_disks_with_errors(
endpoints: &Endpoints,
opts: &DiskOption,
) -> (Vec<Option<DiskStore>>, Vec<Option<Error>>) {
) -> (Vec<Option<DiskStore>>, Vec<Option<DiskError>>) {
// Bootstrap disks.
// let disks = Arc::new(RwLock::new(vec![None; endpoints.as_ref().len()]));
// let errs = Arc::new(RwLock::new(vec![None; endpoints.as_ref().len()]));
@@ -856,20 +857,21 @@ async fn init_storage_disks_with_errors(
(disks, errs)
}
fn formats_to_drives_info(endpoints: &Endpoints, formats: &[Option<FormatV3>], errs: &[Option<Error>]) -> Vec<HealDriveInfo> {
fn formats_to_drives_info(endpoints: &Endpoints, formats: &[Option<FormatV3>], errs: &[Option<DiskError>]) -> Vec<HealDriveInfo> {
let mut before_drives = Vec::with_capacity(endpoints.as_ref().len());
for (index, format) in formats.iter().enumerate() {
let drive = endpoints.get_string(index);
let state = if format.is_some() {
DRIVE_STATE_OK
} else {
if let Some(Some(err)) = errs.get(index) {
match err.downcast_ref::<DiskError>() {
Some(DiskError::UnformattedDisk) => DRIVE_STATE_MISSING,
Some(DiskError::DiskNotFound) => DRIVE_STATE_OFFLINE,
_ => DRIVE_STATE_CORRUPT,
};
} else if let Some(Some(err)) = errs.get(index) {
if *err == DiskError::UnformattedDisk {
DRIVE_STATE_MISSING
} else if *err == DiskError::DiskNotFound {
DRIVE_STATE_OFFLINE
} else {
DRIVE_STATE_CORRUPT
}
} else {
DRIVE_STATE_CORRUPT
};
@@ -892,14 +894,14 @@ fn new_heal_format_sets(
set_count: usize,
set_drive_count: usize,
formats: &[Option<FormatV3>],
errs: &[Option<Error>],
errs: &[Option<DiskError>],
) -> (Vec<Vec<Option<FormatV3>>>, Vec<Vec<DiskInfo>>) {
let mut new_formats = vec![vec![None; set_drive_count]; set_count];
let mut current_disks_info = vec![vec![DiskInfo::default(); set_drive_count]; set_count];
for (i, set) in ref_format.erasure.sets.iter().enumerate() {
for j in 0..set.len() {
if let Some(Some(err)) = errs.get(i * set_drive_count + j) {
if let Some(DiskError::UnformattedDisk) = err.downcast_ref::<DiskError>() {
if *err == DiskError::UnformattedDisk {
let mut fm = FormatV3::new(set_count, set_drive_count);
fm.id = ref_format.id;
fm.format = ref_format.format.clone();
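
On the hunks above: count_errs replaces the old method-style DiskError::UnformattedDisk.count_errs(&errs). A minimal sketch of such a counter, assumed rather than copied from the crate:

    // Hypothetical: count how many slots failed with a specific error.
    pub fn count_errs<E: PartialEq>(errs: &[Option<E>], target: &E) -> usize {
        errs.iter()
            .filter(|e| e.as_ref().is_some_and(|e| e == target))
            .count()
    }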

View File

@@ -5,8 +5,11 @@ use crate::bucket::utils::{check_valid_bucket_name, check_valid_bucket_name_stri
use crate::config::storageclass;
use crate::config::GLOBAL_StorageClass;
use crate::disk::endpoint::{Endpoint, EndpointType};
use crate::disk::{DiskAPI, DiskInfo, DiskInfoOptions, MetaCacheEntry};
use crate::error::clone_err;
use crate::disk::{DiskAPI, DiskInfo, DiskInfoOptions};
use crate::error::{
is_err_bucket_exists, is_err_invalid_upload_id, is_err_object_not_found, is_err_read_quorum, is_err_version_not_found,
to_object_err, StorageError,
};
use crate::global::{
get_global_endpoints, is_dist_erasure, is_erasure_sd, set_global_deployment_id, set_object_layer, DISK_ASSUME_UNKNOWN_SIZE,
DISK_FILL_FRACTION, DISK_MIN_INODES, DISK_RESERVE_FRACTION, GLOBAL_BOOT_TIME, GLOBAL_LOCAL_DISK_MAP,
@@ -21,17 +24,13 @@ use crate::notification_sys::get_global_notification_sys;
use crate::pools::PoolMeta;
use crate::rebalance::RebalanceMeta;
use crate::store_api::{ListMultipartsInfo, ListObjectVersionsInfo, MultipartInfo, ObjectIO};
use crate::store_err::{
is_err_bucket_exists, is_err_decommission_already_running, is_err_invalid_upload_id, is_err_object_not_found,
is_err_read_quorum, is_err_version_not_found, to_object_err, StorageError,
};
use crate::store_init::ec_drives_no_config;
use crate::store_init::{check_disk_fatal_errs, ec_drives_no_config};
use crate::utils::crypto::base64_decode;
use crate::utils::path::{decode_dir_object, encode_dir_object, path_join_buf, SLASH_SEPARATOR};
use crate::utils::xml;
use crate::{
bucket::metadata::BucketMetadata,
disk::{error::DiskError, new_disk, DiskOption, DiskStore, BUCKET_META_PREFIX, RUSTFS_META_BUCKET},
disk::{new_disk, DiskOption, DiskStore, BUCKET_META_PREFIX, RUSTFS_META_BUCKET},
endpoints::EndpointServerPools,
peer::S3PeerSys,
sets::Sets,
@@ -43,7 +42,7 @@ use crate::{
store_init,
};
use common::error::{Error, Result};
use crate::error::{Error, Result};
use common::globals::{GLOBAL_Local_Node_Name, GLOBAL_Rustfs_Host, GLOBAL_Rustfs_Port};
use futures::future::join_all;
use glob::Pattern;
@@ -51,6 +50,7 @@ use http::HeaderMap;
use lazy_static::lazy_static;
use madmin::heal_commands::HealResultItem;
use rand::Rng;
use rustfs_filemeta::MetaCacheEntry;
use s3s::dto::{BucketVersioningStatus, ObjectLockConfiguration, ObjectLockEnabled, VersioningConfiguration};
use std::cmp::Ordering;
use std::process::exit;
@@ -62,8 +62,8 @@ use tokio::select;
use tokio::sync::mpsc::Sender;
use tokio::sync::{broadcast, mpsc, RwLock};
use tokio::time::{interval, sleep};
use tracing::error;
use tracing::{debug, info};
use tracing::{error, warn};
use uuid::Uuid;
const MAX_UPLOADS_LIST: usize = 10000;
@@ -144,7 +144,7 @@ impl ECStore {
)
.await;
DiskError::check_disk_fatal_errs(&errs)?;
check_disk_fatal_errs(&errs)?;
let fm = {
let mut times = 0;
@@ -166,7 +166,7 @@ impl ECStore {
interval *= 2;
}
if times > 10 {
return Err(Error::from_string("can not get formats"));
return Err(Error::other("can not get formats"));
}
info!("retrying get formats after {:?}", interval);
select! {
@@ -185,7 +185,7 @@ impl ECStore {
}
if deployment_id != Some(fm.id) {
return Err(Error::msg("deployment_id not same in one pool"));
return Err(Error::other("deployment_id not same in one pool"));
}
if deployment_id.is_some() && deployment_id.unwrap().is_nil() {
@@ -241,7 +241,7 @@ impl ECStore {
sleep(Duration::from_secs(wait_sec)).await;
if exit_count > 10 {
return Err(Error::msg("ec init faild"));
return Err(Error::other("ec init faild"));
}
exit_count += 1;
@@ -291,7 +291,7 @@ impl ECStore {
if let Some(idx) = endpoints.get_pool_idx(&p.cmd_line) {
pool_indeces.push(idx);
} else {
return Err(Error::msg(format!(
return Err(Error::other(format!(
"unexpected state present for decommission status pool({}) not found",
p.cmd_line
)));
@@ -310,7 +310,7 @@ impl ECStore {
tokio::time::sleep(Duration::from_secs(60 * 3)).await;
if let Err(err) = store.decommission(rx.resubscribe(), pool_indeces.clone()).await {
if is_err_decommission_already_running(&err) {
if err == StorageError::DecommissionAlreadyRunning {
for i in pool_indeces.iter() {
store.do_decommission_in_routine(rx.resubscribe(), *i).await;
}
@@ -341,7 +341,7 @@ impl ECStore {
// define in store_list_objects.rs
// pub async fn list_path(&self, opts: &ListPathOptions, delimiter: &str) -> Result<ListObjectsInfo> {
// // if opts.prefix.ends_with(SLASH_SEPARATOR) {
// // return Err(Error::msg("eof"));
// // return Err(Error::other("eof"));
// // }
// let mut opts = opts.clone();
@@ -614,7 +614,7 @@ impl ECStore {
if let Some(hit_idx) = self.get_available_pool_idx(bucket, object, size).await {
hit_idx
} else {
return Err(Error::new(DiskError::DiskFull));
return Err(Error::DiskFull);
}
}
};
@@ -633,7 +633,8 @@ impl ECStore {
if let Some(idx) = self.get_available_pool_idx(bucket, object, size).await {
idx
} else {
return Err(to_object_err(Error::new(DiskError::DiskFull), vec![bucket, object]));
warn!("get_pool_idx_no_lock: disk full {}/{}", bucket, object);
return Err(Error::DiskFull);
}
}
};
@@ -731,7 +732,7 @@ impl ECStore {
let err = pinfo.err.as_ref().unwrap();
if is_err_read_quorum(err) && !opts.metadata_chg {
if err == &Error::ErasureReadQuorum && !opts.metadata_chg {
return Ok((pinfo.clone(), self.pools_with_object(&ress, opts).await));
}
@@ -739,7 +740,7 @@ impl ECStore {
has_def_pool = true;
if !is_err_object_not_found(err) && !is_err_version_not_found(err) {
return Err(clone_err(err));
return Err(err.clone());
}
if pinfo.object_info.delete_marker && !pinfo.object_info.name.is_empty() {
@@ -751,7 +752,7 @@ impl ECStore {
return Ok((def_pool, Vec::new()));
}
Err(to_object_err(Error::new(DiskError::FileNotFound), vec![bucket, object]))
Err(Error::ObjectNotFound(bucket.to_owned(), object.to_owned()))
}
async fn pools_with_object(&self, pools: &[PoolObjInfo], opts: &ObjectOptions) -> Vec<PoolErr> {
@@ -767,10 +768,10 @@ impl ECStore {
}
if let Some(err) = &pool.err {
if is_err_read_quorum(err) {
if err == &Error::ErasureReadQuorum {
errs.push(PoolErr {
index: Some(pool.index),
err: Some(Error::new(StorageError::InsufficientReadQuorum)),
err: Some(Error::ErasureReadQuorum),
});
}
} else {
@@ -878,7 +879,7 @@ impl ECStore {
}
let _ = task.await;
if let Some(err) = first_err.read().await.as_ref() {
return Err(clone_err(err));
return Err(err.clone());
}
Ok(())
}
@@ -961,13 +962,13 @@ impl ECStore {
let object = decode_dir_object(object);
if opts.version_id.is_none() {
Err(Error::new(StorageError::ObjectNotFound(bucket.to_owned(), object.to_owned())))
Err(StorageError::ObjectNotFound(bucket.to_owned(), object.to_owned()))
} else {
Err(Error::new(StorageError::VersionNotFound(
Err(StorageError::VersionNotFound(
bucket.to_owned(),
object.to_owned(),
opts.version_id.clone().unwrap_or_default(),
)))
))
}
}
@@ -983,9 +984,9 @@ impl ECStore {
for pe in errs.iter() {
if let Some(err) = &pe.err {
if is_err_read_quorum(err) {
if err == &StorageError::ErasureWriteQuorum {
objs.push(None);
derrs.push(Some(Error::new(StorageError::InsufficientWriteQuorum)));
derrs.push(Some(StorageError::ErasureWriteQuorum));
continue;
}
}
@@ -1006,7 +1007,7 @@ impl ECStore {
}
if let Some(e) = &derrs[0] {
return Err(clone_err(e));
return Err(e.clone());
}
Ok(objs[0].as_ref().unwrap().clone())
@@ -1142,7 +1143,7 @@ impl Clone for PoolObjInfo {
Self {
index: self.index,
object_info: self.object_info.clone(),
err: self.err.as_ref().map(clone_err),
err: self.err.clone(),
}
}
}
@@ -1219,11 +1220,11 @@ impl ObjectIO for ECStore {
let idx = self.get_pool_idx(bucket, &object, data.content_length as i64).await?;
if opts.data_movement && idx == opts.src_pool_idx {
return Err(Error::new(StorageError::DataMovementOverwriteErr(
return Err(StorageError::DataMovementOverwriteErr(
bucket.to_owned(),
object.to_owned(),
opts.version_id.clone().unwrap_or_default(),
)));
));
}
self.pools[idx].put_object(bucket, &object, data, opts).await
@@ -1327,7 +1328,7 @@ impl StorageAPI for ECStore {
}
if let Err(err) = self.peer_sys.make_bucket(bucket, opts).await {
if !is_err_bucket_exists(&err) {
if !is_err_bucket_exists(&err.into()) {
let _ = self
.delete_bucket(
bucket,
@@ -1354,7 +1355,7 @@ impl StorageAPI for ECStore {
meta.versioning_config_xml = xml::serialize::<VersioningConfiguration>(&enableVersioningConfig)?;
}
meta.save().await.map_err(|e| to_object_err(e, vec![bucket]))?;
meta.save().await?;
set_bucket_metadata(bucket.to_string(), meta).await?;
@@ -1363,11 +1364,7 @@ impl StorageAPI for ECStore {
#[tracing::instrument(skip(self))]
async fn get_bucket_info(&self, bucket: &str, opts: &BucketOptions) -> Result<BucketInfo> {
let mut info = self
.peer_sys
.get_bucket_info(bucket, opts)
.await
.map_err(|e| to_object_err(e, vec![bucket]))?;
let mut info = self.peer_sys.get_bucket_info(bucket, opts).await?;
if let Ok(sys) = metadata_sys::get(bucket).await {
info.created = Some(sys.created);
@@ -1413,7 +1410,7 @@ impl StorageAPI for ECStore {
self.peer_sys
.delete_bucket(bucket, &opts)
.await
.map_err(|e| to_object_err(e, vec![bucket]))?;
.map_err(|e| to_object_err(e.into(), vec![bucket]))?;
// TODO: replication opts.srdelete_op
@@ -1537,11 +1534,11 @@ impl StorageAPI for ECStore {
.await;
}
Err(Error::new(StorageError::InvalidArgument(
Err(StorageError::InvalidArgument(
src_bucket.to_owned(),
src_object.to_owned(),
"put_object_reader is none".to_owned(),
)))
))
}
#[tracing::instrument(skip(self))]
async fn delete_object(&self, bucket: &str, object: &str, opts: ObjectOptions) -> Result<ObjectInfo> {
@@ -1563,7 +1560,7 @@ impl StorageAPI for ECStore {
.await
.map_err(|e| {
if is_err_read_quorum(&e) {
Error::new(StorageError::InsufficientWriteQuorum)
StorageError::ErasureWriteQuorum
} else {
e
}
@@ -1575,11 +1572,11 @@ impl StorageAPI for ECStore {
}
if opts.data_movement && opts.src_pool_idx == pinfo.index {
return Err(Error::new(StorageError::DataMovementOverwriteErr(
return Err(StorageError::DataMovementOverwriteErr(
bucket.to_owned(),
object.to_owned(),
opts.version_id.unwrap_or_default(),
)));
));
}
if opts.data_movement {
@@ -1608,10 +1605,10 @@ impl StorageAPI for ECStore {
}
if let Some(ver) = opts.version_id {
return Err(Error::new(StorageError::VersionNotFound(bucket.to_owned(), object.to_owned(), ver)));
return Err(StorageError::VersionNotFound(bucket.to_owned(), object.to_owned(), ver));
}
Err(Error::new(StorageError::ObjectNotFound(bucket.to_owned(), object.to_owned())))
Err(StorageError::ObjectNotFound(bucket.to_owned(), object.to_owned()))
}
// TODO: review
#[tracing::instrument(skip(self))]
@@ -1843,11 +1840,11 @@ impl StorageAPI for ECStore {
let idx = self.get_pool_idx(bucket, object, -1).await?;
if opts.data_movement && idx == opts.src_pool_idx {
return Err(Error::new(StorageError::DataMovementOverwriteErr(
return Err(StorageError::DataMovementOverwriteErr(
bucket.to_owned(),
object.to_owned(),
"".to_owned(),
)));
));
}
self.pools[idx].new_multipart_upload(bucket, object, opts).await
@@ -1914,11 +1911,7 @@ impl StorageAPI for ECStore {
}
}
Err(Error::new(StorageError::InvalidUploadID(
bucket.to_owned(),
object.to_owned(),
upload_id.to_owned(),
)))
Err(StorageError::InvalidUploadID(bucket.to_owned(), object.to_owned(), upload_id.to_owned()))
}
#[tracing::instrument(skip(self))]
@@ -1951,11 +1944,7 @@ impl StorageAPI for ECStore {
};
}
Err(Error::new(StorageError::InvalidUploadID(
bucket.to_owned(),
object.to_owned(),
upload_id.to_owned(),
)))
Err(StorageError::InvalidUploadID(bucket.to_owned(), object.to_owned(), upload_id.to_owned()))
}
#[tracing::instrument(skip(self))]
async fn abort_multipart_upload(&self, bucket: &str, object: &str, upload_id: &str, opts: &ObjectOptions) -> Result<()> {
@@ -1989,11 +1978,7 @@ impl StorageAPI for ECStore {
}
}
Err(Error::new(StorageError::InvalidUploadID(
bucket.to_owned(),
object.to_owned(),
upload_id.to_owned(),
)))
Err(StorageError::InvalidUploadID(bucket.to_owned(), object.to_owned(), upload_id.to_owned()))
}
#[tracing::instrument(skip(self))]
@@ -2038,11 +2023,7 @@ impl StorageAPI for ECStore {
}
}
Err(Error::new(StorageError::InvalidUploadID(
bucket.to_owned(),
object.to_owned(),
upload_id.to_owned(),
)))
Err(StorageError::InvalidUploadID(bucket.to_owned(), object.to_owned(), upload_id.to_owned()))
}
#[tracing::instrument(skip(self))]
@@ -2050,7 +2031,7 @@ impl StorageAPI for ECStore {
if pool_idx < self.pools.len() && set_idx < self.pools[pool_idx].disk_set.len() {
self.pools[pool_idx].disk_set[set_idx].get_disks(0, 0).await
} else {
Err(Error::msg(format!("pool idx {}, set idx {}, not found", pool_idx, set_idx)))
Err(Error::other(format!("pool idx {}, set idx {}, not found", pool_idx, set_idx)))
}
}
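
The is_err_* helpers imported at the top of this file are thin predicates over the typed enum. A sketch consistent with the StorageError variants visible in these hunks (the real helpers may differ):

    // Hypothetical predicates in the style of the imported helpers.
    pub fn is_err_object_not_found(err: &StorageError) -> bool {
        matches!(err, StorageError::ObjectNotFound(_, _))
    }

    pub fn is_err_version_not_found(err: &StorageError) -> bool {
        matches!(err, StorageError::VersionNotFound(_, _, _))
    }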
@@ -2129,8 +2110,8 @@ impl StorageAPI for ECStore {
for pool in self.pools.iter() {
let (mut result, err) = pool.heal_format(dry_run).await?;
if let Some(err) = err {
match err.downcast_ref::<DiskError>() {
Some(DiskError::NoHealRequired) => {
match err {
StorageError::NoHealRequired => {
count_no_heal += 1;
}
_ => {
@@ -2145,7 +2126,7 @@ impl StorageAPI for ECStore {
}
if count_no_heal == self.pools.len() {
info!("heal format success, NoHealRequired");
return Ok((r, Some(Error::new(DiskError::NoHealRequired))));
return Ok((r, Some(StorageError::NoHealRequired)));
}
info!("heal format success result: {:?}", r);
Ok((r, None))
@@ -2153,7 +2134,9 @@ impl StorageAPI for ECStore {
#[tracing::instrument(skip(self))]
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem> {
self.peer_sys.heal_bucket(bucket, opts).await
let res = self.peer_sys.heal_bucket(bucket, opts).await?;
Ok(res)
}
#[tracing::instrument(skip(self))]
async fn heal_object(
@@ -2212,10 +2195,12 @@ impl StorageAPI for ECStore {
// No pool returned a None error; return the first non-'not found' error
for (index, err) in errs.iter().enumerate() {
match err {
Some(err) => match err.downcast_ref::<DiskError>() {
Some(DiskError::FileNotFound) | Some(DiskError::FileVersionNotFound) => {}
_ => return Ok((ress.remove(index), Some(clone_err(err)))),
},
Some(err) => {
if is_err_object_not_found(&err) || is_err_version_not_found(&err) {
continue;
}
return Ok((ress.remove(index), Some(err.clone())));
}
None => {
return Ok((ress.remove(index), None));
}
@@ -2224,10 +2209,10 @@ impl StorageAPI for ECStore {
// At this stage, all errors are 'not found'
if !version_id.is_empty() {
return Ok((HealResultItem::default(), Some(Error::new(DiskError::FileVersionNotFound))));
return Ok((HealResultItem::default(), Some(Error::FileVersionNotFound)));
}
Ok((HealResultItem::default(), Some(Error::new(DiskError::FileNotFound))))
Ok((HealResultItem::default(), Some(Error::FileNotFound)))
}
#[tracing::instrument(skip(self))]
@@ -2271,7 +2256,9 @@ impl StorageAPI for ECStore {
};
if opts_clone.remove && !opts_clone.dry_run {
let Some(store) = new_object_layer_fn() else { return Err(Error::msg("errServerNotInitialized")) };
let Some(store) = new_object_layer_fn() else {
return Err(Error::other("errServerNotInitialized"));
};
if let Err(err) = store.check_abandoned_parts(&bucket, &entry.name, &opts_clone).await {
info!("unable to check object {}/{} for abandoned data: {}", bucket, entry.name, err.to_string());
@@ -2288,8 +2275,8 @@ impl StorageAPI for ECStore {
)
.await
{
match err.downcast_ref() {
Some(DiskError::FileNotFound) | Some(DiskError::FileVersionNotFound) => {}
match err {
Error::FileNotFound | Error::FileVersionNotFound => {}
_ => {
return Err(err);
}
@@ -2304,8 +2291,8 @@ impl StorageAPI for ECStore {
)
.await
{
match err.downcast_ref() {
Some(DiskError::FileNotFound) | Some(DiskError::FileVersionNotFound) => {}
match err {
Error::FileNotFound | Error::FileVersionNotFound => {}
_ => {
return Err(err);
}
@@ -2354,7 +2341,7 @@ impl StorageAPI for ECStore {
}
}
Err(Error::new(DiskError::DiskNotFound))
Err(Error::DiskNotFound)
}
#[tracing::instrument(skip(self))]
@@ -2373,7 +2360,7 @@ impl StorageAPI for ECStore {
}
if !errs.is_empty() {
return Err(clone_err(&errs[0]));
return Err(errs[0].clone());
}
Ok(())
@@ -2417,11 +2404,11 @@ fn is_valid_object_name(object: &str) -> bool {
fn check_object_name_for_length_and_slash(bucket: &str, object: &str) -> Result<()> {
if object.len() > 1024 {
return Err(Error::new(StorageError::ObjectNameTooLong(bucket.to_owned(), object.to_owned())));
return Err(StorageError::ObjectNameTooLong(bucket.to_owned(), object.to_owned()));
}
if object.starts_with(SLASH_SEPARATOR) {
return Err(Error::new(StorageError::ObjectNamePrefixAsSlash(bucket.to_owned(), object.to_owned())));
return Err(StorageError::ObjectNamePrefixAsSlash(bucket.to_owned(), object.to_owned()));
}
#[cfg(target_os = "windows")]
@@ -2435,7 +2422,7 @@ fn check_object_name_for_length_and_slash(bucket: &str, object: &str) -> Result<
|| object.contains('<')
|| object.contains('>')
{
return Err(Error::new(StorageError::ObjectNameInvalid(bucket.to_owned(), object.to_owned())));
return Err(StorageError::ObjectNameInvalid(bucket.to_owned(), object.to_owned()));
}
}
@@ -2456,19 +2443,19 @@ fn check_del_obj_args(bucket: &str, object: &str) -> Result<()> {
fn check_bucket_and_object_names(bucket: &str, object: &str) -> Result<()> {
if !is_meta_bucketname(bucket) && check_valid_bucket_name_strict(bucket).is_err() {
return Err(Error::new(StorageError::BucketNameInvalid(bucket.to_string())));
return Err(StorageError::BucketNameInvalid(bucket.to_string()));
}
if object.is_empty() {
return Err(Error::new(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string())));
return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
}
if !is_valid_object_prefix(object) {
return Err(Error::new(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string())));
return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
}
if cfg!(target_os = "windows") && object.contains('\\') {
return Err(Error::new(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string())));
return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
}
Ok(())
@@ -2476,11 +2463,11 @@ fn check_bucket_and_object_names(bucket: &str, object: &str) -> Result<()> {
pub fn check_list_objs_args(bucket: &str, prefix: &str, _marker: &Option<String>) -> Result<()> {
if !is_meta_bucketname(bucket) && check_valid_bucket_name_strict(bucket).is_err() {
return Err(Error::new(StorageError::BucketNameInvalid(bucket.to_string())));
return Err(StorageError::BucketNameInvalid(bucket.to_string()));
}
if !is_valid_object_prefix(prefix) {
return Err(Error::new(StorageError::ObjectNameInvalid(bucket.to_string(), prefix.to_string())));
return Err(StorageError::ObjectNameInvalid(bucket.to_string(), prefix.to_string()));
}
Ok(())
@@ -2498,15 +2485,15 @@ fn check_list_multipart_args(
if let Some(upload_id_marker) = upload_id_marker {
if let Some(key_marker) = key_marker {
if key_marker.ends_with('/') {
return Err(Error::new(StorageError::InvalidUploadIDKeyCombination(
return Err(StorageError::InvalidUploadIDKeyCombination(
upload_id_marker.to_string(),
key_marker.to_string(),
)));
));
}
}
if let Err(_e) = base64_decode(upload_id_marker.as_bytes()) {
return Err(Error::new(StorageError::MalformedUploadID(upload_id_marker.to_owned())));
return Err(StorageError::MalformedUploadID(upload_id_marker.to_owned()));
}
}
@@ -2515,13 +2502,13 @@ fn check_list_multipart_args(
fn check_object_args(bucket: &str, object: &str) -> Result<()> {
if !is_meta_bucketname(bucket) && check_valid_bucket_name_strict(bucket).is_err() {
return Err(Error::new(StorageError::BucketNameInvalid(bucket.to_string())));
return Err(StorageError::BucketNameInvalid(bucket.to_string()));
}
check_object_name_for_length_and_slash(bucket, object)?;
if !is_valid_object_name(object) {
return Err(Error::new(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string())));
return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
}
Ok(())
@@ -2533,10 +2520,7 @@ fn check_new_multipart_args(bucket: &str, object: &str) -> Result<()> {
fn check_multipart_object_args(bucket: &str, object: &str, upload_id: &str) -> Result<()> {
if let Err(e) = base64_decode(upload_id.as_bytes()) {
return Err(Error::new(StorageError::MalformedUploadID(format!(
"{}/{}-{},err:{}",
bucket, object, upload_id, e
))));
return Err(StorageError::MalformedUploadID(format!("{}/{}-{},err:{}", bucket, object, upload_id, e)));
};
check_object_args(bucket, object)
}
@@ -2560,13 +2544,13 @@ fn check_abort_multipart_args(bucket: &str, object: &str, upload_id: &str) -> Re
#[tracing::instrument(level = "debug")]
fn check_put_object_args(bucket: &str, object: &str) -> Result<()> {
if !is_meta_bucketname(bucket) && check_valid_bucket_name_strict(bucket).is_err() {
return Err(Error::new(StorageError::BucketNameInvalid(bucket.to_string())));
return Err(StorageError::BucketNameInvalid(bucket.to_string()));
}
check_object_name_for_length_and_slash(bucket, object)?;
if object.is_empty() || !is_valid_object_prefix(object) {
return Err(Error::new(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string())));
return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
}
Ok(())
@@ -2659,7 +2643,7 @@ pub async fn has_space_for(dis: &[Option<DiskInfo>], size: i64) -> Result<bool>
}
if disks_num < dis.len() / 2 || disks_num == 0 {
return Err(Error::msg(format!(
return Err(Error::other(format!(
"not enough online disks to calculate the available space,need {}, found {}",
(dis.len() / 2) + 1,
disks_num,

File diff suppressed because it is too large


@@ -1,322 +0,0 @@
use crate::{
disk::error::{is_err_file_not_found, DiskError},
utils::path::decode_dir_object,
};
use common::error::Error;
#[derive(Debug, thiserror::Error, PartialEq, Eq)]
pub enum StorageError {
#[error("not implemented")]
NotImplemented,
#[error("Invalid arguments provided for {0}/{1}-{2}")]
InvalidArgument(String, String, String),
#[error("method not allowed")]
MethodNotAllowed,
#[error("Bucket not found: {0}")]
BucketNotFound(String),
#[error("Bucket not empty: {0}")]
BucketNotEmpty(String),
#[error("Bucket name invalid: {0}")]
BucketNameInvalid(String),
#[error("Object name invalid: {0}/{1}")]
ObjectNameInvalid(String, String),
#[error("Bucket exists: {0}")]
BucketExists(String),
#[error("Storage reached its minimum free drive threshold.")]
StorageFull,
#[error("Please reduce your request rate")]
SlowDown,
#[error("Prefix access is denied:{0}/{1}")]
PrefixAccessDenied(String, String),
#[error("Invalid UploadID KeyCombination: {0}/{1}")]
InvalidUploadIDKeyCombination(String, String),
#[error("Malformed UploadID: {0}")]
MalformedUploadID(String),
#[error("Object name too long: {0}/{1}")]
ObjectNameTooLong(String, String),
#[error("Object name contains forward slash as prefix: {0}/{1}")]
ObjectNamePrefixAsSlash(String, String),
#[error("Object not found: {0}/{1}")]
ObjectNotFound(String, String),
#[error("volume not found: {0}")]
VolumeNotFound(String),
#[error("Version not found: {0}/{1}-{2}")]
VersionNotFound(String, String, String),
#[error("Invalid upload id: {0}/{1}-{2}")]
InvalidUploadID(String, String, String),
#[error("Specified part could not be found. PartNumber {0}, Expected {1}, got {2}")]
InvalidPart(usize, String, String),
#[error("Invalid version id: {0}/{1}-{2}")]
InvalidVersionID(String, String, String),
#[error("invalid data movement operation, source and destination pool are the same for : {0}/{1}-{2}")]
DataMovementOverwriteErr(String, String, String),
#[error("Object exists on :{0} as directory {1}")]
ObjectExistsAsDirectory(String, String),
#[error("Storage resources are insufficient for the read operation")]
InsufficientReadQuorum,
#[error("Storage resources are insufficient for the write operation")]
InsufficientWriteQuorum,
#[error("Decommission not started")]
DecommissionNotStarted,
#[error("Decommission already running")]
DecommissionAlreadyRunning,
#[error("DoneForNow")]
DoneForNow,
}
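This file (store_err.rs) is deleted by the commit, with `StorageError` moving into `crate::error` (see the later `use crate::error::{..., StorageError}` hunk). Assuming the variants keep the `#[error(...)]` strings shown above in their new home, `Display` renders like this:

#[test]
fn storage_error_display() {
    // Display is generated from the #[error(...)] attribute on each variant.
    let err = StorageError::ObjectNotFound("photos".to_owned(), "2024/cat.png".to_owned());
    assert_eq!(err.to_string(), "Object not found: photos/2024/cat.png");
}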
impl StorageError {
pub fn to_u32(&self) -> u32 {
match self {
StorageError::NotImplemented => 0x01,
StorageError::InvalidArgument(_, _, _) => 0x02,
StorageError::MethodNotAllowed => 0x03,
StorageError::BucketNotFound(_) => 0x04,
StorageError::BucketNotEmpty(_) => 0x05,
StorageError::BucketNameInvalid(_) => 0x06,
StorageError::ObjectNameInvalid(_, _) => 0x07,
StorageError::BucketExists(_) => 0x08,
StorageError::StorageFull => 0x09,
StorageError::SlowDown => 0x0A,
StorageError::PrefixAccessDenied(_, _) => 0x0B,
StorageError::InvalidUploadIDKeyCombination(_, _) => 0x0C,
StorageError::MalformedUploadID(_) => 0x0D,
StorageError::ObjectNameTooLong(_, _) => 0x0E,
StorageError::ObjectNamePrefixAsSlash(_, _) => 0x0F,
StorageError::ObjectNotFound(_, _) => 0x10,
StorageError::VersionNotFound(_, _, _) => 0x11,
StorageError::InvalidUploadID(_, _, _) => 0x12,
StorageError::InvalidVersionID(_, _, _) => 0x13,
StorageError::DataMovementOverwriteErr(_, _, _) => 0x14,
StorageError::ObjectExistsAsDirectory(_, _) => 0x15,
StorageError::InsufficientReadQuorum => 0x16,
StorageError::InsufficientWriteQuorum => 0x17,
StorageError::DecommissionNotStarted => 0x18,
StorageError::InvalidPart(_, _, _) => 0x19,
StorageError::VolumeNotFound(_) => 0x20,
StorageError::DoneForNow => 0x21,
StorageError::DecommissionAlreadyRunning => 0x22,
}
}
pub fn from_u32(error: u32) -> Option<Self> {
match error {
0x01 => Some(StorageError::NotImplemented),
0x02 => Some(StorageError::InvalidArgument(Default::default(), Default::default(), Default::default())),
0x03 => Some(StorageError::MethodNotAllowed),
0x04 => Some(StorageError::BucketNotFound(Default::default())),
0x05 => Some(StorageError::BucketNotEmpty(Default::default())),
0x06 => Some(StorageError::BucketNameInvalid(Default::default())),
0x07 => Some(StorageError::ObjectNameInvalid(Default::default(), Default::default())),
0x08 => Some(StorageError::BucketExists(Default::default())),
0x09 => Some(StorageError::StorageFull),
0x0A => Some(StorageError::SlowDown),
0x0B => Some(StorageError::PrefixAccessDenied(Default::default(), Default::default())),
0x0C => Some(StorageError::InvalidUploadIDKeyCombination(Default::default(), Default::default())),
0x0D => Some(StorageError::MalformedUploadID(Default::default())),
0x0E => Some(StorageError::ObjectNameTooLong(Default::default(), Default::default())),
0x0F => Some(StorageError::ObjectNamePrefixAsSlash(Default::default(), Default::default())),
0x10 => Some(StorageError::ObjectNotFound(Default::default(), Default::default())),
0x11 => Some(StorageError::VersionNotFound(Default::default(), Default::default(), Default::default())),
0x12 => Some(StorageError::InvalidUploadID(Default::default(), Default::default(), Default::default())),
0x13 => Some(StorageError::InvalidVersionID(Default::default(), Default::default(), Default::default())),
0x14 => Some(StorageError::DataMovementOverwriteErr(
Default::default(),
Default::default(),
Default::default(),
)),
0x15 => Some(StorageError::ObjectExistsAsDirectory(Default::default(), Default::default())),
0x16 => Some(StorageError::InsufficientReadQuorum),
0x17 => Some(StorageError::InsufficientWriteQuorum),
0x18 => Some(StorageError::DecommissionNotStarted),
0x19 => Some(StorageError::InvalidPart(Default::default(), Default::default(), Default::default())),
0x20 => Some(StorageError::VolumeNotFound(Default::default())),
0x21 => Some(StorageError::DoneForNow),
0x22 => Some(StorageError::DecommissionAlreadyRunning),
_ => None,
}
}
}
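Note that the u32 mapping is lossy on payloads: `from_u32` rebuilds each variant with `Default::default()` fields. A small round-trip check against the tables above:

#[test]
fn storage_error_code_roundtrip() {
    let code = StorageError::BucketNotFound("b1".to_owned()).to_u32(); // 0x04
    // The variant survives the round trip; its bucket name does not.
    assert_eq!(StorageError::from_u32(code), Some(StorageError::BucketNotFound(String::new())));
}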
pub fn to_object_err(err: Error, params: Vec<&str>) -> Error {
if let Some(e) = err.downcast_ref::<DiskError>() {
match e {
DiskError::DiskFull => {
return Error::new(StorageError::StorageFull);
}
DiskError::FileNotFound => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
return Error::new(StorageError::ObjectNotFound(bucket, object));
}
DiskError::FileVersionNotFound => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
let version = params.get(2).cloned().unwrap_or_default().to_owned();
return Error::new(StorageError::VersionNotFound(bucket, object, version));
}
DiskError::TooManyOpenFiles => {
return Error::new(StorageError::SlowDown);
}
DiskError::FileNameTooLong => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
return Error::new(StorageError::ObjectNameInvalid(bucket, object));
}
DiskError::VolumeExists => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
return Error::new(StorageError::BucketExists(bucket));
}
DiskError::IsNotRegular => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
return Error::new(StorageError::ObjectExistsAsDirectory(bucket, object));
}
DiskError::VolumeNotFound => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
return Error::new(StorageError::BucketNotFound(bucket));
}
DiskError::VolumeNotEmpty => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
return Error::new(StorageError::BucketNotEmpty(bucket));
}
DiskError::FileAccessDenied => {
let bucket = params.first().cloned().unwrap_or_default().to_owned();
let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
return Error::new(StorageError::PrefixAccessDenied(bucket, object));
}
// DiskError::MaxVersionsExceeded => todo!(),
// DiskError::Unexpected => todo!(),
// DiskError::CorruptedFormat => todo!(),
// DiskError::CorruptedBackend => todo!(),
// DiskError::UnformattedDisk => todo!(),
// DiskError::InconsistentDisk => todo!(),
// DiskError::UnsupportedDisk => todo!(),
// DiskError::DiskNotDir => todo!(),
// DiskError::DiskNotFound => todo!(),
// DiskError::DiskOngoingReq => todo!(),
// DiskError::DriveIsRoot => todo!(),
// DiskError::FaultyRemoteDisk => todo!(),
// DiskError::FaultyDisk => todo!(),
// DiskError::DiskAccessDenied => todo!(),
// DiskError::FileCorrupt => todo!(),
// DiskError::BitrotHashAlgoInvalid => todo!(),
// DiskError::CrossDeviceLink => todo!(),
// DiskError::LessData => todo!(),
// DiskError::MoreData => todo!(),
// DiskError::OutdatedXLMeta => todo!(),
// DiskError::PartMissingOrCorrupt => todo!(),
// DiskError::PathNotFound => todo!(),
// DiskError::VolumeAccessDenied => todo!(),
_ => (),
}
}
err
}
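A usage sketch of the `to_object_err` shim above, paired with the `is_err_*` predicates defined just below: a raw `DiskError` plus positional bucket/object params comes back as the matching typed `StorageError`:

#[test]
fn to_object_err_maps_disk_errors() {
    let err = to_object_err(Error::new(DiskError::FileNotFound), vec!["mybucket", "a/b"]);
    assert!(is_err_object_not_found(&err)); // FileNotFound => ObjectNotFound(bucket, object)

    let err = to_object_err(Error::new(DiskError::VolumeNotFound), vec!["mybucket"]);
    assert!(is_err_bucket_not_found(&err)); // VolumeNotFound => BucketNotFound(bucket)
}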
pub fn is_err_decommission_already_running(err: &Error) -> bool {
if let Some(e) = err.downcast_ref::<StorageError>() {
matches!(e, StorageError::DecommissionAlreadyRunning)
} else {
false
}
}
pub fn is_err_data_movement_overwrite(err: &Error) -> bool {
if let Some(e) = err.downcast_ref::<StorageError>() {
matches!(e, StorageError::DataMovementOverwriteErr(_, _, _))
} else {
false
}
}
pub fn is_err_read_quorum(err: &Error) -> bool {
if let Some(e) = err.downcast_ref::<StorageError>() {
matches!(e, StorageError::InsufficientReadQuorum)
} else {
false
}
}
pub fn is_err_invalid_upload_id(err: &Error) -> bool {
if let Some(e) = err.downcast_ref::<StorageError>() {
matches!(e, StorageError::InvalidUploadID(_, _, _))
} else {
false
}
}
pub fn is_err_version_not_found(err: &Error) -> bool {
if let Some(e) = err.downcast_ref::<StorageError>() {
matches!(e, StorageError::VersionNotFound(_, _, _))
} else {
false
}
}
pub fn is_err_bucket_exists(err: &Error) -> bool {
if let Some(e) = err.downcast_ref::<StorageError>() {
matches!(e, StorageError::BucketExists(_))
} else {
false
}
}
pub fn is_err_bucket_not_found(err: &Error) -> bool {
if let Some(e) = err.downcast_ref::<StorageError>() {
matches!(e, StorageError::VolumeNotFound(_)) || matches!(e, StorageError::BucketNotFound(_))
} else {
false
}
}
pub fn is_err_object_not_found(err: &Error) -> bool {
if is_err_file_not_found(err) {
return true;
}
if let Some(e) = err.downcast_ref::<StorageError>() {
matches!(e, StorageError::ObjectNotFound(_, _))
} else {
false
}
}
#[test]
fn test_storage_error() {
let e1 = Error::new(StorageError::BucketExists("ss".into()));
let e2 = Error::new(StorageError::ObjectNotFound("ss".into(), "sdf".to_owned()));
assert!(is_err_bucket_exists(&e1));
assert!(!is_err_object_not_found(&e1));
assert!(is_err_object_not_found(&e2));
}


@@ -1,5 +1,7 @@
use crate::config::{storageclass, KVS};
use crate::disk::DiskAPI;
use crate::disk::error_reduce::{count_errs, reduce_write_quorum_errs};
use crate::disk::{self, DiskAPI};
use crate::error::{Error, Result};
use crate::{
disk::{
error::DiskError,
@@ -9,17 +11,13 @@ use crate::{
endpoints::Endpoints,
heal::heal_commands::init_healing_tracker,
};
use common::error::{Error, Result};
use futures::future::join_all;
use std::{
collections::{hash_map::Entry, HashMap},
fmt::Debug,
};
use std::collections::{hash_map::Entry, HashMap};
use tracing::{debug, warn};
use uuid::Uuid;
pub async fn init_disks(eps: &Endpoints, opt: &DiskOption) -> (Vec<Option<DiskStore>>, Vec<Option<Error>>) {
pub async fn init_disks(eps: &Endpoints, opt: &DiskOption) -> (Vec<Option<DiskStore>>, Vec<Option<DiskError>>) {
let mut futures = Vec::with_capacity(eps.as_ref().len());
for ep in eps.as_ref().iter() {
@@ -52,29 +50,21 @@ pub async fn connect_load_init_formats(
set_count: usize,
set_drive_count: usize,
deployment_id: Option<Uuid>,
) -> Result<FormatV3, Error> {
) -> Result<FormatV3> {
warn!("connect_load_init_formats first_disk: {}", first_disk);
let (formats, errs) = load_format_erasure_all(disks, false).await;
debug!("load_format_erasure_all errs {:?}", &errs);
DiskError::check_disk_fatal_errs(&errs)?;
check_disk_fatal_errs(&errs)?;
check_format_erasure_values(&formats, set_drive_count)?;
if first_disk && DiskError::should_init_erasure_disks(&errs) {
if first_disk && should_init_erasure_disks(&errs) {
// UnformattedDisk: no format file yet, create one
warn!("first_disk && should_init_erasure_disks");
// new format and save
let fms = init_format_erasure(disks, set_count, set_drive_count, deployment_id);
let errs = save_format_file_all(disks, &fms).await;
warn!("save_format_file_all errs {:?}", &errs);
// TODO: check quorum
// reduceWriteQuorumErrs(&errs)?;
let fm = get_format_erasure_in_quorum(&fms)?;
let fm = init_format_erasure(disks, set_count, set_drive_count, deployment_id).await?;
return Ok(fm);
}
@@ -82,16 +72,16 @@ pub async fn connect_load_init_formats(
warn!(
"first_disk: {}, should_init_erasure_disks: {}",
first_disk,
DiskError::should_init_erasure_disks(&errs)
should_init_erasure_disks(&errs)
);
let unformatted = DiskError::quorum_unformatted_disks(&errs);
let unformatted = quorum_unformatted_disks(&errs);
if unformatted && !first_disk {
return Err(Error::new(ErasureError::NotFirstDisk));
return Err(Error::NotFirstDisk);
}
if unformatted && first_disk {
return Err(Error::new(ErasureError::FirstDiskWait));
return Err(Error::FirstDiskWait);
}
let fm = get_format_erasure_in_quorum(&formats)?;
@@ -99,12 +89,36 @@ pub async fn connect_load_init_formats(
Ok(fm)
}
fn init_format_erasure(
pub fn quorum_unformatted_disks(errs: &[Option<DiskError>]) -> bool {
count_errs(errs, &DiskError::UnformattedDisk) > (errs.len() / 2)
}
pub fn should_init_erasure_disks(errs: &[Option<DiskError>]) -> bool {
count_errs(errs, &DiskError::UnformattedDisk) == errs.len()
}
pub fn check_disk_fatal_errs(errs: &[Option<DiskError>]) -> disk::error::Result<()> {
if count_errs(errs, &DiskError::UnsupportedDisk) == errs.len() {
return Err(DiskError::UnsupportedDisk);
}
if count_errs(errs, &DiskError::FileAccessDenied) == errs.len() {
return Err(DiskError::FileAccessDenied);
}
if count_errs(errs, &DiskError::DiskNotDir) == errs.len() {
return Err(DiskError::DiskNotDir);
}
Ok(())
}
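These three predicates, hoisted out of `DiskError` into free functions here, all reduce to counting matching entries (assuming `count_errs` tallies occurrences of the given error across the slice). For example:

// Three disks respond; two report UnformattedDisk.
let errs = vec![Some(DiskError::UnformattedDisk), Some(DiskError::UnformattedDisk), None];
assert!(quorum_unformatted_disks(&errs)); // 2 > 3 / 2
assert!(!should_init_erasure_disks(&errs)); // init requires *all* disks unformatted
assert!(check_disk_fatal_errs(&errs).is_ok()); // fatal only when one error is unanimous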
async fn init_format_erasure(
disks: &[Option<DiskStore>],
set_count: usize,
set_drive_count: usize,
deployment_id: Option<Uuid>,
) -> Vec<Option<FormatV3>> {
) -> Result<FormatV3> {
let fm = FormatV3::new(set_count, set_drive_count);
let mut fms = vec![None; disks.len()];
for i in 0..set_count {
@@ -120,7 +134,9 @@ fn init_format_erasure(
}
}
fms
save_format_file_all(disks, &fms).await?;
get_format_erasure_in_quorum(&fms)
}
pub fn get_format_erasure_in_quorum(formats: &[Option<FormatV3>]) -> Result<FormatV3> {
@@ -143,13 +159,13 @@ pub fn get_format_erasure_in_quorum(formats: &[Option<FormatV3>]) -> Result<Form
if *max_drives == 0 || *max_count <= formats.len() / 2 {
warn!("get_format_erasure_in_quorum fi: {:?}", &formats);
return Err(Error::new(ErasureError::ErasureReadQuorum));
return Err(Error::ErasureReadQuorum);
}
let format = formats
.iter()
.find(|f| f.as_ref().is_some_and(|v| v.drives().eq(max_drives)))
.ok_or(Error::new(ErasureError::ErasureReadQuorum))?;
.ok_or(Error::ErasureReadQuorum)?;
let mut format = format.as_ref().unwrap().clone();
format.erasure.this = Uuid::nil();
@@ -172,28 +188,28 @@ pub fn check_format_erasure_values(
check_format_erasure_value(f)?;
if formats.len() != f.erasure.sets.len() * f.erasure.sets[0].len() {
return Err(Error::msg("formats length for erasure.sets not mtach"));
return Err(Error::other("formats length for erasure.sets not mtach"));
}
if f.erasure.sets[0].len() != set_drive_count {
return Err(Error::msg("erasure set length not match set_drive_count"));
return Err(Error::other("erasure set length not match set_drive_count"));
}
}
Ok(())
}
fn check_format_erasure_value(format: &FormatV3) -> Result<()> {
if format.version != FormatMetaVersion::V1 {
return Err(Error::msg("invalid FormatMetaVersion"));
return Err(Error::other("invalid FormatMetaVersion"));
}
if format.erasure.version != FormatErasureVersion::V3 {
return Err(Error::msg("invalid FormatErasureVersion"));
return Err(Error::other("invalid FormatErasureVersion"));
}
Ok(())
}
// load_format_erasure_all reads format.json from all disks
pub async fn load_format_erasure_all(disks: &[Option<DiskStore>], heal: bool) -> (Vec<Option<FormatV3>>, Vec<Option<Error>>) {
pub async fn load_format_erasure_all(disks: &[Option<DiskStore>], heal: bool) -> (Vec<Option<FormatV3>>, Vec<Option<DiskError>>) {
let mut futures = Vec::with_capacity(disks.len());
let mut datas = Vec::with_capacity(disks.len());
let mut errors = Vec::with_capacity(disks.len());
@@ -203,7 +219,7 @@ pub async fn load_format_erasure_all(disks: &[Option<DiskStore>], heal: bool) ->
if let Some(disk) = disk {
load_format_erasure(disk, heal).await
} else {
Err(Error::new(DiskError::DiskNotFound))
Err(DiskError::DiskNotFound)
}
});
}
@@ -229,15 +245,14 @@ pub async fn load_format_erasure_all(disks: &[Option<DiskStore>], heal: bool) ->
(datas, errors)
}
pub async fn load_format_erasure(disk: &DiskStore, heal: bool) -> Result<FormatV3, Error> {
pub async fn load_format_erasure(disk: &DiskStore, heal: bool) -> disk::error::Result<FormatV3> {
let data = disk
.read_all(RUSTFS_META_BUCKET, FORMAT_CONFIG_FILE)
.await
.map_err(|e| match &e.downcast_ref::<DiskError>() {
Some(DiskError::FileNotFound) => Error::new(DiskError::UnformattedDisk),
Some(DiskError::DiskNotFound) => Error::new(DiskError::UnformattedDisk),
Some(_) => e,
None => e,
.map_err(|e| match e {
DiskError::FileNotFound => DiskError::UnformattedDisk,
DiskError::DiskNotFound => DiskError::UnformattedDisk,
_ => e,
})?;
let mut fm = FormatV3::try_from(data.as_slice())?;
@@ -255,7 +270,7 @@ pub async fn load_format_erasure(disk: &DiskStore, heal: bool) -> Result<FormatV
Ok(fm)
}
async fn save_format_file_all(disks: &[Option<DiskStore>], formats: &[Option<FormatV3>]) -> Vec<Option<Error>> {
async fn save_format_file_all(disks: &[Option<DiskStore>], formats: &[Option<FormatV3>]) -> disk::error::Result<()> {
let mut futures = Vec::with_capacity(disks.len());
for (i, disk) in disks.iter().enumerate() {
@@ -276,12 +291,16 @@ async fn save_format_file_all(disks: &[Option<DiskStore>], formats: &[Option<For
}
}
errors
if let Some(e) = reduce_write_quorum_errs(&errors, &[], disks.len()) {
return Err(e);
}
Ok(())
}
pub async fn save_format_file(disk: &Option<DiskStore>, format: &Option<FormatV3>, heal_id: &str) -> Result<()> {
pub async fn save_format_file(disk: &Option<DiskStore>, format: &Option<FormatV3>, heal_id: &str) -> disk::error::Result<()> {
if disk.is_none() {
return Err(Error::new(DiskError::DiskNotFound));
return Err(DiskError::DiskNotFound);
}
let format = format.as_ref().unwrap();
@@ -311,53 +330,53 @@ pub fn ec_drives_no_config(set_drive_count: usize) -> Result<usize> {
Ok(sc.get_parity_for_sc(storageclass::STANDARD).unwrap_or_default())
}
#[derive(Debug, PartialEq, thiserror::Error)]
pub enum ErasureError {
#[error("erasure read quorum")]
ErasureReadQuorum,
// #[derive(Debug, PartialEq, thiserror::Error)]
// pub enum ErasureError {
// #[error("erasure read quorum")]
// ErasureReadQuorum,
#[error("erasure write quorum")]
_ErasureWriteQuorum,
// #[error("erasure write quorum")]
// _ErasureWriteQuorum,
#[error("not first disk")]
NotFirstDisk,
// #[error("not first disk")]
// NotFirstDisk,
#[error("first disk wiat")]
FirstDiskWait,
// #[error("first disk wiat")]
// FirstDiskWait,
#[error("invalid part id {0}")]
InvalidPart(usize),
}
// #[error("invalid part id {0}")]
// InvalidPart(usize),
// }
impl ErasureError {
pub fn is(&self, err: &Error) -> bool {
if let Some(e) = err.downcast_ref::<ErasureError>() {
return self == e;
}
// impl ErasureError {
// pub fn is(&self, err: &Error) -> bool {
// if let Some(e) = err.downcast_ref::<ErasureError>() {
// return self == e;
// }
false
}
}
// false
// }
// }
impl ErasureError {
pub fn to_u32(&self) -> u32 {
match self {
ErasureError::ErasureReadQuorum => 0x01,
ErasureError::_ErasureWriteQuorum => 0x02,
ErasureError::NotFirstDisk => 0x03,
ErasureError::FirstDiskWait => 0x04,
ErasureError::InvalidPart(_) => 0x05,
}
}
// impl ErasureError {
// pub fn to_u32(&self) -> u32 {
// match self {
// ErasureError::ErasureReadQuorum => 0x01,
// ErasureError::_ErasureWriteQuorum => 0x02,
// ErasureError::NotFirstDisk => 0x03,
// ErasureError::FirstDiskWait => 0x04,
// ErasureError::InvalidPart(_) => 0x05,
// }
// }
pub fn from_u32(error: u32) -> Option<Self> {
match error {
0x01 => Some(ErasureError::ErasureReadQuorum),
0x02 => Some(ErasureError::_ErasureWriteQuorum),
0x03 => Some(ErasureError::NotFirstDisk),
0x04 => Some(ErasureError::FirstDiskWait),
0x05 => Some(ErasureError::InvalidPart(Default::default())),
_ => None,
}
}
}
// pub fn from_u32(error: u32) -> Option<Self> {
// match error {
// 0x01 => Some(ErasureError::ErasureReadQuorum),
// 0x02 => Some(ErasureError::_ErasureWriteQuorum),
// 0x03 => Some(ErasureError::NotFirstDisk),
// 0x04 => Some(ErasureError::FirstDiskWait),
// 0x05 => Some(ErasureError::InvalidPart(Default::default())),
// _ => None,
// }
// }
// }
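With `ErasureError` retired above, the earlier hunks return `Error::ErasureReadQuorum`, `Error::NotFirstDisk`, and `Error::FirstDiskWait` directly, which implies the unified error now carries these variants itself. A hedged sketch of that shape (illustrative; the real enum lives in `crate::error`):

// Hedged sketch: former ErasureError variants folded into the crate-wide Error.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("erasure read quorum")]
    ErasureReadQuorum,
    #[error("not first disk")]
    NotFirstDisk,
    #[error("first disk wait")]
    FirstDiskWait,
    // ... disk, storage, and catch-all variants elided
}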


@@ -1,27 +1,26 @@
use crate::bucket::metadata_sys::get_versioning_config;
use crate::bucket::versioning::VersioningApi;
use crate::cache_value::metacache_set::{list_path_raw, ListPathRawOptions};
use crate::disk::error::{is_all_not_found, is_all_volume_not_found, is_err_eof, DiskError};
use crate::disk::{
DiskInfo, DiskStore, MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry,
MetadataResolutionParams,
use crate::disk::error::DiskError;
use crate::disk::{DiskInfo, DiskStore};
use crate::error::{
is_all_not_found, is_all_volume_not_found, is_err_bucket_not_found, to_object_err, Error, Result, StorageError,
};
use crate::error::clone_err;
use crate::file_meta::merge_file_meta_versions;
use crate::peer::is_reserved_or_invalid_bucket;
use crate::set_disk::SetDisks;
use crate::store::check_list_objs_args;
use crate::store_api::{FileInfo, ListObjectVersionsInfo, ListObjectsInfo, ObjectInfo, ObjectOptions};
use crate::store_err::{is_err_bucket_not_found, to_object_err, StorageError};
use crate::store_api::{ListObjectVersionsInfo, ListObjectsInfo, ObjectInfo, ObjectOptions};
use crate::utils::path::{self, base_dir_from_prefix, SLASH_SEPARATOR};
use crate::StorageAPI;
use crate::{store::ECStore, store_api::ListObjectsV2Info};
use common::error::{Error, Result};
use futures::future::join_all;
use rand::seq::SliceRandom;
use rand::thread_rng;
use rustfs_filemeta::{
merge_file_meta_versions, FileInfo, MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry,
MetadataResolutionParams,
};
use std::collections::HashMap;
use std::io::ErrorKind;
use std::sync::Arc;
use tokio::sync::broadcast::{self, Receiver as B_Receiver};
use tokio::sync::mpsc::{self, Receiver, Sender};
@@ -281,13 +280,13 @@ impl ECStore {
.list_path(&opts)
.await
.unwrap_or_else(|err| MetaCacheEntriesSortedResult {
err: Some(err),
err: Some(err.into()),
..Default::default()
});
if let Some(err) = &list_result.err {
if !is_err_eof(err) {
return Err(to_object_err(list_result.err.unwrap(), vec![bucket, prefix]));
if let Some(err) = list_result.err.clone() {
if err != rustfs_filemeta::Error::Unexpected {
return Err(to_object_err(err.into(), vec![bucket, prefix]));
}
}
@@ -297,11 +296,13 @@ impl ECStore {
// contextCanceled
let mut get_objects = list_result
.entries
.unwrap_or_default()
.file_infos(bucket, prefix, delimiter.clone())
.await;
let mut get_objects = ObjectInfo::from_meta_cache_entries_sorted(
&list_result.entries.unwrap_or_default(),
bucket,
prefix,
delimiter.clone(),
)
.await;
let is_truncated = {
if max_keys > 0 && get_objects.len() > max_keys as usize {
@@ -364,7 +365,7 @@ impl ECStore {
max_keys: i32,
) -> Result<ListObjectVersionsInfo> {
if marker.is_none() && version_marker.is_some() {
return Err(Error::new(StorageError::NotImplemented));
return Err(StorageError::NotImplemented);
}
// if marker set, limit +1
@@ -383,14 +384,14 @@ impl ECStore {
let mut list_result = match self.list_path(&opts).await {
Ok(res) => res,
Err(err) => MetaCacheEntriesSortedResult {
err: Some(err),
err: Some(err.into()),
..Default::default()
},
};
if let Some(err) = &list_result.err {
if !is_err_eof(err) {
return Err(to_object_err(list_result.err.unwrap(), vec![bucket, prefix]));
if let Some(err) = list_result.err.clone() {
if err != rustfs_filemeta::Error::Unexpected {
return Err(to_object_err(err.into(), vec![bucket, prefix]));
}
}
@@ -398,11 +399,13 @@ impl ECStore {
result.forward_past(opts.marker);
}
let mut get_objects = list_result
.entries
.unwrap_or_default()
.file_info_versions(bucket, prefix, delimiter.clone(), version_marker)
.await;
let mut get_objects = ObjectInfo::from_meta_cache_entries_sorted(
&list_result.entries.unwrap_or_default(),
bucket,
prefix,
delimiter.clone(),
)
.await;
let is_truncated = {
if max_keys > 0 && get_objects.len() > max_keys as usize {
@@ -472,16 +475,16 @@ impl ECStore {
if let Some(marker) = &o.marker {
if !o.prefix.is_empty() && !marker.starts_with(&o.prefix) {
return Err(Error::new(std::io::Error::from(ErrorKind::UnexpectedEof)));
return Err(Error::Unexpected);
}
}
if o.limit == 0 {
return Err(Error::new(std::io::Error::from(ErrorKind::UnexpectedEof)));
return Err(Error::Unexpected);
}
if o.prefix.starts_with(SLASH_SEPARATOR) {
return Err(Error::new(std::io::Error::from(ErrorKind::UnexpectedEof)));
return Err(Error::Unexpected);
}
let slash_separator = Some(SLASH_SEPARATOR.to_owned());
@@ -546,12 +549,12 @@ impl ECStore {
match res{
Ok(o) => {
error!("list_path err_rx.recv() ok {:?}", &o);
MetaCacheEntriesSortedResult{ entries: None, err: Some(clone_err(o.as_ref())) }
MetaCacheEntriesSortedResult{ entries: None, err: Some(o.as_ref().clone().into()) }
},
Err(err) => {
error!("list_path err_rx.recv() err {:?}", &err);
MetaCacheEntriesSortedResult{ entries: None, err: Some(Error::new(err)) }
MetaCacheEntriesSortedResult{ entries: None, err: Some(rustfs_filemeta::Error::other(err)) }
},
}
},
@@ -562,7 +565,7 @@ impl ECStore {
};
// cancel call exit spawns
cancel_tx.send(true)?;
cancel_tx.send(true).map_err(Error::other)?;
// wait spawns exit
join_all(vec![job1, job2]).await;
@@ -584,7 +587,7 @@ impl ECStore {
}
if !truncated {
result.err = Some(Error::new(std::io::Error::from(ErrorKind::UnexpectedEof)));
result.err = Some(Error::Unexpected.into());
}
}
@@ -644,7 +647,7 @@ impl ECStore {
if is_all_not_found(&errs) {
if is_all_volume_not_found(&errs) {
return Err(Error::new(DiskError::VolumeNotFound));
return Err(StorageError::VolumeNotFound);
}
return Ok(Vec::new());
@@ -656,11 +659,11 @@ impl ECStore {
for err in errs.iter() {
if let Some(err) = err {
if is_err_eof(err) {
if err == &Error::Unexpected {
continue;
}
return Err(clone_err(err));
return Err(err.clone());
} else {
all_at_eof = false;
continue;
@@ -773,7 +776,7 @@ impl ECStore {
}
})
})),
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<Error>]| {
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<DiskError>]| {
Box::pin({
let value = tx2.clone();
let resolver = resolver.clone();
@@ -814,7 +817,7 @@ impl ECStore {
if !sent_err {
let item = ObjectInfoOrErr {
item: None,
err: Some(err),
err: Some(err.into()),
};
if let Err(err) = result.send(item).await {
@@ -833,7 +836,7 @@ impl ECStore {
if let Some(fiter) = opts.filter {
if fiter(&fi) {
let item = ObjectInfoOrErr {
item: Some(fi.to_object_info(&bucket, &fi.name, {
item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
if let Some(v) = &vcf {
v.versioned(&fi.name)
} else {
@@ -849,7 +852,7 @@ impl ECStore {
}
} else {
let item = ObjectInfoOrErr {
item: Some(fi.to_object_info(&bucket, &fi.name, {
item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
if let Some(v) = &vcf {
v.versioned(&fi.name)
} else {
@@ -871,7 +874,7 @@ impl ECStore {
Err(err) => {
let item = ObjectInfoOrErr {
item: None,
err: Some(err),
err: Some(err.into()),
};
if let Err(err) = result.send(item).await {
@@ -889,7 +892,7 @@ impl ECStore {
if let Some(fiter) = opts.filter {
if fiter(fi) {
let item = ObjectInfoOrErr {
item: Some(fi.to_object_info(&bucket, &fi.name, {
item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
if let Some(v) = &vcf {
v.versioned(&fi.name)
} else {
@@ -905,7 +908,7 @@ impl ECStore {
}
} else {
let item = ObjectInfoOrErr {
item: Some(fi.to_object_info(&bucket, &fi.name, {
item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
if let Some(v) = &vcf {
v.versioned(&fi.name)
} else {
@@ -1013,7 +1016,8 @@ async fn gather_results(
}),
err: None,
})
.await?;
.await
.map_err(Error::other)?;
returned = true;
sender = None;
@@ -1032,9 +1036,10 @@ async fn gather_results(
o: MetaCacheEntries(entrys.clone()),
..Default::default()
}),
err: Some(Error::new(std::io::Error::new(ErrorKind::UnexpectedEof, "Unexpected EOF"))),
err: Some(Error::Unexpected.into()),
})
.await?;
.await
.map_err(Error::other)?;
}
Ok(())
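The `.map_err(Error::other)?` calls added above exist because `tokio::sync::mpsc` send failures surface as the foreign `SendError<T>` type, not the crate's `Error`. A standalone sketch of the same move, using `std::io::Error::other` as a stand-in for the crate constructor:

use tokio::sync::mpsc::Sender;

// SendError<u64> is a foreign error type; fold it into the unified error
// before `?` so the function can keep its single error type.
async fn forward(tx: Sender<u64>, v: u64) -> std::io::Result<()> {
    tx.send(v).await.map_err(std::io::Error::other)
}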
@@ -1073,12 +1078,12 @@ async fn merge_entry_channels(
has_entry = in_channels[0].recv()=>{
if let Some(entry) = has_entry{
// warn!("merge_entry_channels entry {}", &entry.name);
out_channel.send(entry).await?;
out_channel.send(entry).await.map_err(Error::other)?;
} else {
return Ok(())
}
},
_ = rx.recv()=>return Err(Error::msg("cancel")),
_ = rx.recv()=>return Err(Error::other("cancel")),
}
}
}
@@ -1208,7 +1213,7 @@ async fn merge_entry_channels(
if let Some(best_entry) = &best {
if best_entry.name > last {
out_channel.send(best_entry.clone()).await?;
out_channel.send(best_entry.clone()).await.map_err(Error::other)?;
last = best_entry.name.clone();
}
top[best_idx] = None; // Replace entry we just sent
@@ -1291,7 +1296,7 @@ impl SetDisks {
}
})
})),
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<Error>]| {
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<DiskError>]| {
Box::pin({
let value = tx2.clone();
let resolver = resolver.clone();
@@ -1309,6 +1314,7 @@ impl SetDisks {
},
)
.await
.map_err(Error::other)
}
}


@@ -1,12 +1,3 @@
use crate::bucket::error::BucketMetadataError;
use crate::config::error::ConfigError;
use crate::disk::error::DiskError;
use crate::quorum::QuorumError;
use crate::store_err::StorageError;
use crate::store_init::ErasureError;
use common::error::Error;
use protos::proto_gen::node_service::Error as Proto_Error;
pub mod bool_flag;
pub mod crypto;
pub mod ellipses;
@@ -18,103 +9,108 @@ pub mod path;
pub mod wildcard;
pub mod xml;
const ERROR_MODULE_MASK: u32 = 0xFF00;
pub const ERROR_TYPE_MASK: u32 = 0x00FF;
const DISK_ERROR_MASK: u32 = 0x0100;
const STORAGE_ERROR_MASK: u32 = 0x0200;
const BUCKET_METADATA_ERROR_MASK: u32 = 0x0300;
const CONFIG_ERROR_MASK: u32 = 0x04000;
const QUORUM_ERROR_MASK: u32 = 0x0500;
const ERASURE_ERROR_MASK: u32 = 0x0600;
// use crate::bucket::error::BucketMetadataError;
// use crate::disk::error::DiskError;
// use crate::error::StorageError;
// use protos::proto_gen::node_service::Error as Proto_Error;
// error to u8
pub fn error_to_u32(err: &Error) -> u32 {
if let Some(e) = err.downcast_ref::<DiskError>() {
DISK_ERROR_MASK | e.to_u32()
} else if let Some(e) = err.downcast_ref::<StorageError>() {
STORAGE_ERROR_MASK | e.to_u32()
} else if let Some(e) = err.downcast_ref::<BucketMetadataError>() {
BUCKET_METADATA_ERROR_MASK | e.to_u32()
} else if let Some(e) = err.downcast_ref::<ConfigError>() {
CONFIG_ERROR_MASK | e.to_u32()
} else if let Some(e) = err.downcast_ref::<QuorumError>() {
QUORUM_ERROR_MASK | e.to_u32()
} else if let Some(e) = err.downcast_ref::<ErasureError>() {
ERASURE_ERROR_MASK | e.to_u32()
} else {
0
}
}
// const ERROR_MODULE_MASK: u32 = 0xFF00;
// pub const ERROR_TYPE_MASK: u32 = 0x00FF;
// const DISK_ERROR_MASK: u32 = 0x0100;
// const STORAGE_ERROR_MASK: u32 = 0x0200;
// const BUCKET_METADATA_ERROR_MASK: u32 = 0x0300;
// const CONFIG_ERROR_MASK: u32 = 0x04000;
// const QUORUM_ERROR_MASK: u32 = 0x0500;
// const ERASURE_ERROR_MASK: u32 = 0x0600;
pub fn u32_to_error(e: u32) -> Option<Error> {
match e & ERROR_MODULE_MASK {
DISK_ERROR_MASK => DiskError::from_u32(e & ERROR_TYPE_MASK).map(|e| Error::new(e)),
STORAGE_ERROR_MASK => StorageError::from_u32(e & ERROR_TYPE_MASK).map(|e| Error::new(e)),
BUCKET_METADATA_ERROR_MASK => BucketMetadataError::from_u32(e & ERROR_TYPE_MASK).map(|e| Error::new(e)),
CONFIG_ERROR_MASK => ConfigError::from_u32(e & ERROR_TYPE_MASK).map(|e| Error::new(e)),
QUORUM_ERROR_MASK => QuorumError::from_u32(e & ERROR_TYPE_MASK).map(|e| Error::new(e)),
ERASURE_ERROR_MASK => ErasureError::from_u32(e & ERROR_TYPE_MASK).map(|e| Error::new(e)),
_ => None,
}
}
// // error to u8
// pub fn error_to_u32(err: &Error) -> u32 {
// if let Some(e) = err.downcast_ref::<DiskError>() {
// DISK_ERROR_MASK | e.to_u32()
// } else if let Some(e) = err.downcast_ref::<StorageError>() {
// STORAGE_ERROR_MASK | e.to_u32()
// } else if let Some(e) = err.downcast_ref::<BucketMetadataError>() {
// BUCKET_METADATA_ERROR_MASK | e.to_u32()
// } else if let Some(e) = err.downcast_ref::<ConfigError>() {
// CONFIG_ERROR_MASK | e.to_u32()
// } else if let Some(e) = err.downcast_ref::<QuorumError>() {
// QUORUM_ERROR_MASK | e.to_u32()
// } else if let Some(e) = err.downcast_ref::<ErasureError>() {
// ERASURE_ERROR_MASK | e.to_u32()
// } else {
// 0
// }
// }
pub fn err_to_proto_err(err: &Error, msg: &str) -> Proto_Error {
let num = error_to_u32(err);
Proto_Error {
code: num,
error_info: msg.to_string(),
}
}
// pub fn u32_to_error(e: u32) -> Option<Error> {
// match e & ERROR_MODULE_MASK {
// DISK_ERROR_MASK => DiskError::from_u32(e & ERROR_TYPE_MASK).map(|e| Error::new(e)),
// STORAGE_ERROR_MASK => StorageError::from_u32(e & ERROR_TYPE_MASK).map(|e| Error::new(e)),
// BUCKET_METADATA_ERROR_MASK => BucketMetadataError::from_u32(e & ERROR_TYPE_MASK).map(|e| Error::new(e)),
// CONFIG_ERROR_MASK => ConfigError::from_u32(e & ERROR_TYPE_MASK).map(|e| Error::new(e)),
// QUORUM_ERROR_MASK => QuorumError::from_u32(e & ERROR_TYPE_MASK).map(|e| Error::new(e)),
// ERASURE_ERROR_MASK => ErasureError::from_u32(e & ERROR_TYPE_MASK).map(|e| Error::new(e)),
// _ => None,
// }
// }
pub fn proto_err_to_err(err: &Proto_Error) -> Error {
if let Some(e) = u32_to_error(err.code) {
e
} else {
Error::from_string(err.error_info.clone())
}
}
// pub fn err_to_proto_err(err: &Error, msg: &str) -> Proto_Error {
// let num = error_to_u32(err);
// Proto_Error {
// code: num,
// error_info: msg.to_string(),
// }
// }
#[test]
fn test_u32_to_error() {
let error = Error::new(DiskError::FileCorrupt);
let num = error_to_u32(&error);
let new_error = u32_to_error(num);
assert!(new_error.is_some());
assert_eq!(new_error.unwrap().downcast_ref::<DiskError>(), Some(&DiskError::FileCorrupt));
// pub fn proto_err_to_err(err: &Proto_Error) -> Error {
// if let Some(e) = u32_to_error(err.code) {
// e
// } else {
// Error::from_string(err.error_info.clone())
// }
// }
let error = Error::new(StorageError::BucketNotEmpty(Default::default()));
let num = error_to_u32(&error);
let new_error = u32_to_error(num);
assert!(new_error.is_some());
assert_eq!(
new_error.unwrap().downcast_ref::<StorageError>(),
Some(&StorageError::BucketNotEmpty(Default::default()))
);
// #[test]
// fn test_u32_to_error() {
// let error = Error::new(DiskError::FileCorrupt);
// let num = error_to_u32(&error);
// let new_error = u32_to_error(num);
// assert!(new_error.is_some());
// assert_eq!(new_error.unwrap().downcast_ref::<DiskError>(), Some(&DiskError::FileCorrupt));
let error = Error::new(BucketMetadataError::BucketObjectLockConfigNotFound);
let num = error_to_u32(&error);
let new_error = u32_to_error(num);
assert!(new_error.is_some());
assert_eq!(
new_error.unwrap().downcast_ref::<BucketMetadataError>(),
Some(&BucketMetadataError::BucketObjectLockConfigNotFound)
);
// let error = Error::new(StorageError::BucketNotEmpty(Default::default()));
// let num = error_to_u32(&error);
// let new_error = u32_to_error(num);
// assert!(new_error.is_some());
// assert_eq!(
// new_error.unwrap().downcast_ref::<StorageError>(),
// Some(&StorageError::BucketNotEmpty(Default::default()))
// );
let error = Error::new(ConfigError::NotFound);
let num = error_to_u32(&error);
let new_error = u32_to_error(num);
assert!(new_error.is_some());
assert_eq!(new_error.unwrap().downcast_ref::<ConfigError>(), Some(&ConfigError::NotFound));
// let error = Error::new(BucketMetadataError::BucketObjectLockConfigNotFound);
// let num = error_to_u32(&error);
// let new_error = u32_to_error(num);
// assert!(new_error.is_some());
// assert_eq!(
// new_error.unwrap().downcast_ref::<BucketMetadataError>(),
// Some(&BucketMetadataError::BucketObjectLockConfigNotFound)
// );
let error = Error::new(QuorumError::Read);
let num = error_to_u32(&error);
let new_error = u32_to_error(num);
assert!(new_error.is_some());
assert_eq!(new_error.unwrap().downcast_ref::<QuorumError>(), Some(&QuorumError::Read));
// let error = Error::new(ConfigError::NotFound);
// let num = error_to_u32(&error);
// let new_error = u32_to_error(num);
// assert!(new_error.is_some());
// assert_eq!(new_error.unwrap().downcast_ref::<ConfigError>(), Some(&ConfigError::NotFound));
let error = Error::new(ErasureError::ErasureReadQuorum);
let num = error_to_u32(&error);
let new_error = u32_to_error(num);
assert!(new_error.is_some());
assert_eq!(new_error.unwrap().downcast_ref::<ErasureError>(), Some(&ErasureError::ErasureReadQuorum));
}
// let error = Error::new(QuorumError::Read);
// let num = error_to_u32(&error);
// let new_error = u32_to_error(num);
// assert!(new_error.is_some());
// assert_eq!(new_error.unwrap().downcast_ref::<QuorumError>(), Some(&QuorumError::Read));
// let error = Error::new(ErasureError::ErasureReadQuorum);
// let num = error_to_u32(&error);
// let new_error = u32_to_error(num);
// assert!(new_error.is_some());
// assert_eq!(new_error.unwrap().downcast_ref::<ErasureError>(), Some(&ErasureError::ErasureReadQuorum));
// }
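With this numeric-mask module commented out, the node-service handlers later in the diff build proto errors with `err.into()` instead of `err_to_proto_err`, which suggests a `From` impl now owns the conversion. A hedged sketch of what it could look like; `to_u32` here is hypothetical, carried over from the retired mapping:

// Hedged sketch; field names follow the Proto_Error shown above.
impl From<Error> for Proto_Error {
    fn from(err: Error) -> Self {
        Proto_Error {
            code: err.to_u32(), // hypothetical numeric mapping on the unified Error
            error_info: err.to_string(),
        }
    }
}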


@@ -1,4 +1,3 @@
use ecstore::disk::error::clone_disk_err;
use ecstore::disk::error::DiskError;
use policy::policy::Error as PolicyError;
@@ -146,17 +145,17 @@ pub fn is_err_no_such_service_account(err: &common::error::Error) -> bool {
}
}
pub fn clone_err(e: &common::error::Error) -> common::error::Error {
if let Some(e) = e.downcast_ref::<DiskError>() {
clone_disk_err(e)
} else if let Some(e) = e.downcast_ref::<std::io::Error>() {
if let Some(code) = e.raw_os_error() {
common::error::Error::new(std::io::Error::from_raw_os_error(code))
} else {
common::error::Error::new(std::io::Error::new(e.kind(), e.to_string()))
}
} else {
//TODO: Optimize other types
common::error::Error::msg(e.to_string())
}
}
// pub fn clone_err(e: &common::error::Error) -> common::error::Error {
// if let Some(e) = e.downcast_ref::<DiskError>() {
// clone_disk_err(e)
// } else if let Some(e) = e.downcast_ref::<std::io::Error>() {
// if let Some(code) = e.raw_os_error() {
// common::error::Error::new(std::io::Error::from_raw_os_error(code))
// } else {
// common::error::Error::new(std::io::Error::new(e.kind(), e.to_string()))
// }
// } else {
// //TODO: Optimize other types
// common::error::Error::msg(e.to_string())
// }
// }


@@ -9,7 +9,6 @@ use crate::{
},
};
use common::error::{Error, Result};
use ecstore::config::error::is_err_config_not_found;
use ecstore::utils::{crypto::base64_encode, path::path_join_buf};
use madmin::{AccountStatus, AddOrUpdateUserReq, GroupDesc};
use policy::{


@@ -7,6 +7,7 @@ use ecstore::{
disk::{
DeleteOptions, DiskAPI, DiskInfoOptions, DiskStore, FileInfoVersions, ReadMultipleReq, ReadOptions, UpdateMetadataOpts,
},
error::StorageError,
heal::{
data_usage_cache::DataUsageCache,
heal_commands::{get_local_background_heal_status, HealOpts},
@@ -16,7 +17,6 @@ use ecstore::{
peer::{LocalPeerS3Client, PeerS3Client},
store::{all_local_disk_path, find_local_disk},
store_api::{BucketOptions, DeleteBucketOptions, FileInfo, MakeBucketOptions, StorageAPI},
store_err::StorageError,
utils::err_to_proto_err,
};
use futures::{Stream, StreamExt};
@@ -295,7 +295,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(ReadAllResponse {
success: false,
data: Vec::new(),
error: Some(err_to_proto_err(&err, &format!("read all failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -320,7 +320,7 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(WriteAllResponse {
success: false,
error: Some(err_to_proto_err(&err, &format!("write all failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -360,7 +360,7 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(DeleteResponse {
success: false,
error: Some(err_to_proto_err(&err, &format!("delete failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -418,7 +418,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(VerifyFileResponse {
success: false,
check_parts_resp: "".to_string(),
error: Some(err_to_proto_err(&err, &format!("verify file failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -477,7 +477,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(CheckPartsResponse {
success: false,
check_parts_resp: "".to_string(),
error: Some(err_to_proto_err(&err, &format!("check parts failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -511,7 +511,7 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(RenamePartResponse {
success: false,
error: Some(err_to_proto_err(&err, &format!("rename part failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -538,7 +538,7 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(RenameFileResponse {
success: false,
error: Some(err_to_proto_err(&err, &format!("rename file failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -805,7 +805,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(ListDirResponse {
success: false,
volumes: Vec::new(),
error: Some(err_to_proto_err(&err, &format!("list dir failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -937,7 +937,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(RenameDataResponse {
success: false,
rename_data_resp: String::new(),
error: Some(err_to_proto_err(&err, &format!("rename data failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -962,7 +962,7 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(MakeVolumesResponse {
success: false,
error: Some(err_to_proto_err(&err, &format!("make volume failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -986,7 +986,7 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(MakeVolumeResponse {
success: false,
error: Some(err_to_proto_err(&err, &format!("make volume failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1018,7 +1018,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(ListVolumesResponse {
success: false,
volume_infos: Vec::new(),
error: Some(err_to_proto_err(&err, &format!("list volume failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1055,7 +1055,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(StatVolumeResponse {
success: false,
volume_info: String::new(),
error: Some(err_to_proto_err(&err, &format!("state volume failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1080,7 +1080,7 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(DeletePathsResponse {
success: false,
error: Some(err_to_proto_err(&err, &format!("delte paths failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1137,7 +1137,7 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(UpdateMetadataResponse {
success: false,
error: Some(err_to_proto_err(&err, &format!("update metadata failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1177,7 +1177,7 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(WriteMetadataResponse {
success: false,
error: Some(err_to_proto_err(&err, &format!("write metadata failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1233,7 +1233,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(ReadVersionResponse {
success: false,
file_info: String::new(),
error: Some(err_to_proto_err(&err, &format!("read version failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1270,7 +1270,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(ReadXlResponse {
success: false,
raw_file_info: String::new(),
error: Some(err_to_proto_err(&err, &format!("read xl failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1344,7 +1344,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(DeleteVersionResponse {
success: false,
raw_file_info: "".to_string(),
error: Some(err_to_proto_err(&err, &format!("read version failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1418,7 +1418,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(DeleteVersionsResponse {
success: false,
errors: Vec::new(),
error: Some(err_to_proto_err(&err, &format!("delete version failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1469,7 +1469,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(ReadMultipleResponse {
success: false,
read_multiple_resps: Vec::new(),
error: Some(err_to_proto_err(&err, &format!("read multiple failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1494,7 +1494,7 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(DeleteVolumeResponse {
success: false,
error: Some(err_to_proto_err(&err, &format!("delete volume failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1547,7 +1547,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(DiskInfoResponse {
success: false,
disk_info: "".to_string(),
error: Some(err_to_proto_err(&err, &format!("disk info failed: {}", err))),
error: Some(err.into()),
})),
}
} else {
@@ -1634,7 +1634,7 @@ impl Node for NodeService {
success: false,
update: "".to_string(),
data_usage_cache: "".to_string(),
error: Some(err_to_proto_err(&err, &format!("scanner failed: {}", err))),
error: Some(err.into()),
}))
.await
.expect("working rx");

Some files were not shown because too many files have changed in this diff