Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-16 17:20:33 +00:00)

Fix/ilm (#721)

* fix tip remote tier error
* fix transitioned_object
* fix filemeta
* add GCS R2
* add aliyun tencent huaweicloud azure gcs r2 backend tier
* fix signer
* change azure to s3

Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
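For orientation, here is roughly what the expanded tier-backend surface looks like after this change. It is a minimal sketch assembled from the TierConfig construction in the test diff further down; the endpoint and credentials are placeholders, so read it as illustrative rather than as the crate's documented API.

use rustfs_ecstore::tier::tier_config::{TierConfig, TierMinIO, TierType};

// One TierConfig now carries an optional slot per supported remote backend.
// Per the commit message, aliyun/tencent/huaweicloud/azure/gcs/r2 are the
// newly added slots, with Azure served through the S3-compatible path.
fn sample_cold_tier() -> TierConfig {
    TierConfig {
        version: "v1".to_string(),
        tier_type: TierType::MinIO,
        name: "COLDTIER44".to_string(),
        s3: None,
        aliyun: None,
        tencent: None,
        huaweicloud: None,
        azure: None,
        gcs: None,
        r2: None,
        rustfs: None,
        minio: Some(TierMinIO {
            access_key: "minioadmin".to_string(), // placeholder credentials
            secret_key: "minioadmin".to_string(),
            bucket: "mblock2".to_string(),
            endpoint: "http://127.0.0.1:9020".to_string(),
            prefix: "mypre/".to_string(),
            region: "".to_string(),
            ..Default::default()
        }),
    }
}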
This commit is contained in:

.vscode/launch.json (vendored): 11 lines changed
@@ -93,8 +93,15 @@
             "name": "Debug executable target/debug/test",
             "type": "lldb",
             "request": "launch",
-            "program": "${workspaceFolder}/target/debug/deps/lifecycle_integration_test-5eb7590b8f3bea55",
-            "args": [],
+            "program": "${workspaceFolder}/target/debug/deps/lifecycle_integration_test-5915cbfcab491b3b",
+            "args": [
+                "--skip",
+                "test_lifecycle_expiry_basic",
+                "--skip",
+                "test_lifecycle_expiry_deletemarker",
+                //"--skip",
+                //"test_lifecycle_transition_basic",
+            ],
             "cwd": "${workspaceFolder}",
            //"stopAtEntry": false,
            //"preLaunchTask": "cargo build",
Cargo.lock (generated): 484 lines changed
@@ -355,7 +355,7 @@ dependencies = [
  "arrow-schema",
  "chrono",
  "half",
- "indexmap",
+ "indexmap 2.12.0",
  "lexical-core",
  "memchr",
  "num",

@@ -1157,6 +1157,15 @@ dependencies = [
  "num-traits",
 ]
 
+[[package]]
+name = "bincode"
+version = "1.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
+dependencies = [
+ "serde",
+]
+
 [[package]]
 name = "bindgen"
 version = "0.72.1"

@@ -1188,6 +1197,9 @@ name = "bitflags"
 version = "2.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3"
+dependencies = [
+ "serde_core",
+]
 
 [[package]]
 name = "blake2"

@@ -1229,6 +1241,31 @@ dependencies = [
  "hybrid-array",
 ]
 
+[[package]]
+name = "bon"
+version = "3.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebeb9aaf9329dff6ceb65c689ca3db33dbf15f324909c60e4e5eef5701ce31b1"
+dependencies = [
+ "bon-macros",
+ "rustversion",
+]
+
+[[package]]
+name = "bon-macros"
+version = "3.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77e9d642a7e3a318e37c2c9427b5a6a48aa1ad55dcd986f3034ab2239045a645"
+dependencies = [
+ "darling 0.21.3",
+ "ident_case",
+ "prettyplease",
+ "proc-macro2",
+ "quote",
+ "rustversion",
+ "syn 2.0.108",
+]
+
 [[package]]
 name = "brotli"
 version = "8.0.2"

@@ -1463,7 +1500,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a6139a8597ed92cf816dfb33f5dd6cf0bb93a6adc938f11039f371bc5bcd26c3"
 dependencies = [
  "chrono",
- "phf",
+ "phf 0.12.1",
 ]
 
 [[package]]

@@ -2195,7 +2232,7 @@ dependencies = [
  "chrono",
  "half",
  "hashbrown 0.14.5",
- "indexmap",
+ "indexmap 2.12.0",
  "libc",
  "log",
  "object_store",

@@ -2379,7 +2416,7 @@ dependencies = [
  "datafusion-functions-aggregate-common",
  "datafusion-functions-window-common",
  "datafusion-physical-expr-common",
- "indexmap",
+ "indexmap 2.12.0",
  "paste",
  "recursive",
  "serde_json",

@@ -2394,7 +2431,7 @@ checksum = "9096732d0d8862d1950ca70324fe91f9dee3799eeb0db53ef452bdb573484db6"
 dependencies = [
  "arrow",
  "datafusion-common",
- "indexmap",
+ "indexmap 2.12.0",
  "itertools 0.14.0",
  "paste",
 ]

@@ -2551,7 +2588,7 @@ dependencies = [
  "datafusion-expr",
  "datafusion-expr-common",
  "datafusion-physical-expr",
- "indexmap",
+ "indexmap 2.12.0",
  "itertools 0.14.0",
  "log",
  "recursive",

@@ -2574,7 +2611,7 @@ dependencies = [
  "datafusion-physical-expr-common",
  "half",
  "hashbrown 0.14.5",
- "indexmap",
+ "indexmap 2.12.0",
  "itertools 0.14.0",
  "log",
  "parking_lot",

@@ -2654,7 +2691,7 @@ dependencies = [
  "futures",
  "half",
  "hashbrown 0.14.5",
- "indexmap",
+ "indexmap 2.12.0",
  "itertools 0.14.0",
  "log",
  "parking_lot",

@@ -2714,7 +2751,7 @@ dependencies = [
  "bigdecimal",
  "datafusion-common",
  "datafusion-expr",
- "indexmap",
+ "indexmap 2.12.0",
  "log",
  "recursive",
  "regex",

@@ -2923,6 +2960,15 @@ dependencies = [
  "syn 2.0.108",
 ]
 
+[[package]]
+name = "doxygen-rs"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "415b6ec780d34dcf624666747194393603d0373b7141eef01d12ee58881507d9"
+dependencies = [
+ "phf 0.11.3",
+]
+
 [[package]]
 name = "dunce"
 version = "1.0.5"

@@ -3497,6 +3543,213 @@ version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280"
 
+[[package]]
+name = "google-cloud-auth"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c5a0f0ef58bc79d636e95db264939a6f3fd80951f77743f2b7ec55e22171150d"
+dependencies = [
+ "async-trait",
+ "base64 0.22.1",
+ "bon",
+ "google-cloud-gax",
+ "http 1.3.1",
+ "reqwest",
+ "rustc_version",
+ "rustls 0.23.34",
+ "rustls-pemfile 2.2.0",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+ "time",
+ "tokio",
+]
+
+[[package]]
+name = "google-cloud-gax"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "58bc95deae841e35758fa5caba317092f26940135c7184570feb691a1844db08"
+dependencies = [
+ "base64 0.22.1",
+ "bytes",
+ "futures",
+ "google-cloud-rpc",
+ "google-cloud-wkt",
+ "http 1.3.1",
+ "pin-project",
+ "rand 0.9.2",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+ "tokio",
+]
+
+[[package]]
+name = "google-cloud-gax-internal"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7963ef5d9a7e1c2c20138b6c6cbe32dc14ded18d51ba4e8c781c7f5de414dfd1"
+dependencies = [
+ "bytes",
+ "google-cloud-auth",
+ "google-cloud-gax",
+ "google-cloud-rpc",
+ "google-cloud-wkt",
+ "http 1.3.1",
+ "http-body-util",
+ "opentelemetry-semantic-conventions 0.30.0",
+ "percent-encoding",
+ "prost",
+ "prost-types",
+ "reqwest",
+ "rustc_version",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+ "tokio",
+ "tonic",
+ "tonic-prost",
+ "tracing",
+]
+
+[[package]]
+name = "google-cloud-iam-v1"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2f2c6d094d0ed9453de0fba8bb690b0c039a3d056f009d2e6c7909c32a446bb"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "google-cloud-gax",
+ "google-cloud-gax-internal",
+ "google-cloud-type",
+ "google-cloud-wkt",
+ "lazy_static",
+ "reqwest",
+ "serde",
+ "serde_json",
+ "serde_with",
+ "tracing",
+]
+
+[[package]]
+name = "google-cloud-longrunning"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69debfcc085fc9588e8d90ed27a2ae500f636f6d7d08ee7e8cd62992ca164d68"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "google-cloud-gax",
+ "google-cloud-gax-internal",
+ "google-cloud-rpc",
+ "google-cloud-wkt",
+ "lazy_static",
+ "reqwest",
+ "serde",
+ "serde_json",
+ "serde_with",
+ "tracing",
+]
+
+[[package]]
+name = "google-cloud-lro"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e9d7432f793f49c9d556da45995f39539dcecdc2361de113b05b0f7bbed73072"
+dependencies = [
+ "google-cloud-gax",
+ "google-cloud-longrunning",
+ "google-cloud-rpc",
+ "google-cloud-wkt",
+ "serde",
+ "tokio",
+]
+
+[[package]]
+name = "google-cloud-rpc"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5b655e3540a78e18fd753ebd8f11e068210a3fa392892370f932ffcc8774346"
+dependencies = [
+ "bytes",
+ "google-cloud-wkt",
+ "serde",
+ "serde_json",
+ "serde_with",
+]
+
+[[package]]
+name = "google-cloud-storage"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d326d5fa84cc77e4af8d070f46f040abfd043eb72b4ba312756102da802d2fc"
+dependencies = [
+ "async-trait",
+ "base64 0.22.1",
+ "bytes",
+ "crc32c",
+ "futures",
+ "google-cloud-auth",
+ "google-cloud-gax",
+ "google-cloud-gax-internal",
+ "google-cloud-iam-v1",
+ "google-cloud-longrunning",
+ "google-cloud-lro",
+ "google-cloud-rpc",
+ "google-cloud-type",
+ "google-cloud-wkt",
+ "http 1.3.1",
+ "http-body 1.0.1",
+ "hyper 1.7.0",
+ "lazy_static",
+ "md5",
+ "percent-encoding",
+ "pin-project",
+ "prost",
+ "prost-types",
+ "reqwest",
+ "serde",
+ "serde_json",
+ "serde_with",
+ "sha2 0.10.9",
+ "thiserror 2.0.17",
+ "tokio",
+ "tonic",
+ "tracing",
+ "uuid",
+]
+
+[[package]]
+name = "google-cloud-type"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "290760412b63cd266376273e4fbeb13afaa4bc7dadd5340786c916866139e14c"
+dependencies = [
+ "bytes",
+ "google-cloud-wkt",
+ "serde",
+ "serde_json",
+ "serde_with",
+]
+
+[[package]]
+name = "google-cloud-wkt"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02931df6af9beda1c852bbbbe5f7b6ba6ae5e4cd49c029fa0ca2cecc787cd9b1"
+dependencies = [
+ "base64 0.22.1",
+ "bytes",
+ "serde",
+ "serde_json",
+ "serde_with",
+ "thiserror 2.0.17",
+ "time",
+ "url",
+]
+
 [[package]]
 name = "group"
 version = "0.12.1"

@@ -3531,7 +3784,7 @@ dependencies = [
  "futures-sink",
  "futures-util",
  "http 0.2.12",
- "indexmap",
+ "indexmap 2.12.0",
  "slab",
  "tokio",
  "tokio-util",

@@ -3550,7 +3803,7 @@ dependencies = [
  "futures-core",
  "futures-sink",
  "http 1.3.1",
- "indexmap",
+ "indexmap 2.12.0",
  "slab",
  "tokio",
  "tokio-util",

@@ -3569,6 +3822,12 @@ dependencies = [
  "zerocopy",
 ]
 
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
 [[package]]
 name = "hashbrown"
 version = "0.14.5"

@@ -3609,6 +3868,44 @@ version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
 
+[[package]]
+name = "heed"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a56c94661ddfb51aa9cdfbf102cfcc340aa69267f95ebccc4af08d7c530d393"
+dependencies = [
+ "bitflags 2.10.0",
+ "byteorder",
+ "heed-traits",
+ "heed-types",
+ "libc",
+ "lmdb-master-sys",
+ "once_cell",
+ "page_size",
+ "serde",
+ "synchronoise",
+ "url",
+]
+
+[[package]]
+name = "heed-traits"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb3130048d404c57ce5a1ac61a903696e8fcde7e8c2991e9fcfc1f27c3ef74ff"
+
+[[package]]
+name = "heed-types"
+version = "0.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13c255bdf46e07fb840d120a36dcc81f385140d7191c76a7391672675c01a55d"
+dependencies = [
+ "bincode",
+ "byteorder",
+ "heed-traits",
+ "serde",
+ "serde_json",
+]
+
 [[package]]
 name = "hermit-abi"
 version = "0.5.2"

@@ -4014,6 +4311,17 @@ dependencies = [
  "icu_properties",
 ]
 
+[[package]]
+name = "indexmap"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
+dependencies = [
+ "autocfg",
+ "hashbrown 0.12.3",
+ "serde",
+]
+
 [[package]]
 name = "indexmap"
 version = "2.12.0"

@@ -4022,6 +4330,8 @@ checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f"
 dependencies = [
  "equivalent",
  "hashbrown 0.16.0",
+ "serde",
+ "serde_core",
 ]
 
 [[package]]

@@ -4031,7 +4341,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88"
 dependencies = [
  "ahash",
- "indexmap",
+ "indexmap 2.12.0",
  "is-terminal",
  "itoa",
  "log",

@@ -4372,6 +4682,17 @@ version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"
 
+[[package]]
+name = "lmdb-master-sys"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "864808e0b19fb6dd3b70ba94ee671b82fce17554cf80aeb0a155c65bb08027df"
+dependencies = [
+ "cc",
+ "doxygen-rs",
+ "libc",
+]
+
 [[package]]
 name = "local-ip-address"
 version = "0.6.5"

@@ -5090,6 +5411,12 @@ dependencies = [
  "tonic-prost",
 ]
 
+[[package]]
+name = "opentelemetry-semantic-conventions"
+version = "0.30.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83d059a296a47436748557a353c5e6c5705b9470ef6c95cfc52c21a8814ddac2"
+
 [[package]]
 name = "opentelemetry-semantic-conventions"
 version = "0.31.0"

@@ -5193,6 +5520,16 @@ dependencies = [
  "sha2 0.10.9",
 ]
 
+[[package]]
+name = "page_size"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
 [[package]]
 name = "parking"
 version = "2.2.1"

@@ -5342,7 +5679,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772"
 dependencies = [
  "fixedbitset",
- "indexmap",
+ "indexmap 2.12.0",
 ]
 
 [[package]]

@@ -5353,17 +5690,59 @@ checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455"
 dependencies = [
  "fixedbitset",
  "hashbrown 0.15.5",
- "indexmap",
+ "indexmap 2.12.0",
  "serde",
 ]
 
+[[package]]
+name = "phf"
+version = "0.11.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078"
+dependencies = [
+ "phf_macros",
+ "phf_shared 0.11.3",
+]
+
 [[package]]
 name = "phf"
 version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7"
 dependencies = [
- "phf_shared",
+ "phf_shared 0.12.1",
 ]
 
+[[package]]
+name = "phf_generator"
+version = "0.11.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d"
+dependencies = [
+ "phf_shared 0.11.3",
+ "rand 0.8.5",
+]
+
+[[package]]
+name = "phf_macros"
+version = "0.11.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216"
+dependencies = [
+ "phf_generator",
+ "phf_shared 0.11.3",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.108",
+]
+
+[[package]]
+name = "phf_shared"
+version = "0.11.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5"
+dependencies = [
+ "siphasher",
+]
+
 [[package]]

@@ -5680,7 +6059,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b4aeaa1f2460f1d348eeaeed86aea999ce98c1bded6f089ff8514c9d9dbdc973"
 dependencies = [
  "anyhow",
- "indexmap",
+ "indexmap 2.12.0",
  "log",
  "protobuf",
  "protobuf-support",

@@ -6041,6 +6420,7 @@ dependencies = [
  "js-sys",
  "log",
  "mime",
+ "mime_guess",
  "percent-encoding",
  "pin-project-lite",
  "quinn",

@@ -6120,7 +6500,7 @@ dependencies = [
  "paste",
  "pin-project-lite",
  "rmcp-macros",
- "schemars",
+ "schemars 1.0.4",
  "serde",
  "serde_json",
  "thiserror 2.0.17",

@@ -6353,6 +6733,7 @@ dependencies = [
  "async-trait",
  "chrono",
  "futures",
+ "heed",
  "rand 0.9.2",
  "reqwest",
  "rustfs-common",

@@ -6466,6 +6847,7 @@ dependencies = [
  "async-channel",
  "async-recursion",
  "async-trait",
+ "aws-config",
  "aws-credential-types",
  "aws-sdk-s3",
  "aws-smithy-types",

@@ -6480,6 +6862,9 @@ dependencies = [
  "flatbuffers",
  "futures",
  "glob",
+ "google-cloud-auth",
+ "google-cloud-storage",
+ "heed",
  "hex-simd",
  "hmac 0.12.1",
  "http 1.3.1",

@@ -6517,6 +6902,7 @@ dependencies = [
  "s3s",
  "serde",
  "serde_json",
+ "serde_urlencoded",
  "sha1 0.10.6",
  "sha2 0.10.9",
  "shadow-rs",

@@ -6656,7 +7042,7 @@ dependencies = [
  "clap",
  "mime_guess",
  "rmcp",
- "schemars",
+ "schemars 1.0.4",
  "serde",
  "serde_json",
  "tokio",

@@ -6706,7 +7092,7 @@ dependencies = [
  "opentelemetry 0.31.0",
  "opentelemetry-appender-tracing",
  "opentelemetry-otlp",
- "opentelemetry-semantic-conventions",
+ "opentelemetry-semantic-conventions 0.31.0",
  "opentelemetry-stdout",
  "opentelemetry_sdk 0.31.0",
  "rustfs-config",

@@ -7193,6 +7579,18 @@ dependencies = [
  "windows-sys 0.61.2",
 ]
 
+[[package]]
+name = "schemars"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f"
+dependencies = [
+ "dyn-clone",
+ "ref-cast",
+ "serde",
+ "serde_json",
+]
+
 [[package]]
 name = "schemars"
 version = "1.0.4"

@@ -7438,6 +7836,37 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "serde_with"
+version = "3.15.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa66c845eee442168b2c8134fec70ac50dc20e760769c8ba0ad1319ca1959b04"
+dependencies = [
+ "base64 0.22.1",
+ "chrono",
+ "hex",
+ "indexmap 1.9.3",
+ "indexmap 2.12.0",
+ "schemars 0.9.0",
+ "schemars 1.0.4",
+ "serde_core",
+ "serde_json",
+ "serde_with_macros",
+ "time",
+]
+
+[[package]]
+name = "serde_with_macros"
+version = "3.15.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b91a903660542fced4e99881aa481bdbaec1634568ee02e0b8bd57c64cb38955"
+dependencies = [
+ "darling 0.21.3",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.108",
+]
+
 [[package]]
 name = "serial_test"
 version = "3.2.0"

@@ -7982,6 +8411,15 @@ dependencies = [
  "futures-core",
 ]
 
+[[package]]
+name = "synchronoise"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3dbc01390fc626ce8d1cffe3376ded2b72a11bb70e1c75f404a210e4daa4def2"
+dependencies = [
+ "crossbeam-queue",
+]
+
 [[package]]
 name = "synstructure"
 version = "0.12.6"

@@ -8385,7 +8823,7 @@ version = "0.22.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
 dependencies = [
- "indexmap",
+ "indexmap 2.12.0",
  "serde",
  "serde_spanned",
  "toml_datetime",

@@ -8419,9 +8857,11 @@ dependencies = [
  "hyper-util",
  "percent-encoding",
  "pin-project",
+ "rustls-native-certs 0.8.2",
  "socket2 0.6.1",
  "sync_wrapper",
  "tokio",
  "tokio-rustls 0.26.4",
+ "tokio-stream",
  "tower",
  "tower-layer",

@@ -8476,7 +8916,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"
 dependencies = [
  "futures-core",
  "futures-util",
- "indexmap",
+ "indexmap 2.12.0",
  "pin-project-lite",
  "slab",
  "sync_wrapper",

@@ -9589,7 +10029,7 @@ dependencies = [
  "flate2",
  "getrandom 0.3.4",
  "hmac 0.12.1",
- "indexmap",
+ "indexmap 2.12.0",
  "lzma-rust2",
  "memchr",
  "pbkdf2",
@@ -40,3 +40,4 @@ serde_json = { workspace = true }
 serial_test = { workspace = true }
 tracing-subscriber = { workspace = true }
 tempfile = { workspace = true }
+heed = "0.22.0"
crates/ahm/tests/lifecycle_cache_test.rs (new file): 508 lines
@@ -0,0 +1,508 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use heed::byteorder::BigEndian;
use heed::types::*;
use heed::{BoxedError, BytesDecode, BytesEncode, Database, DatabaseFlags, Env, EnvOpenOptions};
use rustfs_ahm::scanner::local_scan::{self, LocalObjectRecord, LocalScanOutcome};
use rustfs_ecstore::{
    disk::endpoint::Endpoint,
    endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
    store::ECStore,
    store_api::{MakeBucketOptions, ObjectIO, ObjectInfo, ObjectOptions, PutObjReader, StorageAPI},
};
use serial_test::serial;
use std::borrow::Cow;
use std::sync::Once;
use std::sync::OnceLock;
use std::{path::PathBuf, sync::Arc};
use tokio::fs;
use tokio_util::sync::CancellationToken;
use tracing::warn;
use tracing::{debug, info};
//use heed_traits::Comparator;
use time::OffsetDateTime;
use uuid::Uuid;

static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>)> = OnceLock::new();
static INIT: Once = Once::new();

static _LIFECYCLE_EXPIRY_CURRENT_DAYS: i32 = 1;
static _LIFECYCLE_EXPIRY_NONCURRENT_DAYS: i32 = 1;
static _LIFECYCLE_TRANSITION_CURRENT_DAYS: i32 = 1;
static _LIFECYCLE_TRANSITION_NONCURRENT_DAYS: i32 = 1;
static GLOBAL_LMDB_ENV: OnceLock<Env> = OnceLock::new();
static GLOBAL_LMDB_DB: OnceLock<Database<I64<BigEndian>, LifecycleContentCodec>> = OnceLock::new();

fn init_tracing() {
    INIT.call_once(|| {
        let _ = tracing_subscriber::fmt::try_init();
    });
}

/// Test helper: Create a test environment with an ECStore
async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
    init_tracing();

    // Fast path: already initialized, just clone and return
    if let Some((paths, ecstore)) = GLOBAL_ENV.get() {
        return (paths.clone(), ecstore.clone());
    }

    // Create a temp dir holding 4 disks, under a unique base dir
    let test_base_dir = format!("/tmp/rustfs_ahm_lifecyclecache_test_{}", uuid::Uuid::new_v4());
    let temp_dir = std::path::PathBuf::from(&test_base_dir);
    if temp_dir.exists() {
        fs::remove_dir_all(&temp_dir).await.ok();
    }
    fs::create_dir_all(&temp_dir).await.unwrap();

    // Create 4 disk dirs
    let disk_paths = vec![
        temp_dir.join("disk1"),
        temp_dir.join("disk2"),
        temp_dir.join("disk3"),
        temp_dir.join("disk4"),
    ];

    for disk_path in &disk_paths {
        fs::create_dir_all(disk_path).await.unwrap();
    }

    // Create EndpointServerPools
    let mut endpoints = Vec::new();
    for (i, disk_path) in disk_paths.iter().enumerate() {
        let mut endpoint = Endpoint::try_from(disk_path.to_str().unwrap()).unwrap();
        // Set the correct indices
        endpoint.set_pool_index(0);
        endpoint.set_set_index(0);
        endpoint.set_disk_index(i);
        endpoints.push(endpoint);
    }

    let pool_endpoints = PoolEndpoints {
        legacy: false,
        set_count: 1,
        drives_per_set: 4,
        endpoints: Endpoints::from(endpoints),
        cmd_line: "test".to_string(),
        platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
    };

    let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);

    // Format disks (only the first time)
    rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await.unwrap();

    // Create the ECStore with dynamic port 0 (let the OS assign) or fixed 9002 if free
    let port = 9002; // for simplicity
    let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
    let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
        .await
        .unwrap();

    // Init the bucket metadata system
    let buckets_list = ecstore
        .list_bucket(&rustfs_ecstore::store_api::BucketOptions {
            no_metadata: true,
            ..Default::default()
        })
        .await
        .unwrap();
    let buckets = buckets_list.into_iter().map(|v| v.name).collect();
    rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys(ecstore.clone(), buckets).await;

    // LMDB env
    // User home directory
    /*if let Ok(home_dir) = env::var("HOME").or_else(|_| env::var("USERPROFILE")) {
        let mut path = PathBuf::from(home_dir);
        path.push(format!(".{DEFAULT_LOG_FILENAME}"));
        path.push(DEFAULT_LOG_DIR);
        if ensure_directory_writable(&path) {
            //return path;
        }
    }*/
    let test_lmdb_lifecycle_dir = "/tmp/lmdb_lifecycle".to_string();
    let temp_dir = std::path::PathBuf::from(&test_lmdb_lifecycle_dir);
    if temp_dir.exists() {
        fs::remove_dir_all(&temp_dir).await.ok();
    }
    fs::create_dir_all(&temp_dir).await.unwrap();
    let lmdb_env = unsafe { EnvOpenOptions::new().max_dbs(100).open(&test_lmdb_lifecycle_dir).unwrap() };
    let bucket_name = format!("test-lc-cache-{}", "00000");
    let mut wtxn = lmdb_env.write_txn().unwrap();
    let db = match lmdb_env
        .database_options()
        .name(&format!("bucket_{}", bucket_name))
        .types::<I64<BigEndian>, LifecycleContentCodec>()
        .flags(DatabaseFlags::DUP_SORT)
        //.dup_sort_comparator::<>()
        .create(&mut wtxn)
    {
        Ok(db) => db,
        Err(err) => {
            panic!("lmdb error: {}", err);
        }
    };
    let _ = wtxn.commit();
    let _ = GLOBAL_LMDB_ENV.set(lmdb_env);
    let _ = GLOBAL_LMDB_DB.set(db);

    // Store in the global once lock
    let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone()));

    (disk_paths, ecstore)
}
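// How the pieces above fit together (a reading of this setup, offered as a
// sketch rather than as documented behavior): expiry timestamps are the LMDB
// keys, stored as I64<BigEndian> so LMDB's byte-wise key order equals numeric
// timestamp order, and the database is opened with DatabaseFlags::DUP_SORT so
// one timestamp key can hold several LifecycleContent values. Draining
// everything already due then becomes a bounded range scan with heed's
// `Database::range`, e.g.:
//
//     let rtxn = lmdb_env.read_txn()?;
//     let now = OffsetDateTime::now_utc().unix_timestamp();
//     for entry in db.range(&rtxn, &(i64::MIN..=now))? {
//         let (due_at, content) = entry?;
//         // expire or transition `content.object_name` here
//     }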

/// Test helper: Create a test bucket
#[allow(dead_code)]
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
    (**ecstore)
        .make_bucket(bucket_name, &Default::default())
        .await
        .expect("Failed to create test bucket");
    info!("Created test bucket: {}", bucket_name);
}

/// Test helper: Create a test bucket with object lock and versioning enabled
async fn create_test_lock_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
    (**ecstore)
        .make_bucket(
            bucket_name,
            &MakeBucketOptions {
                lock_enabled: true,
                versioning_enabled: true,
                ..Default::default()
            },
        )
        .await
        .expect("Failed to create test bucket");
    info!("Created test bucket: {}", bucket_name);
}

/// Test helper: Upload a test object
async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str, data: &[u8]) {
    let mut reader = PutObjReader::from_vec(data.to_vec());
    let object_info = (**ecstore)
        .put_object(bucket, object, &mut reader, &ObjectOptions::default())
        .await
        .expect("Failed to upload test object");

    println!("object_info1: {:?}", object_info);

    info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
}

/// Test helper: Check whether an object exists
async fn object_exists(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
    match (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
        Ok(info) => !info.delete_marker,
        Err(_) => false,
    }
}

fn ns_to_offset_datetime(ns: i128) -> Option<OffsetDateTime> {
    OffsetDateTime::from_unix_timestamp_nanos(ns).ok()
}

fn convert_record_to_object_info(record: &LocalObjectRecord) -> ObjectInfo {
    let usage = &record.usage;

    ObjectInfo {
        bucket: usage.bucket.clone(),
        name: usage.object.clone(),
        size: usage.total_size as i64,
        delete_marker: !usage.has_live_object && usage.delete_markers_count > 0,
        mod_time: usage.last_modified_ns.and_then(ns_to_offset_datetime),
        ..Default::default()
    }
}

#[allow(dead_code)]
fn to_object_info(
    bucket: &str,
    object: &str,
    total_size: i64,
    delete_marker: bool,
    mod_time: OffsetDateTime,
    version_id: &str,
) -> ObjectInfo {
    ObjectInfo {
        bucket: bucket.to_string(),
        name: object.to_string(),
        size: total_size,
        delete_marker,
        mod_time: Some(mod_time),
        version_id: Some(Uuid::parse_str(version_id).unwrap()),
        ..Default::default()
    }
}

#[derive(Debug, PartialEq, Eq)]
enum LifecycleType {
    ExpiryCurrent,
    ExpiryNoncurrent,
    TransitionCurrent,
    TransitionNoncurrent,
}

#[derive(Debug, PartialEq, Eq)]
pub struct LifecycleContent {
    ver_no: u8,
    ver_id: String,
    mod_time: OffsetDateTime,
    type_: LifecycleType,
    object_name: String,
}
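// Byte layout of the records handled by LifecycleContentCodec below (derived
// from its encode/decode bodies; offsets are fixed because a canonical UUID
// string is exactly 36 bytes):
//
//     [0]       ver_no       u8
//     [1..37]   ver_id       36-byte UUID string (or the "zzzz..." sentinel)
//     [37..45]  mod_time     i64 unix timestamp, big-endian
//     [45]      type_        tag 0..=3, one per LifecycleType variant
//     [46..]    object_name  UTF-8, remainder of the value
//
// Two caveats worth noting: the encoder writes ver_id at whatever length the
// string actually has, so anything other than 36 bytes silently shifts every
// later field, and the decoder reads strings with from_utf8_unchecked, which
// trusts that the stored bytes are valid UTF-8.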

pub struct LifecycleContentCodec;

impl BytesEncode<'_> for LifecycleContentCodec {
    type EItem = LifecycleContent;

    fn bytes_encode(lcc: &Self::EItem) -> Result<Cow<'_, [u8]>, BoxedError> {
        let (ver_no_byte, ver_id_bytes, mod_timestamp_bytes, type_byte, object_name_bytes) = match lcc {
            LifecycleContent {
                ver_no,
                ver_id,
                mod_time,
                type_: LifecycleType::ExpiryCurrent,
                object_name,
            } => (
                ver_no,
                ver_id.clone().into_bytes(),
                mod_time.unix_timestamp().to_be_bytes(),
                0,
                object_name.clone().into_bytes(),
            ),
            LifecycleContent {
                ver_no,
                ver_id,
                mod_time,
                type_: LifecycleType::ExpiryNoncurrent,
                object_name,
            } => (
                ver_no,
                ver_id.clone().into_bytes(),
                mod_time.unix_timestamp().to_be_bytes(),
                1,
                object_name.clone().into_bytes(),
            ),
            LifecycleContent {
                ver_no,
                ver_id,
                mod_time,
                type_: LifecycleType::TransitionCurrent,
                object_name,
            } => (
                ver_no,
                ver_id.clone().into_bytes(),
                mod_time.unix_timestamp().to_be_bytes(),
                2,
                object_name.clone().into_bytes(),
            ),
            LifecycleContent {
                ver_no,
                ver_id,
                mod_time,
                type_: LifecycleType::TransitionNoncurrent,
                object_name,
            } => (
                ver_no,
                ver_id.clone().into_bytes(),
                mod_time.unix_timestamp().to_be_bytes(),
                3,
                object_name.clone().into_bytes(),
            ),
        };

        let mut output = Vec::<u8>::new();
        output.push(*ver_no_byte);
        output.extend_from_slice(&ver_id_bytes);
        output.extend_from_slice(&mod_timestamp_bytes);
        output.push(type_byte);
        output.extend_from_slice(&object_name_bytes);
        Ok(Cow::Owned(output))
    }
}

impl<'a> BytesDecode<'a> for LifecycleContentCodec {
    type DItem = LifecycleContent;

    fn bytes_decode(bytes: &'a [u8]) -> Result<Self::DItem, BoxedError> {
        use std::mem::size_of;

        let ver_no = match bytes.get(..size_of::<u8>()) {
            Some(bytes) => bytes.try_into().map(u8::from_be_bytes).unwrap(),
            None => return Err("invalid LifecycleContent: cannot extract ver_no".into()),
        };

        let ver_id = match bytes.get(size_of::<u8>()..(36 + 1)) {
            Some(bytes) => unsafe { std::str::from_utf8_unchecked(bytes).to_string() },
            None => return Err("invalid LifecycleContent: cannot extract ver_id".into()),
        };

        let mod_timestamp = match bytes.get((36 + 1)..(size_of::<i64>() + 36 + 1)) {
            Some(bytes) => bytes.try_into().map(i64::from_be_bytes).unwrap(),
            None => return Err("invalid LifecycleContent: cannot extract mod_time timestamp".into()),
        };

        let type_ = match bytes.get(size_of::<i64>() + 36 + 1) {
            Some(&0) => LifecycleType::ExpiryCurrent,
            Some(&1) => LifecycleType::ExpiryNoncurrent,
            Some(&2) => LifecycleType::TransitionCurrent,
            Some(&3) => LifecycleType::TransitionNoncurrent,
            Some(_) => return Err("invalid LifecycleContent: invalid LifecycleType".into()),
            None => return Err("invalid LifecycleContent: cannot extract LifecycleType".into()),
        };

        let object_name = match bytes.get((size_of::<i64>() + 36 + 1 + 1)..) {
            Some(bytes) => unsafe { std::str::from_utf8_unchecked(bytes).to_string() },
            None => return Err("invalid LifecycleContent: cannot extract object_name".into()),
        };

        Ok(LifecycleContent {
            ver_no,
            ver_id,
            mod_time: OffsetDateTime::from_unix_timestamp(mod_timestamp).unwrap(),
            type_,
            object_name,
        })
    }
}
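// A round-trip sanity check for the codec (a sketch, not one of the committed
// tests):
//
//     let rec = LifecycleContent {
//         ver_no: 0,
//         ver_id: "zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz".to_string(),
//         mod_time: OffsetDateTime::from_unix_timestamp(0).unwrap(),
//         type_: LifecycleType::ExpiryCurrent,
//         object_name: "test/object.txt".to_string(),
//     };
//     let bytes = LifecycleContentCodec::bytes_encode(&rec).unwrap();
//     assert_eq!(LifecycleContentCodec::bytes_decode(&bytes).unwrap(), rec);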

mod serial_tests {
    use super::*;

    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    #[serial]
    //#[ignore]
    async fn test_lifecycle_cache_build() {
        let (_disk_paths, ecstore) = setup_test_env().await;

        // Create test bucket and object
        let suffix = uuid::Uuid::new_v4().simple().to_string();
        let bucket_name = format!("test-lc-cache-{}", &suffix[..8]);
        let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
        let test_data = b"Hello, this is test data for lifecycle expiry!";

        create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
        upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;

        // Verify the object exists initially
        assert!(object_exists(&ecstore, bucket_name.as_str(), object_name).await);
        println!("✅ Object exists before lifecycle processing");

        let scan_outcome = match local_scan::scan_and_persist_local_usage(ecstore.clone()).await {
            Ok(outcome) => outcome,
            Err(err) => {
                warn!("Local usage scan failed: {}", err);
                LocalScanOutcome::default()
            }
        };
        let bucket_objects_map = &scan_outcome.bucket_objects;

        let records = match bucket_objects_map.get(&bucket_name) {
            Some(records) => records,
            None => {
                debug!("No local snapshot entries found for bucket {}; skipping lifecycle/integrity", bucket_name);
                &vec![]
            }
        };

        if let Some(lmdb_env) = GLOBAL_LMDB_ENV.get() {
            if let Some(lmdb) = GLOBAL_LMDB_DB.get() {
                let mut wtxn = lmdb_env.write_txn().unwrap();

                /*if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
                    if let Ok(object_info) = ecstore
                        .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
                        .await
                    {
                        let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
                            &lc_config,
                            None,
                            None,
                            &object_info,
                        )
                        .await;

                        rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
                            ecstore.clone(),
                            &object_info,
                            &event,
                            &rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
                        )
                        .await;

                        expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
                    }
                }*/

                for record in records {
                    if !record.usage.has_live_object {
                        continue;
                    }

                    let object_info = convert_record_to_object_info(record);
                    println!("object_info2: {:?}", object_info);
                    let mod_time = object_info.mod_time.unwrap_or(OffsetDateTime::now_utc());
                    let expiry_time = rustfs_ecstore::bucket::lifecycle::lifecycle::expected_expiry_time(mod_time, 1);

                    let version_id = if let Some(version_id) = object_info.version_id {
                        version_id.to_string()
                    } else {
                        "zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz".to_string()
                    };

                    lmdb.put(
                        &mut wtxn,
                        &expiry_time.unix_timestamp(),
                        &LifecycleContent {
                            ver_no: 0,
                            ver_id: version_id,
                            mod_time,
                            type_: LifecycleType::TransitionNoncurrent,
                            object_name: object_info.name,
                        },
                    )
                    .unwrap();
                }

                wtxn.commit().unwrap();

                let mut wtxn = lmdb_env.write_txn().unwrap();
                let iter = lmdb.iter_mut(&mut wtxn).unwrap();
                //let _ = unsafe { iter.del_current().unwrap() };
                for row in iter {
                    if let Ok(ref elm) = row {
                        let LifecycleContent {
                            ver_no,
                            ver_id,
                            mod_time,
                            type_,
                            object_name,
                        } = &elm.1;
                        println!("cache row:{} {} {} {:?} {}", ver_no, ver_id, mod_time, type_, object_name);
                    }
                    println!("row:{:?}", row);
                }
                //drop(iter);
                wtxn.commit().unwrap();
            }
        }

        println!("Lifecycle cache test completed");
    }
}
@@ -18,9 +18,9 @@ use rustfs_ecstore::{
     bucket::metadata_sys,
     disk::endpoint::Endpoint,
     endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
+    global::GLOBAL_TierConfigMgr,
     store::ECStore,
     store_api::{MakeBucketOptions, ObjectIO, ObjectOptions, PutObjReader, StorageAPI},
-    tier::tier::TierConfigMgr,
     tier::tier_config::{TierConfig, TierMinIO, TierType},
 };
 use serial_test::serial;

@@ -28,14 +28,11 @@ use std::sync::Once;
 use std::sync::OnceLock;
 use std::{path::PathBuf, sync::Arc, time::Duration};
 use tokio::fs;
-use tokio::sync::RwLock;
 use tokio_util::sync::CancellationToken;
-use tracing::warn;
-use tracing::{debug, info};
+use tracing::info;
 
 static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>)> = OnceLock::new();
 static INIT: Once = Once::new();
-static GLOBAL_TIER_CONFIG_MGR: OnceLock<Arc<RwLock<TierConfigMgr>>> = OnceLock::new();
 
 fn init_tracing() {
     INIT.call_once(|| {

@@ -121,13 +118,11 @@ async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
     // Store in global once lock
     let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone()));
 
-    let _ = GLOBAL_TIER_CONFIG_MGR.set(TierConfigMgr::new());
-
     (disk_paths, ecstore)
 }
 
 /// Test helper: Create a test bucket
-async fn _create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
+async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
     (**ecstore)
         .make_bucket(bucket_name, &Default::default())
         .await

@@ -220,7 +215,7 @@ async fn set_bucket_lifecycle_transition(bucket_name: &str) -> Result<(), Box<dy
             </Filter>
             <Transition>
                 <Days>0</Days>
-                <StorageClass>COLDTIER</StorageClass>
+                <StorageClass>COLDTIER44</StorageClass>
             </Transition>
         </Rule>
         <Rule>

@@ -231,7 +226,7 @@ async fn set_bucket_lifecycle_transition(bucket_name: &str) -> Result<(), Box<dy
             </Filter>
             <NoncurrentVersionTransition>
                 <NoncurrentDays>0</NoncurrentDays>
-                <StorageClass>COLDTIER</StorageClass>
+                <StorageClass>COLDTIER44</StorageClass>
             </NoncurrentVersionTransition>
         </Rule>
     </LifecycleConfiguration>"#;

@@ -243,33 +238,51 @@ async fn set_bucket_lifecycle_transition(bucket_name: &str) -> Result<(), Box<dy
 
 /// Test helper: Create a test tier
 #[allow(dead_code)]
-async fn create_test_tier() {
+async fn create_test_tier(server: u32) {
     let args = TierConfig {
         version: "v1".to_string(),
         tier_type: TierType::MinIO,
-        name: "COLDTIER".to_string(),
+        name: "COLDTIER44".to_string(),
         s3: None,
         aliyun: None,
         tencent: None,
         huaweicloud: None,
         azure: None,
         gcs: None,
         r2: None,
         rustfs: None,
-        minio: Some(TierMinIO {
-            access_key: "minioadmin".to_string(),
-            secret_key: "minioadmin".to_string(),
-            bucket: "mblock2".to_string(),
-            endpoint: "http://127.0.0.1:9020".to_string(),
-            prefix: "mypre3/".to_string(),
-            region: "".to_string(),
-            ..Default::default()
-        }),
+        minio: if server == 1 {
+            Some(TierMinIO {
+                access_key: "minioadmin".to_string(),
+                secret_key: "minioadmin".to_string(),
+                bucket: "hello".to_string(),
+                endpoint: "http://39.105.198.204:9000".to_string(),
+                prefix: format!("mypre{}/", uuid::Uuid::new_v4()),
+                region: "".to_string(),
+                ..Default::default()
+            })
+        } else {
+            Some(TierMinIO {
+                access_key: "minioadmin".to_string(),
+                secret_key: "minioadmin".to_string(),
+                bucket: "mblock2".to_string(),
+                endpoint: "http://127.0.0.1:9020".to_string(),
+                prefix: format!("mypre{}/", uuid::Uuid::new_v4()),
+                region: "".to_string(),
+                ..Default::default()
+            })
+        },
     };
-    let mut tier_config_mgr = GLOBAL_TIER_CONFIG_MGR.get().unwrap().write().await;
+    let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
     if let Err(err) = tier_config_mgr.add(args, false).await {
-        warn!("tier_config_mgr add failed, e: {:?}", err);
+        println!("tier_config_mgr add failed, e: {:?}", err);
+        panic!("tier add failed. {err}");
     }
     if let Err(e) = tier_config_mgr.save().await {
-        warn!("tier_config_mgr save failed, e: {:?}", e);
+        println!("tier_config_mgr save failed, e: {:?}", e);
+        panic!("tier save failed");
     }
-    info!("Created test tier: {}", "COLDTIER");
+    println!("Created test tier: COLDTIER44");
 }
 
 /// Test helper: Check if object exists

@@ -284,9 +297,10 @@ async fn object_exists(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bo
 #[allow(dead_code)]
 async fn object_is_delete_marker(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
     if let Ok(oi) = (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
-        debug!("oi: {:?}", oi);
+        println!("oi: {:?}", oi);
        oi.delete_marker
     } else {
+        println!("object_is_delete_marker is error");
         panic!("object_is_delete_marker is error");
     }
 }

@@ -295,9 +309,10 @@ async fn object_is_delete_marker(ecstore: &Arc<ECStore>, bucket: &str, object: &
 #[allow(dead_code)]
 async fn object_is_transitioned(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
     if let Ok(oi) = (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
-        info!("oi: {:?}", oi);
+        println!("oi: {:?}", oi);
         !oi.transitioned_object.status.is_empty()
     } else {
+        println!("object_is_transitioned is error");
         panic!("object_is_transitioned is error");
     }
 }

@@ -455,8 +470,9 @@ mod serial_tests {
         println!("Lifecycle expiry basic test completed");
     }
 
-    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
     #[serial]
+    //#[ignore]
     async fn test_lifecycle_expiry_deletemarker() {
         let (_disk_paths, ecstore) = setup_test_env().await;
 

@@ -578,12 +594,13 @@ mod serial_tests {
         println!("Lifecycle expiry basic test completed");
     }
 
-    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
     #[serial]
+    #[ignore]
     async fn test_lifecycle_transition_basic() {
         let (_disk_paths, ecstore) = setup_test_env().await;
 
-        //create_test_tier().await;
+        create_test_tier(1).await;
 
         // Create test bucket and object
         let suffix = uuid::Uuid::new_v4().simple().to_string();

@@ -591,7 +608,8 @@ mod serial_tests {
         let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
         let test_data = b"Hello, this is test data for lifecycle expiry!";
 
-        create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
+        //create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
+        create_test_bucket(&ecstore, bucket_name.as_str()).await;
         upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;
 
         // Verify object exists initially

@@ -599,13 +617,13 @@ mod serial_tests {
         println!("✅ Object exists before lifecycle processing");
 
         // Set lifecycle configuration with very short expiry (0 days = immediate expiry)
-        /*set_bucket_lifecycle_transition(bucket_name)
+        set_bucket_lifecycle_transition(bucket_name.as_str())
             .await
             .expect("Failed to set lifecycle configuration");
         println!("✅ Lifecycle configuration set for bucket: {bucket_name}");
 
         // Verify lifecycle configuration was set
-        match rustfs_ecstore::bucket::metadata_sys::get(bucket_name).await {
+        match rustfs_ecstore::bucket::metadata_sys::get(bucket_name.as_str()).await {
             Ok(bucket_meta) => {
                 assert!(bucket_meta.lifecycle_config.is_some());
                 println!("✅ Bucket metadata retrieved successfully");

@@ -613,7 +631,7 @@ mod serial_tests {
             Err(e) => {
                 println!("❌ Error retrieving bucket metadata: {e:?}");
             }
-        }*/
+        }
 
         // Create scanner with very short intervals for testing
         let scanner_config = ScannerConfig {

@@ -640,12 +658,11 @@ mod serial_tests {
         tokio::time::sleep(Duration::from_secs(5)).await;
 
         // Check if object has been expired (deleted)
-        //let check_result = object_is_transitioned(&ecstore, bucket_name, object_name).await;
-        let check_result = object_exists(&ecstore, bucket_name.as_str(), object_name).await;
+        let check_result = object_is_transitioned(&ecstore, &bucket_name, object_name).await;
         println!("Object exists after lifecycle processing: {check_result}");
 
         if check_result {
-            println!("✅ Object was not deleted by lifecycle processing");
+            println!("✅ Object was transitioned by lifecycle processing");
             // Let's try to get object info to see its details
             match ecstore
                 .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())

@@ -663,7 +680,7 @@ mod serial_tests {
                 }
             }
         } else {
-            println!("❌ Object was deleted by lifecycle processing");
+            println!("❌ Object was not transitioned by lifecycle processing");
         }
 
         assert!(check_result);
@@ -102,6 +102,10 @@ aws-smithy-types = { workspace = true }
 parking_lot = { workspace = true }
 moka = { workspace = true }
 base64-simd.workspace = true
+serde_urlencoded.workspace = true
+google-cloud-storage = "1.1.0"
+google-cloud-auth = "1.0.1"
+aws-config = { workspace = true }
 
 [target.'cfg(not(windows))'.dependencies]
 nix = { workspace = true }

@@ -114,6 +118,7 @@ winapi = { workspace = true }
 tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
 criterion = { workspace = true, features = ["html_reports"] }
 temp-env = { workspace = true }
+heed = "0.22.0"
 
 [build-dependencies]
 shadow-rs = { workspace = true, features = ["build", "metadata"] }
@@ -18,14 +18,18 @@
 #![allow(unused_must_use)]
 #![allow(clippy::all)]
 
+use crate::error::StorageError;
 use async_channel::{Receiver as A_Receiver, Sender as A_Sender, bounded};
 use bytes::BytesMut;
 use futures::Future;
 use http::HeaderMap;
 use lazy_static::lazy_static;
 use rustfs_common::data_usage::TierStats;
 use rustfs_common::heal_channel::rep_has_active_rules;
 use rustfs_common::metrics::{IlmAction, Metrics};
+use rustfs_filemeta::fileinfo::{NULL_VERSION_ID, RestoreStatusOps, is_restored_object_on_disk};
 use rustfs_utils::path::encode_dir_object;
+use rustfs_utils::string::strings_has_prefix_fold;
 use s3s::Body;
 use sha2::{Digest, Sha256};
 use std::any::Any;

@@ -62,7 +66,11 @@ use crate::store::ECStore;
 use crate::store_api::StorageAPI;
 use crate::store_api::{GetObjectReader, HTTPRangeSpec, ObjectInfo, ObjectOptions, ObjectToDelete};
 use crate::tier::warm_backend::WarmBackendGetOpts;
-use s3s::dto::{BucketLifecycleConfiguration, DefaultRetention, ReplicationConfiguration};
+use s3s::dto::{
+    BucketLifecycleConfiguration, DefaultRetention, ReplicationConfiguration, RestoreRequest, RestoreRequestType, RestoreStatus,
+    ServerSideEncryption, Timestamp,
+};
+use s3s::header::{X_AMZ_RESTORE, X_AMZ_SERVER_SIDE_ENCRYPTION, X_AMZ_STORAGE_CLASS};
 
 pub type TimeFn = Arc<dyn Fn() -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync + 'static>;
 pub type TraceFn =

@@ -71,9 +79,12 @@ pub type ExpiryOpType = Box<dyn ExpiryOp + Send + Sync + 'static>;
 
 static XXHASH_SEED: u64 = 0;
 
 const _DISABLED: &str = "Disabled";
 pub const AMZ_OBJECT_TAGGING: &str = "X-Amz-Tagging";
+pub const AMZ_TAG_COUNT: &str = "x-amz-tagging-count";
 pub const AMZ_TAG_DIRECTIVE: &str = "X-Amz-Tagging-Directive";
+pub const AMZ_ENCRYPTION_AES: &str = "AES256";
+pub const AMZ_ENCRYPTION_KMS: &str = "aws:kms";
 
 //pub const ERR_INVALID_STORAGECLASS: &str = "invalid storage class.";
 pub const ERR_INVALID_STORAGECLASS: &str = "invalid tier.";
 
 lazy_static! {

@@ -762,11 +773,14 @@ pub fn gen_transition_objname(bucket: &str) -> Result<String, Error> {
 pub async fn transition_object(api: Arc<ECStore>, oi: &ObjectInfo, lae: LcAuditEvent) -> Result<(), Error> {
     let time_ilm = Metrics::time_ilm(lae.event.action);
 
+    let etag = if let Some(etag) = &oi.etag { etag } else { "" };
+    let etag = etag.to_string();
+
     let opts = ObjectOptions {
         transition: TransitionOptions {
             status: lifecycle::TRANSITION_PENDING.to_string(),
             tier: lae.event.storage_class,
-            etag: oi.etag.clone().expect("err").to_string(),
+            etag,
             ..Default::default()
         },
         //lifecycle_audit_event: lae,
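The two-line etag binding added above (which also removes the old panicking `expect("err")`) is behavior-equivalent to a single combinator; a compact alternative, offered only as a sketch:

    let etag = oi.etag.clone().unwrap_or_default();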
@@ -787,9 +801,9 @@ pub fn audit_tier_actions(_api: ECStore, _tier: &str, _bytes: i64) -> TimeFn {
|
||||
pub async fn get_transitioned_object_reader(
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
rs: HTTPRangeSpec,
|
||||
h: HeaderMap,
|
||||
oi: ObjectInfo,
|
||||
rs: &Option<HTTPRangeSpec>,
|
||||
h: &HeaderMap,
|
||||
oi: &ObjectInfo,
|
||||
opts: &ObjectOptions,
|
||||
) -> Result<GetObjectReader, std::io::Error> {
|
||||
let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
|
||||
@@ -815,19 +829,131 @@ pub async fn get_transitioned_object_reader(
|
||||
let reader = tgt_client
|
||||
.get(&oi.transitioned_object.name, &oi.transitioned_object.version_id, gopts)
|
||||
.await?;
|
||||
Ok(get_fn(reader, h))
|
||||
Ok(get_fn(reader, h.clone()))
|
||||
}
|
||||
|
||||
pub fn post_restore_opts(_r: http::Request<Body>, _bucket: &str, _object: &str) -> Result<ObjectOptions, std::io::Error> {
|
||||
todo!();
|
||||
pub async fn post_restore_opts(version_id: &str, bucket: &str, object: &str) -> Result<ObjectOptions, std::io::Error> {
|
||||
let versioned = BucketVersioningSys::prefix_enabled(bucket, object).await;
|
||||
let version_suspended = BucketVersioningSys::prefix_suspended(bucket, object).await;
|
||||
let vid = version_id.trim();
|
||||
if vid != "" && vid != NULL_VERSION_ID {
|
||||
if let Err(err) = Uuid::parse_str(vid) {
|
||||
return Err(std::io::Error::other(
|
||||
StorageError::InvalidVersionID(bucket.to_string(), object.to_string(), vid.to_string()).to_string(),
|
||||
));
|
||||
}
|
||||
if !versioned && !version_suspended {
|
||||
return Err(std::io::Error::other(
|
||||
StorageError::InvalidArgument(
|
||||
bucket.to_string(),
|
||||
object.to_string(),
|
||||
format!("version-id specified {} but versioning is not enabled on {}", vid, bucket),
|
||||
)
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
}
|
||||
Ok(ObjectOptions {
|
||||
versioned: versioned,
|
||||
version_suspended: version_suspended,
|
||||
version_id: Some(vid.to_string()),
|
||||
..Default::default()
|
||||
})
|
||||
}
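
Review note: post_restore_opts accepts an empty or "null" version id and requires anything else to parse as a UUID before consulting bucket versioning. A self-contained sketch of that gate; the "null" sentinel below is an assumption standing in for the crate's NULL_VERSION_ID constant, and only the uuid crate is used.

use uuid::Uuid;

const NULL_VERSION_ID: &str = "null"; // assumption: mirrors the crate-level constant

fn is_acceptable_version_id(vid: &str) -> bool {
    let vid = vid.trim();
    // Empty and "null" are always accepted; anything else must be a UUID.
    vid.is_empty() || vid == NULL_VERSION_ID || Uuid::parse_str(vid).is_ok()
}

fn main() {
    assert!(is_acceptable_version_id(""));
    assert!(is_acceptable_version_id("null"));
    assert!(is_acceptable_version_id("f47ac10b-58cc-4372-a567-0e02b2c3d479"));
    assert!(!is_acceptable_version_id("not-a-uuid"));
}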

pub fn put_restore_opts(_bucket: &str, _object: &str, _rreq: &RestoreObjectRequest, _oi: &ObjectInfo) -> ObjectOptions {
    todo!();
pub async fn put_restore_opts(
    bucket: &str,
    object: &str,
    rreq: &RestoreRequest,
    oi: &ObjectInfo,
) -> Result<ObjectOptions, std::io::Error> {
    let mut meta = HashMap::<String, String>::new();
    /*let mut b = false;
    let Some(Some(Some(mut sc))) = rreq.output_location.s3.storage_class else { b = true; };
    if b || sc == "" {
        //sc = oi.storage_class;
        sc = oi.transitioned_object.tier;
    }
    meta.insert(X_AMZ_STORAGE_CLASS.as_str().to_lowercase(), sc);*/

    if let Some(type_) = &rreq.type_
        && type_.as_str() == RestoreRequestType::SELECT
    {
        for v in rreq
            .output_location
            .as_ref()
            .unwrap()
            .s3
            .as_ref()
            .unwrap()
            .user_metadata
            .as_ref()
            .unwrap()
        {
            if !strings_has_prefix_fold(&v.name.clone().unwrap(), "x-amz-meta") {
                meta.insert(
                    format!("x-amz-meta-{}", v.name.as_ref().unwrap()),
                    v.value.clone().unwrap_or("".to_string()),
                );
                continue;
            }
            meta.insert(v.name.clone().unwrap(), v.value.clone().unwrap_or("".to_string()));
        }
        if let Some(output_location) = rreq.output_location.as_ref() {
            if let Some(s3) = &output_location.s3 {
                if let Some(tags) = &s3.tagging {
                    meta.insert(
                        AMZ_OBJECT_TAGGING.to_string(),
                        serde_urlencoded::to_string(tags.tag_set.clone()).unwrap_or("".to_string()),
                    );
                }
            }
        }
        if let Some(output_location) = rreq.output_location.as_ref() {
            if let Some(s3) = &output_location.s3 {
                if let Some(encryption) = &s3.encryption {
                    if encryption.encryption_type.as_str() != "" {
                        meta.insert(X_AMZ_SERVER_SIDE_ENCRYPTION.as_str().to_string(), AMZ_ENCRYPTION_AES.to_string());
                    }
                }
            }
        }
        return Ok(ObjectOptions {
            versioned: BucketVersioningSys::prefix_enabled(bucket, object).await,
            version_suspended: BucketVersioningSys::prefix_suspended(bucket, object).await,
            user_defined: meta,
            ..Default::default()
        });
    }
    for (k, v) in &oi.user_defined {
        meta.insert(k.to_string(), v.clone());
    }
    if oi.user_tags.len() != 0 {
        meta.insert(AMZ_OBJECT_TAGGING.to_string(), oi.user_tags.clone());
    }
    let restore_expiry = lifecycle::expected_expiry_time(OffsetDateTime::now_utc(), rreq.days.unwrap_or(1));
    meta.insert(
        X_AMZ_RESTORE.as_str().to_string(),
        RestoreStatus {
            is_restore_in_progress: Some(false),
            restore_expiry_date: Some(Timestamp::from(restore_expiry)),
        }
        .to_string(),
    );
    Ok(ObjectOptions {
        versioned: BucketVersioningSys::prefix_enabled(bucket, object).await,
        version_suspended: BucketVersioningSys::prefix_suspended(bucket, object).await,
        user_defined: meta,
        version_id: oi.version_id.map(|e| e.to_string()),
        mod_time: oi.mod_time,
        //expires: oi.expires,
        ..Default::default()
    })
}
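
put_restore_opts records the restore window under X_AMZ_RESTORE, with the expiry computed by lifecycle::expected_expiry_time from rreq.days (defaulting to 1 day). For reference, a stand-alone approximation of the S3-style x-amz-restore value; the exact formatting in this crate comes from s3s's RestoreStatus, so treat this formatter as illustrative only.

use time::{format_description::well_known::Rfc2822, Duration, OffsetDateTime};

fn restore_header(ongoing: bool, expiry: OffsetDateTime) -> String {
    // S3 reports restore state as: ongoing-request="false", expiry-date="..."
    format!(
        "ongoing-request=\"{}\", expiry-date=\"{}\"",
        ongoing,
        expiry.format(&Rfc2822).unwrap_or_default()
    )
}

fn main() {
    // A one-day restore window, mirroring `rreq.days.unwrap_or(1)` above.
    let expiry = OffsetDateTime::now_utc() + Duration::days(1);
    println!("{}", restore_header(false, expiry));
}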

pub trait LifecycleOps {
    fn to_lifecycle_opts(&self) -> lifecycle::ObjectOpts;
    fn is_remote(&self) -> bool;
}

impl LifecycleOps for ObjectInfo {
@@ -848,29 +974,54 @@ impl LifecycleOps for ObjectInfo {
            ..Default::default()
        }
    }

    fn is_remote(&self) -> bool {
        if self.transitioned_object.status != lifecycle::TRANSITION_COMPLETE {
            return false;
        }
        !is_restored_object_on_disk(&self.user_defined)
    }
}

#[derive(Debug, Default, Clone)]
pub struct S3Location {
    pub bucketname: String,
    //pub encryption: Encryption,
    pub prefix: String,
    pub storage_class: String,
    //pub tagging: Tags,
    pub user_metadata: HashMap<String, String>,
pub trait RestoreRequestOps {
    fn validate(&self, api: Arc<ECStore>) -> Result<(), std::io::Error>;
}

#[derive(Debug, Default, Clone)]
pub struct OutputLocation(pub S3Location);
impl RestoreRequestOps for RestoreRequest {
    fn validate(&self, api: Arc<ECStore>) -> Result<(), std::io::Error> {
        /*if self.type_.is_none() && self.select_parameters.is_some() {
            return Err(std::io::Error::other("Select parameters can only be specified with SELECT request type"));
        }
        if let Some(type_) = self.type_ && type_ == RestoreRequestType::SELECT && self.select_parameters.is_none() {
            return Err(std::io::Error::other("SELECT restore request requires select parameters to be specified"));
        }

#[derive(Debug, Default, Clone)]
pub struct RestoreObjectRequest {
    pub days: i64,
    pub ror_type: String,
    pub tier: String,
    pub description: String,
    //pub select_parameters: SelectParameters,
    pub output_location: OutputLocation,
        if self.type_.is_none() && self.output_location.is_some() {
            return Err(std::io::Error::other("OutputLocation required only for SELECT request type"));
        }
        if let Some(type_) = self.type_ && type_ == RestoreRequestType::SELECT && self.output_location.is_none() {
            return Err(std::io::Error::other("OutputLocation required for SELECT requests"));
        }

        if let Some(type_) = self.type_ && type_ == RestoreRequestType::SELECT && self.days != 0 {
            return Err(std::io::Error::other("Days cannot be specified with SELECT restore request"));
        }
        if self.days == 0 && self.type_.is_none() {
            return Err(std::io::Error::other("restoration days should be at least 1"));
        }
        if self.output_location.is_some() {
            if _, err := api.get_bucket_info(self.output_location.s3.bucket_name, BucketOptions{}); err != nil {
                return err
            }
            if self.output_location.s3.prefix == "" {
                return Err(std::io::Error::other("Prefix is a required parameter in OutputLocation"));
            }
            if self.output_location.s3.encryption.encryption_type.as_str() != ServerSideEncryption::AES256 {
                return NotImplemented{}
            }
        }*/
        Ok(())
    }
}
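
Review note: the body of validate is still commented out, and the commented block even mixes in Go (`if _, err := ...; err != nil`), so none of these rules are enforced yet. A compilable sketch of the intended checks over a stand-in type; the field names are assumptions for illustration, not the s3s DTO.

// Stand-in request type; the real code validates s3s::dto::RestoreRequest.
struct RestoreReq {
    select: bool,               // request type == SELECT
    days: i64,                  // 0 means "not specified"
    has_output_location: bool,
}

fn validate(r: &RestoreReq) -> Result<(), String> {
    if !r.select && r.has_output_location {
        return Err("OutputLocation required only for SELECT request type".into());
    }
    if r.select && !r.has_output_location {
        return Err("OutputLocation required for SELECT requests".into());
    }
    if r.select && r.days != 0 {
        return Err("Days cannot be specified with SELECT restore request".into());
    }
    if r.days == 0 && !r.select {
        return Err("restoration days should be at least 1".into());
    }
    Ok(())
}

fn main() {
    assert!(validate(&RestoreReq { select: false, days: 1, has_output_location: false }).is_ok());
    assert!(validate(&RestoreReq { select: true, days: 0, has_output_location: true }).is_ok());
    assert!(validate(&RestoreReq { select: false, days: 0, has_output_location: false }).is_err());
}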

const _MAX_RESTORE_OBJECT_REQUEST_SIZE: i64 = 2 << 20;
@@ -975,11 +1126,10 @@ pub async fn apply_expiry_on_non_transitioned_objects(
    //debug!("lc_event.action: {:?}", lc_event.action);
    //debug!("opts: {:?}", opts);
    let mut dobj = match api.delete_object(&oi.bucket, &encode_dir_object(&oi.name), opts).await {
        Ok(obj) => obj,
        Ok(dobj) => dobj,
        Err(e) => {
            error!("Failed to delete object {}/{}: {:?}", oi.bucket, oi.name, e);
            // Return the original object info if deletion fails
            oi.clone()
            error!("delete_object error: {:?}", e);
            return false;
        }
    };
    //debug!("dobj: {:?}", dobj);

@@ -20,7 +20,7 @@

use s3s::dto::{
    BucketLifecycleConfiguration, ExpirationStatus, LifecycleExpiration, LifecycleRule, NoncurrentVersionTransition,
    ObjectLockConfiguration, ObjectLockEnabled, Transition,
    ObjectLockConfiguration, ObjectLockEnabled, RestoreRequest, Transition,
};
use std::cmp::Ordering;
use std::env;
@@ -32,8 +32,6 @@ use tracing::info;

use crate::bucket::lifecycle::rule::TransitionOps;

use super::bucket_lifecycle_ops::RestoreObjectRequest;

pub const TRANSITION_COMPLETE: &str = "complete";
pub const TRANSITION_PENDING: &str = "pending";

@@ -325,7 +323,7 @@ impl Lifecycle for BucketLifecycleConfiguration {
        }

        if let Some(days) = expiration.days {
            let expected_expiry = expected_expiry_time(obj.mod_time.expect("err!"), days /*, date*/);
            let expected_expiry = expected_expiry_time(obj.mod_time.unwrap(), days /*, date*/);
            if now.unix_timestamp() >= expected_expiry.unix_timestamp() {
                events.push(Event {
                    action: IlmAction::DeleteVersionAction,
@@ -402,19 +400,21 @@ impl Lifecycle for BucketLifecycleConfiguration {
        if storage_class.as_str() != "" && !obj.delete_marker && obj.transition_status != TRANSITION_COMPLETE
        {
            let due = rule.noncurrent_version_transitions.as_ref().unwrap()[0].next_due(obj);
            if due.is_some() && (now.unix_timestamp() >= due.unwrap().unix_timestamp()) {
                events.push(Event {
                    action: IlmAction::TransitionVersionAction,
                    rule_id: rule.id.clone().expect("err!"),
                    due,
                    storage_class: rule.noncurrent_version_transitions.as_ref().unwrap()[0]
                        .storage_class
                        .clone()
                        .unwrap()
                        .as_str()
                        .to_string(),
                    ..Default::default()
                });
            if let Some(due0) = due {
                if now.unix_timestamp() == 0 || now.unix_timestamp() > due0.unix_timestamp() {
                    events.push(Event {
                        action: IlmAction::TransitionVersionAction,
                        rule_id: rule.id.clone().expect("err!"),
                        due,
                        storage_class: rule.noncurrent_version_transitions.as_ref().unwrap()[0]
                            .storage_class
                            .clone()
                            .unwrap()
                            .as_str()
                            .to_string(),
                        ..Default::default()
                    });
                }
            }
        }
    }
@@ -446,7 +446,7 @@ impl Lifecycle for BucketLifecycleConfiguration {
            });
        }
    } else if let Some(days) = expiration.days {
        let expected_expiry: OffsetDateTime = expected_expiry_time(obj.mod_time.expect("err!"), days);
        let expected_expiry: OffsetDateTime = expected_expiry_time(obj.mod_time.unwrap(), days);
        info!(
            "eval_inner: expiration check - days={}, obj_time={:?}, expiry_time={:?}, now={:?}, should_expire={}",
            days,
@@ -480,12 +480,12 @@ impl Lifecycle for BucketLifecycleConfiguration {
        if obj.transition_status != TRANSITION_COMPLETE {
            if let Some(ref transitions) = rule.transitions {
                let due = transitions[0].next_due(obj);
                if let Some(due) = due {
                    if due.unix_timestamp() > 0 && (now.unix_timestamp() >= due.unix_timestamp()) {
                if let Some(due0) = due {
                    if now.unix_timestamp() == 0 || now.unix_timestamp() > due0.unix_timestamp() {
                        events.push(Event {
                            action: IlmAction::TransitionAction,
                            rule_id: rule.id.clone().expect("err!"),
                            due: Some(due),
                            due,
                            storage_class: transitions[0].storage_class.clone().expect("err!").as_str().to_string(),
                            noncurrent_days: 0,
                            newer_noncurrent_versions: 0,
@@ -580,8 +580,10 @@ impl LifecycleCalculate for LifecycleExpiration {
        if !obj.is_latest || !obj.delete_marker {
            return None;
        }

        Some(expected_expiry_time(obj.mod_time.unwrap(), self.days.unwrap()))
        match self.days {
            Some(days) => Some(expected_expiry_time(obj.mod_time.unwrap(), days)),
            None => None,
        }
    }
}

@@ -591,10 +593,16 @@ impl LifecycleCalculate for NoncurrentVersionTransition {
        if obj.is_latest || self.storage_class.is_none() {
            return None;
        }
        if self.noncurrent_days.is_none() {
            return obj.successor_mod_time;
        match self.noncurrent_days {
            Some(noncurrent_days) => {
                if let Some(successor_mod_time) = obj.successor_mod_time {
                    Some(expected_expiry_time(successor_mod_time, noncurrent_days))
                } else {
                    Some(expected_expiry_time(OffsetDateTime::now_utc(), noncurrent_days))
                }
            }
            None => obj.successor_mod_time,
        }
        Some(expected_expiry_time(obj.successor_mod_time.unwrap(), self.noncurrent_days.unwrap()))
    }
}
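
The hunks above change the due-time comparison from `now >= due` to `now == 0 || now > due` and replace unwrap chains with explicit matches. A distilled, runnable version of the new gate; reading the zero-`now` shortcut as a "force evaluation" escape hatch (e.g. for tests) is an inference, not something the diff states.

use time::{Duration, OffsetDateTime};

fn transition_due(now: OffsetDateTime, due: Option<OffsetDateTime>) -> bool {
    match due {
        // A zero `now` forces the event; otherwise it fires strictly after due.
        Some(due0) => now.unix_timestamp() == 0 || now.unix_timestamp() > due0.unix_timestamp(),
        None => false,
    }
}

fn main() {
    let now = OffsetDateTime::now_utc();
    assert!(transition_due(now, Some(now - Duration::days(1)))); // past due
    assert!(!transition_due(now, Some(now + Duration::days(1)))); // not yet due
    assert!(transition_due(OffsetDateTime::from_unix_timestamp(0).unwrap(), Some(now))); // forced
    assert!(!transition_due(now, None));
}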

@@ -609,10 +617,10 @@ impl LifecycleCalculate for Transition {
        return Some(date.into());
    }

    if self.days.is_none() {
        return obj.mod_time;
    match self.days {
        Some(days) => Some(expected_expiry_time(obj.mod_time.unwrap(), days)),
        None => obj.mod_time,
    }
    Some(expected_expiry_time(obj.mod_time.unwrap(), self.days.unwrap()))
}
}

@@ -692,7 +700,7 @@ pub struct TransitionOptions {
    pub status: String,
    pub tier: String,
    pub etag: String,
    pub restore_request: RestoreObjectRequest,
    pub restore_request: RestoreRequest,
    pub restore_expiry: OffsetDateTime,
    pub expire_restored: bool,
}

@@ -18,28 +18,23 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]

use bytes::Bytes;
use http::HeaderMap;
use std::collections::HashMap;
use std::io::Cursor;
use tokio::io::BufReader;

use crate::client::{
    api_error_response::{err_invalid_argument, http_resp_to_error_response},
    api_get_object_acl::AccessControlList,
    api_get_options::GetObjectOptions,
    transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info},
};
use bytes::Bytes;
use http::HeaderMap;
use s3s::dto::RestoreRequest;
use std::collections::HashMap;
use std::io::Cursor;
use tokio::io::BufReader;

const TIER_STANDARD: &str = "Standard";
const TIER_BULK: &str = "Bulk";
const TIER_EXPEDITED: &str = "Expedited";

#[derive(Debug, Default, serde::Serialize)]
pub struct GlacierJobParameters {
    pub tier: String,
}

#[derive(Debug, Default, serde::Serialize, serde::Deserialize)]
pub struct Encryption {
    pub encryption_type: String,
@@ -65,58 +60,6 @@ pub struct S3 {
    pub user_metadata: MetadataEntry,
}

#[derive(Debug, Default, serde::Serialize)]
pub struct SelectParameters {
    pub expression_type: String,
    pub expression: String,
    //input_serialization: SelectObjectInputSerialization,
    //output_serialization: SelectObjectOutputSerialization,
}

#[derive(Debug, Default, serde::Serialize)]
pub struct OutputLocation(pub S3);

#[derive(Debug, Default, serde::Serialize)]
pub struct RestoreRequest {
    pub restore_type: String,
    pub tier: String,
    pub days: i64,
    pub glacier_job_parameters: GlacierJobParameters,
    pub description: String,
    pub select_parameters: SelectParameters,
    pub output_location: OutputLocation,
}

impl RestoreRequest {
    fn set_days(&mut self, v: i64) {
        self.days = v;
    }

    fn set_glacier_job_parameters(&mut self, v: GlacierJobParameters) {
        self.glacier_job_parameters = v;
    }

    fn set_type(&mut self, v: &str) {
        self.restore_type = v.to_string();
    }

    fn set_tier(&mut self, v: &str) {
        self.tier = v.to_string();
    }

    fn set_description(&mut self, v: &str) {
        self.description = v.to_string();
    }

    fn set_select_parameters(&mut self, v: SelectParameters) {
        self.select_parameters = v;
    }

    fn set_output_location(&mut self, v: OutputLocation) {
        self.output_location = v;
    }
}

impl TransitionClient {
    pub async fn restore_object(
        &self,
@@ -125,12 +68,13 @@ impl TransitionClient {
        version_id: &str,
        restore_req: &RestoreRequest,
    ) -> Result<(), std::io::Error> {
        let restore_request = match quick_xml::se::to_string(restore_req) {
        /*let restore_request = match quick_xml::se::to_string(restore_req) {
            Ok(buf) => buf,
            Err(e) => {
                return Err(std::io::Error::other(e));
            }
        };
        };*/
        let restore_request = "".to_string();
        let restore_request_bytes = restore_request.as_bytes().to_vec();

        let mut url_values = HashMap::new();

@@ -27,7 +27,7 @@ use tracing::{debug, error, info};

use crate::client::{
    api_error_response::{http_resp_to_error_response, to_error_response},
    transition_api::{Document, TransitionClient},
    transition_api::{CreateBucketConfiguration, LocationConstraint, TransitionClient},
};
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;
use s3s::Body;
@@ -82,7 +82,7 @@ impl TransitionClient {
    let req = self.get_bucket_location_request(bucket_name)?;

    let mut resp = self.doit(req).await?;
    location = process_bucket_location_response(resp, bucket_name).await?;
    location = process_bucket_location_response(resp, bucket_name, &self.tier_type).await?;
    {
        let mut bucket_loc_cache = self.bucket_loc_cache.lock().unwrap();
        bucket_loc_cache.set(bucket_name, &location);
@@ -175,7 +175,11 @@ impl TransitionClient {
    }
}

async fn process_bucket_location_response(mut resp: http::Response<Body>, bucket_name: &str) -> Result<String, std::io::Error> {
async fn process_bucket_location_response(
    mut resp: http::Response<Body>,
    bucket_name: &str,
    tier_type: &str,
) -> Result<String, std::io::Error> {
    //if resp != nil {
    if resp.status() != StatusCode::OK {
        let err_resp = http_resp_to_error_response(&resp, vec![], bucket_name, "");
@@ -209,9 +213,17 @@ async fn process_bucket_location_response(mut resp: http::Response<Body>, bucket
    //}

    let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
    let Document(location_constraint) = quick_xml::de::from_str::<Document>(&String::from_utf8(b).unwrap()).unwrap();
    let mut location = "".to_string();
    if tier_type == "huaweicloud" {
        let d = quick_xml::de::from_str::<CreateBucketConfiguration>(&String::from_utf8(b).unwrap()).unwrap();
        location = d.location_constraint;
    } else {
        if let Ok(LocationConstraint { field }) = quick_xml::de::from_str::<LocationConstraint>(&String::from_utf8(b).unwrap()) {
            location = field;
        }
    }
    //debug!("location: {}", location);

    let mut location = location_constraint;
    if location == "" {
        location = "us-east-1".to_string();
    }
}
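
process_bucket_location_response now parses two payload shapes: CreateBucketConfiguration for the huaweicloud tier and a bare LocationConstraint otherwise. A round-trip sketch of both shapes with quick_xml (serde feature assumed) mirroring the struct attributes from this diff; the region strings are made-up examples.

use serde::Deserialize;

#[derive(Deserialize)]
struct LocationConstraint {
    #[serde(rename = "$value")]
    field: String, // element text content
}

#[derive(Deserialize)]
struct CreateBucketConfiguration {
    #[serde(rename = "LocationConstraint")]
    location_constraint: String,
}

fn main() {
    // S3-style response: a bare <LocationConstraint> element.
    let s3_style = r#"<LocationConstraint>us-east-1</LocationConstraint>"#;
    let lc: LocationConstraint = quick_xml::de::from_str(s3_style).unwrap();
    assert_eq!(lc.field, "us-east-1");

    // Huawei OBS-style response: wrapped in <CreateBucketConfiguration>.
    let obs_style = r#"<CreateBucketConfiguration><LocationConstraint>cn-north-4</LocationConstraint></CreateBucketConfiguration>"#;
    let cbc: CreateBucketConfiguration = quick_xml::de::from_str(obs_style).unwrap();
    assert_eq!(cbc.location_constraint, "cn-north-4");
}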

@@ -21,6 +21,7 @@

use http::HeaderMap;
use s3s::dto::ETag;
use std::pin::Pin;
use std::{collections::HashMap, io::Cursor, sync::Arc};
use tokio::io::BufReader;

@@ -54,7 +55,7 @@ impl PutObjReader {
    }
}

pub type ObjReaderFn = Arc<dyn Fn(BufReader<Cursor<Vec<u8>>>, HeaderMap) -> GetObjectReader + 'static>;
pub type ObjReaderFn<'a> = Arc<dyn Fn(BufReader<Cursor<Vec<u8>>>, HeaderMap) -> GetObjectReader + Send + Sync + 'a>;

fn part_number_to_rangespec(oi: ObjectInfo, part_number: usize) -> Option<HTTPRangeSpec> {
    if oi.size == 0 || oi.parts.len() == 0 {
@@ -108,19 +109,24 @@ fn get_compressed_offsets(oi: ObjectInfo, offset: i64) -> (i64, i64, i64, i64, u
    (compressed_offset, part_skip, first_part_idx, decrypt_skip, seq_num)
}

pub fn new_getobjectreader(
    rs: HTTPRangeSpec,
    oi: &ObjectInfo,
pub fn new_getobjectreader<'a>(
    rs: &Option<HTTPRangeSpec>,
    oi: &'a ObjectInfo,
    opts: &ObjectOptions,
    h: &HeaderMap,
) -> Result<(ObjReaderFn, i64, i64), ErrorResponse> {
    _h: &HeaderMap,
) -> Result<(ObjReaderFn<'a>, i64, i64), ErrorResponse> {
    //let (_, mut is_encrypted) = crypto.is_encrypted(oi.user_defined)?;
    let mut is_encrypted = false;
    let is_compressed = false; //oi.is_compressed_ok();

    let mut rs_ = None;
    if rs.is_none() && opts.part_number.is_some() && opts.part_number.unwrap() > 0 {
        rs_ = part_number_to_rangespec(oi.clone(), opts.part_number.unwrap());
    }

    let mut get_fn: ObjReaderFn;

    let (off, length) = match rs.get_offset_length(oi.size) {
    let (off, length) = match rs_.unwrap().get_offset_length(oi.size) {
        Ok(x) => x,
        Err(err) => {
            return Err(ErrorResponse {
@@ -136,12 +142,11 @@ pub fn new_getobjectreader(
    };
    get_fn = Arc::new(move |input_reader: BufReader<Cursor<Vec<u8>>>, _: HeaderMap| {
        //Box::pin({
        /*let r = GetObjectReader {
        let r = GetObjectReader {
            object_info: oi.clone(),
            stream: StreamingBlob::new(HashReader::new(input_reader, 10, None, None, 10)),
            stream: Box::new(input_reader),
        };
        r*/
        todo!();
        r
        //})
});
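
ObjReaderFn gains a lifetime parameter because new_getobjectreader now hands back a closure tied to borrowed input instead of a 'static one (note also that `rs_.unwrap()` above will panic when neither a range nor a part number is supplied; the assignment only fills `rs_` in the part-number branch). A minimal, self-contained illustration of why the alias needs the lifetime:

use std::sync::Arc;

type NameFn<'a> = Arc<dyn Fn() -> String + Send + Sync + 'a>;

fn make_name_fn<'a>(name: &'a str) -> NameFn<'a> {
    // The closure captures `name` by reference, so the Arc'd trait object
    // can live no longer than 'a; with `+ 'static` this would not compile.
    Arc::new(move || format!("object: {name}"))
}

fn main() {
    let object = String::from("photos/cat.png");
    let f = make_name_fn(&object);
    assert_eq!(f(), "object: photos/cat.png");
}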

@@ -44,7 +44,7 @@ use std::{
};
use time::Duration;
use time::OffsetDateTime;
use tokio::io::BufReader;
use tracing::{debug, error};
use tracing::{debug, error, warn};
use url::{Url, form_urlencoded};
use uuid::Uuid;

@@ -109,6 +109,7 @@ pub struct TransitionClient {
    pub health_status: AtomicI32,
    pub trailing_header_support: bool,
    pub max_retries: i64,
    pub tier_type: String,
}

#[derive(Debug, Default)]
@@ -132,13 +133,13 @@ pub enum BucketLookupType {
}

impl TransitionClient {
    pub async fn new(endpoint: &str, opts: Options) -> Result<TransitionClient, std::io::Error> {
        let clnt = Self::private_new(endpoint, opts).await?;
    pub async fn new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
        let clnt = Self::private_new(endpoint, opts, tier_type).await?;

        Ok(clnt)
    }

    async fn private_new(endpoint: &str, opts: Options) -> Result<TransitionClient, std::io::Error> {
    async fn private_new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
        let endpoint_url = get_endpoint_url(endpoint, opts.secure)?;

        //#[cfg(feature = "ring")]
@@ -175,6 +176,7 @@ impl TransitionClient {
            health_status: AtomicI32::new(C_UNKNOWN),
            trailing_header_support: opts.trailing_headers,
            max_retries: opts.max_retries,
            tier_type: tier_type.to_string(),
        };

        {
@@ -283,11 +285,14 @@ impl TransitionClient {
        let mut resp = resp.unwrap();
        debug!("http_resp: {:?}", resp);

        //let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
        //debug!("http_resp_body: {}", String::from_utf8(b).unwrap());

        //if self.is_trace_enabled && !(self.trace_errors_only && resp.status() == StatusCode::OK) {
        if resp.status() != StatusCode::OK {
            //self.dump_http(&cloned_req, &resp)?;
            let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
            debug!("err_body: {}", String::from_utf8(b).unwrap());
            warn!("err_body: {}", String::from_utf8(b).unwrap());
        }

        Ok(resp)
@@ -330,7 +335,8 @@ impl TransitionClient {
        }

        let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
        let err_response = http_resp_to_error_response(&resp, b.clone(), &metadata.bucket_name, &metadata.object_name);
        let mut err_response = http_resp_to_error_response(&resp, b.clone(), &metadata.bucket_name, &metadata.object_name);
        err_response.message = format!("remote tier error: {}", err_response.message);

        if self.region == "" {
            match err_response.code {
@@ -380,9 +386,9 @@ impl TransitionClient {
        method: &http::Method,
        metadata: &mut RequestMetadata,
    ) -> Result<http::Request<Body>, std::io::Error> {
        let location = metadata.bucket_location.clone();
        let mut location = metadata.bucket_location.clone();
        if location == "" && metadata.bucket_name != "" {
            let location = self.get_bucket_location(&metadata.bucket_name).await?;
            location = self.get_bucket_location(&metadata.bucket_name).await?;
        }

        let is_makebucket = metadata.object_name == "" && method == http::Method::PUT && metadata.query_values.len() == 0;
@@ -624,7 +630,7 @@ pub struct TransitionCore(pub Arc<TransitionClient>);

impl TransitionCore {
    pub async fn new(endpoint: &str, opts: Options) -> Result<Self, std::io::Error> {
        let client = TransitionClient::new(endpoint, opts).await?;
        let client = TransitionClient::new(endpoint, opts, "").await?;
        Ok(Self(Arc::new(client)))
}
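
The @@ -380,9 +386,9 hunk is a classic shadowing fix: the inner `let location = ...` created a new binding that died at the end of the `if` block, so the fetched bucket location was silently discarded. Distilled into a self-contained before/after:

fn resolve(cached: &str, fetched: &str) -> String {
    let mut location = cached.to_string();
    if location.is_empty() {
        // Before the fix this line read `let location = ...`, which shadowed
        // the outer variable and was dropped at the end of the block.
        location = fetched.to_string();
    }
    location
}

fn main() {
    assert_eq!(resolve("", "us-east-1"), "us-east-1");
    assert_eq!(resolve("eu-west-1", "us-east-1"), "eu-west-1");
}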

@@ -997,4 +1003,13 @@ impl tower::Service<Request<Body>> for SendRequest {
}

#[derive(Serialize, Deserialize)]
pub struct Document(pub String);
pub struct LocationConstraint {
    #[serde(rename = "$value")]
    pub field: String,
}

#[derive(Serialize, Deserialize)]
pub struct CreateBucketConfiguration {
    #[serde(rename = "LocationConstraint")]
    pub location_constraint: String,
}

@@ -31,13 +31,15 @@ use crate::disk::{
use crate::erasure_coding;
use crate::erasure_coding::bitrot_verify;
use crate::error::{Error, Result, is_err_version_not_found};
use crate::error::{ObjectApiError, is_err_object_not_found};
use crate::error::{GenericError, ObjectApiError, is_err_object_not_found};
use crate::global::{GLOBAL_LocalNodeName, GLOBAL_TierConfigMgr};
use crate::store_api::ListObjectVersionsInfo;
use crate::store_api::{ListPartsInfo, ObjectOptions, ObjectToDelete};
use crate::store_api::{ObjectInfoOrErr, WalkOptions};
use crate::{
    bucket::lifecycle::bucket_lifecycle_ops::{gen_transition_objname, get_transitioned_object_reader, put_restore_opts},
    bucket::lifecycle::bucket_lifecycle_ops::{
        LifecycleOps, gen_transition_objname, get_transitioned_object_reader, put_restore_opts,
    },
    cache_value::metacache_set::{ListPathRawOptions, list_path_raw},
    config::{GLOBAL_STORAGE_CLASS, storageclass},
    disk::{
@@ -96,7 +98,7 @@ use std::{
};
use time::OffsetDateTime;
use tokio::{
    io::AsyncWrite,
    io::{AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader},
    sync::{RwLock, broadcast},
};
use tokio::{
@@ -3419,7 +3421,7 @@ impl SetDisks {
    oi.user_defined.remove(X_AMZ_RESTORE.as_str());

    let version_id = oi.version_id.map(|v| v.to_string());
    let obj = self
    let _obj = self
        .copy_object(
            bucket,
            object,
@@ -3435,8 +3437,7 @@ impl SetDisks {
            ..Default::default()
        },
    )
    .await;
    obj?;
    .await?;
    Ok(())
}

@@ -3536,7 +3537,10 @@ impl ObjectIO for SetDisks {
        return Ok(reader);
    }

    // TODO: remote
    if object_info.is_remote() {
        let gr = get_transitioned_object_reader(bucket, object, &range, &h, &object_info, opts).await?;
        return Ok(gr);
    }

    let (rd, wd) = tokio::io::duplex(DEFAULT_READ_BUFFER_SIZE);

@@ -4565,7 +4569,7 @@ impl StorageAPI for SetDisks {
    let tgt_client = match tier_config_mgr.get_driver(&opts.transition.tier).await {
        Ok(client) => client,
        Err(err) => {
            return Err(Error::other(err.to_string()));
            return Err(Error::other(format!("remote tier error: {}", err)));
        }
    };

@@ -4594,10 +4598,10 @@ impl StorageAPI for SetDisks {
    // Normalize ETags by removing quotes before comparison (PR #592 compatibility)
    let transition_etag = rustfs_utils::path::trim_etag(&opts.transition.etag);
    let stored_etag = rustfs_utils::path::trim_etag(&get_raw_etag(&fi.metadata));
    if !opts.mod_time.expect("err").unix_timestamp() == fi.mod_time.as_ref().expect("err").unix_timestamp()
    if opts.mod_time.expect("err").unix_timestamp() != fi.mod_time.as_ref().expect("err").unix_timestamp()
        || transition_etag != stored_etag
    {
        return Err(to_object_err(Error::from(DiskError::FileNotFound), vec![bucket, object]));
        return Err(to_object_err(Error::other(DiskError::FileNotFound), vec![bucket, object]));
    }
    if fi.transition_status == TRANSITION_COMPLETE {
        return Ok(());
@@ -4699,7 +4703,7 @@ impl StorageAPI for SetDisks {
}

#[tracing::instrument(level = "debug", skip(self))]
async fn restore_transitioned_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
    // Acquire write-lock early for the restore operation
    // if !opts.no_lock {
    //     let guard_opt = self
@@ -4711,6 +4715,7 @@ impl StorageAPI for SetDisks {
    //     }
    //     _lock_guard = guard_opt;
    // }
    let self_ = self.clone();
    let set_restore_header_fn = async move |oi: &mut ObjectInfo, rerr: Option<Error>| -> Result<()> {
        if rerr.is_none() {
            return Ok(());
@@ -4719,54 +4724,79 @@ impl StorageAPI for SetDisks {
        Err(rerr.unwrap())
    };
    let mut oi = ObjectInfo::default();
    let fi = self.get_object_fileinfo(bucket, object, opts, true).await;
    let fi = self_.clone().get_object_fileinfo(bucket, object, opts, true).await;
    if let Err(err) = fi {
        return set_restore_header_fn(&mut oi, Some(to_object_err(err, vec![bucket, object]))).await;
    }
    let (actual_fi, _, _) = fi.unwrap();

    oi = ObjectInfo::from_file_info(&actual_fi, bucket, object, opts.versioned || opts.version_suspended);
    let ropts = put_restore_opts(bucket, object, &opts.transition.restore_request, &oi);
    /*if oi.parts.len() == 1 {
        let mut rs: HTTPRangeSpec;
        let gr = get_transitioned_object_reader(bucket, object, rs, HeaderMap::new(), oi, opts);
        //if err != nil {
        //  return set_restore_header_fn(&mut oi, Some(toObjectErr(err, bucket, object)));
        //}
        let hash_reader = HashReader::new(gr, gr.obj_info.size, "", "", gr.obj_info.size);
        let p_reader = PutObjReader::new(StreamingBlob::from(Box::pin(hash_reader)), hash_reader.size());
        if let Err(err) = self.put_object(bucket, object, &mut p_reader, &ropts).await {
            return set_restore_header_fn(&mut oi, Some(to_object_err(err, vec![bucket, object])));
    let ropts = put_restore_opts(bucket, object, &opts.transition.restore_request, &oi).await?;
    if oi.parts.len() == 1 {
        let rs: Option<HTTPRangeSpec> = None;
        let gr = get_transitioned_object_reader(bucket, object, &rs, &HeaderMap::new(), &oi, opts).await;
        if let Err(err) = gr {
            return set_restore_header_fn(&mut oi, Some(to_object_err(err.into(), vec![bucket, object]))).await;
        }
        let gr = gr.unwrap();
        let reader = BufReader::new(gr.stream);
        let hash_reader = HashReader::new(
            Box::new(WarpReader::new(reader)),
            gr.object_info.size,
            gr.object_info.size,
            None,
            None,
            false,
        )?;
        let mut p_reader = PutObjReader::new(hash_reader);
        if let Err(err) = self_.clone().put_object(bucket, object, &mut p_reader, &ropts).await {
            return set_restore_header_fn(&mut oi, Some(to_object_err(err, vec![bucket, object]))).await;
        } else {
            return Ok(());
        }
    }

    let res = self.new_multipart_upload(bucket, object, &ropts).await?;
    let res = self_.clone().new_multipart_upload(bucket, object, &ropts).await?;
    //if err != nil {
    //  return set_restore_header_fn(&mut oi, err);
    //  return set_restore_header_fn(&mut oi, err).await;
    //}

    let mut uploaded_parts: Vec<CompletePart> = vec![];
    let mut rs: HTTPRangeSpec;
    let gr = get_transitioned_object_reader(bucket, object, rs, HeaderMap::new(), oi, opts).await?;
    //if err != nil {
    //  return set_restore_header_fn(&mut oi, err);
    //}
    let rs: Option<HTTPRangeSpec> = None;
    let gr = get_transitioned_object_reader(bucket, object, &rs, &HeaderMap::new(), &oi, opts).await;
    if let Err(err) = gr {
        return set_restore_header_fn(&mut oi, Some(StorageError::Io(err))).await;
    }
    let gr = gr.unwrap();

    for part_info in oi.parts {
        //let hr = HashReader::new(LimitReader(gr, part_info.size), part_info.size, "", "", part_info.size);
        let hr = HashReader::new(gr, part_info.size as i64, part_info.size as i64, None, false);
        //if err != nil {
        //  return set_restore_header_fn(&mut oi, err);
        //}
        let mut p_reader = PutObjReader::new(hr, hr.size());
        let p_info = self.put_object_part(bucket, object, &res.upload_id, part_info.number, &mut p_reader, &ObjectOptions::default()).await?;
    for part_info in &oi.parts {
        let reader = BufReader::new(Cursor::new(vec![] /*gr.stream*/));
        let hash_reader = HashReader::new(
            Box::new(WarpReader::new(reader)),
            part_info.size as i64,
            part_info.size as i64,
            None,
            None,
            false,
        )?;
        let mut p_reader = PutObjReader::new(hash_reader);
        let p_info = self_
            .clone()
            .put_object_part(bucket, object, &res.upload_id, part_info.number, &mut p_reader, &ObjectOptions::default())
            .await?;
        //if let Err(err) = p_info {
        //  return set_restore_header_fn(&mut oi, err);
        //  return set_restore_header_fn(&mut oi, err).await;
        //}
        if p_info.size != part_info.size {
            return set_restore_header_fn(&mut oi, Some(Error::from(ObjectApiError::InvalidObjectState(GenericError{bucket: bucket.to_string(), object: object.to_string(), ..Default::default()}))));
            return set_restore_header_fn(
                &mut oi,
                Some(Error::other(ObjectApiError::InvalidObjectState(GenericError {
                    bucket: bucket.to_string(),
                    object: object.to_string(),
                    ..Default::default()
                }))),
            )
            .await;
        }
        uploaded_parts.push(CompletePart {
            part_num: p_info.part_num,
@@ -4778,12 +4808,22 @@ impl StorageAPI for SetDisks {
            checksum_crc64nvme: None,
        });
    }
    if let Err(err) = self.complete_multipart_upload(bucket, object, &res.upload_id, uploaded_parts, &ObjectOptions {
        mod_time: oi.mod_time,
        ..Default::default()
    }).await {
        set_restore_header_fn(&mut oi, Some(err));
    }*/
    if let Err(err) = self_
        .clone()
        .complete_multipart_upload(
            bucket,
            object,
            &res.upload_id,
            uploaded_parts,
            &ObjectOptions {
                mod_time: oi.mod_time,
                ..Default::default()
            },
        )
        .await
    {
        return set_restore_header_fn(&mut oi, Some(err)).await;
    }
    Ok(())
}

@@ -646,7 +646,7 @@ impl StorageAPI for Sets {
}

#[tracing::instrument(skip(self))]
async fn restore_transitioned_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
    self.get_disks_by_key(object)
        .restore_transitioned_object(bucket, object, opts)
        .await

@@ -1864,17 +1864,20 @@ impl StorageAPI for ECStore {
}

#[tracing::instrument(skip(self))]
async fn restore_transitioned_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
    let object = encode_dir_object(object);
    if self.single_pool() {
        return self.pools[0].restore_transitioned_object(bucket, &object, opts).await;
        return self.pools[0].clone().restore_transitioned_object(bucket, &object, opts).await;
    }

    //opts.skip_decommissioned = true;
    //opts.nolock = true;
    let idx = self.get_pool_idx_existing_with_opts(bucket, object.as_str(), opts).await?;

    self.pools[idx].restore_transitioned_object(bucket, &object, opts).await
    self.pools[idx]
        .clone()
        .restore_transitioned_object(bucket, &object, opts)
        .await
}

#[tracing::instrument(skip(self))]

@@ -1325,7 +1325,7 @@ pub trait StorageAPI: ObjectIO + Debug {
    async fn get_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<String>;
    async fn add_partial(&self, bucket: &str, object: &str, version_id: &str) -> Result<()>;
    async fn transition_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
    async fn restore_transitioned_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
    async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
    async fn put_object_tags(&self, bucket: &str, object: &str, tags: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
async fn delete_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
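
restore_transitioned_object switches its receiver from &self to self: Arc<Self> across the trait and every implementer, which is why call sites above now go through .clone(). A distilled, self-contained example of that receiver form:

use std::sync::Arc;

trait Restore {
    fn restore(self: Arc<Self>, object: &str) -> String;
}

struct Pool {
    name: String,
}

impl Restore for Pool {
    fn restore(self: Arc<Self>, object: &str) -> String {
        // `self` is an owned Arc here, so it can be moved into spawned tasks;
        // callers invoke it as `pool.clone().restore(...)`, matching the
        // `.clone()` calls in the hunks above.
        format!("{} restoring {}", self.name, object)
    }
}

fn main() {
    let pool = Arc::new(Pool { name: "pool-0".into() });
    assert_eq!(pool.clone().restore("a/b"), "pool-0 restoring a/b");
}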

@@ -18,6 +18,13 @@ pub mod tier_config;
pub mod tier_gen;
pub mod tier_handlers;
pub mod warm_backend;
pub mod warm_backend_aliyun;
pub mod warm_backend_azure;
pub mod warm_backend_gcs;
pub mod warm_backend_huaweicloud;
pub mod warm_backend_minio;
pub mod warm_backend_r2;
pub mod warm_backend_rustfs;
pub mod warm_backend_s3;
pub mod warm_backend_s3sdk;
pub mod warm_backend_tencent;

@@ -141,8 +141,8 @@ impl TierConfigMgr {
    (TierType::Unsupported, false)
}

pub async fn add(&mut self, tier: TierConfig, force: bool) -> std::result::Result<(), AdminError> {
    let tier_name = &tier.name;
pub async fn add(&mut self, tier_config: TierConfig, force: bool) -> std::result::Result<(), AdminError> {
    let tier_name = &tier_config.name;
    if tier_name != tier_name.to_uppercase().as_str() {
        return Err(ERR_TIER_NAME_NOT_UPPERCASE.clone());
    }
@@ -152,7 +152,7 @@ impl TierConfigMgr {
        return Err(ERR_TIER_ALREADY_EXISTS.clone());
    }

    let d = new_warm_backend(&tier, true).await?;
    let d = new_warm_backend(&tier_config, true).await?;

    if !force {
        let in_use = d.in_use().await;
@@ -180,7 +180,7 @@ impl TierConfigMgr {
    }

    self.driver_cache.insert(tier_name.to_string(), d);
    self.tiers.insert(tier_name.to_string(), tier);
    self.tiers.insert(tier_name.to_string(), tier_config);

    Ok(())
}
@@ -260,10 +260,10 @@ impl TierConfigMgr {
        return Err(ERR_TIER_NOT_FOUND.clone());
    }

    let mut cfg = self.tiers[tier_name].clone();
    let mut tier_config = self.tiers[tier_name].clone();
    match tier_type {
        TierType::S3 => {
            let mut s3 = cfg.s3.as_mut().expect("err");
            let mut s3 = tier_config.s3.as_mut().expect("err");
            if creds.aws_role {
                s3.aws_role = true
            }
@@ -277,7 +277,7 @@ impl TierConfigMgr {
            }
        }
        TierType::RustFS => {
            let mut rustfs = cfg.rustfs.as_mut().expect("err");
            let mut rustfs = tier_config.rustfs.as_mut().expect("err");
            if creds.access_key == "" || creds.secret_key == "" {
                return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
            }
@@ -285,18 +285,65 @@ impl TierConfigMgr {
            rustfs.secret_key = creds.secret_key;
        }
        TierType::MinIO => {
            let mut minio = cfg.minio.as_mut().expect("err");
            let mut minio = tier_config.minio.as_mut().expect("err");
            if creds.access_key == "" || creds.secret_key == "" {
                return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
            }
            minio.access_key = creds.access_key;
            minio.secret_key = creds.secret_key;
        }
        TierType::Aliyun => {
            let mut aliyun = tier_config.aliyun.as_mut().expect("err");
            if creds.access_key == "" || creds.secret_key == "" {
                return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
            }
            aliyun.access_key = creds.access_key;
            aliyun.secret_key = creds.secret_key;
        }
        TierType::Tencent => {
            let mut tencent = tier_config.tencent.as_mut().expect("err");
            if creds.access_key == "" || creds.secret_key == "" {
                return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
            }
            tencent.access_key = creds.access_key;
            tencent.secret_key = creds.secret_key;
        }
        TierType::Huaweicloud => {
            let mut huaweicloud = tier_config.huaweicloud.as_mut().expect("err");
            if creds.access_key == "" || creds.secret_key == "" {
                return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
            }
            huaweicloud.access_key = creds.access_key;
            huaweicloud.secret_key = creds.secret_key;
        }
        TierType::Azure => {
            let mut azure = tier_config.azure.as_mut().expect("err");
            if creds.access_key == "" || creds.secret_key == "" {
                return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
            }
            azure.access_key = creds.access_key;
            azure.secret_key = creds.secret_key;
        }
        TierType::GCS => {
            let mut gcs = tier_config.gcs.as_mut().expect("err");
            if creds.access_key == "" || creds.secret_key == "" {
                return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
            }
            gcs.creds = creds.access_key; //creds.creds_json
        }
        TierType::R2 => {
            let mut r2 = tier_config.r2.as_mut().expect("err");
            if creds.access_key == "" || creds.secret_key == "" {
                return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
            }
            r2.access_key = creds.access_key;
            r2.secret_key = creds.secret_key;
        }
        _ => (),
    }

    let d = new_warm_backend(&cfg, true).await?;
    self.tiers.insert(tier_name.to_string(), cfg);
    let d = new_warm_backend(&tier_config, true).await?;
    self.tiers.insert(tier_name.to_string(), tier_config);
    self.driver_cache.insert(tier_name.to_string(), d);
    Ok(())
}

@@ -26,14 +26,22 @@ pub enum TierType {
    Unsupported,
    #[serde(rename = "s3")]
    S3,
    #[serde(rename = "azure")]
    Azure,
    #[serde(rename = "gcs")]
    GCS,
    #[serde(rename = "rustfs")]
    RustFS,
    #[serde(rename = "minio")]
    MinIO,
    #[serde(rename = "aliyun")]
    Aliyun,
    #[serde(rename = "tencent")]
    Tencent,
    #[serde(rename = "huaweicloud")]
    Huaweicloud,
    #[serde(rename = "azure")]
    Azure,
    #[serde(rename = "gcs")]
    GCS,
    #[serde(rename = "r2")]
    R2,
}
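
TierType now covers aliyun, tencent, huaweicloud, azure, gcs, and r2, each with a lowercase serde rename. A quick round-trip check against a stand-in copy of the enum; serde_json is assumed only for the demonstration.

use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
enum TierType {
    #[serde(rename = "s3")]
    S3,
    #[serde(rename = "huaweicloud")]
    Huaweicloud,
    #[serde(rename = "r2")]
    R2,
}

fn main() {
    // Serialization emits the renamed, lowercase tag...
    assert_eq!(serde_json::to_string(&TierType::R2).unwrap(), "\"r2\"");
    // ...and deserialization accepts it back.
    let t: TierType = serde_json::from_str("\"huaweicloud\"").unwrap();
    assert_eq!(t, TierType::Huaweicloud);
}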

impl Display for TierType {
@@ -48,6 +56,24 @@ impl Display for TierType {
            TierType::MinIO => {
                write!(f, "MinIO")
            }
            TierType::Aliyun => {
                write!(f, "Aliyun")
            }
            TierType::Tencent => {
                write!(f, "Tencent")
            }
            TierType::Huaweicloud => {
                write!(f, "Huaweicloud")
            }
            TierType::Azure => {
                write!(f, "Azure")
            }
            TierType::GCS => {
                write!(f, "GCS")
            }
            TierType::R2 => {
                write!(f, "R2")
            }
            _ => {
                write!(f, "Unsupported")
            }
@@ -61,6 +87,12 @@ impl TierType {
            "S3" => TierType::S3,
            "RustFS" => TierType::RustFS,
            "MinIO" => TierType::MinIO,
            "Aliyun" => TierType::Aliyun,
            "Tencent" => TierType::Tencent,
            "Huaweicloud" => TierType::Huaweicloud,
            "Azure" => TierType::Azure,
            "GCS" => TierType::GCS,
            "R2" => TierType::R2,
            _ => TierType::Unsupported,
        }
    }
@@ -70,6 +102,12 @@ impl TierType {
            TierType::S3 => "s3".to_string(),
            TierType::RustFS => "rustfs".to_string(),
            TierType::MinIO => "minio".to_string(),
            TierType::Aliyun => "aliyun".to_string(),
            TierType::Tencent => "tencent".to_string(),
            TierType::Huaweicloud => "huaweicloud".to_string(),
            TierType::Azure => "azure".to_string(),
            TierType::GCS => "gcs".to_string(),
            TierType::R2 => "r2".to_string(),
            _ => "unsupported".to_string(),
        }
    }
@@ -86,8 +124,18 @@ pub struct TierConfig {
    pub name: String,
    #[serde(rename = "s3", skip_serializing_if = "Option::is_none")]
    pub s3: Option<TierS3>,
    //TODO: azure: Option<TierAzure>,
    //TODO: gcs: Option<TierGCS>,
    #[serde(rename = "aliyun", skip_serializing_if = "Option::is_none")]
    pub aliyun: Option<TierAliyun>,
    #[serde(rename = "tencent", skip_serializing_if = "Option::is_none")]
    pub tencent: Option<TierTencent>,
    #[serde(rename = "huaweicloud", skip_serializing_if = "Option::is_none")]
    pub huaweicloud: Option<TierHuaweicloud>,
    #[serde(rename = "azure", skip_serializing_if = "Option::is_none")]
    pub azure: Option<TierAzure>,
    #[serde(rename = "gcs", skip_serializing_if = "Option::is_none")]
    pub gcs: Option<TierGCS>,
    #[serde(rename = "r2", skip_serializing_if = "Option::is_none")]
    pub r2: Option<TierR2>,
    #[serde(rename = "rustfs", skip_serializing_if = "Option::is_none")]
    pub rustfs: Option<TierRustFS>,
    #[serde(rename = "minio", skip_serializing_if = "Option::is_none")]
@@ -97,10 +145,14 @@ pub struct TierConfig {
impl Clone for TierConfig {
    fn clone(&self) -> TierConfig {
        let mut s3 = None;
        //az TierAzure
        //gcs TierGCS
        let mut r = None;
        let mut m = None;
        let mut aliyun = None;
        let mut tencent = None;
        let mut huaweicloud = None;
        let mut azure = None;
        let mut gcs = None;
        let mut r2 = None;
        match self.tier_type {
            TierType::S3 => {
                let mut s3_ = self.s3.as_ref().expect("err").clone();
@@ -117,6 +169,36 @@ impl Clone for TierConfig {
                m_.secret_key = "REDACTED".to_string();
                m = Some(m_);
            }
            TierType::Aliyun => {
                let mut aliyun_ = self.aliyun.as_ref().expect("err").clone();
                aliyun_.secret_key = "REDACTED".to_string();
                aliyun = Some(aliyun_);
            }
            TierType::Tencent => {
                let mut tencent_ = self.tencent.as_ref().expect("err").clone();
                tencent_.secret_key = "REDACTED".to_string();
                tencent = Some(tencent_);
            }
            TierType::Huaweicloud => {
                let mut huaweicloud_ = self.huaweicloud.as_ref().expect("err").clone();
                huaweicloud_.secret_key = "REDACTED".to_string();
                huaweicloud = Some(huaweicloud_);
            }
            TierType::Azure => {
                let mut azure_ = self.azure.as_ref().expect("err").clone();
                azure_.secret_key = "REDACTED".to_string();
                azure = Some(azure_);
            }
            TierType::GCS => {
                let mut gcs_ = self.gcs.as_ref().expect("err").clone();
                gcs_.creds = "REDACTED".to_string();
                gcs = Some(gcs_);
            }
            TierType::R2 => {
                let mut r2_ = self.r2.as_ref().expect("err").clone();
                r2_.secret_key = "REDACTED".to_string();
                r2 = Some(r2_);
            }
            _ => (),
        }
        TierConfig {
@@ -126,6 +208,12 @@ impl Clone for TierConfig {
            s3,
            rustfs: r,
            minio: m,
            aliyun,
            tencent,
            huaweicloud,
            azure,
            gcs,
            r2,
        }
    }
}
@@ -137,6 +225,12 @@ impl TierConfig {
            TierType::S3 => self.s3.as_ref().expect("err").endpoint.clone(),
            TierType::RustFS => self.rustfs.as_ref().expect("err").endpoint.clone(),
            TierType::MinIO => self.minio.as_ref().expect("err").endpoint.clone(),
            TierType::Aliyun => self.aliyun.as_ref().expect("err").endpoint.clone(),
            TierType::Tencent => self.tencent.as_ref().expect("err").endpoint.clone(),
            TierType::Huaweicloud => self.huaweicloud.as_ref().expect("err").endpoint.clone(),
            TierType::Azure => self.azure.as_ref().expect("err").endpoint.clone(),
            TierType::GCS => self.gcs.as_ref().expect("err").endpoint.clone(),
            TierType::R2 => self.r2.as_ref().expect("err").endpoint.clone(),
            _ => {
                info!("unexpected tier type {}", self.tier_type);
                "".to_string()
@@ -149,6 +243,12 @@ impl TierConfig {
            TierType::S3 => self.s3.as_ref().expect("err").bucket.clone(),
            TierType::RustFS => self.rustfs.as_ref().expect("err").bucket.clone(),
            TierType::MinIO => self.minio.as_ref().expect("err").bucket.clone(),
            TierType::Aliyun => self.aliyun.as_ref().expect("err").bucket.clone(),
            TierType::Tencent => self.tencent.as_ref().expect("err").bucket.clone(),
            TierType::Huaweicloud => self.huaweicloud.as_ref().expect("err").bucket.clone(),
            TierType::Azure => self.azure.as_ref().expect("err").bucket.clone(),
            TierType::GCS => self.gcs.as_ref().expect("err").bucket.clone(),
            TierType::R2 => self.r2.as_ref().expect("err").bucket.clone(),
            _ => {
                info!("unexpected tier type {}", self.tier_type);
                "".to_string()
@@ -161,6 +261,12 @@ impl TierConfig {
            TierType::S3 => self.s3.as_ref().expect("err").prefix.clone(),
            TierType::RustFS => self.rustfs.as_ref().expect("err").prefix.clone(),
            TierType::MinIO => self.minio.as_ref().expect("err").prefix.clone(),
            TierType::Aliyun => self.aliyun.as_ref().expect("err").prefix.clone(),
            TierType::Tencent => self.tencent.as_ref().expect("err").prefix.clone(),
            TierType::Huaweicloud => self.huaweicloud.as_ref().expect("err").prefix.clone(),
            TierType::Azure => self.azure.as_ref().expect("err").prefix.clone(),
            TierType::GCS => self.gcs.as_ref().expect("err").prefix.clone(),
            TierType::R2 => self.r2.as_ref().expect("err").prefix.clone(),
            _ => {
                info!("unexpected tier type {}", self.tier_type);
                "".to_string()
@@ -173,6 +279,12 @@ impl TierConfig {
            TierType::S3 => self.s3.as_ref().expect("err").region.clone(),
            TierType::RustFS => self.rustfs.as_ref().expect("err").region.clone(),
            TierType::MinIO => self.minio.as_ref().expect("err").region.clone(),
            TierType::Aliyun => self.aliyun.as_ref().expect("err").region.clone(),
            TierType::Tencent => self.tencent.as_ref().expect("err").region.clone(),
            TierType::Huaweicloud => self.huaweicloud.as_ref().expect("err").region.clone(),
            TierType::Azure => self.azure.as_ref().expect("err").region.clone(),
            TierType::GCS => self.gcs.as_ref().expect("err").region.clone(),
            TierType::R2 => self.r2.as_ref().expect("err").region.clone(),
            _ => {
                info!("unexpected tier type {}", self.tier_type);
                "".to_string()
@@ -319,3 +431,152 @@ impl TierMinIO {
        })
    }
}

#[derive(Serialize, Deserialize, Default, Debug, Clone)]
#[serde(default)]
pub struct TierAliyun {
    pub name: String,
    pub endpoint: String,
    #[serde(rename = "accessKey")]
    pub access_key: String,
    #[serde(rename = "secretKey")]
    pub secret_key: String,
    pub bucket: String,
    pub prefix: String,
    pub region: String,
}

#[derive(Serialize, Deserialize, Default, Debug, Clone)]
#[serde(default)]
pub struct TierTencent {
    pub name: String,
    pub endpoint: String,
    #[serde(rename = "accessKey")]
    pub access_key: String,
    #[serde(rename = "secretKey")]
    pub secret_key: String,
    pub bucket: String,
    pub prefix: String,
    pub region: String,
}

#[derive(Serialize, Deserialize, Default, Debug, Clone)]
#[serde(default)]
pub struct TierHuaweicloud {
    pub name: String,
    pub endpoint: String,
    #[serde(rename = "accessKey")]
    pub access_key: String,
    #[serde(rename = "secretKey")]
    pub secret_key: String,
    pub bucket: String,
    pub prefix: String,
    pub region: String,
}

#[derive(Serialize, Deserialize, Default, Debug, Clone)]
#[serde(default)]
pub struct ServicePrincipalAuth {
    pub tenant_id: String,
    pub client_id: String,
    pub client_secret: String,
}

#[derive(Serialize, Deserialize, Default, Debug, Clone)]
#[serde(default)]
pub struct TierAzure {
    pub name: String,
    pub endpoint: String,
    #[serde(rename = "accessKey")]
    pub access_key: String,
    #[serde(rename = "secretKey")]
    pub secret_key: String,
    pub bucket: String,
    pub prefix: String,
    pub region: String,
    #[serde(rename = "storageClass")]
    pub storage_class: String,
    #[serde(rename = "spAuth")]
    pub sp_auth: ServicePrincipalAuth,
}

impl TierAzure {
    pub fn is_sp_enabled(&self) -> bool {
        !self.sp_auth.tenant_id.is_empty() && !self.sp_auth.client_id.is_empty() && !self.sp_auth.client_secret.is_empty()
    }
}

/*
fn AzureServicePrincipal(tenantID, clientID, clientSecret string) func(az *TierAzure) error {
    return func(az *TierAzure) error {
        if tenantID == "" {
            return errors.New("empty tenant ID unsupported")
        }
        if clientID == "" {
            return errors.New("empty client ID unsupported")
        }
        if clientSecret == "" {
            return errors.New("empty client secret unsupported")
        }
        az.SPAuth.TenantID = tenantID
        az.SPAuth.ClientID = clientID
        az.SPAuth.ClientSecret = clientSecret
        return nil
    }
}

fn AzurePrefix(prefix string) func(az *TierAzure) error {
    return func(az *TierAzure) error {
        az.Prefix = prefix
        return nil
    }
}

fn AzureEndpoint(endpoint string) func(az *TierAzure) error {
    return func(az *TierAzure) error {
        az.Endpoint = endpoint
        return nil
    }
}

fn AzureRegion(region string) func(az *TierAzure) error {
    return func(az *TierAzure) error {
        az.Region = region
        return nil
    }
}

fn AzureStorageClass(sc string) func(az *TierAzure) error {
    return func(az *TierAzure) error {
        az.StorageClass = sc
        return nil
    }
}*/

#[derive(Serialize, Deserialize, Default, Debug, Clone)]
#[serde(default)]
pub struct TierGCS {
    pub name: String,
    pub endpoint: String,
    #[serde(rename = "creds")]
    pub creds: String,
    pub bucket: String,
    pub prefix: String,
    pub region: String,
    #[serde(rename = "storageClass")]
    pub storage_class: String,
}

#[derive(Serialize, Deserialize, Default, Debug, Clone)]
#[serde(default)]
pub struct TierR2 {
    pub name: String,
    pub endpoint: String,
    #[serde(rename = "accessKey")]
    pub access_key: String,
    #[serde(rename = "secretKey")]
    pub secret_key: String,
    pub bucket: String,
    pub prefix: String,
    pub region: String,
}
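
The new tier structs all share the same shape: camelCase renames for the credential fields plus #[serde(default)] so partial configs still deserialize. A round-trip sketch with a stand-in copy of TierR2; the endpoint and keys below are placeholders, not real values.

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Default, Debug)]
#[serde(default)]
struct TierR2 {
    name: String,
    endpoint: String,
    #[serde(rename = "accessKey")]
    access_key: String,
    #[serde(rename = "secretKey")]
    secret_key: String,
    bucket: String,
    prefix: String,
    region: String,
}

fn main() {
    // Illustrative config payload; whitespace is valid inside JSON.
    let json = r#"{"name":"COLD","endpoint":"https://example.r2.cloudflarestorage.com",
                   "accessKey":"AK","secretKey":"SK","bucket":"archive","prefix":"ilm/","region":"auto"}"#;
    let t: TierR2 = serde_json::from_str(json).unwrap();
    assert_eq!(t.bucket, "archive");
    println!("{}", serde_json::to_string(&t).unwrap());
}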
|
||||
|
||||
@@ -27,9 +27,15 @@ use crate::tier::{
|
||||
tier::ERR_TIER_TYPE_UNSUPPORTED,
|
||||
tier_config::{TierConfig, TierType},
|
||||
tier_handlers::{ERR_TIER_BUCKET_NOT_FOUND, ERR_TIER_PERM_ERR},
|
||||
warm_backend_aliyun::WarmBackendAliyun,
|
||||
warm_backend_azure::WarmBackendAzure,
|
||||
warm_backend_gcs::WarmBackendGCS,
|
||||
warm_backend_huaweicloud::WarmBackendHuaweicloud,
|
||||
warm_backend_minio::WarmBackendMinIO,
|
||||
warm_backend_r2::WarmBackendR2,
|
||||
warm_backend_rustfs::WarmBackendRustFS,
|
||||
warm_backend_s3::WarmBackendS3,
|
||||
warm_backend_tencent::WarmBackendTencent,
|
||||
};
|
||||
use bytes::Bytes;
|
||||
use http::StatusCode;
|
||||
@@ -128,6 +134,78 @@ pub async fn new_warm_backend(tier: &TierConfig, probe: bool) -> Result<WarmBack
            }
            d = Some(Box::new(dd.expect("err")));
        }
        TierType::Aliyun => {
            let dd = WarmBackendAliyun::new(tier.aliyun.as_ref().expect("err"), &tier.name).await;
            if let Err(err) = dd {
                warn!("{}", err);
                return Err(AdminError {
                    code: "XRustFSAdminTierInvalidConfig".to_string(),
                    message: format!("Unable to setup remote tier, check tier configuration: {}", err),
                    status_code: StatusCode::BAD_REQUEST,
                });
            }
            d = Some(Box::new(dd.expect("err")));
        }
        TierType::Tencent => {
            let dd = WarmBackendTencent::new(tier.tencent.as_ref().expect("err"), &tier.name).await;
            if let Err(err) = dd {
                warn!("{}", err);
                return Err(AdminError {
                    code: "XRustFSAdminTierInvalidConfig".to_string(),
                    message: format!("Unable to setup remote tier, check tier configuration: {}", err),
                    status_code: StatusCode::BAD_REQUEST,
                });
            }
            d = Some(Box::new(dd.expect("err")));
        }
        TierType::Huaweicloud => {
            let dd = WarmBackendHuaweicloud::new(tier.huaweicloud.as_ref().expect("err"), &tier.name).await;
            if let Err(err) = dd {
                warn!("{}", err);
                return Err(AdminError {
                    code: "XRustFSAdminTierInvalidConfig".to_string(),
                    message: format!("Unable to setup remote tier, check tier configuration: {}", err),
                    status_code: StatusCode::BAD_REQUEST,
                });
            }
            d = Some(Box::new(dd.expect("err")));
        }
        TierType::Azure => {
            let dd = WarmBackendAzure::new(tier.azure.as_ref().expect("err"), &tier.name).await;
            if let Err(err) = dd {
                warn!("{}", err);
                return Err(AdminError {
                    code: "XRustFSAdminTierInvalidConfig".to_string(),
                    message: format!("Unable to setup remote tier, check tier configuration: {}", err),
                    status_code: StatusCode::BAD_REQUEST,
                });
            }
            d = Some(Box::new(dd.expect("err")));
        }
        TierType::GCS => {
            let dd = WarmBackendGCS::new(tier.gcs.as_ref().expect("err"), &tier.name).await;
            if let Err(err) = dd {
                warn!("{}", err);
                return Err(AdminError {
                    code: "XRustFSAdminTierInvalidConfig".to_string(),
                    message: format!("Unable to setup remote tier, check tier configuration: {}", err),
                    status_code: StatusCode::BAD_REQUEST,
                });
            }
            d = Some(Box::new(dd.expect("err")));
        }
        TierType::R2 => {
            let dd = WarmBackendR2::new(tier.r2.as_ref().expect("err"), &tier.name).await;
            if let Err(err) = dd {
                warn!("{}", err);
                return Err(AdminError {
                    code: "XRustFSAdminTierInvalidConfig".to_string(),
                    message: format!("Unable to setup remote tier, check tier configuration: {}", err),
                    status_code: StatusCode::BAD_REQUEST,
                });
            }
            d = Some(Box::new(dd.expect("err")));
        }
        _ => {
            return Err(ERR_TIER_TYPE_UNSUPPORTED.clone());
        }

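Editor's note: each backend arm above repeats the same construct-or-fail mapping. As a sketch only (not part of this commit), the duplication could be factored into a local macro; trait-object coercion of the `Box` may need an explicit annotation:

macro_rules! setup_backend {
    ($ctor:path, $conf:expr, $name:expr) => {{
        match $ctor($conf.as_ref().expect("err"), $name).await {
            Ok(backend) => Box::new(backend),
            Err(err) => {
                warn!("{}", err);
                return Err(AdminError {
                    code: "XRustFSAdminTierInvalidConfig".to_string(),
                    message: format!("Unable to setup remote tier, check tier configuration: {}", err),
                    status_code: StatusCode::BAD_REQUEST,
                });
            }
        }
    }};
}

// Hypothetical usage inside the match:
// TierType::R2 => d = Some(setup_backend!(WarmBackendR2::new, tier.r2, &tier.name)),
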
164
crates/ecstore/src/tier/warm_backend_aliyun.rs
Normal file
@@ -0,0 +1,164 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
#![allow(unused_must_use)]
#![allow(clippy::all)]

use std::collections::HashMap;
use std::sync::Arc;

use crate::client::{
    admin_handler_utils::AdminError,
    api_put_object::PutObjectOptions,
    credentials::{Credentials, SignatureType, Static, Value},
    transition_api::{BucketLookupType, Options, ReadCloser, ReaderImpl, TransitionClient, TransitionCore},
};
use crate::tier::{
    tier_config::TierAliyun,
    warm_backend::{WarmBackend, WarmBackendGetOpts},
    warm_backend_s3::WarmBackendS3,
};
use tracing::warn;

const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
const MAX_PARTS_COUNT: i64 = 10000;
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;

pub struct WarmBackendAliyun(WarmBackendS3);

impl WarmBackendAliyun {
    pub async fn new(conf: &TierAliyun, tier: &str) -> Result<Self, std::io::Error> {
        if conf.access_key == "" || conf.secret_key == "" {
            return Err(std::io::Error::other("both access and secret keys are required"));
        }

        if conf.bucket == "" {
            return Err(std::io::Error::other("no bucket name was provided"));
        }

        let u = match url::Url::parse(&conf.endpoint) {
            Ok(u) => u,
            Err(e) => {
                return Err(std::io::Error::other(e.to_string()));
            }
        };

        let creds = Credentials::new(Static(Value {
            access_key_id: conf.access_key.clone(),
            secret_access_key: conf.secret_key.clone(),
            session_token: "".to_string(),
            signer_type: SignatureType::SignatureV4,
            ..Default::default()
        }));
        let opts = Options {
            creds,
            secure: u.scheme() == "https",
            //transport: GLOBAL_RemoteTargetTransport,
            trailing_headers: true,
            region: conf.region.clone(),
            bucket_lookup: BucketLookupType::BucketLookupDNS,
            ..Default::default()
        };
        let scheme = u.scheme();
        let default_port = if scheme == "https" { 443 } else { 80 };
        let client = TransitionClient::new(
            &format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
            opts,
            "aliyun",
        )
        .await?;

        let client = Arc::new(client);
        let core = TransitionCore(Arc::clone(&client));
        Ok(Self(WarmBackendS3 {
            client,
            core,
            bucket: conf.bucket.clone(),
            prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
            storage_class: "".to_string(),
        }))
    }
}

#[async_trait::async_trait]
impl WarmBackend for WarmBackendAliyun {
    async fn put_with_meta(
        &self,
        object: &str,
        r: ReaderImpl,
        length: i64,
        meta: HashMap<String, String>,
    ) -> Result<String, std::io::Error> {
        let part_size = optimal_part_size(length)?;
        let client = self.0.client.clone();
        let res = client
            .put_object(
                &self.0.bucket,
                &self.0.get_dest(object),
                r,
                length,
                &PutObjectOptions {
                    storage_class: self.0.storage_class.clone(),
                    part_size: part_size as u64,
                    disable_content_sha256: true,
                    user_metadata: meta,
                    ..Default::default()
                },
            )
            .await?;
        //self.ToObjectError(err, object)
        Ok(res.version_id)
    }

    async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
        self.put_with_meta(object, r, length, HashMap::new()).await
    }

    async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
        self.0.get(object, rv, opts).await
    }

    async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
        self.0.remove(object, rv).await
    }

    async fn in_use(&self) -> Result<bool, std::io::Error> {
        self.0.in_use().await
    }
}

fn optimal_part_size(object_size: i64) -> Result<i64, std::io::Error> {
    let mut object_size = object_size;
    if object_size == -1 {
        object_size = MAX_MULTIPART_PUT_OBJECT_SIZE;
    }

    if object_size > MAX_MULTIPART_PUT_OBJECT_SIZE {
        return Err(std::io::Error::other("entity too large"));
    }

    let configured_part_size = MIN_PART_SIZE;
    let mut part_size_flt = object_size as f64 / MAX_PARTS_COUNT as f64;
    part_size_flt = (part_size_flt as f64 / configured_part_size as f64).ceil() * configured_part_size as f64;

    let part_size = part_size_flt as i64;
    if part_size == 0 {
        return Ok(MIN_PART_SIZE);
    }
    Ok(part_size)
}
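
Editor's note: `optimal_part_size` divides the object size across MAX_PARTS_COUNT parts and rounds up to a multiple of MIN_PART_SIZE. A hand-checked illustration (sketch only, not part of the commit):

#[test]
fn optimal_part_size_sketch() {
    // 1 TiB / 10_000 parts ≈ 104.9 MiB, rounded up to one MIN_PART_SIZE unit (128 MiB).
    let one_tib: i64 = 1024 * 1024 * 1024 * 1024;
    assert_eq!(optimal_part_size(one_tib).unwrap(), 128 * 1024 * 1024);
    // Unknown length (-1) is treated as the 5 TiB cap:
    // 5 TiB / 10_000 ≈ 524.3 MiB, rounded up to 5 * 128 MiB = 640 MiB.
    assert_eq!(optimal_part_size(-1).unwrap(), 5 * 128 * 1024 * 1024);
}
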
164
crates/ecstore/src/tier/warm_backend_azure.rs
Normal file
@@ -0,0 +1,164 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
#![allow(unused_must_use)]
#![allow(clippy::all)]

use std::collections::HashMap;
use std::sync::Arc;

use crate::client::{
    admin_handler_utils::AdminError,
    api_put_object::PutObjectOptions,
    credentials::{Credentials, SignatureType, Static, Value},
    transition_api::{BucketLookupType, Options, ReadCloser, ReaderImpl, TransitionClient, TransitionCore},
};
use crate::tier::{
    tier_config::TierAzure,
    warm_backend::{WarmBackend, WarmBackendGetOpts},
    warm_backend_s3::WarmBackendS3,
};
use tracing::warn;

const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
const MAX_PARTS_COUNT: i64 = 10000;
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;

pub struct WarmBackendAzure(WarmBackendS3);

impl WarmBackendAzure {
    pub async fn new(conf: &TierAzure, tier: &str) -> Result<Self, std::io::Error> {
        if conf.access_key == "" || conf.secret_key == "" {
            return Err(std::io::Error::other("both access and secret keys are required"));
        }

        if conf.bucket == "" {
            return Err(std::io::Error::other("no bucket name was provided"));
        }

        let u = match url::Url::parse(&conf.endpoint) {
            Ok(u) => u,
            Err(e) => {
                return Err(std::io::Error::other(e.to_string()));
            }
        };

        let creds = Credentials::new(Static(Value {
            access_key_id: conf.access_key.clone(),
            secret_access_key: conf.secret_key.clone(),
            session_token: "".to_string(),
            signer_type: SignatureType::SignatureV4,
            ..Default::default()
        }));
        let opts = Options {
            creds,
            secure: u.scheme() == "https",
            //transport: GLOBAL_RemoteTargetTransport,
            trailing_headers: true,
            region: conf.region.clone(),
            bucket_lookup: BucketLookupType::BucketLookupDNS,
            ..Default::default()
        };
        let scheme = u.scheme();
        let default_port = if scheme == "https" { 443 } else { 80 };
        let client = TransitionClient::new(
            &format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
            opts,
            "azure",
        )
        .await?;

        let client = Arc::new(client);
        let core = TransitionCore(Arc::clone(&client));
        Ok(Self(WarmBackendS3 {
            client,
            core,
            bucket: conf.bucket.clone(),
            prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
            storage_class: "".to_string(),
        }))
    }
}

#[async_trait::async_trait]
impl WarmBackend for WarmBackendAzure {
    async fn put_with_meta(
        &self,
        object: &str,
        r: ReaderImpl,
        length: i64,
        meta: HashMap<String, String>,
    ) -> Result<String, std::io::Error> {
        let part_size = optimal_part_size(length)?;
        let client = self.0.client.clone();
        let res = client
            .put_object(
                &self.0.bucket,
                &self.0.get_dest(object),
                r,
                length,
                &PutObjectOptions {
                    storage_class: self.0.storage_class.clone(),
                    part_size: part_size as u64,
                    disable_content_sha256: true,
                    user_metadata: meta,
                    ..Default::default()
                },
            )
            .await?;
        //self.ToObjectError(err, object)
        Ok(res.version_id)
    }

    async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
        self.put_with_meta(object, r, length, HashMap::new()).await
    }

    async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
        self.0.get(object, rv, opts).await
    }

    async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
        self.0.remove(object, rv).await
    }

    async fn in_use(&self) -> Result<bool, std::io::Error> {
        self.0.in_use().await
    }
}

fn optimal_part_size(object_size: i64) -> Result<i64, std::io::Error> {
    let mut object_size = object_size;
    if object_size == -1 {
        object_size = MAX_MULTIPART_PUT_OBJECT_SIZE;
    }

    if object_size > MAX_MULTIPART_PUT_OBJECT_SIZE {
        return Err(std::io::Error::other("entity too large"));
    }

    let configured_part_size = MIN_PART_SIZE;
    let mut part_size_flt = object_size as f64 / MAX_PARTS_COUNT as f64;
    part_size_flt = (part_size_flt as f64 / configured_part_size as f64).ceil() * configured_part_size as f64;

    let part_size = part_size_flt as i64;
    if part_size == 0 {
        return Ok(MIN_PART_SIZE);
    }
    Ok(part_size)
}
231
crates/ecstore/src/tier/warm_backend_azure2.rs
Normal file
@@ -0,0 +1,231 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
#![allow(unused_must_use)]
#![allow(clippy::all)]

use std::collections::HashMap;
use std::sync::Arc;

use azure_core::http::{Body, ClientOptions, RequestContent};
use azure_storage::StorageCredentials;
use azure_storage_blobs::prelude::*;

use crate::client::{
    admin_handler_utils::AdminError,
    api_put_object::PutObjectOptions,
    transition_api::{Options, ReadCloser, ReaderImpl},
};
use crate::tier::{
    tier_config::TierAzure,
    warm_backend::{WarmBackend, WarmBackendGetOpts},
};
use tracing::warn;

const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
const MAX_PARTS_COUNT: i64 = 10000;
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;

pub struct WarmBackendAzure {
    pub client: Arc<BlobServiceClient>,
    pub bucket: String,
    pub prefix: String,
    pub storage_class: String,
}

impl WarmBackendAzure {
    pub async fn new(conf: &TierAzure, tier: &str) -> Result<Self, std::io::Error> {
        if conf.access_key == "" || conf.secret_key == "" {
            return Err(std::io::Error::other("both access and secret keys are required"));
        }

        if conf.bucket == "" {
            return Err(std::io::Error::other("no bucket name was provided"));
        }

        let creds = StorageCredentials::access_key(conf.access_key.clone(), conf.secret_key.clone());
        let client = ClientBuilder::new(conf.access_key.clone(), creds)
            //.endpoint(conf.endpoint)
            .blob_service_client();
        let client = Arc::new(client);
        Ok(Self {
            client,
            bucket: conf.bucket.clone(),
            prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
            storage_class: "".to_string(),
        })
    }

    /*pub fn tier(&self) -> *blob.AccessTier {
        if self.storage_class == "" {
            return None;
        }
        for t in blob.PossibleAccessTierValues() {
            if strings.EqualFold(self.storage_class, t) {
                return &t
            }
        }
        None
    }*/

    pub fn get_dest(&self, object: &str) -> String {
        let mut dest_obj = object.to_string();
        if self.prefix != "" {
            dest_obj = format!("{}/{}", &self.prefix, object);
        }
        return dest_obj;
    }
}

#[async_trait::async_trait]
impl WarmBackend for WarmBackendAzure {
    async fn put_with_meta(
        &self,
        object: &str,
        r: ReaderImpl,
        length: i64,
        meta: HashMap<String, String>,
    ) -> Result<String, std::io::Error> {
        let part_size = length;
        let client = self.client.clone();
        let container_client = client.container_client(self.bucket.clone());
        let blob_client = container_client.blob_client(self.get_dest(object));
        /*let res = blob_client
            .upload(
                RequestContent::from(match r {
                    ReaderImpl::Body(content_body) => content_body.to_vec(),
                    ReaderImpl::ObjectBody(mut content_body) => content_body.read_all().await?,
                }),
                false,
                length as u64,
                None,
            )
            .await
        else {
            return Err(std::io::Error::other("upload error"));
        };*/

        let Ok(res) = blob_client
            .put_block_blob(match r {
                ReaderImpl::Body(content_body) => content_body.to_vec(),
                ReaderImpl::ObjectBody(mut content_body) => content_body.read_all().await?,
            })
            .content_type("text/plain")
            .into_future()
            .await
        else {
            return Err(std::io::Error::other("put_block_blob error"));
        };

        //self.ToObjectError(err, object)
        Ok(res.request_id.to_string())
    }

    async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
        self.put_with_meta(object, r, length, HashMap::new()).await
    }

    async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
        let client = self.client.clone();
        let container_client = client.container_client(self.bucket.clone());
        let blob_client = container_client.blob_client(self.get_dest(object));
        blob_client.get();
        todo!();
    }

    async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
        let client = self.client.clone();
        let container_client = client.container_client(self.bucket.clone());
        let blob_client = container_client.blob_client(self.get_dest(object));
        blob_client.delete();
        todo!();
    }

    async fn in_use(&self) -> Result<bool, std::io::Error> {
        /*let result = self.client
            .list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR, 1)
            .await?;

        Ok(result.common_prefixes.len() > 0 || result.contents.len() > 0)*/
        Ok(false)
    }
}
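
Editor's note: the `get` and `remove` bodies above still end in `todo!()`. A possible completion, sketched under the assumption that `azure_storage_blobs` exposes the `get_content` and `delete` builders used below; this is not part of the commit:

// Hypothetical completions for the todo!() bodies above; error mapping is
// collapsed to std::io::Error::other, matching the surrounding style.
async fn get_sketch(be: &WarmBackendAzure, object: &str) -> Result<ReadCloser, std::io::Error> {
    let blob_client = be
        .client
        .container_client(be.bucket.clone())
        .blob_client(be.get_dest(object));
    let Ok(bytes) = blob_client.get_content().await else {
        return Err(std::io::Error::other("get_content error"));
    };
    Ok(ReadCloser::new(std::io::Cursor::new(bytes)))
}

async fn remove_sketch(be: &WarmBackendAzure, object: &str) -> Result<(), std::io::Error> {
    let blob_client = be
        .client
        .container_client(be.bucket.clone())
        .blob_client(be.get_dest(object));
    if blob_client.delete().into_future().await.is_err() {
        return Err(std::io::Error::other("delete error"));
    }
    Ok(())
}
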

/*fn azure_to_object_error(err: Error, params: Vec<String>) -> Option<error> {
    if err == nil {
        return nil
    }

    bucket := ""
    object := ""
    if len(params) >= 1 {
        bucket = params[0]
    }
    if len(params) == 2 {
        object = params[1]
    }

    azureErr, ok := err.(*azcore.ResponseError)
    if !ok {
        // We don't interpret non Azure errors. As azure errors will
        // have StatusCode to help to convert to object errors.
        return err
    }

    serviceCode := azureErr.ErrorCode
    statusCode := azureErr.StatusCode

    azureCodesToObjectError(err, serviceCode, statusCode, bucket, object)
}*/

/*fn azure_codes_to_object_error(err: Error, service_code: String, status_code: i32, bucket: String, object: String) -> Option<Error> {
    switch serviceCode {
    case "ContainerNotFound", "ContainerBeingDeleted":
        err = BucketNotFound{Bucket: bucket}
    case "ContainerAlreadyExists":
        err = BucketExists{Bucket: bucket}
    case "InvalidResourceName":
        err = BucketNameInvalid{Bucket: bucket}
    case "RequestBodyTooLarge":
        err = PartTooBig{}
    case "InvalidMetadata":
        err = UnsupportedMetadata{}
    case "BlobAccessTierNotSupportedForAccountType":
        err = NotImplemented{}
    case "OutOfRangeInput":
        err = ObjectNameInvalid{
            Bucket: bucket,
            Object: object,
        }
    default:
        switch statusCode {
        case http.StatusNotFound:
            if object != "" {
                err = ObjectNotFound{
                    Bucket: bucket,
                    Object: object,
                }
            } else {
                err = BucketNotFound{Bucket: bucket}
            }
        case http.StatusBadRequest:
            err = BucketNameInvalid{Bucket: bucket}
        }
    }
    return err
}*/
248
crates/ecstore/src/tier/warm_backend_gcs.rs
Normal file
@@ -0,0 +1,248 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
#![allow(unused_must_use)]
#![allow(clippy::all)]

use std::collections::HashMap;
use std::sync::Arc;

use bytes::Bytes;
use google_cloud_auth::credentials::Credentials;
use google_cloud_auth::credentials::user_account::Builder;
use google_cloud_storage as gcs;
use google_cloud_storage::client::Storage;
use std::convert::TryFrom;

use crate::client::{
    admin_handler_utils::AdminError,
    api_put_object::PutObjectOptions,
    transition_api::{Options, ReadCloser, ReaderImpl},
};
use crate::tier::{
    tier_config::TierGCS,
    warm_backend::{WarmBackend, WarmBackendGetOpts},
};
use tracing::warn;

const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
const MAX_PARTS_COUNT: i64 = 10000;
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;

pub struct WarmBackendGCS {
    pub client: Arc<Storage>,
    pub bucket: String,
    pub prefix: String,
    pub storage_class: String,
}

impl WarmBackendGCS {
    pub async fn new(conf: &TierGCS, tier: &str) -> Result<Self, std::io::Error> {
        if conf.creds == "" {
            return Err(std::io::Error::other("GCS credentials are required"));
        }

        if conf.bucket == "" {
            return Err(std::io::Error::other("no bucket name was provided"));
        }

        let authorized_user = serde_json::from_str(&conf.creds)?;
        let credentials = Builder::new(authorized_user)
            //.with_retry_policy(AlwaysRetry.with_attempt_limit(3))
            //.with_backoff_policy(backoff)
            .build()
            .map_err(|e| std::io::Error::other(format!("Invalid credentials JSON: {}", e)))?;

        let Ok(client) = Storage::builder()
            .with_endpoint(conf.endpoint.clone())
            .with_credentials(credentials)
            .build()
            .await
        else {
            return Err(std::io::Error::other("Storage::builder error"));
        };
        let client = Arc::new(client);
        Ok(Self {
            client,
            bucket: conf.bucket.clone(),
            prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
            storage_class: "".to_string(),
        })
    }

    pub fn get_dest(&self, object: &str) -> String {
        let mut dest_obj = object.to_string();
        if self.prefix != "" {
            dest_obj = format!("{}/{}", &self.prefix, object);
        }
        return dest_obj;
    }
}

#[async_trait::async_trait]
impl WarmBackend for WarmBackendGCS {
    async fn put_with_meta(
        &self,
        object: &str,
        r: ReaderImpl,
        length: i64,
        meta: HashMap<String, String>,
    ) -> Result<String, std::io::Error> {
        let d = match r {
            ReaderImpl::Body(content_body) => content_body.to_vec(),
            ReaderImpl::ObjectBody(mut content_body) => content_body.read_all().await?,
        };
        let Ok(res) = self
            .client
            .write_object(&self.bucket, &self.get_dest(object), Bytes::from(d))
            .send_buffered()
            .await
        else {
            return Err(std::io::Error::other("write_object error"));
        };
        //self.ToObjectError(err, object)
        Ok(res.generation.to_string())
    }

    async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
        self.put_with_meta(object, r, length, HashMap::new()).await
    }

    async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
        let Ok(mut reader) = self.client.read_object(&self.bucket, &self.get_dest(object)).send().await else {
            return Err(std::io::Error::other("read_object error"));
        };
        let mut contents = Vec::new();
        while let Ok(Some(chunk)) = reader.next().await.transpose() {
            contents.extend_from_slice(&chunk);
        }
        Ok(ReadCloser::new(std::io::Cursor::new(contents)))
    }

    async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
        /*self.client
            .delete_object()
            .set_bucket(&self.bucket)
            .set_object(&self.get_dest(object))
            //.set_generation(object.generation)
            .send()
            .await?;*/
        Ok(())
    }

    async fn in_use(&self) -> Result<bool, std::io::Error> {
        /*let result = self.client
            .list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR, 1)
            .await?;

        Ok(result.common_prefixes.len() > 0 || result.contents.len() > 0)*/
        Ok(false)
    }
}

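Editor's note: the `creds` field is expected to hold a serialized user-account credential that `serde_json::from_str` can parse for `Builder::new`. An illustrative placeholder shape (field names follow Google's authorized-user JSON; every value here is fake):

let creds_json = r#"{
    "type": "authorized_user",
    "client_id": "<client-id>.apps.googleusercontent.com",
    "client_secret": "<client-secret>",
    "refresh_token": "<refresh-token>"
}"#;
let authorized_user: serde_json::Value = serde_json::from_str(creds_json).expect("valid JSON");
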
/*fn gcs_to_object_error(err: Error, params: Vec<String>) -> Option<Error> {
    if err == nil {
        return nil
    }

    bucket := ""
    object := ""
    uploadID := ""
    if len(params) >= 1 {
        bucket = params[0]
    }
    if len(params) == 2 {
        object = params[1]
    }
    if len(params) == 3 {
        uploadID = params[2]
    }

    // in some cases just a plain error is being returned
    switch err.Error() {
    case "storage: bucket doesn't exist":
        err = BucketNotFound{
            Bucket: bucket,
        }
        return err
    case "storage: object doesn't exist":
        if uploadID != "" {
            err = InvalidUploadID{
                UploadID: uploadID,
            }
        } else {
            err = ObjectNotFound{
                Bucket: bucket,
                Object: object,
            }
        }
        return err
    }

    googleAPIErr, ok := err.(*googleapi.Error)
    if !ok {
        // We don't interpret non MinIO errors. As minio errors will
        // have StatusCode to help to convert to object errors.
        return err
    }

    if len(googleAPIErr.Errors) == 0 {
        return err
    }

    reason := googleAPIErr.Errors[0].Reason
    message := googleAPIErr.Errors[0].Message

    switch reason {
    case "required":
        // Anonymous users does not have storage.xyz access to project 123.
        fallthrough
    case "keyInvalid":
        fallthrough
    case "forbidden":
        err = PrefixAccessDenied{
            Bucket: bucket,
            Object: object,
        }
    case "invalid":
        err = BucketNameInvalid{
            Bucket: bucket,
        }
    case "notFound":
        if object != "" {
            err = ObjectNotFound{
                Bucket: bucket,
                Object: object,
            }
            break
        }
        err = BucketNotFound{Bucket: bucket}
    case "conflict":
        if message == "You already own this bucket. Please select another name." {
            err = BucketAlreadyOwnedByYou{Bucket: bucket}
            break
        }
        if message == "Sorry, that name is not available. Please try a different one." {
            err = BucketAlreadyExists{Bucket: bucket}
            break
        }
        err = BucketNotEmpty{Bucket: bucket}
    }

    return err
}*/
164
crates/ecstore/src/tier/warm_backend_huaweicloud.rs
Normal file
@@ -0,0 +1,164 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
#![allow(unused_must_use)]
#![allow(clippy::all)]

use std::collections::HashMap;
use std::sync::Arc;

use crate::client::{
    admin_handler_utils::AdminError,
    api_put_object::PutObjectOptions,
    credentials::{Credentials, SignatureType, Static, Value},
    transition_api::{BucketLookupType, Options, ReadCloser, ReaderImpl, TransitionClient, TransitionCore},
};
use crate::tier::{
    tier_config::TierHuaweicloud,
    warm_backend::{WarmBackend, WarmBackendGetOpts},
    warm_backend_s3::WarmBackendS3,
};
use tracing::warn;

const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
const MAX_PARTS_COUNT: i64 = 10000;
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;

pub struct WarmBackendHuaweicloud(WarmBackendS3);

impl WarmBackendHuaweicloud {
    pub async fn new(conf: &TierHuaweicloud, tier: &str) -> Result<Self, std::io::Error> {
        if conf.access_key == "" || conf.secret_key == "" {
            return Err(std::io::Error::other("both access and secret keys are required"));
        }

        if conf.bucket == "" {
            return Err(std::io::Error::other("no bucket name was provided"));
        }

        let u = match url::Url::parse(&conf.endpoint) {
            Ok(u) => u,
            Err(e) => {
                return Err(std::io::Error::other(e.to_string()));
            }
        };

        let creds = Credentials::new(Static(Value {
            access_key_id: conf.access_key.clone(),
            secret_access_key: conf.secret_key.clone(),
            session_token: "".to_string(),
            signer_type: SignatureType::SignatureV4,
            ..Default::default()
        }));
        let opts = Options {
            creds,
            secure: u.scheme() == "https",
            //transport: GLOBAL_RemoteTargetTransport,
            trailing_headers: true,
            region: conf.region.clone(),
            bucket_lookup: BucketLookupType::BucketLookupDNS,
            ..Default::default()
        };
        let scheme = u.scheme();
        let default_port = if scheme == "https" { 443 } else { 80 };
        let client = TransitionClient::new(
            &format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
            opts,
            "huaweicloud",
        )
        .await?;

        let client = Arc::new(client);
        let core = TransitionCore(Arc::clone(&client));
        Ok(Self(WarmBackendS3 {
            client,
            core,
            bucket: conf.bucket.clone(),
            prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
            storage_class: "".to_string(),
        }))
    }
}

#[async_trait::async_trait]
impl WarmBackend for WarmBackendHuaweicloud {
    async fn put_with_meta(
        &self,
        object: &str,
        r: ReaderImpl,
        length: i64,
        meta: HashMap<String, String>,
    ) -> Result<String, std::io::Error> {
        let part_size = optimal_part_size(length)?;
        let client = self.0.client.clone();
        let res = client
            .put_object(
                &self.0.bucket,
                &self.0.get_dest(object),
                r,
                length,
                &PutObjectOptions {
                    storage_class: self.0.storage_class.clone(),
                    part_size: part_size as u64,
                    disable_content_sha256: true,
                    user_metadata: meta,
                    ..Default::default()
                },
            )
            .await?;
        //self.ToObjectError(err, object)
        Ok(res.version_id)
    }

    async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
        self.put_with_meta(object, r, length, HashMap::new()).await
    }

    async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
        self.0.get(object, rv, opts).await
    }

    async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
        self.0.remove(object, rv).await
    }

    async fn in_use(&self) -> Result<bool, std::io::Error> {
        self.0.in_use().await
    }
}

fn optimal_part_size(object_size: i64) -> Result<i64, std::io::Error> {
    let mut object_size = object_size;
    if object_size == -1 {
        object_size = MAX_MULTIPART_PUT_OBJECT_SIZE;
    }

    if object_size > MAX_MULTIPART_PUT_OBJECT_SIZE {
        return Err(std::io::Error::other("entity too large"));
    }

    let configured_part_size = MIN_PART_SIZE;
    let mut part_size_flt = object_size as f64 / MAX_PARTS_COUNT as f64;
    part_size_flt = (part_size_flt as f64 / configured_part_size as f64).ceil() * configured_part_size as f64;

    let part_size = part_size_flt as i64;
    if part_size == 0 {
        return Ok(MIN_PART_SIZE);
    }
    Ok(part_size)
}
@@ -1,4 +1,3 @@
#![allow(unused_imports)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,6 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
@@ -70,12 +70,17 @@ impl WarmBackendMinIO {
            secure: u.scheme() == "https",
            //transport: GLOBAL_RemoteTargetTransport,
            trailing_headers: true,
            region: conf.region.clone(),
            ..Default::default()
        };
        let scheme = u.scheme();
        let default_port = if scheme == "https" { 443 } else { 80 };
        let client =
            TransitionClient::new(&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)), opts).await?;
        let client = TransitionClient::new(
            &format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
            opts,
            "minio",
        )
        .await?;

        let client = Arc::new(client);
        let core = TransitionCore(Arc::clone(&client));

163
crates/ecstore/src/tier/warm_backend_r2.rs
Normal file
@@ -0,0 +1,163 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
#![allow(unused_must_use)]
#![allow(clippy::all)]

use std::collections::HashMap;
use std::sync::Arc;

use crate::client::{
    admin_handler_utils::AdminError,
    api_put_object::PutObjectOptions,
    credentials::{Credentials, SignatureType, Static, Value},
    transition_api::{Options, ReadCloser, ReaderImpl, TransitionClient, TransitionCore},
};
use crate::tier::{
    tier_config::TierR2,
    warm_backend::{WarmBackend, WarmBackendGetOpts},
    warm_backend_s3::WarmBackendS3,
};
use tracing::warn;

const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
const MAX_PARTS_COUNT: i64 = 10000;
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;

pub struct WarmBackendR2(WarmBackendS3);

impl WarmBackendR2 {
    pub async fn new(conf: &TierR2, tier: &str) -> Result<Self, std::io::Error> {
        if conf.access_key == "" || conf.secret_key == "" {
            return Err(std::io::Error::other("both access and secret keys are required"));
        }

        if conf.bucket == "" {
            return Err(std::io::Error::other("no bucket name was provided"));
        }

        let u = match url::Url::parse(&conf.endpoint) {
            Ok(u) => u,
            Err(e) => {
                return Err(std::io::Error::other(e.to_string()));
            }
        };

        let creds = Credentials::new(Static(Value {
            access_key_id: conf.access_key.clone(),
            secret_access_key: conf.secret_key.clone(),
            session_token: "".to_string(),
            signer_type: SignatureType::SignatureV4,
            ..Default::default()
        }));
        let opts = Options {
            creds,
            secure: u.scheme() == "https",
            //transport: GLOBAL_RemoteTargetTransport,
            trailing_headers: true,
            region: conf.region.clone(),
            ..Default::default()
        };
        let scheme = u.scheme();
        let default_port = if scheme == "https" { 443 } else { 80 };
        let client = TransitionClient::new(
            &format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
            opts,
            "r2",
        )
        .await?;

        let client = Arc::new(client);
        let core = TransitionCore(Arc::clone(&client));
        Ok(Self(WarmBackendS3 {
            client,
            core,
            bucket: conf.bucket.clone(),
            prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
            storage_class: "".to_string(),
        }))
    }
}

#[async_trait::async_trait]
impl WarmBackend for WarmBackendR2 {
    async fn put_with_meta(
        &self,
        object: &str,
        r: ReaderImpl,
        length: i64,
        meta: HashMap<String, String>,
    ) -> Result<String, std::io::Error> {
        let part_size = optimal_part_size(length)?;
        let client = self.0.client.clone();
        let res = client
            .put_object(
                &self.0.bucket,
                &self.0.get_dest(object),
                r,
                length,
                &PutObjectOptions {
                    storage_class: self.0.storage_class.clone(),
                    part_size: part_size as u64,
                    disable_content_sha256: true,
                    user_metadata: meta,
                    ..Default::default()
                },
            )
            .await?;
        //self.ToObjectError(err, object)
        Ok(res.version_id)
    }

    async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
        self.put_with_meta(object, r, length, HashMap::new()).await
    }

    async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
        self.0.get(object, rv, opts).await
    }

    async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
        self.0.remove(object, rv).await
    }

    async fn in_use(&self) -> Result<bool, std::io::Error> {
        self.0.in_use().await
    }
}

fn optimal_part_size(object_size: i64) -> Result<i64, std::io::Error> {
    let mut object_size = object_size;
    if object_size == -1 {
        object_size = MAX_MULTIPART_PUT_OBJECT_SIZE;
    }

    if object_size > MAX_MULTIPART_PUT_OBJECT_SIZE {
        return Err(std::io::Error::other("entity too large"));
    }

    let configured_part_size = MIN_PART_SIZE;
    let mut part_size_flt = object_size as f64 / MAX_PARTS_COUNT as f64;
    part_size_flt = (part_size_flt as f64 / configured_part_size as f64).ceil() * configured_part_size as f64;

    let part_size = part_size_flt as i64;
    if part_size == 0 {
        return Ok(MIN_PART_SIZE);
    }
    Ok(part_size)
}
@@ -1,4 +1,3 @@
#![allow(unused_imports)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,6 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
@@ -67,12 +67,17 @@ impl WarmBackendRustFS {
            secure: u.scheme() == "https",
            //transport: GLOBAL_RemoteTargetTransport,
            trailing_headers: true,
            region: conf.region.clone(),
            ..Default::default()
        };
        let scheme = u.scheme();
        let default_port = if scheme == "https" { 443 } else { 80 };
        let client =
            TransitionClient::new(&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)), opts).await?;
        let client = TransitionClient::new(
            &format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
            opts,
            "rustfs",
        )
        .await?;

        let client = Arc::new(client);
        let core = TransitionCore(Arc::clone(&client));

@@ -1,4 +1,3 @@
#![allow(unused_imports)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,6 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
@@ -92,9 +92,10 @@ impl WarmBackendS3 {
            creds,
            secure: u.scheme() == "https",
            //transport: GLOBAL_RemoteTargetTransport,
            region: conf.region.clone(),
            ..Default::default()
        };
        let client = TransitionClient::new(&u.host().expect("err").to_string(), opts).await?;
        let client = TransitionClient::new(&u.host().expect("err").to_string(), opts, "s3").await?;

        let client = Arc::new(client);
        let core = TransitionCore(Arc::clone(&client));

196
crates/ecstore/src/tier/warm_backend_s3sdk.rs
Normal file
@@ -0,0 +1,196 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
#![allow(unused_must_use)]
#![allow(clippy::all)]

use std::collections::HashMap;
use std::sync::Arc;
use url::Url;

use aws_config::meta::region::RegionProviderChain;
use aws_sdk_s3::Client;
use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::primitives::ByteStream;

use crate::client::{
    api_get_options::GetObjectOptions,
    api_put_object::PutObjectOptions,
    api_remove::RemoveObjectOptions,
    transition_api::{ReadCloser, ReaderImpl},
};
use crate::error::ErrorResponse;
use crate::error::error_resp_to_object_err;
use crate::tier::{
    tier_config::TierS3,
    warm_backend::{WarmBackend, WarmBackendGetOpts},
};
use rustfs_utils::path::SLASH_SEPARATOR;

pub struct WarmBackendS3 {
    pub client: Arc<Client>,
    pub bucket: String,
    pub prefix: String,
    pub storage_class: String,
}

impl WarmBackendS3 {
    pub async fn new(conf: &TierS3, tier: &str) -> Result<Self, std::io::Error> {
        let u = match Url::parse(&conf.endpoint) {
            Ok(u) => u,
            Err(err) => {
                return Err(std::io::Error::other(err.to_string()));
            }
        };

        if conf.aws_role_web_identity_token_file == "" && conf.aws_role_arn != ""
            || conf.aws_role_web_identity_token_file != "" && conf.aws_role_arn == ""
        {
            return Err(std::io::Error::other("both the token file and the role ARN are required"));
        } else if conf.access_key == "" && conf.secret_key != "" || conf.access_key != "" && conf.secret_key == "" {
            return Err(std::io::Error::other("both the access and secret keys are required"));
        } else if conf.aws_role
            && (conf.aws_role_web_identity_token_file != ""
                || conf.aws_role_arn != ""
                || conf.access_key != ""
                || conf.secret_key != "")
        {
            return Err(std::io::Error::other(
                "AWS Role cannot be activated with static credentials or the web identity token file",
            ));
        } else if conf.bucket == "" {
            return Err(std::io::Error::other("no bucket name was provided"));
        }

        let creds;
        if conf.access_key != "" && conf.secret_key != "" {
            creds = Credentials::new(
                conf.access_key.clone(), // access_key_id
                conf.secret_key.clone(), // secret_access_key
                None,                    // session_token (optional)
                None,
                "Static",
            );
        } else {
            return Err(std::io::Error::other("insufficient parameters for S3 backend authentication"));
        }
        let region_provider = RegionProviderChain::default_provider().or_else(Region::new(conf.region.clone()));
        #[allow(deprecated)]
        let config = aws_config::from_env()
            .endpoint_url(conf.endpoint.clone())
            .region(region_provider)
            .credentials_provider(creds)
            .load()
            .await;
        let client = Client::new(&config);
        let client = Arc::new(client);
        Ok(Self {
            client,
            bucket: conf.bucket.clone(),
            prefix: conf.prefix.clone().trim_matches('/').to_string(),
            storage_class: conf.storage_class.clone(),
        })
    }

    pub fn get_dest(&self, object: &str) -> String {
        let mut dest_obj = object.to_string();
        if self.prefix != "" {
            dest_obj = format!("{}/{}", &self.prefix, object);
        }
        return dest_obj;
    }
}

#[async_trait::async_trait]
impl WarmBackend for WarmBackendS3 {
    async fn put_with_meta(
        &self,
        object: &str,
        r: ReaderImpl,
        length: i64,
        meta: HashMap<String, String>,
    ) -> Result<String, std::io::Error> {
        let client = self.client.clone();
        let Ok(res) = client
            .put_object()
            .bucket(&self.bucket)
            .key(&self.get_dest(object))
            .body(match r {
                ReaderImpl::Body(content_body) => ByteStream::from(content_body.to_vec()),
                ReaderImpl::ObjectBody(mut content_body) => ByteStream::from(content_body.read_all().await?),
            })
            .send()
            .await
        else {
            return Err(std::io::Error::other("put_object error"));
        };

        Ok(res.version_id().unwrap_or("").to_string())
    }

    async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
        self.put_with_meta(object, r, length, HashMap::new()).await
    }

    async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
        let client = self.client.clone();
        let Ok(res) = client
            .get_object()
            .bucket(&self.bucket)
            .key(&self.get_dest(object))
            .send()
            .await
        else {
            return Err(std::io::Error::other("get_object error"));
        };

        Ok(ReadCloser::new(std::io::Cursor::new(
            res.body.collect().await.map(|data| data.into_bytes().to_vec())?,
        )))
    }

    async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
        let client = self.client.clone();
        if let Err(_) = client
            .delete_object()
            .bucket(&self.bucket)
            .key(&self.get_dest(object))
            .send()
            .await
        {
            return Err(std::io::Error::other("delete_object error"));
        }

        Ok(())
    }

    async fn in_use(&self) -> Result<bool, std::io::Error> {
        let client = self.client.clone();
        let Ok(res) = client
            .list_objects_v2()
            .bucket(&self.bucket)
            //.max_keys(10)
            //.into_paginator()
            .send()
            .await
        else {
            return Err(std::io::Error::other("list_objects_v2 error"));
        };

        // Avoid panicking when either optional field is absent in the response.
        Ok(res.common_prefixes.unwrap_or_default().len() > 0 || res.contents.unwrap_or_default().len() > 0)
    }
}
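
Editor's note: a sketch of driving this aws-sdk-s3 backed tier end to end, assuming a valid `TierS3` named `conf`; the `ReaderImpl::Body(Bytes)` payload, tier name, and object key are illustrative assumptions, not from the commit:

async fn tier_usage_sketch(conf: &TierS3) -> Result<(), std::io::Error> {
    let be = WarmBackendS3::new(conf, "COLD").await?;
    // Push five bytes into the warm tier under a destination key.
    let body = ReaderImpl::Body(bytes::Bytes::from_static(b"hello"));
    let _remote_version = be.put("mybucket/obj/v1", body, 5).await?;
    // Probe whether the remote prefix already holds data.
    let _in_use = be.in_use().await?;
    Ok(())
}
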
164
crates/ecstore/src/tier/warm_backend_tencent.rs
Normal file
@@ -0,0 +1,164 @@
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_variables)]
|
||||
#![allow(unused_mut)]
|
||||
#![allow(unused_assignments)]
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::client::{
|
||||
admin_handler_utils::AdminError,
|
||||
api_put_object::PutObjectOptions,
|
||||
credentials::{Credentials, SignatureType, Static, Value},
|
||||
transition_api::{BucketLookupType, Options, ReadCloser, ReaderImpl, TransitionClient, TransitionCore},
|
||||
};
|
||||
use crate::tier::{
|
||||
tier_config::TierTencent,
|
||||
warm_backend::{WarmBackend, WarmBackendGetOpts},
|
||||
warm_backend_s3::WarmBackendS3,
|
||||
};
|
||||
use tracing::warn;
|
||||
|
||||
const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
|
||||
const MAX_PARTS_COUNT: i64 = 10000;
|
||||
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
|
||||
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;
|
||||
|
||||
pub struct WarmBackendTencent(WarmBackendS3);
|
||||
|
||||
impl WarmBackendTencent {
|
||||
pub async fn new(conf: &TierTencent, tier: &str) -> Result<Self, std::io::Error> {
|
||||
if conf.access_key == "" || conf.secret_key == "" {
|
||||
return Err(std::io::Error::other("both access and secret keys are required"));
|
||||
}
|
||||
|
||||
if conf.bucket == "" {
|
||||
return Err(std::io::Error::other("no bucket name was provided"));
|
||||
}
|
||||
|
||||
let u = match url::Url::parse(&conf.endpoint) {
|
||||
Ok(u) => u,
|
||||
Err(e) => {
|
||||
return Err(std::io::Error::other(e.to_string()));
|
||||
}
|
||||
};
|
||||
|
||||
let creds = Credentials::new(Static(Value {
|
||||
access_key_id: conf.access_key.clone(),
|
||||
secret_access_key: conf.secret_key.clone(),
|
||||
session_token: "".to_string(),
|
||||
signer_type: SignatureType::SignatureV4,
|
||||
..Default::default()
|
||||
}));
|
||||
let opts = Options {
|
||||
creds,
|
||||
secure: u.scheme() == "https",
|
||||
//transport: GLOBAL_RemoteTargetTransport,
|
||||
trailing_headers: true,
|
||||
region: conf.region.clone(),
|
||||
bucket_lookup: BucketLookupType::BucketLookupDNS,
|
||||
..Default::default()
|
||||
};
|
||||
let scheme = u.scheme();
|
||||
let default_port = if scheme == "https" { 443 } else { 80 };
|
||||
let client = TransitionClient::new(
|
||||
&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
|
||||
opts,
|
||||
"tencent",
|
||||
)
|
||||
.await?;
|
||||
|
||||
let client = Arc::new(client);
|
||||
let core = TransitionCore(Arc::clone(&client));
|
||||
Ok(Self(WarmBackendS3 {
|
||||
client,
|
||||
core,
|
||||
bucket: conf.bucket.clone(),
|
||||
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
|
||||
storage_class: "".to_string(),
|
||||
}))
|
||||
}
|
||||
}

#[async_trait::async_trait]
impl WarmBackend for WarmBackendTencent {
    async fn put_with_meta(
        &self,
        object: &str,
        r: ReaderImpl,
        length: i64,
        meta: HashMap<String, String>,
    ) -> Result<String, std::io::Error> {
        let part_size = optimal_part_size(length)?;
        let client = self.0.client.clone();
        let res = client
            .put_object(
                &self.0.bucket,
                &self.0.get_dest(object),
                r,
                length,
                &PutObjectOptions {
                    storage_class: self.0.storage_class.clone(),
                    part_size: part_size as u64,
                    disable_content_sha256: true,
                    user_metadata: meta,
                    ..Default::default()
                },
            )
            .await?;
        //self.ToObjectError(err, object)
        Ok(res.version_id)
    }

    async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
        self.put_with_meta(object, r, length, HashMap::new()).await
    }

    async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
        self.0.get(object, rv, opts).await
    }

    async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
        self.0.remove(object, rv).await
    }

    async fn in_use(&self) -> Result<bool, std::io::Error> {
        self.0.in_use().await
    }
}

fn optimal_part_size(object_size: i64) -> Result<i64, std::io::Error> {
    let mut object_size = object_size;
    if object_size == -1 {
        object_size = MAX_MULTIPART_PUT_OBJECT_SIZE;
    }

    if object_size > MAX_MULTIPART_PUT_OBJECT_SIZE {
        return Err(std::io::Error::other("entity too large"));
    }

    let configured_part_size = MIN_PART_SIZE;
    let mut part_size_flt = object_size as f64 / MAX_PARTS_COUNT as f64;
    part_size_flt = (part_size_flt / configured_part_size as f64).ceil() * configured_part_size as f64;

    let part_size = part_size_flt as i64;
    if part_size == 0 {
        return Ok(MIN_PART_SIZE);
    }
    Ok(part_size)
}
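
// Worked example (added for illustration; not in the original source): how
// optimal_part_size scales with object size, given the constants above.
#[cfg(test)]
mod part_size_tests {
    use super::*;

    #[test]
    fn part_size_scales_with_object_size() {
        // A 1 GiB object needs far fewer than 10000 parts at 128 MiB,
        // so the minimum part size wins.
        assert_eq!(optimal_part_size(1024 * 1024 * 1024).unwrap(), MIN_PART_SIZE);
        // An unknown size (-1) is treated as the 5 TiB ceiling:
        // ceil((5 TiB / 10000) / 128 MiB) * 128 MiB = 640 MiB.
        assert_eq!(optimal_part_size(-1).unwrap(), 5 * MIN_PART_SIZE);
        // Anything past the 5 TiB cap is rejected outright.
        assert!(optimal_part_size(MAX_MULTIPART_PUT_OBJECT_SIZE + 1).is_err());
    }
}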

@@ -12,16 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use super::filemeta::TRANSITION_COMPLETE;
use crate::error::{Error, Result};
use crate::{ReplicationState, ReplicationStatusType, VersionPurgeStatusType};
use bytes::Bytes;
use rmp_serde::Serializer;
use rustfs_utils::HashAlgorithm;
use rustfs_utils::http::headers::{RESERVED_METADATA_PREFIX_LOWER, RUSTFS_HEALING};
use s3s::dto::{RestoreStatus, Timestamp};
use s3s::header::X_AMZ_RESTORE;
use serde::Deserialize;
use serde::Serialize;
use std::collections::HashMap;
use time::OffsetDateTime;
use time::{OffsetDateTime, format_description::well_known::Rfc3339};
use uuid::Uuid;

pub const ERASURE_ALGORITHM: &str = "rs-vandermonde";
@@ -35,6 +38,8 @@ pub const TIER_FV_ID: &str = "tier-free-versionID";
pub const TIER_FV_MARKER: &str = "tier-free-marker";
pub const TIER_SKIP_FV_ID: &str = "tier-skip-fvid";

const ERR_RESTORE_HDR_MALFORMED: &str = "x-amz-restore header malformed";

#[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Default)]
pub struct ObjectPartInfo {
    pub etag: String,
@@ -394,7 +399,10 @@ impl FileInfo {

    /// Check if the object is remote (transitioned to another tier)
    pub fn is_remote(&self) -> bool {
        !self.transition_tier.is_empty()
        if self.transition_status != TRANSITION_COMPLETE {
            return false;
        }
        !is_restored_object_on_disk(&self.metadata)
    }

    /// Get the data directory for this object
@@ -535,3 +543,101 @@ pub struct FilesInfo {
    pub files: Vec<FileInfo>,
    pub is_truncated: bool,
}

pub trait RestoreStatusOps {
    fn expiry(&self) -> Option<OffsetDateTime>;
    fn on_going(&self) -> bool;
    fn on_disk(&self) -> bool;
    fn to_string(&self) -> String;
}

impl RestoreStatusOps for RestoreStatus {
    fn expiry(&self) -> Option<OffsetDateTime> {
        if self.on_going() {
            return None;
        }
        self.restore_expiry_date.clone().map(OffsetDateTime::from)
    }

    fn on_going(&self) -> bool {
        if let Some(on_going) = self.is_restore_in_progress {
            return on_going;
        }
        false
    }

    fn on_disk(&self) -> bool {
        let expiry = self.expiry();
        if let Some(expiry0) = expiry
            && OffsetDateTime::now_utc().unix_timestamp() < expiry0.unix_timestamp()
        {
            return true;
        }
        false
    }

    fn to_string(&self) -> String {
        if self.on_going() {
            return "ongoing-request=\"true\"".to_string();
        }
        format!(
            "ongoing-request=\"false\", expiry-date=\"{}\"",
            OffsetDateTime::from(self.restore_expiry_date.clone().unwrap())
                .format(&Rfc3339)
                .unwrap()
        )
    }
}

fn parse_restore_obj_status(restore_hdr: &str) -> Result<RestoreStatus> {
    let tokens: Vec<&str> = restore_hdr.splitn(2, ',').collect();
    let progress_tokens: Vec<&str> = tokens[0].splitn(2, '=').collect();
    if progress_tokens.len() != 2 {
        return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
    }
    if progress_tokens[0].trim() != "ongoing-request" {
        return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
    }

    match progress_tokens[1] {
        "true" | "\"true\"" => {
            if tokens.len() == 1 {
                return Ok(RestoreStatus {
                    is_restore_in_progress: Some(true),
                    ..Default::default()
                });
            }
        }
        "false" | "\"false\"" => {
            if tokens.len() != 2 {
                return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
            }
            let expiry_tokens: Vec<&str> = tokens[1].splitn(2, '=').collect();
            if expiry_tokens.len() != 2 {
                return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
            }
            if expiry_tokens[0].trim() != "expiry-date" {
                return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
            }
            let Ok(expiry) = OffsetDateTime::parse(expiry_tokens[1].trim_matches('"'), &Rfc3339) else {
                return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
            };
            return Ok(RestoreStatus {
                is_restore_in_progress: Some(false),
                restore_expiry_date: Some(Timestamp::from(expiry)),
            });
        }
        _ => (),
    }
    Err(Error::other(ERR_RESTORE_HDR_MALFORMED))
}

pub fn is_restored_object_on_disk(meta: &HashMap<String, String>) -> bool {
    if let Some(restore_hdr) = meta.get(X_AMZ_RESTORE.as_str()) {
        if let Ok(restore_status) = parse_restore_obj_status(restore_hdr) {
            return restore_status.on_disk();
        }
    }
    false
}
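
// Quick check (added for illustration; not in the original source) of the two
// x-amz-restore header shapes parse_restore_obj_status accepts.
#[cfg(test)]
mod restore_hdr_tests {
    use super::*;

    #[test]
    fn parses_ongoing_and_completed_restores() {
        // In-progress restore: only the ongoing-request token is present.
        let ongoing = parse_restore_obj_status("ongoing-request=\"true\"").unwrap();
        assert_eq!(ongoing.is_restore_in_progress, Some(true));

        // Completed restore: an expiry-date token follows.
        let done = parse_restore_obj_status("ongoing-request=\"false\", expiry-date=\"2025-01-01T00:00:00Z\"").unwrap();
        assert_eq!(done.is_restore_in_progress, Some(false));
        assert!(done.restore_expiry_date.is_some());

        // Anything else is rejected as malformed.
        assert!(parse_restore_obj_status("ongoing-request").is_err());
    }
}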

@@ -22,8 +22,9 @@ use byteorder::ByteOrder;
use bytes::Bytes;
use rustfs_utils::http::AMZ_BUCKET_REPLICATION_STATUS;
use rustfs_utils::http::headers::{
    self, AMZ_META_UNENCRYPTED_CONTENT_LENGTH, AMZ_META_UNENCRYPTED_CONTENT_MD5, AMZ_STORAGE_CLASS, RESERVED_METADATA_PREFIX,
    RESERVED_METADATA_PREFIX_LOWER, VERSION_PURGE_STATUS_KEY,
    self, AMZ_META_UNENCRYPTED_CONTENT_LENGTH, AMZ_META_UNENCRYPTED_CONTENT_MD5, AMZ_RESTORE_EXPIRY_DAYS,
    AMZ_RESTORE_REQUEST_DATE, AMZ_STORAGE_CLASS, RESERVED_METADATA_PREFIX, RESERVED_METADATA_PREFIX_LOWER,
    VERSION_PURGE_STATUS_KEY,
};
use s3s::header::X_AMZ_RESTORE;
use serde::{Deserialize, Serialize};
@@ -68,9 +69,6 @@ pub const TRANSITIONED_OBJECTNAME: &str = "transitioned-object";
pub const TRANSITIONED_VERSION_ID: &str = "transitioned-versionID";
pub const TRANSITION_TIER: &str = "transition-tier";

const X_AMZ_RESTORE_EXPIRY_DAYS: &str = "X-Amz-Restore-Expiry-Days";
const X_AMZ_RESTORE_REQUEST_DATE: &str = "X-Amz-Restore-Request-Date";

// type ScanHeaderVersionFn = Box<dyn Fn(usize, &[u8], &[u8]) -> Result<()>>;

#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
@@ -693,11 +691,6 @@ impl FileMeta {
        }
    }

    // ???
    if fi.transition_status == TRANSITION_COMPLETE {
        update_version = false;
    }

    for (i, ver) in self.versions.iter().enumerate() {
        if ver.header.version_id != fi.version_id {
            continue;
@@ -1088,13 +1081,24 @@ impl FileMeta {

    /// Count shared data directories
    pub fn shared_data_dir_count(&self, version_id: Option<Uuid>, data_dir: Option<Uuid>) -> usize {
        if self.data.entries().unwrap_or_default() > 0
            && version_id.is_some()
            && self
                .data
                .find(version_id.unwrap().to_string().as_str())
                .unwrap_or_default()
                .is_some()
        {
            return 0;
        }

        self.versions
            .iter()
            .filter(|v| {
                v.header.version_type == VersionType::Object && v.header.version_id != version_id && v.header.user_data_dir()
            })
            .filter_map(|v| FileMetaVersion::decode_data_dir_from_meta(&v.meta).ok().flatten())
            .filter(|&dir| Some(dir) == data_dir)
            .filter_map(|v| FileMetaVersion::decode_data_dir_from_meta(&v.meta).ok())
            .filter(|&dir| dir == data_dir)
            .count()
    }

@@ -1838,8 +1842,8 @@ impl MetaObject {

    pub fn remove_restore_hdrs(&mut self) {
        self.meta_user.remove(X_AMZ_RESTORE.as_str());
        self.meta_user.remove(X_AMZ_RESTORE_EXPIRY_DAYS);
        self.meta_user.remove(X_AMZ_RESTORE_REQUEST_DATE);
        self.meta_user.remove(AMZ_RESTORE_EXPIRY_DAYS);
        self.meta_user.remove(AMZ_RESTORE_REQUEST_DATE);
    }

    pub fn uses_data_dir(&self) -> bool {

@@ -44,6 +44,20 @@ impl InlineData {
        if self.0.is_empty() { &self.0 } else { &self.0[1..] }
    }

    pub fn entries(&self) -> Result<usize> {
        if self.0.is_empty() || !self.version_ok() {
            return Ok(0);
        }

        let buf = self.after_version();

        let mut cur = Cursor::new(buf);

        let fields_len = rmp::decode::read_map_len(&mut cur)?;

        Ok(fields_len as usize)
    }
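
    // Layout note: the inline-data blob is a one-byte format version followed
    // by a msgpack map keyed by version id, so entries() is simply that
    // top-level map length and find() below scans the same map for one key.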

    pub fn find(&self, key: &str) -> Result<Option<Vec<u8>>> {
        if self.0.is_empty() || !self.version_ok() {
            return Ok(None);

@@ -167,11 +167,13 @@ fn get_canonical_request(req: &request::Request<Body>, ignored_headers: &HashMap
    if let Some(q) = req.uri().query() {
        // Parse query string into key-value pairs
        let mut query_params: Vec<(String, String)> = Vec::new();
        for param in q.split('&') {
            if let Some((key, value)) = param.split_once('=') {
                query_params.push((key.to_string(), value.to_string()));
            } else {
                query_params.push((param.to_string(), "".to_string()));
        if !q.is_empty() {
            for param in q.split('&') {
                if let Some((key, value)) = param.split_once('=') {
                    query_params.push((key.to_string(), value.to_string()));
                } else {
                    query_params.push((param.to_string(), "".to_string()));
                }
            }
        }

@@ -179,6 +181,7 @@ fn get_canonical_request(req: &request::Request<Body>, ignored_headers: &HashMap
        query_params.sort_by(|a, b| a.0.cmp(&b.0));

        // Build canonical query string
        //println!("query_params: {query_params:?}");
        let sorted_params: Vec<String> = query_params.iter().map(|(k, v)| format!("{k}={v}")).collect();

        canonical_query_string = sorted_params.join("&");
@@ -417,3 +420,390 @@ pub fn sign_v4_trailer(
        trailer,
    )
}

#[cfg(test)]
#[allow(unused_variables, unused_mut)]
mod tests {
    use http::request;
    use time::macros::datetime;

    use super::*;

    #[test]
    fn example_list_objects() {
        // let access_key_id = "AKIAIOSFODNN7EXAMPLE";
        let secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
        let timestamp = "20130524T000000Z";
        let t = datetime!(2013-05-24 0:00 UTC);
        // let bucket = "examplebucket";
        let region = "us-east-1";
        let service = "s3";
        let path = "/";

        let mut req = request::Request::builder()
            .method(http::Method::GET)
            .uri("http://examplebucket.s3.amazonaws.com/?")
            .body(Body::empty())
            .unwrap();
        let mut headers = req.headers_mut();
        headers.insert("host", "examplebucket.s3.amazonaws.com".parse().unwrap());
        headers.insert(
            "x-amz-content-sha256",
            "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
                .parse()
                .unwrap(),
        );
        headers.insert("x-amz-date", timestamp.parse().unwrap());

        let query = vec![
            ("max-keys".to_string(), "2".to_string()),
            ("prefix".to_string(), "J".to_string()),
        ];
        let uri = req.uri().clone();
        let mut parts = req.uri().clone().into_parts();
        parts.path_and_query = Some(
            format!("{}?{}", uri.path(), serde_urlencoded::to_string(&query).unwrap())
                .parse()
                .unwrap(),
        );
        *req.uri_mut() = Uri::from_parts(parts).unwrap();

        let canonical_request = get_canonical_request(&req, &v4_ignored_headers, &get_hashed_payload(&req));
        assert_eq!(
            canonical_request,
            concat!(
                "GET\n",
                "/\n",
                "max-keys=2&prefix=J\n",
                "host:examplebucket.s3.amazonaws.com\n",
                "x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n",
                "x-amz-date:",
                "20130524T000000Z",
                "\n",
                "\n",
                "host;x-amz-content-sha256;x-amz-date\n",
                "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
            )
        );

        let string_to_sign = get_string_to_sign_v4(t, region, &canonical_request, service);
        assert_eq!(
            string_to_sign,
            concat!(
                "AWS4-HMAC-SHA256\n",
                "20130524T000000Z",
                "\n",
                "20130524/us-east-1/s3/aws4_request\n",
                "df57d21db20da04d7fa30298dd4488ba3a2b47ca3a489c74750e0f1e7df1b9b7",
            )
        );

        let signing_key = get_signing_key(secret_access_key, region, t, service);
        let signature = get_signature(signing_key, &string_to_sign);

        assert_eq!(signature, "34b48302e7b5fa45bde8084f4b7868a86f0a534bc59db6670ed5711ef69dc6f7");
    }
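
    // For orientation: the expected values above follow the standard SigV4
    // derivation that get_signing_key/get_signature are assumed to implement:
    //
    //   k_date    = HMAC-SHA256("AWS4" + secret_access_key, "20130524")
    //   k_region  = HMAC-SHA256(k_date, "us-east-1")
    //   k_service = HMAC-SHA256(k_region, "s3")
    //   k_signing = HMAC-SHA256(k_service, "aws4_request")
    //   signature = hex(HMAC-SHA256(k_signing, string_to_sign))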

    #[test]
    fn example_signature() {
        // let access_key_id = "rustfsadmin";
        let secret_access_key = "rustfsadmin";
        let timestamp = "20250505T011054Z";
        let t = datetime!(2025-05-05 01:10:54 UTC);
        // let bucket = "mblock2";
        let region = "us-east-1";
        let service = "s3";
        let path = "/mblock2/";

        let mut req = request::Request::builder()
            .method(http::Method::GET)
            .uri("http://192.168.1.11:9020/mblock2/?")
            .body(Body::empty())
            .unwrap();

        let mut headers = req.headers_mut();
        headers.insert("host", "192.168.1.11:9020".parse().unwrap());
        headers.insert(
            "x-amz-content-sha256",
            "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
                .parse()
                .unwrap(),
        );
        headers.insert("x-amz-date", timestamp.parse().unwrap());

        let mut query: Vec<(String, String)> = Vec::new();
        let uri = req.uri().clone();
        let mut parts = req.uri().clone().into_parts();
        parts.path_and_query = Some(
            format!("{}?{}", uri.path(), serde_urlencoded::to_string(&query).unwrap())
                .parse()
                .unwrap(),
        );
        //println!("parts.path_and_query: {:?}", parts.path_and_query);
        *req.uri_mut() = Uri::from_parts(parts).unwrap();

        let canonical_request = get_canonical_request(&req, &v4_ignored_headers, &get_hashed_payload(&req));
        println!("canonical_request: \n{}\n", canonical_request);
        assert_eq!(
            canonical_request,
            concat!(
                "GET\n",
                "/mblock2/\n",
                "\n",
                "host:192.168.1.11:9020\n",
                "x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n",
                "x-amz-date:",
                "20250505T011054Z",
                "\n",
                "\n",
                "host;x-amz-content-sha256;x-amz-date\n",
                "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
            )
        );

        let string_to_sign = get_string_to_sign_v4(t, region, &canonical_request, service);
        println!("string_to_sign: \n{}\n", string_to_sign);
        assert_eq!(
            string_to_sign,
            concat!(
                "AWS4-HMAC-SHA256\n",
                "20250505T011054Z",
                "\n",
                "20250505/us-east-1/s3/aws4_request\n",
                "c2960d00cc7de7bed3e2e2d1330ec298ded8f78a231c1d32dedac72ebec7f9b0",
            )
        );

        let signing_key = get_signing_key(secret_access_key, region, t, service);
        let signature = get_signature(signing_key, &string_to_sign);
        println!("signature: \n{}\n", signature);
        assert_eq!(signature, "73fad2dfea0727e10a7179bf49150360a56f2e6b519c53999fd6e011152187d0");
    }

    #[test]
    fn example_signature2() {
        // let access_key_id = "rustfsadmin";
        let secret_access_key = "rustfsadmin";
        let timestamp = "20250507T051030Z";
        let t = datetime!(2025-05-07 05:10:30 UTC);
        // let bucket = "mblock2";
        let region = "us-east-1";
        let service = "s3";
        let path = "/mblock2/";

        let mut req = request::Request::builder()
            .method(http::Method::GET)
            .uri("http://192.168.1.11:9020/mblock2/?list-type=2&encoding-type=url&prefix=mypre&delimiter=%2F&fetch-owner=true&max-keys=1")
            .body(Body::empty()).unwrap();

        let mut headers = req.headers_mut();
        headers.insert("host", "192.168.1.11:9020".parse().unwrap());
        headers.insert(
            "x-amz-content-sha256",
            "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
                .parse()
                .unwrap(),
        );
        headers.insert("x-amz-date", timestamp.parse().unwrap());

        println!("{:?}", req.uri().query());
        let canonical_request = get_canonical_request(&req, &v4_ignored_headers, &get_hashed_payload(&req));
        println!("canonical_request: \n{}\n", canonical_request);
        assert_eq!(
            canonical_request,
            concat!(
                "GET\n",
                "/mblock2/\n",
                "delimiter=%2F&encoding-type=url&fetch-owner=true&list-type=2&max-keys=1&prefix=mypre\n",
                "host:192.168.1.11:9020\n",
                "x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n",
                "x-amz-date:",
                "20250507T051030Z",
                "\n",
                "\n",
                "host;x-amz-content-sha256;x-amz-date\n",
                "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
            )
        );

        let string_to_sign = get_string_to_sign_v4(t, region, &canonical_request, service);
        println!("string_to_sign: \n{}\n", string_to_sign);
        assert_eq!(
            string_to_sign,
            concat!(
                "AWS4-HMAC-SHA256\n",
                "20250507T051030Z",
                "\n",
                "20250507/us-east-1/s3/aws4_request\n",
                "e6db9e09e9c873aff0b9ca170998b4753f6a6c36c90bc2dca80613affb47f999",
            )
        );

        let signing_key = get_signing_key(secret_access_key, region, t, service);
        let signature = get_signature(signing_key, &string_to_sign);
        println!("signature: \n{}\n", signature);
        assert_eq!(signature, "dfbed913d1982428f6224ee506431fc133dbcad184194c0cbf01bc517435788a");
    }

    #[test]
    fn example_signature3() {
        // let access_key_id = "rustfsadmin";
        let secret_access_key = "rustfsadmin";
        let timestamp = "20250628T061107Z";
        let t = datetime!(2025-06-28 06:11:07 UTC);
        // let bucket = "mbver";
        let region = "";
        let service = "s3";
        let path = "/mbver/";

        let mut req = request::Request::builder()
            .method(http::Method::GET)
            .uri("http://192.168.1.11:9020/mbver/?list-type=2&encoding-type=url&prefix=mypre99&delimiter=%2F&fetch-owner=true&max-keys=1")
            .body(Body::empty()).unwrap();

        let mut headers = req.headers_mut();
        headers.insert("host", "127.0.0.1:9000".parse().unwrap());
        headers.insert(
            "x-amz-content-sha256",
            "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
                .parse()
                .unwrap(),
        );
        headers.insert("x-amz-date", timestamp.parse().unwrap());

        println!("{:?}", req.uri().query());
        let canonical_request = get_canonical_request(&req, &v4_ignored_headers, &get_hashed_payload(&req));
        println!("canonical_request: \n{}\n", canonical_request);
        assert_eq!(
            canonical_request,
            concat!(
                "GET\n",
                "/mbver/\n",
                "delimiter=%2F&encoding-type=url&fetch-owner=true&list-type=2&max-keys=1&prefix=mypre99\n",
                "host:127.0.0.1:9000\n",
                "x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n",
                "x-amz-date:",
                "20250628T061107Z",
                "\n",
                "\n",
                "host;x-amz-content-sha256;x-amz-date\n",
                "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
            )
        );

        let string_to_sign = get_string_to_sign_v4(t, region, &canonical_request, service);
        println!("string_to_sign: \n{}\n", string_to_sign);
        assert_eq!(
            string_to_sign,
            concat!(
                "AWS4-HMAC-SHA256\n",
                "20250628T061107Z",
                "\n",
                "20250628//s3/aws4_request\n",
                "9dcfa3d3139baf71a046e7fa17dacab8ee11676771e25e7cd09098bf39f09d5b", // hash of the canonical request
            )
        );

        let signing_key = get_signing_key(secret_access_key, region, t, service);
        let signature = get_signature(signing_key, &string_to_sign);
        println!("signature: \n{}\n", signature);
        assert_eq!(signature, "c7c7c6e12e5709c0c2ffc4707600a86c3cd261dd1de7409126a17f5b08c58dfa");
    }

    #[test]
    fn example_presigned_url() {
        let access_key_id = "AKIAIOSFODNN7EXAMPLE";
        let secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
        let timestamp = "20130524T000000Z";
        let t = datetime!(2013-05-24 0:00 UTC);
        // let bucket = "mblock2";
        let region = "us-east-1";
        let service = "s3";
        let path = "/";
        let session_token = "";

        let mut req = request::Request::builder()
            .method(http::Method::GET)
            .uri("http://examplebucket.s3.amazonaws.com/test.txt")
            .body(Body::empty())
            .unwrap();

        let mut headers = req.headers_mut();
        headers.insert("host", "examplebucket.s3.amazonaws.com".parse().unwrap());

        req = pre_sign_v4(req, access_key_id, secret_access_key, "", region, 86400, t);

        let mut canonical_request = req.method().as_str().to_string();
        canonical_request.push('\n');
        canonical_request.push_str(req.uri().path());
        canonical_request.push('\n');
        canonical_request.push_str(req.uri().query().unwrap());
        canonical_request.push('\n');
        canonical_request.push_str(&get_canonical_headers(&req, &v4_ignored_headers));
        canonical_request.push('\n');
        canonical_request.push_str(&get_signed_headers(&req, &v4_ignored_headers));
        canonical_request.push('\n');
        canonical_request.push_str(&get_hashed_payload(&req));
        //println!("canonical_request: \n{}\n", canonical_request);
        assert_eq!(
            canonical_request,
            concat!(
                "GET\n",
                "/test.txt\n",
                "X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20130524T000000Z&X-Amz-Expires=0000086400&X-Amz-SignedHeaders=host&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Signature=98f1c9f47b39a4c40662680a9b029b046b7da5542c2e35d67edb8ff18d2ccf5c\n",
                "host:examplebucket.s3.amazonaws.com\n",
                "\n",
                "host\n",
                "UNSIGNED-PAYLOAD",
            )
        );
    }

    #[test]
    fn example_presigned_url2() {
        let access_key_id = "rustfsadmin";
        let secret_access_key = "rustfsadmin";
        let timestamp = "20130524T000000Z";
        let t = datetime!(2013-05-24 0:00 UTC);
        // let bucket = "mblock2";
        let region = "us-east-1";
        let service = "s3";
        let path = "/mblock2/";
        let session_token = "";

        let mut req = request::Request::builder()
            .method(http::Method::GET)
            .uri("http://192.168.1.11:9020/mblock2/test.txt?delimiter=%2F&fetch-owner=true&prefix=mypre&encoding-type=url&max-keys=1&list-type=2")
            .body(Body::empty()).unwrap();

        let mut headers = req.headers_mut();
        headers.insert("host", "192.168.1.11:9020".parse().unwrap());

        req = pre_sign_v4(req, access_key_id, secret_access_key, "", region, 86400, t);

        let mut canonical_request = req.method().as_str().to_string();
        canonical_request.push('\n');
        canonical_request.push_str(req.uri().path());
        canonical_request.push('\n');
        canonical_request.push_str(req.uri().query().unwrap());
        canonical_request.push('\n');
        canonical_request.push_str(&get_canonical_headers(&req, &v4_ignored_headers));
        canonical_request.push('\n');
        canonical_request.push_str(&get_signed_headers(&req, &v4_ignored_headers));
        canonical_request.push('\n');
        canonical_request.push_str(&get_hashed_payload(&req));
        //println!("canonical_request: \n{}\n", canonical_request);
        assert_eq!(
            canonical_request,
            concat!(
                "GET\n",
                "/mblock2/test.txt\n",
                "delimiter=%2F&fetch-owner=true&prefix=mypre&encoding-type=url&max-keys=1&list-type=2&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20130524T000000Z&X-Amz-Expires=0000086400&X-Amz-SignedHeaders=host&X-Amz-Credential=rustfsadmin%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Signature=fe7f63f41e4ca18be9e70f560bbe9c079cf06ab97630934e04f7524751ff302d\n",
                "host:192.168.1.11:9020\n",
                "\n",
                "host\n",
                "UNSIGNED-PAYLOAD",
            )
        );
    }
}

@@ -51,10 +51,8 @@ pub const AMZ_TAG_COUNT: &str = "x-amz-tagging-count";
pub const AMZ_TAG_DIRECTIVE: &str = "X-Amz-Tagging-Directive";

// S3 transition restore
pub const AMZ_RESTORE: &str = "x-amz-restore";
pub const AMZ_RESTORE_EXPIRY_DAYS: &str = "X-Amz-Restore-Expiry-Days";
pub const AMZ_RESTORE_REQUEST_DATE: &str = "X-Amz-Restore-Request-Date";
pub const AMZ_RESTORE_OUTPUT_PATH: &str = "x-amz-restore-output-path";

// S3 extensions
pub const AMZ_COPY_SOURCE_IF_MODIFIED_SINCE: &str = "x-amz-copy-source-if-modified-since";

@@ -116,6 +116,24 @@ impl Operation for AddTier {
            TierType::MinIO => {
                args.name = args.minio.clone().unwrap().name;
            }
            TierType::Aliyun => {
                args.name = args.aliyun.clone().unwrap().name;
            }
            TierType::Tencent => {
                args.name = args.tencent.clone().unwrap().name;
            }
            TierType::Huaweicloud => {
                args.name = args.huaweicloud.clone().unwrap().name;
            }
            TierType::Azure => {
                args.name = args.azure.clone().unwrap().name;
            }
            TierType::GCS => {
                args.name = args.gcs.clone().unwrap().name;
            }
            TierType::R2 => {
                args.name = args.r2.clone().unwrap().name;
            }
            _ => (),
        }
        debug!("add tier args {:?}", args);
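        // Note: each arm above assumes the matching tier config body is present
        // in the request; declaring e.g. TierType::R2 without an `r2` section
        // panics on unwrap(). Returning a malformed-request error would be safer.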

@@ -33,7 +33,10 @@ use http::{HeaderMap, StatusCode};
use metrics::counter;
use rustfs_ecstore::{
    bucket::{
        lifecycle::{bucket_lifecycle_ops::validate_transition_tier, lifecycle::Lifecycle},
        lifecycle::{
            bucket_lifecycle_ops::{RestoreRequestOps, post_restore_opts, validate_transition_tier},
            lifecycle::{self, Lifecycle, TransitionOptions},
        },
        metadata::{
            BUCKET_LIFECYCLE_CONFIG, BUCKET_NOTIFICATION_CONFIG, BUCKET_POLICY_CONFIG, BUCKET_REPLICATION_CONFIG,
            BUCKET_SSECONFIG, BUCKET_TAGGING_CONFIG, BUCKET_VERSIONING_CONFIG, OBJECT_LOCK_CONFIG,
@@ -74,7 +77,8 @@ use rustfs_ecstore::{
    },
};
use rustfs_filemeta::REPLICATE_INCOMING_DELETE;
use rustfs_filemeta::{ReplicationStatusType, ReplicationType, VersionPurgeStatusType, fileinfo::ObjectPartInfo};
use rustfs_filemeta::fileinfo::{ObjectPartInfo, RestoreStatusOps};
use rustfs_filemeta::{ReplicationStatusType, ReplicationType, VersionPurgeStatusType};
use rustfs_kms::{
    DataKey,
    service_manager::get_global_encryption_service,
@@ -103,11 +107,15 @@ use rustfs_utils::{
    CompressionAlgorithm,
    http::{
        AMZ_BUCKET_REPLICATION_STATUS,
        headers::{AMZ_DECODED_CONTENT_LENGTH, AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX_LOWER},
        headers::{
            AMZ_DECODED_CONTENT_LENGTH, AMZ_OBJECT_TAGGING, AMZ_RESTORE_EXPIRY_DAYS, AMZ_RESTORE_REQUEST_DATE,
            RESERVED_METADATA_PREFIX_LOWER,
        },
    },
    path::{is_dir_object, path_join_buf},
};
use rustfs_zip::CompressionFormat;
use s3s::header::{X_AMZ_RESTORE, X_AMZ_RESTORE_OUTPUT_PATH};
use s3s::{S3, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, dto::*, s3_error};
use std::{
    collections::HashMap,
@@ -846,118 +854,180 @@ impl S3 for FS {
        Ok(S3Response::new(output))
    }

    async fn restore_object(&self, _req: S3Request<RestoreObjectInput>) -> S3Result<S3Response<RestoreObjectOutput>> {
        Err(s3_error!(NotImplemented, "RestoreObject is not implemented yet"))
        /*
        let bucket = params.bucket;
        if let Err(e) = un_escape_path(params.object) {
    async fn restore_object(&self, req: S3Request<RestoreObjectInput>) -> S3Result<S3Response<RestoreObjectOutput>> {
        let RestoreObjectInput {
            bucket,
            key: object,
            restore_request: rreq,
            version_id,
            ..
        } = req.input.clone();
        let rreq = rreq.unwrap();

        /*if let Err(e) = un_escape_path(object) {
            warn!("post restore object failed, e: {:?}", e);
            return Err(S3Error::with_message(S3ErrorCode::Custom("PostRestoreObjectFailed".into()), "post restore object failed"));
        }
        }*/

        let Some(store) = new_object_layer_fn() else {
            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
        };

        if Err(err) = check_request_auth_type(req, policy::RestoreObjectAction, bucket, object) {
        /*if Err(err) = check_request_auth_type(req, policy::RestoreObjectAction, bucket, object) {
            return Err(S3Error::with_message(S3ErrorCode::Custom("PostRestoreObjectFailed".into()), "post restore object failed"));
        }
        }*/

        if req.content_length <= 0 {
            return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed"));
        }
        let Some(opts) = post_restore_opts(req, bucket, object) else {
        /*if req.content_length <= 0 {
            return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed"));
        }*/
        let Ok(opts) = post_restore_opts(&version_id.unwrap(), &bucket, &object).await else {
            return Err(S3Error::with_message(
                S3ErrorCode::Custom("ErrEmptyRequestBody".into()),
                "post restore object failed",
            ));
        };

        let Some(obj_info) = store.get_object_info(bucket, object, opts) else {
            return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed"));
        let Ok(mut obj_info) = store.get_object_info(&bucket, &object, &opts).await else {
            return Err(S3Error::with_message(
                S3ErrorCode::Custom("ErrEmptyRequestBody".into()),
                "post restore object failed",
            ));
        };

        if obj_info.transitioned_object.status != lifecycle::TRANSITION_COMPLETE {
            return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed"));
            return Err(S3Error::with_message(
                S3ErrorCode::Custom("ErrEmptyRequestBody".into()),
                "post restore object failed",
            ));
        }

        let mut api_err;
        let Some(rreq) = parse_restore_request(req.body(), req.content_length) else {
            let api_err = errorCodes.ToAPIErr(ErrMalformedXML);
            api_err.description = err.Error()
            return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed"));
        };
        let mut status_code = http::StatusCode::OK;
        //let mut api_err;
        let mut _status_code = http::StatusCode::OK;
        let mut already_restored = false;
        if Err(err) = rreq.validate(store) {
            api_err = errorCodes.ToAPIErr(ErrMalformedXML)
            api_err.description = err.Error()
            return Err(S3Error::with_message(S3ErrorCode::Custom("ErrEmptyRequestBody".into()), "post restore object failed"));
        if let Err(_err) = rreq.validate(store.clone()) {
            //api_err = to_api_err(ErrMalformedXML);
            //api_err.description = err.to_string();
            return Err(S3Error::with_message(
                S3ErrorCode::Custom("ErrEmptyRequestBody".into()),
                "post restore object failed",
            ));
        } else {
            if obj_info.restore_ongoing && rreq.Type != "SELECT" {
                return Err(S3Error::with_message(S3ErrorCode::Custom("ErrObjectRestoreAlreadyInProgress".into()), "post restore object failed"));
            if obj_info.restore_ongoing && (rreq.type_.is_none() || rreq.type_.as_ref().unwrap().as_str() != "SELECT") {
                return Err(S3Error::with_message(
                    S3ErrorCode::Custom("ErrObjectRestoreAlreadyInProgress".into()),
                    "post restore object failed",
                ));
            }
            if !obj_info.restore_ongoing && !obj_info.restore_expires.unix_timestamp() == 0 {
                status_code = http::StatusCode::Accepted;
            if !obj_info.restore_ongoing && obj_info.restore_expires.unwrap().unix_timestamp() != 0 {
                _status_code = http::StatusCode::ACCEPTED;
                already_restored = true;
            }
        }
        let restore_expiry = lifecycle::expected_expiry_time(OffsetDateTime::now_utc(), rreq.days);
        let mut metadata = clone_mss(obj_info.user_defined);
        let restore_expiry = lifecycle::expected_expiry_time(OffsetDateTime::now_utc(), *rreq.days.as_ref().unwrap());
        let mut metadata = obj_info.user_defined.clone();

        if rreq.type != "SELECT" {
            obj_info.metadataOnly = true;
            metadata[xhttp.AmzRestoreExpiryDays] = rreq.days;
            metadata[xhttp.AmzRestoreRequestDate] = OffsetDateTime::now_utc().format(http::TimeFormat);
        let mut header = HeaderMap::new();

        let obj_info_ = obj_info.clone();
        if rreq.type_.is_none() || rreq.type_.as_ref().unwrap().as_str() != "SELECT" {
            obj_info.metadata_only = true;
            metadata.insert(AMZ_RESTORE_EXPIRY_DAYS.to_string(), rreq.days.unwrap().to_string());
            metadata.insert(AMZ_RESTORE_REQUEST_DATE.to_string(), OffsetDateTime::now_utc().format(&Rfc3339).unwrap());
            if already_restored {
                metadata[AmzRestore] = completed_restore_obj(restore_expiry).String()
                metadata.insert(
                    X_AMZ_RESTORE.as_str().to_string(),
                    RestoreStatus {
                        is_restore_in_progress: Some(false),
                        restore_expiry_date: Some(Timestamp::from(restore_expiry)),
                    }
                    .to_string(),
                );
            } else {
                metadata[AmzRestore] = ongoing_restore_obj().to_string()
                metadata.insert(
                    X_AMZ_RESTORE.as_str().to_string(),
                    RestoreStatus {
                        is_restore_in_progress: Some(true),
                        restore_expiry_date: Some(Timestamp::from(OffsetDateTime::now_utc())),
                    }
                    .to_string(),
                );
            }
            obj_info.user_defined = metadata;
            if let Err(err) = store.copy_object(bucket, object, bucket, object, obj_info, ObjectOptions {
                version_id: obj_info.version_id,
            }, ObjectOptions {
                version_id: obj_info.version_id,
                m_time: obj_info.mod_time,
            }) {
                return Err(S3Error::with_message(S3ErrorCode::Custom("ErrInvalidObjectState".into()), "post restore object failed"));
            if let Err(_err) = store
                .clone()
                .copy_object(
                    &bucket,
                    &object,
                    &bucket,
                    &object,
                    &mut obj_info,
                    &ObjectOptions {
                        version_id: obj_info_.version_id.map(|e| e.to_string()),
                        ..Default::default()
                    },
                    &ObjectOptions {
                        version_id: obj_info_.version_id.map(|e| e.to_string()),
                        mod_time: obj_info_.mod_time,
                        ..Default::default()
                    },
                )
                .await
            {
                return Err(S3Error::with_message(
                    S3ErrorCode::Custom("ErrInvalidObjectState".into()),
                    "post restore object failed",
                ));
            }
            if already_restored {
                return Ok(());
                let output = RestoreObjectOutput {
                    request_charged: Some(RequestCharged::from_static(RequestCharged::REQUESTER)),
                    restore_output_path: None,
                };
                return Ok(S3Response::new(output));
            }
        }

        let restore_object = must_get_uuid();
        if rreq.output_location.s3.bucket_name != "" {
            w.Header()[AmzRestoreOutputPath] = []string{pathJoin(rreq.OutputLocation.S3.BucketName, rreq.OutputLocation.S3.Prefix, restore_object)}
        let restore_object = Uuid::new_v4().to_string();
        //if let Some(rreq) = rreq {
        if let Some(output_location) = &rreq.output_location {
            if let Some(s3) = &output_location.s3 {
                if !s3.bucket_name.is_empty() {
                    header.insert(
                        X_AMZ_RESTORE_OUTPUT_PATH,
                        format!("{}{}{}", s3.bucket_name, s3.prefix, restore_object).parse().unwrap(),
                    );
                }
            }
        }
        w.WriteHeader(status_code)
        send_event(EventArgs {
        //}
        /*send_event(EventArgs {
            event_name: event::ObjectRestorePost,
            bucket_name: bucket,
            object: obj_info,
            req_params: extract_req_params(r),
            user_agent: req.user_agent(),
            host: handlers::get_source_ip(r),
        });
        });*/
        tokio::spawn(async move {
            if !rreq.SelectParameters.IsEmpty() {
                let actual_size = obj_info.get_actual_size();
            /*if rreq.select_parameters.is_some() {
                let actual_size = obj_info_.get_actual_size();
                if actual_size.is_err() {
                    return Err(S3Error::with_message(S3ErrorCode::Custom("ErrInvalidObjectState".into()), "post restore object failed"));
                }

                let object_rsc = s3select.NewObjectReadSeekCloser(
                    |offset int64| -> (io.ReadCloser, error) {
                let object_rsc = s3select.new_object_read_seek_closer(
                    |offset: i64| -> (ReadCloser, error) {
                        rs := &HTTPRangeSpec{
                            IsSuffixLength: false,
                            Start: offset,
                            End: -1,
                        }
                        return getTransitionedObjectReader(bucket, object, rs, r.Header,
                            obj_info, ObjectOptions {version_id: obj_info.version_id});
                        return get_transitioned_object_reader(bucket, object, rs, r.Header,
                            obj_info, ObjectOptions {version_id: obj_info_.version_id});
                    },
                    actual_size.unwrap(),
                );
                if err = rreq.SelectParameters.Open(objectRSC); err != nil {
                if err = rreq.select_parameters.open(object_rsc); err != nil {
                    if serr, ok := err.(s3select.SelectError); ok {
                        let encoded_error_response = encodeResponse(APIErrorResponse {
                            code: serr.ErrorCode(),
@@ -982,29 +1052,41 @@ impl S3 for FS {
                rreq.select_parameters.evaluate(rw);
                rreq.select_parameters.Close();
                return Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header));
            }
            }*/
            let opts = ObjectOptions {
                transition: TransitionOptions {
                    restore_request: rreq,
                    restore_expiry: restore_expiry,
                    restore_expiry,
                    ..Default::default()
                },
                version_id: objInfo.version_id,
            }
            if Err(err) = store.restore_transitioned_object(bucket, object, opts) {
                format!(format!("unable to restore transitioned bucket/object {}/{}: {}", bucket, object, err.to_string()));
                return Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header));
                version_id: obj_info_.version_id.map(|e| e.to_string()),
                ..Default::default()
            };
            if let Err(err) = store.clone().restore_transitioned_object(&bucket, &object, &opts).await {
                warn!("unable to restore transitioned bucket/object {}/{}: {}", bucket, object, err.to_string());
                return Err(S3Error::with_message(
                    S3ErrorCode::Custom("ErrRestoreTransitionedObject".into()),
                    format!("unable to restore transitioned bucket/object {}/{}: {}", bucket, object, err),
                ));
            }

            send_event(EventArgs {
            /*send_event(EventArgs {
                EventName: event.ObjectRestoreCompleted,
                BucketName: bucket,
                Object: objInfo,
                ReqParams: extractReqParams(r),
                UserAgent: r.UserAgent(),
                Host: handlers.GetSourceIP(r),
            });
            });*/
            Ok(())
        });
        */

        let output = RestoreObjectOutput {
            request_charged: Some(RequestCharged::from_static(RequestCharged::REQUESTER)),
            restore_output_path: None,
        };

        return Ok(S3Response::with_headers(output, header));
    }
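
    // Restore flow recap: validate the RestoreRequest, stamp x-amz-restore and
    // expiry metadata onto the object via copy_object so clients can see the
    // restore status, then drive restore_transitioned_object to pull the bytes
    // back from the warm tier, returning the output-path header when an S3
    // output location was requested.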

    /// Delete a bucket