diff --git a/.docker/Dockerfile.devenv b/.docker/Dockerfile.devenv index 1e3916af..fee6c2dd 100644 --- a/.docker/Dockerfile.devenv +++ b/.docker/Dockerfile.devenv @@ -7,9 +7,10 @@ RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/ RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y # install protoc -RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip \ - && unzip protoc-30.2-linux-x86_64.zip -d protoc3 \ - && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-30.2-linux-x86_64.zip protoc3 +RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \ + && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \ + && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \ + && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3 # install flatc RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \ diff --git a/.docker/Dockerfile.rockylinux9.3 b/.docker/Dockerfile.rockylinux9.3 index eb3a25d7..f677aabe 100644 --- a/.docker/Dockerfile.rockylinux9.3 +++ b/.docker/Dockerfile.rockylinux9.3 @@ -13,10 +13,10 @@ RUN dnf makecache RUN yum install wget git unzip gcc openssl-devel pkgconf-pkg-config -y # install protoc -RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip \ - && unzip protoc-30.2-linux-x86_64.zip -d protoc3 \ +RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \ + && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \ && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \ - && rm -rf protoc-30.2-linux-x86_64.zip protoc3 + && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3 # install flatc RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \ diff --git a/.docker/Dockerfile.ubuntu22.04 b/.docker/Dockerfile.ubuntu22.04 index e8f71520..2cb9689c 100644 --- a/.docker/Dockerfile.ubuntu22.04 +++ b/.docker/Dockerfile.ubuntu22.04 @@ -7,9 +7,10 @@ RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/ RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y # install protoc -RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip \ - && unzip protoc-30.2-linux-x86_64.zip -d protoc3 \ - && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-30.2-linux-x86_64.zip protoc3 +RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \ + && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \ + && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \ + && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3 # install flatc RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \ diff --git a/.docker/observability/README_ZH.md b/.docker/observability/README_ZH.md index 53b6db0b..48568689 100644 --- a/.docker/observability/README_ZH.md +++ b/.docker/observability/README_ZH.md @@ -22,21 +22,6 @@ docker compose -f docker-compose.yml up -d ## 配置可观测性 -### 创建配置文件 - -1. 进入 `deploy/config` 目录 -2. 复制示例配置:`cp obs.toml.example obs.toml` -3. 编辑 `obs.toml` 配置文件,修改以下关键参数: - -| 配置项 | 说明 | 示例值 | -|-----------------|----------------------------|-----------------------| -| endpoint | OpenTelemetry Collector 地址 | http://localhost:4317 | -| service_name | 服务名称 | rustfs | -| service_version | 服务版本 | 1.0.0 | -| environment | 运行环境 | production | -| meter_interval | 指标导出间隔 (秒) | 30 | -| sample_ratio | 采样率 | 1.0 | -| use_stdout | 是否输出到控制台 | true/false | -| logger_level | 日志级别 | info | - -``` \ No newline at end of file +```shell +export RUSTFS_OBS_ENDPOINT="http://localhost:4317" # OpenTelemetry Collector 地址 +``` diff --git a/.docker/observability/config/obs-multi.toml b/.docker/observability/config/obs-multi.toml deleted file mode 100644 index 2637a401..00000000 --- a/.docker/observability/config/obs-multi.toml +++ /dev/null @@ -1,34 +0,0 @@ -[observability] -endpoint = "http://otel-collector:4317" # Default is "http://localhost:4317" if not specified -use_stdout = false # Output with stdout, true output, false no output -sample_ratio = 2.0 -meter_interval = 30 -service_name = "rustfs" -service_version = "0.1.0" -environments = "production" -logger_level = "debug" -local_logging_enabled = true - -#[[sinks]] -#type = "Kafka" -#brokers = "localhost:9092" -#topic = "logs" -#batch_size = 100 # Default is 100 if not specified -#batch_timeout_ms = 1000 # Default is 1000ms if not specified -# -#[[sinks]] -#type = "Webhook" -#endpoint = "http://localhost:8080/webhook" -#auth_token = "" -#batch_size = 100 # Default is 3 if not specified -#batch_timeout_ms = 1000 # Default is 100ms if not specified - -[[sinks]] -type = "File" -path = "/root/data/logs/rustfs.log" -buffer_size = 100 # Default is 8192 bytes if not specified -flush_interval_ms = 1000 -flush_threshold = 100 - -[logger] -queue_capacity = 10 \ No newline at end of file diff --git a/.docker/observability/config/obs.toml b/.docker/observability/config/obs.toml deleted file mode 100644 index 58069fc5..00000000 --- a/.docker/observability/config/obs.toml +++ /dev/null @@ -1,34 +0,0 @@ -[observability] -endpoint = "http://localhost:4317" # Default is "http://localhost:4317" if not specified -use_stdout = false # Output with stdout, true output, false no output -sample_ratio = 2.0 -meter_interval = 30 -service_name = "rustfs" -service_version = "0.1.0" -environments = "production" -logger_level = "debug" -local_logging_enabled = true - -#[[sinks]] -#type = "Kafka" -#brokers = "localhost:9092" -#topic = "logs" -#batch_size = 100 # Default is 100 if not specified -#batch_timeout_ms = 1000 # Default is 1000ms if not specified -# -#[[sinks]] -#type = "Webhook" -#endpoint = "http://localhost:8080/webhook" -#auth_token = "" -#batch_size = 100 # Default is 3 if not specified -#batch_timeout_ms = 1000 # Default is 100ms if not specified - -[[sinks]] -type = "File" -path = "/root/data/logs/rustfs.log" -buffer_size = 100 # Default is 8192 bytes if not specified -flush_interval_ms = 1000 -flush_threshold = 100 - -[logger] -queue_capacity = 10 \ No newline at end of file diff --git a/.docker/observability/docker-compose.yml b/.docker/observability/docker-compose.yml index 55e4f84c..22a59e48 100644 --- a/.docker/observability/docker-compose.yml +++ b/.docker/observability/docker-compose.yml @@ -1,6 +1,6 @@ services: otel-collector: - image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.124.0 + image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0 environment: - TZ=Asia/Shanghai volumes: @@ -16,7 +16,7 @@ services: networks: - otel-network jaeger: - image: jaegertracing/jaeger:2.5.0 + image: jaegertracing/jaeger:2.6.0 environment: - TZ=Asia/Shanghai ports: @@ -26,7 +26,7 @@ services: networks: - otel-network prometheus: - image: prom/prometheus:v3.3.0 + image: prom/prometheus:v3.4.1 environment: - TZ=Asia/Shanghai volumes: @@ -36,7 +36,7 @@ services: networks: - otel-network loki: - image: grafana/loki:3.5.0 + image: grafana/loki:3.5.1 environment: - TZ=Asia/Shanghai volumes: @@ -47,7 +47,7 @@ services: networks: - otel-network grafana: - image: grafana/grafana:11.6.1 + image: grafana/grafana:12.0.1 ports: - "3000:3000" # Web UI environment: diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index 345eec13..8c4399ac 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -32,11 +32,11 @@ runs: - uses: arduino/setup-protoc@v3 with: - version: "30.2" + version: "31.1" - uses: Nugine/setup-flatc@v1 with: - version: "24.3.25" + version: "25.2.10" - uses: dtolnay/rust-toolchain@master with: diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index da6edbed..bbf16465 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -4,13 +4,13 @@ on: push: branches: - main - paths: + paths: - '**/Cargo.toml' - '**/Cargo.lock' pull_request: branches: - main - paths: + paths: - '**/Cargo.toml' - '**/Cargo.lock' schedule: @@ -20,6 +20,6 @@ jobs: audit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4.2.2 - uses: taiki-e/install-action@cargo-audit - run: cargo audit -D warnings diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 07c80c8e..03c1ec03 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -81,7 +81,7 @@ jobs: uses: actions/cache@v4.2.3 with: path: /Users/runner/hostedtoolcache/protoc - key: protoc-${{ runner.os }}-30.2 + key: protoc-${{ runner.os }}-31.1 restore-keys: | protoc-${{ runner.os }}- @@ -89,7 +89,7 @@ jobs: if: steps.cache-protoc.outputs.cache-hit != 'true' uses: arduino/setup-protoc@v3 with: - version: '30.2' + version: '31.1' repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Setup Flatc @@ -107,10 +107,10 @@ jobs: # Set up Zig for cross-compilation - uses: mlugg/setup-zig@v2 - if: matrix.variant.glibc != 'default' || contains(matrix.variant.target, 'linux') + if: matrix.variant.glibc != 'default' || contains(matrix.variant.target, 'aarch64-unknown-linux') - uses: taiki-e/install-action@cargo-zigbuild - if: matrix.variant.glibc != 'default' || contains(matrix.variant.target, 'linux') + if: matrix.variant.glibc != 'default' || contains(matrix.variant.target, 'aarch64-unknown-linux') # Download static resources - name: Download and Extract Static Assets @@ -150,7 +150,7 @@ jobs: # Determine whether to use zigbuild USE_ZIGBUILD=false - if [[ "$GLIBC" != "default" || "$TARGET" == *"linux"* ]]; then + if [[ "$GLIBC" != "default" || "$TARGET" == *"aarch64-unknown-linux"* ]]; then USE_ZIGBUILD=true echo "Using zigbuild for cross-compilation" fi diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 512a6751..a656b181 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: permissions: actions: write contents: read - runs-on: self-hosted + runs-on: ubuntu-latest outputs: should_skip: ${{ steps.skip_check.outputs.should_skip }} steps: @@ -34,9 +34,9 @@ jobs: pr-checks: name: Pull Request Quality Checks if: github.event_name == 'pull_request' - runs-on: self-hosted + runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4.2.2 - uses: ./.github/actions/setup - name: Format Check @@ -54,9 +54,9 @@ jobs: develop: needs: skip-check if: needs.skip-check.outputs.should_skip != 'true' - runs-on: self-hosted + runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4.2.2 - uses: ./.github/actions/setup - name: Format @@ -92,9 +92,9 @@ jobs: - skip-check - develop if: needs.skip-check.outputs.should_skip != 'true' - runs-on: self-hosted + runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4.2.2 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 with: diff --git a/.github/workflows/samply.yml b/.github/workflows/samply.yml index 012257c6..7ae200ff 100644 --- a/.github/workflows/samply.yml +++ b/.github/workflows/samply.yml @@ -7,7 +7,7 @@ jobs: profile: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4.2.2 - uses: dtolnay/rust-toolchain@nightly with: diff --git a/.gitignore b/.gitignore index 8eb6191c..c3bbd3e6 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,6 @@ rustfs/static/* !rustfs/static/.gitkeep vendor cli/rustfs-gui/embedded-rustfs/rustfs -deploy/config/obs.toml *.log deploy/certs/* *jsonl @@ -18,4 +17,4 @@ deploy/certs/* .rustfs.sys .cargo profile.json -.docker/openobserve-otel/data \ No newline at end of file +.docker/openobserve-otel/data diff --git a/Cargo.lock b/Cargo.lock index 0006dc63..f47523c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -58,23 +58,23 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", "once_cell", "version_check", ] [[package]] name = "ahash" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ "cfg-if", "const-random", - "getrandom 0.2.15", + "getrandom 0.3.3", "once_cell", "version_check", - "zerocopy 0.7.35", + "zerocopy", ] [[package]] @@ -130,9 +130,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" dependencies = [ "anstyle", "anstyle-parse", @@ -145,36 +145,36 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.7" +version = "3.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" dependencies = [ "anstyle", - "once_cell", + "once_cell_polyfill", "windows-sys 0.59.0", ] @@ -196,7 +196,7 @@ dependencies = [ "ecstore", "futures", "futures-core", - "http", + "http 1.3.1", "object_store", "pin-project-lite", "s3s", @@ -215,7 +215,7 @@ dependencies = [ "base64-simd", "common", "hex-simd", - "rand 0.8.5", + "rand 0.9.1", "rsa", "serde", "serde_json", @@ -307,14 +307,14 @@ version = "54.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a12fcdb3f1d03f69d3ec26ac67645a8fe3f878d77b5ebb0b15d64a116c212985" dependencies = [ - "ahash 0.8.11", + "ahash 0.8.12", "arrow-buffer", "arrow-data", "arrow-schema", "chrono", "chrono-tz", "half", - "hashbrown 0.15.2", + "hashbrown 0.15.4", "num", ] @@ -452,7 +452,7 @@ version = "54.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69efcd706420e52cd44f5c4358d279801993846d1c2a8e52111853d61d55a619" dependencies = [ - "ahash 0.8.11", + "ahash 0.8.12", "arrow-array", "arrow-buffer", "arrow-data", @@ -509,7 +509,7 @@ dependencies = [ "serde_repr", "tokio", "url", - "zbus 5.5.0", + "zbus 5.7.1", ] [[package]] @@ -555,9 +555,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +checksum = "1237c0ae75a0f3765f58910ff9cdd0a12eeb39ab2f4c7de23262f337f0aacbb3" dependencies = [ "async-lock", "cfg-if", @@ -566,7 +566,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.44", + "rustix 1.0.7", "slab", "tracing", "windows-sys 0.59.0", @@ -585,9 +585,9 @@ dependencies = [ [[package]] name = "async-process" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" +checksum = "cde3f4e40e6021d7acffc90095cbd6dc54cb593903d1de5832f435eb274b85dc" dependencies = [ "async-channel", "async-io", @@ -598,7 +598,7 @@ dependencies = [ "cfg-if", "event-listener", "futures-lite", - "rustix 0.38.44", + "rustix 1.0.7", "tracing", ] @@ -610,14 +610,14 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] name = "async-signal" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" +checksum = "d7605a4e50d4b06df3898d5a70bf5fde51ed9059b0434b73105193bc27acce0d" dependencies = [ "async-io", "async-lock", @@ -625,12 +625,34 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.44", + "rustix 1.0.7", "signal-hook-registry", "slab", "windows-sys 0.59.0", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "async-task" version = "4.7.1" @@ -645,7 +667,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -708,7 +730,7 @@ checksum = "99e1aca718ea7b89985790c94aad72d77533063fe00bc497bb79a7c2dae6a661" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -718,10 +740,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] -name = "aws-lc-rs" -version = "1.13.0" +name = "aws-credential-types" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b756939cb2f8dc900aa6dcd505e6e2428e9cae7ff7b028c49e3946efa70878" +checksum = "687bc16bc431a8533fe0097c7f0182874767f920989d7260950172ae8e3c4465" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "zeroize", +] + +[[package]] +name = "aws-lc-rs" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fcc8f365936c834db5514fc45aee5b1202d677e6b40e48468aaaa8183ca8c7" dependencies = [ "aws-lc-sys", "zeroize", @@ -729,9 +763,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ddeb19ee86cb16ecfc871e5b0660aff6285760957aaedda6284cf0e790d3769" +checksum = "61b1d86e7705efe1be1b569bab41d4fa1e14e220b60a160f78de2db687add079" dependencies = [ "bindgen", "cc", @@ -740,6 +774,293 @@ dependencies = [ "fs_extra", ] +[[package]] +name = "aws-runtime" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c4063282c69991e57faab9e5cb21ae557e59f5b0fb285c196335243df8dc25c" +dependencies = [ + "aws-credential-types", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http-body 0.4.6", + "percent-encoding", + "pin-project-lite", + "tracing", + "uuid", +] + +[[package]] +name = "aws-sdk-s3" +version = "1.91.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10c7d58f9c99e7d33e5a9b288ec84db24de046add7ba4c1e98baf6b3a5b37fde" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-checksums", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "fastrand", + "hex", + "hmac 0.12.1", + "http 0.2.12", + "http 1.3.1", + "http-body 0.4.6", + "lru 0.12.5", + "percent-encoding", + "regex-lite", + "sha2 0.10.9", + "tracing", + "url", +] + +[[package]] +name = "aws-sigv4" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3734aecf9ff79aa401a6ca099d076535ab465ff76b46440cf567c8e70b65dc13" +dependencies = [ + "aws-credential-types", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "crypto-bigint 0.5.5", + "form_urlencoded", + "hex", + "hmac 0.12.1", + "http 0.2.12", + "http 1.3.1", + "p256", + "percent-encoding", + "ring", + "sha2 0.10.9", + "subtle", + "time", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-async" +version = "1.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e190749ea56f8c42bf15dd76c65e14f8f765233e6df9b0506d9d934ebef867c" +dependencies = [ + "futures-util", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "aws-smithy-checksums" +version = "0.63.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2f77a921dbd2c78ebe70726799787c1d110a2245dd65e39b20923dfdfb2deee" +dependencies = [ + "aws-smithy-http", + "aws-smithy-types", + "bytes", + "crc-fast", + "hex", + "http 0.2.12", + "http-body 0.4.6", + "md-5", + "pin-project-lite", + "sha1 0.10.6", + "sha2 0.10.9", + "tracing", +] + +[[package]] +name = "aws-smithy-eventstream" +version = "0.60.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c45d3dddac16c5c59d553ece225a88870cf81b7b813c9cc17b78cf4685eac7a" +dependencies = [ + "aws-smithy-types", + "bytes", + "crc32fast", +] + +[[package]] +name = "aws-smithy-http" +version = "0.62.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99335bec6cdc50a346fda1437f9fefe33abf8c99060739a546a16457f2862ca9" +dependencies = [ + "aws-smithy-eventstream", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http 1.3.1", + "http-body 0.4.6", + "percent-encoding", + "pin-project-lite", + "pin-utils", + "tracing", +] + +[[package]] +name = "aws-smithy-http-client" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "073d330f94bdf1f47bb3e0f5d45dda1e372a54a553c39ab6e9646902c8c81594" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "h2 0.3.26", + "h2 0.4.10", + "http 0.2.12", + "http 1.3.1", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper 1.6.0", + "hyper-rustls 0.24.2", + "hyper-rustls 0.27.7", + "hyper-util", + "pin-project-lite", + "rustls 0.21.12", + "rustls 0.23.27", + "rustls-native-certs 0.8.1", + "rustls-pki-types", + "tokio", + "tower", + "tracing", +] + +[[package]] +name = "aws-smithy-json" +version = "0.61.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92144e45819cae7dc62af23eac5a038a58aa544432d2102609654376a900bd07" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-observability" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9364d5989ac4dd918e5cc4c4bdcc61c9be17dcd2586ea7f69e348fc7c6cab393" +dependencies = [ + "aws-smithy-runtime-api", +] + +[[package]] +name = "aws-smithy-runtime" +version = "1.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14302f06d1d5b7d333fd819943075b13d27c7700b414f574c3c35859bfb55d5e" +dependencies = [ + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-http-client", + "aws-smithy-observability", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.3.1", + "http-body 0.4.6", + "http-body 1.0.1", + "pin-project-lite", + "pin-utils", + "tokio", + "tracing", +] + +[[package]] +name = "aws-smithy-runtime-api" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e5d9e3a80a18afa109391fb5ad09c3daf887b516c6fd805a157c6ea7994a57" +dependencies = [ + "aws-smithy-async", + "aws-smithy-types", + "bytes", + "http 0.2.12", + "http 1.3.1", + "pin-project-lite", + "tokio", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-types" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40076bd09fadbc12d5e026ae080d0930defa606856186e31d83ccc6a255eeaf3" +dependencies = [ + "base64-simd", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http 1.3.1", + "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", + "itoa 1.0.15", + "num-integer", + "pin-project-lite", + "pin-utils", + "ryu", + "serde", + "time", + "tokio", + "tokio-util", +] + +[[package]] +name = "aws-smithy-xml" +version = "0.60.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab0b0166827aa700d3dc519f72f8b3a91c35d0b8d042dc5d643a91e6f80648fc" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "aws-types" +version = "1.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a322fec39e4df22777ed3ad8ea868ac2f94cd15e1a55f6ee8d8d6305057689a" +dependencies = [ + "aws-credential-types", + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "rustc_version", + "tracing", +] + [[package]] name = "axum" version = "0.8.4" @@ -750,10 +1071,10 @@ dependencies = [ "bytes", "form_urlencoded", "futures-util", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.6.0", "hyper-util", "itoa 1.0.15", "matchit", @@ -768,7 +1089,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", "tracing", @@ -782,8 +1103,8 @@ checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6" dependencies = [ "bytes", "futures-core", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -804,14 +1125,14 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", "rustversion", "serde", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", ] @@ -825,13 +1146,13 @@ dependencies = [ "arc-swap", "bytes", "fs-err", - "http", - "http-body", - "hyper", + "http 1.3.1", + "http-body 1.0.1", + "hyper 1.6.0", "hyper-util", "pin-project-lite", "rustls 0.23.27", - "rustls-pemfile", + "rustls-pemfile 2.2.0", "rustls-pki-types", "tokio", "tokio-rustls 0.26.2", @@ -840,9 +1161,9 @@ dependencies = [ [[package]] name = "backon" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd0b50b1b78dbadd44ab18b3c794e496f3a139abb9fbc27d9c94c4eebbb96496" +checksum = "302eaff5357a264a2c42f127ecb8bac761cf99749fc3dc95677e2743991f99e7" dependencies = [ "fastrand", "gloo-timers", @@ -851,9 +1172,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" dependencies = [ "addr2line", "cfg-if", @@ -864,6 +1185,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base64" version = "0.21.7" @@ -888,9 +1215,9 @@ dependencies = [ [[package]] name = "base64ct" -version = "1.7.3" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "bigdecimal" @@ -911,7 +1238,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cexpr", "clang-sys", "itertools 0.12.1", @@ -924,7 +1251,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.100", + "syn 2.0.101", "which", ] @@ -936,9 +1263,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" dependencies = [ "serde", ] @@ -1064,9 +1391,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee" [[package]] name = "byteorder" @@ -1080,6 +1407,16 @@ version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +[[package]] +name = "bytes-utils" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" +dependencies = [ + "bytes", + "either", +] + [[package]] name = "bytesize" version = "2.0.1" @@ -1120,7 +1457,7 @@ version = "0.18.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ca26ef0159422fb77631dc9d17b102f253b876fe1586b03b803e63a309b4ee2" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cairo-sys-rs", "glib", "libc", @@ -1141,9 +1478,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" dependencies = [ "serde", ] @@ -1179,9 +1516,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.19" +version = "1.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3a13707ac958681c13b39b458c073d0d9bc8a22cb1b2f4c8e55eb72c13f362" +checksum = "956a5e21988b87f372569b66183b78babf23ebc2e744b733e4350a752c4dafac" dependencies = [ "jobserver", "libc", @@ -1342,7 +1679,7 @@ checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", - "libloading 0.8.6", + "libloading 0.8.8", ] [[package]] @@ -1376,7 +1713,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1412,14 +1749,14 @@ dependencies = [ [[package]] name = "cocoa" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f79398230a6e2c08f5c9760610eb6924b52aa9e7950a619602baba59dcbbdbb2" +checksum = "ad36507aeb7e16159dfe68db81ccc27571c3ccd4b76fb2fb72fc59e7a4b1b64c" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "block", - "cocoa-foundation 0.2.0", - "core-foundation 0.10.0", + "cocoa-foundation 0.2.1", + "core-foundation 0.10.1", "core-graphics 0.24.0", "foreign-types", "libc", @@ -1442,23 +1779,22 @@ dependencies = [ [[package]] name = "cocoa-foundation" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14045fb83be07b5acf1c0884b2180461635b433455fa35d1cd6f17f1450679d" +checksum = "81411967c50ee9a1fc11365f8c585f863a22a9697c89239c452292c40ba79b0d" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "block", - "core-foundation 0.10.0", + "core-foundation 0.10.1", "core-graphics-types 0.2.0", - "libc", "objc", ] [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "combine" @@ -1488,7 +1824,7 @@ dependencies = [ "lazy_static", "scopeguard", "tokio", - "tonic 0.13.1", + "tonic", "tracing-error", ] @@ -1516,7 +1852,7 @@ dependencies = [ "serde", "serde_json", "toml", - "winnow 0.7.6", + "winnow 0.7.10", "yaml-rust2", ] @@ -1557,7 +1893,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", "once_cell", "tiny-keccak", ] @@ -1580,7 +1916,7 @@ checksum = "04382d0d9df7434af6b1b49ea1a026ef39df1b0738b1cc373368cf175354f6eb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1600,7 +1936,7 @@ checksum = "f0d1c4c3cb85e5856b34e829af0035d7154f8c2889b15bbf43c8a6c6786dcab5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1656,9 +1992,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -1689,8 +2025,8 @@ version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa95a34622365fa5bbf40b20b75dba8dfa8c94c734aea8ac9a5ca38af14316f1" dependencies = [ - "bitflags 2.9.0", - "core-foundation 0.10.0", + "bitflags 2.9.1", + "core-foundation 0.10.1", "core-graphics-types 0.2.0", "foreign-types", "libc", @@ -1713,8 +2049,8 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d44a101f213f6c4cdc1853d4b78aef6db6bdfa3468798cc1d9912f4735013eb" dependencies = [ - "bitflags 2.9.0", - "core-foundation 0.10.0", + "bitflags 2.9.1", + "core-foundation 0.10.1", "libc", ] @@ -1729,9 +2065,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.2.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" dependencies = [ "crc-catalog", ] @@ -1742,6 +2078,20 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" +[[package]] +name = "crc-fast" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68fcb2be5386ffb77e30bf10820934cb89a628bcb976e7cc632dcd88c059ebea" +dependencies = [ + "cc", + "crc", + "digest 0.10.7", + "libc", + "rand 0.9.1", + "regex", +] + [[package]] name = "crc32c" version = "0.6.8" @@ -1866,7 +2216,7 @@ dependencies = [ "chacha20poly1305", "jsonwebtoken", "pbkdf2", - "rand 0.8.5", + "rand 0.9.1", "serde_json", "sha2 0.10.9", "test-case", @@ -1874,6 +2224,28 @@ dependencies = [ "time", ] +[[package]] +name = "crypto-bigint" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -1887,9 +2259,9 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.2.0-rc.2" +version = "0.2.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "170d71b5b14dec99db7739f6fc7d6ec2db80b78c3acb77db48392ccc3d8a9ea0" +checksum = "8a23fa214dea9efd4dacee5a5614646b30216ae0f05d4bb51bafb50e9da1c5be" dependencies = [ "hybrid-array", ] @@ -1918,7 +2290,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13b588ba4ac1a99f7f2964d24b3d896ddc6bf847ee3855dbd4366f058cfcd331" dependencies = [ "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1972,7 +2344,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1983,7 +2355,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1996,7 +2368,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.10", + "parking_lot_core 0.9.11", ] [[package]] @@ -2010,7 +2382,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.10", + "parking_lot_core 0.9.11", ] [[package]] @@ -2057,7 +2429,7 @@ dependencies = [ "itertools 0.14.0", "log", "object_store", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "parquet", "rand 0.8.5", "regex", @@ -2087,7 +2459,7 @@ dependencies = [ "futures", "itertools 0.14.0", "log", - "parking_lot 0.12.3", + "parking_lot 0.12.4", ] [[package]] @@ -2118,7 +2490,7 @@ version = "46.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f53d7ec508e1b3f68bd301cee3f649834fad51eff9240d898a4b2614cfd0a7a" dependencies = [ - "ahash 0.8.11", + "ahash 0.8.12", "arrow", "arrow-ipc", "base64 0.22.1", @@ -2199,7 +2571,7 @@ dependencies = [ "futures", "log", "object_store", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "rand 0.8.5", "tempfile", "url", @@ -2274,7 +2646,7 @@ version = "46.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adfc2d074d5ee4d9354fdcc9283d5b2b9037849237ddecb8942a29144b77ca05" dependencies = [ - "ahash 0.8.11", + "ahash 0.8.12", "arrow", "datafusion-common", "datafusion-doc", @@ -2295,7 +2667,7 @@ version = "46.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cbceba0f98d921309a9121b702bcd49289d383684cccabf9a92cda1602f3bbb" dependencies = [ - "ahash 0.8.11", + "ahash 0.8.12", "arrow", "datafusion-common", "datafusion-expr-common", @@ -2335,7 +2707,7 @@ dependencies = [ "datafusion-common", "datafusion-expr", "datafusion-physical-plan", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "paste", ] @@ -2374,7 +2746,7 @@ checksum = "4800e1ff7ecf8f310887e9b54c9c444b8e215ccbc7b21c2f244cfae373b1ece7" dependencies = [ "datafusion-expr", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2402,7 +2774,7 @@ version = "46.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1447c2c6bc8674a16be4786b4abf528c302803fafa186aa6275692570e64d85" dependencies = [ - "ahash 0.8.11", + "ahash 0.8.12", "arrow", "datafusion-common", "datafusion-expr", @@ -2424,7 +2796,7 @@ version = "46.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f8c25dcd069073a75b3d2840a79d0f81e64bdd2c05f2d3d18939afb36a7dcb" dependencies = [ - "ahash 0.8.11", + "ahash 0.8.12", "arrow", "datafusion-common", "datafusion-expr-common", @@ -2457,7 +2829,7 @@ version = "46.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88cc160df00e413e370b3b259c8ea7bfbebc134d32de16325950e9e923846b7f" dependencies = [ - "ahash 0.8.11", + "ahash 0.8.12", "arrow", "arrow-ord", "arrow-schema", @@ -2476,7 +2848,7 @@ dependencies = [ "indexmap 2.9.0", "itertools 0.14.0", "log", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "pin-project-lite", "tokio", ] @@ -2528,6 +2900,16 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da692b8d1080ea3045efaab14434d40468c3d8657e42abddfffca87b428f4c1b" +[[package]] +name = "der" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid 0.9.6", + "zeroize", +] + [[package]] name = "der" version = "0.7.10" @@ -2557,7 +2939,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2578,7 +2960,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2588,20 +2970,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] name = "derive_more" -version = "0.99.19" +version = "0.99.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da29a38df43d6f156149c9b43ded5e018ddff2a855cf2cfd62e8cd7d079c69f" +checksum = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" dependencies = [ "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2624,7 +3006,7 @@ checksum = "6c478574b20020306f98d61c8ca3322d762e1ff08117422ac6106438605ea516" dependencies = [ "block-buffer 0.11.0-rc.4", "const-oid 0.10.1", - "crypto-common 0.2.0-rc.2", + "crypto-common 0.2.0-rc.3", "subtle", ] @@ -2704,7 +3086,7 @@ dependencies = [ "dioxus-rsx", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2898,7 +3280,7 @@ dependencies = [ "convert_case 0.6.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2991,7 +3373,7 @@ dependencies = [ "proc-macro2", "quote", "slab", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3003,7 +3385,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3017,7 +3399,7 @@ dependencies = [ "futures-util", "generational-box", "once_cell", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "rustc-hash 1.1.0", "tracing", "warnings", @@ -3064,7 +3446,7 @@ dependencies = [ "proc-macro2", "quote", "server_fn_macro", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3100,7 +3482,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a0d569e003ff27784e0e14e4a594048698e0c0f0b66cabcb51511be55a7caa0" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "block2 0.6.1", "libc", "objc2 0.6.1", @@ -3112,7 +3494,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "objc2 0.6.1", ] @@ -3124,7 +3506,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3141,13 +3523,13 @@ dependencies = [ [[package]] name = "dlopen2_derive" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b99bf03862d7f545ebc28ddd33a665b50865f4dfd84031a393823879bd4c54" +checksum = "788160fb30de9cdd857af31c6a2675904b16ece8fc2737b2c7127ba368c9d0f4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3167,9 +3549,9 @@ checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" [[package]] name = "dpi" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f25c0e292a7ca6d6498557ff1df68f32c99850012b6ea401cf8daf771f22ff53" +checksum = "d8b14ccef22fc6f5a8f4d7d768562a182c04ce9a3b3157b91390b52ddfdf1a76" [[package]] name = "dtoa" @@ -3209,16 +3591,29 @@ dependencies = [ "serde", "serde_json", "tokio", - "tonic 0.13.1", - "tower 0.5.2", + "tonic", + "tower", "url", ] +[[package]] +name = "ecdsa" +version = "0.14.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der 0.6.1", + "elliptic-curve", + "rfc6979", + "signature 1.6.4", +] + [[package]] name = "ecstore" version = "0.0.1" dependencies = [ "async-trait", + "aws-sdk-s3", "backon", "base64-simd", "blake2", @@ -3233,21 +3628,21 @@ dependencies = [ "glob", "hex-simd", "highway", - "http", + "http 1.3.1", "lazy_static", "lock", "madmin", "md-5", "netif", "nix 0.30.1", - "num", "num_cpus", + "once_cell", "path-absolutize", "path-clean", "pin-project-lite", "policy", "protos", - "rand 0.8.5", + "rand 0.9.1", "reed-solomon-erasure", "regex", "reqwest", @@ -3256,9 +3651,9 @@ dependencies = [ "rustfs-config", "rustfs-filemeta", "rustfs-rio", + "rustfs-rsc", "rustfs-utils", "s3s", - "s3s-policy", "serde", "serde_json", "sha2 0.11.0-pre.5", @@ -3271,8 +3666,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "tonic 0.13.1", - "tower 0.5.2", + "tonic", "tracing", "tracing-error", "transform-stream", @@ -3290,6 +3684,26 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +[[package]] +name = "elliptic-curve" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct", + "crypto-bigint 0.4.9", + "der 0.6.1", + "digest 0.10.7", + "ff", + "generic-array", + "group", + "pkcs8 0.9.0", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "encoding_rs" version = "0.8.35" @@ -3323,28 +3737,28 @@ checksum = "fc4caf64a58d7a6d65ab00639b046ff54399a39f5f2554728895ace4b297cd79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] name = "enumset" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a4b049558765cef5f0c1a273c3fc57084d768b44d2f98127aef4cceb17293" +checksum = "11a6b7c3d347de0a9f7bfd2f853be43fe32fa6fac30c70f6d6d67a1e936b87ee" dependencies = [ "enumset_derive", ] [[package]] name = "enumset_derive" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59c3b24c345d8c314966bdc1832f6c2635bfcce8e7cf363bd115987bba2ee242" +checksum = "6da3ea9e1d1a3b1593e15781f930120e72aa7501610b2f82e5b6739c72e8eac5" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3365,9 +3779,9 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" dependencies = [ "libc", "windows-sys 0.59.0", @@ -3419,6 +3833,16 @@ dependencies = [ "simd-adler32", ] +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "field-offset" version = "0.3.6" @@ -3463,15 +3887,15 @@ version = "25.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1045398c1bfd89168b5fd3f1fc11f6e70b34f6f66300c87d44d3de849463abf1" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "rustc_version", ] [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "miniz_oxide", @@ -3539,7 +3963,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3559,9 +3983,9 @@ dependencies = [ [[package]] name = "fs-err" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f89bda4c2a21204059a977ed3bfe746677dfd137b83c339e702b0ac91d482aa" +checksum = "88d7be93788013f265201256d58f04936a8079ad5dc898743aa20525f503b683" dependencies = [ "autocfg", "tokio", @@ -3661,7 +4085,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3808,7 +4232,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a673cf4fb0ea6a91aa86c08695756dfe875277a912cdbf33db9a9f62d47ed82b" dependencies = [ - "parking_lot 0.12.3", + "parking_lot 0.12.4", "tracing", ] @@ -3835,9 +4259,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "js-sys", @@ -3848,9 +4272,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "js-sys", @@ -3914,7 +4338,7 @@ version = "0.18.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "233daaf6e83ae6a12a52055f568f9d7cf4671dabb78ff9560ab6da230ce00ee5" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "futures-channel", "futures-core", "futures-executor", @@ -3942,7 +4366,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3967,7 +4391,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b436093d1598b05e3b7fddc097b2bad32763f53a1beb25ab6f9718c6a60acd09" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cocoa 0.25.0", "crossbeam-channel", "keyboard-types", @@ -3988,7 +4412,7 @@ dependencies = [ "futures-core", "futures-sink", "gloo-utils", - "http", + "http 1.3.1", "js-sys", "pin-project", "serde", @@ -4035,6 +4459,17 @@ dependencies = [ "system-deps", ] +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "gtk" version = "0.18.2" @@ -4084,21 +4519,40 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] name = "h2" -version = "0.4.9" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75249d144030531f8dee69fe9cea04d3edf809a017ae445e2abdff6629e86633" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap 2.9.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http", + "http 1.3.1", "indexmap 2.9.0", "slab", "tokio", @@ -4132,16 +4586,18 @@ version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ - "ahash 0.8.11", + "ahash 0.8.12", "allocator-api2", ] [[package]] name = "hashbrown" -version = "0.15.2" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" dependencies = [ + "allocator-api2", + "equivalent", "foldhash", ] @@ -4151,7 +4607,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.15.4", ] [[package]] @@ -4166,18 +4622,6 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - -[[package]] -name = "hermit-abi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" - [[package]] name = "hermit-abi" version = "0.5.1" @@ -4247,6 +4691,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa 1.0.15", +] + [[package]] name = "http" version = "1.3.1" @@ -4258,6 +4713,17 @@ dependencies = [ "itoa 1.0.15", ] +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] + [[package]] name = "http-body" version = "1.0.1" @@ -4265,7 +4731,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http", + "http 1.3.1", ] [[package]] @@ -4276,8 +4742,8 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "pin-project-lite", ] @@ -4301,13 +4767,37 @@ checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" [[package]] name = "hybrid-array" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dab50e193aebe510fe0e40230145820e02f48dae0cf339ea4204e6e708ff7bd" +checksum = "891d15931895091dea5c47afa5b3c9a01ba634b311919fd4d41388fa0e3d76af" dependencies = [ "typenum", ] +[[package]] +name = "hyper" +version = "0.14.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa 1.0.15", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + "want", +] + [[package]] name = "hyper" version = "1.6.0" @@ -4317,9 +4807,9 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2", - "http", - "http-body", + "h2 0.4.10", + "http 1.3.1", + "http-body 1.0.1", "httparse", "httpdate", "itoa 1.0.15", @@ -4331,20 +4821,36 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.5" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", - "hyper", + "http 0.2.12", + "hyper 0.14.32", + "log", + "rustls 0.21.12", + "rustls-native-certs 0.6.3", + "tokio", + "tokio-rustls 0.24.1", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http 1.3.1", + "hyper 1.6.0", "hyper-util", "rustls 0.23.27", + "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", "tokio-rustls 0.26.2", "tower-service", - "webpki-roots 0.26.8", + "webpki-roots", ] [[package]] @@ -4353,7 +4859,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper", + "hyper 1.6.0", "hyper-util", "pin-project-lite", "tokio", @@ -4362,18 +4868,18 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c293b6b3d21eca78250dc7dbebd6b9210ec5530e038cbfe0661b5c47ab06e8" +checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", "futures-core", "futures-util", - "http", - "http-body", - "hyper", + "http 1.3.1", + "http-body 1.0.1", + "hyper 1.6.0", "ipnet", "libc", "percent-encoding", @@ -4402,7 +4908,7 @@ dependencies = [ "lazy_static", "madmin", "policy", - "rand 0.8.5", + "rand 0.9.1", "regex", "serde", "serde_json", @@ -4426,7 +4932,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.61.0", + "windows-core 0.61.2", ] [[package]] @@ -4440,21 +4946,22 @@ dependencies = [ [[package]] name = "icu_collections" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", + "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locid" -version = "1.5.0" +name = "icu_locale_core" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", @@ -4463,31 +4970,11 @@ dependencies = [ "zerovec", ] -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" - [[package]] name = "icu_normalizer" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", "icu_collections", @@ -4495,67 +4982,54 @@ dependencies = [ "icu_properties", "icu_provider", "smallvec", - "utf16_iter", - "utf8_iter", - "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", "icu_collections", - "icu_locid_transform", + "icu_locale_core", "icu_properties_data", "icu_provider", - "tinystr", + "potential_utf", + "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", - "icu_locid", - "icu_provider_macros", + "icu_locale_core", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", + "zerotrie", "zerovec", ] -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -4575,14 +5049,33 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", ] +[[package]] +name = "include_dir" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "923d117408f1e49d914f1a379a309cffe4f18c05cf4e3d12e613a15fc81bd0dd" +dependencies = [ + "include_dir_macros", +] + +[[package]] +name = "include_dir_macros" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cab85a7ed0bd5f0e76d93846e0147172bed2e2d3f859bcc33a8d9699cad1a75" +dependencies = [ + "proc-macro2", + "quote", +] + [[package]] name = "indexmap" version = "1.9.3" @@ -4601,7 +5094,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", - "hashbrown 0.15.2", + "hashbrown 0.15.4", "serde", ] @@ -4620,7 +5113,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "inotify-sys", "libc", ] @@ -4689,7 +5182,7 @@ version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ - "hermit-abi 0.5.1", + "hermit-abi", "libc", "windows-sys 0.59.0", ] @@ -4805,7 +5298,7 @@ version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "libc", ] @@ -4851,7 +5344,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b750dcadc39a09dbadd74e118f6dd6598df77fa01df0cfcdc52c28dece74528a" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "serde", "unicode-segmentation", ] @@ -5039,19 +5532,19 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.53.0", ] [[package]] name = "libm" -version = "0.2.11" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libredox" @@ -5059,9 +5552,9 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "libc", - "redox_syscall 0.5.11", + "redox_syscall 0.5.12", ] [[package]] @@ -5127,9 +5620,9 @@ checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "local-ip-address" @@ -5152,11 +5645,11 @@ dependencies = [ "common", "lazy_static", "protos", - "rand 0.8.5", + "rand 0.9.1", "serde", "serde_json", "tokio", - "tonic 0.13.1", + "tonic", "tracing", "tracing-error", "url", @@ -5165,9 +5658,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -5198,6 +5691,21 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.4", +] + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "lz4" version = "1.28.1" @@ -5260,7 +5768,7 @@ dependencies = [ "chrono", "common", "humantime", - "hyper", + "hyper 1.6.0", "s3s", "serde", "serde_json", @@ -5310,7 +5818,7 @@ dependencies = [ "manganis-core", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -5358,6 +5866,12 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "md5" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" + [[package]] name = "memchr" version = "2.7.4" @@ -5407,14 +5921,14 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -5457,9 +5971,9 @@ dependencies = [ [[package]] name = "multimap" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" +checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" [[package]] name = "ndk" @@ -5467,7 +5981,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3f42e7bbe13d351b6bead8286a43aac9534b82bd3cc43e47037f012ebfd62d4" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "jni-sys", "log", "ndk-sys", @@ -5538,7 +6052,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cfg-if", "cfg_aliases", "libc", @@ -5551,10 +6065,11 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cfg-if", "cfg_aliases", "libc", + "memoffset", ] [[package]] @@ -5588,7 +6103,7 @@ version = "8.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fee8403b3d66ac7b26aee6e40a897d85dc5ce26f44da36b8b73e987cc52e943" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "filetime", "fsevent-sys", "inotify", @@ -5755,11 +6270,11 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi", "libc", ] @@ -5781,7 +6296,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -5801,12 +6316,12 @@ checksum = "a3c00a0c9600379bd32f8972de90676a7672cba3bf4886986bc05902afc1e093" [[package]] name = "nvml-wrapper" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9bff0aa1d48904a1385ea2a8b97576fbdcbc9a3cfccd0d31fe978e1c4038c5" +checksum = "0d5c6c0ef9702176a570f06ad94f3198bc29c524c8b498f1b9346e1b1bdcbb3a" dependencies = [ - "bitflags 2.9.0", - "libloading 0.8.6", + "bitflags 2.9.1", + "libloading 0.8.8", "nvml-wrapper-sys", "static_assertions", "thiserror 1.0.69", @@ -5815,11 +6330,11 @@ dependencies = [ [[package]] name = "nvml-wrapper-sys" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "698d45156f28781a4e79652b6ebe2eaa0589057d588d3aec1333f6466f13fcb5" +checksum = "dd23dbe2eb8d8335d2bce0299e0a07d6a63c089243d626ca75b770a962ff49e6" dependencies = [ - "libloading 0.8.6", + "libloading 0.8.8", ] [[package]] @@ -5874,7 +6389,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4e89ad9e3d7d297152b17d39ed92cd50ca8063a89a9fa569046d41568891eff" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "block2 0.5.1", "libc", "objc2 0.5.2", @@ -5890,7 +6405,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6f29f568bec459b0ddff777cec4fe3fd8666d82d5a40ebd0ff7e66134f89bcc" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "block2 0.6.1", "objc2 0.6.1", "objc2-core-foundation", @@ -5903,7 +6418,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "617fbf49e071c178c0b24c080767db52958f716d9eabdf0890523aeae54773ef" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "block2 0.5.1", "objc2 0.5.2", "objc2-foundation 0.2.2", @@ -5915,7 +6430,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c10c2894a6fed806ade6027bcd50662746363a9589d3ec9d9bef30a4e4bc166" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "dispatch2 0.3.0", "objc2 0.6.1", ] @@ -5926,7 +6441,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "989c6c68c13021b5c2d6b71456ebb0f9dc78d752e86a98da7c716f4f9470f5a4" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "objc2-core-foundation", ] @@ -5954,7 +6469,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee638a5da3799329310ad4cfa62fbf045d5f56e3ef5ba4149e7452dcf89d5a8" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "block2 0.5.1", "libc", "objc2 0.5.2", @@ -5966,7 +6481,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "900831247d2fe1a09a683278e5384cfb8c80c79fe6b166f9d14bfdde0ea1b03c" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "block2 0.6.1", "objc2 0.6.1", "objc2-core-foundation", @@ -5988,7 +6503,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd0cba1276f6023976a406a14ffa85e1fdd19df6b0f737b063b95f6c8c7aadd6" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "block2 0.5.1", "objc2 0.5.2", "objc2-foundation 0.2.2", @@ -6000,7 +6515,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e42bee7bff906b14b167da2bac5efe6b6a07e6f7c0a21a7308d40c960242dc7a" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "block2 0.5.1", "objc2 0.5.2", "objc2-foundation 0.2.2", @@ -6046,7 +6561,7 @@ dependencies = [ "futures", "humantime", "itertools 0.13.0", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "percent-encoding", "snafu", "tokio", @@ -6061,6 +6576,12 @@ version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +[[package]] +name = "once_cell_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" + [[package]] name = "oorandom" version = "11.1.5" @@ -6081,9 +6602,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "opentelemetry" -version = "0.29.1" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e87237e2775f74896f9ad219d26a2081751187eb7c9f5c58dde20a23b95d16c" +checksum = "aaf416e4cb72756655126f7dd7bb0af49c674f4c1b9903e80c009e0c37e552e6" dependencies = [ "futures-core", "futures-sink", @@ -6095,9 +6616,9 @@ dependencies = [ [[package]] name = "opentelemetry-appender-tracing" -version = "0.29.1" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e716f864eb23007bdd9dc4aec381e188a1cee28eecf22066772b5fd822b9727d" +checksum = "e68f63eca5fad47e570e00e893094fc17be959c80c79a7d6ec1abdd5ae6ffc16" dependencies = [ "opentelemetry", "tracing", @@ -6107,80 +6628,61 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "opentelemetry-http" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46d7ab32b827b5b495bd90fa95a6cb65ccc293555dcc3199ae2937d2d237c8ed" -dependencies = [ - "async-trait", - "bytes", - "http", - "opentelemetry", - "reqwest", - "tracing", -] - [[package]] name = "opentelemetry-otlp" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d899720fe06916ccba71c01d04ecd77312734e2de3467fd30d9d580c8ce85656" +checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b" dependencies = [ - "futures-core", - "http", + "http 1.3.1", "opentelemetry", - "opentelemetry-http", "opentelemetry-proto", "opentelemetry_sdk", "prost", - "reqwest", "thiserror 2.0.12", "tokio", - "tonic 0.12.3", + "tonic", "tracing", ] [[package]] name = "opentelemetry-proto" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c40da242381435e18570d5b9d50aca2a4f4f4d8e146231adb4e7768023309b3" +checksum = "2e046fd7660710fe5a05e8748e70d9058dc15c94ba914e7c4faa7c728f0e8ddc" dependencies = [ "opentelemetry", "opentelemetry_sdk", "prost", - "tonic 0.12.3", + "tonic", ] [[package]] name = "opentelemetry-semantic-conventions" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b29a9f89f1a954936d5aa92f19b2feec3c8f3971d3e96206640db7f9706ae3" +checksum = "83d059a296a47436748557a353c5e6c5705b9470ef6c95cfc52c21a8814ddac2" [[package]] name = "opentelemetry-stdout" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7e27d446dabd68610ef0b77d07b102ecde827a4596ea9c01a4d3811e945b286" +checksum = "447191061af41c3943e082ea359ab8b64ff27d6d34d30d327df309ddef1eef6f" dependencies = [ "chrono", - "futures-util", "opentelemetry", "opentelemetry_sdk", ] [[package]] name = "opentelemetry_sdk" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afdefb21d1d47394abc1ba6c57363ab141be19e27cc70d0e422b7f303e4d290b" +checksum = "11f644aa9e5e31d11896e024305d7e3c98a88884d9f8919dbf37a9991bc47a4b" dependencies = [ "futures-channel", "futures-executor", "futures-util", - "glob", "opentelemetry", "percent-encoding", "rand 0.9.1", @@ -6188,7 +6690,6 @@ dependencies = [ "thiserror 2.0.12", "tokio", "tokio-stream", - "tracing", ] [[package]] @@ -6238,6 +6739,17 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "p256" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa", + "elliptic-curve", + "sha2 0.10.9", +] + [[package]] name = "pango" version = "0.18.3" @@ -6282,12 +6794,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", + "parking_lot_core 0.9.11", ] [[package]] @@ -6306,13 +6818,13 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.11", + "redox_syscall 0.5.12", "smallvec", "windows-targets 0.52.6", ] @@ -6323,7 +6835,7 @@ version = "54.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfb15796ac6f56b429fd99e33ba133783ad75b27c36b4b5ce06f1f82cc97754e" dependencies = [ - "ahash 0.8.11", + "ahash 0.8.12", "arrow-array", "arrow-buffer", "arrow-cast", @@ -6338,7 +6850,7 @@ dependencies = [ "flate2", "futures", "half", - "hashbrown 0.15.2", + "hashbrown 0.15.4", "lz4_flex", "num", "num-bigint", @@ -6475,7 +6987,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -6646,7 +7158,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -6678,9 +7190,19 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" dependencies = [ - "der", - "pkcs8", - "spki", + "der 0.7.10", + "pkcs8 0.10.2", + "spki 0.7.3", +] + +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der 0.6.1", + "spki 0.6.0", ] [[package]] @@ -6689,8 +7211,8 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der", - "spki", + "der 0.7.10", + "spki 0.7.3", ] [[package]] @@ -6754,7 +7276,7 @@ dependencies = [ "jsonwebtoken", "lazy_static", "madmin", - "rand 0.8.5", + "rand 0.9.1", "regex", "serde", "serde_json", @@ -6768,15 +7290,15 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.4" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +checksum = "b53a684391ad002dd6a596ceb6c74fd004fdce75f4be2e3f615068abbea5fd50" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi 0.4.0", + "hermit-abi", "pin-project-lite", - "rustix 0.38.44", + "rustix 1.0.7", "tracing", "windows-sys 0.59.0", ] @@ -6816,6 +7338,15 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "potential_utf" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -6828,7 +7359,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.24", + "zerocopy", ] [[package]] @@ -6839,12 +7370,12 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "prettyplease" -version = "0.2.32" +version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" +checksum = "9dee91521343f4c5c6a63edd65e54f31f5c92fe8978c40a4282f8372194c6a7d" dependencies = [ "proc-macro2", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -6872,7 +7403,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ - "toml_edit 0.22.24", + "toml_edit 0.22.27", ] [[package]] @@ -6922,7 +7453,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "version_check", ] @@ -6952,7 +7483,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.100", + "syn 2.0.101", "tempfile", ] @@ -6966,7 +7497,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -7008,16 +7539,16 @@ dependencies = [ "prost-build", "protobuf", "tokio", - "tonic 0.13.1", + "tonic", "tonic-build", - "tower 0.5.2", + "tower", ] [[package]] name = "psm" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58e5423e24c18cc840e1c98370b3993c6649cd1678b4d24318bcf0a083cbe88" +checksum = "6e944464ec8536cd1beb0bbfd96987eb5e3b72f2ecdafdc5c769a37f1fa2ae1f" dependencies = [ "cc", ] @@ -7033,7 +7564,7 @@ dependencies = [ "derive_builder", "futures", "lazy_static", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "s3s", "snafu", "tokio", @@ -7042,9 +7573,9 @@ dependencies = [ [[package]] name = "quick-xml" -version = "0.37.4" +version = "0.37.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4ce8c88de324ff838700f36fb6ab86c96df0e3c4ab6ef3a9b2044465cce1369" +checksum = "331e97a1af0bf59823e6eadffe373d7b27f485be8748f71471c662c1f269b7fb" dependencies = [ "memchr", "serde", @@ -7052,9 +7583,9 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.7" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" +checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" dependencies = [ "bytes", "cfg_aliases", @@ -7072,12 +7603,13 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.10" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b820744eb4dc9b57a3398183639c511b5a26d2ed702cedd3febaa1393caa22cc" +checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" dependencies = [ "bytes", - "getrandom 0.3.2", + "getrandom 0.3.3", + "lru-slab", "rand 0.9.1", "ring", "rustc-hash 2.1.1", @@ -7092,9 +7624,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "541d0f57c6ec747a90738a52741d3221f7960e8ac2f0ff4b1a63680e033b4ab5" +checksum = "ee4e529991f949c5e25755532370b8af5d114acae52326361d68d47af64aa842" dependencies = [ "cfg_aliases", "libc", @@ -7199,7 +7731,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", ] [[package]] @@ -7208,7 +7740,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", ] [[package]] @@ -7308,7 +7840,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76009fbe0614077fc1a2ce255e3a1881a2e3a3527097d5dc6d8212c585e7e38b" dependencies = [ "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -7331,11 +7863,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2f103c6d277498fbceb16e84d317e2a400f160f46904d5f5410848c829511a3" +checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] [[package]] @@ -7344,7 +7876,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", "libredox", "thiserror 2.0.12", ] @@ -7358,7 +7890,7 @@ dependencies = [ "cc", "libc", "libm", - "lru", + "lru 0.7.8", "parking_lot 0.11.2", "smallvec", "spin", @@ -7396,6 +7928,12 @@ dependencies = [ "regex-syntax 0.8.5", ] +[[package]] +name = "regex-lite" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" + [[package]] name = "regex-syntax" version = "0.6.29" @@ -7410,9 +7948,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.16" +version = "0.12.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bf597b113be201cb2269b4c39b39a804d01b99ee95a4278f0ed04e45cff1c71" +checksum = "a2f8e5513d63f2e5b386eb5106dc67eaf3f84e95258e210489136b8b92ad6119" dependencies = [ "base64 0.22.1", "bytes", @@ -7420,12 +7958,12 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.4.10", + "http 1.3.1", + "http-body 1.0.1", "http-body-util", - "hyper", - "hyper-rustls", + "hyper 1.6.0", + "hyper-rustls 0.27.7", "hyper-util", "ipnet", "js-sys", @@ -7445,7 +7983,7 @@ dependencies = [ "tokio", "tokio-rustls 0.26.2", "tokio-util", - "tower 0.5.2", + "tower", "tower-http", "tower-service", "url", @@ -7453,7 +7991,18 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.0", + "webpki-roots", +] + +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint 0.4.9", + "hmac 0.12.1", + "zeroize", ] [[package]] @@ -7511,7 +8060,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.16", "libc", "untrusted", "windows-sys 0.52.0", @@ -7546,7 +8095,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ "base64 0.21.7", - "bitflags 2.9.0", + "bitflags 2.9.1", "serde", "serde_derive", ] @@ -7563,10 +8112,10 @@ dependencies = [ "num-integer", "num-traits", "pkcs1", - "pkcs8", + "pkcs8 0.10.2", "rand_core 0.6.4", - "signature", - "spki", + "signature 2.2.0", + "spki 0.7.3", "subtle", "zeroize", ] @@ -7581,8 +8130,8 @@ dependencies = [ "flume", "futures-util", "log", - "rustls-native-certs", - "rustls-pemfile", + "rustls-native-certs 0.7.3", + "rustls-pemfile 2.2.0", "rustls-webpki 0.102.8", "thiserror 1.0.69", "tokio", @@ -7591,9 +8140,9 @@ dependencies = [ [[package]] name = "rust-embed" -version = "8.7.1" +version = "8.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60e425e204264b144d4c929d126d0de524b40a961686414bab5040f7465c71be" +checksum = "025908b8682a26ba8d12f6f2d66b987584a4a87bc024abc5bbc12553a8cd178a" dependencies = [ "rust-embed-impl", "rust-embed-utils", @@ -7602,23 +8151,23 @@ dependencies = [ [[package]] name = "rust-embed-impl" -version = "8.7.0" +version = "8.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bf418c9a2e3f6663ca38b8a7134cc2c2167c9d69688860e8961e3faa731702e" +checksum = "6065f1a4392b71819ec1ea1df1120673418bf386f50de1d6f54204d836d4349c" dependencies = [ "proc-macro2", "quote", "rust-embed-utils", "shellexpand", - "syn 2.0.100", + "syn 2.0.101", "walkdir", ] [[package]] name = "rust-embed-utils" -version = "8.7.0" +version = "8.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d55b95147fe01265d06b3955db798bdaed52e60e2211c41137701b3aba8e21" +checksum = "f6cc0c81648b20b70c491ff8cce00c1c3b223bb8ed2b5d41f0e54c6c4c0a3594" dependencies = [ "sha2 0.10.9", "walkdir", @@ -7671,6 +8220,7 @@ dependencies = [ "async-trait", "atoi", "atomic_enum", + "aws-sdk-s3", "axum", "axum-extra", "axum-server", @@ -7685,11 +8235,13 @@ dependencies = [ "flatbuffers 25.2.10", "futures", "futures-util", - "http", - "http-body", - "hyper", + "http 1.3.1", + "http-body 1.0.1", + "hyper 1.6.0", "hyper-util", "iam", + "include_dir", + "jsonwebtoken", "lazy_static", "libsystemd", "lock", @@ -7699,11 +8251,13 @@ dependencies = [ "mime_guess", "netif", "opentelemetry", + "percent-encoding", "pin-project-lite", "policy", "prost-build", "protos", "query", + "regex", "rmp-serde", "rust-embed", "rustfs-config", @@ -7728,12 +8282,13 @@ dependencies = [ "tokio-stream", "tokio-tar", "tokio-util", - "tonic 0.13.1", + "tonic", "tonic-build", - "tower 0.5.2", + "tower", "tower-http", "tracing", "transform-stream", + "urlencoding", "uuid", ] @@ -7755,7 +8310,7 @@ dependencies = [ "axum", "config", "dotenvy", - "http", + "http 1.3.1", "rdkafka", "reqwest", "rumqttc", @@ -7857,11 +8412,11 @@ dependencies = [ "flate2", "futures", "hex-simd", - "http", + "http 1.3.1", "lz4", "md-5", "pin-project-lite", - "rand 0.8.5", + "rand 0.9.1", "reqwest", "rustfs-utils", "snap", @@ -7870,6 +8425,33 @@ dependencies = [ "zstd", ] +[[package]] +name = "rustfs-rsc" +version = "2025.506.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8229535cdd7a9d1f5757bd7b588f342e6ea984d66c6b1f6350f9de85d3ce9c25" +dependencies = [ + "async-stream", + "base64 0.22.1", + "bytes", + "chrono", + "crc32fast", + "futures", + "futures-core", + "futures-util", + "hex", + "hmac 0.12.1", + "hyper 1.6.0", + "md5", + "once_cell", + "regex", + "reqwest", + "serde", + "serde-xml-rs", + "sha2 0.10.9", + "urlencoding", +] + [[package]] name = "rustfs-utils" version = "0.0.1" @@ -7883,7 +8465,7 @@ dependencies = [ "nix 0.30.1", "rustfs-config", "rustls 0.23.27", - "rustls-pemfile", + "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", "sha2 0.10.9", @@ -7913,7 +8495,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "errno", "libc", "linux-raw-sys 0.4.15", @@ -7922,17 +8504,29 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.5" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf" +checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "errno", "libc", "linux-raw-sys 0.9.4", "windows-sys 0.59.0", ] +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + [[package]] name = "rustls" version = "0.22.4" @@ -7958,11 +8552,23 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.2", + "rustls-webpki 0.103.3", "subtle", "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile 1.0.4", + "schannel", + "security-framework 2.11.1", +] + [[package]] name = "rustls-native-certs" version = "0.7.3" @@ -7970,12 +8576,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 2.2.0", "rustls-pki-types", "schannel", "security-framework 2.11.1", ] +[[package]] +name = "rustls-native-certs" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework 3.2.0", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + [[package]] name = "rustls-pemfile" version = "2.2.0" @@ -7987,11 +8614,22 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" dependencies = [ "web-time", + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", ] [[package]] @@ -8007,9 +8645,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.2" +version = "0.103.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7149975849f1abb3832b246010ef62ccc80d3a76169517ada7188252b9cfb437" +checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" dependencies = [ "aws-lc-rs", "ring", @@ -8019,9 +8657,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "ryu" @@ -8048,11 +8686,11 @@ dependencies = [ "futures", "hex-simd", "hmac 0.13.0-pre.5", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "http-body-util", "httparse", - "hyper", + "hyper 1.6.0", "itoa 1.0.15", "md-5", "memchr", @@ -8072,24 +8710,13 @@ dependencies = [ "thiserror 2.0.12", "time", "tokio", - "tower 0.5.2", + "tower", "tracing", "transform-stream", "urlencoding", "zeroize", ] -[[package]] -name = "s3s-policy" -version = "0.12.0-dev" -source = "git+https://github.com/Nugine/s3s.git?rev=4733cdfb27b2713e832967232cbff413bb768c10#4733cdfb27b2713e832967232cbff413bb768c10" -dependencies = [ - "indexmap 2.9.0", - "serde", - "serde_json", - "thiserror 2.0.12", -] - [[package]] name = "same-file" version = "1.0.6" @@ -8114,13 +8741,37 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct", + "der 0.6.1", + "generic-array", + "pkcs8 0.9.0", + "subtle", + "zeroize", +] + [[package]] name = "security-framework" version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -8133,8 +8784,8 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.9.0", - "core-foundation 0.10.0", + "bitflags 2.9.1", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -8214,6 +8865,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "serde-xml-rs" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb3aa78ecda1ebc9ec9847d5d3aba7d618823446a049ba2491940506da6e2782" +dependencies = [ + "log", + "serde", + "thiserror 1.0.69", + "xml-rs", +] + [[package]] name = "serde_derive" version = "1.0.219" @@ -8222,7 +8885,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -8275,14 +8938,14 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] name = "serde_spanned" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" dependencies = [ "serde", ] @@ -8326,7 +8989,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -8340,7 +9003,7 @@ dependencies = [ "dashmap 5.5.3", "futures", "gloo-net", - "http", + "http 1.3.1", "js-sys", "once_cell", "reqwest", @@ -8368,7 +9031,7 @@ dependencies = [ "convert_case 0.6.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "xxhash-rust", ] @@ -8379,7 +9042,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f2aa8119b558a17992e0ac1fd07f080099564f24532858811ce04f742542440" dependencies = [ "server_fn_macro", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -8476,9 +9139,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" +checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" dependencies = [ "libc", "signal-hook-registry", @@ -8493,6 +9156,16 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + [[package]] name = "signature" version = "2.2.0" @@ -8560,12 +9233,12 @@ dependencies = [ [[package]] name = "sledgehammer_bindgen_macro" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33a1b4f13e2bbf2f5b29d09dfebc9de69229ffee245aed80e3b70f9b5fd28c06" +checksum = "f62f06db0370222f7f498ef478fce9f8df5828848d1d3517e3331936d7074f55" dependencies = [ "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -8589,18 +9262,18 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" dependencies = [ "serde", ] [[package]] name = "snafu" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "223891c85e2a29c3fe8fb900c1fae5e69c2e42415e3177752e8718475efa5019" +checksum = "320b01e011bf8d5d7a4a4a4be966d9160968935849c83b918827f6a435e7f627" dependencies = [ "backtrace", "snafu-derive", @@ -8608,14 +9281,14 @@ dependencies = [ [[package]] name = "snafu-derive" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c3c6b7927ffe7ecaa769ee0e3994da3b8cafc8f444578982c83ecb161af917" +checksum = "1961e2ef424c1424204d3a5d6975f934f56b6d50ff5732382d84ebf460e147f7" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -8626,9 +9299,9 @@ checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", @@ -8669,6 +9342,16 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der 0.6.1", +] + [[package]] name = "spki" version = "0.7.3" @@ -8676,7 +9359,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der", + "der 0.7.10", ] [[package]] @@ -8698,7 +9381,7 @@ checksum = "da5fc6819faabb412da764b99d3b713bb55083c11e7e0c00144d386cd6a1939c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -8709,9 +9392,9 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "stacker" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601f9201feb9b09c00266478bf459952b9ef9a6b94edb2f21eba14ab681a60a9" +checksum = "cddb07e32ddb770749da91081d8d0ac3a16f1a569a18b20348cd371f5dead06b" dependencies = [ "cc", "cfg-if", @@ -8743,7 +9426,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f" dependencies = [ "new_debug_unreachable", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "phf_shared 0.11.3", "precomputed-hash", "serde", @@ -8786,7 +9469,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -8886,9 +9569,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.100" +version = "2.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" dependencies = [ "proc-macro2", "quote", @@ -8906,20 +9589,20 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] name = "sysinfo" -version = "0.35.1" +version = "0.35.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79251336d17c72d9762b8b54be4befe38d2db56fbbc0241396d70f173c39d47a" +checksum = "3c3ffa3e4ff2b324a57f7aeb3c349656c7b127c3c189520251a648102a92496e" dependencies = [ "libc", "memchr", @@ -8935,7 +9618,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -8969,9 +9652,9 @@ version = "0.30.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6682a07cf5bab0b8a2bd20d0a542917ab928b5edb75ebd4eda6b05cbaab872da" dependencies = [ - "bitflags 2.9.0", - "cocoa 0.26.0", - "core-foundation 0.10.0", + "bitflags 2.9.1", + "cocoa 0.26.1", + "core-foundation 0.10.1", "core-graphics 0.24.0", "crossbeam-channel", "dispatch", @@ -8990,7 +9673,7 @@ dependencies = [ "ndk-sys", "objc", "once_cell", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "raw-window-handle 0.5.2", "raw-window-handle 0.6.2", "scopeguard", @@ -9011,7 +9694,7 @@ checksum = "f4e16beb8b2ac17db28eab8bca40e62dbfbb34c0fcdc6d9826b11b7b5d047dfd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -9022,14 +9705,14 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "tempfile" -version = "3.19.1" +version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" dependencies = [ "fastrand", - "getrandom 0.3.2", + "getrandom 0.3.3", "once_cell", - "rustix 1.0.5", + "rustix 1.0.7", "windows-sys 0.59.0", ] @@ -9062,7 +9745,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -9073,7 +9756,7 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "test-case-core", ] @@ -9109,7 +9792,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -9120,7 +9803,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -9208,9 +9891,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", @@ -9251,7 +9934,7 @@ dependencies = [ "bytes", "libc", "mio", - "parking_lot 0.12.3", + "parking_lot 0.12.4", "pin-project-lite", "signal-hook-registry", "socket2", @@ -9268,7 +9951,17 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", +] + +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.12", + "tokio", ] [[package]] @@ -9334,21 +10027,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.20" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.24", + "toml_edit 0.22.27", ] [[package]] name = "toml_datetime" -version = "0.6.8" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" dependencies = [ "serde", ] @@ -9377,43 +10070,23 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.24" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ "indexmap 2.9.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.7.6", + "toml_write", + "winnow 0.7.10", ] [[package]] -name = "tonic" -version = "0.12.3" +name = "toml_write" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" -dependencies = [ - "async-trait", - "base64 0.22.1", - "bytes", - "flate2", - "http", - "http-body", - "http-body-util", - "hyper", - "hyper-timeout", - "hyper-util", - "percent-encoding", - "pin-project", - "prost", - "tokio", - "tokio-stream", - "tower 0.4.13", - "tower-layer", - "tower-service", - "tracing", -] +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "tonic" @@ -9426,11 +10099,11 @@ dependencies = [ "base64 0.22.1", "bytes", "flate2", - "h2", - "http", - "http-body", + "h2 0.4.10", + "http 1.3.1", + "http-body 1.0.1", "http-body-util", - "hyper", + "hyper 1.6.0", "hyper-timeout", "hyper-util", "percent-encoding", @@ -9439,7 +10112,7 @@ dependencies = [ "socket2", "tokio", "tokio-stream", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", "tracing", @@ -9456,27 +10129,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.100", -] - -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "indexmap 1.9.3", - "pin-project", - "pin-project-lite", - "rand 0.8.5", - "slab", - "tokio", - "tokio-util", - "tower-layer", - "tower-service", - "tracing", + "syn 2.0.101", ] [[package]] @@ -9500,22 +10153,22 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.4" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdb0c213ca27a9f57ab69ddb290fd80d970922355b83ae380b395d3986b8a2e" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ "async-compression", - "bitflags 2.9.0", + "bitflags 2.9.1", "bytes", "futures-core", "futures-util", - "http", - "http-body", + "http 1.3.1", + "http-body 1.0.1", "iri-string", "pin-project-lite", "tokio", "tokio-util", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", "tracing", @@ -9559,20 +10212,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "1b1ffbcf9c6f6b99d386e7444eb608ba646ae452a36b39737deb9663b610f662" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -9601,9 +10254,9 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd8e764bd6f5813fd8bebc3117875190c5b0415be8f7f8059bffb6ecd979c444" +checksum = "ddcf5959f39507d0d04d6413119c04f33b623f4f951ebcbdddddfad2d0623a9c" dependencies = [ "js-sys", "once_cell", @@ -9711,7 +10364,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 1.3.1", "httparse", "log", "rand 0.8.5", @@ -9855,12 +10508,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -9875,11 +10522,11 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "js-sys", "rand 0.9.1", "serde", @@ -9889,13 +10536,13 @@ dependencies = [ [[package]] name = "uuid-macro-internal" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72dcd78c4f979627a754f5522cea6e6a25e55139056535fe6e69c506cd64a862" +checksum = "26b682e8c381995ea03130e381928e0e005b7c9eb483c6c8682f50e07b33c2b7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -10002,7 +10649,7 @@ checksum = "59195a1db0e95b920366d949ba5e0d3fc0e70b67c09be15ce5abb790106b0571" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -10048,7 +10695,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "wasm-bindgen-shared", ] @@ -10083,7 +10730,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10191,15 +10838,6 @@ dependencies = [ "system-deps", ] -[[package]] -name = "webpki-roots" -version = "0.26.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2210b291f7ea53617fbafcc4939f10914214ec15aace5ba62293a668f322c5c9" -dependencies = [ - "rustls-pki-types", -] - [[package]] name = "webpki-roots" version = "1.0.0" @@ -10231,7 +10869,7 @@ checksum = "1d228f15bba3b9d56dde8bddbee66fa24545bd17b48d5128ccf4a8742b18e431" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -10305,7 +10943,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5ee8f3d025738cb02bad7868bbb5f8a6327501e870bf51f1b455b0a2454a419" dependencies = [ "windows-collections", - "windows-core 0.61.0", + "windows-core 0.61.2", "windows-future", "windows-link", "windows-numerics", @@ -10317,7 +10955,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" dependencies = [ - "windows-core 0.61.0", + "windows-core 0.61.2", ] [[package]] @@ -10335,25 +10973,26 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.61.0" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement 0.60.0", "windows-interface 0.59.1", "windows-link", - "windows-result 0.3.2", - "windows-strings 0.4.0", + "windows-result 0.3.4", + "windows-strings 0.4.2", ] [[package]] name = "windows-future" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a1d6bbefcb7b60acd19828e1bc965da6fcf18a7e39490c5f8be71e54a19ba32" +checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" dependencies = [ - "windows-core 0.61.0", + "windows-core 0.61.2", "windows-link", + "windows-threading", ] [[package]] @@ -10364,7 +11003,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -10375,7 +11014,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -10386,7 +11025,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -10397,7 +11036,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -10412,19 +11051,19 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" dependencies = [ - "windows-core 0.61.0", + "windows-core 0.61.2", "windows-link", ] [[package]] name = "windows-registry" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" +checksum = "b3bab093bdd303a1240bb99b8aba8ea8a69ee19d34c9e2ef9594e708a4878820" dependencies = [ - "windows-result 0.3.2", - "windows-strings 0.3.1", - "windows-targets 0.53.0", + "windows-link", + "windows-result 0.3.4", + "windows-strings 0.4.2", ] [[package]] @@ -10438,9 +11077,9 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ "windows-link", ] @@ -10457,18 +11096,9 @@ dependencies = [ [[package]] name = "windows-strings" -version = "0.3.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" -dependencies = [ - "windows-link", -] - -[[package]] -name = "windows-strings" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ "windows-link", ] @@ -10571,6 +11201,15 @@ dependencies = [ "windows_x86_64_msvc 0.53.0", ] +[[package]] +name = "windows-threading" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-version" version = "0.1.4" @@ -10771,9 +11410,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.7.6" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63d3fcd9bba44b03821e7d699eeee959f3126dcc4aa8e4ae18ec617c2a5cea10" +checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" dependencies = [ "memchr", ] @@ -10784,7 +11423,7 @@ version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] [[package]] @@ -10805,20 +11444,14 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - [[package]] name = "writeable" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "wry" @@ -10828,7 +11461,7 @@ checksum = "ac0099a336829fbf54c26b5f620c68980ebbe37196772aeaf6118df4931b5cb0" dependencies = [ "base64 0.22.1", "block", - "cocoa 0.26.0", + "cocoa 0.26.1", "core-graphics 0.24.0", "crossbeam-channel", "dpi", @@ -10836,7 +11469,7 @@ dependencies = [ "gdkx11", "gtk", "html5ever", - "http", + "http 1.3.1", "javascriptcore-rs", "jni", "kuchikiki", @@ -10888,7 +11521,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" dependencies = [ "libc", - "rustix 1.0.5", + "rustix 1.0.7", ] [[package]] @@ -10901,6 +11534,18 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "xml-rs" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a62ce76d9b56901b19a74f19431b0d8b3bc7ca4ad685a746dfd78ca8f4fc6bda" + +[[package]] +name = "xmlparser" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" + [[package]] name = "xxhash-rust" version = "0.8.15" @@ -10918,9 +11563,9 @@ dependencies = [ [[package]] name = "yaml-rust2" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "818913695e83ece1f8d2a1c52d54484b7b46d0f9c06beeb2649b9da50d9b512d" +checksum = "18b783b2c2789414f8bb84ca3318fc9c2d7e7be1c22907d37839a58dedb369d3" dependencies = [ "arraydeque", "encoding_rs", @@ -10929,9 +11574,9 @@ dependencies = [ [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ "serde", "stable_deref_trait", @@ -10941,13 +11586,13 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "synstructure", ] @@ -10986,9 +11631,9 @@ dependencies = [ [[package]] name = "zbus" -version = "5.5.0" +version = "5.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59c333f648ea1b647bc95dc1d34807c8e25ed7a6feff3394034dc4776054b236" +checksum = "d3a7c7cee313d044fca3f48fa782cb750c79e4ca76ba7bc7718cd4024cdf6f68" dependencies = [ "async-broadcast", "async-recursion", @@ -10998,20 +11643,18 @@ dependencies = [ "futures-core", "futures-lite", "hex", - "nix 0.29.0", + "nix 0.30.1", "ordered-stream", "serde", "serde_repr", - "static_assertions", "tokio", "tracing", "uds_windows", "windows-sys 0.59.0", - "winnow 0.7.6", - "xdg-home", - "zbus_macros 5.5.0", + "winnow 0.7.10", + "zbus_macros 5.7.1", "zbus_names 4.2.0", - "zvariant 5.4.0", + "zvariant 5.5.3", ] [[package]] @@ -11023,22 +11666,22 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "zvariant_utils 2.1.0", ] [[package]] name = "zbus_macros" -version = "5.5.0" +version = "5.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f325ad10eb0d0a3eb060203494c3b7ec3162a01a59db75d2deee100339709fc0" +checksum = "a17e7e5eec1550f747e71a058df81a9a83813ba0f6a95f39c4e218bdc7ba366a" dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "zbus_names 4.2.0", - "zvariant 5.4.0", + "zvariant 5.5.3", "zvariant_utils 3.2.0", ] @@ -11061,48 +11704,28 @@ checksum = "7be68e64bf6ce8db94f63e72f0c7eb9a60d733f7e0499e628dfab0f84d6bcb97" dependencies = [ "serde", "static_assertions", - "winnow 0.7.6", - "zvariant 5.4.0", + "winnow 0.7.10", + "zvariant 5.5.3", ] [[package]] name = "zerocopy" -version = "0.7.35" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" dependencies = [ - "zerocopy-derive 0.7.35", -] - -[[package]] -name = "zerocopy" -version = "0.8.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" -dependencies = [ - "zerocopy-derive 0.8.24", + "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -11122,7 +11745,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "synstructure", ] @@ -11143,14 +11766,25 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", +] + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", ] [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" dependencies = [ "yoke", "zerofrom", @@ -11159,13 +11793,13 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -11183,7 +11817,7 @@ dependencies = [ "deflate64", "displaydoc", "flate2", - "getrandom 0.3.2", + "getrandom 0.3.3", "hmac 0.12.1", "indexmap 2.9.0", "lzma-rs", @@ -11254,17 +11888,16 @@ dependencies = [ [[package]] name = "zvariant" -version = "5.4.0" +version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2df9ee044893fcffbdc25de30546edef3e32341466811ca18421e3cd6c5a3ac" +checksum = "9d30786f75e393ee63a21de4f9074d4c038d52c5b1bb4471f955db249f9dffb1" dependencies = [ "endi", "enumflags2", "serde", - "static_assertions", "url", - "winnow 0.7.6", - "zvariant_derive 5.4.0", + "winnow 0.7.10", + "zvariant_derive 5.5.3", "zvariant_utils 3.2.0", ] @@ -11277,20 +11910,20 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "zvariant_utils 2.1.0", ] [[package]] name = "zvariant_derive" -version = "5.4.0" +version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74170caa85b8b84cc4935f2d56a57c7a15ea6185ccdd7eadb57e6edd90f94b2f" +checksum = "75fda702cd42d735ccd48117b1630432219c0e9616bf6cb0f8350844ee4d9580" dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "zvariant_utils 3.2.0", ] @@ -11302,7 +11935,7 @@ checksum = "c51bcff7cc3dbb5055396bcf774748c3dab426b4b8659046963523cee4808340" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -11315,6 +11948,6 @@ dependencies = [ "quote", "serde", "static_assertions", - "syn 2.0.100", - "winnow 0.7.6", + "syn 2.0.101", + "winnow 0.7.10", ] diff --git a/Cargo.toml b/Cargo.toml index 3d848e33..8fd2edc0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,24 +61,33 @@ rustfs-filemeta = { path = "crates/filemeta", version = "0.0.1" } rustfs-disk = { path = "crates/disk", version = "0.0.1" } rustfs-error = { path = "crates/error", version = "0.0.1" } workers = { path = "./common/workers", version = "0.0.1" } -tokio-tar = "0.3.1" +aes-gcm = { version = "0.10.3", features = ["std"] } +arc-swap = "1.7.1" +argon2 = { version = "0.5.3", features = ["std"] } atoi = "2.0.0" async-recursion = "1.1.1" async-trait = "0.1.88" atomic_enum = "0.3.0" +aws-sdk-s3 = "1.29.0" axum = "0.8.4" axum-extra = "0.10.1" axum-server = { version = "0.7.2", features = ["tls-rustls"] } -backon = "1.5.0" +backon = "1.5.1" +base64-simd = "0.8.0" blake2 = "0.10.6" bytes = "1.10.1" bytesize = "2.0.1" +byteorder = "1.5.0" +cfg-if = "1.0.0" +chacha20poly1305 = { version = "0.10.1" } chrono = { version = "0.4.41", features = ["serde"] } clap = { version = "4.5.39", features = ["derive", "env"] } config = "0.15.11" const-str = { version = "0.6.2", features = ["std", "proc"] } +crc32fast = "1.4.2" datafusion = "46.0.1" derive_builder = "0.20.2" +dotenvy = "0.15.7" dioxus = { version = "0.6.3", features = ["router"] } dirs = "6.0.0" flatbuffers = "25.2.10" @@ -88,9 +97,10 @@ futures-core = "0.3.31" futures-util = "0.3.31" glob = "0.3.2" hex = "0.4.3" +hex-simd = "0.8.0" highway = { version = "1.3.0" } hyper = "1.6.0" -hyper-util = { version = "0.1.11", features = [ +hyper-util = { version = "0.1.14", features = [ "tokio", "server-auto", "server-graceful", @@ -98,6 +108,9 @@ hyper-util = { version = "0.1.11", features = [ http = "1.3.1" http-body = "1.0.1" humantime = "2.2.0" +include_dir = "0.7.4" +ipnetwork = { version = "0.21.1", features = ["serde"] } +itertools = "0.14.0" jsonwebtoken = "9.3.1" keyring = { version = "3.6.2", features = [ "apple-native", @@ -114,36 +127,43 @@ mime_guess = "2.0.5" netif = "0.1.6" nix = { version = "0.30.1", features = ["fs"] } nu-ansi-term = "0.50.1" -num_cpus = { version = "1.16.0" } -nvml-wrapper = "0.10.0" +num_cpus = { version = "1.17.0" } +nvml-wrapper = "0.11.0" object_store = "0.11.2" -opentelemetry = { version = "0.29.1" } -opentelemetry-appender-tracing = { version = "0.29.1", features = [ +once_cell = "1.21.3" +opentelemetry = { version = "0.30.0" } +opentelemetry-appender-tracing = { version = "0.30.1", features = [ "experimental_use_tracing_span_context", "experimental_metadata_attributes", + "spec_unstable_logs_enabled" ] } -opentelemetry_sdk = { version = "0.29.0" } -opentelemetry-stdout = { version = "0.29.0" } -opentelemetry-otlp = { version = "0.29.0" } -opentelemetry-semantic-conventions = { version = "0.29.0", features = [ +opentelemetry_sdk = { version = "0.30.0" } +opentelemetry-stdout = { version = "0.30.0" } +opentelemetry-otlp = { version = "0.30.0", default-features = false, features = [ + "grpc-tonic", "gzip-tonic", "trace", "metrics", "logs", "internal-logs" +] } +opentelemetry-semantic-conventions = { version = "0.30.0", features = [ "semconv_experimental", ] } -parking_lot = "0.12.3" +parking_lot = "0.12.4" +path-absolutize = "3.1.1" +path-clean = "1.0.1" +pbkdf2 = "0.12.2" +percent-encoding = "2.3.1" pin-project-lite = "0.2.16" # pin-utils = "0.1.0" prost = "0.13.5" prost-build = "0.13.5" -prost-types = "0.13.5" protobuf = "3.7" -rand = "0.8.5" +rand = "0.9.1" rdkafka = { version = "0.37.0", features = ["tokio"] } reed-solomon-erasure = { version = "6.0.0", features = ["simd-accel"] } regex = { version = "1.11.1" } -reqwest = { version = "0.12.16", default-features = false, features = [ +reqwest = { version = "0.12.19", default-features = false, features = [ "rustls-tls", "charset", "http2", - "macos-system-configuration", + "system-proxy", "stream", "json", "blocking", @@ -154,25 +174,29 @@ rfd = { version = "0.15.3", default-features = false, features = [ ] } rmp = "0.8.14" rmp-serde = "1.3.0" +rsa = "0.9.8" rumqttc = { version = "0.24" } -rust-embed = { version = "8.7.1" } +rust-embed = { version = "8.7.2" } +rustfs-rsc = "2025.506.1" rustls = { version = "0.23.27" } -rustls-pki-types = "1.11.0" +rustls-pki-types = "1.12.0" rustls-pemfile = "2.2.0" s3s = { git = "https://github.com/Nugine/s3s.git", rev = "4733cdfb27b2713e832967232cbff413bb768c10" } s3s-policy = { git = "https://github.com/Nugine/s3s.git", rev = "4733cdfb27b2713e832967232cbff413bb768c10" } +scopeguard = "1.2.0" shadow-rs = { version = "1.1.1", default-features = false } serde = { version = "1.0.219", features = ["derive"] } serde_json = "1.0.140" serde_urlencoded = "0.7.1" serde_with = "3.12.0" sha2 = "0.10.9" +siphasher = "1.0.1" smallvec = { version = "1.15.0", features = ["serde"] } -snafu = "0.8.5" -socket2 = "0.5.9" +snafu = "0.8.6" +socket2 = "0.5.10" strum = { version = "0.27.1", features = ["derive"] } -sysinfo = "0.35.1" -tempfile = "3.19.1" +sysinfo = "0.35.2" +tempfile = "3.20.0" test-case = "3.3.1" thiserror = "2.0.12" time = { version = "0.3.41", features = [ @@ -187,24 +211,26 @@ tonic = { version = "0.13.1", features = ["gzip"] } tonic-build = { version = "0.13.1" } tokio-rustls = { version = "0.26.2", default-features = false } tokio-stream = { version = "0.1.17" } +tokio-tar = "0.3.1" tokio-util = { version = "0.7.15", features = ["io", "compat"] } tower = { version = "0.5.2", features = ["timeout"] } -tower-http = { version = "0.6.2", features = ["cors"] } +tower-http = { version = "0.6.6", features = ["cors"] } tracing = "0.1.41" -tracing-core = "0.1.33" +tracing-core = "0.1.34" tracing-error = "0.2.1" tracing-subscriber = { version = "0.3.19", features = ["env-filter", "time"] } tracing-appender = "0.2.3" -tracing-opentelemetry = "0.30.0" +tracing-opentelemetry = "0.31.0" transform-stream = "0.3.1" url = "2.5.4" -uuid = { version = "1.16.0", features = [ +urlencoding = "2.1.3" +uuid = { version = "1.17.0", features = [ "v4", "fast-rng", "macro-diagnostics", ] } winapi = { version = "0.3.9" } - +xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] } [profile.wasm-dev] inherits = "dev" @@ -218,10 +244,10 @@ inherits = "dev" [profile.release] opt-level = 3 -lto = "thin" -codegen-units = 1 -panic = "abort" # Optional, remove the panic expansion code -strip = true # strip symbol information to reduce binary size +#lto = "thin" +#codegen-units = 1 +#panic = "abort" # Optional, remove the panic expansion code +#strip = true # strip symbol information to reduce binary size [profile.production] inherits = "release" @@ -230,4 +256,4 @@ codegen-units = 1 [profile.profiling] inherits = "release" -debug = true \ No newline at end of file +debug = true diff --git a/README.md b/README.md index 8cb48943..64c0d3aa 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ | Package | Version | Download Link | |---------|---------|----------------------------------------------------------------------------------------------------------------------------------| | Rust | 1.8.5+ | [rust-lang.org/tools/install](https://www.rust-lang.org/tools/install) | -| protoc | 30.2+ | [protoc-30.2-linux-x86_64.zip](https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip) | +| protoc | 31.1+ | [protoc-31.1-linux-x86_64.zip](https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip) | | flatc | 24.0+ | [Linux.flatc.binary.g++-13.zip](https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip) | ### Building RustFS diff --git a/README_ZH.md b/README_ZH.md index 7362e9e7..2af21ce1 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -7,7 +7,7 @@ | 软件包 | 版本 | 下载链接 | |--------|--------|----------------------------------------------------------------------------------------------------------------------------------| | Rust | 1.8.5+ | [rust-lang.org/tools/install](https://www.rust-lang.org/tools/install) | -| protoc | 30.2+ | [protoc-30.2-linux-x86_64.zip](https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip) | +| protoc | 31.1+ | [protoc-31.1-linux-x86_64.zip](https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip) | | flatc | 24.0+ | [Linux.flatc.binary.g++-13.zip](https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip) | ### 构建 RustFS diff --git a/appauth/Cargo.toml b/appauth/Cargo.toml index b638313e..1f807c69 100644 --- a/appauth/Cargo.toml +++ b/appauth/Cargo.toml @@ -7,11 +7,11 @@ rust-version.workspace = true version.workspace = true [dependencies] -base64-simd = "0.8.0" +base64-simd = { workspace = true } common.workspace = true -hex-simd = "0.8.0" +hex-simd = { workspace = true } rand.workspace = true -rsa = "0.9.8" +rsa = { workspace = true } serde.workspace = true serde_json.workspace = true diff --git a/appauth/src/token.rs b/appauth/src/token.rs index d8a00ce7..85c5b2b2 100644 --- a/appauth/src/token.rs +++ b/appauth/src/token.rs @@ -2,6 +2,7 @@ use rsa::Pkcs1v15Encrypt; use rsa::{ RsaPrivateKey, RsaPublicKey, pkcs8::{DecodePrivateKey, DecodePublicKey}, + rand_core::OsRng, }; use serde::{Deserialize, Serialize}; use std::io::{Error, Result}; @@ -19,9 +20,7 @@ pub struct Token { pub fn gencode(token: &Token, key: &str) -> Result { let data = serde_json::to_vec(token)?; let public_key = RsaPublicKey::from_public_key_pem(key).map_err(Error::other)?; - let encrypted_data = public_key - .encrypt(&mut rand::thread_rng(), Pkcs1v15Encrypt, &data) - .map_err(Error::other)?; + let encrypted_data = public_key.encrypt(&mut OsRng, Pkcs1v15Encrypt, &data).map_err(Error::other)?; Ok(base64_simd::URL_SAFE_NO_PAD.encode_to_string(&encrypted_data)) } @@ -65,7 +64,7 @@ mod tests { use std::time::{SystemTime, UNIX_EPOCH}; #[test] fn test_gencode_and_parse() { - let mut rng = rand::thread_rng(); + let mut rng = OsRng; let bits = 2048; let private_key = RsaPrivateKey::new(&mut rng, bits).expect("Failed to generate private key"); let public_key = RsaPublicKey::from(&private_key); @@ -88,7 +87,7 @@ mod tests { #[test] fn test_parse_invalid_token() { - let private_key_pem = RsaPrivateKey::new(&mut rand::thread_rng(), 2048) + let private_key_pem = RsaPrivateKey::new(&mut OsRng, 2048) .expect("Failed to generate private key") .to_pkcs8_pem(LineEnding::LF) .unwrap(); diff --git a/bucket_replicate_test.md b/bucket_replicate_test.md new file mode 100644 index 00000000..2f8ac96c --- /dev/null +++ b/bucket_replicate_test.md @@ -0,0 +1,45 @@ +启动两个rustfs +rustfs --address 0.0.0.0:9000 /rustfs-data9000 +rustfs --address 0.0.0.0:9001 /rustfs-data9001 + + +### 使用 rustfs mc 设置 alias 分别为 rustfs 和 rustfs2 + + +### 创建 bucket +mc mb rustfs/srcbucket + +### 创建 desc bucket + +mc mb rustfs2/destbucket + + + +### 开启版本控制 + +mc version enable rustfs/srcbucket +mc version enable rustfs2/destbucket + +#### 使用修改过的 mc 才能 add bucket replication + +./mc replication add rustfs/srcbucket --remote-bucket rustfs2/destbucket + + + +###### 复制一个小文件; +mc cp ./1.txt rustfs/srcbucket + +###### 查看是否成功 +mc ls --versions rustfs/srcbucket/1.txt +mc ls --versions rustfs/destbucket/1.txt + + +##### 复制一个大文件 +1 创建一个大文件 +dd if=/dev/zero of=./dd.out bs=4096000 count=1000 + +mc cp ./dd.out rustfs/srcbucket/ + +##### 查看是否成功 +mc ls --versions rustfs/srcbucket/dd.out +mc ls --versions rustfs2/destbucket/dd.out diff --git a/build_rustfs.sh b/build_rustfs.sh new file mode 100755 index 00000000..aafbfcd1 --- /dev/null +++ b/build_rustfs.sh @@ -0,0 +1,21 @@ +#!/bin/bash +clear + +# Get the current platform architecture +ARCH=$(uname -m) + +# Set the target directory according to the schema +if [ "$ARCH" == "x86_64" ]; then + TARGET_DIR="target/x86_64" +elif [ "$ARCH" == "aarch64" ]; then + TARGET_DIR="target/arm64" +else + TARGET_DIR="target/unknown" +fi + +# Set CARGO_TARGET_DIR and build the project +CARGO_TARGET_DIR=$TARGET_DIR RUSTFLAGS="-C link-arg=-fuse-ld=mold" cargo build --package rustfs + +echo -e "\a" +echo -e "\a" +echo -e "\a" diff --git a/common/common/Cargo.toml b/common/common/Cargo.toml index b2a34d3a..10900f33 100644 --- a/common/common/Cargo.toml +++ b/common/common/Cargo.toml @@ -9,7 +9,7 @@ workspace = true [dependencies] async-trait.workspace = true lazy_static.workspace = true -scopeguard = "1.2.0" +scopeguard = { workspace = true } tokio.workspace = true tonic = { workspace = true } tracing-error.workspace = true diff --git a/common/common/src/bucket_stats.rs b/common/common/src/bucket_stats.rs new file mode 100644 index 00000000..43970e8c --- /dev/null +++ b/common/common/src/bucket_stats.rs @@ -0,0 +1,73 @@ +use std::collections::HashMap; + +use crate::last_minute::{self}; +pub struct ReplicationLatency { + // 单个和多部分 PUT 请求的延迟 + upload_histogram: last_minute::LastMinuteHistogram, +} + +impl ReplicationLatency { + // 合并两个 ReplicationLatency + pub fn merge(&mut self, other: &mut ReplicationLatency) -> &ReplicationLatency { + self.upload_histogram.merge(&other.upload_histogram); + self + } + + // 获取上传延迟(按对象大小区间分类) + pub fn get_upload_latency(&mut self) -> HashMap { + let mut ret = HashMap::new(); + let avg = self.upload_histogram.get_avg_data(); + for (i, v) in avg.iter().enumerate() { + let avg_duration = v.avg(); + ret.insert(self.size_tag_to_string(i), avg_duration.as_millis() as u64); + } + ret + } + pub fn update(&mut self, size: i64, during: std::time::Duration) { + self.upload_histogram.add(size, during); + } + + // 模拟从 size tag 到字符串的转换 + fn size_tag_to_string(&self, tag: usize) -> String { + match tag { + 0 => String::from("Size < 1 KiB"), + 1 => String::from("Size < 1 MiB"), + 2 => String::from("Size < 10 MiB"), + 3 => String::from("Size < 100 MiB"), + 4 => String::from("Size < 1 GiB"), + _ => String::from("Size > 1 GiB"), + } + } +} + +// #[derive(Debug, Clone, Default)] +// pub struct ReplicationLastMinute { +// pub last_minute: LastMinuteLatency, +// } + +// impl ReplicationLastMinute { +// pub fn merge(&mut self, other: ReplicationLastMinute) -> ReplicationLastMinute { +// let mut nl = ReplicationLastMinute::default(); +// nl.last_minute = self.last_minute.merge(&mut other.last_minute); +// nl +// } + +// pub fn add_size(&mut self, n: i64) { +// let t = SystemTime::now() +// .duration_since(UNIX_EPOCH) +// .expect("Time went backwards") +// .as_secs(); +// self.last_minute.add_all(t - 1, &AccElem { total: t - 1, size: n as u64, n: 1 }); +// } + +// pub fn get_total(&self) -> AccElem { +// self.last_minute.get_total() +// } +// } + +// impl fmt::Display for ReplicationLastMinute { +// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +// let t = self.last_minute.get_total(); +// write!(f, "ReplicationLastMinute sz= {}, n= {}, dur= {}", t.size, t.n, t.total) +// } +// } diff --git a/common/common/src/last_minute.rs b/common/common/src/last_minute.rs index 2c9a9bc8..71d2648a 100644 --- a/common/common/src/last_minute.rs +++ b/common/common/src/last_minute.rs @@ -1,6 +1,81 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; -#[derive(Clone, Debug, Default)] +#[allow(dead_code)] +#[derive(Debug, Default)] +struct TimedAction { + count: u64, + acc_time: u64, + min_time: Option, + max_time: Option, + bytes: u64, +} + +#[allow(dead_code)] +impl TimedAction { + // Avg returns the average time spent on the action. + pub fn avg(&self) -> Option { + if self.count == 0 { + return None; + } + Some(std::time::Duration::from_nanos(self.acc_time / self.count)) + } + + // AvgBytes returns the average bytes processed. + pub fn avg_bytes(&self) -> u64 { + if self.count == 0 { + return 0; + } + self.bytes / self.count + } + + // Merge other into t. + pub fn merge(&mut self, other: TimedAction) { + self.count += other.count; + self.acc_time += other.acc_time; + self.bytes += other.bytes; + + if self.count == 0 { + self.min_time = other.min_time; + } + if let Some(other_min) = other.min_time { + self.min_time = self.min_time.map_or(Some(other_min), |min| Some(min.min(other_min))); + } + + self.max_time = self + .max_time + .map_or(other.max_time, |max| Some(max.max(other.max_time.unwrap_or(0)))); + } +} + +#[allow(dead_code)] +#[derive(Debug)] +enum SizeCategory { + SizeLessThan1KiB = 0, + SizeLessThan1MiB, + SizeLessThan10MiB, + SizeLessThan100MiB, + SizeLessThan1GiB, + SizeGreaterThan1GiB, + // Add new entries here + SizeLastElemMarker, +} + +impl std::fmt::Display for SizeCategory { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match *self { + SizeCategory::SizeLessThan1KiB => "SizeLessThan1KiB", + SizeCategory::SizeLessThan1MiB => "SizeLessThan1MiB", + SizeCategory::SizeLessThan10MiB => "SizeLessThan10MiB", + SizeCategory::SizeLessThan100MiB => "SizeLessThan100MiB", + SizeCategory::SizeLessThan1GiB => "SizeLessThan1GiB", + SizeCategory::SizeGreaterThan1GiB => "SizeGreaterThan1GiB", + SizeCategory::SizeLastElemMarker => "SizeLastElemMarker", + }; + write!(f, "{}", s) + } +} + +#[derive(Clone, Debug, Default, Copy)] pub struct AccElem { pub total: u64, pub size: u64, @@ -28,7 +103,7 @@ impl AccElem { } } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct LastMinuteLatency { pub totals: Vec, pub last_sec: u64, @@ -44,10 +119,11 @@ impl Default for LastMinuteLatency { } impl LastMinuteLatency { - pub fn merge(&mut self, o: &mut LastMinuteLatency) -> LastMinuteLatency { + pub fn merge(&mut self, o: &LastMinuteLatency) -> LastMinuteLatency { let mut merged = LastMinuteLatency::default(); + let mut x = o.clone(); if self.last_sec > o.last_sec { - o.forward_to(self.last_sec); + x.forward_to(self.last_sec); merged.last_sec = self.last_sec; } else { self.forward_to(o.last_sec); @@ -111,7 +187,6 @@ impl LastMinuteLatency { } } } - #[cfg(test)] mod tests { use super::*; @@ -415,7 +490,7 @@ mod tests { latency2.totals[0].total = 20; latency2.totals[0].n = 3; - let merged = latency1.merge(&mut latency2); + let merged = latency1.merge(&latency2); assert_eq!(merged.last_sec, 1000); assert_eq!(merged.totals[0].total, 30); // 10 + 20 @@ -434,7 +509,7 @@ mod tests { latency1.totals[0].total = 10; latency2.totals[0].total = 20; - let merged = latency1.merge(&mut latency2); + let merged = latency1.merge(&latency2); assert_eq!(merged.last_sec, 1010); // Should use the later time assert_eq!(merged.totals[0].total, 30); @@ -443,9 +518,9 @@ mod tests { #[test] fn test_last_minute_latency_merge_empty() { let mut latency1 = LastMinuteLatency::default(); - let mut latency2 = LastMinuteLatency::default(); + let latency2 = LastMinuteLatency::default(); - let merged = latency1.merge(&mut latency2); + let merged = latency1.merge(&latency2); assert_eq!(merged.last_sec, 0); for elem in &merged.totals { @@ -558,7 +633,7 @@ mod tests { n: 5, }; - let cloned = elem.clone(); + let cloned = elem; assert_eq!(elem.total, cloned.total); assert_eq!(elem.size, cloned.size); assert_eq!(elem.n, cloned.n); @@ -755,3 +830,44 @@ mod tests { assert_eq!(elem.avg(), Duration::from_secs(0)); } } + +const SIZE_LAST_ELEM_MARKER: usize = 10; // 这里假设你的 marker 是 10,请根据实际情况修改 + +#[allow(dead_code)] +#[derive(Debug, Default)] +pub struct LastMinuteHistogram { + histogram: Vec, + size: u32, +} + +impl LastMinuteHistogram { + pub fn merge(&mut self, other: &LastMinuteHistogram) { + for i in 0..self.histogram.len() { + self.histogram[i].merge(&other.histogram[i]); + } + } + + pub fn add(&mut self, size: i64, t: std::time::Duration) { + let index = size_to_tag(size); + self.histogram[index].add(&t); + } + + pub fn get_avg_data(&mut self) -> [AccElem; SIZE_LAST_ELEM_MARKER] { + let mut res = [AccElem::default(); SIZE_LAST_ELEM_MARKER]; + for (i, elem) in self.histogram.iter_mut().enumerate() { + res[i] = elem.get_total(); + } + res + } +} + +fn size_to_tag(size: i64) -> usize { + match size { + _ if size < 1024 => 0, // sizeLessThan1KiB + _ if size < 1024 * 1024 => 1, // sizeLessThan1MiB + _ if size < 10 * 1024 * 1024 => 2, // sizeLessThan10MiB + _ if size < 100 * 1024 * 1024 => 3, // sizeLessThan100MiB + _ if size < 1024 * 1024 * 1024 => 4, // sizeLessThan1GiB + _ => 5, // sizeGreaterThan1GiB + } +} diff --git a/common/common/src/lib.rs b/common/common/src/lib.rs index a6656ed4..3b39fd08 100644 --- a/common/common/src/lib.rs +++ b/common/common/src/lib.rs @@ -1,3 +1,4 @@ +pub mod bucket_stats; // pub mod error; pub mod globals; pub mod last_minute; diff --git a/common/lock/src/lrwmutex.rs b/common/lock/src/lrwmutex.rs index e8a38200..bddb84e7 100644 --- a/common/lock/src/lrwmutex.rs +++ b/common/lock/src/lrwmutex.rs @@ -77,8 +77,8 @@ impl LRWMutex { } let sleep_time: u64; { - let mut rng = rand::thread_rng(); - sleep_time = rng.gen_range(10..=50); + let mut rng = rand::rng(); + sleep_time = rng.random_range(10..=50); } sleep(Duration::from_millis(sleep_time)).await; } diff --git a/crates/event-notifier/Cargo.toml b/crates/event-notifier/Cargo.toml index 8d9acd9a..36f3027f 100644 --- a/crates/event-notifier/Cargo.toml +++ b/crates/event-notifier/Cargo.toml @@ -37,7 +37,7 @@ tokio = { workspace = true, features = ["test-util"] } tracing-subscriber = { workspace = true } http = { workspace = true } axum = { workspace = true } -dotenvy = "0.15.7" +dotenvy = { workspace = true } [lints] workspace = true diff --git a/crates/obs/Cargo.toml b/crates/obs/Cargo.toml index e7677df8..5a040c87 100644 --- a/crates/obs/Cargo.toml +++ b/crates/obs/Cargo.toml @@ -27,7 +27,7 @@ opentelemetry = { workspace = true } opentelemetry-appender-tracing = { workspace = true, features = ["experimental_use_tracing_span_context", "experimental_metadata_attributes"] } opentelemetry_sdk = { workspace = true, features = ["rt-tokio"] } opentelemetry-stdout = { workspace = true } -opentelemetry-otlp = { workspace = true, features = ["grpc-tonic", "gzip-tonic"] } +opentelemetry-otlp = { workspace = true, features = ["grpc-tonic", "gzip-tonic", "trace", "metrics", "logs", "internal-logs"] } opentelemetry-semantic-conventions = { workspace = true, features = ["semconv_experimental"] } rustfs-utils = { workspace = true, features = ["ip"] } serde = { workspace = true } diff --git a/crates/obs/src/telemetry.rs b/crates/obs/src/telemetry.rs index c034ad10..a83dbac9 100644 --- a/crates/obs/src/telemetry.rs +++ b/crates/obs/src/telemetry.rs @@ -332,7 +332,7 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard { let flexi_logger_result = flexi_logger::Logger::try_with_env_or_str(logger_level) .unwrap_or_else(|e| { eprintln!( - "Invalid logger level: {}, using default: {},failed error:{}", + "Invalid logger level: {}, using default: {}, failed error: {:?}", logger_level, DEFAULT_LOG_LEVEL, e ); flexi_logger::Logger::with(log_spec.clone()) diff --git a/crates/rio/src/compress_reader.rs b/crates/rio/src/compress_reader.rs index 2986ca90..40720706 100644 --- a/crates/rio/src/compress_reader.rs +++ b/crates/rio/src/compress_reader.rs @@ -434,7 +434,7 @@ mod tests { use rand::Rng; // Generate 1MB of random bytes let mut data = vec![0u8; 1024 * 1024]; - rand::thread_rng().fill(&mut data[..]); + rand::rng().fill(&mut data[..]); let reader = Cursor::new(data.clone()); let mut compress_reader = CompressReader::new(reader, CompressionAlgorithm::Gzip); @@ -453,7 +453,7 @@ mod tests { use rand::Rng; // Generate 1MB of random bytes let mut data = vec![0u8; 1024 * 1024]; - rand::thread_rng().fill(&mut data[..]); + rand::rng().fill(&mut data[..]); let reader = Cursor::new(data.clone()); let mut compress_reader = CompressReader::new(reader, CompressionAlgorithm::Deflate); diff --git a/crates/rio/src/encrypt_reader.rs b/crates/rio/src/encrypt_reader.rs index 314fd376..976fa424 100644 --- a/crates/rio/src/encrypt_reader.rs +++ b/crates/rio/src/encrypt_reader.rs @@ -348,8 +348,8 @@ mod tests { let data = b"hello sse encrypt"; let mut key = [0u8; 32]; let mut nonce = [0u8; 12]; - rand::thread_rng().fill_bytes(&mut key); - rand::thread_rng().fill_bytes(&mut nonce); + rand::rng().fill_bytes(&mut key); + rand::rng().fill_bytes(&mut nonce); let reader = BufReader::new(&data[..]); let encrypt_reader = EncryptReader::new(reader, key, nonce); @@ -375,8 +375,8 @@ mod tests { let data = b"test decrypt only"; let mut key = [0u8; 32]; let mut nonce = [0u8; 12]; - rand::thread_rng().fill_bytes(&mut key); - rand::thread_rng().fill_bytes(&mut nonce); + rand::rng().fill_bytes(&mut key); + rand::rng().fill_bytes(&mut nonce); // Encrypt let reader = BufReader::new(&data[..]); @@ -401,11 +401,11 @@ mod tests { use rand::Rng; let size = 1024 * 1024; let mut data = vec![0u8; size]; - rand::thread_rng().fill(&mut data[..]); + rand::rng().fill(&mut data[..]); let mut key = [0u8; 32]; let mut nonce = [0u8; 12]; - rand::thread_rng().fill_bytes(&mut key); - rand::thread_rng().fill_bytes(&mut nonce); + rand::rng().fill_bytes(&mut key); + rand::rng().fill_bytes(&mut nonce); let reader = std::io::Cursor::new(data.clone()); let encrypt_reader = EncryptReader::new(reader, key, nonce); diff --git a/crates/rio/src/etag_reader.rs b/crates/rio/src/etag_reader.rs index 6a3881ce..73176df8 100644 --- a/crates/rio/src/etag_reader.rs +++ b/crates/rio/src/etag_reader.rs @@ -165,7 +165,7 @@ mod tests { // Generate 3MB random data let size = 3 * 1024 * 1024; let mut data = vec![0u8; size]; - rand::thread_rng().fill(&mut data[..]); + rand::rng().fill(&mut data[..]); let mut hasher = Md5::new(); hasher.update(&data); diff --git a/crates/rio/src/hash_reader.rs b/crates/rio/src/hash_reader.rs index a5e11f33..2e3a4ebf 100644 --- a/crates/rio/src/hash_reader.rs +++ b/crates/rio/src/hash_reader.rs @@ -378,7 +378,7 @@ mod tests { // Generate 1MB random data let size = 1024 * 1024; let mut data = vec![0u8; size]; - rand::thread_rng().fill(&mut data[..]); + rand::rng().fill(&mut data[..]); let mut hasher = Md5::new(); hasher.update(&data); @@ -418,8 +418,8 @@ mod tests { let mut key = [0u8; 32]; let mut nonce = [0u8; 12]; - rand::thread_rng().fill_bytes(&mut key); - rand::thread_rng().fill_bytes(&mut nonce); + rand::rng().fill_bytes(&mut key); + rand::rng().fill_bytes(&mut nonce); let is_encrypt = true; diff --git a/crates/rio/src/limit_reader.rs b/crates/rio/src/limit_reader.rs index 60d0f8d6..6c50d826 100644 --- a/crates/rio/src/limit_reader.rs +++ b/crates/rio/src/limit_reader.rs @@ -174,7 +174,7 @@ mod tests { // Generate a 3MB random byte array for testing let size = 3 * 1024 * 1024; let mut data = vec![0u8; size]; - rand::thread_rng().fill(&mut data[..]); + rand::rng().fill(&mut data[..]); let reader = Cursor::new(data.clone()); let mut limit_reader = LimitReader::new(reader, size as u64); diff --git a/crates/zip/Cargo.toml b/crates/zip/Cargo.toml index f149df6d..19f2d803 100644 --- a/crates/zip/Cargo.toml +++ b/crates/zip/Cargo.toml @@ -18,8 +18,8 @@ async-compression = { version = "0.4.0", features = [ ] } async_zip = { version = "0.0.17", features = ["tokio"] } zip = "2.2.0" -tokio = { version = "1.45.0", features = ["full"] } -tokio-stream = "0.1.17" +tokio = { workspace = true, features = ["full"] } +tokio-stream = { workspace = true } tokio-tar = { workspace = true } xz2 = { version = "0.1", optional = true, features = ["static"] } diff --git a/crypto/Cargo.toml b/crypto/Cargo.toml index 7822ee5a..2a7e4f32 100644 --- a/crypto/Cargo.toml +++ b/crypto/Cargo.toml @@ -10,12 +10,12 @@ version.workspace = true workspace = true [dependencies] -aes-gcm = { version = "0.10.3", features = ["std"], optional = true } -argon2 = { version = "0.5.3", features = ["std"], optional = true } -cfg-if = "1.0.0" -chacha20poly1305 = { version = "0.10.1", optional = true } +aes-gcm = { workspace = true, features = ["std"], optional = true } +argon2 = { workspace = true, features = ["std"], optional = true } +cfg-if = { workspace = true } +chacha20poly1305 = { workspace = true, optional = true } jsonwebtoken = { workspace = true } -pbkdf2 = { version = "0.12.2", optional = true } +pbkdf2 = { workspace = true, optional = true } rand = { workspace = true, optional = true } sha2 = { workspace = true, optional = true } thiserror.workspace = true diff --git a/crypto/src/encdec/decrypt.rs b/crypto/src/encdec/decrypt.rs index e3660e6d..8c46ab46 100644 --- a/crypto/src/encdec/decrypt.rs +++ b/crypto/src/encdec/decrypt.rs @@ -28,6 +28,97 @@ pub fn decrypt_data(password: &[u8], data: &[u8]) -> Result, crate::Erro } } +// use argon2::{Argon2, PasswordHasher}; +// use argon2::password_hash::{SaltString}; +// use aes_gcm::{Aes256Gcm, Key, Nonce}; // For AES-GCM +// use chacha20poly1305::{ChaCha20Poly1305, Key as ChaChaKey, Nonce as ChaChaNonce}; // For ChaCha20 +// use pbkdf2::pbkdf2; +// use sha2::Sha256; +// use std::io::{self, Read}; +// use thiserror::Error; + +// #[derive(Debug, Error)] +// pub enum DecryptError { +// #[error("unexpected header")] +// UnexpectedHeader, +// #[error("invalid encryption algorithm ID")] +// InvalidAlgorithmId, +// #[error("IO error")] +// Io(#[from] io::Error), +// #[error("decryption error")] +// DecryptionError, +// } + +// pub fn decrypt_data2(password: &str, mut data: R) -> Result, DecryptError> { +// // Parse the stream header +// let mut hdr = [0u8; 32 + 1 + 8]; +// if data.read_exact(&mut hdr).is_err() { +// return Err(DecryptError::UnexpectedHeader); +// } + +// let salt = &hdr[0..32]; +// let id = hdr[32]; +// let nonce = &hdr[33..41]; + +// let key = match id { +// // Argon2id + AES-GCM +// 0x01 => { +// let salt = SaltString::encode_b64(salt).map_err(|_| DecryptError::DecryptionError)?; +// let argon2 = Argon2::default(); +// let hashed_key = argon2.hash_password(password.as_bytes(), &salt) +// .map_err(|_| DecryptError::DecryptionError)?; +// hashed_key.hash.unwrap().as_bytes().to_vec() +// } +// // Argon2id + ChaCha20Poly1305 +// 0x02 => { +// let salt = SaltString::encode_b64(salt).map_err(|_| DecryptError::DecryptionError)?; +// let argon2 = Argon2::default(); +// let hashed_key = argon2.hash_password(password.as_bytes(), &salt) +// .map_err(|_| DecryptError::DecryptionError)?; +// hashed_key.hash.unwrap().as_bytes().to_vec() +// } +// // PBKDF2 + AES-GCM +// // 0x03 => { +// // let mut key = [0u8; 32]; +// // pbkdf2::(password.as_bytes(), salt, 10000, &mut key); +// // key.to_vec() +// // } +// _ => return Err(DecryptError::InvalidAlgorithmId), +// }; + +// // Decrypt data using the corresponding cipher +// let mut encrypted_data = Vec::new(); +// data.read_to_end(&mut encrypted_data)?; + +// let plaintext = match id { +// 0x01 => { +// let cipher = Aes256Gcm::new(Key::from_slice(&key)); +// let nonce = Nonce::from_slice(nonce); +// cipher +// .decrypt(nonce, encrypted_data.as_ref()) +// .map_err(|_| DecryptError::DecryptionError)? +// } +// 0x02 => { +// let cipher = ChaCha20Poly1305::new(ChaChaKey::from_slice(&key)); +// let nonce = ChaChaNonce::from_slice(nonce); +// cipher +// .decrypt(nonce, encrypted_data.as_ref()) +// .map_err(|_| DecryptError::DecryptionError)? +// } +// 0x03 => { + +// let cipher = Aes256Gcm::new(Key::from_slice(&key)); +// let nonce = Nonce::from_slice(nonce); +// cipher +// .decrypt(nonce, encrypted_data.as_ref()) +// .map_err(|_| DecryptError::DecryptionError)? +// } +// _ => return Err(DecryptError::InvalidAlgorithmId), +// }; + +// Ok(plaintext) +// } + #[cfg(any(test, feature = "crypto"))] #[inline] fn decryp(stream: T, nonce: &[u8], data: &[u8]) -> Result, crate::Error> { diff --git a/crypto/src/encdec/encrypt.rs b/crypto/src/encdec/encrypt.rs index bc96d353..7483161b 100644 --- a/crypto/src/encdec/encrypt.rs +++ b/crypto/src/encdec/encrypt.rs @@ -42,8 +42,9 @@ fn encrypt( data: &[u8], ) -> Result, crate::Error> { use crate::error::Error; + use aes_gcm::aead::rand_core::OsRng; - let nonce = T::generate_nonce(rand::thread_rng()); + let nonce = T::generate_nonce(&mut OsRng); let encryptor = stream.encrypt(&nonce, data).map_err(Error::ErrEncryptFailed)?; diff --git a/deploy/README.md b/deploy/README.md index 2efdd85e..bf1a2bce 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -35,9 +35,7 @@ managing and monitoring the system. | ├── rustfs_cert.pem | └── rustfs_key.pem |--config -| |--obs.example.yaml // example config | |--rustfs.env // env config | |--rustfs-zh.env // env config in Chinese -| |--.example.obs.env // example env config | |--event.example.toml // event config ``` \ No newline at end of file diff --git a/deploy/build/rustfs-zh.service b/deploy/build/rustfs-zh.service index 17351e67..e6166a19 100644 --- a/deploy/build/rustfs-zh.service +++ b/deploy/build/rustfs-zh.service @@ -38,13 +38,13 @@ ExecStart=/usr/local/bin/rustfs \ --volumes /data/rustfs/vol1,/data/rustfs/vol2 \ --obs-config /etc/rustfs/obs.yaml \ --console-enable \ - --console-address 0.0.0.0:9002 + --console-address 0.0.0.0:9001 # 定义启动命令,运行 /usr/local/bin/rustfs,带参数: # --address 0.0.0.0:9000:服务监听所有接口的 9000 端口。 # --volumes:指定存储卷路径为 /data/rustfs/vol1 和 /data/rustfs/vol2。 # --obs-config:指定配置文件路径为 /etc/rustfs/obs.yaml。 # --console-enable:启用控制台功能。 -# --console-address 0.0.0.0:9002:控制台监听所有接口的 9002 端口。 +# --console-address 0.0.0.0:9001:控制台监听所有接口的 9001 端口。 # 定义环境变量配置,用于传递给服务程序,推荐使用且简洁 # rustfs 示例文件 详见: `../config/rustfs-zh.env` diff --git a/deploy/build/rustfs.run-zh.md b/deploy/build/rustfs.run-zh.md index 85def56e..879b5b06 100644 --- a/deploy/build/rustfs.run-zh.md +++ b/deploy/build/rustfs.run-zh.md @@ -83,7 +83,7 @@ sudo journalctl -u rustfs --since today ```bash # 检查服务端口 ss -tunlp | grep 9000 -ss -tunlp | grep 9002 +ss -tunlp | grep 9001 # 测试服务可用性 curl -I http://localhost:9000 diff --git a/deploy/build/rustfs.run.md b/deploy/build/rustfs.run.md index 2e26ea31..1324a02c 100644 --- a/deploy/build/rustfs.run.md +++ b/deploy/build/rustfs.run.md @@ -83,7 +83,7 @@ sudo journalctl -u rustfs --since today ```bash # Check service ports ss -tunlp | grep 9000 -ss -tunlp | grep 9002 +ss -tunlp | grep 9001 # Test service availability curl -I http://localhost:9000 diff --git a/deploy/build/rustfs.service b/deploy/build/rustfs.service index 9c72e427..41871ead 100644 --- a/deploy/build/rustfs.service +++ b/deploy/build/rustfs.service @@ -24,7 +24,7 @@ ExecStart=/usr/local/bin/rustfs \ --volumes /data/rustfs/vol1,/data/rustfs/vol2 \ --obs-config /etc/rustfs/obs.yaml \ --console-enable \ - --console-address 0.0.0.0:9002 + --console-address 0.0.0.0:9001 # environment variable configuration (Option 2: Use environment variables) # rustfs example file see: `../config/rustfs.env` diff --git a/deploy/config/rustfs-zh.env b/deploy/config/rustfs-zh.env index 37f20e5c..6be1c043 100644 --- a/deploy/config/rustfs-zh.env +++ b/deploy/config/rustfs-zh.env @@ -13,11 +13,11 @@ RUSTFS_ADDRESS="0.0.0.0:9000" # 是否启用 RustFS 控制台功能 RUSTFS_CONSOLE_ENABLE=true # RustFS 控制台监听地址和端口 -RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9002" +RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9001" # RustFS 服务端点地址,用于客户端访问 RUSTFS_SERVER_ENDPOINT="http://127.0.0.1:9000" # RustFS 服务域名配置 -RUSTFS_SERVER_DOMAINS=127.0.0.1:9002 +RUSTFS_SERVER_DOMAINS=127.0.0.1:9001 # RustFS 许可证内容 RUSTFS_LICENSE="license content" # 可观测性配置Endpoint:http://localhost:4317 diff --git a/deploy/config/rustfs.env b/deploy/config/rustfs.env index a8a0d853..c3961f9a 100644 --- a/deploy/config/rustfs.env +++ b/deploy/config/rustfs.env @@ -13,11 +13,11 @@ RUSTFS_ADDRESS="0.0.0.0:9000" # Enable RustFS console functionality RUSTFS_CONSOLE_ENABLE=true # RustFS console listen address and port -RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9002" +RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9001" # RustFS service endpoint for client access RUSTFS_SERVER_ENDPOINT="http://127.0.0.1:9000" # RustFS service domain configuration -RUSTFS_SERVER_DOMAINS=127.0.0.1:9002 +RUSTFS_SERVER_DOMAINS=127.0.0.1:9001 # RustFS license content RUSTFS_LICENSE="license content" # Observability configuration endpoint: RUSTFS_OBS_ENDPOINT diff --git a/docker-compose-obs.yaml b/docker-compose-obs.yaml index a709587b..bafefe57 100644 --- a/docker-compose-obs.yaml +++ b/docker-compose-obs.yaml @@ -1,6 +1,6 @@ services: otel-collector: - image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.124.0 + image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0 environment: - TZ=Asia/Shanghai volumes: @@ -16,7 +16,7 @@ services: networks: - rustfs-network jaeger: - image: jaegertracing/jaeger:2.5.0 + image: jaegertracing/jaeger:2.6.0 environment: - TZ=Asia/Shanghai ports: @@ -26,7 +26,7 @@ services: networks: - rustfs-network prometheus: - image: prom/prometheus:v3.3.0 + image: prom/prometheus:v3.4.1 environment: - TZ=Asia/Shanghai volumes: @@ -36,7 +36,7 @@ services: networks: - rustfs-network loki: - image: grafana/loki:3.5.0 + image: grafana/loki:3.5.1 environment: - TZ=Asia/Shanghai volumes: @@ -47,7 +47,7 @@ services: networks: - rustfs-network grafana: - image: grafana/grafana:11.6.1 + image: grafana/grafana:12.0.1 ports: - "3000:3000" # Web UI environment: diff --git a/ecstore/Cargo.toml b/ecstore/Cargo.toml index f6e3cf6b..b703d4f1 100644 --- a/ecstore/Cargo.toml +++ b/ecstore/Cargo.toml @@ -16,6 +16,7 @@ async-trait.workspace = true backon.workspace = true blake2 = { workspace = true } bytes.workspace = true +byteorder = { workspace = true } common.workspace = true policy.workspace = true chrono.workspace = true @@ -41,34 +42,33 @@ lock.workspace = true regex = { workspace = true } netif = { workspace = true } nix = { workspace = true } -path-absolutize = "3.1.1" +path-absolutize = { workspace = true } protos.workspace = true rmp.workspace = true rmp-serde.workspace = true tokio-util = { workspace = true, features = ["io", "compat"] } -crc32fast = "1.4.2" -siphasher = "1.0.1" -base64-simd = "0.8.0" +crc32fast = { workspace = true } +siphasher = { workspace = true } +base64-simd = { workspace = true } sha2 = { version = "0.11.0-pre.4" } -hex-simd = "0.8.0" -path-clean = "1.0.1" +hex-simd = { workspace = true } +path-clean = { workspace = true } tempfile.workspace = true tokio = { workspace = true, features = ["io-util", "sync", "signal"] } tokio-stream = { workspace = true } tonic.workspace = true -tower.workspace = true -byteorder = "1.5.0" -xxhash-rust = { version = "0.8.15", features = ["xxh64"] } -num = "0.4.3" +xxhash-rust = { workspace = true, features = ["xxh64", "xxh3"] } num_cpus = { workspace = true } -s3s-policy.workspace = true rand.workspace = true pin-project-lite.workspace = true md-5.workspace = true madmin.workspace = true workers.workspace = true reqwest = { workspace = true } -urlencoding = "2.1.3" +aws-sdk-s3 = { workspace = true } +once_cell = { workspace = true } +rustfs-rsc = { workspace = true } +urlencoding = { workspace = true } smallvec = { workspace = true } shadow-rs.workspace = true rustfs-filemeta.workspace = true diff --git a/ecstore/src/bucket/metadata.rs b/ecstore/src/bucket/metadata.rs index 40663fb2..9f36326e 100644 --- a/ecstore/src/bucket/metadata.rs +++ b/ecstore/src/bucket/metadata.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use time::OffsetDateTime; use tracing::error; +use crate::bucket::target::BucketTarget; use crate::config::com::{read_config, save_config}; use crate::error::{Error, Result}; use crate::new_object_layer_fn; @@ -278,8 +279,11 @@ impl BucketMetadata { self.replication_config_updated_at = updated; } BUCKET_TARGETS_FILE => { - self.tagging_config_xml = data; - self.tagging_config_updated_at = updated; + // let x = data.clone(); + // let str = std::str::from_utf8(&x).expect("Invalid UTF-8"); + // println!("update config:{}", str); + self.bucket_targets_config_json = data.clone(); + self.bucket_targets_config_updated_at = updated; } _ => return Err(Error::other(format!("config file not found : {}", config_file))), } @@ -342,8 +346,10 @@ impl BucketMetadata { if !self.replication_config_xml.is_empty() { self.replication_config = Some(deserialize::(&self.replication_config_xml)?); } + //let temp = self.bucket_targets_config_json.clone(); if !self.bucket_targets_config_json.is_empty() { - self.bucket_target_config = Some(BucketTargets::unmarshal(&self.bucket_targets_config_json)?); + let arr: Vec = serde_json::from_slice(&self.bucket_targets_config_json)?; + self.bucket_target_config = Some(BucketTargets { targets: arr }); } else { self.bucket_target_config = Some(BucketTargets::default()) } diff --git a/ecstore/src/bucket/metadata_sys.rs b/ecstore/src/bucket/metadata_sys.rs index bbd47895..cf00e5da 100644 --- a/ecstore/src/bucket/metadata_sys.rs +++ b/ecstore/src/bucket/metadata_sys.rs @@ -7,6 +7,7 @@ use crate::StorageAPI; use crate::bucket::error::BucketMetadataError; use crate::bucket::metadata::{BUCKET_LIFECYCLE_CONFIG, load_bucket_metadata_parse}; use crate::bucket::utils::is_meta_bucketname; +use crate::cmd::bucket_targets; use crate::error::{Error, Result, is_err_bucket_not_found}; use crate::global::{GLOBAL_Endpoints, is_dist_erasure, is_erasure, new_object_layer_fn}; use crate::heal::heal_commands::HealOpts; @@ -226,7 +227,9 @@ impl BucketMetadataSys { match res { Ok(res) => { if let Some(bucket) = buckets.get(idx) { - mp.insert(bucket.clone(), Arc::new(res)); + let x = Arc::new(res); + mp.insert(bucket.clone(), x.clone()); + bucket_targets::init_bucket_targets(bucket, x.clone()).await; } } Err(e) => { @@ -340,6 +343,7 @@ impl BucketMetadataSys { } pub async fn get_config_from_disk(&self, bucket: &str) -> Result { + println!("load data from disk"); if is_meta_bucketname(bucket) { return Err(Error::other("errInvalidArgument")); } @@ -549,7 +553,12 @@ impl BucketMetadataSys { pub async fn get_replication_config(&self, bucket: &str) -> Result<(ReplicationConfiguration, OffsetDateTime)> { let (bm, reload) = match self.get_config(bucket).await { - Ok(res) => res, + Ok(res) => { + if res.0.replication_config.is_none() { + return Err(BucketMetadataError::BucketReplicationConfigNotFound.into()); + } + res + } Err(err) => { warn!("get_replication_config err {:?}", &err); return if err == Error::ConfigNotFound { @@ -564,7 +573,7 @@ impl BucketMetadataSys { if reload { // TODO: globalBucketTargetSys } - + //println!("549 {:?}", config.clone()); Ok((config.clone(), bm.replication_config_updated_at)) } else { Err(BucketMetadataError::BucketReplicationConfigNotFound.into()) @@ -584,9 +593,12 @@ impl BucketMetadataSys { } }; + println!("573"); + if let Some(config) = &bm.bucket_target_config { if reload { // TODO: globalBucketTargetSys + //config. } Ok(config.clone()) diff --git a/ecstore/src/bucket/mod.rs b/ecstore/src/bucket/mod.rs index 33e5b87e..a4e79c93 100644 --- a/ecstore/src/bucket/mod.rs +++ b/ecstore/src/bucket/mod.rs @@ -4,8 +4,9 @@ pub mod metadata_sys; pub mod object_lock; pub mod policy_sys; mod quota; +pub mod replication; pub mod tagging; -mod target; +pub mod target; pub mod utils; pub mod versioning; pub mod versioning_sys; diff --git a/ecstore/src/bucket/replication/datatypes.rs b/ecstore/src/bucket/replication/datatypes.rs new file mode 100644 index 00000000..26c7c005 --- /dev/null +++ b/ecstore/src/bucket/replication/datatypes.rs @@ -0,0 +1,27 @@ +// Replication status type for x-amz-replication-status header +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum StatusType { + Pending, + Completed, + CompletedLegacy, + Failed, + Replica, +} + +impl StatusType { + // Converts the enum variant to its string representation + pub fn as_str(&self) -> &'static str { + match self { + StatusType::Pending => "PENDING", + StatusType::Completed => "COMPLETED", + StatusType::CompletedLegacy => "COMPLETE", + StatusType::Failed => "FAILED", + StatusType::Replica => "REPLICA", + } + } + + // Checks if the status is empty (not set) + pub fn is_empty(&self) -> bool { + matches!(self, StatusType::Pending) // Adjust this as needed + } +} diff --git a/ecstore/src/bucket/replication/mod.rs b/ecstore/src/bucket/replication/mod.rs new file mode 100644 index 00000000..0d8681dc --- /dev/null +++ b/ecstore/src/bucket/replication/mod.rs @@ -0,0 +1 @@ +pub mod datatypes; diff --git a/ecstore/src/bucket/tagging/mod.rs b/ecstore/src/bucket/tagging/mod.rs index f50b85a7..538f8e64 100644 --- a/ecstore/src/bucket/tagging/mod.rs +++ b/ecstore/src/bucket/tagging/mod.rs @@ -25,6 +25,7 @@ pub fn encode_tags(tags: Vec) -> String { for tag in tags.iter() { if let (Some(k), Some(v)) = (tag.key.as_ref(), tag.value.as_ref()) { + //encoded.append_pair(k.as_ref().unwrap().as_str(), v.as_ref().unwrap().as_str()); encoded.append_pair(k.as_str(), v.as_str()); } } diff --git a/ecstore/src/bucket/target/mod.rs b/ecstore/src/bucket/target/mod.rs index f2ee39fe..7be98bf4 100644 --- a/ecstore/src/bucket/target/mod.rs +++ b/ecstore/src/bucket/target/mod.rs @@ -1,15 +1,16 @@ use crate::error::Result; use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; -use std::time::Duration; use time::OffsetDateTime; #[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct Credentials { - access_key: String, - secret_key: String, - session_token: Option, - expiration: Option, + #[serde(rename = "accessKey")] + pub access_key: String, + #[serde(rename = "secretKey")] + pub secret_key: String, + pub session_token: Option, + pub expiration: Option>, } #[derive(Debug, Deserialize, Serialize, Default, Clone)] @@ -20,52 +21,53 @@ pub enum ServiceType { #[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct LatencyStat { - curr: Duration, // 当前延迟 - avg: Duration, // 平均延迟 - max: Duration, // 最大延迟 + curr: u64, // 当前延迟 + avg: u64, // 平均延迟 + max: u64, // 最大延迟 } // 定义 BucketTarget 结构体 #[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct BucketTarget { - source_bucket: String, + #[serde(rename = "sourcebucket")] + pub source_bucket: String, - endpoint: String, + pub endpoint: String, - credentials: Option, - - target_bucket: String, + pub credentials: Option, + #[serde(rename = "targetbucket")] + pub target_bucket: String, secure: bool, - - path: Option, + pub path: Option, api: Option, - arn: Option, + pub arn: Option, + #[serde(rename = "type")] + pub type_: Option, - type_: ServiceType, - - region: Option, + pub region: Option, bandwidth_limit: Option, + #[serde(rename = "replicationSync")] replication_sync: bool, storage_class: Option, - - health_check_duration: Option, - + #[serde(rename = "healthCheckDuration")] + health_check_duration: u64, + #[serde(rename = "disableProxy")] disable_proxy: bool, - reset_before_date: Option, - + #[serde(rename = "resetBeforeDate")] + reset_before_date: String, reset_id: Option, - - total_downtime: Duration, + #[serde(rename = "totalDowntime")] + total_downtime: u64, last_online: Option, - + #[serde(rename = "isOnline")] online: bool, latency: LatencyStat, @@ -73,6 +75,15 @@ pub struct BucketTarget { deployment_id: Option, edge: bool, + #[serde(rename = "edgeSyncBeforeExpiry")] + edge_sync_before_expiry: bool, +} + +impl BucketTarget { + pub fn is_empty(self) -> bool { + //self.target_bucket.is_empty() && self.endpoint.is_empty() && self.arn.is_empty() + self.target_bucket.is_empty() && self.endpoint.is_empty() && self.arn.is_none() + } } #[derive(Debug, Deserialize, Serialize, Default, Clone)] @@ -93,4 +104,18 @@ impl BucketTargets { let t: BucketTargets = rmp_serde::from_slice(buf)?; Ok(t) } + + pub fn is_empty(&self) -> bool { + if self.targets.is_empty() { + return true; + } + + for target in &self.targets { + if !target.clone().is_empty() { + return false; + } + } + + true + } } diff --git a/ecstore/src/cmd/bucket_replication.rs b/ecstore/src/cmd/bucket_replication.rs new file mode 100644 index 00000000..8e1c8a26 --- /dev/null +++ b/ecstore/src/cmd/bucket_replication.rs @@ -0,0 +1,2761 @@ +#![allow(unused_variables)] +#![allow(dead_code)] +// use error::Error; +use crate::StorageAPI; +use crate::bucket::metadata_sys::get_replication_config; +use crate::bucket::versioning_sys::BucketVersioningSys; +use crate::error::Error; +use crate::new_object_layer_fn; +use crate::peer::RemotePeerS3Client; +use crate::store; +use crate::store_api::ObjectIO; +use crate::store_api::ObjectInfo; +use crate::store_api::ObjectOptions; +use crate::store_api::ObjectToDelete; +use aws_sdk_s3::Client as S3Client; +use aws_sdk_s3::Config; +use aws_sdk_s3::config::BehaviorVersion; +use aws_sdk_s3::config::Credentials; +use aws_sdk_s3::config::Region; +use bytes::Bytes; +use chrono::DateTime; +use chrono::Duration; +use chrono::Utc; +use futures::StreamExt; +use futures::stream::FuturesUnordered; +use http::HeaderMap; +use http::Method; +use lazy_static::lazy_static; +// use std::time::SystemTime; +use once_cell::sync::Lazy; +use regex::Regex; +use rustfs_rsc::Minio; +use rustfs_rsc::provider::StaticProvider; +use s3s::dto::DeleteMarkerReplicationStatus; +use s3s::dto::DeleteReplicationStatus; +use s3s::dto::ExistingObjectReplicationStatus; +use s3s::dto::ReplicaModificationsStatus; +use s3s::dto::ReplicationRuleStatus; +use serde::{Deserialize, Serialize}; +use std::any::Any; +use std::collections::HashMap; +use std::collections::HashSet; +use std::fmt; +use std::iter::Iterator; +use std::sync::Arc; +use std::sync::atomic::AtomicI32; +use std::sync::atomic::Ordering; +use std::vec; +use time::OffsetDateTime; +use tokio::sync::Mutex; +use tokio::sync::RwLock; +use tokio::sync::mpsc::{Receiver, Sender}; +use tokio::task; +use tracing::{debug, error, info, warn}; +use uuid::Uuid; +use xxhash_rust::xxh3::xxh3_64; +// use bucket_targets::{self, GLOBAL_Bucket_Target_Sys}; + +#[derive(Serialize, Deserialize, Debug)] +struct MRFReplicateEntry { + #[serde(rename = "bucket")] + bucket: String, + + #[serde(rename = "object")] + object: String, + + #[serde(skip_serializing, skip_deserializing)] + version_id: String, + + #[serde(rename = "retryCount")] + retry_count: i32, + + #[serde(skip_serializing, skip_deserializing)] + sz: i64, +} + +trait ReplicationWorkerOperation: Any + Send + Sync { + fn to_mrf_entry(&self) -> MRFReplicateEntry; + fn as_any(&self) -> &dyn Any; +} + +// WorkerMaxLimit max number of workers per node for "fast" mode +pub const WORKER_MAX_LIMIT: usize = 50; + +// WorkerMinLimit min number of workers per node for "slow" mode +pub const WORKER_MIN_LIMIT: usize = 5; + +// WorkerAutoDefault is default number of workers for "auto" mode +pub const WORKER_AUTO_DEFAULT: usize = 10; + +// MRFWorkerMaxLimit max number of mrf workers per node for "fast" mode +pub const MRF_WORKER_MAX_LIMIT: usize = 8; + +// MRFWorkerMinLimit min number of mrf workers per node for "slow" mode +pub const MRF_WORKER_MIN_LIMIT: usize = 2; + +// MRFWorkerAutoDefault is default number of mrf workers for "auto" mode +pub const MRF_WORKER_AUTO_DEFAULT: usize = 4; + +// LargeWorkerCount is default number of workers assigned to large uploads ( >= 128MiB) +pub const LARGE_WORKER_COUNT: usize = 2; + +pub const MIN_LARGE_OBJSIZE: u64 = 128 * 1024 * 1024; + +pub struct ReplicationPool { + // Atomic operations + active_workers: Arc, + active_lrg_workers: Arc, + active_mrf_workers: Arc, + + // Shared objects + obj_layer: Arc, + //ctx: Arc>, // Placeholder for context; replace as needed + priority: String, + max_workers: usize, + max_lworkers: usize, + //stats: Option>, + + // Synchronization primitives + //mu: RwLock<()>, + //mrf_mu: Mutex<()>, + //resyncer: Option>, + + // Workers + workers_sender: Vec>>, + workers_recever: Vec>>, + lrg_workers_sender: Vec>>, + lrg_workers_receiver: Vec>>, + + // MRF + //mrf_worker_kill_ch: Option>, + mrf_replica_ch_sender: Sender>, + mrf_replica_ch_receiver: Receiver>, + //mrf_save_ch: Sender, + //mrf_stop_ch: Sender<()>, + mrf_worker_size: usize, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)] +#[repr(u8)] // 明确表示底层值为 u8 +pub enum ReplicationType { + #[default] + UnsetReplicationType = 0, + ObjectReplicationType = 1, + DeleteReplicationType = 2, + MetadataReplicationType = 3, + HealReplicationType = 4, + ExistingObjectReplicationType = 5, + ResyncReplicationType = 6, + AllReplicationType = 7, +} + +impl ReplicationType { + /// 从 u8 转换为枚举 + pub fn from_u8(value: u8) -> Option { + match value { + 0 => Some(Self::UnsetReplicationType), + 1 => Some(Self::ObjectReplicationType), + 2 => Some(Self::DeleteReplicationType), + 3 => Some(Self::MetadataReplicationType), + 4 => Some(Self::HealReplicationType), + 5 => Some(Self::ExistingObjectReplicationType), + 6 => Some(Self::ResyncReplicationType), + 7 => Some(Self::AllReplicationType), + _ => None, + } + } + + /// 获取枚举对应的 u8 值 + pub fn as_u8(self) -> u8 { + self as u8 + } + + pub fn is_data_replication(self) -> bool { + matches!( + self, + ReplicationType::ObjectReplicationType + | ReplicationType::HealReplicationType + | ReplicationType::ExistingObjectReplicationType + ) + } +} + +const SYSTEM_XML_OBJECT: &str = ".system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml"; +const CAPACITY_XML_OBJECT: &str = ".system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml"; +const VEEAM_AGENT_SUBSTR: &str = "APN/1.0 Veeam/1.0"; + +fn is_veeam_sos_api_object(object: &str) -> bool { + matches!(object, SYSTEM_XML_OBJECT | CAPACITY_XML_OBJECT) +} + +pub async fn queue_replication_heal( + bucket: &str, + oi: &ObjectInfo, + rcfg: &s3s::dto::ReplicationConfiguration, + _retry_count: u32, +) -> Option { + if oi.mod_time.is_none() || is_veeam_sos_api_object(&oi.name) { + return None; + } + + if rcfg.rules.is_empty() { + return None; + } + + let mut moi = oi.clone(); + + let mut roi = get_heal_replicate_object_info(&mut moi, rcfg).await; + //roi.retry_count = retry_count; + + if !roi.dsc.replicate_any() { + error!("Replication heal for object {} in bucket {} is not configured", oi.name, bucket); + return None; + } + + if oi.replication_status == ReplicationStatusType::Completed && !roi.existing_obj_resync.must_resync() { + return None; + } + + // Handle Delete Marker or VersionPurgeStatus cases + if roi.delete_marker || !roi.version_purge_status.is_empty() { + let (version_id, dm_version_id) = if roi.version_purge_status.is_empty() { + (String::new(), roi.version_id.clone()) + } else { + (roi.version_id.clone(), String::new()) + }; + + let dv = DeletedObjectReplicationInfo { + deleted_object: DeletedObject { + object_name: Some(roi.name.clone()), + delete_marker_version_id: Some(dm_version_id), + version_id: Some(roi.version_id.clone()), + replication_state: roi.replication_state.clone(), + delete_marker_mtime: roi.mod_time, + delete_marker: Some(roi.delete_marker), + }, + bucket: roi.bucket.clone(), + op_type: ReplicationType::HealReplicationType, + //event_type: ReplicationType::HealDeleteType, + event_type: "".to_string(), + reset_id: "".to_string(), + target_arn: "".to_string(), + }; + + if matches!(roi.replication_status, ReplicationStatusType::Pending | ReplicationStatusType::Failed) + || matches!(roi.version_purge_status, VersionPurgeStatusType::Failed | VersionPurgeStatusType::Pending) + { + let mut pool = GLOBAL_REPLICATION_POOL.write().await; + pool.as_mut().unwrap().queue_replica_task(roi).await; + //GLOBAL_REPLICATION_POOL().queue_replica_delete_task(dv); + return None; + } + + if roi.existing_obj_resync.must_resync() + && (roi.replication_status == ReplicationStatusType::Completed || roi.replication_status.is_empty()) + { + //queue_replicate_deletes_wrapper(dv, &roi.existing_obj_resync); + let mut pool = GLOBAL_REPLICATION_POOL.write().await; + pool.as_mut().unwrap().queue_replica_task(roi).await; + return None; + } + + return None; + } + + if roi.existing_obj_resync.must_resync() { + roi.op_type = ReplicationType::ExistingObjectReplicationType as i32; + } + + let mut pool = GLOBAL_REPLICATION_POOL.write().await; + + match roi.replication_status { + ReplicationStatusType::Pending | ReplicationStatusType::Failed => { + //roi.event_type = ReplicateEventType::Heal; + //roi.event_type = ReplicateEventType::Heal; + pool.as_mut().unwrap().queue_replica_task(roi.clone()).await; + return Some(roi); + } + _ => {} + } + + if roi.existing_obj_resync.must_resync() { + //roi.event_type = ReplicateEventType::Existing; + pool.as_mut().unwrap().queue_replica_task(roi.clone()).await; + } + + Some(roi) +} + +fn new_replicate_target_decision(arn: String, replicate: bool, sync: bool) -> ReplicateTargetDecision { + ReplicateTargetDecision { + id: String::new(), // Using a default value for the 'id' field is acceptable + replicate, + synchronous: sync, + arn, + } +} + +pub async fn check_replicate_delete( + bucket: &str, + dobj: &ObjectToDelete, + oi: &ObjectInfo, + del_opts: &ObjectOptions, + gerr: Option<&Error>, +) -> ReplicateDecision { + error!("check_replicate_delete"); + let mut dsc = ReplicateDecision::default(); + + let rcfg = match get_replication_config(bucket).await { + Ok((cfg, mod_time)) => cfg, + Err(e) => { + //repl_log_once_if(ctx, None, bucket); // 你需要实现这个日志函数 + error!("get replication config err:"); + return dsc; + } + }; + + if del_opts.replication_request { + return dsc; + } + + if !del_opts.versioned { + return dsc; + } + + let mut opts = ReplicationObjectOpts { + name: dobj.object_name.clone(), + ssec: false, + user_tags: Some(oi.user_tags.clone()), + delete_marker: oi.delete_marker, + //version_id: dobj.version_id.clone().map(|v| v.to_string()), + version_id: oi.version_id.map(|uuid| uuid.to_string()).unwrap_or_default(), + op_type: ReplicationType::DeleteReplicationType, + target_arn: None, + replica: true, + existing_object: true, + }; + + let tgt_arns = rcfg.filter_target_arns(&opts); + dsc.targets_map = HashMap::with_capacity(tgt_arns.len()); + + if tgt_arns.is_empty() { + return dsc; + } + + let sync = false; + let mut replicate; + + for tgt_arn in tgt_arns { + //let mut opts = opts.clone(); + opts.target_arn = Some(tgt_arn.clone()); + replicate = rcfg.replicate(&opts); + + if gerr.is_some() { + let valid_repl_status = matches!( + oi.target_replication_status(tgt_arn.clone()), + ReplicationStatusType::Pending | ReplicationStatusType::Completed | ReplicationStatusType::Failed + ); + + if oi.delete_marker && (valid_repl_status || replicate) { + dsc.set(new_replicate_target_decision(tgt_arn.clone(), replicate, sync)); + continue; + } + + if !oi.version_purge_status.is_empty() { + replicate = matches!(oi.version_purge_status, VersionPurgeStatusType::Pending | VersionPurgeStatusType::Failed); + dsc.set(new_replicate_target_decision(tgt_arn.clone(), replicate, sync)); + continue; + } + } + + let tgt = bucket_targets::get_bucket_target_client(bucket, &tgt_arn).await; + + let tgt_dsc = match tgt { + Ok(tgt) => new_replicate_target_decision(tgt_arn.clone(), replicate, tgt.replicate_sync), + Err(_) => new_replicate_target_decision(tgt_arn.clone(), false, false), + }; + + // let tgt_dsc = if let Some(tgt) = tgt { + // new_replicate_target_decision(tgt_arn.clone(), replicate, tgt.replicate_sync) + // } else { + // new_replicate_target_decision(tgt_arn.clone(), false, false) + // }; + + dsc.set(tgt_dsc); + } + + dsc +} +// use crate::replication::*; +// use crate::crypto; +// use crate::global::*; + +fn target_reset_header(arn: &str) -> String { + format!("{}{}-{}", RESERVED_METADATA_PREFIX_LOWER, REPLICATION_RESET, arn) +} + +pub async fn get_heal_replicate_object_info( + oi: &mut ObjectInfo, + rcfg: &s3s::dto::ReplicationConfiguration, +) -> ReplicateObjectInfo { + let mut user_defined = oi.user_defined.clone(); + + if !rcfg.rules.is_empty() { + if !oi.replication_status.is_empty() { + oi.replication_status_internal = format!("{}={};", rcfg.role, oi.replication_status.as_str()); + } + + if !oi.version_purge_status.is_empty() { + oi.version_purge_status_internal = format!("{}={};", rcfg.role, oi.version_purge_status); + } + + // let to_replace: Vec<(String, String)> = user_defined + // .iter() + // .filter(|(k, _)| k.eq_ignore_ascii_case(&(RESERVED_METADATA_PREFIX_LOWER.to_owned() + REPLICATION_RESET))) + // .map(|(k, v)| (k.clone(), v.clone())) + // .collect::>() + // .collect(); + let to_replace: Vec<(String, String)> = match &user_defined { + Some(map) => map + .iter() + .filter(|(k, _)| k.eq_ignore_ascii_case(&(RESERVED_METADATA_PREFIX_LOWER.to_owned() + REPLICATION_RESET))) + .map(|(k, v)| (k.clone(), v.clone())) + .collect(), + None => Vec::new(), + }; + + // 第二步:apply 修改 + for (k, v) in to_replace { + if let Some(mp) = user_defined.as_mut() { + mp.remove(&k); + mp.insert(target_reset_header(&rcfg.role), v); + } + } + } + //} + + //let dsc = if oi.delete_marker || !oi.version_purge_status.is_empty() { + let dsc = if oi.delete_marker { + check_replicate_delete( + &oi.bucket, + &ObjectToDelete { + object_name: oi.name.clone(), + version_id: oi.version_id, + }, + oi, + &ObjectOptions { + // versioned: global_bucket_versioning_sys::prefix_enabled(&oi.bucket, &oi.name), + // version_suspended: global_bucket_versioning_sys::prefix_suspended(&oi.bucket, &oi.name), + versioned: true, + version_suspended: false, + ..Default::default() + }, + None, + ) + .await + } else { + // let opts: ObjectOptions = put_opts(&bucket, &key, version_id, &req.headers, Some(mt)) + // .await + // .map_err(to_s3_error)?; + let mt = oi.user_defined.clone(); + let mt2 = oi.user_defined.clone(); + let opts = ObjectOptions { + user_defined: user_defined.clone(), + versioned: true, + version_id: oi.version_id.map(|uuid| uuid.to_string()), + mod_time: oi.mod_time, + ..Default::default() + }; + let repoptions = get_must_replicate_options( + mt2.as_ref().unwrap_or(&HashMap::new()), + "", + ReplicationStatusType::Unknown, + ReplicationType::ObjectReplicationType, + &opts, + ); + + let decision = must_replicate(&oi.bucket, &oi.name, &repoptions).await; + error!("decision:"); + decision + }; + + let tgt_statuses = replication_statuses_map(&oi.replication_status_internal); + let purge_statuses = version_purge_statuses_map(&oi.version_purge_status_internal); + //let existing_obj_resync = rcfg.resync(&GLOBAL_CONTEXT, oi, &dsc, &tgt_statuses); + + // let tm = user_defined + // .get(&(RESERVED_METADATA_PREFIX_LOWER.to_owned() + REPLICATION_TIMESTAMP)) + // .and_then(|v| DateTime::parse_from_rfc3339(v).ok()) + // .map(|dt| dt.with_timezone(&Utc)); + + let tm = user_defined.as_ref().and_then(|map| { + map.get(&(RESERVED_METADATA_PREFIX_LOWER.to_owned() + REPLICATION_TIMESTAMP)) + .and_then(|v| DateTime::parse_from_rfc3339(v).ok()) + .map(|dt| dt.with_timezone(&Utc)) + }); + + let mut rstate = oi.replication_state(); + rstate.replicate_decision_str = dsc.to_string(); + + let asz = oi.get_actual_size().unwrap_or(0); + + let key = format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, REPLICATION_TIMESTAMP); + let tm: Option> = user_defined + .as_ref() + .unwrap() + .get(&key) + .and_then(|v| DateTime::parse_from_rfc3339(v).ok()) + .map(|dt| dt.with_timezone(&Utc)); + + let mut result = ReplicateObjectInfo { + name: oi.name.clone(), + size: oi.size as i64, + actual_size: asz as i64, + bucket: oi.bucket.clone(), + //version_id: oi.version_id.clone(), + version_id: oi + .version_id + .map(|uuid| uuid.to_string()) // 将 Uuid 转换为 String + .unwrap_or_default(), + etag: oi.etag.clone().unwrap(), + mod_time: convert_offsetdatetime_to_chrono(oi.mod_time).unwrap(), + replication_status: oi.replication_status.clone(), + replication_status_internal: oi.replication_status_internal.clone(), + delete_marker: oi.delete_marker, + version_purge_status_internal: oi.version_purge_status_internal.clone(), + version_purge_status: oi.version_purge_status.clone(), + replication_state: rstate, + op_type: 1, + dsc, + existing_obj_resync: Default::default(), + target_statuses: tgt_statuses, + target_purge_statuses: purge_statuses, + replication_timestamp: tm.unwrap_or_else(Utc::now), + //ssec: crypto::is_encrypted(&oi.user_defined), + ssec: false, + user_tags: oi.user_tags.clone(), + checksum: oi.checksum.clone(), + event_type: "".to_string(), + retry_count: 0, + reset_id: "".to_string(), + target_arn: "".to_string(), + }; + + if result.ssec { + result.checksum = oi.checksum.clone(); + } + + warn!( + "Replication heal for object {} in bucket {} is configured {:?}", + oi.name, oi.bucket, oi.version_id + ); + + result +} + +#[derive(Debug, Clone)] +pub struct MustReplicateOptions { + pub meta: HashMap, + pub status: ReplicationStatusType, + pub op_type: ReplicationType, + pub replication_request: bool, // Incoming request is a replication request +} + +impl MustReplicateOptions { + /// Get the replication status from metadata, if available. + pub fn replication_status(&self) -> ReplicationStatusType { + if let Some(rs) = self.meta.get("x-amz-bucket-replication-status") { + return match rs.as_str() { + "Pending" => ReplicationStatusType::Pending, + "Completed" => ReplicationStatusType::Completed, + "CompletedLegacy" => ReplicationStatusType::CompletedLegacy, + "Failed" => ReplicationStatusType::Failed, + "Replica" => ReplicationStatusType::Replica, + _ => ReplicationStatusType::Unknown, + }; + } + self.status.clone() + } + + /// Check if the operation type is existing object replication. + pub fn is_existing_object_replication(&self) -> bool { + self.op_type == ReplicationType::ExistingObjectReplicationType + } + + /// Check if the operation type is metadata replication. + pub fn is_metadata_replication(&self) -> bool { + self.op_type == ReplicationType::MetadataReplicationType + } +} + +use tokio::sync::mpsc; + +use crate::cmd::bucket_targets; + +// use super::bucket_targets::Client; +use super::bucket_targets::TargetClient; +//use crate::storage; + +// 模拟依赖的类型 +pub struct Context; // 用于代替 Go 的 `context.Context` +#[derive(Default)] +pub struct ReplicationStats; + +#[derive(Default)] +pub struct ReplicationPoolOpts { + pub priority: String, + pub max_workers: usize, + pub max_l_workers: usize, +} + +//pub static GLOBAL_REPLICATION_POOL: OnceLock> = OnceLock::new(); + +pub static GLOBAL_REPLICATION_POOL: Lazy>> = Lazy::new(|| { + RwLock::new(None) // 允许延迟初始化 +}); + +impl ReplicationPool { + pub async fn init_bucket_replication_pool( + obj_layer: Arc, + opts: ReplicationPoolOpts, + stats: Arc, + ) { + let mut workers = 0; + let mut failed_workers = 0; + let mut priority = "auto".to_string(); + let mut max_workers = WORKER_MAX_LIMIT; + warn!("init_bucket_replication_pool {} {} {} {}", workers, failed_workers, priority, max_workers); + + let (sender, receiver) = mpsc::channel::>(10); + + // Self { + // mrf_replica_ch_sender: sender, + // } + + if !opts.priority.is_empty() { + priority = opts.priority.clone(); + } + if opts.max_workers > 0 { + max_workers = opts.max_workers; + } + + match priority.as_str() { + "fast" => { + workers = WORKER_MAX_LIMIT; + failed_workers = MRF_WORKER_MAX_LIMIT; + } + "slow" => { + workers = WORKER_MIN_LIMIT; + failed_workers = MRF_WORKER_MIN_LIMIT; + } + _ => { + workers = WORKER_AUTO_DEFAULT; + failed_workers = MRF_WORKER_AUTO_DEFAULT; + } + } + + if max_workers > 0 && workers > max_workers { + workers = max_workers; + } + if max_workers > 0 && failed_workers > max_workers { + failed_workers = max_workers; + } + + let max_l_workers = if opts.max_l_workers > 0 { + opts.max_l_workers + } else { + LARGE_WORKER_COUNT + }; + + // 初始化通道 + let (mrf_replica_tx, _) = mpsc::channel::(100_000); + let (mrf_worker_kill_tx, _) = mpsc::channel::(failed_workers); + let (mrf_save_tx, _) = mpsc::channel::(100_000); + let (mrf_stop_tx, _) = mpsc::channel::(1); + + let mut pool = Self { + workers_sender: Vec::with_capacity(workers), + workers_recever: Vec::with_capacity(workers), + lrg_workers_sender: Vec::with_capacity(max_l_workers), + lrg_workers_receiver: Vec::with_capacity(max_l_workers), + active_workers: Arc::new(AtomicI32::new(0)), + active_lrg_workers: Arc::new(AtomicI32::new(0)), + active_mrf_workers: Arc::new(AtomicI32::new(0)), + max_lworkers: max_l_workers, + //mrf_worker_kill_ch: None, + mrf_replica_ch_sender: sender, + mrf_replica_ch_receiver: receiver, + mrf_worker_size: workers, + priority, + max_workers, + obj_layer, + }; + + warn!("work size is: {}", workers); + pool.resize_lrg_workers(max_l_workers, Some(0)).await; + pool.resize_workers(workers, Some(0)).await; + pool.resize_failed_workers(failed_workers).await; + let obj_layer_clone = pool.obj_layer.clone(); + + // 启动后台任务 + let resyncer = Arc::new(RwLock::new(ReplicationResyncer::new())); + let x = Arc::new(RwLock::new(&pool)); + // tokio::spawn(async move { + // resyncer.lock().await.persist_to_disk(ctx_clone, obj_layer_clone).await; + // }); + + tokio::spawn(async move { + //pool4.process_mrf().await + }); + let pool5 = Arc::clone(&x); + tokio::spawn(async move { + //pool5.persist_mrf().await + }); + + let mut global_pool = GLOBAL_REPLICATION_POOL.write().await; + global_pool.replace(pool); + } + + pub async fn resize_lrg_workers(&mut self, n: usize, check_old: Option) { + //let mut lrg_workers = self.lrg_workers.lock().unwrap(); + if (check_old.is_some() && self.lrg_workers_sender.len() != check_old.unwrap()) + || n == self.lrg_workers_sender.len() + || n < 1 + { + // Either already satisfied or worker count changed while waiting for the lock. + return; + } + println!("2 resize_lrg_workers"); + + let active_workers = Arc::clone(&self.active_lrg_workers); + let obj_layer = Arc::clone(&self.obj_layer); + let mut lrg_workers_sender = std::mem::take(&mut self.lrg_workers_sender); + + while lrg_workers_sender.len() < n { + let (sender, mut receiver) = mpsc::channel::>(100); + lrg_workers_sender.push(sender); + + let active_workers_clone = Arc::clone(&active_workers); + let obj_layer_clone = Arc::clone(&obj_layer); + + tokio::spawn(async move { + while let Some(operation) = receiver.recv().await { + println!("resize workers 1"); + active_workers_clone.fetch_add(1, Ordering::SeqCst); + + if let Some(info) = operation.as_any().downcast_ref::() { + replicate_object(info.clone(), obj_layer_clone.clone()).await; + } else if let Some(info) = operation.as_any().downcast_ref::() { + replicate_delete(&info.clone(), obj_layer_clone.clone()).await; + } else { + eprintln!("Unknown replication type"); + } + + active_workers_clone.fetch_sub(1, Ordering::SeqCst); + } + }); + } + + // Add new workers if needed + // Remove excess workers if needed + while lrg_workers_sender.len() > n { + lrg_workers_sender.pop(); // Dropping the sender will close the channel + } + + self.lrg_workers_sender = lrg_workers_sender; + } + + pub async fn resize_workers(&mut self, n: usize, check_old: Option) { + debug!("resize worker"); + //let mut lrg_workers = self.lrg_workers.lock().unwrap(); + if (check_old.is_some() && self.workers_sender.len() != check_old.unwrap()) || n == self.workers_sender.len() || n < 1 { + // Either already satisfied or worker count changed while waiting for the lock. + return; + } + debug!("resize worker"); + // Add new workers if needed + let active_workers_clone = Arc::clone(&self.active_workers); + let mut vsender = std::mem::take(&mut self.workers_sender); + //let mut works_sender = std::mem::take(&mut self.workers_sender); + let layer = Arc::clone(&self.obj_layer); + while vsender.len() < n { + debug!("resize workers"); + let (sender, mut receiver) = mpsc::channel::>(100); + vsender.push(sender); + + let active_workers_clone = Arc::clone(&active_workers_clone); + // Spawn a new workero + let layer_clone = Arc::clone(&layer); + tokio::spawn(async move { + while let Some(operation) = receiver.recv().await { + // Simulate work being processed + active_workers_clone.fetch_add(1, Ordering::SeqCst); + + if let Some(info) = operation.as_any().downcast_ref::() { + //self.stats.inc_q(&info.bucket, info.size, info.delete_marker, &info.op_type); + let _layer = Arc::clone(&layer_clone); + replicate_object(info.clone(), _layer).await; + //self.stats.dec_q(&info.bucket, info.size, info.delete_marker, &info.op_type); + } else if let Some(info) = operation.as_any().downcast_ref::() { + let _layer = Arc::clone(&layer_clone); + replicate_delete(&info.clone(), _layer).await; + } else { + eprintln!("Unknown replication type"); + } + + active_workers_clone.fetch_sub(1, Ordering::SeqCst); + } + }); + } + // Remove excess workers if needed + while vsender.len() > n { + vsender.pop(); // Dropping the sender will close the channel + } + self.workers_sender = vsender; + warn!("self sender size is {:?}", self.workers_sender.len()); + warn!("self sender size is {:?}", self.workers_sender.len()); + } + + async fn resize_failed_workers(&self, _count: usize) { + // 实现失败 worker 的初始化逻辑 + } + + // async fn process_mrf(&self) { + // // 实现 MRF 处理逻辑 + // } + + // async fn persist_mrf(&self) { + // // 实现 MRF 持久化逻辑 + // } + + fn get_worker_ch(&self, bucket: &str, object: &str, _sz: i64) -> Option<&Sender>> { + let h = xxh3_64(format!("{}{}", bucket, object).as_bytes()); // 计算哈希值 + //need lock; + let workers = &self.workers_sender; // 读锁 + + if workers.is_empty() { + warn!("workers is empty"); + return None; + } + + let index = (h as usize) % workers.len(); // 选择 worker + Some(&workers[index]) // 返回对应的 Sender + } + + async fn queue_replica_task(&mut self, ri: ReplicateObjectInfo) { + if ri.size >= MIN_LARGE_OBJSIZE as i64 { + let h = xxh3_64(format!("{}{}", ri.bucket, ri.name).as_bytes()); + let workers = &self.lrg_workers_sender; + let worker_count = workers.len(); + + if worker_count > 0 { + let worker_index = (h as usize) % worker_count; + let sender = &workers[worker_index]; + + match sender.try_send(Box::new(ri.clone())) { + Ok(_) => return, + Err(_) => { + // 任务队列满了,执行 MRF 处理 + //println!("Queue full, saving to MRF: {}", ri.to_mrf_entry()); + println!("Queue full, saving to MRF"); + } + } + } + + // 检查是否需要增加 worker + let existing = worker_count; + let max_workers = self.max_lworkers.min(LARGE_WORKER_COUNT); + + if self.active_lrg_workers.load(Ordering::SeqCst) < max_workers as i32 { + let new_worker_count = (existing + 1).min(max_workers); + self.resize_lrg_workers(new_worker_count, Some(existing)).await; + } + return; + } + let mut ch: Option<&Sender>> = None; + let mut heal_ch: Option<&Sender>> = None; + warn!("enqueue object:{}", ch.is_none()); + + if ri.op_type == ReplicationType::HealReplicationType as i32 + || ri.op_type == ReplicationType::ExistingObjectReplicationType as i32 + { + ch = Some(&self.mrf_replica_ch_sender); + heal_ch = self.get_worker_ch(&ri.name, &ri.bucket, ri.size); + } else { + info!("get worker channel for replication"); + ch = self.get_worker_ch(&ri.name, &ri.bucket, ri.size); + } + + if ch.is_none() && heal_ch.is_none() { + error!("replicste chan empty"); + return; + } + + let mut sent = false; + tokio::select! { + //_ = self.ctx_done.closed() => {}, + Some(h) = async { heal_ch } => { + //if let Some(h) = h { + if h.send(Box::new(ri.clone())).await.is_ok() { + warn!("enqueue object"); + sent = true; + } + //} + } + Some(c) = async { ch } => { + //if let Some(c) = c { + if c.send(Box::new(ri.clone())).await.is_ok() { + info!("enqueue object"); + sent = true; + } + //} + } + } + + if !sent { + //todo! + //self.queue_mrf_save(ri).await; + let max_workers = self.max_workers; + + match self.priority.as_str() { + "fast" => { + println!("Warning: Unable to keep up with incoming traffic"); + } + "slow" => { + println!("Warning: Incoming traffic is too high. Increase replication priority."); + } + _ => { + let worker_count = self.active_workers.load(Ordering::SeqCst); + let max_workers = max_workers.min(WORKER_MAX_LIMIT); + if worker_count < max_workers as i32 { + //self.resize_workers((worker_count + 1 as usize).try_into().unwrap(), worker_count).await; + self.resize_workers(worker_count as usize + 1_usize, Some(worker_count as usize)) + .await; + } + + //let max_mrf_workers = max_workers.min(MRFWorkerMaxLimit); + let max_mrf_workers = max_workers.min(MRF_WORKER_MAX_LIMIT); + if self.mrf_worker_size < max_mrf_workers { + self.resize_failed_workers(self.mrf_worker_size + 1).await; + } + } + } + } + } +} + +pub struct ReplicationResyncer; + +impl Default for ReplicationResyncer { + fn default() -> Self { + Self + } +} + +impl ReplicationResyncer { + pub fn new() -> Self { + Self + } + + pub async fn persist_to_disk(&self, _ctx: Arc, _obj_layer: Arc) { + // 实现持久化到磁盘的逻辑 + } +} + +pub async fn init_bucket_replication_pool() { + if let Some(store) = new_object_layer_fn() { + let opts = ReplicationPoolOpts::default(); + let stats = ReplicationStats; + let stat = Arc::new(stats); + warn!("init bucket replication pool"); + ReplicationPool::init_bucket_replication_pool(store, opts, stat).await; + } else { + // TODO: to be added + } +} + +pub struct ReplicationClient { + pub s3cli: S3Client, + pub remote_peer_client: RemotePeerS3Client, + pub arn: String, +} + +pub trait RemotePeerS3ClientExt { + fn putobject(remote_bucket: String, remote_object: String, size: i64); + fn multipart(); +} + +impl RemotePeerS3ClientExt for RemotePeerS3Client { + fn putobject(remote_bucket: String, remote_object: String, size: i64) {} + + fn multipart() {} +} + +#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum ReplicationStatusType { + #[default] + Pending, + Completed, + CompletedLegacy, + Failed, + Replica, + Unknown, +} + +impl ReplicationStatusType { + // Converts the enum variant to its string representation + pub fn as_str(&self) -> &'static str { + match self { + ReplicationStatusType::Pending => "PENDING", + ReplicationStatusType::Completed => "COMPLETED", + ReplicationStatusType::CompletedLegacy => "COMPLETE", + ReplicationStatusType::Failed => "FAILED", + ReplicationStatusType::Replica => "REPLICA", + ReplicationStatusType::Unknown => "", + } + } + + // Checks if the status is empty (not set) + pub fn is_empty(&self) -> bool { + matches!(self, ReplicationStatusType::Pending) // Adjust logic if needed + } + + // 从字符串构造 ReplicationStatusType 枚举 + pub fn from(value: &str) -> Self { + match value.to_uppercase().as_str() { + "PENDING" => ReplicationStatusType::Pending, + "COMPLETED" => ReplicationStatusType::Completed, + "COMPLETE" => ReplicationStatusType::CompletedLegacy, + "FAILED" => ReplicationStatusType::Failed, + "REPLICA" => ReplicationStatusType::Replica, + other => ReplicationStatusType::Unknown, + } + } +} + +#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum VersionPurgeStatusType { + Pending, + Complete, + Failed, + Empty, + #[default] + Unknown, +} + +impl VersionPurgeStatusType { + // 检查是否是 Empty + pub fn is_empty(&self) -> bool { + matches!(self, VersionPurgeStatusType::Empty) + } + + // 检查是否是 Pending(Pending 或 Failed 都算作 Pending 状态) + pub fn is_pending(&self) -> bool { + matches!(self, VersionPurgeStatusType::Pending | VersionPurgeStatusType::Failed) + } +} + +// 从字符串实现转换(类似于 Go 的字符串比较) +impl From<&str> for VersionPurgeStatusType { + fn from(value: &str) -> Self { + match value.to_uppercase().as_str() { + "PENDING" => VersionPurgeStatusType::Pending, + "COMPLETE" => VersionPurgeStatusType::Complete, + "FAILED" => VersionPurgeStatusType::Failed, + _ => VersionPurgeStatusType::Empty, + } + } +} + +impl fmt::Display for VersionPurgeStatusType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + VersionPurgeStatusType::Pending => "PENDING", + VersionPurgeStatusType::Complete => "COMPLETE", + VersionPurgeStatusType::Failed => "FAILED", + VersionPurgeStatusType::Empty => "", + VersionPurgeStatusType::Unknown => "UNKNOWN", + }; + write!(f, "{}", s) + } +} + +pub fn get_composite_version_purge_status(status_map: &HashMap) -> VersionPurgeStatusType { + if status_map.is_empty() { + return VersionPurgeStatusType::Unknown; + } + + let mut completed_count = 0; + + for status in status_map.values() { + match status { + VersionPurgeStatusType::Failed => return VersionPurgeStatusType::Failed, + VersionPurgeStatusType::Complete => completed_count += 1, + _ => {} + } + } + + if completed_count == status_map.len() { + VersionPurgeStatusType::Complete + } else { + VersionPurgeStatusType::Pending + } +} + +// 定义 ReplicationAction 枚举 +#[derive(Debug, Default, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub enum ReplicationAction { + ReplicateMetadata, + #[default] + ReplicateNone, + ReplicateAll, +} + +impl ReplicationAction { + /// Get the replication action based on the operation type and object info comparison. + pub fn from_operation_type(op_type: &str) -> Self { + match op_type.to_lowercase().as_str() { + "metadata" => ReplicationAction::ReplicateMetadata, + "none" => ReplicationAction::ReplicateNone, + "all" => ReplicationAction::ReplicateAll, + _ => ReplicationAction::ReplicateAll, + } + } +} + +impl std::str::FromStr for ReplicationAction { + type Err = (); + + fn from_str(action: &str) -> Result { + match action.to_lowercase().as_str() { + "metadata" => Ok(ReplicationAction::ReplicateMetadata), + "none" => Ok(ReplicationAction::ReplicateNone), + "all" => Ok(ReplicationAction::ReplicateAll), + _ => Ok(ReplicationAction::ReplicateAll), + } + } +} + +// 定义 ObjectInfo 结构体 +// #[derive(Debug)] +// pub struct ObjectInfo { +// pub e_tag: String, +// pub version_id: String, +// pub actual_size: i64, +// pub mod_time: DateTime, +// pub delete_marker: bool, +// pub content_type: String, +// pub content_encoding: String, +// pub user_tags: HashMap, +// pub user_defined: HashMap, +// } + +// impl ObjectInfo { +// // 获取实际大小 +// pub fn get_actual_size(&self) -> i64 { +// self.actual_size +// } +// } + +// 定义 MinioObjectInfo 结构体 +#[derive(Debug)] +pub struct MinioObjectInfo { + pub e_tag: String, + pub version_id: String, + pub size: i64, + pub last_modified: DateTime, + pub is_delete_marker: bool, + pub content_type: String, + pub metadata: HashMap>, + pub user_tag_count: usize, + pub user_tags: HashMap, +} + +// 忽略大小写比较字符串列表 +// fn equals(k1: &str, keys: &[&str]) -> bool { +// keys.iter().any(|&k2| k1.eq_ignore_ascii_case(k2)) +// } + +// 比较两个对象的 ReplicationAction +pub fn get_replication_action(oi1: &ObjectInfo, oi2: &ObjectInfo, op_type: &str) -> ReplicationAction { + let _null_version_id = "null"; + + // 如果是现有对象复制,判断是否需要跳过同步 + if op_type == "existing" && oi1.mod_time > oi2.mod_time && oi1.version_id.is_none() { + return ReplicationAction::ReplicateNone; + } + + let sz = oi1.get_actual_size(); + + // 完整复制的条件 + if oi1.etag != oi2.etag + || oi1.version_id != oi2.version_id + || sz.unwrap() != oi2.size + || oi1.delete_marker != oi2.delete_marker + || oi1.mod_time != oi2.mod_time + { + return ReplicationAction::ReplicateAll; + } + + // 元数据复制的条件 + if oi1.content_type != oi2.content_type { + return ReplicationAction::ReplicateMetadata; + } + + // if oi1.content_encoding.is_some() { + // if let Some(enc) = oi2 + // .metadata + // .get("content-encoding") + // .or_else(|| oi2.metadata.get("content-encoding".to_lowercase().as_str())) + // { + // if enc.join(",") != oi1.content_encoding { + // return ReplicationAction::ReplicateMetadata; + // } + // } else { + // return ReplicationAction::ReplicateMetadata; + // } + // } + + // if !oi2.user_tags.is_empty() && oi1.user_tags != oi2.user_tags { + // return ReplicationAction::ReplicateMetadata; + // } + + // 需要比较的头部前缀列表 + // let compare_keys = vec![ + // "expires", + // "cache-control", + // "content-language", + // "content-disposition", + // "x-amz-object-lock-mode", + // "x-amz-object-lock-retain-until-date", + // "x-amz-object-lock-legal-hold", + // "x-amz-website-redirect-location", + // "x-amz-meta-", + // ]; + + // 提取并比较必要的元数据 + // let compare_meta1: HashMap = oi1 + // .user_defined + // .iter() + // .filter(|(k, _)| compare_keys.iter().any(|prefix| k.to_lowercase().starts_with(prefix))) + // .map(|(k, v)| (k.to_lowercase(), v.clone())) + // .collect(); + + // let compare_meta2: HashMap = oi2 + // .metadata + // .iter() + // .filter(|(k, _)| compare_keys.iter().any(|prefix| k.to_lowercase().starts_with(prefix))) + // .map(|(k, v)| (k.to_lowercase(), v.join(","))) + // .collect(); + + // if compare_meta1 != compare_meta2 { + // return ReplicationAction::ReplicateMetadata; + // } + + ReplicationAction::ReplicateNone +} + +/// 目标的复制决策结构 +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReplicateTargetDecision { + pub replicate: bool, // 是否进行复制 + pub synchronous: bool, // 是否是同步复制 + pub arn: String, // 复制目标的 ARN + pub id: String, // ID +} + +impl ReplicateTargetDecision { + /// 创建一个新的 ReplicateTargetDecision 实例 + pub fn new(arn: &str, replicate: bool, synchronous: bool) -> Self { + Self { + id: String::new(), + replicate, + synchronous, + arn: arn.to_string(), + } + } +} + +impl fmt::Display for ReplicateTargetDecision { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{};{};{};{}", self.replicate, self.synchronous, self.arn, self.id) + } +} + +/// 复制决策结构体,包含多个目标的决策 +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct ReplicateDecision { + targets_map: HashMap, +} + +impl ReplicateDecision { + /// 创建一个新的空的 ReplicateDecision + pub fn new() -> Self { + Self { + targets_map: HashMap::new(), + } + } + + /// 检查是否有任何目标需要复制 + pub fn replicate_any(&self) -> bool { + self.targets_map.values().any(|t| t.replicate) + } + + /// 检查是否有任何目标需要同步复制 + pub fn synchronous(&self) -> bool { + self.targets_map.values().any(|t| t.synchronous) + } + + /// 将目标的决策添加到 map 中 + pub fn set(&mut self, decision: ReplicateTargetDecision) { + self.targets_map.insert(decision.arn.clone(), decision); + } + + /// 返回所有目标的 Pending 状态字符串 + pub fn pending_status(&self) -> String { + let mut result = String::new(); + for target in self.targets_map.values() { + if target.replicate { + result.push_str(&format!("{}=PENDING;", target.arn)); + } + } + result + } +} + +impl fmt::Display for ReplicateDecision { + /// 将 ReplicateDecision 转换为字符串格式 + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut entries = Vec::new(); + for (key, value) in &self.targets_map { + entries.push(format!("{}={}", key, value)); + } + write!(f, "{}", entries.join(",")) + } +} + +/// ResyncTargetDecision 表示重同步决策 +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ResyncTargetDecision { + pub replicate: bool, + pub reset_id: String, + pub reset_before_date: DateTime, +} + +/// ResyncDecision 表示所有目标的重同步决策 +#[derive(Default, Debug, Clone, Serialize, Deserialize)] +pub struct ResyncDecision { + targets: HashMap, +} + +impl ResyncDecision { + /// 创建一个新的 ResyncDecision + pub fn new() -> Self { + Self { targets: HashMap::new() } + } + + /// 检查是否没有任何目标需要重同步 + pub fn is_empty(&self) -> bool { + self.targets.is_empty() + } + + /// 检查是否有至少一个目标需要重同步 + pub fn must_resync(&self) -> bool { + self.targets.values().any(|v| v.replicate) + } + + /// 检查指定目标是否需要重同步 + pub fn must_resync_target(&self, tgt_arn: &str) -> bool { + if let Some(target) = self.targets.get(tgt_arn) { + target.replicate + } else { + false + } + } +} + +/// 解析字符串为 ReplicateDecision 结构 +pub fn parse_replicate_decision(input: &str) -> Result { + let mut decision = ReplicateDecision::new(); + if input.is_empty() { + return Ok(decision); + } + + for pair in input.split(',') { + if pair.is_empty() { + continue; + } + let parts: Vec<&str> = pair.split('=').collect(); + if parts.len() != 2 { + return Err("Invalid replicate decision format"); + } + + let key = parts[0]; + let value = parts[1].trim_matches('"'); + let values: Vec<&str> = value.split(';').collect(); + + if values.len() != 4 { + return Err("Invalid replicate target decision format"); + } + + let replicate = values[0] == "true"; + let synchronous = values[1] == "true"; + let arn = values[2].to_string(); + let id = values[3].to_string(); + + decision.set(ReplicateTargetDecision { + replicate, + synchronous, + arn, + id, + }); + } + Ok(decision) +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct ReplicatedTargetInfo { + pub arn: String, + pub size: i64, + pub duration: Duration, + pub replication_action: ReplicationAction, // 完整或仅元数据 + pub op_type: i32, // 传输类型 + pub replication_status: ReplicationStatusType, // 当前复制状态 + pub prev_replication_status: ReplicationStatusType, // 上一个复制状态 + pub version_purge_status: VersionPurgeStatusType, // 版本清理状态 + pub resync_timestamp: String, // 重同步时间戳 + pub replication_resynced: bool, // 是否重同步 + pub endpoint: String, // 目标端点 + pub secure: bool, // 是否安全连接 + pub err: Option, // 错误信息 +} + +// 实现 ReplicatedTargetInfo 方法 +impl ReplicatedTargetInfo { + /// 检查 arn 是否为空 + pub fn is_empty(&self) -> bool { + self.arn.is_empty() + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct DeletedObjectReplicationInfo { + #[serde(flatten)] // 使用 `flatten` 将 `DeletedObject` 的字段展开到当前结构体 + pub deleted_object: DeletedObject, + + pub bucket: String, + pub event_type: String, + pub op_type: ReplicationType, // 假设 `replication.Type` 是 `ReplicationType` 枚举 + pub reset_id: String, + pub target_arn: String, +} + +pub fn get_composite_replication_status(m: &HashMap) -> ReplicationStatusType { + if m.is_empty() { + return ReplicationStatusType::Unknown; + } + + let mut completed_count = 0; + + for status in m.values() { + match status { + ReplicationStatusType::Failed => return ReplicationStatusType::Failed, + ReplicationStatusType::Completed => completed_count += 1, + _ => {} + } + } + + if completed_count == m.len() { + return ReplicationStatusType::Completed; + } + + ReplicationStatusType::Pending +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct ReplicationState { + pub replica_timestamp: DateTime, + pub replica_status: ReplicationStatusType, + pub delete_marker: bool, + pub replication_timestamp: DateTime, + pub replication_status_internal: String, + pub version_purge_status_internal: String, + pub replicate_decision_str: String, + pub targets: HashMap, + pub purge_targets: HashMap, + pub reset_statuses_map: HashMap, +} + +// impl Default for ReplicationState { +// fn default() -> Self { +// ReplicationState { +// replica_timestamp: Utc::now(), +// replica_status: ReplicationStatusType::default(), +// delete_marker: false, +// replication_timestamp: Utc::now(), +// replication_status_internal: String::new(), +// version_purge_status_internal: String::new(), +// replicate_decision_str: String::new(), +// targets: HashMap::new(), +// purge_targets: HashMap::new(), +// reset_statuses_map: HashMap::new(), +// } +// } +// } + +pub struct ReplicationObjectOpts { + pub name: String, + pub user_tags: Option, + pub version_id: String, + pub delete_marker: bool, + pub ssec: bool, + pub op_type: ReplicationType, + pub replica: bool, + pub existing_object: bool, + pub target_arn: Option, +} + +pub trait ConfigProcess { + fn filter_actionable_rules(&self, obj: &ReplicationObjectOpts) -> Vec; + + fn replicate(&self, obj: &ReplicationObjectOpts) -> bool; + fn filter_target_arns(&self, obj: &ReplicationObjectOpts) -> Vec; +} + +impl ConfigProcess for s3s::dto::ReplicationConfiguration { + fn filter_target_arns(&self, obj: &ReplicationObjectOpts) -> Vec { + let mut arns = Vec::new(); + let mut tgts_map = HashSet::new(); + + let rules = self.filter_actionable_rules(obj); + debug!("rule len is {}", rules.len()); + for rule in rules { + debug!("rule"); + + if rule.status == ReplicationRuleStatus::from_static(ReplicationRuleStatus::DISABLED) { + debug!("rule"); + continue; + } + + if !self.role.is_empty() { + debug!("rule"); + arns.push(self.role.clone()); // use legacy RoleArn if present + return arns; + } + + debug!("rule"); + if !tgts_map.contains(&rule.destination.bucket) { + tgts_map.insert(rule.destination.bucket.clone()); + } + } + + for arn in tgts_map { + arns.push(arn); + } + arns + } + + fn replicate(&self, obj: &ReplicationObjectOpts) -> bool { + for rule in self.filter_actionable_rules(obj) { + if rule.status == ReplicationRuleStatus::from_static(ReplicationRuleStatus::DISABLED) { + warn!("need replicate failed"); + continue; + } + if obj.existing_object + && rule.existing_object_replication.is_some() + && rule.existing_object_replication.unwrap().status + == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED) + { + warn!("need replicate failed"); + return false; + } + + if obj.op_type == ReplicationType::DeleteReplicationType { + return if !obj.version_id.is_empty() { + // MinIO 扩展:检查版本化删除 + if rule.delete_replication.is_none() { + warn!("need replicate failed"); + return false; + } + rule.delete_replication.unwrap().status + == DeleteReplicationStatus::from_static(DeleteReplicationStatus::DISABLED) + } else { + if rule.delete_marker_replication.is_none() { + warn!("need replicate failed"); + return false; + } + if rule.delete_marker_replication.as_ref().unwrap().status.clone().is_none() { + warn!("need replicate failed"); + return false; + } + rule.delete_marker_replication.as_ref().unwrap().status.clone().unwrap() + == DeleteMarkerReplicationStatus::from_static(DeleteMarkerReplicationStatus::DISABLED) + }; + } + // 处理常规对象/元数据复制 + if !obj.replica { + warn!("not need replicate {} {} ", obj.name, obj.version_id); + return true; + } + return obj.replica + && rule.source_selection_criteria.is_some() + && rule.source_selection_criteria.unwrap().replica_modifications.unwrap().status + == ReplicaModificationsStatus::from_static(ReplicaModificationsStatus::ENABLED); + } + warn!("need replicate failed"); + false + } + + fn filter_actionable_rules(&self, obj: &ReplicationObjectOpts) -> Vec { + if obj.name.is_empty() + && !matches!(obj.op_type, ReplicationType::ResyncReplicationType | ReplicationType::AllReplicationType) + { + warn!("filter"); + return vec![]; + } + + let mut rules: Vec = Vec::new(); + debug!("rule size is {}", &self.rules.len()); + + for rule in &self.rules { + if rule.status.as_str() == ReplicationRuleStatus::DISABLED { + debug!("rule size is"); + continue; + } + + if obj.target_arn.is_some() + && rule.destination.bucket != obj.target_arn.clone().unwrap() + && self.role != obj.target_arn.clone().unwrap() + { + debug!("rule size is"); + continue; + } + debug!("match {:?}", obj.op_type.clone()); + if matches!(obj.op_type, ReplicationType::ResyncReplicationType | ReplicationType::AllReplicationType) { + //println!("filter"); + rules.push(rule.clone()); + continue; + } + + if obj.existing_object { + if rule.existing_object_replication.is_none() { + continue; + } + + if rule.existing_object_replication.clone().unwrap().status.as_str() == ExistingObjectReplicationStatus::DISABLED + { + continue; + } + } + + if rule.prefix.is_some() && !obj.name.starts_with(rule.prefix.as_ref().unwrap()) { + continue; + } + + //if rule.filter.test_tags(&obj.user_tags) { + rules.push(rule.clone()); + //} + } + + rules.sort_by(|a, b| { + if a.priority == b.priority { + a.destination.bucket.to_string().cmp(&b.destination.bucket.to_string()) + } else { + b.priority.cmp(&a.priority) + } + }); + + rules + } +} + +fn replication_statuses_map(s: &str) -> HashMap { + let mut targets = HashMap::new(); + let repl_status_regex = Regex::new(r"(\w+):([\w-]+)").unwrap(); + + for cap in repl_status_regex.captures_iter(s) { + if let (Some(target), Some(status)) = (cap.get(1), cap.get(2)) { + let tp = ReplicationStatusType::from(status.as_str()); + targets.insert(target.as_str().to_string(), tp); + } + } + + targets +} + +fn version_purge_statuses_map(s: &str) -> HashMap { + let mut targets = HashMap::new(); + let repl_status_regex = Regex::new(r"(\w+):([\w-]+)").unwrap(); + + for cap in repl_status_regex.captures_iter(s) { + if let (Some(target), Some(status)) = (cap.get(1), cap.get(2)) { + let ptp = VersionPurgeStatusType::from(status.as_str()); + targets.insert(target.as_str().to_string(), ptp); + } + } + + targets +} + +pub trait TraitForObjectInfo { + fn replication_state(&self) -> ReplicationState; +} + +const RESERVED_METADATA_PREFIX: &str = "X-Minio-Internal-"; +const RESERVED_METADATA_PREFIX_LOWER: &str = "x-minio-internal-"; +lazy_static! { + static ref THROTTLE_DEADLINE: std::time::Duration = std::time::Duration::from_secs(3600); +} + +// Replication-related string constants +pub const REPLICATION_RESET: &str = "replication-reset"; +pub const REPLICATION_STATUS: &str = "replication-status"; +pub const REPLICATION_TIMESTAMP: &str = "replication-timestamp"; +pub const REPLICA_STATUS: &str = "replica-status"; +pub const REPLICA_TIMESTAMP: &str = "replica-timestamp"; +pub const TAGGING_TIMESTAMP: &str = "tagging-timestamp"; +pub const OBJECT_LOCK_RETENTION_TIMESTAMP: &str = "objectlock-retention-timestamp"; +pub const OBJECT_LOCK_LEGAL_HOLD_TIMESTAMP: &str = "objectlock-legalhold-timestamp"; +pub const REPLICATION_SSEC_CHECKSUM_HEADER: &str = "X-Minio-Replication-Ssec-Crc"; + +impl TraitForObjectInfo for ObjectInfo { + fn replication_state(&self) -> ReplicationState { + let mut rs = ReplicationState { + replication_status_internal: self.replication_status_internal.clone(), + //version_purge_status_internal: self.version_purge_status_internal.clone(), + version_purge_status_internal: "".to_string(), + replicate_decision_str: self.replication_status_internal.clone(), + targets: HashMap::new(), + purge_targets: HashMap::new(), + reset_statuses_map: HashMap::new(), + replica_timestamp: Utc::now(), + replica_status: ReplicationStatusType::Pending, + delete_marker: false, + replication_timestamp: Utc::now(), + }; + + // Set targets and purge_targets using respective functions + rs.targets = replication_statuses_map(&self.replication_status_internal); + //rs.purge_targets = version_purge_statuses_map(&self.version_purge_status_internal); + rs.purge_targets = version_purge_statuses_map(""); + + // Process reset statuses map + if self.user_defined.is_some() { + for (k, v) in self.user_defined.as_ref().unwrap() { + if k.starts_with(&(RESERVED_METADATA_PREFIX_LOWER.to_owned() + REPLICATION_RESET)) { + let arn = k.trim_start_matches(&(RESERVED_METADATA_PREFIX_LOWER.to_owned() + REPLICATION_RESET)); + rs.reset_statuses_map.insert(arn.to_string(), v.clone()); + } + } + } + rs + } +} + +fn convert_offsetdatetime_to_chrono(offset_dt: Option) -> Option> { + //offset_dt.map(|odt| { + let tm = offset_dt.unwrap().unix_timestamp(); + //let naive = NaiveDateTime::from_timestamp_opt(tm, 0).expect("Invalid timestamp"); + DateTime::::from_timestamp(tm, 0) + //DateTime::from_naive_utc_and_offset(naive, Utc) // Convert to Utc first + //}) +} + +pub async fn schedule_replication(oi: ObjectInfo, o: Arc, dsc: ReplicateDecision, op_type: i32) { + let tgt_statuses = replication_statuses_map(&oi.replication_status_internal); + // //let purge_statuses = version_purge_statuses_map(&oi.); + let replication_timestamp = Utc::now(); // Placeholder for timestamp parsing + let replication_state = oi.replication_state(); + + let actual_size = oi.actual_size.unwrap_or(0); + //let ssec = oi.user_defined.contains_key("ssec"); + let ssec = false; + + let ri = ReplicateObjectInfo { + name: oi.name, + size: oi.size as i64, + bucket: oi.bucket, + version_id: oi + .version_id + .map(|uuid| uuid.to_string()) // 将 Uuid 转换为 String + .unwrap_or_default(), + etag: oi.etag.unwrap_or_default(), + mod_time: convert_offsetdatetime_to_chrono(oi.mod_time).unwrap(), + replication_status: oi.replication_status, + replication_status_internal: oi.replication_status_internal, + delete_marker: oi.delete_marker, + version_purge_status_internal: oi.version_purge_status_internal, + version_purge_status: oi.version_purge_status, + replication_state, + op_type, + dsc: dsc.clone(), + target_statuses: tgt_statuses, + target_purge_statuses: Default::default(), + replication_timestamp, + ssec, + user_tags: oi.user_tags, + checksum: if ssec { oi.checksum.clone() } else { Vec::new() }, + event_type: "".to_string(), + retry_count: 0, + reset_id: "".to_string(), + existing_obj_resync: Default::default(), + target_arn: "".to_string(), + actual_size: 0, + }; + + if dsc.synchronous() { + warn!("object sync replication"); + replicate_object(ri, o).await; + } else { + warn!("object need async replication"); + //GLOBAL_REPLICATION_POOL.lock().unwrap().queue_replica_task(ri); + let mut pool = GLOBAL_REPLICATION_POOL.write().await; + pool.as_mut().unwrap().queue_replica_task(ri).await; + } +} + +pub async fn must_replicate(bucket: &str, object: &str, mopts: &MustReplicateOptions) -> ReplicateDecision { + let mut decision = ReplicateDecision::default(); + + // object layer 未初始化时直接返回 + if new_object_layer_fn().is_none() { + return decision; + } + + // 检查是否允许复制(版本化前缀 + if !BucketVersioningSys::prefix_enabled(bucket, object).await { + return decision; + } + + let repl_status = mopts.replication_status(); + if repl_status == ReplicationStatusType::Replica && !mopts.is_metadata_replication() { + return decision; + } + + if mopts.replication_request { + return decision; + } + + let cfg = match get_replication_config(bucket).await { + Ok((config, timestamp)) => config, + //Ok(None) => return decision, + Err(err) => { + //repl_log_once_if(err, bucket); + return decision; + } + }; + + let mut opts = ReplicationObjectOpts { + name: object.to_string(), + //ssec: crypto::is_ssec_encrypted(&mopts.meta), + ssec: false, + replica: repl_status == ReplicationStatusType::Replica, + existing_object: mopts.is_existing_object_replication(), + user_tags: None, + target_arn: None, + version_id: "0".to_string(), + delete_marker: false, + op_type: mopts.op_type, + }; + + if let Some(tag_str) = mopts.meta.get("x-amz-object-tagging") { + opts.user_tags = Some(tag_str.clone()); + } + + // let rules = cfg.filter_actionable_rules(&opts); + let tgt_arns = cfg.filter_target_arns(&opts); + info!("arn lens:{}", tgt_arns.len()); + for tgt_arn in tgt_arns { + let tgt = bucket_targets::get_bucket_target_client(bucket, &tgt_arn.clone()).await; + //let tgt = GLOBAL_Bucket_Target_Sys.get().unwrap().get_remote_target_client(tgt) + + // 不判断在线状态,因为目标可能暂时不可用 + opts.target_arn = Some(tgt_arn.clone()); + let replicate = cfg.replicate(&opts); + info!("need replicate {}", &replicate); + + let synchronous = tgt.is_ok_and(|t| t.replicate_sync); + //decision.set(ReplicateTargetDecision::new(replicate,synchronous)); + info!("targe decision arn is:{}", tgt_arn.clone()); + decision.set(ReplicateTargetDecision { + replicate, + synchronous, + arn: tgt_arn.clone(), + id: 0.to_string(), + }); + } + info!("must replicate"); + decision +} + +impl ReplicationState { + // Equal 方法:判断两个状态是否相等 + pub fn equal(&self, other: &ReplicationState) -> bool { + self.replica_status == other.replica_status + && self.replication_status_internal == other.replication_status_internal + && self.version_purge_status_internal == other.version_purge_status_internal + } + + // CompositeReplicationStatus 方法:返回总体的复制状态 + pub fn composite_replication_status(&self) -> ReplicationStatusType { + if !self.replication_status_internal.is_empty() { + let status = ReplicationStatusType::from(self.replication_status_internal.as_str()); + match status { + ReplicationStatusType::Pending + | ReplicationStatusType::Completed + | ReplicationStatusType::Failed + | ReplicationStatusType::Replica => status, + _ => { + let repl_status = get_composite_replication_status(&self.targets); + if self.replica_timestamp == Utc::now() || self.replica_timestamp.timestamp() == 0 { + return repl_status; + } + if repl_status == ReplicationStatusType::Completed && self.replica_timestamp > self.replication_timestamp { + return self.replica_status.clone(); + } + repl_status + } + } + } else if !self.replica_status.is_empty() { + self.replica_status.clone() + } else { + return ReplicationStatusType::Unknown; + } + } + + // CompositeVersionPurgeStatus 方法:返回总体的版本清除状态 + pub fn composite_version_purge_status(&self) -> VersionPurgeStatusType { + let status = VersionPurgeStatusType::from(self.version_purge_status_internal.as_str()); + match status { + VersionPurgeStatusType::Pending | VersionPurgeStatusType::Complete | VersionPurgeStatusType::Failed => status, + _ => get_composite_version_purge_status(&self.purge_targets), + } + } + + // target_state 方法:返回目标状态 + pub fn target_state(&self, arn: &str) -> ReplicatedTargetInfo { + ReplicatedTargetInfo { + arn: arn.to_string(), + prev_replication_status: self.targets.get(arn).cloned().unwrap_or(ReplicationStatusType::Unknown), + version_purge_status: self + .purge_targets + .get(arn) + .cloned() + .unwrap_or(VersionPurgeStatusType::Unknown), + resync_timestamp: self.reset_statuses_map.get(arn).cloned().unwrap_or_default(), + size: 0, + replication_status: self.replica_status.clone(), + duration: Duration::zero(), + replication_action: ReplicationAction::ReplicateAll, + op_type: 0, + replication_resynced: false, + endpoint: "".to_string(), + secure: false, + err: None, + } + } +} + +lazy_static! { + static ref REPL_STATUS_REGEX: Regex = Regex::new(r"([^=].*?)=([^,].*?);").unwrap(); +} +pub trait ObjectInfoExt { + fn target_replication_status(&self, arn: String) -> ReplicationStatusType; + fn is_multipart(&self) -> bool; +} + +impl ObjectInfoExt for ObjectInfo { + fn target_replication_status(&self, arn: String) -> ReplicationStatusType { + let rep_stat_matches = REPL_STATUS_REGEX.captures_iter(&self.replication_status_internal); + for matched in rep_stat_matches { + if let Some(arn_match) = matched.get(1) { + if arn_match.as_str() == arn { + if let Some(status_match) = matched.get(2) { + return ReplicationStatusType::from(status_match.as_str()); + } + } + } + } + /* `ReplicationStatusType` value */ + ReplicationStatusType::Unknown + } + fn is_multipart(&self) -> bool { + match &self.etag { + Some(etgval) => etgval.len() != 32 && !etgval.is_empty(), + None => false, + } + } +} + +// Replication type enum (placeholder, as it's not clearly used in the Go code) +//#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReplicateObjectInfo { + pub name: String, + pub bucket: String, + pub version_id: String, + pub etag: String, + pub size: i64, + pub actual_size: i64, + pub mod_time: DateTime, + pub user_tags: String, + pub ssec: bool, + pub replication_status: ReplicationStatusType, + pub replication_status_internal: String, + pub version_purge_status_internal: String, + pub version_purge_status: VersionPurgeStatusType, + pub replication_state: ReplicationState, + pub delete_marker: bool, + + pub op_type: i32, + pub event_type: String, + pub retry_count: u32, + pub reset_id: String, + pub dsc: ReplicateDecision, + pub existing_obj_resync: ResyncDecision, + pub target_arn: String, + pub target_statuses: HashMap, + pub target_purge_statuses: HashMap, + pub replication_timestamp: DateTime, + pub checksum: Vec, +} +impl ReplicateObjectInfo { + pub fn to_object_info(&self) -> ObjectInfo { + ObjectInfo { + bucket: self.bucket.clone(), + name: self.name.clone(), + mod_time: Some( + OffsetDateTime::from_unix_timestamp(self.mod_time.timestamp()).unwrap_or_else(|_| OffsetDateTime::now_utc()), + ), + size: self.size as usize, + actual_size: Some(self.actual_size as usize), + is_dir: false, + user_defined: None, // 可以按需从别处导入 + parity_blocks: 0, + data_blocks: 0, + version_id: Uuid::try_parse(&self.version_id).ok(), + delete_marker: self.delete_marker, + user_tags: self.user_tags.clone(), + parts: Vec::new(), + is_latest: true, + content_type: None, + content_encoding: None, + num_versions: 0, + successor_mod_time: None, + put_object_reader: None, + etag: Some(self.etag.clone()), + inlined: false, + metadata_only: false, + version_only: false, + replication_status_internal: self.replication_status_internal.clone(), + replication_status: self.replication_status.clone(), + version_purge_status_internal: self.version_purge_status_internal.clone(), + version_purge_status: self.version_purge_status.clone(), + checksum: self.checksum.clone(), + } + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct DeletedObject { + #[serde(rename = "DeleteMarker")] + pub delete_marker: Option, // Go 中的 `bool` 转换为 Rust 中的 `Option` 以支持 `omitempty` + + #[serde(rename = "DeleteMarkerVersionId")] + pub delete_marker_version_id: Option, // `omitempty` 转为 `Option` + + #[serde(rename = "Key")] + pub object_name: Option, // 同样适用 `Option` 包含 `omitempty` + + #[serde(rename = "VersionId")] + pub version_id: Option, // 同上 + + // 以下字段未出现在 XML 序列化中,因此不需要 serde 标注 + #[serde(skip)] + pub delete_marker_mtime: DateTime, // 自定义类型,需定义或引入 + #[serde(skip)] + pub replication_state: ReplicationState, // 自定义类型,需定义或引入 +} + +// 假设 `DeleteMarkerMTime` 和 `ReplicationState` 的定义如下: +#[derive(Debug, Default, Clone)] +pub struct DeleteMarkerMTime { + time: chrono::NaiveDate, + // 填写具体字段类型 +} + +impl ReplicationWorkerOperation for ReplicateObjectInfo { + fn to_mrf_entry(&self) -> MRFReplicateEntry { + MRFReplicateEntry { + bucket: self.bucket.clone(), + object: self.name.clone(), + version_id: self.version_id.clone(), // 直接使用计算后的 version_id + retry_count: 0, + sz: self.size, + } + } + fn as_any(&self) -> &dyn Any { + self + } +} + +impl ReplicationWorkerOperation for DeletedObjectReplicationInfo { + fn to_mrf_entry(&self) -> MRFReplicateEntry { + // Since both branches are identical, we can simplify this + let version_id = self.deleted_object.delete_marker_version_id.clone(); + + MRFReplicateEntry { + bucket: self.bucket.clone(), + object: self.deleted_object.object_name.clone().unwrap(), + version_id: "0".to_string(), // 直接使用计算后的 version_id + retry_count: 0, + sz: 0, + } + } + fn as_any(&self) -> &dyn Any { + self + } +} + +pub fn get_s3client_from_para(ak: &str, sk: &str, url: &str, _region: &str) -> Result> { + let credentials = Credentials::new(ak, sk, None, None, ""); + let region = Region::new("us-east-1".to_string()); + + let config = Config::builder() + .region(region) + .endpoint_url(url.to_string()) + .credentials_provider(credentials) + .behavior_version(BehaviorVersion::latest()) // Adjust as necessary + .build(); + Ok(S3Client::from_conf(config)) +} + +// use hyper::body::Body; +// use s3s::Body; + +async fn replicate_object_with_multipart( + rep_obj: &ReplicateObjectInfo, + local_obj_info: &ObjectInfo, + target_info: &ReplicatedTargetInfo, + tgt_cli: &TargetClient, +) -> Result<(), Error> { + let store = new_object_layer_fn().unwrap(); + let provider = StaticProvider::new(&tgt_cli.ak, &tgt_cli.sk, None); + let minio_cli = Minio::builder() + .endpoint(target_info.endpoint.clone()) + .provider(provider) + .secure(false) + .build() + .map_err(|e| Error::other(format!("build minio client failed: {}", e)))?; + + let ret = minio_cli + .create_multipart_upload_with_versionid(tgt_cli.bucket.clone(), local_obj_info.name.clone(), rep_obj.version_id.clone()) + .await; + match ret { + Ok(task) => { + let parts_len = local_obj_info.parts.len(); + let mut part_results = vec![None; parts_len]; + let version_id = local_obj_info.version_id.expect("missing version_id"); + let task = Arc::new(task); // clone safe + let store = Arc::new(store); + let minio_cli = Arc::new(minio_cli); + + let mut upload_futures = FuturesUnordered::new(); + + for (index, _) in local_obj_info.parts.iter().enumerate() { + let store = Arc::clone(&store); + let minio_cli = Arc::clone(&minio_cli); + let task = Arc::clone(&task); + let bucket = local_obj_info.bucket.clone(); + let name = local_obj_info.name.clone(); + let version_id_clone = version_id; + + upload_futures.push(tokio::spawn(async move { + let get_opts = ObjectOptions { + version_id: Some(version_id_clone.to_string()), + versioned: true, + part_number: Some(index + 1), + version_suspended: false, + ..Default::default() + }; + + let h = HeaderMap::new(); + match store.get_object_reader(&bucket, &name, None, h, &get_opts).await { + Ok(mut reader) => match reader.read_all().await { + Ok(ret) => { + debug!("2025 readall suc:"); + let body = Bytes::from(ret); + match minio_cli.upload_part(&task, index + 1, body).await { + Ok(part) => { + debug!("2025 multipar upload suc:"); + Ok((index, part)) + } + Err(err) => { + error!("upload part {} failed: {}", index + 1, err); + Err(Error::other(format!("upload error: {}", err))) + } + } + } + Err(err) => { + error!("read error for part {}: {}", index + 1, err); + Err(err) + } + }, + Err(err) => { + error!("reader error for part {}: {}", index + 1, err); + Err(Error::other(format!("reader error: {}", err))) + } + } + })); + } + + while let Some(result) = upload_futures.next().await { + match result { + Ok(Ok((index, part))) => { + part_results[index] = Some(part); + } + Ok(Err(err)) => { + error!("upload part failed: {}", err); + return Err(err); + } + Err(join_err) => { + error!("tokio join error: {}", join_err); + return Err(Error::other(format!("join error: {}", join_err))); + } + } + } + + let parts: Vec<_> = part_results.into_iter().flatten().collect(); + + let ret = minio_cli.complete_multipart_upload(&task, parts, None).await; + match ret { + Ok(res) => { + warn!("finish upload suc:{:?} version_id={:?}", res, local_obj_info.version_id); + } + Err(err) => { + error!("finish upload failed:{}", err); + return Err(Error::other(format!("finish upload failed:{}", err))); + } + } + } + Err(err) => { + return Err(Error::other(format!("finish upload failed:{}", err))); + } + } + Ok(()) +} + +impl ReplicateObjectInfo { + fn target_replication_status(&self, arn: &str) -> ReplicationStatusType { + // 定义正则表达式,匹配类似 `arn;status` 格式 + let repl_status_regex = Regex::new(r"(\w+);(\w+)").expect("Invalid regex"); + + // 遍历正则表达式的匹配项 + for caps in repl_status_regex.captures_iter(&self.replication_status_internal) { + if let (Some(matched_arn), Some(matched_status)) = (caps.get(1), caps.get(2)) { + // 如果 ARN 匹配,返回对应的状态 + if matched_arn.as_str() == arn { + return ReplicationStatusType::from(matched_status.as_str()); + } + } + } + + // 如果没有匹配到,返回默认的 `Unknown` 状态 + ReplicationStatusType::Unknown + } + + async fn replicate_object(&self, target: &TargetClient, _arn: String) -> ReplicatedTargetInfo { + let _start_time = Utc::now(); + + // 初始化 ReplicatedTargetInfo + warn!("replicate is {}", _arn.clone()); + let mut rinfo = ReplicatedTargetInfo { + size: self.actual_size, + arn: _arn.clone(), + prev_replication_status: self.target_replication_status(&_arn.clone()), + replication_status: ReplicationStatusType::Failed, + op_type: self.op_type, + replication_action: ReplicationAction::ReplicateAll, + endpoint: target.endpoint.clone(), + secure: target.endpoint.clone().contains("https://"), + resync_timestamp: Utc::now().to_string(), + replication_resynced: false, + duration: Duration::default(), + err: None, + version_purge_status: VersionPurgeStatusType::Pending, + }; + + if self.target_replication_status(&_arn) == ReplicationStatusType::Completed + && !self.existing_obj_resync.is_empty() + && !self.existing_obj_resync.must_resync_target(&_arn) + { + warn!("replication return"); + rinfo.replication_status = ReplicationStatusType::Completed; + rinfo.replication_resynced = true; + return rinfo; + } + + // 模拟远程目标离线的检查 + // if self.is_target_offline(&target.endpoint) { + // rinfo.err = Some(format!( + // "Target is offline for bucket: {} arn: {} retry: {}", + // self.bucket, + // _arn.clone(), + // self.retry_count + // )); + // return rinfo; + // } + + // versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object) + // versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object) + + // 模拟对象获取和元数据检查 + let opt = ObjectOptions { + version_id: Some(self.version_id.clone()), + versioned: true, + version_suspended: false, + ..Default::default() + }; + + let object_info = match self.get_object_info(opt).await { + Ok(info) => info, + Err(err) => { + error!("get object info err:{}", err); + rinfo.err = Some(err.to_string()); + return rinfo; + } + }; + + rinfo.prev_replication_status = object_info.target_replication_status(_arn); + + // 设置对象大小 + //rinfo.size = object_info.actual_size.unwrap_or(0); + rinfo.size = object_info.actual_size.map_or(0, |v| v as i64); + //rinfo.replication_action = object_info. + + rinfo.replication_status = ReplicationStatusType::Completed; + rinfo.size = object_info.get_actual_size().unwrap_or(0) as i64; + rinfo.replication_action = ReplicationAction::ReplicateAll; + + let store = new_object_layer_fn().unwrap(); + //todo!() put replicationopts; + if object_info.is_multipart() { + debug!("version is multi part"); + match replicate_object_with_multipart(self, &object_info, &rinfo, target).await { + Ok(_) => { + rinfo.replication_status = ReplicationStatusType::Completed; + println!("Object replicated successfully."); + } + Err(e) => { + rinfo.replication_status = ReplicationStatusType::Failed; + error!("Failed to replicate object: {:?}", e); + // 你可以根据错误类型进一步分类处理 + } + } + //replicate_object_with_multipart(local_obj_info, target_info, tgt_cli) + } else { + let get_opts = ObjectOptions { + version_id: Some(object_info.version_id.expect("REASON").to_string()), + versioned: true, + version_suspended: false, + ..Default::default() + }; + warn!("version id is:{:?}", get_opts.version_id.clone()); + let h = HeaderMap::new(); + let gr = store + .get_object_reader(&object_info.bucket, &object_info.name, None, h, &get_opts) + .await; + + match gr { + Ok(mut reader) => { + warn!("endpoint is: {}", rinfo.endpoint); + let provider = StaticProvider::new(&target.ak, &target.sk, None); + let res = reader.read_all().await; + match res { + Ok(ret) => { + let body = rustfs_rsc::Data::from(ret); + let minio_cli = Minio::builder() + .endpoint(rinfo.endpoint.clone()) + .provider(provider) + .secure(false) + .build() + .unwrap(); + + let ex = minio_cli.executor(Method::PUT); + let ret = ex + .bucket_name(target.bucket.clone()) + .object_name(self.name.clone()) + .body(body) + .query("versionId", get_opts.version_id.clone().unwrap()) + .send_ok() + .await; + match ret { + Ok(_res) => { + warn!("replicate suc: {} {} {}", self.bucket, self.name, self.version_id); + rinfo.replication_status = ReplicationStatusType::Completed; + } + Err(err) => { + error!("replicate {} err:{}", target.bucket.clone(), err); + rinfo.replication_status = ReplicationStatusType::Failed; + } + } + } + Err(err) => { + error!("read_all err {}", err); + rinfo.replication_status = ReplicationStatusType::Failed; + return rinfo; + } + } + } + Err(err) => { + rinfo.replication_status = ReplicationStatusType::Failed; + error!("get client error {}", err); + } + } + } + rinfo + } + + fn is_target_offline(&self, endpoint: &str) -> bool { + // 模拟检查目标是否离线 + warn!("Checking if target {} is offline", endpoint); + false + } + + async fn get_object_info(&self, opts: ObjectOptions) -> Result { + let objectlayer = new_object_layer_fn(); + //let opts = ecstore::store_api::ObjectOptions { max_parity: (), mod_time: (), part_number: (), delete_prefix: (), version_id: (), no_lock: (), versioned: (), version_suspended: (), skip_decommissioned: (), skip_rebalancing: (), data_movement: (), src_pool_idx: (), user_defined: (), preserve_etag: (), metadata_chg: (), replication_request: (), delete_marker: () } + + objectlayer.unwrap().get_object_info(&self.bucket, &self.name, &opts).await + } + + fn perform_replication(&self, target: &RemotePeerS3Client, object_info: &ObjectInfo) -> Result<(), String> { + // 模拟复制操作 + // println!( + // "Replicating object {} to target {}", + // //object_info.name, target.arn + // ); + Ok(()) + } + + fn current_timestamp() -> String { + // 返回当前时间戳 + "2024-12-18T00:00:00Z".to_string() + } +} + +//pub fn getvalidrule(cfg: ReplicationConfiguration) -> Vec { +// let mut arns = Vec::new(); +// let mut tgts_map = std::collections::HashSet::new(); +// for rule in cfg.rules { +// if rule.status.as_str() == "Disabe" { +// continue; +// } + +// if tgts_map.insert(rule.clone()) {} +// } +// arns +//} + +pub async fn replicate_delete(_ri: &DeletedObjectReplicationInfo, object_api: Arc) {} + +pub fn clone_mss(v: &HashMap) -> HashMap { + let mut r = HashMap::with_capacity(v.len()); + for (k, v) in v { + r.insert(k.clone(), v.clone()); + } + r +} + +pub fn get_must_replicate_options( + user_defined: &HashMap, + user_tags: &str, + status: ReplicationStatusType, // 假设 `status` 是字符串类型 + op: ReplicationType, // 假设 `op` 是字符串类型 + opts: &ObjectOptions, +) -> MustReplicateOptions { + let mut meta = clone_mss(user_defined); + + if !user_tags.is_empty() { + meta.insert("xhttp.AmzObjectTagging".to_string(), user_tags.to_string()); + } + + MustReplicateOptions { + meta, + status, + op_type: op, + replication_request: opts.replication_request, + } +} + +#[derive(Default)] +struct ReplicatedInfos { + //replication_time_stamp: DateTime, + targets: Vec, +} + +// #[derive(Clone, Copy, PartialEq)] +// enum ReplicationStatus { +// Completed, +// InProgress, +// Pending, +// } + +impl ReplicatedInfos { + pub fn action(&self) -> ReplicationAction { + for target in &self.targets { + if target.is_empty() { + continue; + } + if target.prev_replication_status != ReplicationStatusType::Completed { + return target.replication_action.clone(); + } + } + ReplicationAction::ReplicateNone + } + + // fn completed_size(&self) -> i64 { + // let mut sz = 0; + // for t in &self.targets { + // if t.empty() { + // continue; + // } + // if t.replication_status == ReplicationStatusType::Completed + // && t.prev_replication_status != ReplicationStatusType::Completed + // { + // sz += t.size; + // } + // } + // sz + // } + + pub fn replication_resynced(&self) -> bool { + // 只要存在一个非 empty 且 replication_resynced 为 true 的目标,就返回 true + self.targets.iter().any(|t| !t.is_empty() && t.replication_resynced) + } + + /// 对应 Go 的 ReplicationStatusInternal + pub fn replication_status_internal(&self) -> String { + let mut buf = String::new(); + for t in &self.targets { + if t.is_empty() { + continue; + } + // 类似 fmt.Fprintf(b, "%s=%s;", t.Arn, t.ReplicationStatus.String()) + buf.push_str(&format!("{}={};", t.arn, t.replication_status.as_str())); + } + buf + } + + pub fn replication_status(&self) -> ReplicationStatusType { + // 如果没有任何目标,返回 Unknown(对应 Go 里 StatusType("")) + if self.targets.is_empty() { + return ReplicationStatusType::Unknown; + } + + // 统计已完成的数量 + let mut completed = 0; + + for t in &self.targets { + match t.replication_status { + ReplicationStatusType::Failed => { + // 只要有一个失败,整体就是 Failed + return ReplicationStatusType::Failed; + } + ReplicationStatusType::Completed => { + completed += 1; + } + _ => {} + } + } + + // 全部完成,则 Completed,否则 Pending + if completed == self.targets.len() { + ReplicationStatusType::Completed + } else { + ReplicationStatusType::Pending + } + } +} + +impl ReplicatedTargetInfo { + fn empty(&self) -> bool { + // Implement your logic to check if the target is empty + self.size == 0 + } +} + +pub async fn replicate_object(ri: ReplicateObjectInfo, object_api: Arc) { + let bucket = ri.bucket.clone(); + let obj = ri.name.clone(); + match get_replication_config(&bucket).await { + Ok((cfg, timestamp)) => { + info!( + "replicate object: {} {} and arn is: {}", + ri.name.clone(), + timestamp, + ri.target_arn.clone() + ); + //let arns = getvalidrule(config); + + //TODO:nslock + + let objectlayer = new_object_layer_fn(); + + let opts = ReplicationObjectOpts { + name: ri.name.clone(), + //ssec: crypto::is_ssec_encrypted(&mopts.meta), + ssec: false, + //replica: repl_status == ReplicationStatusType::Replica, + replica: ri.replication_status == ReplicationStatusType::Replica, + existing_object: ri.existing_obj_resync.must_resync(), + user_tags: None, + target_arn: Some(ri.target_arn.clone()), + version_id: ri.version_id.clone(), + delete_marker: false, + op_type: ReplicationType::from_u8(ri.op_type as u8).expect("REASON"), + }; + + let tgt_arns = cfg.filter_target_arns(&opts); + info!("target len:{}", tgt_arns.len()); + + let rinfos = Arc::new(Mutex::new(ReplicatedInfos::default())); + let cri = Arc::new(ri.clone()); + let mut tasks: Vec> = vec![]; + + for tgt_arn in tgt_arns { + let tgt = bucket_targets::get_bucket_target_client(&ri.bucket, &tgt_arn).await; + + if tgt.is_err() { + // repl_log_once_if(ctx, format!("failed to get target for bucket: {} arn: {}", bucket, tgt_arn), &tgt_arn).await; + // send_event(event_args { + // event_name: "ObjectReplicationNotTracked".to_string(), + // bucket_name: bucket.to_string(), + // object: ri.to_object_info(), + // user_agent: "Internal: [Replication]".to_string(), + // host: global_local_node_name.to_string(), + // }).await; + continue; + } + + let tgt = tgt.unwrap(); + let rinfos_clone = Arc::clone(&rinfos); + let lcri = Arc::clone(&cri); + let task = task::spawn(async move { + warn!("async task"); + let mut tgt_info: ReplicatedTargetInfo = Default::default(); + if lcri.op_type as u8 == ReplicationType::ObjectReplicationType.as_u8() { + warn!("object replication and arn is {}", tgt.arn.clone()); + // all incoming calls go through optimized path.`o` + + tgt_info = lcri.replicate_object(&tgt, tgt.arn.clone()).await; + } else { + warn!("async task"); + // tgt_info = ri.replicate_all(object_api, &tgt).await; + } + + let mut rinfos_locked = rinfos_clone.lock().await; + rinfos_locked.targets.push(tgt_info); + }); + + tasks.push(task); + } + //futures::future::join_all(tasks); + futures::future::join_all(tasks).await; + + let mut rs = rinfos.lock().await; + let replication_status = rs.replication_status(); + //rinfos + let new_repl_status_internal = rs.replication_status_internal(); + // ri.to_object_info() 假设... + warn!("{} and {}", new_repl_status_internal, ri.replication_status_internal); + let obj_info = ri.to_object_info(); + if ri.replication_status_internal != new_repl_status_internal || rs.replication_resynced() { + warn!("save meta"); + let mut eval_metadata = HashMap::new(); + + eval_metadata.insert( + format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status"), + new_repl_status_internal.clone(), + ); + eval_metadata.insert( + format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp"), + Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Nanos, true), + ); + eval_metadata.insert("x-amz-bucket-replication-status".to_string(), replication_status.as_str().to_owned()); + + for rinfo in &rs.targets { + // if !rinfo.resync_timestamp.is_empty() { + // eval_metadata.insert( + // format!("x-minio-replication-reset-status-{}", rinfo.arn), + // rinfo.resync_timestamp.clone(), + // ); + // } + } + + if !ri.user_tags.is_empty() { + eval_metadata.insert("x-amz-tagging".to_string(), ri.user_tags.clone()); + } + + let popts = ObjectOptions { + //mod_time: Some(ri.mod_time), + mod_time: None, + version_id: Some(ri.version_id.clone()), + eval_metadata: Some(eval_metadata), + ..Default::default() + }; + + //let uobj_info = ; + match object_api.put_object_metadata(&ri.bucket, &ri.name, &popts).await { + Ok(info) => { + info!("Put metadata success: {:?}", info); + // 你可以访问 info 字段,例如 info.size, info.last_modified 等 + } + Err(e) => { + error!("Failed to put metadata: {}", e); + // 根据错误类型做不同处理 + // if let Some(CustomError::NotFound) = e.downcast_ref::() { ... } + } + } + + // if !uobj_info.name.is_empty() { + // obj_info = uobj_info; + // } + + let mut op_type = ReplicationType::MetadataReplicationType; + if rs.action() == ReplicationAction::ReplicateAll { + op_type = ReplicationType::ObjectReplicationType + } + + for rinfo in &mut rs.targets { + if rinfo.replication_status != rinfo.prev_replication_status { + //rinfo.op_type = Some(op_type.clone()); + //global_replication_stats::update(&bucket, rinfo); + } + } + debug!("op type: {:?}", op_type); + } + + // send_event(EventArgs { + // event_name: ri.event_name.clone(), + // bucket_name: bucket.into(), + // object: obj_info.clone(), + // user_agent: "Internal: [Replication]".into(), + // host: "local-node-name".into(), + // }); + + // 失败重试 + // if rs.replication_status() != ReplicationStatusType::Completed { + // //ri.op_type = "HealReplicationType".into(); + // ri.event_type = "ReplicateMRF".into(); + // //ri.replication_status_internal = rinfos.replication_status_internal(); + // ri.retry_count += 1; + // // global_replication_pool.get().queue_mrf_save(ri.to_mrf_entry()); + // } + } + Err(err) => { + println!("Failed to get replication config: {:?}", err); + } + } +} diff --git a/ecstore/src/cmd/bucket_replication_utils.rs b/ecstore/src/cmd/bucket_replication_utils.rs new file mode 100644 index 00000000..7c456427 --- /dev/null +++ b/ecstore/src/cmd/bucket_replication_utils.rs @@ -0,0 +1,55 @@ +use std::collections::HashMap; +use chrono::{DateTime, Utc}; + +// Representation of the replication status +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum StatusType { + Pending, + Completed, + CompletedLegacy, + Failed, + Replica, +} + +// Representation of version purge status type (customize as needed) +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum VersionPurgeStatusType { + Pending, + Completed, + Failed, +} + +// ReplicationState struct definition +#[derive(Debug, Clone)] +pub struct ReplicationState { + // Timestamp when the last replica update was received + pub replica_time_stamp: DateTime, + + // Replica status + pub replica_status: StatusType, + + // Represents DeleteMarker replication state + pub delete_marker: bool, + + // Timestamp when the last replication activity happened + pub replication_time_stamp: DateTime, + + // Stringified representation of all replication activity + pub replication_status_internal: String, + + // Stringified representation of all version purge statuses + // Example format: "arn1=PENDING;arn2=COMPLETED;" + pub version_purge_status_internal: String, + + // Stringified representation of replication decision for each target + pub replicate_decision_str: String, + + // Map of ARN -> replication status for ongoing replication activity + pub targets: HashMap, + + // Map of ARN -> VersionPurgeStatus for all the targets + pub purge_targets: HashMap, + + // Map of ARN -> stringified reset id and timestamp for all the targets + pub reset_statuses_map: HashMap, +} \ No newline at end of file diff --git a/ecstore/src/cmd/bucket_targets.rs b/ecstore/src/cmd/bucket_targets.rs new file mode 100644 index 00000000..7920c870 --- /dev/null +++ b/ecstore/src/cmd/bucket_targets.rs @@ -0,0 +1,867 @@ +#![allow(unused_variables)] +#![allow(dead_code)] +use crate::{ + StorageAPI, + bucket::{metadata_sys, target::BucketTarget}, + endpoints::Node, + peer::{PeerS3Client, RemotePeerS3Client}, +}; +use crate::{ + bucket::{self, target::BucketTargets}, + new_object_layer_fn, peer, store_api, +}; +//use tokio::sync::RwLock; +use aws_sdk_s3::Client as S3Client; +use chrono::Utc; +use lazy_static::lazy_static; +use std::sync::Arc; +use std::{ + collections::HashMap, + time::{Duration, SystemTime}, +}; +use thiserror::Error; +use tokio::sync::RwLock; + +pub struct TClient { + pub s3cli: S3Client, + pub remote_peer_client: peer::RemotePeerS3Client, + pub arn: String, +} +impl TClient { + pub fn new(s3cli: S3Client, remote_peer_client: RemotePeerS3Client, arn: String) -> Self { + TClient { + s3cli, + remote_peer_client, + arn, + } + } +} + +pub struct EpHealth { + pub endpoint: String, + pub scheme: String, + pub online: bool, + pub last_online: SystemTime, + pub last_hc_at: SystemTime, + pub offline_duration: Duration, + pub latency: LatencyStat, // Assuming LatencyStat is a custom struct +} + +impl EpHealth { + pub fn new( + endpoint: String, + scheme: String, + online: bool, + last_online: SystemTime, + last_hc_at: SystemTime, + offline_duration: Duration, + latency: LatencyStat, + ) -> Self { + EpHealth { + endpoint, + scheme, + online, + last_online, + last_hc_at, + offline_duration, + latency, + } + } +} + +pub struct LatencyStat { + // Define the fields of LatencyStat as per your requirements +} + +pub struct ArnTarget { + client: TargetClient, + last_refresh: chrono::DateTime, +} +impl ArnTarget { + pub fn new(bucket: String, endpoint: String, ak: String, sk: String) -> Self { + Self { + client: TargetClient { + bucket, + storage_class: "STANDRD".to_string(), + disable_proxy: false, + health_check_duration: Duration::from_secs(100), + endpoint, + reset_id: "0".to_string(), + replicate_sync: false, + secure: false, + arn: "".to_string(), + client: reqwest::Client::new(), + ak, + sk, + }, + last_refresh: Utc::now(), + } + } +} + +// pub fn get_s3client_from_para( +// ak: &str, +// sk: &str, +// url: &str, +// _region: &str, +// ) -> Result> { +// let credentials = Credentials::new(ak, sk, None, None, ""); +// let region = Region::new("us-east-1".to_string()); + +// let config = Config::builder() +// .region(region) +// .endpoint_url(url.to_string()) +// .credentials_provider(credentials) +// .behavior_version(BehaviorVersion::latest()) // Adjust as necessary +// .build(); +// Ok(S3Client::from_conf(config)) +// } + +pub struct BucketTargetSys { + arn_remote_map: Arc>>, + targets_map: Arc>>>, + hc: HashMap, + //store:Option>, +} + +lazy_static! { + pub static ref GLOBAL_Bucket_Target_Sys: std::sync::OnceLock = BucketTargetSys::new().into(); +} + +//#[derive(Debug)] +// pub enum SetTargetError { +// NotFound, +// } + +pub async fn get_bucket_target_client(bucket: &str, arn: &str) -> Result { + if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() { + sys.get_remote_target_client2(arn).await + } else { + Err(SetTargetError::TargetNotFound(bucket.to_string())) + } +} + +#[derive(Debug)] +pub struct BucketRemoteTargetNotFound { + pub bucket: String, +} + +pub async fn init_bucket_targets(bucket: &str, meta: Arc) { + println!("140 {}", bucket); + if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() { + if let Some(tgts) = meta.bucket_target_config.clone() { + for tgt in tgts.targets { + warn!("ak and sk is:{:?}", tgt.credentials); + let _ = sys.set_target(bucket, &tgt, false, true).await; + //sys.targets_map. + } + } + } +} + +pub async fn remove_bucket_target(bucket: &str, arn_str: &str) { + if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() { + let _ = sys.remove_target(bucket, arn_str).await; + } +} + +impl Default for BucketTargetSys { + fn default() -> Self { + Self::new() + } +} + +impl BucketTargetSys { + pub fn new() -> Self { + BucketTargetSys { + arn_remote_map: Arc::new(RwLock::new(HashMap::new())), + targets_map: Arc::new(RwLock::new(HashMap::new())), + hc: HashMap::new(), + } + } + + pub async fn list_bucket_targets(&self, bucket: &str) -> Result { + let targets_map = self.targets_map.read().await; + if let Some(targets) = targets_map.get(bucket) { + Ok(BucketTargets { + targets: targets.clone(), + }) + } else { + Err(BucketRemoteTargetNotFound { + bucket: bucket.to_string(), + }) + } + } + + pub async fn list_targets(&self, bucket: Option<&str>, _arn_type: Option<&str>) -> Vec { + let _ = _arn_type; + //let health_stats = self.health_stats(); + + let mut targets = Vec::new(); + + if let Some(bucket_name) = bucket { + if let Ok(ts) = self.list_bucket_targets(bucket_name).await { + for t in ts.targets { + //if arn_type.map_or(true, |arn| t.target_type == arn) { + //if let Some(hs) = health_stats.get(&t.url().host) { + // t.total_downtime = hs.offline_duration; + // t.online = hs.online; + // t.last_online = hs.last_online; + // t.latency = LatencyStat { + // curr: hs.latency.curr, + // avg: hs.latency.avg, + // max: hs.latency.peak, + // }; + //} + targets.push(t.clone()); + //} + } + } + return targets; + } + + // Locking and iterating over all targets in the system + let targets_map = self.targets_map.read().await; + for tgts in targets_map.values() { + for t in tgts { + //if arn_type.map_or(true, |arn| t.target_type == arn) { + // if let Some(hs) = health_stats.get(&t.url().host) { + // t.total_downtime = hs.offline_duration; + // t.online = hs.online; + // t.last_online = hs.last_online; + // t.latency = LatencyStat { + // curr: hs.latency.curr, + // avg: hs.latency.avg, + // max: hs.latency.peak, + // }; + // } + targets.push(t.clone()); + //} + } + } + + targets + } + + pub async fn remove_target(&self, bucket: &str, arn_str: &str) -> Result<(), SetTargetError> { + //to do need lock; + let mut targets_map = self.targets_map.write().await; + let tgts = targets_map.get(bucket); + let mut arn_remotes_map = self.arn_remote_map.write().await; + if tgts.is_none() { + //Err(SetTargetError::TargetNotFound(bucket.to_string())); + return Ok(()); + } + + let tgts = tgts.unwrap(); // 安全解引用 + let mut targets = Vec::with_capacity(tgts.len()); + let mut found = false; + + // 遍历 targets,找出不匹配的 ARN + for tgt in tgts { + if tgt.arn != Some(arn_str.to_string()) { + targets.push(tgt.clone()); // 克隆符合条件的项 + } else { + found = true; // 找到匹配的 ARN + } + } + + // 如果没有找到匹配的 ARN,则返回错误 + if !found { + return Ok(()); + } + + // 更新 targets_map + targets_map.insert(bucket.to_string(), targets); + arn_remotes_map.remove(arn_str); + + let targets = self.list_targets(Some(bucket), None).await; + println!("targets is {}", targets.len()); + match serde_json::to_vec(&targets) { + Ok(json) => { + let _ = metadata_sys::update(bucket, "bucket-targets.json", json).await; + } + Err(e) => { + println!("序列化失败{}", e); + } + } + + Ok(()) + } + + pub async fn get_remote_arn(&self, bucket: &str, target: Option<&BucketTarget>, depl_id: &str) -> (Option, bool) { + if target.is_none() { + return (None, false); + } + + let target = target.unwrap(); + + let targets_map = self.targets_map.read().await; + + // 获取锁以访问 arn_remote_map + let mut _arn_remotes_map = self.arn_remote_map.read().await; + if let Some(tgts) = targets_map.get(bucket) { + for tgt in tgts { + if tgt.type_ == target.type_ + && tgt.target_bucket == target.target_bucket + && tgt.endpoint == target.endpoint + && tgt.credentials.as_ref().unwrap().access_key == target.credentials.as_ref().unwrap().access_key + { + return (tgt.arn.clone(), true); + } + } + } + + // if !target.type_.is_valid() { + // return (None, false); + // } + + println!("generate_arn"); + + (Some(generate_arn(target.clone(), depl_id.to_string())), false) + } + + pub async fn get_remote_target_client2(&self, arn: &str) -> Result { + let map = self.arn_remote_map.read().await; + info!("get remote target client and arn is: {}", arn); + if let Some(value) = map.get(arn) { + let mut x = value.client.clone(); + x.arn = arn.to_string(); + Ok(x) + } else { + error!("not find target"); + Err(SetTargetError::TargetNotFound(arn.to_string())) + } + } + + // pub async fn get_remote_target_client(&self, _tgt: &BucketTarget) -> Result { + // // Mocked implementation for obtaining a remote client + // let tcli = TargetClient { + // bucket: _tgt.target_bucket.clone(), + // storage_class: "STANDRD".to_string(), + // disable_proxy: false, + // health_check_duration: Duration::from_secs(100), + // endpoint: _tgt.endpoint.clone(), + // reset_id: "0".to_string(), + // replicate_sync: false, + // secure: false, + // arn: "".to_string(), + // client: reqwest::Client::new(), + // ak: _tgt. + + // }; + // Ok(tcli) + // } + // pub async fn get_remote_target_client_with_bucket(&self, _bucket: String) -> Result { + // // Mocked implementation for obtaining a remote client + // let tcli = TargetClient { + // bucket: _tgt.target_bucket.clone(), + // storage_class: "STANDRD".to_string(), + // disable_proxy: false, + // health_check_duration: Duration::from_secs(100), + // endpoint: _tgt.endpoint.clone(), + // reset_id: "0".to_string(), + // replicate_sync: false, + // secure: false, + // arn: "".to_string(), + // client: reqwest::Client::new(), + // }; + // Ok(tcli) + // } + + async fn local_is_bucket_versioned(&self, _bucket: &str) -> bool { + let Some(store) = new_object_layer_fn() else { + return false; + }; + //store.get_bucket_info(bucket, opts) + + // let binfo:BucketInfo = store + // .get_bucket_info(bucket, &ecstore::store_api::BucketOptions::default()).await; + match store.get_bucket_info(_bucket, &store_api::BucketOptions::default()).await { + Ok(info) => { + println!("Bucket Info: {:?}", info); + info.versionning + } + Err(err) => { + eprintln!("Error: {:?}", err); + false + } + } + } + + async fn is_bucket_versioned(&self, _bucket: &str) -> bool { + true + // let url_str = "http://127.0.0.1:9001"; + + // // 转换为 Url 类型 + // let parsed_url = url::Url::parse(url_str).unwrap(); + + // let node = Node { + // url: parsed_url, + // pools: vec![], + // is_local: false, + // grid_host: "".to_string(), + // }; + // let cli = ecstore::peer::RemotePeerS3Client::new(Some(node), None); + + // match cli.get_bucket_info(_bucket, &ecstore::store_api::BucketOptions::default()).await + // { + // Ok(info) => { + // println!("Bucket Info: {:?}", info); + // info.versionning + // } + // Err(err) => { + // eprintln!("Error: {:?}", err); + // return false; + // } + // } + } + + pub async fn set_target(&self, bucket: &str, tgt: &BucketTarget, update: bool, fromdisk: bool) -> Result<(), SetTargetError> { + // if !tgt.type_.is_valid() && !update { + // return Err(SetTargetError::InvalidTargetType(bucket.to_string())); + // } + + //let client = self.get_remote_target_client(tgt).await?; + if tgt.type_ == Some("replication".to_string()) && !fromdisk { + let versioning_config = self.local_is_bucket_versioned(bucket).await; + if !versioning_config { + // println!("111111111"); + return Err(SetTargetError::TargetNotVersioned(bucket.to_string())); + } + } + + let url_str = format!("http://{}", tgt.endpoint.clone()); + + println!("url str is {}", url_str); + // 转换为 Url 类型 + let parsed_url = url::Url::parse(&url_str).unwrap(); + + let node = Node { + url: parsed_url, + pools: vec![], + is_local: false, + grid_host: "".to_string(), + }; + + let cli = peer::RemotePeerS3Client::new(Some(node), None); + + match cli + .get_bucket_info(&tgt.target_bucket, &store_api::BucketOptions::default()) + .await + { + Ok(info) => { + println!("Bucket Info: {:?}", info); + if !info.versionning { + println!("2222222222 {}", info.versionning); + return Err(SetTargetError::TargetNotVersioned(tgt.target_bucket.to_string())); + } + } + Err(err) => { + println!("remote bucket 369 is:{}", tgt.target_bucket); + eprintln!("Error: {:?}", err); + return Err(SetTargetError::SourceNotVersioned(tgt.target_bucket.to_string())); + } + } + + //if tgt.target_type == BucketTargetType::ReplicationService { + // Check if target is a RustFS server and alive + // let hc_result = tokio::time::timeout(Duration::from_secs(3), client.health_check(&tgt.endpoint)).await; + // match hc_result { + // Ok(Ok(true)) => {} // Server is alive + // Ok(Ok(false)) | Ok(Err(_)) | Err(_) => { + // return Err(SetTargetError::HealthCheckFailed(tgt.target_bucket.clone())); + // } + // } + + //Lock and update target maps + let mut targets_map = self.targets_map.write().await; + let mut arn_remotes_map = self.arn_remote_map.write().await; + + let targets = targets_map.entry(bucket.to_string()).or_default(); + let mut found = false; + + for existing_target in targets.iter_mut() { + println!("418 exist:{}", existing_target.source_bucket.clone()); + if existing_target.type_ == tgt.type_ { + if existing_target.arn == tgt.arn { + if !update { + return Err(SetTargetError::TargetAlreadyExists(existing_target.target_bucket.clone())); + } + *existing_target = tgt.clone(); + found = true; + break; + } + + if existing_target.endpoint == tgt.endpoint { + println!("endpoint is same:{}", tgt.endpoint.clone()); + return Err(SetTargetError::TargetAlreadyExists(existing_target.target_bucket.clone())); + } + } + } + + if !found && !update { + println!("437 exist:{}", tgt.arn.clone().unwrap()); + targets.push(tgt.clone()); + } + let arntgt: ArnTarget = ArnTarget::new( + tgt.target_bucket.clone(), + tgt.endpoint.clone(), + tgt.credentials.clone().unwrap().access_key.clone(), + tgt.credentials.clone().unwrap().secret_key, + ); + + arn_remotes_map.insert(tgt.arn.clone().unwrap().clone(), arntgt); + //self.update_bandwidth_limit(bucket, &tgt.arn, tgt.bandwidth_limit).await; + + Ok(()) + } +} + +#[derive(Clone)] +pub struct TargetClient { + pub client: reqwest::Client, // Using reqwest HTTP client + pub health_check_duration: Duration, + pub bucket: String, // Remote bucket target + pub replicate_sync: bool, + pub storage_class: String, // Storage class on remote + pub disable_proxy: bool, + pub arn: String, // ARN to uniquely identify remote target + pub reset_id: String, + pub endpoint: String, + pub secure: bool, + pub ak: String, + pub sk: String, +} + +impl TargetClient { + #[allow(clippy::too_many_arguments)] + pub fn new( + client: reqwest::Client, + health_check_duration: Duration, + bucket: String, + replicate_sync: bool, + storage_class: String, + disable_proxy: bool, + arn: String, + reset_id: String, + endpoint: String, + secure: bool, + ak: String, + sk: String, + ) -> Self { + TargetClient { + client, + health_check_duration, + bucket, + replicate_sync, + storage_class, + disable_proxy, + arn, + reset_id, + endpoint, + secure, + ak, + sk, + } + } + pub async fn bucket_exists(&self, _bucket: &str) -> Result { + Ok(true) // Mocked implementation + } +} +use tracing::{error, info, warn}; +use uuid::Uuid; + +#[derive(Debug, Clone)] +pub struct VersioningConfig { + pub enabled: bool, +} + +impl VersioningConfig { + pub fn is_enabled(&self) -> bool { + self.enabled + } +} + +#[derive(Debug)] +pub struct Client; + +impl Client { + pub async fn bucket_exists(&self, _bucket: &str) -> Result { + Ok(true) // Mocked implementation + } + + pub async fn get_bucket_versioning(&self, _bucket: &str) -> Result { + Ok(VersioningConfig { enabled: true }) + } + + pub async fn health_check(&self, _endpoint: &str) -> Result { + Ok(true) // Mocked health check + } +} + +#[derive(Debug, PartialEq)] +pub struct ServiceType(String); + +impl ServiceType { + pub fn is_valid(&self) -> bool { + !self.0.is_empty() // 根据需求添加具体的验证逻辑 + } +} + +#[derive(Debug, PartialEq)] +pub struct ARN { + pub arn_type: String, + pub id: String, + pub region: String, + pub bucket: String, +} + +impl ARN { + /// 检查 ARN 是否为空 + pub fn is_empty(&self) -> bool { + //!self.arn_type.is_valid() + false + } + + /// 从字符串解析 ARN + pub fn parse(s: &str) -> Result { + // ARN 必须是格式 arn:rustfs:::: + if !s.starts_with("arn:rustfs:") { + return Err(format!("Invalid ARN {}", s)); + } + + let tokens: Vec<&str> = s.split(':').collect(); + if tokens.len() != 6 || tokens[4].is_empty() || tokens[5].is_empty() { + return Err(format!("Invalid ARN {}", s)); + } + + Ok(ARN { + arn_type: tokens[2].to_string(), + region: tokens[3].to_string(), + id: tokens[4].to_string(), + bucket: tokens[5].to_string(), + }) + } +} + +// 实现 `Display` trait,使得可以直接使用 `format!` 或 `{}` 输出 ARN +impl std::fmt::Display for ARN { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "arn:rustfs:{}:{}:{}:{}", self.arn_type, self.region, self.id, self.bucket) + } +} + +fn must_get_uuid() -> String { + Uuid::new_v4().to_string() + // match Uuid::new_v4() { + // Ok(uuid) => uuid.to_string(), + // Err(err) => { + // error!("Critical error: {}", err); + // panic!("Failed to generate UUID: {}", err); // Ensures similar behavior as Go's logger.CriticalIf + // } + // } +} +fn generate_arn(target: BucketTarget, depl_id: String) -> String { + let mut uuid: String = depl_id; + if uuid.is_empty() { + uuid = must_get_uuid(); + } + + let arn: ARN = ARN { + arn_type: target.type_.unwrap(), + id: (uuid), + region: "us-east-1".to_string(), + bucket: (target.target_bucket), + }; + arn.to_string() +} + +// use std::collections::HashMap; +// use std::sync::{Arc, Mutex, RwLock}; +// use std::time::Duration; +// use tokio::time::timeout; +// use tokio::sync::RwLock as AsyncRwLock; +// use serde::Deserialize; +// use thiserror::Error; + +// #[derive(Debug, Clone, PartialEq)] +// pub enum BucketTargetType { +// ReplicationService, +// // Add other service types as needed +// } + +// impl BucketTargetType { +// pub fn is_valid(&self) -> bool { +// matches!(self, BucketTargetType::ReplicationService) +// } +// } + +// #[derive(Debug, Clone)] +// pub struct BucketTarget { +// pub arn: String, +// pub target_bucket: String, +// pub endpoint: String, +// pub credentials: Credentials, +// pub secure: bool, +// pub bandwidth_limit: Option, +// pub target_type: BucketTargetType, +// } + +// #[derive(Debug, Clone)] +// pub struct Credentials { +// pub access_key: String, +// pub secret_key: String, +// } + +// #[derive(Debug)] +// pub struct BucketTargetSys { +// targets_map: Arc>>>, +// arn_remotes_map: Arc>>, +// } + +// impl BucketTargetSys { +// pub fn new() -> Self { +// Self { +// targets_map: Arc::new(RwLock::new(HashMap::new())), +// arn_remotes_map: Arc::new(Mutex::new(HashMap::new())), +// } +// } + +// pub async fn set_target( +// &self, +// bucket: &str, +// tgt: &BucketTarget, +// update: bool, +// ) -> Result<(), SetTargetError> { +// if !tgt.target_type.is_valid() && !update { +// return Err(SetTargetError::InvalidTargetType(bucket.to_string())); +// } + +// let client = self.get_remote_target_client(tgt).await?; + +// // Validate if target credentials are OK +// let exists = client.bucket_exists(&tgt.target_bucket).await?; +// if !exists { +// return Err(SetTargetError::TargetNotFound(tgt.target_bucket.clone())); +// } + +// if tgt.target_type == BucketTargetType::ReplicationService { +// if !self.is_bucket_versioned(bucket).await { +// return Err(SetTargetError::SourceNotVersioned(bucket.to_string())); +// } + +// let versioning_config = client.get_bucket_versioning(&tgt.target_bucket).await?; +// if !versioning_config.is_enabled() { +// return Err(SetTargetError::TargetNotVersioned(tgt.target_bucket.clone())); +// } +// } + +// // Check if target is a RustFS server and alive +// let hc_result = timeout(Duration::from_secs(3), client.health_check(&tgt.endpoint)).await; +// match hc_result { +// Ok(Ok(true)) => {} // Server is alive +// Ok(Ok(false)) | Ok(Err(_)) | Err(_) => { +// return Err(SetTargetError::HealthCheckFailed(tgt.target_bucket.clone())); +// } +// } + +// // Lock and update target maps +// let mut targets_map = self.targets_map.write().await; +// let mut arn_remotes_map = self.arn_remotes_map.lock().unwrap(); + +// let targets = targets_map.entry(bucket.to_string()).or_default(); +// let mut found = false; + +// for existing_target in targets.iter_mut() { +// if existing_target.target_type == tgt.target_type { +// if existing_target.arn == tgt.arn { +// if !update { +// return Err(SetTargetError::TargetAlreadyExists(existing_target.target_bucket.clone())); +// } +// *existing_target = tgt.clone(); +// found = true; +// break; +// } + +// if existing_target.endpoint == tgt.endpoint { +// return Err(SetTargetError::TargetAlreadyExists(existing_target.target_bucket.clone())); +// } +// } +// } + +// if !found && !update { +// targets.push(tgt.clone()); +// } + +// arn_remotes_map.insert(tgt.arn.clone(), ArnTarget { client }); +// self.update_bandwidth_limit(bucket, &tgt.arn, tgt.bandwidth_limit).await; + +// Ok(()) +// } + +// async fn get_remote_target_client(&self, tgt: &BucketTarget) -> Result { +// // Mocked implementation for obtaining a remote client +// Ok(Client {}) +// } + +// async fn is_bucket_versioned(&self, bucket: &str) -> bool { +// // Mocked implementation for checking if a bucket is versioned +// true +// } + +// async fn update_bandwidth_limit( +// &self, +// bucket: &str, +// arn: &str, +// limit: Option, +// ) { +// // Mocked implementation for updating bandwidth limits +// } +// } + +// #[derive(Debug)] +// pub struct Client; + +// impl Client { +// pub async fn bucket_exists(&self, _bucket: &str) -> Result { +// Ok(true) // Mocked implementation +// } + +// pub async fn get_bucket_versioning( +// &self, +// _bucket: &str, +// ) -> Result { +// Ok(VersioningConfig { enabled: true }) +// } + +// pub async fn health_check(&self, _endpoint: &str) -> Result { +// Ok(true) // Mocked health check +// } +// } + +// #[derive(Debug, Clone)] +// pub struct ArnTarget { +// pub client: Client, +// } + +#[derive(Debug, Error)] +pub enum SetTargetError { + #[error("Invalid target type for bucket {0}")] + InvalidTargetType(String), + + #[error("Target bucket {0} not found")] + TargetNotFound(String), + + #[error("Source bucket {0} is not versioned")] + SourceNotVersioned(String), + + #[error("Target bucket {0} is not versioned")] + TargetNotVersioned(String), + + #[error("Health check failed for bucket {0}")] + HealthCheckFailed(String), + + #[error("Target bucket {0} already exists")] + TargetAlreadyExists(String), +} diff --git a/ecstore/src/cmd/bucketreplicationhandler.rs b/ecstore/src/cmd/bucketreplicationhandler.rs new file mode 100644 index 00000000..e69de29b diff --git a/ecstore/src/cmd/mod.rs b/ecstore/src/cmd/mod.rs new file mode 100644 index 00000000..da274f39 --- /dev/null +++ b/ecstore/src/cmd/mod.rs @@ -0,0 +1,2 @@ +pub mod bucket_replication; +pub mod bucket_targets; diff --git a/ecstore/src/disk/local.rs b/ecstore/src/disk/local.rs index be476385..592b34f0 100644 --- a/ecstore/src/disk/local.rs +++ b/ecstore/src/disk/local.rs @@ -2207,7 +2207,7 @@ impl DiskAPI for LocalDisk { for info in obj_infos.iter() { let done = ScannerMetrics::time(ScannerMetric::ApplyVersion); let sz: usize; - (obj_deleted, sz) = item.apply_actions(info, &size_s).await; + (obj_deleted, sz) = item.apply_actions(info, &mut size_s).await; done(); if obj_deleted { diff --git a/ecstore/src/file_meta.rs b/ecstore/src/file_meta.rs index 202c8b4f..3a87d3e1 100644 --- a/ecstore/src/file_meta.rs +++ b/ecstore/src/file_meta.rs @@ -1,3404 +1,3384 @@ -// use crate::disk::FileInfoVersions; -// use crate::file_meta_inline::InlineData; -// use crate::store_api::RawFileInfo; -// use crate::error::StorageError; -// use crate::{ -// disk::error::DiskError, -// store_api::{ErasureInfo, FileInfo, ObjectPartInfo, ERASURE_ALGORITHM}, -// }; -// use byteorder::ByteOrder; -// use common::error::{Error, Result}; -// use rmp::Marker; -// use serde::{Deserialize, Serialize}; -// use std::cmp::Ordering; -// use std::fmt::Display; -// use std::io::{self, Read, Write}; -// use std::{collections::HashMap, io::Cursor}; -// use time::OffsetDateTime; -// use tokio::io::AsyncRead; -// use tracing::{error, warn}; -// use uuid::Uuid; -// use xxhash_rust::xxh64; - -// // XL header specifies the format -// pub static XL_FILE_HEADER: [u8; 4] = [b'X', b'L', b'2', b' ']; -// // pub static XL_FILE_VERSION_CURRENT: [u8; 4] = [0; 4]; - -// // Current version being written. -// // static XL_FILE_VERSION: [u8; 4] = [1, 0, 3, 0]; -// static XL_FILE_VERSION_MAJOR: u16 = 1; -// static XL_FILE_VERSION_MINOR: u16 = 3; -// static XL_HEADER_VERSION: u8 = 3; -// static XL_META_VERSION: u8 = 2; -// static XXHASH_SEED: u64 = 0; - -// const XL_FLAG_FREE_VERSION: u8 = 1 << 0; -// // const XL_FLAG_USES_DATA_DIR: u8 = 1 << 1; -// const _XL_FLAG_INLINE_DATA: u8 = 1 << 2; - -// const META_DATA_READ_DEFAULT: usize = 4 << 10; -// const MSGP_UINT32_SIZE: usize = 5; - -// // type ScanHeaderVersionFn = Box Result<()>>; - -// #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)] -// pub struct FileMeta { -// pub versions: Vec, -// pub data: InlineData, // TODO: xlMetaInlineData -// pub meta_ver: u8, -// } - -// impl FileMeta { -// pub fn new() -> Self { -// Self { -// meta_ver: XL_META_VERSION, -// data: InlineData::new(), -// ..Default::default() -// } -// } - -// // isXL2V1Format -// #[tracing::instrument(level = "debug", skip_all)] -// pub fn is_xl2_v1_format(buf: &[u8]) -> bool { -// !matches!(Self::check_xl2_v1(buf), Err(_e)) -// } - -// #[tracing::instrument(level = "debug", skip_all)] -// pub fn load(buf: &[u8]) -> Result { -// let mut xl = FileMeta::default(); -// xl.unmarshal_msg(buf)?; - -// Ok(xl) -// } - -// // check_xl2_v1 读 xl 文件头,返回后续内容,版本信息 -// // checkXL2V1 -// #[tracing::instrument(level = "debug", skip_all)] -// pub fn check_xl2_v1(buf: &[u8]) -> Result<(&[u8], u16, u16)> { -// if buf.len() < 8 { -// return Err(Error::msg("xl file header not exists")); -// } - -// if buf[0..4] != XL_FILE_HEADER { -// return Err(Error::msg("xl file header err")); -// } - -// let major = byteorder::LittleEndian::read_u16(&buf[4..6]); -// let minor = byteorder::LittleEndian::read_u16(&buf[6..8]); -// if major > XL_FILE_VERSION_MAJOR { -// return Err(Error::msg("xl file version err")); -// } - -// Ok((&buf[8..], major, minor)) -// } - -// // 固定 u32 -// pub fn read_bytes_header(buf: &[u8]) -> Result<(u32, &[u8])> { -// if buf.len() < 5 { -// return Err(Error::new(io::Error::new( -// io::ErrorKind::UnexpectedEof, -// format!("Buffer too small: {} bytes, need at least 5", buf.len()), -// ))); -// } - -// let (mut size_buf, _) = buf.split_at(5); - -// // 取 meta 数据,buf = crc + data -// let bin_len = rmp::decode::read_bin_len(&mut size_buf)?; - -// Ok((bin_len, &buf[5..])) -// } - -// pub fn unmarshal_msg(&mut self, buf: &[u8]) -> Result { -// let i = buf.len() as u64; +use crate::disk::FileInfoVersions; +use crate::error::StorageError; +use crate::file_meta_inline::InlineData; +use crate::store_api::RawFileInfo; +use crate::{ + disk::error::DiskError, + store_api::{ERASURE_ALGORITHM, ErasureInfo, FileInfo, ObjectPartInfo}, +}; +use byteorder::ByteOrder; +use common::error::{Error, Result}; +use rmp::Marker; +use serde::{Deserialize, Serialize}; +use std::cmp::Ordering; +use std::fmt::Display; +use std::io::{self, Read, Write}; +use std::{collections::HashMap, io::Cursor}; +use time::OffsetDateTime; +use tokio::io::AsyncRead; +use tracing::{error, warn}; +use uuid::Uuid; +use xxhash_rust::xxh64; + +// XL header specifies the format +pub static XL_FILE_HEADER: [u8; 4] = [b'X', b'L', b'2', b' ']; +// pub static XL_FILE_VERSION_CURRENT: [u8; 4] = [0; 4]; + +// Current version being written. +// static XL_FILE_VERSION: [u8; 4] = [1, 0, 3, 0]; +static XL_FILE_VERSION_MAJOR: u16 = 1; +static XL_FILE_VERSION_MINOR: u16 = 3; +static XL_HEADER_VERSION: u8 = 3; +static XL_META_VERSION: u8 = 2; +static XXHASH_SEED: u64 = 0; + +const XL_FLAG_FREE_VERSION: u8 = 1 << 0; +// const XL_FLAG_USES_DATA_DIR: u8 = 1 << 1; +const _XL_FLAG_INLINE_DATA: u8 = 1 << 2; + +const META_DATA_READ_DEFAULT: usize = 4 << 10; +const MSGP_UINT32_SIZE: usize = 5; + +// type ScanHeaderVersionFn = Box Result<()>>; + +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)] +pub struct FileMeta { + pub versions: Vec, + pub data: InlineData, // TODO: xlMetaInlineData + pub meta_ver: u8, +} + +impl FileMeta { + pub fn new() -> Self { + Self { + meta_ver: XL_META_VERSION, + data: InlineData::new(), + ..Default::default() + } + } + + // isXL2V1Format + #[tracing::instrument(level = "debug", skip_all)] + pub fn is_xl2_v1_format(buf: &[u8]) -> bool { + !matches!(Self::check_xl2_v1(buf), Err(_e)) + } + + #[tracing::instrument(level = "debug", skip_all)] + pub fn load(buf: &[u8]) -> Result { + let mut xl = FileMeta::default(); + xl.unmarshal_msg(buf)?; + + Ok(xl) + } + + // check_xl2_v1 读 xl 文件头,返回后续内容,版本信息 + // checkXL2V1 + #[tracing::instrument(level = "debug", skip_all)] + pub fn check_xl2_v1(buf: &[u8]) -> Result<(&[u8], u16, u16)> { + if buf.len() < 8 { + return Err(Error::msg("xl file header not exists")); + } + + if buf[0..4] != XL_FILE_HEADER { + return Err(Error::msg("xl file header err")); + } + + let major = byteorder::LittleEndian::read_u16(&buf[4..6]); + let minor = byteorder::LittleEndian::read_u16(&buf[6..8]); + if major > XL_FILE_VERSION_MAJOR { + return Err(Error::msg("xl file version err")); + } + + Ok((&buf[8..], major, minor)) + } + + // 固定 u32 + pub fn read_bytes_header(buf: &[u8]) -> Result<(u32, &[u8])> { + if buf.len() < 5 { + return Err(Error::new(io::Error::new( + io::ErrorKind::UnexpectedEof, + format!("Buffer too small: {} bytes, need at least 5", buf.len()), + ))); + } + + let (mut size_buf, _) = buf.split_at(5); + + // 取 meta 数据,buf = crc + data + let bin_len = rmp::decode::read_bin_len(&mut size_buf)?; + + Ok((bin_len, &buf[5..])) + } + + pub fn unmarshal_msg(&mut self, buf: &[u8]) -> Result { + let i = buf.len() as u64; -// // check version, buf = buf[8..] -// let (buf, _, _) = Self::check_xl2_v1(buf)?; + // check version, buf = buf[8..] + let (buf, _, _) = Self::check_xl2_v1(buf)?; -// let (mut size_buf, buf) = buf.split_at(5); + let (mut size_buf, buf) = buf.split_at(5); -// // 取 meta 数据,buf = crc + data -// let bin_len = rmp::decode::read_bin_len(&mut size_buf)?; + // 取 meta 数据,buf = crc + data + let bin_len = rmp::decode::read_bin_len(&mut size_buf)?; -// let (meta, buf) = buf.split_at(bin_len as usize); + let (meta, buf) = buf.split_at(bin_len as usize); -// let (mut crc_buf, buf) = buf.split_at(5); + let (mut crc_buf, buf) = buf.split_at(5); -// // crc check -// let crc = rmp::decode::read_u32(&mut crc_buf)?; -// let meta_crc = xxh64::xxh64(meta, XXHASH_SEED) as u32; + // crc check + let crc = rmp::decode::read_u32(&mut crc_buf)?; + let meta_crc = xxh64::xxh64(meta, XXHASH_SEED) as u32; -// if crc != meta_crc { -// return Err(Error::msg("xl file crc check failed")); -// } + if crc != meta_crc { + return Err(Error::msg("xl file crc check failed")); + } -// if !buf.is_empty() { -// self.data.update(buf); -// self.data.validate()?; -// } + if !buf.is_empty() { + self.data.update(buf); + self.data.validate()?; + } -// // 解析 meta -// if !meta.is_empty() { -// let (versions_len, _, meta_ver, meta) = Self::decode_xl_headers(meta)?; + // 解析 meta + if !meta.is_empty() { + let (versions_len, _, meta_ver, meta) = Self::decode_xl_headers(meta)?; -// // let (_, meta) = meta.split_at(read_size as usize); + // let (_, meta) = meta.split_at(read_size as usize); -// self.meta_ver = meta_ver; + self.meta_ver = meta_ver; -// self.versions = Vec::with_capacity(versions_len); + self.versions = Vec::with_capacity(versions_len); -// let mut cur: Cursor<&[u8]> = Cursor::new(meta); -// for _ in 0..versions_len { -// let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize; -// let start = cur.position() as usize; -// let end = start + bin_len; -// let header_buf = &meta[start..end]; + let mut cur: Cursor<&[u8]> = Cursor::new(meta); + for _ in 0..versions_len { + let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize; + let start = cur.position() as usize; + let end = start + bin_len; + let header_buf = &meta[start..end]; -// let mut ver = FileMetaShallowVersion::default(); -// ver.header.unmarshal_msg(header_buf)?; + let mut ver = FileMetaShallowVersion::default(); + ver.header.unmarshal_msg(header_buf)?; -// cur.set_position(end as u64); + cur.set_position(end as u64); -// let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize; -// let start = cur.position() as usize; -// let end = start + bin_len; -// let mut ver_meta_buf = &meta[start..end]; + let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize; + let start = cur.position() as usize; + let end = start + bin_len; + let mut ver_meta_buf = &meta[start..end]; -// ver_meta_buf.read_to_end(&mut ver.meta)?; + ver_meta_buf.read_to_end(&mut ver.meta)?; -// cur.set_position(end as u64); + cur.set_position(end as u64); -// self.versions.push(ver); -// } -// } + self.versions.push(ver); + } + } -// Ok(i) -// } + Ok(i) + } -// // decode_xl_headers 解析 meta 头,返回 (versions 数量,xl_header_version, xl_meta_version, 已读数据长度) -// #[tracing::instrument(level = "debug", skip_all)] -// fn decode_xl_headers(buf: &[u8]) -> Result<(usize, u8, u8, &[u8])> { -// let mut cur = Cursor::new(buf); + // decode_xl_headers 解析 meta 头,返回 (versions 数量,xl_header_version, xl_meta_version, 已读数据长度) + #[tracing::instrument(level = "debug", skip_all)] + fn decode_xl_headers(buf: &[u8]) -> Result<(usize, u8, u8, &[u8])> { + let mut cur = Cursor::new(buf); -// let header_ver: u8 = rmp::decode::read_int(&mut cur)?; + let header_ver: u8 = rmp::decode::read_int(&mut cur)?; -// if header_ver > XL_HEADER_VERSION { -// return Err(Error::msg("xl header version invalid")); -// } + if header_ver > XL_HEADER_VERSION { + return Err(Error::msg("xl header version invalid")); + } -// let meta_ver: u8 = rmp::decode::read_int(&mut cur)?; -// if meta_ver > XL_META_VERSION { -// return Err(Error::msg("xl meta version invalid")); -// } + let meta_ver: u8 = rmp::decode::read_int(&mut cur)?; + if meta_ver > XL_META_VERSION { + return Err(Error::msg("xl meta version invalid")); + } -// let versions_len: usize = rmp::decode::read_int(&mut cur)?; + let versions_len: usize = rmp::decode::read_int(&mut cur)?; -// Ok((versions_len, header_ver, meta_ver, &buf[cur.position() as usize..])) -// } + Ok((versions_len, header_ver, meta_ver, &buf[cur.position() as usize..])) + } -// fn decode_versions Result<()>>(buf: &[u8], versions: usize, mut fnc: F) -> Result<()> { -// let mut cur: Cursor<&[u8]> = Cursor::new(buf); + fn decode_versions Result<()>>(buf: &[u8], versions: usize, mut fnc: F) -> Result<()> { + let mut cur: Cursor<&[u8]> = Cursor::new(buf); -// for i in 0..versions { -// let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize; -// let start = cur.position() as usize; -// let end = start + bin_len; -// let header_buf = &buf[start..end]; + for i in 0..versions { + let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize; + let start = cur.position() as usize; + let end = start + bin_len; + let header_buf = &buf[start..end]; -// cur.set_position(end as u64); + cur.set_position(end as u64); -// let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize; -// let start = cur.position() as usize; -// let end = start + bin_len; -// let ver_meta_buf = &buf[start..end]; + let bin_len = rmp::decode::read_bin_len(&mut cur)? as usize; + let start = cur.position() as usize; + let end = start + bin_len; + let ver_meta_buf = &buf[start..end]; -// cur.set_position(end as u64); + cur.set_position(end as u64); -// if let Err(err) = fnc(i, header_buf, ver_meta_buf) { -// if let Some(e) = err.downcast_ref::() { -// if e == &StorageError::DoneForNow { -// return Ok(()); -// } -// } + if let Err(err) = fnc(i, header_buf, ver_meta_buf) { + if let Some(e) = err.downcast_ref::() { + if e == &StorageError::DoneForNow { + return Ok(()); + } + } -// return Err(err); -// } -// } + return Err(err); + } + } -// Ok(()) -// } + Ok(()) + } -// pub fn is_latest_delete_marker(buf: &[u8]) -> bool { -// let header = Self::decode_xl_headers(buf).ok(); -// if let Some((versions, _hdr_v, _meta_v, meta)) = header { -// if versions == 0 { -// return false; -// } + pub fn is_latest_delete_marker(buf: &[u8]) -> bool { + let header = Self::decode_xl_headers(buf).ok(); + if let Some((versions, _hdr_v, _meta_v, meta)) = header { + if versions == 0 { + return false; + } -// let mut is_delete_marker = false; + let mut is_delete_marker = false; -// let _ = Self::decode_versions(meta, versions, |_: usize, hdr: &[u8], _: &[u8]| { -// let mut header = FileMetaVersionHeader::default(); -// if header.unmarshal_msg(hdr).is_err() { -// return Err(Error::new(StorageError::DoneForNow)); -// } + let _ = Self::decode_versions(meta, versions, |_: usize, hdr: &[u8], _: &[u8]| { + let mut header = FileMetaVersionHeader::default(); + if header.unmarshal_msg(hdr).is_err() { + return Err(Error::new(StorageError::DoneForNow)); + } -// is_delete_marker = header.version_type == VersionType::Delete; + is_delete_marker = header.version_type == VersionType::Delete; -// Err(Error::new(StorageError::DoneForNow)) -// }); + Err(Error::new(StorageError::DoneForNow)) + }); -// is_delete_marker -// } else { -// false -// } -// } + is_delete_marker + } else { + false + } + } -// #[tracing::instrument(level = "debug", skip_all)] -// pub fn marshal_msg(&self) -> Result> { -// let mut wr = Vec::new(); + #[tracing::instrument(level = "debug", skip_all)] + pub fn marshal_msg(&self) -> Result> { + let mut wr = Vec::new(); -// // header -// wr.write_all(XL_FILE_HEADER.as_slice())?; + // header + wr.write_all(XL_FILE_HEADER.as_slice())?; -// let mut major = [0u8; 2]; -// byteorder::LittleEndian::write_u16(&mut major, XL_FILE_VERSION_MAJOR); -// wr.write_all(major.as_slice())?; + let mut major = [0u8; 2]; + byteorder::LittleEndian::write_u16(&mut major, XL_FILE_VERSION_MAJOR); + wr.write_all(major.as_slice())?; -// let mut minor = [0u8; 2]; -// byteorder::LittleEndian::write_u16(&mut minor, XL_FILE_VERSION_MINOR); -// wr.write_all(minor.as_slice())?; + let mut minor = [0u8; 2]; + byteorder::LittleEndian::write_u16(&mut minor, XL_FILE_VERSION_MINOR); + wr.write_all(minor.as_slice())?; -// // size bin32 预留 write_bin_len -// wr.write_all(&[0xc6, 0, 0, 0, 0])?; + // size bin32 预留 write_bin_len + wr.write_all(&[0xc6, 0, 0, 0, 0])?; -// let offset = wr.len(); + let offset = wr.len(); -// rmp::encode::write_uint8(&mut wr, XL_HEADER_VERSION)?; -// rmp::encode::write_uint8(&mut wr, XL_META_VERSION)?; + rmp::encode::write_uint8(&mut wr, XL_HEADER_VERSION)?; + rmp::encode::write_uint8(&mut wr, XL_META_VERSION)?; -// // versions -// rmp::encode::write_sint(&mut wr, self.versions.len() as i64)?; + // versions + rmp::encode::write_sint(&mut wr, self.versions.len() as i64)?; -// for ver in self.versions.iter() { -// let hmsg = ver.header.marshal_msg()?; -// rmp::encode::write_bin(&mut wr, &hmsg)?; - -// rmp::encode::write_bin(&mut wr, &ver.meta)?; -// } - -// // 更新 bin 长度 -// let data_len = wr.len() - offset; -// byteorder::BigEndian::write_u32(&mut wr[offset - 4..offset], data_len as u32); - -// let crc = xxh64::xxh64(&wr[offset..], XXHASH_SEED) as u32; -// let mut crc_buf = [0u8; 5]; -// crc_buf[0] = 0xce; // u32 -// byteorder::BigEndian::write_u32(&mut crc_buf[1..], crc); - -// wr.write_all(&crc_buf)?; - -// wr.write_all(self.data.as_slice())?; - -// Ok(wr) -// } - -// // pub fn unmarshal(buf: &[u8]) -> Result { -// // let mut s = Self::default(); -// // s.unmarshal_msg(buf)?; -// // Ok(s) -// // // let t: FileMeta = rmp_serde::from_slice(buf)?; -// // // Ok(t) -// // } - -// // pub fn marshal_msg(&self) -> Result> { -// // let mut buf = Vec::new(); - -// // self.serialize(&mut Serializer::new(&mut buf))?; - -// // Ok(buf) -// // } - -// fn get_idx(&self, idx: usize) -> Result { -// if idx > self.versions.len() { -// return Err(Error::new(DiskError::FileNotFound)); -// } - -// FileMetaVersion::try_from(self.versions[idx].meta.as_slice()) -// } - -// fn set_idx(&mut self, idx: usize, ver: FileMetaVersion) -> Result<()> { -// if idx >= self.versions.len() { -// return Err(Error::new(DiskError::FileNotFound)); -// } - -// // TODO: use old buf -// let meta_buf = ver.marshal_msg()?; - -// let pre_mod_time = self.versions[idx].header.mod_time; - -// self.versions[idx].header = ver.header(); -// self.versions[idx].meta = meta_buf; - -// if pre_mod_time != self.versions[idx].header.mod_time { -// self.sort_by_mod_time(); -// } - -// Ok(()) -// } - -// fn sort_by_mod_time(&mut self) { -// if self.versions.len() <= 1 { -// return; -// } - -// // Sort by mod_time in descending order (latest first) -// self.versions.sort_by(|a, b| { -// match (a.header.mod_time, b.header.mod_time) { -// (Some(a_time), Some(b_time)) => b_time.cmp(&a_time), // Descending order -// (Some(_), None) => Ordering::Less, -// (None, Some(_)) => Ordering::Greater, -// (None, None) => Ordering::Equal, -// } -// }); -// } - -// // 查找版本 -// pub fn find_version(&self, vid: Option) -> Result<(usize, FileMetaVersion)> { -// for (i, fver) in self.versions.iter().enumerate() { -// if fver.header.version_id == vid { -// let version = self.get_idx(i)?; -// return Ok((i, version)); -// } -// } - -// Err(Error::new(DiskError::FileVersionNotFound)) -// } - -// // shard_data_dir_count 查询 vid 下 data_dir 的数量 -// #[tracing::instrument(level = "debug", skip_all)] -// pub fn shard_data_dir_count(&self, vid: &Option, data_dir: &Option) -> usize { -// self.versions -// .iter() -// .filter(|v| v.header.version_type == VersionType::Object && v.header.version_id != *vid && v.header.user_data_dir()) -// .map(|v| FileMetaVersion::decode_data_dir_from_meta(&v.meta).unwrap_or_default()) -// .filter(|v| v == data_dir) -// .count() -// } - -// pub fn update_object_version(&mut self, fi: FileInfo) -> Result<()> { -// for version in self.versions.iter_mut() { -// match version.header.version_type { -// VersionType::Invalid => (), -// VersionType::Object => { -// if version.header.version_id == fi.version_id { -// let mut ver = FileMetaVersion::try_from(version.meta.as_slice())?; - -// if let Some(ref mut obj) = ver.object { -// if let Some(ref mut meta_user) = obj.meta_user { -// if let Some(meta) = &fi.metadata { -// for (k, v) in meta { -// meta_user.insert(k.clone(), v.clone()); -// } -// } -// obj.meta_user = Some(meta_user.clone()); -// } else { -// let mut meta_user = HashMap::new(); -// if let Some(meta) = &fi.metadata { -// for (k, v) in meta { -// // TODO: MetaSys -// meta_user.insert(k.clone(), v.clone()); -// } -// } -// obj.meta_user = Some(meta_user); -// } - -// if let Some(mod_time) = fi.mod_time { -// obj.mod_time = Some(mod_time); -// } -// } - -// // 更新 -// version.header = ver.header(); -// version.meta = ver.marshal_msg()?; -// } -// } -// VersionType::Delete => { -// if version.header.version_id == fi.version_id { -// return Err(Error::msg("method not allowed")); -// } -// } -// } -// } - -// self.versions.sort_by(|a, b| { -// if a.header.mod_time != b.header.mod_time { -// a.header.mod_time.cmp(&b.header.mod_time) -// } else if a.header.version_type != b.header.version_type { -// a.header.version_type.cmp(&b.header.version_type) -// } else if a.header.version_id != b.header.version_id { -// a.header.version_id.cmp(&b.header.version_id) -// } else if a.header.flags != b.header.flags { -// a.header.flags.cmp(&b.header.flags) -// } else { -// a.cmp(b) -// } -// }); -// Ok(()) -// } - -// // 添加版本 -// #[tracing::instrument(level = "debug", skip_all)] -// pub fn add_version(&mut self, fi: FileInfo) -> Result<()> { -// let vid = fi.version_id; - -// if let Some(ref data) = fi.data { -// let key = vid.unwrap_or_default().to_string(); -// self.data.replace(&key, data.clone())?; -// } - -// let version = FileMetaVersion::from(fi); - -// if !version.valid() { -// return Err(Error::msg("file meta version invalid")); -// } - -// // should replace -// for (idx, ver) in self.versions.iter().enumerate() { -// if ver.header.version_id != vid { -// continue; -// } - -// return self.set_idx(idx, version); -// } - -// // TODO: version count limit ! - -// let mod_time = version.get_mod_time(); - -// // puth a -1 mod time value , so we can relplace this -// self.versions.push(FileMetaShallowVersion { -// header: FileMetaVersionHeader { -// mod_time: Some(OffsetDateTime::from_unix_timestamp(-1)?), -// ..Default::default() -// }, -// ..Default::default() -// }); - -// for (idx, exist) in self.versions.iter().enumerate() { -// if let Some(ref ex_mt) = exist.header.mod_time { -// if let Some(ref in_md) = mod_time { -// if ex_mt <= in_md { -// // insert -// self.versions.insert(idx, FileMetaShallowVersion::try_from(version)?); -// self.versions.pop(); -// return Ok(()); -// } -// } -// } -// } - -// Err(Error::msg("add_version failed")) -// } - -// // delete_version 删除版本,返回 data_dir -// pub fn delete_version(&mut self, fi: &FileInfo) -> Result> { -// let mut ventry = FileMetaVersion::default(); -// if fi.deleted { -// ventry.version_type = VersionType::Delete; -// ventry.delete_marker = Some(MetaDeleteMarker { -// version_id: fi.version_id, -// mod_time: fi.mod_time, -// ..Default::default() -// }); - -// if !fi.is_valid() { -// return Err(Error::msg("invalid file meta version")); -// } -// } - -// for (i, ver) in self.versions.iter().enumerate() { -// if ver.header.version_id != fi.version_id { -// continue; -// } - -// return match ver.header.version_type { -// VersionType::Invalid => Err(Error::msg("invalid file meta version")), -// VersionType::Delete => Ok(None), -// VersionType::Object => { -// let v = self.get_idx(i)?; - -// self.versions.remove(i); - -// let a = v.object.map(|v| v.data_dir).unwrap_or_default(); -// Ok(a) -// } -// }; -// } - -// Err(Error::new(DiskError::FileVersionNotFound)) -// } - -// // read_data fill fi.dada -// #[tracing::instrument(level = "debug", skip(self))] -// pub fn into_fileinfo( -// &self, -// volume: &str, -// path: &str, -// version_id: &str, -// read_data: bool, -// all_parts: bool, -// ) -> Result { -// let has_vid = { -// if !version_id.is_empty() { -// let id = Uuid::parse_str(version_id)?; -// if !id.is_nil() { -// Some(id) -// } else { -// None -// } -// } else { -// None -// } -// }; - -// let mut is_latest = true; -// let mut succ_mod_time = None; -// for ver in self.versions.iter() { -// let header = &ver.header; - -// if let Some(vid) = has_vid { -// if header.version_id != Some(vid) { -// is_latest = false; -// succ_mod_time = header.mod_time; -// continue; -// } -// } - -// let mut fi = ver.to_fileinfo(volume, path, has_vid, all_parts)?; -// fi.is_latest = is_latest; -// if let Some(_d) = succ_mod_time { -// fi.successor_mod_time = succ_mod_time; -// } -// if read_data { -// fi.data = self.data.find(fi.version_id.unwrap_or_default().to_string().as_str())?; -// } - -// fi.num_versions = self.versions.len(); - -// return Ok(fi); -// } - -// if has_vid.is_none() { -// Err(Error::from(DiskError::FileNotFound)) -// } else { -// Err(Error::from(DiskError::FileVersionNotFound)) -// } -// } - -// #[tracing::instrument(level = "debug", skip(self))] -// pub fn into_file_info_versions(&self, volume: &str, path: &str, all_parts: bool) -> Result { -// let mut versions = Vec::new(); -// for version in self.versions.iter() { -// let mut file_version = FileMetaVersion::default(); -// file_version.unmarshal_msg(&version.meta)?; -// let fi = file_version.to_fileinfo(volume, path, None, all_parts); -// versions.push(fi); -// } - -// Ok(FileInfoVersions { -// volume: volume.to_string(), -// name: path.to_string(), -// latest_mod_time: versions[0].mod_time, -// versions, -// ..Default::default() -// }) -// } - -// pub fn lastest_mod_time(&self) -> Option { -// if self.versions.is_empty() { -// return None; -// } - -// self.versions.first().unwrap().header.mod_time -// } -// } - -// // impl Display for FileMeta { -// // fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { -// // f.write_str("FileMeta:")?; -// // for (i, ver) in self.versions.iter().enumerate() { -// // let mut meta = FileMetaVersion::default(); -// // meta.unmarshal_msg(&ver.meta).unwrap_or_default(); -// // f.write_fmt(format_args!("ver:{} header {:?}, meta {:?}", i, ver.header, meta))?; -// // } - -// // f.write_str("\n") -// // } -// // } - -// #[derive(Serialize, Deserialize, Debug, Default, PartialEq, Clone, Eq, PartialOrd, Ord)] -// pub struct FileMetaShallowVersion { -// pub header: FileMetaVersionHeader, -// pub meta: Vec, // FileMetaVersion.marshal_msg -// } - -// impl FileMetaShallowVersion { -// pub fn to_fileinfo(&self, volume: &str, path: &str, version_id: Option, all_parts: bool) -> Result { -// let file_version = FileMetaVersion::try_from(self.meta.as_slice())?; - -// Ok(file_version.to_fileinfo(volume, path, version_id, all_parts)) -// } -// } - -// impl TryFrom for FileMetaShallowVersion { -// type Error = Error; - -// fn try_from(value: FileMetaVersion) -> std::result::Result { -// let header = value.header(); -// let meta = value.marshal_msg()?; -// Ok(Self { meta, header }) -// } -// } - -// #[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)] -// pub struct FileMetaVersion { -// pub version_type: VersionType, -// pub object: Option, -// pub delete_marker: Option, -// pub write_version: u64, // rustfs version -// } - -// impl FileMetaVersion { -// pub fn valid(&self) -> bool { -// if !self.version_type.valid() { -// return false; -// } - -// match self.version_type { -// VersionType::Object => self -// .object -// .as_ref() -// .map(|v| v.erasure_algorithm.valid() && v.bitrot_checksum_algo.valid() && v.mod_time.is_some()) -// .unwrap_or_default(), -// VersionType::Delete => self -// .delete_marker -// .as_ref() -// .map(|v| v.mod_time.unwrap_or(OffsetDateTime::UNIX_EPOCH) > OffsetDateTime::UNIX_EPOCH) -// .unwrap_or_default(), -// _ => false, -// } -// } - -// pub fn get_data_dir(&self) -> Option { -// self.valid() -// .then(|| { -// if self.version_type == VersionType::Object { -// self.object.as_ref().map(|v| v.data_dir).unwrap_or_default() -// } else { -// None -// } -// }) -// .unwrap_or_default() -// } - -// pub fn get_version_id(&self) -> Option { -// match self.version_type { -// VersionType::Object | VersionType::Delete => self.object.as_ref().map(|v| v.version_id).unwrap_or_default(), -// _ => None, -// } -// } - -// pub fn get_mod_time(&self) -> Option { -// match self.version_type { -// VersionType::Object => self.object.as_ref().map(|v| v.mod_time).unwrap_or_default(), -// VersionType::Delete => self.delete_marker.as_ref().map(|v| v.mod_time).unwrap_or_default(), -// _ => None, -// } -// } - -// // decode_data_dir_from_meta 从 meta 中读取 data_dir TODO: 直接从 meta buf 中只解析出 data_dir, msg.skip -// pub fn decode_data_dir_from_meta(buf: &[u8]) -> Result> { -// let mut ver = Self::default(); -// ver.unmarshal_msg(buf)?; - -// let data_dir = ver.object.map(|v| v.data_dir).unwrap_or_default(); -// Ok(data_dir) -// } - -// pub fn unmarshal_msg(&mut self, buf: &[u8]) -> Result { -// let mut cur = Cursor::new(buf); - -// let mut fields_len = rmp::decode::read_map_len(&mut cur)?; - -// while fields_len > 0 { -// fields_len -= 1; - -// // println!("unmarshal_msg fields idx {}", fields_len); - -// let str_len = rmp::decode::read_str_len(&mut cur)?; - -// // println!("unmarshal_msg fields name len() {}", &str_len); - -// // !!!Vec::with_capacity(str_len) 失败,vec! 正常 -// let mut field_buff = vec![0u8; str_len as usize]; - -// cur.read_exact(&mut field_buff)?; - -// let field = String::from_utf8(field_buff)?; - -// // println!("unmarshal_msg fields name {}", &field); - -// match field.as_str() { -// "Type" => { -// let u: u8 = rmp::decode::read_int(&mut cur)?; -// self.version_type = VersionType::from_u8(u); -// } - -// "V2Obj" => { -// // is_nil() -// if buf[cur.position() as usize] == 0xc0 { -// rmp::decode::read_nil(&mut cur)?; -// } else { -// // let buf = unsafe { cur.position() }; -// let mut obj = MetaObject::default(); -// // let start = cur.position(); - -// let (_, remain) = buf.split_at(cur.position() as usize); - -// let read_len = obj.unmarshal_msg(remain)?; -// cur.set_position(cur.position() + read_len); - -// self.object = Some(obj); -// } -// } -// "DelObj" => { -// if buf[cur.position() as usize] == 0xc0 { -// rmp::decode::read_nil(&mut cur)?; -// } else { -// // let buf = unsafe { cur.position() }; -// let mut obj = MetaDeleteMarker::default(); -// // let start = cur.position(); - -// let (_, remain) = buf.split_at(cur.position() as usize); -// let read_len = obj.unmarshal_msg(remain)?; -// cur.set_position(cur.position() + read_len); - -// self.delete_marker = Some(obj); -// } -// } -// "v" => { -// self.write_version = rmp::decode::read_int(&mut cur)?; -// } -// name => return Err(Error::msg(format!("not suport field name {}", name))), -// } -// } - -// Ok(cur.position()) -// } - -// pub fn marshal_msg(&self) -> Result> { -// let mut len: u32 = 4; -// let mut mask: u8 = 0; - -// if self.object.is_none() { -// len -= 1; -// mask |= 0x2; -// } -// if self.delete_marker.is_none() { -// len -= 1; -// mask |= 0x4; -// } - -// let mut wr = Vec::new(); - -// // 字段数量 -// rmp::encode::write_map_len(&mut wr, len)?; - -// // write "Type" -// rmp::encode::write_str(&mut wr, "Type")?; -// rmp::encode::write_uint(&mut wr, self.version_type.to_u8() as u64)?; - -// if (mask & 0x2) == 0 { -// // write V2Obj -// rmp::encode::write_str(&mut wr, "V2Obj")?; -// if self.object.is_none() { -// let _ = rmp::encode::write_nil(&mut wr); -// } else { -// let buf = self.object.as_ref().unwrap().marshal_msg()?; -// wr.write_all(&buf)?; -// } -// } - -// if (mask & 0x4) == 0 { -// // write "DelObj" -// rmp::encode::write_str(&mut wr, "DelObj")?; -// if self.delete_marker.is_none() { -// let _ = rmp::encode::write_nil(&mut wr); -// } else { -// let buf = self.delete_marker.as_ref().unwrap().marshal_msg()?; -// wr.write_all(&buf)?; -// } -// } - -// // write "v" -// rmp::encode::write_str(&mut wr, "v")?; -// rmp::encode::write_uint(&mut wr, self.write_version)?; - -// Ok(wr) -// } - -// pub fn free_version(&self) -> bool { -// self.version_type == VersionType::Delete && self.delete_marker.as_ref().map(|m| m.free_version()).unwrap_or_default() -// } - -// pub fn header(&self) -> FileMetaVersionHeader { -// FileMetaVersionHeader::from(self.clone()) -// } - -// pub fn to_fileinfo(&self, volume: &str, path: &str, version_id: Option, all_parts: bool) -> FileInfo { -// match self.version_type { -// VersionType::Invalid => FileInfo { -// name: path.to_string(), -// volume: volume.to_string(), -// version_id, -// ..Default::default() -// }, -// VersionType::Object => self -// .object -// .as_ref() -// .unwrap() -// .clone() -// .into_fileinfo(volume, path, version_id, all_parts), -// VersionType::Delete => self -// .delete_marker -// .as_ref() -// .unwrap() -// .clone() -// .into_fileinfo(volume, path, version_id, all_parts), -// } -// } -// } - -// impl TryFrom<&[u8]> for FileMetaVersion { -// type Error = Error; - -// fn try_from(value: &[u8]) -> std::result::Result { -// let mut ver = FileMetaVersion::default(); -// ver.unmarshal_msg(value)?; -// Ok(ver) -// } -// } - -// impl From for FileMetaVersion { -// fn from(value: FileInfo) -> Self { -// { -// if value.deleted { -// FileMetaVersion { -// version_type: VersionType::Delete, -// delete_marker: Some(MetaDeleteMarker::from(value)), -// object: None, -// write_version: 0, -// } -// } else { -// FileMetaVersion { -// version_type: VersionType::Object, -// delete_marker: None, -// object: Some(MetaObject::from(value)), -// write_version: 0, -// } -// } -// } -// } -// } - -// impl TryFrom for FileMetaVersion { -// type Error = Error; - -// fn try_from(value: FileMetaShallowVersion) -> std::result::Result { -// FileMetaVersion::try_from(value.meta.as_slice()) -// } -// } - -// #[derive(Serialize, Deserialize, Debug, PartialEq, Default, Clone, Eq, Hash)] -// pub struct FileMetaVersionHeader { -// pub version_id: Option, -// pub mod_time: Option, -// pub signature: [u8; 4], -// pub version_type: VersionType, -// pub flags: u8, -// pub ec_n: u8, -// pub ec_m: u8, -// } - -// impl FileMetaVersionHeader { -// pub fn has_ec(&self) -> bool { -// self.ec_m > 0 && self.ec_n > 0 -// } - -// pub fn matches_not_strict(&self, o: &FileMetaVersionHeader) -> bool { -// let mut ok = self.version_id == o.version_id && self.version_type == o.version_type && self.matches_ec(o); -// if self.version_id.is_none() { -// ok = ok && self.mod_time == o.mod_time; -// } - -// ok -// } - -// pub fn matches_ec(&self, o: &FileMetaVersionHeader) -> bool { -// if self.has_ec() && o.has_ec() { -// return self.ec_n == o.ec_n && self.ec_m == o.ec_m; -// } - -// true -// } - -// pub fn free_version(&self) -> bool { -// self.flags & XL_FLAG_FREE_VERSION != 0 -// } - -// pub fn sorts_before(&self, o: &FileMetaVersionHeader) -> bool { -// if self == o { -// return false; -// } - -// // Prefer newest modtime. -// if self.mod_time != o.mod_time { -// return self.mod_time > o.mod_time; -// } - -// match self.mod_time.cmp(&o.mod_time) { -// Ordering::Greater => { -// return true; -// } -// Ordering::Less => { -// return false; -// } -// _ => {} -// } - -// // The following doesn't make too much sense, but we want sort to be consistent nonetheless. -// // Prefer lower types -// if self.version_type != o.version_type { -// return self.version_type < o.version_type; -// } -// // Consistent sort on signature -// match self.version_id.cmp(&o.version_id) { -// Ordering::Greater => { -// return true; -// } -// Ordering::Less => { -// return false; -// } -// _ => {} -// } - -// if self.flags != o.flags { -// return self.flags > o.flags; -// } - -// false -// } - -// pub fn user_data_dir(&self) -> bool { -// self.flags & Flags::UsesDataDir as u8 != 0 -// } -// #[tracing::instrument] -// pub fn marshal_msg(&self) -> Result> { -// let mut wr = Vec::new(); - -// // array len 7 -// rmp::encode::write_array_len(&mut wr, 7)?; - -// // version_id -// rmp::encode::write_bin(&mut wr, self.version_id.unwrap_or_default().as_bytes())?; -// // mod_time -// rmp::encode::write_i64(&mut wr, self.mod_time.unwrap_or(OffsetDateTime::UNIX_EPOCH).unix_timestamp_nanos() as i64)?; -// // signature -// rmp::encode::write_bin(&mut wr, self.signature.as_slice())?; -// // version_type -// rmp::encode::write_uint8(&mut wr, self.version_type.to_u8())?; -// // flags -// rmp::encode::write_uint8(&mut wr, self.flags)?; -// // ec_n -// rmp::encode::write_uint8(&mut wr, self.ec_n)?; -// // ec_m -// rmp::encode::write_uint8(&mut wr, self.ec_m)?; - -// Ok(wr) -// } - -// pub fn unmarshal_msg(&mut self, buf: &[u8]) -> Result { -// let mut cur = Cursor::new(buf); -// let alen = rmp::decode::read_array_len(&mut cur)?; -// if alen != 7 { -// return Err(Error::msg(format!("version header array len err need 7 got {}", alen))); -// } - -// // version_id -// rmp::decode::read_bin_len(&mut cur)?; -// let mut buf = [0u8; 16]; -// cur.read_exact(&mut buf)?; -// self.version_id = { -// let id = Uuid::from_bytes(buf); -// if id.is_nil() { -// None -// } else { -// Some(id) -// } -// }; - -// // mod_time -// let unix: i128 = rmp::decode::read_int(&mut cur)?; - -// let time = OffsetDateTime::from_unix_timestamp_nanos(unix)?; -// if time == OffsetDateTime::UNIX_EPOCH { -// self.mod_time = None; -// } else { -// self.mod_time = Some(time); -// } - -// // signature -// rmp::decode::read_bin_len(&mut cur)?; -// cur.read_exact(&mut self.signature)?; - -// // version_type -// let typ: u8 = rmp::decode::read_int(&mut cur)?; -// self.version_type = VersionType::from_u8(typ); - -// // flags -// self.flags = rmp::decode::read_int(&mut cur)?; -// // ec_n -// self.ec_n = rmp::decode::read_int(&mut cur)?; -// // ec_m -// self.ec_m = rmp::decode::read_int(&mut cur)?; - -// Ok(cur.position()) -// } -// } - -// impl PartialOrd for FileMetaVersionHeader { -// fn partial_cmp(&self, other: &Self) -> Option { -// Some(self.cmp(other)) -// } -// } - -// impl Ord for FileMetaVersionHeader { -// fn cmp(&self, other: &Self) -> Ordering { -// match self.mod_time.cmp(&other.mod_time) { -// Ordering::Equal => {} -// ord => return ord, -// } - -// match self.version_type.cmp(&other.version_type) { -// Ordering::Equal => {} -// ord => return ord, -// } -// match self.signature.cmp(&other.signature) { -// Ordering::Equal => {} -// ord => return ord, -// } -// match self.version_id.cmp(&other.version_id) { -// Ordering::Equal => {} -// ord => return ord, -// } -// self.flags.cmp(&other.flags) -// } -// } - -// impl From for FileMetaVersionHeader { -// fn from(value: FileMetaVersion) -> Self { -// let flags = { -// let mut f: u8 = 0; -// if value.free_version() { -// f |= Flags::FreeVersion as u8; -// } - -// if value.version_type == VersionType::Object && value.object.as_ref().map(|v| v.use_data_dir()).unwrap_or_default() { -// f |= Flags::UsesDataDir as u8; -// } - -// if value.version_type == VersionType::Object && value.object.as_ref().map(|v| v.use_inlinedata()).unwrap_or_default() -// { -// f |= Flags::InlineData as u8; -// } - -// f -// }; - -// let (ec_n, ec_m) = { -// if value.version_type == VersionType::Object && value.object.is_some() { -// ( -// value.object.as_ref().unwrap().erasure_n as u8, -// value.object.as_ref().unwrap().erasure_m as u8, -// ) -// } else { -// (0, 0) -// } -// }; - -// Self { -// version_id: value.get_version_id(), -// mod_time: value.get_mod_time(), -// signature: [0, 0, 0, 0], -// version_type: value.version_type, -// flags, -// ec_n, -// ec_m, -// } -// } -// } - -// #[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] -// // 因为自定义 message_pack,所以一定要保证字段顺序 -// pub struct MetaObject { -// pub version_id: Option, // Version ID -// pub data_dir: Option, // Data dir ID -// pub erasure_algorithm: ErasureAlgo, // Erasure coding algorithm -// pub erasure_m: usize, // Erasure data blocks -// pub erasure_n: usize, // Erasure parity blocks -// pub erasure_block_size: usize, // Erasure block size -// pub erasure_index: usize, // Erasure disk index -// pub erasure_dist: Vec, // Erasure distribution -// pub bitrot_checksum_algo: ChecksumAlgo, // Bitrot checksum algo -// pub part_numbers: Vec, // Part Numbers -// pub part_etags: Option>, // Part ETags -// pub part_sizes: Vec, // Part Sizes -// pub part_actual_sizes: Option>, // Part ActualSizes (compression) -// pub part_indices: Option>>, // Part Indexes (compression) -// pub size: usize, // Object version size -// pub mod_time: Option, // Object version modified time -// pub meta_sys: Option>>, // Object version internal metadata -// pub meta_user: Option>, // Object version metadata set by user -// } - -// impl MetaObject { -// pub fn unmarshal_msg(&mut self, buf: &[u8]) -> Result { -// let mut cur = Cursor::new(buf); - -// let mut fields_len = rmp::decode::read_map_len(&mut cur)?; - -// // let mut ret = Self::default(); - -// while fields_len > 0 { -// fields_len -= 1; - -// // println!("unmarshal_msg fields idx {}", fields_len); - -// let str_len = rmp::decode::read_str_len(&mut cur)?; - -// // println!("unmarshal_msg fields name len() {}", &str_len); - -// // !!!Vec::with_capacity(str_len) 失败,vec! 正常 -// let mut field_buff = vec![0u8; str_len as usize]; - -// cur.read_exact(&mut field_buff)?; - -// let field = String::from_utf8(field_buff)?; - -// // println!("unmarshal_msg fields name {}", &field); - -// match field.as_str() { -// "ID" => { -// rmp::decode::read_bin_len(&mut cur)?; -// let mut buf = [0u8; 16]; -// cur.read_exact(&mut buf)?; -// self.version_id = { -// let id = Uuid::from_bytes(buf); -// if id.is_nil() { -// None -// } else { -// Some(id) -// } -// }; -// } -// "DDir" => { -// rmp::decode::read_bin_len(&mut cur)?; -// let mut buf = [0u8; 16]; -// cur.read_exact(&mut buf)?; -// self.data_dir = { -// let id = Uuid::from_bytes(buf); -// if id.is_nil() { -// None -// } else { -// Some(id) -// } -// }; -// } -// "EcAlgo" => { -// let u: u8 = rmp::decode::read_int(&mut cur)?; -// self.erasure_algorithm = ErasureAlgo::from_u8(u) -// } -// "EcM" => { -// self.erasure_m = rmp::decode::read_int(&mut cur)?; -// } -// "EcN" => { -// self.erasure_n = rmp::decode::read_int(&mut cur)?; -// } -// "EcBSize" => { -// self.erasure_block_size = rmp::decode::read_int(&mut cur)?; -// } -// "EcIndex" => { -// self.erasure_index = rmp::decode::read_int(&mut cur)?; -// } -// "EcDist" => { -// let alen = rmp::decode::read_array_len(&mut cur)? as usize; -// self.erasure_dist = vec![0u8; alen]; -// for i in 0..alen { -// self.erasure_dist[i] = rmp::decode::read_int(&mut cur)?; -// } -// } -// "CSumAlgo" => { -// let u: u8 = rmp::decode::read_int(&mut cur)?; -// self.bitrot_checksum_algo = ChecksumAlgo::from_u8(u) -// } -// "PartNums" => { -// let alen = rmp::decode::read_array_len(&mut cur)? as usize; -// self.part_numbers = vec![0; alen]; -// for i in 0..alen { -// self.part_numbers[i] = rmp::decode::read_int(&mut cur)?; -// } -// } -// "PartETags" => { -// let array_len = match rmp::decode::read_nil(&mut cur) { -// Ok(_) => None, -// Err(e) => match e { -// rmp::decode::ValueReadError::TypeMismatch(marker) => match marker { -// Marker::FixArray(l) => Some(l as usize), -// Marker::Array16 => Some(rmp::decode::read_u16(&mut cur)? as usize), -// Marker::Array32 => Some(rmp::decode::read_u16(&mut cur)? as usize), -// _ => return Err(Error::msg("PartETags parse failed")), -// }, -// _ => return Err(Error::msg("PartETags parse failed.")), -// }, -// }; - -// if array_len.is_some() { -// let l = array_len.unwrap(); -// let mut etags = Vec::with_capacity(l); -// for _ in 0..l { -// let str_len = rmp::decode::read_str_len(&mut cur)?; -// let mut field_buff = vec![0u8; str_len as usize]; -// cur.read_exact(&mut field_buff)?; -// etags.push(String::from_utf8(field_buff)?); -// } -// self.part_etags = Some(etags); -// } -// } -// "PartSizes" => { -// let alen = rmp::decode::read_array_len(&mut cur)? as usize; -// self.part_sizes = vec![0; alen]; -// for i in 0..alen { -// self.part_sizes[i] = rmp::decode::read_int(&mut cur)?; -// } -// } -// "PartASizes" => { -// let array_len = match rmp::decode::read_nil(&mut cur) { -// Ok(_) => None, -// Err(e) => match e { -// rmp::decode::ValueReadError::TypeMismatch(marker) => match marker { -// Marker::FixArray(l) => Some(l as usize), -// Marker::Array16 => Some(rmp::decode::read_u16(&mut cur)? as usize), -// Marker::Array32 => Some(rmp::decode::read_u16(&mut cur)? as usize), -// _ => return Err(Error::msg("PartETags parse failed")), -// }, -// _ => return Err(Error::msg("PartETags parse failed.")), -// }, -// }; -// if let Some(l) = array_len { -// let mut sizes = vec![0; l]; -// for size in sizes.iter_mut().take(l) { -// *size = rmp::decode::read_int(&mut cur)?; -// } -// // for size in sizes.iter_mut().take(l) { -// // let tmp = rmp::decode::read_int(&mut cur)?; -// // size = tmp; -// // } -// self.part_actual_sizes = Some(sizes); -// } -// } -// "PartIdx" => { -// let alen = rmp::decode::read_array_len(&mut cur)? as usize; - -// if alen == 0 { -// self.part_indices = None; -// continue; -// } - -// let mut indices = Vec::with_capacity(alen); -// for _ in 0..alen { -// let blen = rmp::decode::read_bin_len(&mut cur)?; -// let mut buf = vec![0u8; blen as usize]; -// cur.read_exact(&mut buf)?; - -// indices.push(buf); -// } - -// self.part_indices = Some(indices); -// } -// "Size" => { -// self.size = rmp::decode::read_int(&mut cur)?; -// } -// "MTime" => { -// let unix: i128 = rmp::decode::read_int(&mut cur)?; -// let time = OffsetDateTime::from_unix_timestamp_nanos(unix)?; -// if time == OffsetDateTime::UNIX_EPOCH { -// self.mod_time = None; -// } else { -// self.mod_time = Some(time); -// } -// } -// "MetaSys" => { -// let len = match rmp::decode::read_nil(&mut cur) { -// Ok(_) => None, -// Err(e) => match e { -// rmp::decode::ValueReadError::TypeMismatch(marker) => match marker { -// Marker::FixMap(l) => Some(l as usize), -// Marker::Map16 => Some(rmp::decode::read_u16(&mut cur)? as usize), -// Marker::Map32 => Some(rmp::decode::read_u16(&mut cur)? as usize), -// _ => return Err(Error::msg("MetaSys parse failed")), -// }, -// _ => return Err(Error::msg("MetaSys parse failed.")), -// }, -// }; -// if len.is_some() { -// let l = len.unwrap(); -// let mut map = HashMap::new(); -// for _ in 0..l { -// let str_len = rmp::decode::read_str_len(&mut cur)?; -// let mut field_buff = vec![0u8; str_len as usize]; -// cur.read_exact(&mut field_buff)?; -// let key = String::from_utf8(field_buff)?; - -// let blen = rmp::decode::read_bin_len(&mut cur)?; -// let mut val = vec![0u8; blen as usize]; -// cur.read_exact(&mut val)?; - -// map.insert(key, val); -// } - -// self.meta_sys = Some(map); -// } -// } -// "MetaUsr" => { -// let len = match rmp::decode::read_nil(&mut cur) { -// Ok(_) => None, -// Err(e) => match e { -// rmp::decode::ValueReadError::TypeMismatch(marker) => match marker { -// Marker::FixMap(l) => Some(l as usize), -// Marker::Map16 => Some(rmp::decode::read_u16(&mut cur)? as usize), -// Marker::Map32 => Some(rmp::decode::read_u16(&mut cur)? as usize), -// _ => return Err(Error::msg("MetaUsr parse failed")), -// }, -// _ => return Err(Error::msg("MetaUsr parse failed.")), -// }, -// }; -// if len.is_some() { -// let l = len.unwrap(); -// let mut map = HashMap::new(); -// for _ in 0..l { -// let str_len = rmp::decode::read_str_len(&mut cur)?; -// let mut field_buff = vec![0u8; str_len as usize]; -// cur.read_exact(&mut field_buff)?; -// let key = String::from_utf8(field_buff)?; - -// let blen = rmp::decode::read_str_len(&mut cur)?; -// let mut val_buf = vec![0u8; blen as usize]; -// cur.read_exact(&mut val_buf)?; -// let val = String::from_utf8(val_buf)?; - -// map.insert(key, val); -// } - -// self.meta_user = Some(map); -// } -// } - -// name => return Err(Error::msg(format!("not suport field name {}", name))), -// } -// } - -// Ok(cur.position()) -// } -// // marshal_msg 自定义 messagepack 命名与 go 一致 -// pub fn marshal_msg(&self) -> Result> { -// let mut len: u32 = 18; -// let mut mask: u32 = 0; - -// if self.part_indices.is_none() { -// len -= 1; -// mask |= 0x2000; -// } - -// let mut wr = Vec::new(); - -// // 字段数量 -// rmp::encode::write_map_len(&mut wr, len)?; - -// // string "ID" -// rmp::encode::write_str(&mut wr, "ID")?; -// rmp::encode::write_bin(&mut wr, self.version_id.unwrap_or_default().as_bytes())?; - -// // string "DDir" -// rmp::encode::write_str(&mut wr, "DDir")?; -// rmp::encode::write_bin(&mut wr, self.data_dir.unwrap_or_default().as_bytes())?; - -// // string "EcAlgo" -// rmp::encode::write_str(&mut wr, "EcAlgo")?; -// rmp::encode::write_uint(&mut wr, self.erasure_algorithm.to_u8() as u64)?; - -// // string "EcM" -// rmp::encode::write_str(&mut wr, "EcM")?; -// rmp::encode::write_uint(&mut wr, self.erasure_m.try_into().unwrap())?; - -// // string "EcN" -// rmp::encode::write_str(&mut wr, "EcN")?; -// rmp::encode::write_uint(&mut wr, self.erasure_n.try_into().unwrap())?; - -// // string "EcBSize" -// rmp::encode::write_str(&mut wr, "EcBSize")?; -// rmp::encode::write_uint(&mut wr, self.erasure_block_size.try_into().unwrap())?; - -// // string "EcIndex" -// rmp::encode::write_str(&mut wr, "EcIndex")?; -// rmp::encode::write_uint(&mut wr, self.erasure_index.try_into().unwrap())?; - -// // string "EcDist" -// rmp::encode::write_str(&mut wr, "EcDist")?; -// rmp::encode::write_array_len(&mut wr, self.erasure_dist.len() as u32)?; -// for v in self.erasure_dist.iter() { -// rmp::encode::write_uint(&mut wr, *v as _)?; -// } - -// // string "CSumAlgo" -// rmp::encode::write_str(&mut wr, "CSumAlgo")?; -// rmp::encode::write_uint(&mut wr, self.bitrot_checksum_algo.to_u8() as u64)?; - -// // string "PartNums" -// rmp::encode::write_str(&mut wr, "PartNums")?; -// rmp::encode::write_array_len(&mut wr, self.part_numbers.len() as u32)?; -// for v in self.part_numbers.iter() { -// rmp::encode::write_uint(&mut wr, *v as _)?; -// } - -// // string "PartETags" -// rmp::encode::write_str(&mut wr, "PartETags")?; -// if self.part_etags.is_none() { -// rmp::encode::write_nil(&mut wr)?; -// } else { -// let etags = self.part_etags.as_ref().unwrap(); -// rmp::encode::write_array_len(&mut wr, etags.len() as u32)?; -// for v in etags.iter() { -// rmp::encode::write_str(&mut wr, v.as_str())?; -// } -// } - -// // string "PartSizes" -// rmp::encode::write_str(&mut wr, "PartSizes")?; -// rmp::encode::write_array_len(&mut wr, self.part_sizes.len() as u32)?; -// for v in self.part_sizes.iter() { -// rmp::encode::write_uint(&mut wr, *v as _)?; -// } - -// // string "PartASizes" -// rmp::encode::write_str(&mut wr, "PartASizes")?; -// if self.part_actual_sizes.is_none() { -// rmp::encode::write_nil(&mut wr)?; -// } else { -// let asizes = self.part_actual_sizes.as_ref().unwrap(); -// rmp::encode::write_array_len(&mut wr, asizes.len() as u32)?; -// for v in asizes.iter() { -// rmp::encode::write_uint(&mut wr, *v as _)?; -// } -// } - -// if (mask & 0x2000) == 0 { -// // string "PartIdx" -// rmp::encode::write_str(&mut wr, "PartIdx")?; -// let indices = self.part_indices.as_ref().unwrap(); -// rmp::encode::write_array_len(&mut wr, indices.len() as u32)?; -// for v in indices.iter() { -// rmp::encode::write_bin(&mut wr, v)?; -// } -// } - -// // string "Size" -// rmp::encode::write_str(&mut wr, "Size")?; -// rmp::encode::write_uint(&mut wr, self.size.try_into().unwrap())?; - -// // string "MTime" -// rmp::encode::write_str(&mut wr, "MTime")?; -// rmp::encode::write_uint( -// &mut wr, -// self.mod_time -// .unwrap_or(OffsetDateTime::UNIX_EPOCH) -// .unix_timestamp_nanos() -// .try_into() -// .unwrap(), -// )?; - -// // string "MetaSys" -// rmp::encode::write_str(&mut wr, "MetaSys")?; -// if self.meta_sys.is_none() { -// rmp::encode::write_nil(&mut wr)?; -// } else { -// let metas = self.meta_sys.as_ref().unwrap(); -// rmp::encode::write_map_len(&mut wr, metas.len() as u32)?; -// for (k, v) in metas { -// rmp::encode::write_str(&mut wr, k.as_str())?; -// rmp::encode::write_bin(&mut wr, v)?; -// } -// } - -// // string "MetaUsr" -// rmp::encode::write_str(&mut wr, "MetaUsr")?; -// if self.meta_user.is_none() { -// rmp::encode::write_nil(&mut wr)?; -// } else { -// let metas = self.meta_user.as_ref().unwrap(); -// rmp::encode::write_map_len(&mut wr, metas.len() as u32)?; -// for (k, v) in metas { -// rmp::encode::write_str(&mut wr, k.as_str())?; -// rmp::encode::write_str(&mut wr, v.as_str())?; -// } -// } - -// Ok(wr) -// } -// pub fn use_data_dir(&self) -> bool { -// // TODO: when use inlinedata -// true -// } - -// pub fn use_inlinedata(&self) -> bool { -// // TODO: when use inlinedata -// false -// } - -// pub fn into_fileinfo(self, volume: &str, path: &str, _version_id: Option, _all_parts: bool) -> FileInfo { -// let version_id = self.version_id; - -// let erasure = ErasureInfo { -// algorithm: self.erasure_algorithm.to_string(), -// data_blocks: self.erasure_m, -// parity_blocks: self.erasure_n, -// block_size: self.erasure_block_size, -// index: self.erasure_index, -// distribution: self.erasure_dist.iter().map(|&v| v as usize).collect(), -// ..Default::default() -// }; - -// let mut parts = Vec::new(); -// for (i, _) in self.part_numbers.iter().enumerate() { -// parts.push(ObjectPartInfo { -// number: self.part_numbers[i], -// size: self.part_sizes[i], -// ..Default::default() -// }); -// } - -// let metadata = { -// if let Some(metauser) = self.meta_user.as_ref() { -// let mut m = HashMap::new(); -// for (k, v) in metauser { -// // TODO: skip xhttp x-amz-storage-class -// m.insert(k.to_owned(), v.to_owned()); -// } -// Some(m) -// } else { -// None -// } -// }; - -// FileInfo { -// version_id, -// erasure, -// data_dir: self.data_dir, -// mod_time: self.mod_time, -// size: self.size, -// name: path.to_string(), -// volume: volume.to_string(), -// parts, -// metadata, -// ..Default::default() -// } -// } -// } - -// impl From for MetaObject { -// fn from(value: FileInfo) -> Self { -// let part_numbers: Vec = value.parts.iter().map(|v| v.number).collect(); -// let part_sizes: Vec = value.parts.iter().map(|v| v.size).collect(); - -// Self { -// version_id: value.version_id, -// size: value.size, -// mod_time: value.mod_time, -// data_dir: value.data_dir, -// erasure_algorithm: ErasureAlgo::ReedSolomon, -// erasure_m: value.erasure.data_blocks, -// erasure_n: value.erasure.parity_blocks, -// erasure_block_size: value.erasure.block_size, -// erasure_index: value.erasure.index, -// erasure_dist: value.erasure.distribution.iter().map(|x| *x as u8).collect(), -// bitrot_checksum_algo: ChecksumAlgo::HighwayHash, -// part_numbers, -// part_etags: None, // TODO: add part_etags -// part_sizes, -// part_actual_sizes: None, // TODO: add part_etags -// part_indices: None, -// meta_sys: None, -// meta_user: value.metadata.clone(), -// } -// } -// } - -// #[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] -// pub struct MetaDeleteMarker { -// pub version_id: Option, // Version ID for delete marker -// pub mod_time: Option, // Object delete marker modified time -// pub meta_sys: Option>>, // Delete marker internal metadata -// } - -// impl MetaDeleteMarker { -// pub fn free_version(&self) -> bool { -// self.meta_sys -// .as_ref() -// .map(|v| v.get(FREE_VERSION_META_HEADER).is_some()) -// .unwrap_or_default() -// } - -// pub fn into_fileinfo(self, volume: &str, path: &str, version_id: Option, _all_parts: bool) -> FileInfo { -// FileInfo { -// name: path.to_string(), -// volume: volume.to_string(), -// version_id, -// deleted: true, -// mod_time: self.mod_time, -// ..Default::default() -// } -// } - -// pub fn unmarshal_msg(&mut self, buf: &[u8]) -> Result { -// let mut cur = Cursor::new(buf); - -// let mut fields_len = rmp::decode::read_map_len(&mut cur)?; - -// while fields_len > 0 { -// fields_len -= 1; - -// let str_len = rmp::decode::read_str_len(&mut cur)?; - -// // !!!Vec::with_capacity(str_len) 失败,vec! 正常 -// let mut field_buff = vec![0u8; str_len as usize]; - -// cur.read_exact(&mut field_buff)?; - -// let field = String::from_utf8(field_buff)?; - -// match field.as_str() { -// "ID" => { -// rmp::decode::read_bin_len(&mut cur)?; -// let mut buf = [0u8; 16]; -// cur.read_exact(&mut buf)?; -// self.version_id = { -// let id = Uuid::from_bytes(buf); -// if id.is_nil() { -// None -// } else { -// Some(id) -// } -// }; -// } - -// "MTime" => { -// let unix: i64 = rmp::decode::read_int(&mut cur)?; -// let time = OffsetDateTime::from_unix_timestamp(unix)?; -// if time == OffsetDateTime::UNIX_EPOCH { -// self.mod_time = None; -// } else { -// self.mod_time = Some(time); -// } -// } -// "MetaSys" => { -// let l = rmp::decode::read_map_len(&mut cur)?; -// let mut map = HashMap::new(); -// for _ in 0..l { -// let str_len = rmp::decode::read_str_len(&mut cur)?; -// let mut field_buff = vec![0u8; str_len as usize]; -// cur.read_exact(&mut field_buff)?; -// let key = String::from_utf8(field_buff)?; - -// let blen = rmp::decode::read_bin_len(&mut cur)?; -// let mut val = vec![0u8; blen as usize]; -// cur.read_exact(&mut val)?; - -// map.insert(key, val); -// } - -// self.meta_sys = Some(map); -// } -// name => return Err(Error::msg(format!("not suport field name {}", name))), -// } -// } - -// Ok(cur.position()) -// } - -// pub fn marshal_msg(&self) -> Result> { -// let mut len: u32 = 3; -// let mut mask: u8 = 0; - -// if self.meta_sys.is_none() { -// len -= 1; -// mask |= 0x4; -// } - -// let mut wr = Vec::new(); - -// // 字段数量 -// rmp::encode::write_map_len(&mut wr, len)?; - -// // string "ID" -// rmp::encode::write_str(&mut wr, "ID")?; -// rmp::encode::write_bin(&mut wr, self.version_id.unwrap_or_default().as_bytes())?; - -// // string "MTime" -// rmp::encode::write_str(&mut wr, "MTime")?; -// rmp::encode::write_uint( -// &mut wr, -// self.mod_time -// .unwrap_or(OffsetDateTime::UNIX_EPOCH) -// .unix_timestamp() -// .try_into() -// .unwrap(), -// )?; - -// if (mask & 0x4) == 0 { -// let metas = self.meta_sys.as_ref().unwrap(); -// rmp::encode::write_map_len(&mut wr, metas.len() as u32)?; -// for (k, v) in metas { -// rmp::encode::write_str(&mut wr, k.as_str())?; -// rmp::encode::write_bin(&mut wr, v)?; -// } -// } - -// Ok(wr) -// } -// } - -// impl From for MetaDeleteMarker { -// fn from(value: FileInfo) -> Self { -// Self { -// version_id: value.version_id, -// mod_time: value.mod_time, -// meta_sys: None, -// } -// } -// } - -// #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Default, Clone, PartialOrd, Ord, Hash)] -// pub enum VersionType { -// #[default] -// Invalid = 0, -// Object = 1, -// Delete = 2, -// // Legacy = 3, -// } - -// impl VersionType { -// pub fn valid(&self) -> bool { -// matches!(*self, VersionType::Object | VersionType::Delete) -// } - -// pub fn to_u8(&self) -> u8 { -// match self { -// VersionType::Invalid => 0, -// VersionType::Object => 1, -// VersionType::Delete => 2, -// } -// } - -// pub fn from_u8(n: u8) -> Self { -// match n { -// 1 => VersionType::Object, -// 2 => VersionType::Delete, -// _ => VersionType::Invalid, -// } -// } -// } - -// #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Default, Clone)] -// pub enum ErasureAlgo { -// #[default] -// Invalid = 0, -// ReedSolomon = 1, -// } - -// impl ErasureAlgo { -// pub fn valid(&self) -> bool { -// *self > ErasureAlgo::Invalid -// } -// pub fn to_u8(&self) -> u8 { -// match self { -// ErasureAlgo::Invalid => 0, -// ErasureAlgo::ReedSolomon => 1, -// } -// } - -// pub fn from_u8(u: u8) -> Self { -// match u { -// 1 => ErasureAlgo::ReedSolomon, -// _ => ErasureAlgo::Invalid, -// } -// } -// } - -// impl Display for ErasureAlgo { + for ver in self.versions.iter() { + let hmsg = ver.header.marshal_msg()?; + rmp::encode::write_bin(&mut wr, &hmsg)?; + + rmp::encode::write_bin(&mut wr, &ver.meta)?; + } + + // 更新 bin 长度 + let data_len = wr.len() - offset; + byteorder::BigEndian::write_u32(&mut wr[offset - 4..offset], data_len as u32); + + let crc = xxh64::xxh64(&wr[offset..], XXHASH_SEED) as u32; + let mut crc_buf = [0u8; 5]; + crc_buf[0] = 0xce; // u32 + byteorder::BigEndian::write_u32(&mut crc_buf[1..], crc); + + wr.write_all(&crc_buf)?; + + wr.write_all(self.data.as_slice())?; + + Ok(wr) + } + + // pub fn unmarshal(buf: &[u8]) -> Result { + // let mut s = Self::default(); + // s.unmarshal_msg(buf)?; + // Ok(s) + // // let t: FileMeta = rmp_serde::from_slice(buf)?; + // // Ok(t) + // } + + // pub fn marshal_msg(&self) -> Result> { + // let mut buf = Vec::new(); + + // self.serialize(&mut Serializer::new(&mut buf))?; + + // Ok(buf) + // } + + fn get_idx(&self, idx: usize) -> Result { + if idx > self.versions.len() { + return Err(Error::new(DiskError::FileNotFound)); + } + + FileMetaVersion::try_from(self.versions[idx].meta.as_slice()) + } + + fn set_idx(&mut self, idx: usize, ver: FileMetaVersion) -> Result<()> { + if idx >= self.versions.len() { + return Err(Error::new(DiskError::FileNotFound)); + } + + // TODO: use old buf + let meta_buf = ver.marshal_msg()?; + + let pre_mod_time = self.versions[idx].header.mod_time; + + self.versions[idx].header = ver.header(); + self.versions[idx].meta = meta_buf; + + if pre_mod_time != self.versions[idx].header.mod_time { + self.sort_by_mod_time(); + } + + Ok(()) + } + + fn sort_by_mod_time(&mut self) { + if self.versions.len() <= 1 { + return; + } + + // Sort by mod_time in descending order (latest first) + self.versions.sort_by(|a, b| { + match (a.header.mod_time, b.header.mod_time) { + (Some(a_time), Some(b_time)) => b_time.cmp(&a_time), // Descending order + (Some(_), None) => Ordering::Less, + (None, Some(_)) => Ordering::Greater, + (None, None) => Ordering::Equal, + } + }); + } + + // 查找版本 + pub fn find_version(&self, vid: Option) -> Result<(usize, FileMetaVersion)> { + for (i, fver) in self.versions.iter().enumerate() { + if fver.header.version_id == vid { + let version = self.get_idx(i)?; + return Ok((i, version)); + } + } + + Err(Error::new(DiskError::FileVersionNotFound)) + } + + // shard_data_dir_count 查询 vid 下 data_dir 的数量 + #[tracing::instrument(level = "debug", skip_all)] + pub fn shard_data_dir_count(&self, vid: &Option, data_dir: &Option) -> usize { + self.versions + .iter() + .filter(|v| v.header.version_type == VersionType::Object && v.header.version_id != *vid && v.header.user_data_dir()) + .map(|v| FileMetaVersion::decode_data_dir_from_meta(&v.meta).unwrap_or_default()) + .filter(|v| v == data_dir) + .count() + } + + pub fn update_object_version(&mut self, fi: FileInfo) -> Result<()> { + for version in self.versions.iter_mut() { + match version.header.version_type { + VersionType::Invalid => (), + VersionType::Object => { + if version.header.version_id == fi.version_id { + let mut ver = FileMetaVersion::try_from(version.meta.as_slice())?; + + if let Some(ref mut obj) = ver.object { + if let Some(ref mut meta_user) = obj.meta_user { + if let Some(meta) = &fi.metadata { + for (k, v) in meta { + meta_user.insert(k.clone(), v.clone()); + } + } + obj.meta_user = Some(meta_user.clone()); + } else { + let mut meta_user = HashMap::new(); + if let Some(meta) = &fi.metadata { + for (k, v) in meta { + // TODO: MetaSys + meta_user.insert(k.clone(), v.clone()); + } + } + obj.meta_user = Some(meta_user); + } + + if let Some(mod_time) = fi.mod_time { + obj.mod_time = Some(mod_time); + } + } + + // 更新 + version.header = ver.header(); + version.meta = ver.marshal_msg()?; + } + } + VersionType::Delete => { + if version.header.version_id == fi.version_id { + return Err(Error::msg("method not allowed")); + } + } + } + } + + self.versions.sort_by(|a, b| { + if a.header.mod_time != b.header.mod_time { + a.header.mod_time.cmp(&b.header.mod_time) + } else if a.header.version_type != b.header.version_type { + a.header.version_type.cmp(&b.header.version_type) + } else if a.header.version_id != b.header.version_id { + a.header.version_id.cmp(&b.header.version_id) + } else if a.header.flags != b.header.flags { + a.header.flags.cmp(&b.header.flags) + } else { + a.cmp(b) + } + }); + Ok(()) + } + + // 添加版本 + #[tracing::instrument(level = "debug", skip_all)] + pub fn add_version(&mut self, fi: FileInfo) -> Result<()> { + let vid = fi.version_id; + + if let Some(ref data) = fi.data { + let key = vid.unwrap_or_default().to_string(); + self.data.replace(&key, data.clone())?; + } + + let version = FileMetaVersion::from(fi); + + if !version.valid() { + return Err(Error::msg("file meta version invalid")); + } + + // should replace + for (idx, ver) in self.versions.iter().enumerate() { + if ver.header.version_id != vid { + continue; + } + + return self.set_idx(idx, version); + } + + // TODO: version count limit ! + + let mod_time = version.get_mod_time(); + + // puth a -1 mod time value , so we can relplace this + self.versions.push(FileMetaShallowVersion { + header: FileMetaVersionHeader { + mod_time: Some(OffsetDateTime::from_unix_timestamp(-1)?), + ..Default::default() + }, + ..Default::default() + }); + + for (idx, exist) in self.versions.iter().enumerate() { + if let Some(ref ex_mt) = exist.header.mod_time { + if let Some(ref in_md) = mod_time { + if ex_mt <= in_md { + // insert + self.versions.insert(idx, FileMetaShallowVersion::try_from(version)?); + self.versions.pop(); + return Ok(()); + } + } + } + } + + Err(Error::msg("add_version failed")) + } + + // delete_version 删除版本,返回 data_dir + pub fn delete_version(&mut self, fi: &FileInfo) -> Result> { + let mut ventry = FileMetaVersion::default(); + if fi.deleted { + ventry.version_type = VersionType::Delete; + ventry.delete_marker = Some(MetaDeleteMarker { + version_id: fi.version_id, + mod_time: fi.mod_time, + ..Default::default() + }); + + if !fi.is_valid() { + return Err(Error::msg("invalid file meta version")); + } + } + + for (i, ver) in self.versions.iter().enumerate() { + if ver.header.version_id != fi.version_id { + continue; + } + + return match ver.header.version_type { + VersionType::Invalid => Err(Error::msg("invalid file meta version")), + VersionType::Delete => Ok(None), + VersionType::Object => { + let v = self.get_idx(i)?; + + self.versions.remove(i); + + let a = v.object.map(|v| v.data_dir).unwrap_or_default(); + Ok(a) + } + }; + } + + Err(Error::new(DiskError::FileVersionNotFound)) + } + + // read_data fill fi.dada + #[tracing::instrument(level = "debug", skip(self))] + pub fn into_fileinfo( + &self, + volume: &str, + path: &str, + version_id: &str, + read_data: bool, + all_parts: bool, + ) -> Result { + let has_vid = { + if !version_id.is_empty() { + let id = Uuid::parse_str(version_id)?; + if !id.is_nil() { Some(id) } else { None } + } else { + None + } + }; + + let mut is_latest = true; + let mut succ_mod_time = None; + for ver in self.versions.iter() { + let header = &ver.header; + + if let Some(vid) = has_vid { + if header.version_id != Some(vid) { + is_latest = false; + succ_mod_time = header.mod_time; + continue; + } + } + + let mut fi = ver.to_fileinfo(volume, path, has_vid, all_parts)?; + fi.is_latest = is_latest; + if let Some(_d) = succ_mod_time { + fi.successor_mod_time = succ_mod_time; + } + if read_data { + fi.data = self.data.find(fi.version_id.unwrap_or_default().to_string().as_str())?; + } + + fi.num_versions = self.versions.len(); + + return Ok(fi); + } + + if has_vid.is_none() { + Err(Error::from(DiskError::FileNotFound)) + } else { + Err(Error::from(DiskError::FileVersionNotFound)) + } + } + + #[tracing::instrument(level = "debug", skip(self))] + pub fn into_file_info_versions(&self, volume: &str, path: &str, all_parts: bool) -> Result { + let mut versions = Vec::new(); + for version in self.versions.iter() { + let mut file_version = FileMetaVersion::default(); + file_version.unmarshal_msg(&version.meta)?; + let fi = file_version.to_fileinfo(volume, path, None, all_parts); + versions.push(fi); + } + + Ok(FileInfoVersions { + volume: volume.to_string(), + name: path.to_string(), + latest_mod_time: versions[0].mod_time, + versions, + ..Default::default() + }) + } + + pub fn lastest_mod_time(&self) -> Option { + if self.versions.is_empty() { + return None; + } + + self.versions.first().unwrap().header.mod_time + } +} + +// impl Display for FileMeta { // fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { -// match self { -// ErasureAlgo::Invalid => write!(f, "Invalid"), -// ErasureAlgo::ReedSolomon => write!(f, "{}", ERASURE_ALGORITHM), -// } -// } -// } - -// #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Default, Clone)] -// pub enum ChecksumAlgo { -// #[default] -// Invalid = 0, -// HighwayHash = 1, -// } - -// impl ChecksumAlgo { -// pub fn valid(&self) -> bool { -// *self > ChecksumAlgo::Invalid -// } -// pub fn to_u8(&self) -> u8 { -// match self { -// ChecksumAlgo::Invalid => 0, -// ChecksumAlgo::HighwayHash => 1, -// } -// } -// pub fn from_u8(u: u8) -> Self { -// match u { -// 1 => ChecksumAlgo::HighwayHash, -// _ => ChecksumAlgo::Invalid, -// } -// } -// } - -// #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Default, Clone)] -// pub enum Flags { -// #[default] -// FreeVersion = 1 << 0, -// UsesDataDir = 1 << 1, -// InlineData = 1 << 2, -// } - -// const FREE_VERSION_META_HEADER: &str = "free-version"; - -// // mergeXLV2Versions -// pub fn merge_file_meta_versions( -// mut quorum: usize, -// mut strict: bool, -// requested_versions: usize, -// versions: &[Vec], -// ) -> Vec { -// if quorum == 0 { -// quorum = 1; -// } - -// if versions.len() < quorum || versions.is_empty() { -// return Vec::new(); -// } - -// if versions.len() == 1 { -// return versions[0].clone(); -// } - -// if quorum == 1 { -// strict = true; -// } - -// let mut versions = versions.to_owned(); - -// let mut n_versions = 0; - -// let mut merged = Vec::new(); -// loop { -// let mut tops = Vec::new(); -// let mut top_sig = FileMetaVersionHeader::default(); -// let mut consistent = true; -// for vers in versions.iter() { -// if vers.is_empty() { -// consistent = false; -// continue; -// } -// if tops.is_empty() { -// consistent = true; -// top_sig = vers[0].header.clone(); -// } else { -// consistent = consistent && vers[0].header == top_sig; -// } -// tops.push(vers[0].clone()); +// f.write_str("FileMeta:")?; +// for (i, ver) in self.versions.iter().enumerate() { +// let mut meta = FileMetaVersion::default(); +// meta.unmarshal_msg(&ver.meta).unwrap_or_default(); +// f.write_fmt(format_args!("ver:{} header {:?}, meta {:?}", i, ver.header, meta))?; // } -// // check if done... -// if tops.len() < quorum { -// break; -// } - -// let mut latest = FileMetaShallowVersion::default(); -// if consistent { -// merged.push(tops[0].clone()); -// if tops[0].header.free_version() { -// n_versions += 1; -// } -// } else { -// let mut lastest_count = 0; -// for (i, ver) in tops.iter().enumerate() { -// if ver.header == latest.header { -// lastest_count += 1; -// continue; -// } - -// if i == 0 || ver.header.sorts_before(&latest.header) { -// if i == 0 || lastest_count == 0 { -// lastest_count = 1; -// } else if !strict && ver.header.matches_not_strict(&latest.header) { -// lastest_count += 1; -// } else { -// lastest_count = 1; -// } -// latest = ver.clone(); -// continue; -// } - -// // Mismatch, but older. -// if lastest_count > 0 && !strict && ver.header.matches_not_strict(&latest.header) { -// lastest_count += 1; -// continue; -// } - -// if lastest_count > 0 && ver.header.version_id == latest.header.version_id { -// let mut x: HashMap = HashMap::new(); -// for a in tops.iter() { -// if a.header.version_id != ver.header.version_id { -// continue; -// } -// let mut a_clone = a.clone(); -// if !strict { -// a_clone.header.signature = [0; 4]; -// } -// *x.entry(a_clone.header).or_insert(1) += 1; -// } -// lastest_count = 0; -// for (k, v) in x.iter() { -// if *v < lastest_count { -// continue; -// } -// if *v == lastest_count && latest.header.sorts_before(k) { -// continue; -// } -// tops.iter().for_each(|a| { -// let mut hdr = a.header.clone(); -// if !strict { -// hdr.signature = [0; 4]; -// } -// if hdr == *k { -// latest = a.clone(); -// } -// }); - -// lastest_count = *v; -// } -// break; -// } -// } -// if lastest_count >= quorum { -// if !latest.header.free_version() { -// n_versions += 1; -// } -// merged.push(latest.clone()); -// } -// } - -// // Remove from all streams up until latest modtime or if selected. -// versions.iter_mut().for_each(|vers| { -// // // Keep top entry (and remaining)... -// let mut bre = false; -// vers.retain(|ver| { -// if bre { -// return true; -// } -// if let Ordering::Greater = ver.header.mod_time.cmp(&latest.header.mod_time) { -// bre = true; -// return false; -// } -// if ver.header == latest.header { -// bre = true; -// return false; -// } -// if let Ordering::Equal = latest.header.version_id.cmp(&ver.header.version_id) { -// bre = true; -// return false; -// } -// for merged_v in merged.iter() { -// if let Ordering::Equal = ver.header.version_id.cmp(&merged_v.header.version_id) { -// bre = true; -// return false; -// } -// } -// true -// }); -// }); -// if requested_versions > 0 && requested_versions == n_versions { -// merged.append(&mut versions[0]); -// break; -// } -// } - -// // Sanity check. Enable if duplicates show up. -// // todo -// merged -// } - -// pub async fn file_info_from_raw(ri: RawFileInfo, bucket: &str, object: &str, read_data: bool) -> Result { -// get_file_info(&ri.buf, bucket, object, "", FileInfoOpts { data: read_data }).await -// } - -// pub struct FileInfoOpts { -// pub data: bool, -// } - -// pub async fn get_file_info(buf: &[u8], volume: &str, path: &str, version_id: &str, opts: FileInfoOpts) -> Result { -// let vid = { -// if version_id.is_empty() { -// None -// } else { -// Some(Uuid::parse_str(version_id)?) -// } -// }; - -// let meta = FileMeta::load(buf)?; -// if meta.versions.is_empty() { -// return Ok(FileInfo { -// volume: volume.to_owned(), -// name: path.to_owned(), -// version_id: vid, -// is_latest: true, -// deleted: true, -// mod_time: Some(OffsetDateTime::from_unix_timestamp(1)?), -// ..Default::default() -// }); -// } - -// let fi = meta.into_fileinfo(volume, path, version_id, opts.data, true)?; -// Ok(fi) -// } - -// async fn read_more( -// reader: &mut R, -// buf: &mut Vec, -// total_size: usize, -// read_size: usize, -// has_full: bool, -// ) -> Result<()> { -// use tokio::io::AsyncReadExt; -// let has = buf.len(); - -// if has >= read_size { -// return Ok(()); -// } - -// if has_full || read_size > total_size { -// return Err(Error::new(io::Error::new(io::ErrorKind::UnexpectedEof, "Unexpected EOF"))); -// } - -// let extra = read_size - has; -// if buf.capacity() >= read_size { -// // Extend the buffer if we have enough space. -// buf.resize(read_size, 0); -// } else { -// buf.extend(vec![0u8; extra]); -// } - -// reader.read_exact(&mut buf[has..]).await?; -// Ok(()) -// } - -// pub async fn read_xl_meta_no_data(reader: &mut R, size: usize) -> Result> { -// use tokio::io::AsyncReadExt; - -// let mut initial = size; -// let mut has_full = true; - -// if initial > META_DATA_READ_DEFAULT { -// initial = META_DATA_READ_DEFAULT; -// has_full = false; -// } - -// let mut buf = vec![0u8; initial]; -// reader.read_exact(&mut buf).await?; - -// let (tmp_buf, major, minor) = FileMeta::check_xl2_v1(&buf)?; - -// match major { -// 1 => match minor { -// 0 => { -// read_more(reader, &mut buf, size, size, has_full).await?; -// Ok(buf) -// } -// 1..=3 => { -// let (sz, tmp_buf) = FileMeta::read_bytes_header(tmp_buf)?; -// let mut want = sz as usize + (buf.len() - tmp_buf.len()); - -// if minor < 2 { -// read_more(reader, &mut buf, size, want, has_full).await?; -// return Ok(buf[..want].to_vec()); -// } - -// let want_max = usize::min(want + MSGP_UINT32_SIZE, size); -// read_more(reader, &mut buf, size, want_max, has_full).await?; - -// if buf.len() < want { -// error!("read_xl_meta_no_data buffer too small (length: {}, needed: {})", &buf.len(), want); -// return Err(Error::new(DiskError::FileCorrupt)); -// } - -// let tmp = &buf[want..]; -// let crc_size = 5; -// let other_size = tmp.len() - crc_size; - -// want += tmp.len() - other_size; - -// Ok(buf[..want].to_vec()) -// } -// _ => Err(Error::new(io::Error::new(io::ErrorKind::InvalidData, "Unknown minor metadata version"))), -// }, -// _ => Err(Error::new(io::Error::new(io::ErrorKind::InvalidData, "Unknown major metadata version"))), -// } -// } -// #[cfg(test)] -// #[allow(clippy::field_reassign_with_default)] -// mod test { -// use super::*; - -// #[test] -// fn test_new_file_meta() { -// let mut fm = FileMeta::new(); - -// let (m, n) = (3, 2); - -// for i in 0..5 { -// let mut fi = FileInfo::new(i.to_string().as_str(), m, n); -// fi.mod_time = Some(OffsetDateTime::now_utc()); - -// fm.add_version(fi).unwrap(); -// } - -// let buff = fm.marshal_msg().unwrap(); - -// let mut newfm = FileMeta::default(); -// newfm.unmarshal_msg(&buff).unwrap(); - -// assert_eq!(fm, newfm) -// } - -// #[test] -// fn test_marshal_metaobject() { -// let obj = MetaObject { -// data_dir: Some(Uuid::new_v4()), -// ..Default::default() -// }; - -// // println!("obj {:?}", &obj); - -// let encoded = obj.marshal_msg().unwrap(); - -// let mut obj2 = MetaObject::default(); -// obj2.unmarshal_msg(&encoded).unwrap(); - -// // println!("obj2 {:?}", &obj2); - -// assert_eq!(obj, obj2); -// assert_eq!(obj.data_dir, obj2.data_dir); -// } - -// #[test] -// fn test_marshal_metadeletemarker() { -// let obj = MetaDeleteMarker { -// version_id: Some(Uuid::new_v4()), -// ..Default::default() -// }; - -// // println!("obj {:?}", &obj); - -// let encoded = obj.marshal_msg().unwrap(); - -// let mut obj2 = MetaDeleteMarker::default(); -// obj2.unmarshal_msg(&encoded).unwrap(); - -// // println!("obj2 {:?}", &obj2); - -// assert_eq!(obj, obj2); -// assert_eq!(obj.version_id, obj2.version_id); -// } - -// #[test] -// #[tracing::instrument] -// fn test_marshal_metaversion() { -// let mut fi = FileInfo::new("test", 3, 2); -// fi.version_id = Some(Uuid::new_v4()); -// fi.mod_time = Some(OffsetDateTime::from_unix_timestamp(OffsetDateTime::now_utc().unix_timestamp()).unwrap()); -// let mut obj = FileMetaVersion::from(fi); -// obj.write_version = 110; - -// // println!("obj {:?}", &obj); - -// let encoded = obj.marshal_msg().unwrap(); - -// let mut obj2 = FileMetaVersion::default(); -// obj2.unmarshal_msg(&encoded).unwrap(); - -// // println!("obj2 {:?}", &obj2); - -// // 时间截不一致 - - -// assert_eq!(obj, obj2); -// assert_eq!(obj.get_version_id(), obj2.get_version_id()); -// assert_eq!(obj.write_version, obj2.write_version); -// assert_eq!(obj.write_version, 110); -// } - -// #[test] -// #[tracing::instrument] -// fn test_marshal_metaversionheader() { -// let mut obj = FileMetaVersionHeader::default(); -// let vid = Some(Uuid::new_v4()); -// obj.version_id = vid; - -// let encoded = obj.marshal_msg().unwrap(); - -// let mut obj2 = FileMetaVersionHeader::default(); -// obj2.unmarshal_msg(&encoded).unwrap(); - -// // 时间截不一致 - - -// assert_eq!(obj, obj2); -// assert_eq!(obj.version_id, obj2.version_id); -// assert_eq!(obj.version_id, vid); -// } - -// // New comprehensive tests for utility functions and validation - -// #[test] -// fn test_xl_file_header_constants() { -// // Test XL file header constants -// assert_eq!(XL_FILE_HEADER, [b'X', b'L', b'2', b' ']); -// assert_eq!(XL_FILE_VERSION_MAJOR, 1); -// assert_eq!(XL_FILE_VERSION_MINOR, 3); -// assert_eq!(XL_HEADER_VERSION, 3); -// assert_eq!(XL_META_VERSION, 2); -// } - -// #[test] -// fn test_is_xl2_v1_format() { -// // Test valid XL2 V1 format -// let mut valid_buf = vec![0u8; 20]; -// valid_buf[0..4].copy_from_slice(&XL_FILE_HEADER); -// byteorder::LittleEndian::write_u16(&mut valid_buf[4..6], 1); -// byteorder::LittleEndian::write_u16(&mut valid_buf[6..8], 0); - -// assert!(FileMeta::is_xl2_v1_format(&valid_buf)); - -// // Test invalid format - wrong header -// let invalid_buf = vec![0u8; 20]; -// assert!(!FileMeta::is_xl2_v1_format(&invalid_buf)); - -// // Test buffer too small -// let small_buf = vec![0u8; 4]; -// assert!(!FileMeta::is_xl2_v1_format(&small_buf)); -// } - -// #[test] -// fn test_check_xl2_v1() { -// // Test valid XL2 V1 check -// let mut valid_buf = vec![0u8; 20]; -// valid_buf[0..4].copy_from_slice(&XL_FILE_HEADER); -// byteorder::LittleEndian::write_u16(&mut valid_buf[4..6], 1); -// byteorder::LittleEndian::write_u16(&mut valid_buf[6..8], 2); - -// let result = FileMeta::check_xl2_v1(&valid_buf); -// assert!(result.is_ok()); -// let (remaining, major, minor) = result.unwrap(); -// assert_eq!(major, 1); -// assert_eq!(minor, 2); -// assert_eq!(remaining.len(), 12); // 20 - 8 - -// // Test buffer too small -// let small_buf = vec![0u8; 4]; -// assert!(FileMeta::check_xl2_v1(&small_buf).is_err()); - -// // Test wrong header -// let mut wrong_header = vec![0u8; 20]; -// wrong_header[0..4].copy_from_slice(b"ABCD"); -// assert!(FileMeta::check_xl2_v1(&wrong_header).is_err()); - -// // Test version too high -// let mut high_version = vec![0u8; 20]; -// high_version[0..4].copy_from_slice(&XL_FILE_HEADER); -// byteorder::LittleEndian::write_u16(&mut high_version[4..6], 99); -// byteorder::LittleEndian::write_u16(&mut high_version[6..8], 0); -// assert!(FileMeta::check_xl2_v1(&high_version).is_err()); -// } - -// #[test] -// fn test_version_type_enum() { -// // Test VersionType enum methods -// assert!(VersionType::Object.valid()); -// assert!(VersionType::Delete.valid()); -// assert!(!VersionType::Invalid.valid()); - -// assert_eq!(VersionType::Object.to_u8(), 1); -// assert_eq!(VersionType::Delete.to_u8(), 2); -// assert_eq!(VersionType::Invalid.to_u8(), 0); - -// assert_eq!(VersionType::from_u8(1), VersionType::Object); -// assert_eq!(VersionType::from_u8(2), VersionType::Delete); -// assert_eq!(VersionType::from_u8(99), VersionType::Invalid); -// } - -// #[test] -// fn test_erasure_algo_enum() { -// // Test ErasureAlgo enum methods -// assert!(ErasureAlgo::ReedSolomon.valid()); -// assert!(!ErasureAlgo::Invalid.valid()); - -// assert_eq!(ErasureAlgo::ReedSolomon.to_u8(), 1); -// assert_eq!(ErasureAlgo::Invalid.to_u8(), 0); - -// assert_eq!(ErasureAlgo::from_u8(1), ErasureAlgo::ReedSolomon); -// assert_eq!(ErasureAlgo::from_u8(99), ErasureAlgo::Invalid); - -// // Test Display trait -// assert_eq!(format!("{}", ErasureAlgo::ReedSolomon), "rs-vandermonde"); -// assert_eq!(format!("{}", ErasureAlgo::Invalid), "Invalid"); -// } - -// #[test] -// fn test_checksum_algo_enum() { -// // Test ChecksumAlgo enum methods -// assert!(ChecksumAlgo::HighwayHash.valid()); -// assert!(!ChecksumAlgo::Invalid.valid()); - -// assert_eq!(ChecksumAlgo::HighwayHash.to_u8(), 1); -// assert_eq!(ChecksumAlgo::Invalid.to_u8(), 0); - -// assert_eq!(ChecksumAlgo::from_u8(1), ChecksumAlgo::HighwayHash); -// assert_eq!(ChecksumAlgo::from_u8(99), ChecksumAlgo::Invalid); -// } - -// #[test] -// fn test_file_meta_version_header_methods() { -// let mut header = FileMetaVersionHeader { -// ec_n: 4, -// ec_m: 2, -// flags: XL_FLAG_FREE_VERSION, -// ..Default::default() -// }; - -// // Test has_ec -// assert!(header.has_ec()); - -// // Test free_version -// assert!(header.free_version()); - -// // Test user_data_dir (should be false by default) -// assert!(!header.user_data_dir()); - -// // Test with different flags -// header.flags = 0; -// assert!(!header.free_version()); -// } - -// #[test] -// fn test_file_meta_version_header_comparison() { -// let mut header1 = FileMetaVersionHeader { -// mod_time: Some(OffsetDateTime::from_unix_timestamp(1000).unwrap()), -// version_id: Some(Uuid::new_v4()), -// ..Default::default() -// }; - -// let mut header2 = FileMetaVersionHeader { -// mod_time: Some(OffsetDateTime::from_unix_timestamp(2000).unwrap()), -// version_id: Some(Uuid::new_v4()), -// ..Default::default() -// }; - -// // Test sorts_before - header2 should sort before header1 (newer mod_time) -// assert!(!header1.sorts_before(&header2)); -// assert!(header2.sorts_before(&header1)); - -// // Test matches_not_strict -// let header3 = header1.clone(); -// assert!(header1.matches_not_strict(&header3)); - -// // Test matches_ec -// header1.ec_n = 4; -// header1.ec_m = 2; -// header2.ec_n = 4; -// header2.ec_m = 2; -// assert!(header1.matches_ec(&header2)); - -// header2.ec_n = 6; -// assert!(!header1.matches_ec(&header2)); -// } - -// #[test] -// fn test_file_meta_version_methods() { -// // Test with object version -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(Uuid::new_v4()); -// fi.data_dir = Some(Uuid::new_v4()); -// fi.mod_time = Some(OffsetDateTime::now_utc()); - -// let version = FileMetaVersion::from(fi.clone()); - -// assert!(version.valid()); -// assert_eq!(version.get_version_id(), fi.version_id); -// assert_eq!(version.get_data_dir(), fi.data_dir); -// assert_eq!(version.get_mod_time(), fi.mod_time); -// assert!(!version.free_version()); - -// // Test with delete marker -// let mut delete_fi = FileInfo::new("test", 4, 2); -// delete_fi.deleted = true; -// delete_fi.version_id = Some(Uuid::new_v4()); -// delete_fi.mod_time = Some(OffsetDateTime::now_utc()); - -// let delete_version = FileMetaVersion::from(delete_fi); -// assert!(delete_version.valid()); -// assert_eq!(delete_version.version_type, VersionType::Delete); -// } - -// #[test] -// fn test_meta_object_methods() { -// let mut obj = MetaObject { -// data_dir: Some(Uuid::new_v4()), -// size: 1024, -// ..Default::default() -// }; - -// // Test use_data_dir -// assert!(obj.use_data_dir()); - -// obj.data_dir = None; -// assert!(obj.use_data_dir()); // use_data_dir always returns true - -// // Test use_inlinedata (currently always returns false) -// obj.size = 100; // Small size -// assert!(!obj.use_inlinedata()); - -// obj.size = 100000; // Large size -// assert!(!obj.use_inlinedata()); -// } - -// #[test] -// fn test_meta_delete_marker_methods() { -// let marker = MetaDeleteMarker::default(); - -// // Test free_version (should always return false for delete markers) -// assert!(!marker.free_version()); -// } - -// #[test] -// fn test_file_meta_latest_mod_time() { -// let mut fm = FileMeta::new(); - -// // Empty FileMeta should return None -// assert!(fm.lastest_mod_time().is_none()); - -// // Add versions with different mod times -// let time1 = OffsetDateTime::from_unix_timestamp(1000).unwrap(); -// let time2 = OffsetDateTime::from_unix_timestamp(2000).unwrap(); -// let time3 = OffsetDateTime::from_unix_timestamp(1500).unwrap(); - -// let mut fi1 = FileInfo::new("test1", 4, 2); -// fi1.mod_time = Some(time1); -// fm.add_version(fi1).unwrap(); - -// let mut fi2 = FileInfo::new("test2", 4, 2); -// fi2.mod_time = Some(time2); -// fm.add_version(fi2).unwrap(); - -// let mut fi3 = FileInfo::new("test3", 4, 2); -// fi3.mod_time = Some(time3); -// fm.add_version(fi3).unwrap(); - -// // Sort first to ensure latest is at the front -// fm.sort_by_mod_time(); - -// // Should return the first version's mod time (lastest_mod_time returns first version's time) -// assert_eq!(fm.lastest_mod_time(), fm.versions[0].header.mod_time); -// } - -// #[test] -// fn test_file_meta_shard_data_dir_count() { -// let mut fm = FileMeta::new(); -// let data_dir = Some(Uuid::new_v4()); - -// // Add versions with same data_dir -// for i in 0..3 { -// let mut fi = FileInfo::new(&format!("test{}", i), 4, 2); -// fi.data_dir = data_dir; -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi).unwrap(); -// } - -// // Add one version with different data_dir -// let mut fi_diff = FileInfo::new("test_diff", 4, 2); -// fi_diff.data_dir = Some(Uuid::new_v4()); -// fi_diff.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi_diff).unwrap(); - -// // Count should be 0 because user_data_dir() requires UsesDataDir flag to be set -// assert_eq!(fm.shard_data_dir_count(&None, &data_dir), 0); - -// // Count should be 0 for non-existent data_dir -// assert_eq!(fm.shard_data_dir_count(&None, &Some(Uuid::new_v4())), 0); -// } - -// #[test] -// fn test_file_meta_sort_by_mod_time() { -// let mut fm = FileMeta::new(); - -// let time1 = OffsetDateTime::from_unix_timestamp(3000).unwrap(); -// let time2 = OffsetDateTime::from_unix_timestamp(1000).unwrap(); -// let time3 = OffsetDateTime::from_unix_timestamp(2000).unwrap(); - -// // Add versions in non-chronological order -// let mut fi1 = FileInfo::new("test1", 4, 2); -// fi1.mod_time = Some(time1); -// fm.add_version(fi1).unwrap(); - -// let mut fi2 = FileInfo::new("test2", 4, 2); -// fi2.mod_time = Some(time2); -// fm.add_version(fi2).unwrap(); - -// let mut fi3 = FileInfo::new("test3", 4, 2); -// fi3.mod_time = Some(time3); -// fm.add_version(fi3).unwrap(); - -// // Sort by mod time -// fm.sort_by_mod_time(); - -// // Verify they are sorted (newest first) - add_version already sorts by insertion -// // The actual order depends on how add_version inserts them -// // Let's check the first version is the latest -// let latest_time = fm.versions.iter().map(|v| v.header.mod_time).max().flatten(); -// assert_eq!(fm.versions[0].header.mod_time, latest_time); -// } - -// #[test] -// fn test_file_meta_find_version() { -// let mut fm = FileMeta::new(); -// let version_id = Some(Uuid::new_v4()); - -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = version_id; -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi).unwrap(); - -// // Should find the version -// let result = fm.find_version(version_id); -// assert!(result.is_ok()); -// let (idx, version) = result.unwrap(); -// assert_eq!(idx, 0); -// assert_eq!(version.get_version_id(), version_id); - -// // Should not find non-existent version -// let non_existent_id = Some(Uuid::new_v4()); -// assert!(fm.find_version(non_existent_id).is_err()); -// } - -// #[test] -// fn test_file_meta_delete_version() { -// let mut fm = FileMeta::new(); -// let version_id = Some(Uuid::new_v4()); - -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = version_id; -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi.clone()).unwrap(); - -// assert_eq!(fm.versions.len(), 1); - -// // Delete the version -// let result = fm.delete_version(&fi); -// assert!(result.is_ok()); - -// // Version should be removed -// assert_eq!(fm.versions.len(), 0); -// } - -// #[test] -// fn test_file_meta_update_object_version() { -// let mut fm = FileMeta::new(); -// let version_id = Some(Uuid::new_v4()); - -// // Add initial version -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = version_id; -// fi.size = 1024; -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi.clone()).unwrap(); - -// // Update with new metadata (size is not updated by update_object_version) -// let mut metadata = HashMap::new(); -// metadata.insert("test-key".to_string(), "test-value".to_string()); -// fi.metadata = Some(metadata.clone()); -// let result = fm.update_object_version(fi); -// assert!(result.is_ok()); - -// // Verify the metadata was updated -// let (_, updated_version) = fm.find_version(version_id).unwrap(); -// if let Some(obj) = updated_version.object { -// assert_eq!(obj.size, 1024); // Size remains unchanged -// assert_eq!(obj.meta_user, Some(metadata)); // Metadata is updated -// } else { -// panic!("Expected object version"); -// } -// } - -// #[test] -// fn test_file_info_opts() { -// let opts = FileInfoOpts { data: true }; -// assert!(opts.data); - -// let opts_no_data = FileInfoOpts { data: false }; -// assert!(!opts_no_data.data); -// } - -// #[test] -// fn test_decode_data_dir_from_meta() { -// // Test with valid metadata containing data_dir -// let data_dir = Some(Uuid::new_v4()); -// let obj = MetaObject { -// data_dir, -// mod_time: Some(OffsetDateTime::now_utc()), -// erasure_algorithm: ErasureAlgo::ReedSolomon, -// bitrot_checksum_algo: ChecksumAlgo::HighwayHash, -// ..Default::default() -// }; - -// // Create a valid FileMetaVersion with the object -// let version = FileMetaVersion { -// version_type: VersionType::Object, -// object: Some(obj), -// ..Default::default() -// }; - -// let encoded = version.marshal_msg().unwrap(); -// let result = FileMetaVersion::decode_data_dir_from_meta(&encoded); -// assert!(result.is_ok()); -// assert_eq!(result.unwrap(), data_dir); - -// // Test with invalid metadata -// let invalid_data = vec![0u8; 10]; -// let result = FileMetaVersion::decode_data_dir_from_meta(&invalid_data); -// assert!(result.is_err()); -// } - -// #[test] -// fn test_is_latest_delete_marker() { -// // Test the is_latest_delete_marker function with simple data -// // Since the function is complex and requires specific XL format, -// // we'll test with empty data which should return false -// let empty_data = vec![]; -// assert!(!FileMeta::is_latest_delete_marker(&empty_data)); - -// // Test with invalid data -// let invalid_data = vec![1, 2, 3, 4, 5]; -// assert!(!FileMeta::is_latest_delete_marker(&invalid_data)); -// } - -// #[test] -// fn test_merge_file_meta_versions_basic() { -// // Test basic merge functionality -// let mut version1 = FileMetaShallowVersion::default(); -// version1.header.version_id = Some(Uuid::new_v4()); -// version1.header.mod_time = Some(OffsetDateTime::from_unix_timestamp(1000).unwrap()); - -// let mut version2 = FileMetaShallowVersion::default(); -// version2.header.version_id = Some(Uuid::new_v4()); -// version2.header.mod_time = Some(OffsetDateTime::from_unix_timestamp(2000).unwrap()); - -// let versions = vec![ -// vec![version1.clone(), version2.clone()], -// vec![version1.clone()], -// vec![version2.clone()], -// ]; - -// let merged = merge_file_meta_versions(2, false, 10, &versions); - -// // Should return versions that appear in at least quorum (2) sources -// assert!(!merged.is_empty()); +// f.write_str("\n") // } // } -// #[tokio::test] -// async fn test_read_xl_meta_no_data() { -// use tokio::fs; -// use tokio::fs::File; -// use tokio::io::AsyncWriteExt; - -// let mut fm = FileMeta::new(); - -// let (m, n) = (3, 2); - -// for i in 0..5 { -// let mut fi = FileInfo::new(i.to_string().as_str(), m, n); -// fi.mod_time = Some(OffsetDateTime::now_utc()); - -// fm.add_version(fi).unwrap(); -// } - -// // Use marshal_msg to create properly formatted data with XL headers -// let buff = fm.marshal_msg().unwrap(); - -// let filepath = "./test_xl.meta"; - -// let mut file = File::create(filepath).await.unwrap(); -// file.write_all(&buff).await.unwrap(); - -// let mut f = File::open(filepath).await.unwrap(); - -// let stat = f.metadata().await.unwrap(); - -// let data = read_xl_meta_no_data(&mut f, stat.len() as usize).await.unwrap(); - -// let mut newfm = FileMeta::default(); -// newfm.unmarshal_msg(&data).unwrap(); - -// fs::remove_file(filepath).await.unwrap(); - -// assert_eq!(fm, newfm) -// } - -// #[tokio::test] -// async fn test_get_file_info() { -// // Test get_file_info function -// let mut fm = FileMeta::new(); -// let version_id = Uuid::new_v4(); - -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(version_id); -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi).unwrap(); - -// let encoded = fm.marshal_msg().unwrap(); - -// let opts = FileInfoOpts { data: false }; -// let result = get_file_info(&encoded, "test-volume", "test-path", &version_id.to_string(), opts).await; - -// assert!(result.is_ok()); -// let file_info = result.unwrap(); -// assert_eq!(file_info.volume, "test-volume"); -// assert_eq!(file_info.name, "test-path"); -// } - -// #[tokio::test] -// async fn test_file_info_from_raw() { -// // Test file_info_from_raw function -// let mut fm = FileMeta::new(); -// let mut fi = FileInfo::new("test", 4, 2); -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi).unwrap(); - -// let encoded = fm.marshal_msg().unwrap(); - -// let raw_info = RawFileInfo { buf: encoded }; - -// let result = file_info_from_raw(raw_info, "test-bucket", "test-object", false).await; -// assert!(result.is_ok()); - -// let file_info = result.unwrap(); -// assert_eq!(file_info.volume, "test-bucket"); -// assert_eq!(file_info.name, "test-object"); -// } - -// // Additional comprehensive tests for better coverage - -// #[test] -// fn test_file_meta_load_function() { -// // Test FileMeta::load function -// let mut fm = FileMeta::new(); -// let mut fi = FileInfo::new("test", 4, 2); -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi).unwrap(); - -// let encoded = fm.marshal_msg().unwrap(); - -// // Test successful load -// let loaded_fm = FileMeta::load(&encoded); -// assert!(loaded_fm.is_ok()); -// assert_eq!(loaded_fm.unwrap(), fm); - -// // Test load with invalid data -// let invalid_data = vec![0u8; 10]; -// let result = FileMeta::load(&invalid_data); -// assert!(result.is_err()); -// } - -// #[test] -// fn test_file_meta_read_bytes_header() { -// // Create a real FileMeta and marshal it to get proper format -// let mut fm = FileMeta::new(); -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(Uuid::new_v4()); -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi).unwrap(); - -// let marshaled = fm.marshal_msg().unwrap(); - -// // First call check_xl2_v1 to get the buffer after XL header validation -// let (after_xl_header, _major, _minor) = FileMeta::check_xl2_v1(&marshaled).unwrap(); - -// // Ensure we have at least 5 bytes for read_bytes_header -// if after_xl_header.len() < 5 { -// panic!("Buffer too small: {} bytes, need at least 5", after_xl_header.len()); -// } - -// // Now call read_bytes_header on the remaining buffer -// let result = FileMeta::read_bytes_header(after_xl_header); -// assert!(result.is_ok()); -// let (length, remaining) = result.unwrap(); - -// // The length should be greater than 0 for real data -// assert!(length > 0); -// // remaining should be everything after the 5-byte header -// assert_eq!(remaining.len(), after_xl_header.len() - 5); - -// // Test with buffer too small -// let small_buf = vec![0u8; 2]; -// let result = FileMeta::read_bytes_header(&small_buf); -// assert!(result.is_err()); -// } - -// #[test] -// fn test_file_meta_get_set_idx() { -// let mut fm = FileMeta::new(); -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(Uuid::new_v4()); -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi).unwrap(); - -// // Test get_idx -// let result = fm.get_idx(0); -// assert!(result.is_ok()); - -// // Test get_idx with invalid index -// let result = fm.get_idx(10); -// assert!(result.is_err()); - -// // Test set_idx -// let new_version = FileMetaVersion { -// version_type: VersionType::Object, -// ..Default::default() -// }; -// let result = fm.set_idx(0, new_version); -// assert!(result.is_ok()); - -// // Test set_idx with invalid index -// let invalid_version = FileMetaVersion::default(); -// let result = fm.set_idx(10, invalid_version); -// assert!(result.is_err()); -// } - -// #[test] -// fn test_file_meta_into_fileinfo() { -// let mut fm = FileMeta::new(); -// let version_id = Uuid::new_v4(); -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(version_id); -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi).unwrap(); - -// // Test into_fileinfo with valid version_id -// let result = fm.into_fileinfo("test-volume", "test-path", &version_id.to_string(), false, false); -// assert!(result.is_ok()); -// let file_info = result.unwrap(); -// assert_eq!(file_info.volume, "test-volume"); -// assert_eq!(file_info.name, "test-path"); - -// // Test into_fileinfo with invalid version_id -// let invalid_id = Uuid::new_v4(); -// let result = fm.into_fileinfo("test-volume", "test-path", &invalid_id.to_string(), false, false); -// assert!(result.is_err()); - -// // Test into_fileinfo with empty version_id (should get latest) -// let result = fm.into_fileinfo("test-volume", "test-path", "", false, false); -// assert!(result.is_ok()); -// } - -// #[test] -// fn test_file_meta_into_file_info_versions() { -// let mut fm = FileMeta::new(); - -// // Add multiple versions -// for i in 0..3 { -// let mut fi = FileInfo::new(&format!("test{}", i), 4, 2); -// fi.version_id = Some(Uuid::new_v4()); -// fi.mod_time = Some(OffsetDateTime::from_unix_timestamp(1000 + i).unwrap()); -// fm.add_version(fi).unwrap(); -// } - -// let result = fm.into_file_info_versions("test-volume", "test-path", false); -// assert!(result.is_ok()); -// let versions = result.unwrap(); -// assert_eq!(versions.versions.len(), 3); -// } - -// #[test] -// fn test_file_meta_shallow_version_to_fileinfo() { -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(Uuid::new_v4()); -// fi.mod_time = Some(OffsetDateTime::now_utc()); - -// let version = FileMetaVersion::from(fi.clone()); -// let shallow_version = FileMetaShallowVersion::try_from(version).unwrap(); - -// let result = shallow_version.to_fileinfo("test-volume", "test-path", fi.version_id, false); -// assert!(result.is_ok()); -// let converted_fi = result.unwrap(); -// assert_eq!(converted_fi.volume, "test-volume"); -// assert_eq!(converted_fi.name, "test-path"); -// } - -// #[test] -// fn test_file_meta_version_try_from_bytes() { -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(Uuid::new_v4()); -// let version = FileMetaVersion::from(fi); -// let encoded = version.marshal_msg().unwrap(); - -// // Test successful conversion -// let result = FileMetaVersion::try_from(encoded.as_slice()); -// assert!(result.is_ok()); - -// // Test with invalid data -// let invalid_data = vec![0u8; 5]; -// let result = FileMetaVersion::try_from(invalid_data.as_slice()); -// assert!(result.is_err()); -// } - -// #[test] -// fn test_file_meta_version_try_from_shallow() { -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(Uuid::new_v4()); -// let version = FileMetaVersion::from(fi); -// let shallow = FileMetaShallowVersion::try_from(version.clone()).unwrap(); - -// let result = FileMetaVersion::try_from(shallow); -// assert!(result.is_ok()); -// let converted = result.unwrap(); -// assert_eq!(converted.get_version_id(), version.get_version_id()); -// } - -// #[test] -// fn test_file_meta_version_header_from_version() { -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(Uuid::new_v4()); -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// let version = FileMetaVersion::from(fi.clone()); - -// let header = FileMetaVersionHeader::from(version); -// assert_eq!(header.version_id, fi.version_id); -// assert_eq!(header.mod_time, fi.mod_time); -// } - -// #[test] -// fn test_meta_object_into_fileinfo() { -// let obj = MetaObject { -// version_id: Some(Uuid::new_v4()), -// size: 1024, -// mod_time: Some(OffsetDateTime::now_utc()), -// ..Default::default() -// }; - -// let version_id = obj.version_id; -// let expected_version_id = version_id; -// let file_info = obj.into_fileinfo("test-volume", "test-path", version_id, false); -// assert_eq!(file_info.volume, "test-volume"); -// assert_eq!(file_info.name, "test-path"); -// assert_eq!(file_info.size, 1024); -// assert_eq!(file_info.version_id, expected_version_id); -// } - -// #[test] -// fn test_meta_object_from_fileinfo() { -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(Uuid::new_v4()); -// fi.data_dir = Some(Uuid::new_v4()); -// fi.size = 2048; -// fi.mod_time = Some(OffsetDateTime::now_utc()); - -// let obj = MetaObject::from(fi.clone()); -// assert_eq!(obj.version_id, fi.version_id); -// assert_eq!(obj.data_dir, fi.data_dir); -// assert_eq!(obj.size, fi.size); -// assert_eq!(obj.mod_time, fi.mod_time); -// } - -// #[test] -// fn test_meta_delete_marker_into_fileinfo() { -// let marker = MetaDeleteMarker { -// version_id: Some(Uuid::new_v4()), -// mod_time: Some(OffsetDateTime::now_utc()), -// ..Default::default() -// }; - -// let version_id = marker.version_id; -// let expected_version_id = version_id; -// let file_info = marker.into_fileinfo("test-volume", "test-path", version_id, false); -// assert_eq!(file_info.volume, "test-volume"); -// assert_eq!(file_info.name, "test-path"); -// assert_eq!(file_info.version_id, expected_version_id); -// assert!(file_info.deleted); -// } - -// #[test] -// fn test_meta_delete_marker_from_fileinfo() { -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(Uuid::new_v4()); -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fi.deleted = true; - -// let marker = MetaDeleteMarker::from(fi.clone()); -// assert_eq!(marker.version_id, fi.version_id); -// assert_eq!(marker.mod_time, fi.mod_time); -// } - -// #[test] -// fn test_flags_enum() { -// // Test Flags enum values -// assert_eq!(Flags::FreeVersion as u8, 1); -// assert_eq!(Flags::UsesDataDir as u8, 2); -// assert_eq!(Flags::InlineData as u8, 4); -// } - -// #[test] -// fn test_file_meta_version_header_user_data_dir() { -// let header = FileMetaVersionHeader { -// flags: 0, -// ..Default::default() -// }; - -// // Test without UsesDataDir flag -// assert!(!header.user_data_dir()); - -// // Test with UsesDataDir flag -// let header = FileMetaVersionHeader { -// flags: Flags::UsesDataDir as u8, -// ..Default::default() -// }; -// assert!(header.user_data_dir()); - -// // Test with multiple flags including UsesDataDir -// let header = FileMetaVersionHeader { -// flags: Flags::UsesDataDir as u8 | Flags::FreeVersion as u8, -// ..Default::default() -// }; -// assert!(header.user_data_dir()); -// } - -// #[test] -// fn test_file_meta_version_header_ordering() { -// let header1 = FileMetaVersionHeader { -// mod_time: Some(OffsetDateTime::from_unix_timestamp(1000).unwrap()), -// version_id: Some(Uuid::new_v4()), -// ..Default::default() -// }; - -// let header2 = FileMetaVersionHeader { -// mod_time: Some(OffsetDateTime::from_unix_timestamp(2000).unwrap()), -// version_id: Some(Uuid::new_v4()), -// ..Default::default() -// }; - -// // Test partial_cmp -// assert!(header1.partial_cmp(&header2).is_some()); - -// // Test cmp - header2 should be greater (newer) -// use std::cmp::Ordering; -// assert_eq!(header1.cmp(&header2), Ordering::Less); // header1 has earlier time -// assert_eq!(header2.cmp(&header1), Ordering::Greater); // header2 has later time -// assert_eq!(header1.cmp(&header1), Ordering::Equal); -// } - -// #[test] -// fn test_merge_file_meta_versions_edge_cases() { -// // Test with empty versions -// let empty_versions: Vec> = vec![]; -// let merged = merge_file_meta_versions(1, false, 10, &empty_versions); -// assert!(merged.is_empty()); - -// // Test with quorum larger than available sources -// let mut version = FileMetaShallowVersion::default(); -// version.header.version_id = Some(Uuid::new_v4()); -// let versions = vec![vec![version]]; -// let merged = merge_file_meta_versions(5, false, 10, &versions); -// assert!(merged.is_empty()); - -// // Test strict mode -// let mut version1 = FileMetaShallowVersion::default(); -// version1.header.version_id = Some(Uuid::new_v4()); -// version1.header.mod_time = Some(OffsetDateTime::from_unix_timestamp(1000).unwrap()); - -// let mut version2 = FileMetaShallowVersion::default(); -// version2.header.version_id = Some(Uuid::new_v4()); -// version2.header.mod_time = Some(OffsetDateTime::from_unix_timestamp(2000).unwrap()); - -// let versions = vec![vec![version1.clone()], vec![version2.clone()]]; - -// let _merged_strict = merge_file_meta_versions(1, true, 10, &versions); -// let merged_non_strict = merge_file_meta_versions(1, false, 10, &versions); - -// // In strict mode, behavior might be different -// assert!(!merged_non_strict.is_empty()); -// } - -// #[tokio::test] -// async fn test_read_more_function() { -// use std::io::Cursor; - -// let data = b"Hello, World! This is test data."; -// let mut reader = Cursor::new(data); -// let mut buf = vec![0u8; 10]; - -// // Test reading more data -// let result = read_more(&mut reader, &mut buf, 33, 20, false).await; -// assert!(result.is_ok()); -// assert_eq!(buf.len(), 20); - -// // Test with has_full = true and buffer already has enough data -// let mut reader2 = Cursor::new(data); -// let mut buf2 = vec![0u8; 5]; -// let result = read_more(&mut reader2, &mut buf2, 10, 5, true).await; -// assert!(result.is_ok()); -// assert_eq!(buf2.len(), 5); // Should remain 5 since has >= read_size - -// // Test reading beyond available data -// let mut reader3 = Cursor::new(b"short"); -// let mut buf3 = vec![0u8; 2]; -// let result = read_more(&mut reader3, &mut buf3, 100, 98, false).await; -// // Should handle gracefully even if not enough data -// assert!(result.is_ok() || result.is_err()); // Either is acceptable -// } - -// #[tokio::test] -// async fn test_read_xl_meta_no_data_edge_cases() { -// use std::io::Cursor; - -// // Test with empty data -// let empty_data = vec![]; -// let mut reader = Cursor::new(empty_data); -// let result = read_xl_meta_no_data(&mut reader, 0).await; -// assert!(result.is_err()); // Should fail because buffer is empty - -// // Test with very small size (should fail because it's not valid XL format) -// let small_data = vec![1, 2, 3]; -// let mut reader = Cursor::new(small_data); -// let result = read_xl_meta_no_data(&mut reader, 3).await; -// assert!(result.is_err()); // Should fail because data is too small for XL format -// } - -// #[tokio::test] -// async fn test_get_file_info_edge_cases() { -// // Test with empty buffer -// let empty_buf = vec![]; -// let opts = FileInfoOpts { data: false }; -// let result = get_file_info(&empty_buf, "volume", "path", "version", opts).await; -// assert!(result.is_err()); - -// // Test with invalid version_id format -// let mut fm = FileMeta::new(); -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(Uuid::new_v4()); -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi).unwrap(); -// let encoded = fm.marshal_msg().unwrap(); - -// let opts = FileInfoOpts { data: false }; -// let result = get_file_info(&encoded, "volume", "path", "invalid-uuid", opts).await; -// assert!(result.is_err()); -// } - -// #[tokio::test] -// async fn test_file_info_from_raw_edge_cases() { -// // Test with empty buffer -// let empty_raw = RawFileInfo { buf: vec![] }; -// let result = file_info_from_raw(empty_raw, "bucket", "object", false).await; -// assert!(result.is_err()); - -// // Test with invalid buffer -// let invalid_raw = RawFileInfo { -// buf: vec![1, 2, 3, 4, 5], -// }; -// let result = file_info_from_raw(invalid_raw, "bucket", "object", false).await; -// assert!(result.is_err()); -// } - -// #[test] -// fn test_file_meta_version_invalid_cases() { -// // Test invalid version -// let version = FileMetaVersion { -// version_type: VersionType::Invalid, -// ..Default::default() -// }; -// assert!(!version.valid()); - -// // Test version with neither object nor delete marker -// let version = FileMetaVersion { -// version_type: VersionType::Object, -// object: None, -// delete_marker: None, -// ..Default::default() -// }; -// assert!(!version.valid()); -// } - -// #[test] -// fn test_meta_object_edge_cases() { -// let obj = MetaObject { -// data_dir: None, -// ..Default::default() -// }; - -// // Test use_data_dir with None (use_data_dir always returns true) -// assert!(obj.use_data_dir()); - -// // Test use_inlinedata (always returns false in current implementation) -// let obj = MetaObject { -// size: 128 * 1024, // 128KB threshold -// ..Default::default() -// }; -// assert!(!obj.use_inlinedata()); // Should be false - -// let obj = MetaObject { -// size: 128 * 1024 - 1, -// ..Default::default() -// }; -// assert!(!obj.use_inlinedata()); // Should also be false (always false) -// } - -// #[test] -// fn test_file_meta_version_header_edge_cases() { -// let header = FileMetaVersionHeader { -// ec_n: 0, -// ec_m: 0, -// ..Default::default() -// }; - -// // Test has_ec with zero values -// assert!(!header.has_ec()); - -// // Test matches_not_strict with different signatures but same version_id -// let version_id = Some(Uuid::new_v4()); -// let header = FileMetaVersionHeader { -// version_id, -// version_type: VersionType::Object, -// signature: [1, 2, 3, 4], -// ..Default::default() -// }; -// let other = FileMetaVersionHeader { -// version_id, -// version_type: VersionType::Object, -// signature: [5, 6, 7, 8], -// ..Default::default() -// }; -// // Should match because they have same version_id and type -// assert!(header.matches_not_strict(&other)); - -// // Test sorts_before with same mod_time but different version_id -// let time = OffsetDateTime::from_unix_timestamp(1000).unwrap(); -// let header_time1 = FileMetaVersionHeader { -// mod_time: Some(time), -// version_id: Some(Uuid::new_v4()), -// ..Default::default() -// }; -// let header_time2 = FileMetaVersionHeader { -// mod_time: Some(time), -// version_id: Some(Uuid::new_v4()), -// ..Default::default() -// }; - -// // Should use version_id for comparison when mod_time is same -// let sorts_before = header_time1.sorts_before(&header_time2); -// assert!(sorts_before || header_time2.sorts_before(&header_time1)); // One should sort before the other -// } - -// #[test] -// fn test_file_meta_add_version_edge_cases() { -// let mut fm = FileMeta::new(); - -// // Test adding version with same version_id (should update) -// let version_id = Some(Uuid::new_v4()); -// let mut fi1 = FileInfo::new("test1", 4, 2); -// fi1.version_id = version_id; -// fi1.size = 1024; -// fi1.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi1).unwrap(); - -// let mut fi2 = FileInfo::new("test2", 4, 2); -// fi2.version_id = version_id; -// fi2.size = 2048; -// fi2.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi2).unwrap(); - -// // Should still have only one version, but updated -// assert_eq!(fm.versions.len(), 1); -// let (_, version) = fm.find_version(version_id).unwrap(); -// if let Some(obj) = version.object { -// assert_eq!(obj.size, 2048); // Size gets updated when adding same version_id -// } -// } - -// #[test] -// fn test_file_meta_delete_version_edge_cases() { -// let mut fm = FileMeta::new(); - -// // Test deleting non-existent version -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = Some(Uuid::new_v4()); - -// let result = fm.delete_version(&fi); -// assert!(result.is_err()); // Should fail for non-existent version -// } - -// #[test] -// fn test_file_meta_shard_data_dir_count_edge_cases() { -// let mut fm = FileMeta::new(); - -// // Test with None data_dir parameter -// let count = fm.shard_data_dir_count(&None, &None); -// assert_eq!(count, 0); - -// // Test with version_id parameter (not None) -// let version_id = Some(Uuid::new_v4()); -// let data_dir = Some(Uuid::new_v4()); - -// let mut fi = FileInfo::new("test", 4, 2); -// fi.version_id = version_id; -// fi.data_dir = data_dir; -// fi.mod_time = Some(OffsetDateTime::now_utc()); -// fm.add_version(fi).unwrap(); - -// let count = fm.shard_data_dir_count(&version_id, &data_dir); -// assert_eq!(count, 0); // Should be 0 because user_data_dir() requires flag - -// // Test with different version_id -// let other_version_id = Some(Uuid::new_v4()); -// let count = fm.shard_data_dir_count(&other_version_id, &data_dir); -// assert_eq!(count, 1); // Should be 1 because the version has matching data_dir and user_data_dir() is true -// } +#[derive(Serialize, Deserialize, Debug, Default, PartialEq, Clone, Eq, PartialOrd, Ord)] +pub struct FileMetaShallowVersion { + pub header: FileMetaVersionHeader, + pub meta: Vec, // FileMetaVersion.marshal_msg +} + +impl FileMetaShallowVersion { + pub fn to_fileinfo(&self, volume: &str, path: &str, version_id: Option, all_parts: bool) -> Result { + let file_version = FileMetaVersion::try_from(self.meta.as_slice())?; + + Ok(file_version.to_fileinfo(volume, path, version_id, all_parts)) + } +} + +impl TryFrom for FileMetaShallowVersion { + type Error = Error; + + fn try_from(value: FileMetaVersion) -> std::result::Result { + let header = value.header(); + let meta = value.marshal_msg()?; + Ok(Self { meta, header }) + } +} + +#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)] +pub struct FileMetaVersion { + pub version_type: VersionType, + pub object: Option, + pub delete_marker: Option, + pub write_version: u64, // rustfs version +} + +impl FileMetaVersion { + pub fn valid(&self) -> bool { + if !self.version_type.valid() { + return false; + } + + match self.version_type { + VersionType::Object => self + .object + .as_ref() + .map(|v| v.erasure_algorithm.valid() && v.bitrot_checksum_algo.valid() && v.mod_time.is_some()) + .unwrap_or_default(), + VersionType::Delete => self + .delete_marker + .as_ref() + .map(|v| v.mod_time.unwrap_or(OffsetDateTime::UNIX_EPOCH) > OffsetDateTime::UNIX_EPOCH) + .unwrap_or_default(), + _ => false, + } + } + + pub fn get_data_dir(&self) -> Option { + self.valid() + .then(|| { + if self.version_type == VersionType::Object { + self.object.as_ref().map(|v| v.data_dir).unwrap_or_default() + } else { + None + } + }) + .unwrap_or_default() + } + + pub fn get_version_id(&self) -> Option { + match self.version_type { + VersionType::Object | VersionType::Delete => self.object.as_ref().map(|v| v.version_id).unwrap_or_default(), + _ => None, + } + } + + pub fn get_mod_time(&self) -> Option { + match self.version_type { + VersionType::Object => self.object.as_ref().map(|v| v.mod_time).unwrap_or_default(), + VersionType::Delete => self.delete_marker.as_ref().map(|v| v.mod_time).unwrap_or_default(), + _ => None, + } + } + + // decode_data_dir_from_meta 从 meta 中读取 data_dir TODO: 直接从 meta buf 中只解析出 data_dir, msg.skip + pub fn decode_data_dir_from_meta(buf: &[u8]) -> Result> { + let mut ver = Self::default(); + ver.unmarshal_msg(buf)?; + + let data_dir = ver.object.map(|v| v.data_dir).unwrap_or_default(); + Ok(data_dir) + } + + pub fn unmarshal_msg(&mut self, buf: &[u8]) -> Result { + let mut cur = Cursor::new(buf); + + let mut fields_len = rmp::decode::read_map_len(&mut cur)?; + + while fields_len > 0 { + fields_len -= 1; + + // println!("unmarshal_msg fields idx {}", fields_len); + + let str_len = rmp::decode::read_str_len(&mut cur)?; + + // println!("unmarshal_msg fields name len() {}", &str_len); + + // !!!Vec::with_capacity(str_len) 失败,vec! 正常 + let mut field_buff = vec![0u8; str_len as usize]; + + cur.read_exact(&mut field_buff)?; + + let field = String::from_utf8(field_buff)?; + + // println!("unmarshal_msg fields name {}", &field); + + match field.as_str() { + "Type" => { + let u: u8 = rmp::decode::read_int(&mut cur)?; + self.version_type = VersionType::from_u8(u); + } + + "V2Obj" => { + // is_nil() + if buf[cur.position() as usize] == 0xc0 { + rmp::decode::read_nil(&mut cur)?; + } else { + // let buf = unsafe { cur.position() }; + let mut obj = MetaObject::default(); + // let start = cur.position(); + + let (_, remain) = buf.split_at(cur.position() as usize); + + let read_len = obj.unmarshal_msg(remain)?; + cur.set_position(cur.position() + read_len); + + self.object = Some(obj); + } + } + "DelObj" => { + if buf[cur.position() as usize] == 0xc0 { + rmp::decode::read_nil(&mut cur)?; + } else { + // let buf = unsafe { cur.position() }; + let mut obj = MetaDeleteMarker::default(); + // let start = cur.position(); + + let (_, remain) = buf.split_at(cur.position() as usize); + let read_len = obj.unmarshal_msg(remain)?; + cur.set_position(cur.position() + read_len); + + self.delete_marker = Some(obj); + } + } + "v" => { + self.write_version = rmp::decode::read_int(&mut cur)?; + } + name => return Err(Error::msg(format!("not suport field name {}", name))), + } + } + + Ok(cur.position()) + } + + pub fn marshal_msg(&self) -> Result> { + let mut len: u32 = 4; + let mut mask: u8 = 0; + + if self.object.is_none() { + len -= 1; + mask |= 0x2; + } + if self.delete_marker.is_none() { + len -= 1; + mask |= 0x4; + } + + let mut wr = Vec::new(); + + // 字段数量 + rmp::encode::write_map_len(&mut wr, len)?; + + // write "Type" + rmp::encode::write_str(&mut wr, "Type")?; + rmp::encode::write_uint(&mut wr, self.version_type.to_u8() as u64)?; + + if (mask & 0x2) == 0 { + // write V2Obj + rmp::encode::write_str(&mut wr, "V2Obj")?; + if self.object.is_none() { + let _ = rmp::encode::write_nil(&mut wr); + } else { + let buf = self.object.as_ref().unwrap().marshal_msg()?; + wr.write_all(&buf)?; + } + } + + if (mask & 0x4) == 0 { + // write "DelObj" + rmp::encode::write_str(&mut wr, "DelObj")?; + if self.delete_marker.is_none() { + let _ = rmp::encode::write_nil(&mut wr); + } else { + let buf = self.delete_marker.as_ref().unwrap().marshal_msg()?; + wr.write_all(&buf)?; + } + } + + // write "v" + rmp::encode::write_str(&mut wr, "v")?; + rmp::encode::write_uint(&mut wr, self.write_version)?; + + Ok(wr) + } + + pub fn free_version(&self) -> bool { + self.version_type == VersionType::Delete && self.delete_marker.as_ref().map(|m| m.free_version()).unwrap_or_default() + } + + pub fn header(&self) -> FileMetaVersionHeader { + FileMetaVersionHeader::from(self.clone()) + } + + pub fn to_fileinfo(&self, volume: &str, path: &str, version_id: Option, all_parts: bool) -> FileInfo { + match self.version_type { + VersionType::Invalid => FileInfo { + name: path.to_string(), + volume: volume.to_string(), + version_id, + ..Default::default() + }, + VersionType::Object => self + .object + .as_ref() + .unwrap() + .clone() + .into_fileinfo(volume, path, version_id, all_parts), + VersionType::Delete => self + .delete_marker + .as_ref() + .unwrap() + .clone() + .into_fileinfo(volume, path, version_id, all_parts), + } + } +} + +impl TryFrom<&[u8]> for FileMetaVersion { + type Error = Error; + + fn try_from(value: &[u8]) -> std::result::Result { + let mut ver = FileMetaVersion::default(); + ver.unmarshal_msg(value)?; + Ok(ver) + } +} + +impl From for FileMetaVersion { + fn from(value: FileInfo) -> Self { + { + if value.deleted { + FileMetaVersion { + version_type: VersionType::Delete, + delete_marker: Some(MetaDeleteMarker::from(value)), + object: None, + write_version: 0, + } + } else { + FileMetaVersion { + version_type: VersionType::Object, + delete_marker: None, + object: Some(MetaObject::from(value)), + write_version: 0, + } + } + } + } +} + +impl TryFrom for FileMetaVersion { + type Error = Error; + + fn try_from(value: FileMetaShallowVersion) -> std::result::Result { + FileMetaVersion::try_from(value.meta.as_slice()) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Default, Clone, Eq, Hash)] +pub struct FileMetaVersionHeader { + pub version_id: Option, + pub mod_time: Option, + pub signature: [u8; 4], + pub version_type: VersionType, + pub flags: u8, + pub ec_n: u8, + pub ec_m: u8, +} + +impl FileMetaVersionHeader { + pub fn has_ec(&self) -> bool { + self.ec_m > 0 && self.ec_n > 0 + } + + pub fn matches_not_strict(&self, o: &FileMetaVersionHeader) -> bool { + let mut ok = self.version_id == o.version_id && self.version_type == o.version_type && self.matches_ec(o); + if self.version_id.is_none() { + ok = ok && self.mod_time == o.mod_time; + } + + ok + } + + pub fn matches_ec(&self, o: &FileMetaVersionHeader) -> bool { + if self.has_ec() && o.has_ec() { + return self.ec_n == o.ec_n && self.ec_m == o.ec_m; + } + + true + } + + pub fn free_version(&self) -> bool { + self.flags & XL_FLAG_FREE_VERSION != 0 + } + + pub fn sorts_before(&self, o: &FileMetaVersionHeader) -> bool { + if self == o { + return false; + } + + // Prefer newest modtime. + if self.mod_time != o.mod_time { + return self.mod_time > o.mod_time; + } + + match self.mod_time.cmp(&o.mod_time) { + Ordering::Greater => { + return true; + } + Ordering::Less => { + return false; + } + _ => {} + } + + // The following doesn't make too much sense, but we want sort to be consistent nonetheless. + // Prefer lower types + if self.version_type != o.version_type { + return self.version_type < o.version_type; + } + // Consistent sort on signature + match self.version_id.cmp(&o.version_id) { + Ordering::Greater => { + return true; + } + Ordering::Less => { + return false; + } + _ => {} + } + + if self.flags != o.flags { + return self.flags > o.flags; + } + + false + } + + pub fn user_data_dir(&self) -> bool { + self.flags & Flags::UsesDataDir as u8 != 0 + } + #[tracing::instrument] + pub fn marshal_msg(&self) -> Result> { + let mut wr = Vec::new(); + + // array len 7 + rmp::encode::write_array_len(&mut wr, 7)?; + + // version_id + rmp::encode::write_bin(&mut wr, self.version_id.unwrap_or_default().as_bytes())?; + // mod_time + rmp::encode::write_i64(&mut wr, self.mod_time.unwrap_or(OffsetDateTime::UNIX_EPOCH).unix_timestamp_nanos() as i64)?; + // signature + rmp::encode::write_bin(&mut wr, self.signature.as_slice())?; + // version_type + rmp::encode::write_uint8(&mut wr, self.version_type.to_u8())?; + // flags + rmp::encode::write_uint8(&mut wr, self.flags)?; + // ec_n + rmp::encode::write_uint8(&mut wr, self.ec_n)?; + // ec_m + rmp::encode::write_uint8(&mut wr, self.ec_m)?; + + Ok(wr) + } + + pub fn unmarshal_msg(&mut self, buf: &[u8]) -> Result { + let mut cur = Cursor::new(buf); + let alen = rmp::decode::read_array_len(&mut cur)?; + if alen != 7 { + return Err(Error::msg(format!("version header array len err need 7 got {}", alen))); + } + + // version_id + rmp::decode::read_bin_len(&mut cur)?; + let mut buf = [0u8; 16]; + cur.read_exact(&mut buf)?; + self.version_id = { + let id = Uuid::from_bytes(buf); + if id.is_nil() { None } else { Some(id) } + }; + + // mod_time + let unix: i128 = rmp::decode::read_int(&mut cur)?; + + let time = OffsetDateTime::from_unix_timestamp_nanos(unix)?; + if time == OffsetDateTime::UNIX_EPOCH { + self.mod_time = None; + } else { + self.mod_time = Some(time); + } + + // signature + rmp::decode::read_bin_len(&mut cur)?; + cur.read_exact(&mut self.signature)?; + + // version_type + let typ: u8 = rmp::decode::read_int(&mut cur)?; + self.version_type = VersionType::from_u8(typ); + + // flags + self.flags = rmp::decode::read_int(&mut cur)?; + // ec_n + self.ec_n = rmp::decode::read_int(&mut cur)?; + // ec_m + self.ec_m = rmp::decode::read_int(&mut cur)?; + + Ok(cur.position()) + } +} + +impl PartialOrd for FileMetaVersionHeader { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for FileMetaVersionHeader { + fn cmp(&self, other: &Self) -> Ordering { + match self.mod_time.cmp(&other.mod_time) { + Ordering::Equal => {} + ord => return ord, + } + + match self.version_type.cmp(&other.version_type) { + Ordering::Equal => {} + ord => return ord, + } + match self.signature.cmp(&other.signature) { + Ordering::Equal => {} + ord => return ord, + } + match self.version_id.cmp(&other.version_id) { + Ordering::Equal => {} + ord => return ord, + } + self.flags.cmp(&other.flags) + } +} + +impl From for FileMetaVersionHeader { + fn from(value: FileMetaVersion) -> Self { + let flags = { + let mut f: u8 = 0; + if value.free_version() { + f |= Flags::FreeVersion as u8; + } + + if value.version_type == VersionType::Object && value.object.as_ref().map(|v| v.use_data_dir()).unwrap_or_default() { + f |= Flags::UsesDataDir as u8; + } + + if value.version_type == VersionType::Object && value.object.as_ref().map(|v| v.use_inlinedata()).unwrap_or_default() + { + f |= Flags::InlineData as u8; + } + + f + }; + + let (ec_n, ec_m) = { + if value.version_type == VersionType::Object && value.object.is_some() { + ( + value.object.as_ref().unwrap().erasure_n as u8, + value.object.as_ref().unwrap().erasure_m as u8, + ) + } else { + (0, 0) + } + }; + + Self { + version_id: value.get_version_id(), + mod_time: value.get_mod_time(), + signature: [0, 0, 0, 0], + version_type: value.version_type, + flags, + ec_n, + ec_m, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] +// 因为自定义 message_pack,所以一定要保证字段顺序 +pub struct MetaObject { + pub version_id: Option, // Version ID + pub data_dir: Option, // Data dir ID + pub erasure_algorithm: ErasureAlgo, // Erasure coding algorithm + pub erasure_m: usize, // Erasure data blocks + pub erasure_n: usize, // Erasure parity blocks + pub erasure_block_size: usize, // Erasure block size + pub erasure_index: usize, // Erasure disk index + pub erasure_dist: Vec, // Erasure distribution + pub bitrot_checksum_algo: ChecksumAlgo, // Bitrot checksum algo + pub part_numbers: Vec, // Part Numbers + pub part_etags: Option>, // Part ETags + pub part_sizes: Vec, // Part Sizes + pub part_actual_sizes: Option>, // Part ActualSizes (compression) + pub part_indices: Option>>, // Part Indexes (compression) + pub size: usize, // Object version size + pub mod_time: Option, // Object version modified time + pub meta_sys: Option>>, // Object version internal metadata + pub meta_user: Option>, // Object version metadata set by user +} + +impl MetaObject { + pub fn unmarshal_msg(&mut self, buf: &[u8]) -> Result { + let mut cur = Cursor::new(buf); + + let mut fields_len = rmp::decode::read_map_len(&mut cur)?; + + // let mut ret = Self::default(); + + while fields_len > 0 { + fields_len -= 1; + + // println!("unmarshal_msg fields idx {}", fields_len); + + let str_len = rmp::decode::read_str_len(&mut cur)?; + + // println!("unmarshal_msg fields name len() {}", &str_len); + + // !!!Vec::with_capacity(str_len) 失败,vec! 正常 + let mut field_buff = vec![0u8; str_len as usize]; + + cur.read_exact(&mut field_buff)?; + + let field = String::from_utf8(field_buff)?; + + // println!("unmarshal_msg fields name {}", &field); + + match field.as_str() { + "ID" => { + rmp::decode::read_bin_len(&mut cur)?; + let mut buf = [0u8; 16]; + cur.read_exact(&mut buf)?; + self.version_id = { + let id = Uuid::from_bytes(buf); + if id.is_nil() { None } else { Some(id) } + }; + } + "DDir" => { + rmp::decode::read_bin_len(&mut cur)?; + let mut buf = [0u8; 16]; + cur.read_exact(&mut buf)?; + self.data_dir = { + let id = Uuid::from_bytes(buf); + if id.is_nil() { None } else { Some(id) } + }; + } + "EcAlgo" => { + let u: u8 = rmp::decode::read_int(&mut cur)?; + self.erasure_algorithm = ErasureAlgo::from_u8(u) + } + "EcM" => { + self.erasure_m = rmp::decode::read_int(&mut cur)?; + } + "EcN" => { + self.erasure_n = rmp::decode::read_int(&mut cur)?; + } + "EcBSize" => { + self.erasure_block_size = rmp::decode::read_int(&mut cur)?; + } + "EcIndex" => { + self.erasure_index = rmp::decode::read_int(&mut cur)?; + } + "EcDist" => { + let alen = rmp::decode::read_array_len(&mut cur)? as usize; + self.erasure_dist = vec![0u8; alen]; + for i in 0..alen { + self.erasure_dist[i] = rmp::decode::read_int(&mut cur)?; + } + } + "CSumAlgo" => { + let u: u8 = rmp::decode::read_int(&mut cur)?; + self.bitrot_checksum_algo = ChecksumAlgo::from_u8(u) + } + "PartNums" => { + let alen = rmp::decode::read_array_len(&mut cur)? as usize; + self.part_numbers = vec![0; alen]; + for i in 0..alen { + self.part_numbers[i] = rmp::decode::read_int(&mut cur)?; + } + } + "PartETags" => { + let array_len = match rmp::decode::read_nil(&mut cur) { + Ok(_) => None, + Err(e) => match e { + rmp::decode::ValueReadError::TypeMismatch(marker) => match marker { + Marker::FixArray(l) => Some(l as usize), + Marker::Array16 => Some(rmp::decode::read_u16(&mut cur)? as usize), + Marker::Array32 => Some(rmp::decode::read_u16(&mut cur)? as usize), + _ => return Err(Error::msg("PartETags parse failed")), + }, + _ => return Err(Error::msg("PartETags parse failed.")), + }, + }; + + if array_len.is_some() { + let l = array_len.unwrap(); + let mut etags = Vec::with_capacity(l); + for _ in 0..l { + let str_len = rmp::decode::read_str_len(&mut cur)?; + let mut field_buff = vec![0u8; str_len as usize]; + cur.read_exact(&mut field_buff)?; + etags.push(String::from_utf8(field_buff)?); + } + self.part_etags = Some(etags); + } + } + "PartSizes" => { + let alen = rmp::decode::read_array_len(&mut cur)? as usize; + self.part_sizes = vec![0; alen]; + for i in 0..alen { + self.part_sizes[i] = rmp::decode::read_int(&mut cur)?; + } + } + "PartASizes" => { + let array_len = match rmp::decode::read_nil(&mut cur) { + Ok(_) => None, + Err(e) => match e { + rmp::decode::ValueReadError::TypeMismatch(marker) => match marker { + Marker::FixArray(l) => Some(l as usize), + Marker::Array16 => Some(rmp::decode::read_u16(&mut cur)? as usize), + Marker::Array32 => Some(rmp::decode::read_u16(&mut cur)? as usize), + _ => return Err(Error::msg("PartETags parse failed")), + }, + _ => return Err(Error::msg("PartETags parse failed.")), + }, + }; + if let Some(l) = array_len { + let mut sizes = vec![0; l]; + for size in sizes.iter_mut().take(l) { + *size = rmp::decode::read_int(&mut cur)?; + } + // for size in sizes.iter_mut().take(l) { + // let tmp = rmp::decode::read_int(&mut cur)?; + // size = tmp; + // } + self.part_actual_sizes = Some(sizes); + } + } + "PartIdx" => { + let alen = rmp::decode::read_array_len(&mut cur)? as usize; + + if alen == 0 { + self.part_indices = None; + continue; + } + + let mut indices = Vec::with_capacity(alen); + for _ in 0..alen { + let blen = rmp::decode::read_bin_len(&mut cur)?; + let mut buf = vec![0u8; blen as usize]; + cur.read_exact(&mut buf)?; + + indices.push(buf); + } + + self.part_indices = Some(indices); + } + "Size" => { + self.size = rmp::decode::read_int(&mut cur)?; + } + "MTime" => { + let unix: i128 = rmp::decode::read_int(&mut cur)?; + let time = OffsetDateTime::from_unix_timestamp_nanos(unix)?; + if time == OffsetDateTime::UNIX_EPOCH { + self.mod_time = None; + } else { + self.mod_time = Some(time); + } + } + "MetaSys" => { + let len = match rmp::decode::read_nil(&mut cur) { + Ok(_) => None, + Err(e) => match e { + rmp::decode::ValueReadError::TypeMismatch(marker) => match marker { + Marker::FixMap(l) => Some(l as usize), + Marker::Map16 => Some(rmp::decode::read_u16(&mut cur)? as usize), + Marker::Map32 => Some(rmp::decode::read_u16(&mut cur)? as usize), + _ => return Err(Error::msg("MetaSys parse failed")), + }, + _ => return Err(Error::msg("MetaSys parse failed.")), + }, + }; + if len.is_some() { + let l = len.unwrap(); + let mut map = HashMap::new(); + for _ in 0..l { + let str_len = rmp::decode::read_str_len(&mut cur)?; + let mut field_buff = vec![0u8; str_len as usize]; + cur.read_exact(&mut field_buff)?; + let key = String::from_utf8(field_buff)?; + + let blen = rmp::decode::read_bin_len(&mut cur)?; + let mut val = vec![0u8; blen as usize]; + cur.read_exact(&mut val)?; + + map.insert(key, val); + } + + self.meta_sys = Some(map); + } + } + "MetaUsr" => { + let len = match rmp::decode::read_nil(&mut cur) { + Ok(_) => None, + Err(e) => match e { + rmp::decode::ValueReadError::TypeMismatch(marker) => match marker { + Marker::FixMap(l) => Some(l as usize), + Marker::Map16 => Some(rmp::decode::read_u16(&mut cur)? as usize), + Marker::Map32 => Some(rmp::decode::read_u16(&mut cur)? as usize), + _ => return Err(Error::msg("MetaUsr parse failed")), + }, + _ => return Err(Error::msg("MetaUsr parse failed.")), + }, + }; + if len.is_some() { + let l = len.unwrap(); + let mut map = HashMap::new(); + for _ in 0..l { + let str_len = rmp::decode::read_str_len(&mut cur)?; + let mut field_buff = vec![0u8; str_len as usize]; + cur.read_exact(&mut field_buff)?; + let key = String::from_utf8(field_buff)?; + + let blen = rmp::decode::read_str_len(&mut cur)?; + let mut val_buf = vec![0u8; blen as usize]; + cur.read_exact(&mut val_buf)?; + let val = String::from_utf8(val_buf)?; + + map.insert(key, val); + } + + self.meta_user = Some(map); + } + } + + name => return Err(Error::msg(format!("not suport field name {}", name))), + } + } + + Ok(cur.position()) + } + // marshal_msg 自定义 messagepack 命名与 go 一致 + pub fn marshal_msg(&self) -> Result> { + let mut len: u32 = 18; + let mut mask: u32 = 0; + + if self.part_indices.is_none() { + len -= 1; + mask |= 0x2000; + } + + let mut wr = Vec::new(); + + // 字段数量 + rmp::encode::write_map_len(&mut wr, len)?; + + // string "ID" + rmp::encode::write_str(&mut wr, "ID")?; + rmp::encode::write_bin(&mut wr, self.version_id.unwrap_or_default().as_bytes())?; + + // string "DDir" + rmp::encode::write_str(&mut wr, "DDir")?; + rmp::encode::write_bin(&mut wr, self.data_dir.unwrap_or_default().as_bytes())?; + + // string "EcAlgo" + rmp::encode::write_str(&mut wr, "EcAlgo")?; + rmp::encode::write_uint(&mut wr, self.erasure_algorithm.to_u8() as u64)?; + + // string "EcM" + rmp::encode::write_str(&mut wr, "EcM")?; + rmp::encode::write_uint(&mut wr, self.erasure_m.try_into().unwrap())?; + + // string "EcN" + rmp::encode::write_str(&mut wr, "EcN")?; + rmp::encode::write_uint(&mut wr, self.erasure_n.try_into().unwrap())?; + + // string "EcBSize" + rmp::encode::write_str(&mut wr, "EcBSize")?; + rmp::encode::write_uint(&mut wr, self.erasure_block_size.try_into().unwrap())?; + + // string "EcIndex" + rmp::encode::write_str(&mut wr, "EcIndex")?; + rmp::encode::write_uint(&mut wr, self.erasure_index.try_into().unwrap())?; + + // string "EcDist" + rmp::encode::write_str(&mut wr, "EcDist")?; + rmp::encode::write_array_len(&mut wr, self.erasure_dist.len() as u32)?; + for v in self.erasure_dist.iter() { + rmp::encode::write_uint(&mut wr, *v as _)?; + } + + // string "CSumAlgo" + rmp::encode::write_str(&mut wr, "CSumAlgo")?; + rmp::encode::write_uint(&mut wr, self.bitrot_checksum_algo.to_u8() as u64)?; + + // string "PartNums" + rmp::encode::write_str(&mut wr, "PartNums")?; + rmp::encode::write_array_len(&mut wr, self.part_numbers.len() as u32)?; + for v in self.part_numbers.iter() { + rmp::encode::write_uint(&mut wr, *v as _)?; + } + + // string "PartETags" + rmp::encode::write_str(&mut wr, "PartETags")?; + if self.part_etags.is_none() { + rmp::encode::write_nil(&mut wr)?; + } else { + let etags = self.part_etags.as_ref().unwrap(); + rmp::encode::write_array_len(&mut wr, etags.len() as u32)?; + for v in etags.iter() { + rmp::encode::write_str(&mut wr, v.as_str())?; + } + } + + // string "PartSizes" + rmp::encode::write_str(&mut wr, "PartSizes")?; + rmp::encode::write_array_len(&mut wr, self.part_sizes.len() as u32)?; + for v in self.part_sizes.iter() { + rmp::encode::write_uint(&mut wr, *v as _)?; + } + + // string "PartASizes" + rmp::encode::write_str(&mut wr, "PartASizes")?; + if self.part_actual_sizes.is_none() { + rmp::encode::write_nil(&mut wr)?; + } else { + let asizes = self.part_actual_sizes.as_ref().unwrap(); + rmp::encode::write_array_len(&mut wr, asizes.len() as u32)?; + for v in asizes.iter() { + rmp::encode::write_uint(&mut wr, *v as _)?; + } + } + + if (mask & 0x2000) == 0 { + // string "PartIdx" + rmp::encode::write_str(&mut wr, "PartIdx")?; + let indices = self.part_indices.as_ref().unwrap(); + rmp::encode::write_array_len(&mut wr, indices.len() as u32)?; + for v in indices.iter() { + rmp::encode::write_bin(&mut wr, v)?; + } + } + + // string "Size" + rmp::encode::write_str(&mut wr, "Size")?; + rmp::encode::write_uint(&mut wr, self.size.try_into().unwrap())?; + + // string "MTime" + rmp::encode::write_str(&mut wr, "MTime")?; + rmp::encode::write_uint( + &mut wr, + self.mod_time + .unwrap_or(OffsetDateTime::UNIX_EPOCH) + .unix_timestamp_nanos() + .try_into() + .unwrap(), + )?; + + // string "MetaSys" + rmp::encode::write_str(&mut wr, "MetaSys")?; + if self.meta_sys.is_none() { + rmp::encode::write_nil(&mut wr)?; + } else { + let metas = self.meta_sys.as_ref().unwrap(); + rmp::encode::write_map_len(&mut wr, metas.len() as u32)?; + for (k, v) in metas { + rmp::encode::write_str(&mut wr, k.as_str())?; + rmp::encode::write_bin(&mut wr, v)?; + } + } + + // string "MetaUsr" + rmp::encode::write_str(&mut wr, "MetaUsr")?; + if self.meta_user.is_none() { + rmp::encode::write_nil(&mut wr)?; + } else { + let metas = self.meta_user.as_ref().unwrap(); + rmp::encode::write_map_len(&mut wr, metas.len() as u32)?; + for (k, v) in metas { + rmp::encode::write_str(&mut wr, k.as_str())?; + rmp::encode::write_str(&mut wr, v.as_str())?; + } + } + + Ok(wr) + } + pub fn use_data_dir(&self) -> bool { + // TODO: when use inlinedata + true + } + + pub fn use_inlinedata(&self) -> bool { + // TODO: when use inlinedata + false + } + + pub fn into_fileinfo(self, volume: &str, path: &str, _version_id: Option, _all_parts: bool) -> FileInfo { + let version_id = self.version_id; + + let erasure = ErasureInfo { + algorithm: self.erasure_algorithm.to_string(), + data_blocks: self.erasure_m, + parity_blocks: self.erasure_n, + block_size: self.erasure_block_size, + index: self.erasure_index, + distribution: self.erasure_dist.iter().map(|&v| v as usize).collect(), + ..Default::default() + }; + + let mut parts = Vec::new(); + for (i, _) in self.part_numbers.iter().enumerate() { + parts.push(ObjectPartInfo { + number: self.part_numbers[i], + size: self.part_sizes[i], + ..Default::default() + }); + } + + let metadata = { + if let Some(metauser) = self.meta_user.as_ref() { + let mut m = HashMap::new(); + for (k, v) in metauser { + // TODO: skip xhttp x-amz-storage-class + m.insert(k.to_owned(), v.to_owned()); + } + Some(m) + } else { + None + } + }; + + FileInfo { + version_id, + erasure, + data_dir: self.data_dir, + mod_time: self.mod_time, + size: self.size, + name: path.to_string(), + volume: volume.to_string(), + parts, + metadata, + ..Default::default() + } + } +} + +impl From for MetaObject { + fn from(value: FileInfo) -> Self { + let part_numbers: Vec = value.parts.iter().map(|v| v.number).collect(); + let part_sizes: Vec = value.parts.iter().map(|v| v.size).collect(); + + Self { + version_id: value.version_id, + size: value.size, + mod_time: value.mod_time, + data_dir: value.data_dir, + erasure_algorithm: ErasureAlgo::ReedSolomon, + erasure_m: value.erasure.data_blocks, + erasure_n: value.erasure.parity_blocks, + erasure_block_size: value.erasure.block_size, + erasure_index: value.erasure.index, + erasure_dist: value.erasure.distribution.iter().map(|x| *x as u8).collect(), + bitrot_checksum_algo: ChecksumAlgo::HighwayHash, + part_numbers, + part_etags: None, // TODO: add part_etags + part_sizes, + part_actual_sizes: None, // TODO: add part_etags + part_indices: None, + meta_sys: None, + meta_user: value.metadata.clone(), + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] +pub struct MetaDeleteMarker { + pub version_id: Option, // Version ID for delete marker + pub mod_time: Option, // Object delete marker modified time + pub meta_sys: Option>>, // Delete marker internal metadata +} + +impl MetaDeleteMarker { + pub fn free_version(&self) -> bool { + self.meta_sys + .as_ref() + .map(|v| v.get(FREE_VERSION_META_HEADER).is_some()) + .unwrap_or_default() + } + + pub fn into_fileinfo(self, volume: &str, path: &str, version_id: Option, _all_parts: bool) -> FileInfo { + FileInfo { + name: path.to_string(), + volume: volume.to_string(), + version_id, + deleted: true, + mod_time: self.mod_time, + ..Default::default() + } + } + + pub fn unmarshal_msg(&mut self, buf: &[u8]) -> Result { + let mut cur = Cursor::new(buf); + + let mut fields_len = rmp::decode::read_map_len(&mut cur)?; + + while fields_len > 0 { + fields_len -= 1; + + let str_len = rmp::decode::read_str_len(&mut cur)?; + + // !!!Vec::with_capacity(str_len) 失败,vec! 正常 + let mut field_buff = vec![0u8; str_len as usize]; + + cur.read_exact(&mut field_buff)?; + + let field = String::from_utf8(field_buff)?; + + match field.as_str() { + "ID" => { + rmp::decode::read_bin_len(&mut cur)?; + let mut buf = [0u8; 16]; + cur.read_exact(&mut buf)?; + self.version_id = { + let id = Uuid::from_bytes(buf); + if id.is_nil() { None } else { Some(id) } + }; + } + + "MTime" => { + let unix: i64 = rmp::decode::read_int(&mut cur)?; + let time = OffsetDateTime::from_unix_timestamp(unix)?; + if time == OffsetDateTime::UNIX_EPOCH { + self.mod_time = None; + } else { + self.mod_time = Some(time); + } + } + "MetaSys" => { + let l = rmp::decode::read_map_len(&mut cur)?; + let mut map = HashMap::new(); + for _ in 0..l { + let str_len = rmp::decode::read_str_len(&mut cur)?; + let mut field_buff = vec![0u8; str_len as usize]; + cur.read_exact(&mut field_buff)?; + let key = String::from_utf8(field_buff)?; + + let blen = rmp::decode::read_bin_len(&mut cur)?; + let mut val = vec![0u8; blen as usize]; + cur.read_exact(&mut val)?; + + map.insert(key, val); + } + + self.meta_sys = Some(map); + } + name => return Err(Error::msg(format!("not suport field name {}", name))), + } + } + + Ok(cur.position()) + } + + pub fn marshal_msg(&self) -> Result> { + let mut len: u32 = 3; + let mut mask: u8 = 0; + + if self.meta_sys.is_none() { + len -= 1; + mask |= 0x4; + } + + let mut wr = Vec::new(); + + // 字段数量 + rmp::encode::write_map_len(&mut wr, len)?; + + // string "ID" + rmp::encode::write_str(&mut wr, "ID")?; + rmp::encode::write_bin(&mut wr, self.version_id.unwrap_or_default().as_bytes())?; + + // string "MTime" + rmp::encode::write_str(&mut wr, "MTime")?; + rmp::encode::write_uint( + &mut wr, + self.mod_time + .unwrap_or(OffsetDateTime::UNIX_EPOCH) + .unix_timestamp() + .try_into() + .unwrap(), + )?; + + if (mask & 0x4) == 0 { + let metas = self.meta_sys.as_ref().unwrap(); + rmp::encode::write_map_len(&mut wr, metas.len() as u32)?; + for (k, v) in metas { + rmp::encode::write_str(&mut wr, k.as_str())?; + rmp::encode::write_bin(&mut wr, v)?; + } + } + + Ok(wr) + } +} + +impl From for MetaDeleteMarker { + fn from(value: FileInfo) -> Self { + Self { + version_id: value.version_id, + mod_time: value.mod_time, + meta_sys: None, + } + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Default, Clone, PartialOrd, Ord, Hash)] +pub enum VersionType { + #[default] + Invalid = 0, + Object = 1, + Delete = 2, + // Legacy = 3, +} + +impl VersionType { + pub fn valid(&self) -> bool { + matches!(*self, VersionType::Object | VersionType::Delete) + } + + pub fn to_u8(&self) -> u8 { + match self { + VersionType::Invalid => 0, + VersionType::Object => 1, + VersionType::Delete => 2, + } + } + + pub fn from_u8(n: u8) -> Self { + match n { + 1 => VersionType::Object, + 2 => VersionType::Delete, + _ => VersionType::Invalid, + } + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Default, Clone)] +pub enum ErasureAlgo { + #[default] + Invalid = 0, + ReedSolomon = 1, +} + +impl ErasureAlgo { + pub fn valid(&self) -> bool { + *self > ErasureAlgo::Invalid + } + pub fn to_u8(&self) -> u8 { + match self { + ErasureAlgo::Invalid => 0, + ErasureAlgo::ReedSolomon => 1, + } + } + + pub fn from_u8(u: u8) -> Self { + match u { + 1 => ErasureAlgo::ReedSolomon, + _ => ErasureAlgo::Invalid, + } + } +} + +impl Display for ErasureAlgo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ErasureAlgo::Invalid => write!(f, "Invalid"), + ErasureAlgo::ReedSolomon => write!(f, "{}", ERASURE_ALGORITHM), + } + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Default, Clone)] +pub enum ChecksumAlgo { + #[default] + Invalid = 0, + HighwayHash = 1, +} + +impl ChecksumAlgo { + pub fn valid(&self) -> bool { + *self > ChecksumAlgo::Invalid + } + pub fn to_u8(&self) -> u8 { + match self { + ChecksumAlgo::Invalid => 0, + ChecksumAlgo::HighwayHash => 1, + } + } + pub fn from_u8(u: u8) -> Self { + match u { + 1 => ChecksumAlgo::HighwayHash, + _ => ChecksumAlgo::Invalid, + } + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Default, Clone)] +pub enum Flags { + #[default] + FreeVersion = 1 << 0, + UsesDataDir = 1 << 1, + InlineData = 1 << 2, +} + +const FREE_VERSION_META_HEADER: &str = "free-version"; + +// mergeXLV2Versions +pub fn merge_file_meta_versions( + mut quorum: usize, + mut strict: bool, + requested_versions: usize, + versions: &[Vec], +) -> Vec { + if quorum == 0 { + quorum = 1; + } + + if versions.len() < quorum || versions.is_empty() { + return Vec::new(); + } + + if versions.len() == 1 { + return versions[0].clone(); + } + + if quorum == 1 { + strict = true; + } + + let mut versions = versions.to_owned(); + + let mut n_versions = 0; + + let mut merged = Vec::new(); + loop { + let mut tops = Vec::new(); + let mut top_sig = FileMetaVersionHeader::default(); + let mut consistent = true; + for vers in versions.iter() { + if vers.is_empty() { + consistent = false; + continue; + } + if tops.is_empty() { + consistent = true; + top_sig = vers[0].header.clone(); + } else { + consistent = consistent && vers[0].header == top_sig; + } + tops.push(vers[0].clone()); + } + + // check if done... + if tops.len() < quorum { + break; + } + + let mut latest = FileMetaShallowVersion::default(); + if consistent { + merged.push(tops[0].clone()); + if tops[0].header.free_version() { + n_versions += 1; + } + } else { + let mut lastest_count = 0; + for (i, ver) in tops.iter().enumerate() { + if ver.header == latest.header { + lastest_count += 1; + continue; + } + + if i == 0 || ver.header.sorts_before(&latest.header) { + if i == 0 || lastest_count == 0 { + lastest_count = 1; + } else if !strict && ver.header.matches_not_strict(&latest.header) { + lastest_count += 1; + } else { + lastest_count = 1; + } + latest = ver.clone(); + continue; + } + + // Mismatch, but older. + if lastest_count > 0 && !strict && ver.header.matches_not_strict(&latest.header) { + lastest_count += 1; + continue; + } + + if lastest_count > 0 && ver.header.version_id == latest.header.version_id { + let mut x: HashMap = HashMap::new(); + for a in tops.iter() { + if a.header.version_id != ver.header.version_id { + continue; + } + let mut a_clone = a.clone(); + if !strict { + a_clone.header.signature = [0; 4]; + } + *x.entry(a_clone.header).or_insert(1) += 1; + } + lastest_count = 0; + for (k, v) in x.iter() { + if *v < lastest_count { + continue; + } + if *v == lastest_count && latest.header.sorts_before(k) { + continue; + } + tops.iter().for_each(|a| { + let mut hdr = a.header.clone(); + if !strict { + hdr.signature = [0; 4]; + } + if hdr == *k { + latest = a.clone(); + } + }); + + lastest_count = *v; + } + break; + } + } + if lastest_count >= quorum { + if !latest.header.free_version() { + n_versions += 1; + } + merged.push(latest.clone()); + } + } + + // Remove from all streams up until latest modtime or if selected. + versions.iter_mut().for_each(|vers| { + // // Keep top entry (and remaining)... + let mut bre = false; + vers.retain(|ver| { + if bre { + return true; + } + if let Ordering::Greater = ver.header.mod_time.cmp(&latest.header.mod_time) { + bre = true; + return false; + } + if ver.header == latest.header { + bre = true; + return false; + } + if let Ordering::Equal = latest.header.version_id.cmp(&ver.header.version_id) { + bre = true; + return false; + } + for merged_v in merged.iter() { + if let Ordering::Equal = ver.header.version_id.cmp(&merged_v.header.version_id) { + bre = true; + return false; + } + } + true + }); + }); + if requested_versions > 0 && requested_versions == n_versions { + merged.append(&mut versions[0]); + break; + } + } + + // Sanity check. Enable if duplicates show up. + // todo + merged +} + +pub async fn file_info_from_raw(ri: RawFileInfo, bucket: &str, object: &str, read_data: bool) -> Result { + get_file_info(&ri.buf, bucket, object, "", FileInfoOpts { data: read_data }).await +} + +pub struct FileInfoOpts { + pub data: bool, +} + +pub async fn get_file_info(buf: &[u8], volume: &str, path: &str, version_id: &str, opts: FileInfoOpts) -> Result { + let vid = { + if version_id.is_empty() { + None + } else { + Some(Uuid::parse_str(version_id)?) + } + }; + + let meta = FileMeta::load(buf)?; + if meta.versions.is_empty() { + return Ok(FileInfo { + volume: volume.to_owned(), + name: path.to_owned(), + version_id: vid, + is_latest: true, + deleted: true, + mod_time: Some(OffsetDateTime::from_unix_timestamp(1)?), + ..Default::default() + }); + } + + let fi = meta.into_fileinfo(volume, path, version_id, opts.data, true)?; + Ok(fi) +} + +async fn read_more( + reader: &mut R, + buf: &mut Vec, + total_size: usize, + read_size: usize, + has_full: bool, +) -> Result<()> { + use tokio::io::AsyncReadExt; + let has = buf.len(); + + if has >= read_size { + return Ok(()); + } + + if has_full || read_size > total_size { + return Err(Error::new(io::Error::new(io::ErrorKind::UnexpectedEof, "Unexpected EOF"))); + } + + let extra = read_size - has; + if buf.capacity() >= read_size { + // Extend the buffer if we have enough space. + buf.resize(read_size, 0); + } else { + buf.extend(vec![0u8; extra]); + } + + reader.read_exact(&mut buf[has..]).await?; + Ok(()) +} + +pub async fn read_xl_meta_no_data(reader: &mut R, size: usize) -> Result> { + use tokio::io::AsyncReadExt; + + let mut initial = size; + let mut has_full = true; + + if initial > META_DATA_READ_DEFAULT { + initial = META_DATA_READ_DEFAULT; + has_full = false; + } + + let mut buf = vec![0u8; initial]; + reader.read_exact(&mut buf).await?; + + let (tmp_buf, major, minor) = FileMeta::check_xl2_v1(&buf)?; + + match major { + 1 => match minor { + 0 => { + read_more(reader, &mut buf, size, size, has_full).await?; + Ok(buf) + } + 1..=3 => { + let (sz, tmp_buf) = FileMeta::read_bytes_header(tmp_buf)?; + let mut want = sz as usize + (buf.len() - tmp_buf.len()); + + if minor < 2 { + read_more(reader, &mut buf, size, want, has_full).await?; + return Ok(buf[..want].to_vec()); + } + + let want_max = usize::min(want + MSGP_UINT32_SIZE, size); + read_more(reader, &mut buf, size, want_max, has_full).await?; + + if buf.len() < want { + error!("read_xl_meta_no_data buffer too small (length: {}, needed: {})", &buf.len(), want); + return Err(Error::new(DiskError::FileCorrupt)); + } + + let tmp = &buf[want..]; + let crc_size = 5; + let other_size = tmp.len() - crc_size; + + want += tmp.len() - other_size; + + Ok(buf[..want].to_vec()) + } + _ => Err(Error::new(io::Error::new(io::ErrorKind::InvalidData, "Unknown minor metadata version"))), + }, + _ => Err(Error::new(io::Error::new(io::ErrorKind::InvalidData, "Unknown major metadata version"))), + } +} +#[cfg(test)] +#[allow(clippy::field_reassign_with_default)] +mod test { + use super::*; + + #[test] + fn test_new_file_meta() { + let mut fm = FileMeta::new(); + + let (m, n) = (3, 2); + + for i in 0..5 { + let mut fi = FileInfo::new(i.to_string().as_str(), m, n); + fi.mod_time = Some(OffsetDateTime::now_utc()); + + fm.add_version(fi).unwrap(); + } + + let buff = fm.marshal_msg().unwrap(); + + let mut newfm = FileMeta::default(); + newfm.unmarshal_msg(&buff).unwrap(); + + assert_eq!(fm, newfm) + } + + #[test] + fn test_marshal_metaobject() { + let obj = MetaObject { + data_dir: Some(Uuid::new_v4()), + ..Default::default() + }; + + // println!("obj {:?}", &obj); + + let encoded = obj.marshal_msg().unwrap(); + + let mut obj2 = MetaObject::default(); + obj2.unmarshal_msg(&encoded).unwrap(); + + // println!("obj2 {:?}", &obj2); + + assert_eq!(obj, obj2); + assert_eq!(obj.data_dir, obj2.data_dir); + } + + #[test] + fn test_marshal_metadeletemarker() { + let obj = MetaDeleteMarker { + version_id: Some(Uuid::new_v4()), + ..Default::default() + }; + + // println!("obj {:?}", &obj); + + let encoded = obj.marshal_msg().unwrap(); + + let mut obj2 = MetaDeleteMarker::default(); + obj2.unmarshal_msg(&encoded).unwrap(); + + // println!("obj2 {:?}", &obj2); + + assert_eq!(obj, obj2); + assert_eq!(obj.version_id, obj2.version_id); + } + + #[test] + #[tracing::instrument] + fn test_marshal_metaversion() { + let mut fi = FileInfo::new("test", 3, 2); + fi.version_id = Some(Uuid::new_v4()); + fi.mod_time = Some(OffsetDateTime::from_unix_timestamp(OffsetDateTime::now_utc().unix_timestamp()).unwrap()); + let mut obj = FileMetaVersion::from(fi); + obj.write_version = 110; + + // println!("obj {:?}", &obj); + + let encoded = obj.marshal_msg().unwrap(); + + let mut obj2 = FileMetaVersion::default(); + obj2.unmarshal_msg(&encoded).unwrap(); + + // println!("obj2 {:?}", &obj2); + + // 时间截不一致 - - + assert_eq!(obj, obj2); + assert_eq!(obj.get_version_id(), obj2.get_version_id()); + assert_eq!(obj.write_version, obj2.write_version); + assert_eq!(obj.write_version, 110); + } + + #[test] + #[tracing::instrument] + fn test_marshal_metaversionheader() { + let mut obj = FileMetaVersionHeader::default(); + let vid = Some(Uuid::new_v4()); + obj.version_id = vid; + + let encoded = obj.marshal_msg().unwrap(); + + let mut obj2 = FileMetaVersionHeader::default(); + obj2.unmarshal_msg(&encoded).unwrap(); + + // 时间截不一致 - - + assert_eq!(obj, obj2); + assert_eq!(obj.version_id, obj2.version_id); + assert_eq!(obj.version_id, vid); + } + + // New comprehensive tests for utility functions and validation + + #[test] + fn test_xl_file_header_constants() { + // Test XL file header constants + assert_eq!(XL_FILE_HEADER, [b'X', b'L', b'2', b' ']); + assert_eq!(XL_FILE_VERSION_MAJOR, 1); + assert_eq!(XL_FILE_VERSION_MINOR, 3); + assert_eq!(XL_HEADER_VERSION, 3); + assert_eq!(XL_META_VERSION, 2); + } + + #[test] + fn test_is_xl2_v1_format() { + // Test valid XL2 V1 format + let mut valid_buf = vec![0u8; 20]; + valid_buf[0..4].copy_from_slice(&XL_FILE_HEADER); + byteorder::LittleEndian::write_u16(&mut valid_buf[4..6], 1); + byteorder::LittleEndian::write_u16(&mut valid_buf[6..8], 0); + + assert!(FileMeta::is_xl2_v1_format(&valid_buf)); + + // Test invalid format - wrong header + let invalid_buf = vec![0u8; 20]; + assert!(!FileMeta::is_xl2_v1_format(&invalid_buf)); + + // Test buffer too small + let small_buf = vec![0u8; 4]; + assert!(!FileMeta::is_xl2_v1_format(&small_buf)); + } + + #[test] + fn test_check_xl2_v1() { + // Test valid XL2 V1 check + let mut valid_buf = vec![0u8; 20]; + valid_buf[0..4].copy_from_slice(&XL_FILE_HEADER); + byteorder::LittleEndian::write_u16(&mut valid_buf[4..6], 1); + byteorder::LittleEndian::write_u16(&mut valid_buf[6..8], 2); + + let result = FileMeta::check_xl2_v1(&valid_buf); + assert!(result.is_ok()); + let (remaining, major, minor) = result.unwrap(); + assert_eq!(major, 1); + assert_eq!(minor, 2); + assert_eq!(remaining.len(), 12); // 20 - 8 + + // Test buffer too small + let small_buf = vec![0u8; 4]; + assert!(FileMeta::check_xl2_v1(&small_buf).is_err()); + + // Test wrong header + let mut wrong_header = vec![0u8; 20]; + wrong_header[0..4].copy_from_slice(b"ABCD"); + assert!(FileMeta::check_xl2_v1(&wrong_header).is_err()); + + // Test version too high + let mut high_version = vec![0u8; 20]; + high_version[0..4].copy_from_slice(&XL_FILE_HEADER); + byteorder::LittleEndian::write_u16(&mut high_version[4..6], 99); + byteorder::LittleEndian::write_u16(&mut high_version[6..8], 0); + assert!(FileMeta::check_xl2_v1(&high_version).is_err()); + } + + #[test] + fn test_version_type_enum() { + // Test VersionType enum methods + assert!(VersionType::Object.valid()); + assert!(VersionType::Delete.valid()); + assert!(!VersionType::Invalid.valid()); + + assert_eq!(VersionType::Object.to_u8(), 1); + assert_eq!(VersionType::Delete.to_u8(), 2); + assert_eq!(VersionType::Invalid.to_u8(), 0); + + assert_eq!(VersionType::from_u8(1), VersionType::Object); + assert_eq!(VersionType::from_u8(2), VersionType::Delete); + assert_eq!(VersionType::from_u8(99), VersionType::Invalid); + } + + #[test] + fn test_erasure_algo_enum() { + // Test ErasureAlgo enum methods + assert!(ErasureAlgo::ReedSolomon.valid()); + assert!(!ErasureAlgo::Invalid.valid()); + + assert_eq!(ErasureAlgo::ReedSolomon.to_u8(), 1); + assert_eq!(ErasureAlgo::Invalid.to_u8(), 0); + + assert_eq!(ErasureAlgo::from_u8(1), ErasureAlgo::ReedSolomon); + assert_eq!(ErasureAlgo::from_u8(99), ErasureAlgo::Invalid); + + // Test Display trait + assert_eq!(format!("{}", ErasureAlgo::ReedSolomon), "rs-vandermonde"); + assert_eq!(format!("{}", ErasureAlgo::Invalid), "Invalid"); + } + + #[test] + fn test_checksum_algo_enum() { + // Test ChecksumAlgo enum methods + assert!(ChecksumAlgo::HighwayHash.valid()); + assert!(!ChecksumAlgo::Invalid.valid()); + + assert_eq!(ChecksumAlgo::HighwayHash.to_u8(), 1); + assert_eq!(ChecksumAlgo::Invalid.to_u8(), 0); + + assert_eq!(ChecksumAlgo::from_u8(1), ChecksumAlgo::HighwayHash); + assert_eq!(ChecksumAlgo::from_u8(99), ChecksumAlgo::Invalid); + } + + #[test] + fn test_file_meta_version_header_methods() { + let mut header = FileMetaVersionHeader { + ec_n: 4, + ec_m: 2, + flags: XL_FLAG_FREE_VERSION, + ..Default::default() + }; + + // Test has_ec + assert!(header.has_ec()); + + // Test free_version + assert!(header.free_version()); + + // Test user_data_dir (should be false by default) + assert!(!header.user_data_dir()); + + // Test with different flags + header.flags = 0; + assert!(!header.free_version()); + } + + #[test] + fn test_file_meta_version_header_comparison() { + let mut header1 = FileMetaVersionHeader { + mod_time: Some(OffsetDateTime::from_unix_timestamp(1000).unwrap()), + version_id: Some(Uuid::new_v4()), + ..Default::default() + }; + + let mut header2 = FileMetaVersionHeader { + mod_time: Some(OffsetDateTime::from_unix_timestamp(2000).unwrap()), + version_id: Some(Uuid::new_v4()), + ..Default::default() + }; + + // Test sorts_before - header2 should sort before header1 (newer mod_time) + assert!(!header1.sorts_before(&header2)); + assert!(header2.sorts_before(&header1)); + + // Test matches_not_strict + let header3 = header1.clone(); + assert!(header1.matches_not_strict(&header3)); + + // Test matches_ec + header1.ec_n = 4; + header1.ec_m = 2; + header2.ec_n = 4; + header2.ec_m = 2; + assert!(header1.matches_ec(&header2)); + + header2.ec_n = 6; + assert!(!header1.matches_ec(&header2)); + } + + #[test] + fn test_file_meta_version_methods() { + // Test with object version + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(Uuid::new_v4()); + fi.data_dir = Some(Uuid::new_v4()); + fi.mod_time = Some(OffsetDateTime::now_utc()); + + let version = FileMetaVersion::from(fi.clone()); + + assert!(version.valid()); + assert_eq!(version.get_version_id(), fi.version_id); + assert_eq!(version.get_data_dir(), fi.data_dir); + assert_eq!(version.get_mod_time(), fi.mod_time); + assert!(!version.free_version()); + + // Test with delete marker + let mut delete_fi = FileInfo::new("test", 4, 2); + delete_fi.deleted = true; + delete_fi.version_id = Some(Uuid::new_v4()); + delete_fi.mod_time = Some(OffsetDateTime::now_utc()); + + let delete_version = FileMetaVersion::from(delete_fi); + assert!(delete_version.valid()); + assert_eq!(delete_version.version_type, VersionType::Delete); + } + + #[test] + fn test_meta_object_methods() { + let mut obj = MetaObject { + data_dir: Some(Uuid::new_v4()), + size: 1024, + ..Default::default() + }; + + // Test use_data_dir + assert!(obj.use_data_dir()); + + obj.data_dir = None; + assert!(obj.use_data_dir()); // use_data_dir always returns true + + // Test use_inlinedata (currently always returns false) + obj.size = 100; // Small size + assert!(!obj.use_inlinedata()); + + obj.size = 100000; // Large size + assert!(!obj.use_inlinedata()); + } + + #[test] + fn test_meta_delete_marker_methods() { + let marker = MetaDeleteMarker::default(); + + // Test free_version (should always return false for delete markers) + assert!(!marker.free_version()); + } + + #[test] + fn test_file_meta_latest_mod_time() { + let mut fm = FileMeta::new(); + + // Empty FileMeta should return None + assert!(fm.lastest_mod_time().is_none()); + + // Add versions with different mod times + let time1 = OffsetDateTime::from_unix_timestamp(1000).unwrap(); + let time2 = OffsetDateTime::from_unix_timestamp(2000).unwrap(); + let time3 = OffsetDateTime::from_unix_timestamp(1500).unwrap(); + + let mut fi1 = FileInfo::new("test1", 4, 2); + fi1.mod_time = Some(time1); + fm.add_version(fi1).unwrap(); + + let mut fi2 = FileInfo::new("test2", 4, 2); + fi2.mod_time = Some(time2); + fm.add_version(fi2).unwrap(); + + let mut fi3 = FileInfo::new("test3", 4, 2); + fi3.mod_time = Some(time3); + fm.add_version(fi3).unwrap(); + + // Sort first to ensure latest is at the front + fm.sort_by_mod_time(); + + // Should return the first version's mod time (lastest_mod_time returns first version's time) + assert_eq!(fm.lastest_mod_time(), fm.versions[0].header.mod_time); + } + + #[test] + fn test_file_meta_shard_data_dir_count() { + let mut fm = FileMeta::new(); + let data_dir = Some(Uuid::new_v4()); + + // Add versions with same data_dir + for i in 0..3 { + let mut fi = FileInfo::new(&format!("test{}", i), 4, 2); + fi.data_dir = data_dir; + fi.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi).unwrap(); + } + + // Add one version with different data_dir + let mut fi_diff = FileInfo::new("test_diff", 4, 2); + fi_diff.data_dir = Some(Uuid::new_v4()); + fi_diff.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi_diff).unwrap(); + + // Count should be 0 because user_data_dir() requires UsesDataDir flag to be set + assert_eq!(fm.shard_data_dir_count(&None, &data_dir), 0); + + // Count should be 0 for non-existent data_dir + assert_eq!(fm.shard_data_dir_count(&None, &Some(Uuid::new_v4())), 0); + } + + #[test] + fn test_file_meta_sort_by_mod_time() { + let mut fm = FileMeta::new(); + + let time1 = OffsetDateTime::from_unix_timestamp(3000).unwrap(); + let time2 = OffsetDateTime::from_unix_timestamp(1000).unwrap(); + let time3 = OffsetDateTime::from_unix_timestamp(2000).unwrap(); + + // Add versions in non-chronological order + let mut fi1 = FileInfo::new("test1", 4, 2); + fi1.mod_time = Some(time1); + fm.add_version(fi1).unwrap(); + + let mut fi2 = FileInfo::new("test2", 4, 2); + fi2.mod_time = Some(time2); + fm.add_version(fi2).unwrap(); + + let mut fi3 = FileInfo::new("test3", 4, 2); + fi3.mod_time = Some(time3); + fm.add_version(fi3).unwrap(); + + // Sort by mod time + fm.sort_by_mod_time(); + + // Verify they are sorted (newest first) - add_version already sorts by insertion + // The actual order depends on how add_version inserts them + // Let's check the first version is the latest + let latest_time = fm.versions.iter().map(|v| v.header.mod_time).max().flatten(); + assert_eq!(fm.versions[0].header.mod_time, latest_time); + } + + #[test] + fn test_file_meta_find_version() { + let mut fm = FileMeta::new(); + let version_id = Some(Uuid::new_v4()); + + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = version_id; + fi.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi).unwrap(); + + // Should find the version + let result = fm.find_version(version_id); + assert!(result.is_ok()); + let (idx, version) = result.unwrap(); + assert_eq!(idx, 0); + assert_eq!(version.get_version_id(), version_id); + + // Should not find non-existent version + let non_existent_id = Some(Uuid::new_v4()); + assert!(fm.find_version(non_existent_id).is_err()); + } + + #[test] + fn test_file_meta_delete_version() { + let mut fm = FileMeta::new(); + let version_id = Some(Uuid::new_v4()); + + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = version_id; + fi.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi.clone()).unwrap(); + + assert_eq!(fm.versions.len(), 1); + + // Delete the version + let result = fm.delete_version(&fi); + assert!(result.is_ok()); + + // Version should be removed + assert_eq!(fm.versions.len(), 0); + } + + #[test] + fn test_file_meta_update_object_version() { + let mut fm = FileMeta::new(); + let version_id = Some(Uuid::new_v4()); + + // Add initial version + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = version_id; + fi.size = 1024; + fi.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi.clone()).unwrap(); + + // Update with new metadata (size is not updated by update_object_version) + let mut metadata = HashMap::new(); + metadata.insert("test-key".to_string(), "test-value".to_string()); + fi.metadata = Some(metadata.clone()); + let result = fm.update_object_version(fi); + assert!(result.is_ok()); + + // Verify the metadata was updated + let (_, updated_version) = fm.find_version(version_id).unwrap(); + if let Some(obj) = updated_version.object { + assert_eq!(obj.size, 1024); // Size remains unchanged + assert_eq!(obj.meta_user, Some(metadata)); // Metadata is updated + } else { + panic!("Expected object version"); + } + } + + #[test] + fn test_file_info_opts() { + let opts = FileInfoOpts { data: true }; + assert!(opts.data); + + let opts_no_data = FileInfoOpts { data: false }; + assert!(!opts_no_data.data); + } + + #[test] + fn test_decode_data_dir_from_meta() { + // Test with valid metadata containing data_dir + let data_dir = Some(Uuid::new_v4()); + let obj = MetaObject { + data_dir, + mod_time: Some(OffsetDateTime::now_utc()), + erasure_algorithm: ErasureAlgo::ReedSolomon, + bitrot_checksum_algo: ChecksumAlgo::HighwayHash, + ..Default::default() + }; + + // Create a valid FileMetaVersion with the object + let version = FileMetaVersion { + version_type: VersionType::Object, + object: Some(obj), + ..Default::default() + }; + + let encoded = version.marshal_msg().unwrap(); + let result = FileMetaVersion::decode_data_dir_from_meta(&encoded); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), data_dir); + + // Test with invalid metadata + let invalid_data = vec![0u8; 10]; + let result = FileMetaVersion::decode_data_dir_from_meta(&invalid_data); + assert!(result.is_err()); + } + + #[test] + fn test_is_latest_delete_marker() { + // Test the is_latest_delete_marker function with simple data + // Since the function is complex and requires specific XL format, + // we'll test with empty data which should return false + let empty_data = vec![]; + assert!(!FileMeta::is_latest_delete_marker(&empty_data)); + + // Test with invalid data + let invalid_data = vec![1, 2, 3, 4, 5]; + assert!(!FileMeta::is_latest_delete_marker(&invalid_data)); + } + + #[test] + fn test_merge_file_meta_versions_basic() { + // Test basic merge functionality + let mut version1 = FileMetaShallowVersion::default(); + version1.header.version_id = Some(Uuid::new_v4()); + version1.header.mod_time = Some(OffsetDateTime::from_unix_timestamp(1000).unwrap()); + + let mut version2 = FileMetaShallowVersion::default(); + version2.header.version_id = Some(Uuid::new_v4()); + version2.header.mod_time = Some(OffsetDateTime::from_unix_timestamp(2000).unwrap()); + + let versions = vec![ + vec![version1.clone(), version2.clone()], + vec![version1.clone()], + vec![version2.clone()], + ]; + + let merged = merge_file_meta_versions(2, false, 10, &versions); + + // Should return versions that appear in at least quorum (2) sources + assert!(!merged.is_empty()); + } +} + +#[tokio::test] +async fn test_read_xl_meta_no_data() { + use tokio::fs; + use tokio::fs::File; + use tokio::io::AsyncWriteExt; + + let mut fm = FileMeta::new(); + + let (m, n) = (3, 2); + + for i in 0..5 { + let mut fi = FileInfo::new(i.to_string().as_str(), m, n); + fi.mod_time = Some(OffsetDateTime::now_utc()); + + fm.add_version(fi).unwrap(); + } + + // Use marshal_msg to create properly formatted data with XL headers + let buff = fm.marshal_msg().unwrap(); + + let filepath = "./test_xl.meta"; + + let mut file = File::create(filepath).await.unwrap(); + file.write_all(&buff).await.unwrap(); + + let mut f = File::open(filepath).await.unwrap(); + + let stat = f.metadata().await.unwrap(); + + let data = read_xl_meta_no_data(&mut f, stat.len() as usize).await.unwrap(); + + let mut newfm = FileMeta::default(); + newfm.unmarshal_msg(&data).unwrap(); + + fs::remove_file(filepath).await.unwrap(); + + assert_eq!(fm, newfm) +} + +#[tokio::test] +async fn test_get_file_info() { + // Test get_file_info function + let mut fm = FileMeta::new(); + let version_id = Uuid::new_v4(); + + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(version_id); + fi.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi).unwrap(); + + let encoded = fm.marshal_msg().unwrap(); + + let opts = FileInfoOpts { data: false }; + let result = get_file_info(&encoded, "test-volume", "test-path", &version_id.to_string(), opts).await; + + assert!(result.is_ok()); + let file_info = result.unwrap(); + assert_eq!(file_info.volume, "test-volume"); + assert_eq!(file_info.name, "test-path"); +} + +#[tokio::test] +async fn test_file_info_from_raw() { + // Test file_info_from_raw function + let mut fm = FileMeta::new(); + let mut fi = FileInfo::new("test", 4, 2); + fi.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi).unwrap(); + + let encoded = fm.marshal_msg().unwrap(); + + let raw_info = RawFileInfo { buf: encoded }; + + let result = file_info_from_raw(raw_info, "test-bucket", "test-object", false).await; + assert!(result.is_ok()); + + let file_info = result.unwrap(); + assert_eq!(file_info.volume, "test-bucket"); + assert_eq!(file_info.name, "test-object"); +} + +// Additional comprehensive tests for better coverage + +#[test] +fn test_file_meta_load_function() { + // Test FileMeta::load function + let mut fm = FileMeta::new(); + let mut fi = FileInfo::new("test", 4, 2); + fi.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi).unwrap(); + + let encoded = fm.marshal_msg().unwrap(); + + // Test successful load + let loaded_fm = FileMeta::load(&encoded); + assert!(loaded_fm.is_ok()); + assert_eq!(loaded_fm.unwrap(), fm); + + // Test load with invalid data + let invalid_data = vec![0u8; 10]; + let result = FileMeta::load(&invalid_data); + assert!(result.is_err()); +} + +#[test] +fn test_file_meta_read_bytes_header() { + // Create a real FileMeta and marshal it to get proper format + let mut fm = FileMeta::new(); + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(Uuid::new_v4()); + fi.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi).unwrap(); + + let marshaled = fm.marshal_msg().unwrap(); + + // First call check_xl2_v1 to get the buffer after XL header validation + let (after_xl_header, _major, _minor) = FileMeta::check_xl2_v1(&marshaled).unwrap(); + + // Ensure we have at least 5 bytes for read_bytes_header + if after_xl_header.len() < 5 { + panic!("Buffer too small: {} bytes, need at least 5", after_xl_header.len()); + } + + // Now call read_bytes_header on the remaining buffer + let result = FileMeta::read_bytes_header(after_xl_header); + assert!(result.is_ok()); + let (length, remaining) = result.unwrap(); + + // The length should be greater than 0 for real data + assert!(length > 0); + // remaining should be everything after the 5-byte header + assert_eq!(remaining.len(), after_xl_header.len() - 5); + + // Test with buffer too small + let small_buf = vec![0u8; 2]; + let result = FileMeta::read_bytes_header(&small_buf); + assert!(result.is_err()); +} + +#[test] +fn test_file_meta_get_set_idx() { + let mut fm = FileMeta::new(); + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(Uuid::new_v4()); + fi.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi).unwrap(); + + // Test get_idx + let result = fm.get_idx(0); + assert!(result.is_ok()); + + // Test get_idx with invalid index + let result = fm.get_idx(10); + assert!(result.is_err()); + + // Test set_idx + let new_version = FileMetaVersion { + version_type: VersionType::Object, + ..Default::default() + }; + let result = fm.set_idx(0, new_version); + assert!(result.is_ok()); + + // Test set_idx with invalid index + let invalid_version = FileMetaVersion::default(); + let result = fm.set_idx(10, invalid_version); + assert!(result.is_err()); +} + +#[test] +fn test_file_meta_into_fileinfo() { + let mut fm = FileMeta::new(); + let version_id = Uuid::new_v4(); + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(version_id); + fi.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi).unwrap(); + + // Test into_fileinfo with valid version_id + let result = fm.into_fileinfo("test-volume", "test-path", &version_id.to_string(), false, false); + assert!(result.is_ok()); + let file_info = result.unwrap(); + assert_eq!(file_info.volume, "test-volume"); + assert_eq!(file_info.name, "test-path"); + + // Test into_fileinfo with invalid version_id + let invalid_id = Uuid::new_v4(); + let result = fm.into_fileinfo("test-volume", "test-path", &invalid_id.to_string(), false, false); + assert!(result.is_err()); + + // Test into_fileinfo with empty version_id (should get latest) + let result = fm.into_fileinfo("test-volume", "test-path", "", false, false); + assert!(result.is_ok()); +} + +#[test] +fn test_file_meta_into_file_info_versions() { + let mut fm = FileMeta::new(); + + // Add multiple versions + for i in 0..3 { + let mut fi = FileInfo::new(&format!("test{}", i), 4, 2); + fi.version_id = Some(Uuid::new_v4()); + fi.mod_time = Some(OffsetDateTime::from_unix_timestamp(1000 + i).unwrap()); + fm.add_version(fi).unwrap(); + } + + let result = fm.into_file_info_versions("test-volume", "test-path", false); + assert!(result.is_ok()); + let versions = result.unwrap(); + assert_eq!(versions.versions.len(), 3); +} + +#[test] +fn test_file_meta_shallow_version_to_fileinfo() { + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(Uuid::new_v4()); + fi.mod_time = Some(OffsetDateTime::now_utc()); + + let version = FileMetaVersion::from(fi.clone()); + let shallow_version = FileMetaShallowVersion::try_from(version).unwrap(); + + let result = shallow_version.to_fileinfo("test-volume", "test-path", fi.version_id, false); + assert!(result.is_ok()); + let converted_fi = result.unwrap(); + assert_eq!(converted_fi.volume, "test-volume"); + assert_eq!(converted_fi.name, "test-path"); +} + +#[test] +fn test_file_meta_version_try_from_bytes() { + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(Uuid::new_v4()); + let version = FileMetaVersion::from(fi); + let encoded = version.marshal_msg().unwrap(); + + // Test successful conversion + let result = FileMetaVersion::try_from(encoded.as_slice()); + assert!(result.is_ok()); + + // Test with invalid data + let invalid_data = vec![0u8; 5]; + let result = FileMetaVersion::try_from(invalid_data.as_slice()); + assert!(result.is_err()); +} + +#[test] +fn test_file_meta_version_try_from_shallow() { + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(Uuid::new_v4()); + let version = FileMetaVersion::from(fi); + let shallow = FileMetaShallowVersion::try_from(version.clone()).unwrap(); + + let result = FileMetaVersion::try_from(shallow); + assert!(result.is_ok()); + let converted = result.unwrap(); + assert_eq!(converted.get_version_id(), version.get_version_id()); +} + +#[test] +fn test_file_meta_version_header_from_version() { + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(Uuid::new_v4()); + fi.mod_time = Some(OffsetDateTime::now_utc()); + let version = FileMetaVersion::from(fi.clone()); + + let header = FileMetaVersionHeader::from(version); + assert_eq!(header.version_id, fi.version_id); + assert_eq!(header.mod_time, fi.mod_time); +} + +#[test] +fn test_meta_object_into_fileinfo() { + let obj = MetaObject { + version_id: Some(Uuid::new_v4()), + size: 1024, + mod_time: Some(OffsetDateTime::now_utc()), + ..Default::default() + }; + + let version_id = obj.version_id; + let expected_version_id = version_id; + let file_info = obj.into_fileinfo("test-volume", "test-path", version_id, false); + assert_eq!(file_info.volume, "test-volume"); + assert_eq!(file_info.name, "test-path"); + assert_eq!(file_info.size, 1024); + assert_eq!(file_info.version_id, expected_version_id); +} + +#[test] +fn test_meta_object_from_fileinfo() { + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(Uuid::new_v4()); + fi.data_dir = Some(Uuid::new_v4()); + fi.size = 2048; + fi.mod_time = Some(OffsetDateTime::now_utc()); + + let obj = MetaObject::from(fi.clone()); + assert_eq!(obj.version_id, fi.version_id); + assert_eq!(obj.data_dir, fi.data_dir); + assert_eq!(obj.size, fi.size); + assert_eq!(obj.mod_time, fi.mod_time); +} + +#[test] +fn test_meta_delete_marker_into_fileinfo() { + let marker = MetaDeleteMarker { + version_id: Some(Uuid::new_v4()), + mod_time: Some(OffsetDateTime::now_utc()), + ..Default::default() + }; + + let version_id = marker.version_id; + let expected_version_id = version_id; + let file_info = marker.into_fileinfo("test-volume", "test-path", version_id, false); + assert_eq!(file_info.volume, "test-volume"); + assert_eq!(file_info.name, "test-path"); + assert_eq!(file_info.version_id, expected_version_id); + assert!(file_info.deleted); +} + +#[test] +fn test_meta_delete_marker_from_fileinfo() { + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(Uuid::new_v4()); + fi.mod_time = Some(OffsetDateTime::now_utc()); + fi.deleted = true; + + let marker = MetaDeleteMarker::from(fi.clone()); + assert_eq!(marker.version_id, fi.version_id); + assert_eq!(marker.mod_time, fi.mod_time); +} + +#[test] +fn test_flags_enum() { + // Test Flags enum values + assert_eq!(Flags::FreeVersion as u8, 1); + assert_eq!(Flags::UsesDataDir as u8, 2); + assert_eq!(Flags::InlineData as u8, 4); +} + +#[test] +fn test_file_meta_version_header_user_data_dir() { + let header = FileMetaVersionHeader { + flags: 0, + ..Default::default() + }; + + // Test without UsesDataDir flag + assert!(!header.user_data_dir()); + + // Test with UsesDataDir flag + let header = FileMetaVersionHeader { + flags: Flags::UsesDataDir as u8, + ..Default::default() + }; + assert!(header.user_data_dir()); + + // Test with multiple flags including UsesDataDir + let header = FileMetaVersionHeader { + flags: Flags::UsesDataDir as u8 | Flags::FreeVersion as u8, + ..Default::default() + }; + assert!(header.user_data_dir()); +} + +#[test] +fn test_file_meta_version_header_ordering() { + let header1 = FileMetaVersionHeader { + mod_time: Some(OffsetDateTime::from_unix_timestamp(1000).unwrap()), + version_id: Some(Uuid::new_v4()), + ..Default::default() + }; + + let header2 = FileMetaVersionHeader { + mod_time: Some(OffsetDateTime::from_unix_timestamp(2000).unwrap()), + version_id: Some(Uuid::new_v4()), + ..Default::default() + }; + + // Test partial_cmp + assert!(header1.partial_cmp(&header2).is_some()); + + // Test cmp - header2 should be greater (newer) + use std::cmp::Ordering; + assert_eq!(header1.cmp(&header2), Ordering::Less); // header1 has earlier time + assert_eq!(header2.cmp(&header1), Ordering::Greater); // header2 has later time + assert_eq!(header1.cmp(&header1), Ordering::Equal); +} + +#[test] +fn test_merge_file_meta_versions_edge_cases() { + // Test with empty versions + let empty_versions: Vec> = vec![]; + let merged = merge_file_meta_versions(1, false, 10, &empty_versions); + assert!(merged.is_empty()); + + // Test with quorum larger than available sources + let mut version = FileMetaShallowVersion::default(); + version.header.version_id = Some(Uuid::new_v4()); + let versions = vec![vec![version]]; + let merged = merge_file_meta_versions(5, false, 10, &versions); + assert!(merged.is_empty()); + + // Test strict mode + let mut version1 = FileMetaShallowVersion::default(); + version1.header.version_id = Some(Uuid::new_v4()); + version1.header.mod_time = Some(OffsetDateTime::from_unix_timestamp(1000).unwrap()); + + let mut version2 = FileMetaShallowVersion::default(); + version2.header.version_id = Some(Uuid::new_v4()); + version2.header.mod_time = Some(OffsetDateTime::from_unix_timestamp(2000).unwrap()); + + let versions = vec![vec![version1.clone()], vec![version2.clone()]]; + + let _merged_strict = merge_file_meta_versions(1, true, 10, &versions); + let merged_non_strict = merge_file_meta_versions(1, false, 10, &versions); + + // In strict mode, behavior might be different + assert!(!merged_non_strict.is_empty()); +} + +#[tokio::test] +async fn test_read_more_function() { + use std::io::Cursor; + + let data = b"Hello, World! This is test data."; + let mut reader = Cursor::new(data); + let mut buf = vec![0u8; 10]; + + // Test reading more data + let result = read_more(&mut reader, &mut buf, 33, 20, false).await; + assert!(result.is_ok()); + assert_eq!(buf.len(), 20); + + // Test with has_full = true and buffer already has enough data + let mut reader2 = Cursor::new(data); + let mut buf2 = vec![0u8; 5]; + let result = read_more(&mut reader2, &mut buf2, 10, 5, true).await; + assert!(result.is_ok()); + assert_eq!(buf2.len(), 5); // Should remain 5 since has >= read_size + + // Test reading beyond available data + let mut reader3 = Cursor::new(b"short"); + let mut buf3 = vec![0u8; 2]; + let result = read_more(&mut reader3, &mut buf3, 100, 98, false).await; + // Should handle gracefully even if not enough data + assert!(result.is_ok() || result.is_err()); // Either is acceptable +} + +#[tokio::test] +async fn test_read_xl_meta_no_data_edge_cases() { + use std::io::Cursor; + + // Test with empty data + let empty_data = vec![]; + let mut reader = Cursor::new(empty_data); + let result = read_xl_meta_no_data(&mut reader, 0).await; + assert!(result.is_err()); // Should fail because buffer is empty + + // Test with very small size (should fail because it's not valid XL format) + let small_data = vec![1, 2, 3]; + let mut reader = Cursor::new(small_data); + let result = read_xl_meta_no_data(&mut reader, 3).await; + assert!(result.is_err()); // Should fail because data is too small for XL format +} + +#[tokio::test] +async fn test_get_file_info_edge_cases() { + // Test with empty buffer + let empty_buf = vec![]; + let opts = FileInfoOpts { data: false }; + let result = get_file_info(&empty_buf, "volume", "path", "version", opts).await; + assert!(result.is_err()); + + // Test with invalid version_id format + let mut fm = FileMeta::new(); + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(Uuid::new_v4()); + fi.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi).unwrap(); + let encoded = fm.marshal_msg().unwrap(); + + let opts = FileInfoOpts { data: false }; + let result = get_file_info(&encoded, "volume", "path", "invalid-uuid", opts).await; + assert!(result.is_err()); +} + +#[tokio::test] +async fn test_file_info_from_raw_edge_cases() { + // Test with empty buffer + let empty_raw = RawFileInfo { buf: vec![] }; + let result = file_info_from_raw(empty_raw, "bucket", "object", false).await; + assert!(result.is_err()); + + // Test with invalid buffer + let invalid_raw = RawFileInfo { + buf: vec![1, 2, 3, 4, 5], + }; + let result = file_info_from_raw(invalid_raw, "bucket", "object", false).await; + assert!(result.is_err()); +} + +#[test] +fn test_file_meta_version_invalid_cases() { + // Test invalid version + let version = FileMetaVersion { + version_type: VersionType::Invalid, + ..Default::default() + }; + assert!(!version.valid()); + + // Test version with neither object nor delete marker + let version = FileMetaVersion { + version_type: VersionType::Object, + object: None, + delete_marker: None, + ..Default::default() + }; + assert!(!version.valid()); +} + +#[test] +fn test_meta_object_edge_cases() { + let obj = MetaObject { + data_dir: None, + ..Default::default() + }; + + // Test use_data_dir with None (use_data_dir always returns true) + assert!(obj.use_data_dir()); + + // Test use_inlinedata (always returns false in current implementation) + let obj = MetaObject { + size: 128 * 1024, // 128KB threshold + ..Default::default() + }; + assert!(!obj.use_inlinedata()); // Should be false + + let obj = MetaObject { + size: 128 * 1024 - 1, + ..Default::default() + }; + assert!(!obj.use_inlinedata()); // Should also be false (always false) +} + +#[test] +fn test_file_meta_version_header_edge_cases() { + let header = FileMetaVersionHeader { + ec_n: 0, + ec_m: 0, + ..Default::default() + }; + + // Test has_ec with zero values + assert!(!header.has_ec()); + + // Test matches_not_strict with different signatures but same version_id + let version_id = Some(Uuid::new_v4()); + let header = FileMetaVersionHeader { + version_id, + version_type: VersionType::Object, + signature: [1, 2, 3, 4], + ..Default::default() + }; + let other = FileMetaVersionHeader { + version_id, + version_type: VersionType::Object, + signature: [5, 6, 7, 8], + ..Default::default() + }; + // Should match because they have same version_id and type + assert!(header.matches_not_strict(&other)); + + // Test sorts_before with same mod_time but different version_id + let time = OffsetDateTime::from_unix_timestamp(1000).unwrap(); + let header_time1 = FileMetaVersionHeader { + mod_time: Some(time), + version_id: Some(Uuid::new_v4()), + ..Default::default() + }; + let header_time2 = FileMetaVersionHeader { + mod_time: Some(time), + version_id: Some(Uuid::new_v4()), + ..Default::default() + }; + + // Should use version_id for comparison when mod_time is same + let sorts_before = header_time1.sorts_before(&header_time2); + assert!(sorts_before || header_time2.sorts_before(&header_time1)); // One should sort before the other +} + +#[test] +fn test_file_meta_add_version_edge_cases() { + let mut fm = FileMeta::new(); + + // Test adding version with same version_id (should update) + let version_id = Some(Uuid::new_v4()); + let mut fi1 = FileInfo::new("test1", 4, 2); + fi1.version_id = version_id; + fi1.size = 1024; + fi1.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi1).unwrap(); + + let mut fi2 = FileInfo::new("test2", 4, 2); + fi2.version_id = version_id; + fi2.size = 2048; + fi2.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi2).unwrap(); + + // Should still have only one version, but updated + assert_eq!(fm.versions.len(), 1); + let (_, version) = fm.find_version(version_id).unwrap(); + if let Some(obj) = version.object { + assert_eq!(obj.size, 2048); // Size gets updated when adding same version_id + } +} + +#[test] +fn test_file_meta_delete_version_edge_cases() { + let mut fm = FileMeta::new(); + + // Test deleting non-existent version + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = Some(Uuid::new_v4()); + + let result = fm.delete_version(&fi); + assert!(result.is_err()); // Should fail for non-existent version +} + +#[test] +fn test_file_meta_shard_data_dir_count_edge_cases() { + let mut fm = FileMeta::new(); + + // Test with None data_dir parameter + let count = fm.shard_data_dir_count(&None, &None); + assert_eq!(count, 0); + + // Test with version_id parameter (not None) + let version_id = Some(Uuid::new_v4()); + let data_dir = Some(Uuid::new_v4()); + + let mut fi = FileInfo::new("test", 4, 2); + fi.version_id = version_id; + fi.data_dir = data_dir; + fi.mod_time = Some(OffsetDateTime::now_utc()); + fm.add_version(fi).unwrap(); + + let count = fm.shard_data_dir_count(&version_id, &data_dir); + assert_eq!(count, 0); // Should be 0 because user_data_dir() requires flag + + // Test with different version_id + let other_version_id = Some(Uuid::new_v4()); + let count = fm.shard_data_dir_count(&other_version_id, &data_dir); + assert_eq!(count, 1); // Should be 1 because the version has matching data_dir and user_data_dir() is true +} diff --git a/ecstore/src/heal/data_scanner.rs b/ecstore/src/heal/data_scanner.rs index 0986c76f..2b53fec6 100644 --- a/ecstore/src/heal/data_scanner.rs +++ b/ecstore/src/heal/data_scanner.rs @@ -18,8 +18,10 @@ use super::{ data_usage_cache::{DataUsageCache, DataUsageEntry, DataUsageHash}, heal_commands::{HEAL_DEEP_SCAN, HEAL_NORMAL_SCAN, HealScanMode}, }; +use crate::{bucket::metadata_sys, cmd::bucket_replication::queue_replication_heal}; use crate::{ bucket::{versioning::VersioningApi, versioning_sys::BucketVersioningSys}, + cmd::bucket_replication::ReplicationStatusType, disk, heal::data_usage::DATA_USAGE_ROOT, }; @@ -170,9 +172,9 @@ pub async fn init_data_scanner() { // Calculate randomized sleep duration // Use random factor (0.0 to 1.0) multiplied by the scanner cycle duration - let random_factor: f64 = { - let mut rng = rand::thread_rng(); - rng.gen_range(1.0..10.0) + let random_factor = { + let mut rng = rand::rng(); + rng.random_range(1.0..10.0) }; let base_cycle_duration = SCANNER_CYCLE.load(Ordering::SeqCst) as f64; let sleep_duration_secs = random_factor * base_cycle_duration; @@ -556,13 +558,114 @@ impl ScannerItem { Ok(object_infos) } - pub async fn apply_actions(&self, oi: &ObjectInfo, _size_s: &SizeSummary) -> (bool, usize) { + pub async fn apply_actions(&mut self, oi: &ObjectInfo, _size_s: &mut SizeSummary) -> (bool, usize) { let done = ScannerMetrics::time(ScannerMetric::Ilm); //todo: lifecycle + info!( + "apply_actions {} {} {:?} {:?}", + oi.bucket.clone(), + oi.name.clone(), + oi.version_id.clone(), + oi.user_defined.clone() + ); + + // Create a mutable clone if you need to modify fields + let mut oi = oi.clone(); + oi.replication_status = ReplicationStatusType::from( + oi.user_defined + .as_ref() + .and_then(|map| map.get("x-amz-bucket-replication-status")) + .unwrap_or(&"PENDING".to_string()), + ); + info!("apply status is: {:?}", oi.replication_status); + self.heal_replication(&oi, _size_s).await; done(); (false, oi.size) } + + pub async fn heal_replication(&mut self, oi: &ObjectInfo, size_s: &mut SizeSummary) { + if oi.version_id.is_none() { + error!( + "heal_replication: no version_id or replication config {} {} {}", + oi.bucket, + oi.name, + oi.version_id.is_none() + ); + return; + } + + //let config = s3s::dto::ReplicationConfiguration{ role: todo!(), rules: todo!() }; + // Use the provided variable instead of borrowing self mutably. + let replication = match metadata_sys::get_replication_config(&oi.bucket).await { + Ok((replication, _)) => replication, + Err(_) => { + error!("heal_replication: failed to get replication config for bucket {} {}", oi.bucket, oi.name); + return; + } + }; + if replication.rules.is_empty() { + error!("heal_replication: no replication rules for bucket {} {}", oi.bucket, oi.name); + return; + } + if replication.role.is_empty() { + // error!("heal_replication: no replication role for bucket {} {}", oi.bucket, oi.name); + // return; + } + + //if oi.delete_marker || !oi.version_purge_status.is_empty() { + if oi.delete_marker { + error!( + "heal_replication: delete marker or version purge status {} {} {:?} {} {:?}", + oi.bucket, oi.name, oi.version_id, oi.delete_marker, oi.version_purge_status + ); + return; + } + + if oi.replication_status == ReplicationStatusType::Completed { + return; + } + + info!("replication status is: {:?} and user define {:?}", oi.replication_status, oi.user_defined); + + let roi = queue_replication_heal(&oi.bucket, oi, &replication, 3).await; + + if roi.is_none() { + info!("not need heal {} {} {:?}", oi.bucket, oi.name, oi.version_id); + return; + } + + for (arn, tgt_status) in &roi.unwrap().target_statuses { + let tgt_size_s = size_s.repl_target_stats.entry(arn.clone()).or_default(); + + match tgt_status { + ReplicationStatusType::Pending => { + tgt_size_s.pending_count += 1; + tgt_size_s.pending_size += oi.size; + size_s.pending_count += 1; + size_s.pending_size += oi.size; + } + ReplicationStatusType::Failed => { + tgt_size_s.failed_count += 1; + tgt_size_s.failed_size += oi.size; + size_s.failed_count += 1; + size_s.failed_size += oi.size; + } + ReplicationStatusType::Completed | ReplicationStatusType::CompletedLegacy => { + tgt_size_s.replicated_count += 1; + tgt_size_s.replicated_size += oi.size; + size_s.replicated_count += 1; + size_s.replicated_size += oi.size; + } + _ => {} + } + } + + if matches!(oi.replication_status, ReplicationStatusType::Replica) { + size_s.replica_count += 1; + size_s.replica_size += oi.size; + } + } } #[derive(Debug, Default)] diff --git a/ecstore/src/heal/data_usage_cache.rs b/ecstore/src/heal/data_usage_cache.rs index a47c6fa8..2e329f89 100644 --- a/ecstore/src/heal/data_usage_cache.rs +++ b/ecstore/src/heal/data_usage_cache.rs @@ -438,8 +438,8 @@ impl DataUsageCache { } retries += 1; let dur = { - let mut rng = rand::thread_rng(); - rng.gen_range(0..1_000) + let mut rng = rand::rng(); + rng.random_range(0..1_000) }; sleep(Duration::from_millis(dur)).await; } diff --git a/ecstore/src/lib.rs b/ecstore/src/lib.rs index 17bfe8a6..07ed57cf 100644 --- a/ecstore/src/lib.rs +++ b/ecstore/src/lib.rs @@ -3,6 +3,7 @@ pub mod admin_server_info; pub mod bucket; pub mod cache_value; mod chunk_stream; +pub mod cmd; pub mod config; pub mod disk; pub mod disks_layout; diff --git a/ecstore/src/peer.rs b/ecstore/src/peer.rs index bcb8835f..e7f747a6 100644 --- a/ecstore/src/peer.rs +++ b/ecstore/src/peer.rs @@ -1,3 +1,4 @@ +use crate::bucket::metadata_sys; use crate::disk::error::{Error, Result}; use crate::disk::error_reduce::{BUCKET_OP_IGNORED_ERRS, is_all_buckets_not_found, reduce_write_quorum_errs}; use crate::disk::{DiskAPI, DiskStore}; @@ -425,14 +426,17 @@ impl PeerS3Client for LocalPeerS3Client { } // TODO: reduceWriteQuorumErrs - - // debug!("get_bucket_info errs:{:?}", errs); + let mut versioned = false; + if let Ok(sys) = metadata_sys::get(bucket).await { + versioned = sys.versioning(); + } ress.iter() .find_map(|op| { op.as_ref().map(|v| BucketInfo { name: v.name.clone(), created: v.created, + versionning: versioned, ..Default::default() }) }) @@ -491,10 +495,13 @@ pub struct RemotePeerS3Client { } impl RemotePeerS3Client { - fn new(node: Option, pools: Option>) -> Self { + pub fn new(node: Option, pools: Option>) -> Self { let addr = node.as_ref().map(|v| v.url.to_string()).unwrap_or_default().to_string(); Self { node, pools, addr } } + pub fn get_addr(&self) -> String { + self.addr.clone() + } } #[async_trait] diff --git a/ecstore/src/set_disk.rs b/ecstore/src/set_disk.rs index be11ccb3..cfc7bf54 100644 --- a/ecstore/src/set_disk.rs +++ b/ecstore/src/set_disk.rs @@ -59,10 +59,7 @@ use http::HeaderMap; use lock::{LockApi, namespace_lock::NsLockMap}; use madmin::heal_commands::{HealDriveInfo, HealResultItem}; use md5::{Digest as Md5Digest, Md5}; -use rand::{ - thread_rng, - {Rng, seq::SliceRandom}, -}; +use rand::{Rng, seq::SliceRandom}; use rustfs_filemeta::{ FileInfo, FileMeta, FileMetaShallowVersion, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, ObjectPartInfo, RawFileInfo, file_info_from_raw, merge_file_meta_versions, @@ -133,7 +130,7 @@ impl SetDisks { } } - let mut rng = thread_rng(); + let mut rng = rand::rng(); disks.shuffle(&mut rng); @@ -142,7 +139,7 @@ impl SetDisks { async fn get_online_local_disks(&self) -> Vec> { let mut disks = self.get_online_disks().await; - let mut rng = thread_rng(); + let mut rng = rand::rng(); disks.shuffle(&mut rng); @@ -165,10 +162,10 @@ impl SetDisks { let mut futures = Vec::with_capacity(disks.len()); let mut numbers: Vec = (0..disks.len()).collect(); { - let mut rng = thread_rng(); + let mut rng = rand::rng(); disks.shuffle(&mut rng); - numbers.shuffle(&mut thread_rng()); + numbers.shuffle(&mut rng); } for &i in numbers.iter() { @@ -242,7 +239,7 @@ impl SetDisks { async fn _get_local_disks(&self) -> Vec> { let mut disks = self.get_disks_internal().await; - let mut rng = thread_rng(); + let mut rng = rand::rng(); disks.shuffle(&mut rng); @@ -3021,7 +3018,7 @@ impl SetDisks { // in different order per erasure set, this wider spread is needed when // there are lots of buckets with different order of objects in them. let permutes = { - let mut rng = thread_rng(); + let mut rng = rand::rng(); let mut permutes: Vec = (0..buckets.len()).collect(); permutes.shuffle(&mut rng); permutes @@ -3043,8 +3040,8 @@ impl SetDisks { let (buckets_results_tx, mut buckets_results_rx) = mpsc::channel::(disks.len()); let update_time = { - let mut rng = thread_rng(); - Duration::from_secs(30) + Duration::from_secs_f64(10.0 * rng.gen_range(0.0..1.0)) + let mut rng = rand::rng(); + Duration::from_secs(30) + Duration::from_secs_f64(10.0 * rng.random_range(0.0..1.0)) }; let mut ticker = interval(update_time); @@ -3389,7 +3386,7 @@ impl SetDisks { } { - let mut rng = thread_rng(); + let mut rng = rand::rng(); // 随机洗牌 disks.shuffle(&mut rng); diff --git a/ecstore/src/store.rs b/ecstore/src/store.rs index cee0320f..65641885 100644 --- a/ecstore/src/store.rs +++ b/ecstore/src/store.rs @@ -49,6 +49,7 @@ use glob::Pattern; use http::HeaderMap; use lazy_static::lazy_static; use madmin::heal_commands::HealResultItem; +use rand::Rng as _; use rustfs_filemeta::MetaCacheEntry; use s3s::dto::{BucketVersioningStatus, ObjectLockConfiguration, ObjectLockEnabled, VersioningConfiguration}; use std::cmp::Ordering; @@ -501,7 +502,8 @@ impl ECStore { return None; } - let random_u64: u64 = rand::random(); + let mut rng = rand::rng(); + let random_u64: u64 = rng.random_range(0..total); let choose = random_u64 % total; let mut at_total = 0; diff --git a/ecstore/src/store_api.rs b/ecstore/src/store_api.rs index 17983919..e5f9cb50 100644 --- a/ecstore/src/store_api.rs +++ b/ecstore/src/store_api.rs @@ -1,5 +1,6 @@ use crate::bucket::metadata_sys::get_versioning_config; use crate::bucket::versioning::VersioningApi as _; +use crate::cmd::bucket_replication::{ReplicationStatusType, VersionPurgeStatusType}; use crate::error::{Error, Result}; use crate::heal::heal_ops::HealSequence; use crate::store_utils::clean_metadata; @@ -317,6 +318,11 @@ pub struct ObjectInfo { pub inlined: bool, pub metadata_only: bool, pub version_only: bool, + pub replication_status_internal: String, + pub replication_status: ReplicationStatusType, + pub version_purge_status_internal: String, + pub version_purge_status: VersionPurgeStatusType, + pub checksum: Vec, } impl Clone for ObjectInfo { @@ -345,6 +351,11 @@ impl Clone for ObjectInfo { inlined: self.inlined, metadata_only: self.metadata_only, version_only: self.version_only, + replication_status_internal: self.replication_status_internal.clone(), + replication_status: self.replication_status.clone(), + version_purge_status_internal: self.version_purge_status_internal.clone(), + version_purge_status: self.version_purge_status.clone(), + checksum: Default::default(), } } } diff --git a/ecstore/src/store_list_objects.rs b/ecstore/src/store_list_objects.rs index df105bcd..5b6ecb89 100644 --- a/ecstore/src/store_list_objects.rs +++ b/ecstore/src/store_list_objects.rs @@ -15,7 +15,6 @@ use crate::utils::path::{self, SLASH_SEPARATOR, base_dir_from_prefix}; use crate::{store::ECStore, store_api::ListObjectsV2Info}; use futures::future::join_all; use rand::seq::SliceRandom; -use rand::thread_rng; use rustfs_filemeta::{ FileInfo, MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams, merge_file_meta_versions, @@ -24,7 +23,7 @@ use std::collections::HashMap; use std::sync::Arc; use tokio::sync::broadcast::{self, Receiver as B_Receiver}; use tokio::sync::mpsc::{self, Receiver, Sender}; -use tracing::error; +use tracing::{error, warn}; use uuid::Uuid; const MAX_OBJECT_LIST: i32 = 1000; @@ -539,6 +538,9 @@ impl ECStore { error!("gather_results err {:?}", err); let _ = err_tx2.send(Arc::new(err)); } + + // cancel call exit spawns + let _ = cancel_tx.send(true); }); let mut result = { @@ -564,9 +566,6 @@ impl ECStore { } }; - // cancel call exit spawns - cancel_tx.send(true).map_err(Error::other)?; - // wait spawns exit join_all(vec![job1, job2]).await; @@ -621,7 +620,7 @@ impl ECStore { tokio::spawn(async move { if let Err(err) = merge_entry_channels(rx, inputs, sender.clone(), 1).await { - println!("merge_entry_channels err {:?}", err) + error!("merge_entry_channels err {:?}", err) } }); @@ -715,7 +714,7 @@ impl ECStore { let fallback_disks = { if ask_disks > 0 && disks.len() > ask_disks as usize { - let mut rand = thread_rng(); + let mut rand = rand::rng(); disks.shuffle(&mut rand); disks.split_off(ask_disks as usize) } else { @@ -1067,7 +1066,10 @@ async fn merge_entry_channels( return Ok(()) } }, - _ = rx.recv()=>return Err(Error::other("cancel")), + _ = rx.recv()=>{ + warn!("merge_entry_channels rx.recv() cancel"); + return Ok(()) + }, } } } @@ -1230,7 +1232,7 @@ impl SetDisks { let mut fallback_disks = Vec::new(); if ask_disks > 0 && disks.len() > ask_disks as usize { - let mut rand = thread_rng(); + let mut rand = rand::rng(); disks.shuffle(&mut rand); fallback_disks = disks.split_off(ask_disks as usize); diff --git a/ecstore/src/xhttp.rs b/ecstore/src/xhttp.rs index 8c34d8c0..a5df9268 100644 --- a/ecstore/src/xhttp.rs +++ b/ecstore/src/xhttp.rs @@ -1,3 +1,4 @@ pub const AMZ_OBJECT_TAGGING: &str = "X-Amz-Tagging"; +pub const AMZ_BUCKET_REPLICATION_STATUS: &str = "X-Amz-Replication-Status"; pub const AMZ_STORAGE_CLASS: &str = "x-amz-storage-class"; pub const AMZ_DECODED_CONTENT_LENGTH: &str = "X-Amz-Decoded-Content-Length"; diff --git a/iam/Cargo.toml b/iam/Cargo.toml index 14815aa1..abb4b1a6 100644 --- a/iam/Cargo.toml +++ b/iam/Cargo.toml @@ -18,20 +18,19 @@ policy.workspace = true serde_json.workspace = true async-trait.workspace = true thiserror.workspace = true -strum = { version = "0.27.1", features = ["derive"] } -arc-swap = "1.7.1" +strum = { workspace = true, features = ["derive"] } +arc-swap = { workspace = true } crypto = { path = "../crypto" } -ipnetwork = { version = "0.21.1", features = ["serde"] } -itertools = "0.14.0" +ipnetwork = { workspace = true, features = ["serde"] } +itertools = { workspace = true } futures.workspace = true rand.workspace = true -base64-simd = "0.8.0" +base64-simd = { workspace = true } jsonwebtoken = { workspace = true } tracing.workspace = true madmin.workspace = true lazy_static.workspace = true -regex = "1.11.1" - +regex = { workspace = true } [dev-dependencies] test-case.workspace = true diff --git a/iam/src/utils.rs b/iam/src/utils.rs index e53c0ab2..475ccaf9 100644 --- a/iam/src/utils.rs +++ b/iam/src/utils.rs @@ -14,10 +14,10 @@ pub fn gen_access_key(length: usize) -> Result { } let mut result = String::with_capacity(length); - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); for _ in 0..length { - result.push(ALPHA_NUMERIC_TABLE[rng.gen_range(0..ALPHA_NUMERIC_TABLE.len())]); + result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]); } Ok(result) @@ -29,7 +29,7 @@ pub fn gen_secret_key(length: usize) -> Result { if length < 8 { return Err(Error::other("secret key length is too short")); } - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)]; rng.fill_bytes(&mut key); diff --git a/policy/Cargo.toml b/policy/Cargo.toml index b4e22fc1..cb9c3341 100644 --- a/policy/Cargo.toml +++ b/policy/Cargo.toml @@ -16,20 +16,19 @@ serde = { workspace = true, features = ["derive", "rc"] } serde_json.workspace = true async-trait.workspace = true thiserror.workspace = true -strum = { version = "0.27.1", features = ["derive"] } +strum = { workspace = true, features = ["derive"] } arc-swap = "1.7.1" crypto = { path = "../crypto" } -ipnetwork = { version = "0.21.1", features = ["serde"] } -itertools = "0.14.0" +ipnetwork = { workspace = true, features = ["serde"] } +itertools = { workspace = true } futures.workspace = true rand.workspace = true -base64-simd = "0.8.0" +base64-simd = { workspace = true } jsonwebtoken = { workspace = true } tracing.workspace = true madmin.workspace = true lazy_static.workspace = true -regex = "1.11.1" - +regex = { workspace = true } [dev-dependencies] test-case.workspace = true diff --git a/policy/src/utils.rs b/policy/src/utils.rs index c32958b3..9c833d63 100644 --- a/policy/src/utils.rs +++ b/policy/src/utils.rs @@ -14,10 +14,10 @@ pub fn gen_access_key(length: usize) -> Result { } let mut result = String::with_capacity(length); - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); for _ in 0..length { - result.push(ALPHA_NUMERIC_TABLE[rng.gen_range(0..ALPHA_NUMERIC_TABLE.len())]); + result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]); } Ok(result) @@ -29,7 +29,7 @@ pub fn gen_secret_key(length: usize) -> Result { if length < 8 { return Err(Error::other("secret key length is too short")); } - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)]; rng.fill_bytes(&mut key); diff --git a/rustfs/Cargo.toml b/rustfs/Cargo.toml index 5abb23e2..96c6d371 100644 --- a/rustfs/Cargo.toml +++ b/rustfs/Cargo.toml @@ -22,6 +22,7 @@ api = { workspace = true } appauth = { workspace = true } atoi = { workspace = true } atomic_enum = { workspace = true } +aws-sdk-s3 = { workspace = true } axum.workspace = true axum-extra = { workspace = true } axum-server = { workspace = true } @@ -43,14 +44,18 @@ hyper-util.workspace = true http.workspace = true http-body.workspace = true iam = { workspace = true } +include_dir = { workspace = true } +jsonwebtoken = { workspace = true } lock.workspace = true matchit = { workspace = true } mime.workspace = true mime_guess = { workspace = true } opentelemetry = { workspace = true } +percent-encoding = { workspace = true } pin-project-lite.workspace = true protos.workspace = true query = { workspace = true } +regex = { workspace = true } rmp-serde.workspace = true rustfs-config = { workspace = true } rustfs-event-notifier = { workspace = true } @@ -64,6 +69,7 @@ serde_json.workspace = true serde_urlencoded = { workspace = true } shadow-rs = { workspace = true, features = ["build", "metadata"] } socket2 = { workspace = true } +thiserror = { workspace = true } tracing.workspace = true time = { workspace = true, features = ["parsing", "formatting", "serde"] } tokio-util.workspace = true @@ -85,9 +91,9 @@ tower-http = { workspace = true, features = [ "compression-gzip", "cors", ] } +urlencoding = { workspace = true } uuid = { workspace = true } rustfs-filemeta.workspace = true -thiserror.workspace = true rustfs-rio.workspace = true [target.'cfg(target_os = "linux")'.dependencies] diff --git a/rustfs/src/admin/handlers.rs b/rustfs/src/admin/handlers.rs index 8fc18397..dd603599 100644 --- a/rustfs/src/admin/handlers.rs +++ b/rustfs/src/admin/handlers.rs @@ -2,9 +2,13 @@ use super::router::Operation; use crate::auth::check_key_valid; use crate::auth::get_condition_values; use crate::auth::get_session_token; +use crate::error::ApiError; use bytes::Bytes; use ecstore::admin_server_info::get_server_info; +use ecstore::bucket::metadata_sys::{self, get_replication_config}; +use ecstore::bucket::target::BucketTarget; use ecstore::bucket::versioning_sys::BucketVersioningSys; +use ecstore::cmd::bucket_targets::{self, GLOBAL_Bucket_Target_Sys}; use ecstore::error::StorageError; use ecstore::global::GLOBAL_ALlHealState; use ecstore::heal::data_usage::load_data_usage_from_backend; @@ -23,9 +27,11 @@ use http::{HeaderMap, Uri}; use hyper::StatusCode; use iam::get_global_action_cred; use iam::store::MappedPolicy; +// use lazy_static::lazy_static; use madmin::metrics::RealtimeMetrics; use madmin::utils::parse_duration; use matchit::Params; +use percent_encoding::{AsciiSet, CONTROLS, percent_encode}; use policy::policy::Args; use policy::policy::BucketPolicy; use policy::policy::action::Action; @@ -36,6 +42,7 @@ use s3s::stream::{ByteStream, DynByteStream}; use s3s::{Body, S3Error, S3Request, S3Response, S3Result, s3_error}; use s3s::{S3ErrorCode, StdError}; use serde::{Deserialize, Serialize}; +// use serde_json::to_vec; use std::collections::{HashMap, HashSet}; use std::path::PathBuf; use std::pin::Pin; @@ -47,6 +54,7 @@ use tokio::time::interval; use tokio::{select, spawn}; use tokio_stream::wrappers::ReceiverStream; use tracing::{error, info, warn}; +// use url::UrlQuery; pub mod event; pub mod group; @@ -57,6 +65,7 @@ pub mod service_account; pub mod sts; pub mod trace; pub mod user; +use urlencoding::decode; #[derive(Debug, Serialize, Default)] #[serde(rename_all = "PascalCase", default)] @@ -745,6 +754,278 @@ impl Operation for BackgroundHealStatusHandler { } } +fn extract_query_params(uri: &Uri) -> HashMap { + let mut params = HashMap::new(); + + if let Some(query) = uri.query() { + query.split('&').for_each(|pair| { + if let Some((key, value)) = pair.split_once('=') { + params.insert(key.to_string(), value.to_string()); + } + }); + } + + params +} + +//disable encrypto from client because rustfs use len 8 Nonce but rustfs use 12 len Nonce + +#[allow(dead_code)] +fn is_local_host(_host: String) -> bool { + false +} + +//awscurl --service s3 --region us-east-1 --access_key rustfsadmin --secret_key rustfsadmin "http://:9000/rustfs/admin/v3/replicationmetrics?bucket=1" +pub struct GetReplicationMetricsHandler {} +#[async_trait::async_trait] +impl Operation for GetReplicationMetricsHandler { + async fn call(&self, _req: S3Request, _params: Params<'_, '_>) -> S3Result> { + error!("GetReplicationMetricsHandler"); + let querys = extract_query_params(&_req.uri); + if let Some(bucket) = querys.get("bucket") { + error!("get bucket:{} metris", bucket); + } + //return Err(s3_error!(InvalidArgument, "Invalid bucket name")); + //Ok(S3Response::with_headers((StatusCode::OK, Body::from()), header)) + return Ok(S3Response::new((StatusCode::OK, Body::from("Ok".to_string())))); + } +} + +pub struct SetRemoteTargetHandler {} +#[async_trait::async_trait] +impl Operation for SetRemoteTargetHandler { + async fn call(&self, mut _req: S3Request, _params: Params<'_, '_>) -> S3Result> { + //return Ok(S3Response::new((StatusCode::OK, Body::from("OK".to_string())))); + // println!("handle MetricsHandler, params: {:?}", _req.input); + info!("handle MetricsHandler, params: {:?}", _req.credentials); + let querys = extract_query_params(&_req.uri); + let Some(_cred) = _req.credentials else { + error!("credentials null"); + return Err(s3_error!(InvalidRequest, "get cred failed")); + }; + let _is_owner = true; // 先按 true 处理,后期根据请求决定 + let body = _req.input.store_all_unlimited().await.unwrap(); + //println!("body: {}", std::str::from_utf8(&body.clone()).unwrap()); + + //println!("bucket is:{}", bucket.clone()); + if let Some(bucket) = querys.get("bucket") { + if bucket.is_empty() { + println!("have bucket: {}", bucket); + return Ok(S3Response::new((StatusCode::OK, Body::from("fuck".to_string())))); + } + let Some(store) = new_object_layer_fn() else { + return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); + }; + + // let binfo:BucketInfo = store + // .get_bucket_info(bucket, &ecstore::store_api::BucketOptions::default()).await; + match store + .get_bucket_info(bucket, &ecstore::store_api::BucketOptions::default()) + .await + { + Ok(info) => { + println!("Bucket Info: {:?}", info); + if !info.versionning { + return Ok(S3Response::new((StatusCode::FORBIDDEN, Body::from("bucket need versioned".to_string())))); + } + } + Err(err) => { + eprintln!("Error: {:?}", err); + return Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::from("empty bucket".to_string())))); + } + } + + let mut remote_target: BucketTarget = serde_json::from_slice(&body).map_err(ApiError::other)?; // 错误会被传播 + remote_target.source_bucket = bucket.clone(); + + info!("remote target {} And arn is:", remote_target.source_bucket.clone()); + + if let Some(val) = remote_target.arn.clone() { + info!("arn is {}", val); + } + + if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() { + let (arn, exist) = sys.get_remote_arn(bucket, Some(&remote_target), "").await; + info!("exist: {} {}", exist, arn.clone().unwrap_or_default()); + if exist && arn.is_some() { + let jsonarn = serde_json::to_string(&arn).expect("failed to serialize"); + //Ok(S3Response::new) + return Ok(S3Response::new((StatusCode::OK, Body::from(jsonarn)))); + } else { + remote_target.arn = arn; + match sys.set_target(bucket, &remote_target, false, false).await { + Ok(_) => { + { + //todo 各种持久化的工作 + let targets = sys.list_targets(Some(bucket), None).await; + info!("targets is {}", targets.len()); + match serde_json::to_vec(&targets) { + Ok(json) => { + //println!("json is:{:?}", json.clone().to_ascii_lowercase()); + //metadata_sys::GLOBAL_BucketMetadataSys:: + //BUCKET_TARGETS_FILE: &str = "bucket-targets.json" + let _ = metadata_sys::update(bucket, "bucket-targets.json", json).await; + // if let Err(err) = metadata_sys::GLOBAL_BucketMetadataSys.get(). + // .update(ctx, bucket, "bucketTargetsFile", tgt_bytes) + // .await + // { + // write_error_response(ctx, &err)?; + // return Err(err); + // } + } + Err(e) => { + error!("序列化失败{}", e); + } + } + } + + let jsonarn = serde_json::to_string(&remote_target.arn.clone()).expect("failed to serialize"); + return Ok(S3Response::new((StatusCode::OK, Body::from(jsonarn)))); + } + Err(e) => { + error!("set target error {}", e); + return Ok(S3Response::new(( + StatusCode::BAD_REQUEST, + Body::from("remote target not ready".to_string()), + ))); + } + } + } + } else { + error!("GLOBAL_BUCKET _TARGET_SYS is not initialized"); + return Err(S3Error::with_message( + S3ErrorCode::InternalError, + "GLOBAL_BUCKET_TARGET_SYS is not initialized".to_string(), + )); + } + } + // return Err(s3_error!(InvalidArgument)); + return Ok(S3Response::new((StatusCode::OK, Body::from("Ok".to_string())))); + } +} + +pub struct ListRemoteTargetHandler {} +#[async_trait::async_trait] +impl Operation for ListRemoteTargetHandler { + async fn call(&self, _req: S3Request, _params: Params<'_, '_>) -> S3Result> { + warn!("list GetRemoteTargetHandler, params: {:?}", _req.credentials); + + let querys = extract_query_params(&_req.uri); + let Some(_cred) = _req.credentials else { + error!("credentials null"); + return Err(s3_error!(InvalidRequest, "get cred failed")); + }; + + if let Some(bucket) = querys.get("bucket") { + if bucket.is_empty() { + error!("bucket parameter is empty"); + return Ok(S3Response::new(( + StatusCode::BAD_REQUEST, + Body::from("Bucket parameter is required".to_string()), + ))); + } + + let Some(store) = new_object_layer_fn() else { + return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not initialized".to_string())); + }; + + match store + .get_bucket_info(bucket, &ecstore::store_api::BucketOptions::default()) + .await + { + Ok(info) => { + println!("Bucket Info: {:?}", info); + if !info.versionning { + return Ok(S3Response::new(( + StatusCode::FORBIDDEN, + Body::from("Bucket needs versioning".to_string()), + ))); + } + } + Err(err) => { + eprintln!("Error fetching bucket info: {:?}", err); + return Ok(S3Response::new((StatusCode::BAD_REQUEST, Body::from("Invalid bucket".to_string())))); + } + } + + if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() { + let targets = sys.list_targets(Some(bucket), None).await; + error!("target sys len {}", targets.len()); + if targets.is_empty() { + return Ok(S3Response::new(( + StatusCode::NOT_FOUND, + Body::from("No remote targets found".to_string()), + ))); + } + + let json_targets = serde_json::to_string(&targets).map_err(|e| { + error!("Serialization error: {}", e); + S3Error::with_message(S3ErrorCode::InternalError, "Failed to serialize targets".to_string()) + })?; + + return Ok(S3Response::new((StatusCode::OK, Body::from(json_targets)))); + } else { + println!("GLOBAL_BUCKET_TARGET_SYS is not initialized"); + return Err(S3Error::with_message( + S3ErrorCode::InternalError, + "GLOBAL_BUCKET_TARGET_SYS is not initialized".to_string(), + )); + } + } + + println!("Bucket parameter missing in request"); + Ok(S3Response::new(( + StatusCode::BAD_REQUEST, + Body::from("Bucket parameter is required".to_string()), + ))) + //return Err(s3_error!(NotImplemented)); + } +} +const COLON: AsciiSet = CONTROLS.add(b':'); +pub struct RemoveRemoteTargetHandler {} +#[async_trait::async_trait] +impl Operation for RemoveRemoteTargetHandler { + async fn call(&self, _req: S3Request, _params: Params<'_, '_>) -> S3Result> { + error!("remove remote target called"); + let querys = extract_query_params(&_req.uri); + + if let Some(arnstr) = querys.get("arn") { + if let Some(bucket) = querys.get("bucket") { + if bucket.is_empty() { + error!("bucket parameter is empty"); + return Ok(S3Response::new((StatusCode::NOT_FOUND, Body::from("bucket not found".to_string())))); + } + let _arn = bucket_targets::ARN::parse(arnstr); + + match get_replication_config(bucket).await { + Ok((conf, _ts)) => { + for ru in conf.rules { + let encoded = percent_encode(ru.destination.bucket.as_bytes(), &COLON); + let encoded_str = encoded.to_string(); + if *arnstr == encoded_str { + error!("target in use"); + return Ok(S3Response::new((StatusCode::FORBIDDEN, Body::from("Ok".to_string())))); + } + info!("bucket: {} and arn str is {} ", encoded_str, arnstr); + } + } + Err(err) => { + error!("get replication config err: {}", err); + return Ok(S3Response::new((StatusCode::NOT_FOUND, Body::from(err.to_string())))); + } + } + //percent_decode_str(&arnstr); + let decoded_str = decode(arnstr).unwrap(); + error!("need delete target is {}", decoded_str); + bucket_targets::remove_bucket_target(bucket, arnstr).await; + } + } + //return Err(s3_error!(InvalidArgument, "Invalid bucket name")); + //Ok(S3Response::with_headers((StatusCode::OK, Body::from()), header)) + return Ok(S3Response::new((StatusCode::OK, Body::from("Ok".to_string())))); + } +} + #[cfg(test)] mod test { use ecstore::heal::heal_commands::HealOpts; diff --git a/rustfs/src/admin/mod.rs b/rustfs/src/admin/mod.rs index f34c1f1f..7f25d355 100644 --- a/rustfs/src/admin/mod.rs +++ b/rustfs/src/admin/mod.rs @@ -10,12 +10,14 @@ use handlers::{ sts, user, }; +use handlers::{GetReplicationMetricsHandler, ListRemoteTargetHandler, RemoveRemoteTargetHandler, SetRemoteTargetHandler}; use hyper::Method; use router::{AdminOperation, S3Router}; use rpc::regist_rpc_route; use s3s::route::S3Route; const ADMIN_PREFIX: &str = "/rustfs/admin"; +const RUSTFS_ADMIN_PREFIX: &str = "/rustfs/admin"; pub fn make_admin_route() -> std::io::Result { let mut r: S3Router = S3Router::new(); @@ -227,6 +229,53 @@ fn register_user_route(r: &mut S3Router) -> std::io::Result<()> AdminOperation(&AddServiceAccount {}), )?; + r.insert( + Method::GET, + format!("{}{}", RUSTFS_ADMIN_PREFIX, "/v3/list-remote-targets").as_str(), + AdminOperation(&ListRemoteTargetHandler {}), + )?; + + r.insert( + Method::GET, + format!("{}{}", ADMIN_PREFIX, "/v3/list-remote-targets").as_str(), + AdminOperation(&ListRemoteTargetHandler {}), + )?; + + r.insert( + Method::GET, + format!("{}{}", RUSTFS_ADMIN_PREFIX, "/v3/replicationmetrics").as_str(), + AdminOperation(&GetReplicationMetricsHandler {}), + )?; + + r.insert( + Method::GET, + format!("{}{}", ADMIN_PREFIX, "/v3/replicationmetrics").as_str(), + AdminOperation(&GetReplicationMetricsHandler {}), + )?; + + r.insert( + Method::PUT, + format!("{}{}", RUSTFS_ADMIN_PREFIX, "/v3/set-remote-target").as_str(), + AdminOperation(&SetRemoteTargetHandler {}), + )?; + r.insert( + Method::PUT, + format!("{}{}", ADMIN_PREFIX, "/v3/set-remote-target").as_str(), + AdminOperation(&SetRemoteTargetHandler {}), + )?; + + r.insert( + Method::DELETE, + format!("{}{}", RUSTFS_ADMIN_PREFIX, "/v3/remove-remote-target").as_str(), + AdminOperation(&RemoveRemoteTargetHandler {}), + )?; + + r.insert( + Method::DELETE, + format!("{}{}", ADMIN_PREFIX, "/v3/remove-remote-target").as_str(), + AdminOperation(&RemoveRemoteTargetHandler {}), + )?; + // list-canned-policies?bucket=xxx r.insert( Method::GET, diff --git a/rustfs/src/admin/router.rs b/rustfs/src/admin/router.rs index dd7544c0..bea785cd 100644 --- a/rustfs/src/admin/router.rs +++ b/rustfs/src/admin/router.rs @@ -14,6 +14,7 @@ use s3s::route::S3Route; use s3s::s3_error; use super::ADMIN_PREFIX; +use super::RUSTFS_ADMIN_PREFIX; use super::rpc::RPC_PREFIX; pub struct S3Router { @@ -63,7 +64,7 @@ where } } - uri.path().starts_with(ADMIN_PREFIX) || uri.path().starts_with(RPC_PREFIX) + uri.path().starts_with(ADMIN_PREFIX) || uri.path().starts_with(RPC_PREFIX) || uri.path().starts_with(RUSTFS_ADMIN_PREFIX) } async fn call(&self, req: S3Request) -> S3Result> { diff --git a/rustfs/src/grpc.rs b/rustfs/src/grpc.rs index cf2a298b..8a94e046 100644 --- a/rustfs/src/grpc.rs +++ b/rustfs/src/grpc.rs @@ -224,6 +224,8 @@ impl Node for NodeService { })); } }; + + println!("bucket info {}", bucket_info.clone()); Ok(tonic::Response::new(GetBucketInfoResponse { success: true, bucket_info, @@ -231,6 +233,7 @@ impl Node for NodeService { })) } + // println!("vuc") Err(err) => Ok(tonic::Response::new(GetBucketInfoResponse { success: false, bucket_info: String::new(), diff --git a/rustfs/src/main.rs b/rustfs/src/main.rs index b968d92c..df7db789 100644 --- a/rustfs/src/main.rs +++ b/rustfs/src/main.rs @@ -24,6 +24,7 @@ use common::{ }; use ecstore::StorageAPI; use ecstore::bucket::metadata_sys::init_bucket_metadata_sys; +use ecstore::cmd::bucket_replication::init_bucket_replication_pool; use ecstore::config as ecconfig; use ecstore::config::GLOBAL_ConfigSys; use ecstore::heal::background_heal_ops::init_auto_heal; @@ -536,6 +537,11 @@ async fn run(opt: config::Opt) -> Result<()> { init_console_cfg(local_ip, server_port); + print_server_info(); + init_bucket_replication_pool().await; + + init_console_cfg(local_ip, server_port); + print_server_info(); if opt.console_enable { diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs index 5d30cb7f..98f81c6e 100644 --- a/rustfs/src/storage/ecfs.rs +++ b/rustfs/src/storage/ecfs.rs @@ -12,6 +12,8 @@ use api::query::Context; use api::query::Query; use api::server::dbms::DatabaseManagerSystem; use bytes::Bytes; +use chrono::DateTime; +use chrono::Utc; use datafusion::arrow::csv::WriterBuilder as CsvWriterBuilder; use datafusion::arrow::json::WriterBuilder as JsonWriterBuilder; use datafusion::arrow::json::writer::JsonArray; @@ -28,6 +30,9 @@ use ecstore::bucket::policy_sys::PolicySys; use ecstore::bucket::tagging::decode_tags; use ecstore::bucket::tagging::encode_tags; use ecstore::bucket::versioning_sys::BucketVersioningSys; +use ecstore::cmd::bucket_replication::get_must_replicate_options; +use ecstore::cmd::bucket_replication::must_replicate; +use ecstore::cmd::bucket_replication::schedule_replication; use ecstore::error::StorageError; use ecstore::new_object_layer_fn; use ecstore::set_disk::DEFAULT_READ_BUFFER_SIZE; @@ -41,8 +46,11 @@ use ecstore::store_api::ObjectIO; use ecstore::store_api::ObjectOptions; use ecstore::store_api::ObjectToDelete; use ecstore::store_api::PutObjReader; -use ecstore::store_api::RESERVED_METADATA_PREFIX_LOWER; use ecstore::store_api::StorageAPI; +// use ecstore::store_api::RESERVED_METADATA_PREFIX; +use ecstore::cmd::bucket_replication::ReplicationStatusType; +use ecstore::cmd::bucket_replication::ReplicationType; +use ecstore::store_api::RESERVED_METADATA_PREFIX_LOWER; use ecstore::utils::path::path_join_buf; use ecstore::utils::xml; use ecstore::xhttp; @@ -918,6 +926,7 @@ impl S3 for FS { return self.put_object_extract(req).await; } + info!("put object"); let input = req.input; if let Some(ref storage_class) = input.storage_class { @@ -975,10 +984,27 @@ impl S3 for FS { metadata.insert(xhttp::AMZ_OBJECT_TAGGING.to_owned(), tags); } - let opts: ObjectOptions = put_opts(&bucket, &key, version_id, &req.headers, Some(metadata)) + let mt = metadata.clone(); + let mt2 = metadata.clone(); + + let opts: ObjectOptions = put_opts(&bucket, &key, version_id, &req.headers, Some(mt)) .await .map_err(ApiError::from)?; + let repoptions = + get_must_replicate_options(&mt2, "", ReplicationStatusType::Unknown, ReplicationType::ObjectReplicationType, &opts); + + let dsc = must_replicate(&bucket, &key, &repoptions).await; + warn!("dsc {}", &dsc.replicate_any().clone()); + if dsc.replicate_any() { + let k = format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp"); + let now: DateTime = Utc::now(); + let formatted_time = now.to_rfc3339(); + metadata.insert(k, formatted_time); + let k = format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status"); + metadata.insert(k, dsc.pending_status()); + } + debug!("put_object opts {:?}", &opts); let obj_info = store @@ -986,9 +1012,17 @@ impl S3 for FS { .await .map_err(ApiError::from)?; - let e_tag = obj_info.etag; + let e_tag = obj_info.etag.clone(); - // store.put_object(bucket, object, data, opts); + let repoptions = + get_must_replicate_options(&mt2, "", ReplicationStatusType::Unknown, ReplicationType::ObjectReplicationType, &opts); + + let dsc = must_replicate(&bucket, &key, &repoptions).await; + + if dsc.replicate_any() { + let objectlayer = new_object_layer_fn(); + schedule_replication(obj_info, objectlayer.unwrap(), dsc, 1).await; + } let output = PutObjectOutput { e_tag, @@ -1155,17 +1189,30 @@ impl S3 for FS { return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); }; - let oi = store + let obj_info = store .complete_multipart_upload(&bucket, &key, &upload_id, uploaded_parts, opts) .await .map_err(ApiError::from)?; let output = CompleteMultipartUploadOutput { - bucket: Some(bucket), - key: Some(key), - e_tag: oi.etag, + bucket: Some(bucket.clone()), + key: Some(key.clone()), + e_tag: obj_info.etag.clone(), + location: Some("us-east-1".to_string()), ..Default::default() }; + + let mt2 = HashMap::new(); + let repoptions = + get_must_replicate_options(&mt2, "", ReplicationStatusType::Unknown, ReplicationType::ObjectReplicationType, opts); + + let dsc = must_replicate(&bucket, &key, &repoptions).await; + + if dsc.replicate_any() { + warn!("need multipart replication"); + let objectlayer = new_object_layer_fn(); + schedule_replication(obj_info, objectlayer.unwrap(), dsc, 1).await; + } Ok(S3Response::new(output)) } @@ -1730,17 +1777,35 @@ impl S3 for FS { .await .map_err(ApiError::from)?; - let replication_configuration = match metadata_sys::get_replication_config(&bucket).await { + let rcfg = match metadata_sys::get_replication_config(&bucket).await { Ok((cfg, _created)) => Some(cfg), Err(err) => { - warn!("get_object_lock_config err {:?}", err); - None + error!("get_replication_config err {:?}", err); + return Err(ApiError::from(err).into()); } }; - Ok(S3Response::new(GetBucketReplicationOutput { - replication_configuration, - })) + if rcfg.is_none() { + return Err(S3Error::with_message(S3ErrorCode::NoSuchBucket, "replication not found".to_string())); + } + + // Ok(S3Response::new(GetBucketReplicationOutput { + // replication_configuration: rcfg, + // })) + + if rcfg.is_some() { + Ok(S3Response::new(GetBucketReplicationOutput { + replication_configuration: rcfg, + })) + } else { + let rep = ReplicationConfiguration { + role: "".to_string(), + rules: vec![], + }; + Ok(S3Response::new(GetBucketReplicationOutput { + replication_configuration: Some(rep), + })) + } } async fn put_bucket_replication( @@ -1752,6 +1817,7 @@ impl S3 for FS { replication_configuration, .. } = req.input; + warn!("put bucket replication"); let Some(store) = new_object_layer_fn() else { return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); @@ -1791,6 +1857,7 @@ impl S3 for FS { .map_err(ApiError::from)?; // TODO: remove targets + error!("delete bucket"); Ok(S3Response::new(DeleteBucketReplicationOutput::default())) } @@ -2110,13 +2177,15 @@ impl S3 for FS { None }; - if legal_hold.is_none() { - return Err(s3_error!(InvalidRequest, "Object does not have legal hold")); - } + let status = if let Some(v) = legal_hold { + v + } else { + ObjectLockLegalHoldStatus::OFF.to_string() + }; Ok(S3Response::new(GetObjectLegalHoldOutput { legal_hold: Some(ObjectLockLegalHold { - status: Some(ObjectLockLegalHoldStatus::from(legal_hold.unwrap_or_default())), + status: Some(ObjectLockLegalHoldStatus::from(status)), }), })) } @@ -2178,6 +2247,103 @@ impl S3 for FS { request_charged: Some(RequestCharged::from_static(RequestCharged::REQUESTER)), })) } + + async fn get_object_retention( + &self, + req: S3Request, + ) -> S3Result> { + let GetObjectRetentionInput { + bucket, key, version_id, .. + } = req.input; + + let Some(store) = new_object_layer_fn() else { + return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); + }; + + // check object lock + let _ = metadata_sys::get_object_lock_config(&bucket).await.map_err(ApiError::from)?; + + let opts: ObjectOptions = get_opts(&bucket, &key, version_id, None, &req.headers) + .await + .map_err(ApiError::from)?; + + let object_info = store.get_object_info(&bucket, &key, &opts).await.map_err(|e| { + error!("get_object_info failed, {}", e.to_string()); + s3_error!(InternalError, "{}", e.to_string()) + })?; + + let mode = if let Some(ref ud) = object_info.user_defined { + ud.get("x-amz-object-lock-mode") + .map(|v| ObjectLockRetentionMode::from(v.as_str().to_string())) + } else { + None + }; + + let retain_until_date = if let Some(ref ud) = object_info.user_defined { + ud.get("x-amz-object-lock-retain-until-date") + .and_then(|v| OffsetDateTime::parse(v.as_str(), &Rfc3339).ok()) + .map(Timestamp::from) + } else { + None + }; + + Ok(S3Response::new(GetObjectRetentionOutput { + retention: Some(ObjectLockRetention { mode, retain_until_date }), + })) + } + + async fn put_object_retention( + &self, + req: S3Request, + ) -> S3Result> { + let PutObjectRetentionInput { + bucket, + key, + retention, + version_id, + .. + } = req.input; + + let Some(store) = new_object_layer_fn() else { + return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); + }; + + // check object lock + let _ = metadata_sys::get_object_lock_config(&bucket).await.map_err(ApiError::from)?; + + // TODO: check allow + + let mut eval_metadata = HashMap::new(); + + if let Some(v) = retention { + let mode = v.mode.map(|v| v.as_str().to_string()).unwrap_or_default(); + let retain_until_date = v + .retain_until_date + .map(|v| OffsetDateTime::from(v).format(&Rfc3339).unwrap()) + .unwrap_or_default(); + let now = OffsetDateTime::now_utc(); + eval_metadata.insert("x-amz-object-lock-mode".to_string(), mode); + eval_metadata.insert("x-amz-object-lock-retain-until-date".to_string(), retain_until_date); + eval_metadata.insert( + format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "objectlock-retention-timestamp"), + format!("{}.{:09}Z", now.format(&Rfc3339).unwrap(), now.nanosecond()), + ); + } + + let mut opts: ObjectOptions = get_opts(&bucket, &key, version_id, None, &req.headers) + .await + .map_err(ApiError::from)?; + opts.eval_metadata = Some(eval_metadata); + + store.put_object_metadata(&bucket, &key, &opts).await.map_err(|e| { + error!("put_object_metadata failed, {}", e.to_string()); + s3_error!(InternalError, "{}", e.to_string()) + })?; + + Ok(S3Response::new(PutObjectRetentionOutput { + request_charged: Some(RequestCharged::from_static(RequestCharged::REQUESTER)), + })) + } } #[allow(dead_code)] diff --git a/rustfs/src/storage/error.rs b/rustfs/src/storage/error.rs index c14eb1cf..fd4c95c5 100644 --- a/rustfs/src/storage/error.rs +++ b/rustfs/src/storage/error.rs @@ -1,485 +1,485 @@ -// use common::error::Error; -// use ecstore::error::StorageError; -// use s3s::{S3Error, S3ErrorCode, s3_error}; -// pub fn to_s3_error(err: Error) -> S3Error { -// if let Some(storage_err) = err.downcast_ref::() { -// return match storage_err { -// StorageError::NotImplemented => s3_error!(NotImplemented), -// StorageError::InvalidArgument(bucket, object, version_id) => { -// s3_error!(InvalidArgument, "Invalid arguments provided for {}/{}-{}", bucket, object, version_id) -// } -// StorageError::MethodNotAllowed => s3_error!(MethodNotAllowed), -// StorageError::BucketNotFound(bucket) => { -// s3_error!(NoSuchBucket, "bucket not found {}", bucket) -// } -// StorageError::BucketNotEmpty(bucket) => s3_error!(BucketNotEmpty, "bucket not empty {}", bucket), -// StorageError::BucketNameInvalid(bucket) => s3_error!(InvalidBucketName, "invalid bucket name {}", bucket), -// StorageError::ObjectNameInvalid(bucket, object) => { -// s3_error!(InvalidArgument, "invalid object name {}/{}", bucket, object) -// } -// StorageError::BucketExists(bucket) => s3_error!(BucketAlreadyExists, "{}", bucket), -// StorageError::StorageFull => s3_error!(ServiceUnavailable, "Storage reached its minimum free drive threshold."), -// StorageError::SlowDown => s3_error!(SlowDown, "Please reduce your request rate"), -// StorageError::PrefixAccessDenied(bucket, object) => { -// s3_error!(AccessDenied, "PrefixAccessDenied {}/{}", bucket, object) -// } -// StorageError::InvalidUploadIDKeyCombination(bucket, object) => { -// s3_error!(InvalidArgument, "Invalid UploadID KeyCombination: {}/{}", bucket, object) -// } -// StorageError::MalformedUploadID(bucket) => s3_error!(InvalidArgument, "Malformed UploadID: {}", bucket), -// StorageError::ObjectNameTooLong(bucket, object) => { -// s3_error!(InvalidArgument, "Object name too long: {}/{}", bucket, object) -// } -// StorageError::ObjectNamePrefixAsSlash(bucket, object) => { -// s3_error!(InvalidArgument, "Object name contains forward slash as prefix: {}/{}", bucket, object) -// } -// StorageError::ObjectNotFound(bucket, object) => s3_error!(NoSuchKey, "{}/{}", bucket, object), -// StorageError::VersionNotFound(bucket, object, version_id) => { -// s3_error!(NoSuchVersion, "{}/{}/{}", bucket, object, version_id) -// } -// StorageError::InvalidUploadID(bucket, object, version_id) => { -// s3_error!(InvalidPart, "Invalid upload id: {}/{}-{}", bucket, object, version_id) -// } -// StorageError::InvalidVersionID(bucket, object, version_id) => { -// s3_error!(InvalidArgument, "Invalid version id: {}/{}-{}", bucket, object, version_id) -// } -// // extended -// StorageError::DataMovementOverwriteErr(bucket, object, version_id) => s3_error!( -// InvalidArgument, -// "invalid data movement operation, source and destination pool are the same for : {}/{}-{}", -// bucket, -// object, -// version_id -// ), - -// // extended -// StorageError::ObjectExistsAsDirectory(bucket, object) => { -// s3_error!(InvalidArgument, "Object exists on :{} as directory {}", bucket, object) -// } -// StorageError::InvalidPart(bucket, object, version_id) => { -// s3_error!( -// InvalidPart, -// "Specified part could not be found. PartNumber {}, Expected {}, got {}", -// bucket, -// object, -// version_id -// ) -// } -// StorageError::DoneForNow => s3_error!(InternalError, "DoneForNow"), -// }; -// } - -// if is_err_file_not_found(&err) { -// return S3Error::with_message(S3ErrorCode::NoSuchKey, format!(" ec err {}", err)); -// } - -// S3Error::with_message(S3ErrorCode::InternalError, format!(" ec err {}", err)) -// } - -// #[cfg(test)] -// mod tests { -// use super::*; -// use s3s::S3ErrorCode; - -// #[test] -// fn test_to_s3_error_not_implemented() { -// let storage_err = StorageError::NotImplemented; -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::NotImplemented); -// } - -// #[test] -// fn test_to_s3_error_invalid_argument() { -// let storage_err = -// StorageError::InvalidArgument("test-bucket".to_string(), "test-object".to_string(), "test-version".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); -// assert!(s3_err.message().unwrap().contains("Invalid arguments provided")); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// assert!(s3_err.message().unwrap().contains("test-object")); -// assert!(s3_err.message().unwrap().contains("test-version")); -// } - -// #[test] -// fn test_to_s3_error_method_not_allowed() { -// let storage_err = StorageError::MethodNotAllowed; -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::MethodNotAllowed); -// } - -// #[test] -// fn test_to_s3_error_bucket_not_found() { -// let storage_err = StorageError::BucketNotFound("test-bucket".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchBucket); -// assert!(s3_err.message().unwrap().contains("bucket not found")); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// } - -// #[test] -// fn test_to_s3_error_bucket_not_empty() { -// let storage_err = StorageError::BucketNotEmpty("test-bucket".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::BucketNotEmpty); -// assert!(s3_err.message().unwrap().contains("bucket not empty")); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// } - -// #[test] -// fn test_to_s3_error_bucket_name_invalid() { -// let storage_err = StorageError::BucketNameInvalid("invalid-bucket-name".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidBucketName); -// assert!(s3_err.message().unwrap().contains("invalid bucket name")); -// assert!(s3_err.message().unwrap().contains("invalid-bucket-name")); -// } - -// #[test] -// fn test_to_s3_error_object_name_invalid() { -// let storage_err = StorageError::ObjectNameInvalid("test-bucket".to_string(), "invalid-object".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); -// assert!(s3_err.message().unwrap().contains("invalid object name")); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// assert!(s3_err.message().unwrap().contains("invalid-object")); -// } - -// #[test] -// fn test_to_s3_error_bucket_exists() { -// let storage_err = StorageError::BucketExists("existing-bucket".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::BucketAlreadyExists); -// assert!(s3_err.message().unwrap().contains("existing-bucket")); -// } - -// #[test] -// fn test_to_s3_error_storage_full() { -// let storage_err = StorageError::StorageFull; -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::ServiceUnavailable); -// assert!( -// s3_err -// .message() -// .unwrap() -// .contains("Storage reached its minimum free drive threshold") -// ); -// } - -// #[test] -// fn test_to_s3_error_slow_down() { -// let storage_err = StorageError::SlowDown; -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::SlowDown); -// assert!(s3_err.message().unwrap().contains("Please reduce your request rate")); -// } - -// #[test] -// fn test_to_s3_error_prefix_access_denied() { -// let storage_err = StorageError::PrefixAccessDenied("test-bucket".to_string(), "test-prefix".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::AccessDenied); -// assert!(s3_err.message().unwrap().contains("PrefixAccessDenied")); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// assert!(s3_err.message().unwrap().contains("test-prefix")); -// } - -// #[test] -// fn test_to_s3_error_invalid_upload_id_key_combination() { -// let storage_err = StorageError::InvalidUploadIDKeyCombination("test-bucket".to_string(), "test-object".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); -// assert!(s3_err.message().unwrap().contains("Invalid UploadID KeyCombination")); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// assert!(s3_err.message().unwrap().contains("test-object")); -// } - -// #[test] -// fn test_to_s3_error_malformed_upload_id() { -// let storage_err = StorageError::MalformedUploadID("malformed-id".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); -// assert!(s3_err.message().unwrap().contains("Malformed UploadID")); -// assert!(s3_err.message().unwrap().contains("malformed-id")); -// } - -// #[test] -// fn test_to_s3_error_object_name_too_long() { -// let storage_err = StorageError::ObjectNameTooLong("test-bucket".to_string(), "very-long-object-name".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); -// assert!(s3_err.message().unwrap().contains("Object name too long")); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// assert!(s3_err.message().unwrap().contains("very-long-object-name")); -// } - -// #[test] -// fn test_to_s3_error_object_name_prefix_as_slash() { -// let storage_err = StorageError::ObjectNamePrefixAsSlash("test-bucket".to_string(), "/invalid-object".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); -// assert!( -// s3_err -// .message() -// .unwrap() -// .contains("Object name contains forward slash as prefix") -// ); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// assert!(s3_err.message().unwrap().contains("/invalid-object")); -// } - -// #[test] -// fn test_to_s3_error_object_not_found() { -// let storage_err = StorageError::ObjectNotFound("test-bucket".to_string(), "missing-object".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchKey); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// assert!(s3_err.message().unwrap().contains("missing-object")); -// } - -// #[test] -// fn test_to_s3_error_version_not_found() { -// let storage_err = -// StorageError::VersionNotFound("test-bucket".to_string(), "test-object".to_string(), "missing-version".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchVersion); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// assert!(s3_err.message().unwrap().contains("test-object")); -// assert!(s3_err.message().unwrap().contains("missing-version")); -// } - -// #[test] -// fn test_to_s3_error_invalid_upload_id() { -// let storage_err = -// StorageError::InvalidUploadID("test-bucket".to_string(), "test-object".to_string(), "invalid-upload-id".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidPart); -// assert!(s3_err.message().unwrap().contains("Invalid upload id")); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// assert!(s3_err.message().unwrap().contains("test-object")); -// assert!(s3_err.message().unwrap().contains("invalid-upload-id")); -// } - -// #[test] -// fn test_to_s3_error_invalid_version_id() { -// let storage_err = StorageError::InvalidVersionID( -// "test-bucket".to_string(), -// "test-object".to_string(), -// "invalid-version-id".to_string(), -// ); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); -// assert!(s3_err.message().unwrap().contains("Invalid version id")); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// assert!(s3_err.message().unwrap().contains("test-object")); -// assert!(s3_err.message().unwrap().contains("invalid-version-id")); -// } - -// #[test] -// fn test_to_s3_error_data_movement_overwrite_err() { -// let storage_err = StorageError::DataMovementOverwriteErr( -// "test-bucket".to_string(), -// "test-object".to_string(), -// "test-version".to_string(), -// ); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); -// assert!(s3_err.message().unwrap().contains("invalid data movement operation")); -// assert!(s3_err.message().unwrap().contains("source and destination pool are the same")); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// assert!(s3_err.message().unwrap().contains("test-object")); -// assert!(s3_err.message().unwrap().contains("test-version")); -// } - -// #[test] -// fn test_to_s3_error_object_exists_as_directory() { -// let storage_err = StorageError::ObjectExistsAsDirectory("test-bucket".to_string(), "directory-object".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); -// assert!(s3_err.message().unwrap().contains("Object exists on")); -// assert!(s3_err.message().unwrap().contains("as directory")); -// assert!(s3_err.message().unwrap().contains("test-bucket")); -// assert!(s3_err.message().unwrap().contains("directory-object")); -// } - -// #[test] -// fn test_to_s3_error_insufficient_read_quorum() { -// let storage_err = StorageError::InsufficientReadQuorum; -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::SlowDown); -// assert!( -// s3_err -// .message() -// .unwrap() -// .contains("Storage resources are insufficient for the read operation") -// ); -// } - -// #[test] -// fn test_to_s3_error_insufficient_write_quorum() { -// let storage_err = StorageError::InsufficientWriteQuorum; -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::SlowDown); -// assert!( -// s3_err -// .message() -// .unwrap() -// .contains("Storage resources are insufficient for the write operation") -// ); -// } - -// #[test] -// fn test_to_s3_error_decommission_not_started() { -// let storage_err = StorageError::DecommissionNotStarted; -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); -// assert!(s3_err.message().unwrap().contains("Decommission Not Started")); -// } - -// #[test] -// fn test_to_s3_error_decommission_already_running() { -// let storage_err = StorageError::DecommissionAlreadyRunning; -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InternalError); -// assert!(s3_err.message().unwrap().contains("Decommission already running")); -// } - -// #[test] -// fn test_to_s3_error_volume_not_found() { -// let storage_err = StorageError::VolumeNotFound("test-volume".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchBucket); -// assert!(s3_err.message().unwrap().contains("bucket not found")); -// assert!(s3_err.message().unwrap().contains("test-volume")); -// } - -// #[test] -// fn test_to_s3_error_invalid_part() { -// let storage_err = StorageError::InvalidPart(1, "expected-part".to_string(), "got-part".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidPart); -// assert!(s3_err.message().unwrap().contains("Specified part could not be found")); -// assert!(s3_err.message().unwrap().contains("PartNumber")); -// assert!(s3_err.message().unwrap().contains("expected-part")); -// assert!(s3_err.message().unwrap().contains("got-part")); -// } - -// #[test] -// fn test_to_s3_error_done_for_now() { -// let storage_err = StorageError::DoneForNow; -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InternalError); -// assert!(s3_err.message().unwrap().contains("DoneForNow")); -// } - -// #[test] -// fn test_to_s3_error_non_storage_error() { -// // Test with a non-StorageError -// let err = Error::from_string("Generic error message".to_string()); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InternalError); -// assert!(s3_err.message().unwrap().contains("ec err")); -// assert!(s3_err.message().unwrap().contains("Generic error message")); -// } - -// #[test] -// fn test_to_s3_error_with_unicode_strings() { -// let storage_err = StorageError::BucketNotFound("测试桶".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchBucket); -// assert!(s3_err.message().unwrap().contains("bucket not found")); -// assert!(s3_err.message().unwrap().contains("测试桶")); -// } - -// #[test] -// fn test_to_s3_error_with_special_characters() { -// let storage_err = StorageError::ObjectNameInvalid("bucket-with-@#$%".to_string(), "object-with-!@#$%^&*()".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); -// assert!(s3_err.message().unwrap().contains("invalid object name")); -// assert!(s3_err.message().unwrap().contains("bucket-with-@#$%")); -// assert!(s3_err.message().unwrap().contains("object-with-!@#$%^&*()")); -// } - -// #[test] -// fn test_to_s3_error_with_empty_strings() { -// let storage_err = StorageError::BucketNotFound("".to_string()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchBucket); -// assert!(s3_err.message().unwrap().contains("bucket not found")); -// } - -// #[test] -// fn test_to_s3_error_with_very_long_strings() { -// let long_bucket_name = "a".repeat(1000); -// let storage_err = StorageError::BucketNotFound(long_bucket_name.clone()); -// let err = Error::new(storage_err); -// let s3_err = to_s3_error(err); - -// assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchBucket); -// assert!(s3_err.message().unwrap().contains("bucket not found")); -// assert!(s3_err.message().unwrap().contains(&long_bucket_name)); -// } -// } +use common::error::Error; +use ecstore::error::StorageError; +use s3s::{S3Error, S3ErrorCode, s3_error}; +pub fn to_s3_error(err: Error) -> S3Error { + if let Some(storage_err) = err.downcast_ref::() { + return match storage_err { + StorageError::NotImplemented => s3_error!(NotImplemented), + StorageError::InvalidArgument(bucket, object, version_id) => { + s3_error!(InvalidArgument, "Invalid arguments provided for {}/{}-{}", bucket, object, version_id) + } + StorageError::MethodNotAllowed => s3_error!(MethodNotAllowed), + StorageError::BucketNotFound(bucket) => { + s3_error!(NoSuchBucket, "bucket not found {}", bucket) + } + StorageError::BucketNotEmpty(bucket) => s3_error!(BucketNotEmpty, "bucket not empty {}", bucket), + StorageError::BucketNameInvalid(bucket) => s3_error!(InvalidBucketName, "invalid bucket name {}", bucket), + StorageError::ObjectNameInvalid(bucket, object) => { + s3_error!(InvalidArgument, "invalid object name {}/{}", bucket, object) + } + StorageError::BucketExists(bucket) => s3_error!(BucketAlreadyExists, "{}", bucket), + StorageError::StorageFull => s3_error!(ServiceUnavailable, "Storage reached its minimum free drive threshold."), + StorageError::SlowDown => s3_error!(SlowDown, "Please reduce your request rate"), + StorageError::PrefixAccessDenied(bucket, object) => { + s3_error!(AccessDenied, "PrefixAccessDenied {}/{}", bucket, object) + } + StorageError::InvalidUploadIDKeyCombination(bucket, object) => { + s3_error!(InvalidArgument, "Invalid UploadID KeyCombination: {}/{}", bucket, object) + } + StorageError::MalformedUploadID(bucket) => s3_error!(InvalidArgument, "Malformed UploadID: {}", bucket), + StorageError::ObjectNameTooLong(bucket, object) => { + s3_error!(InvalidArgument, "Object name too long: {}/{}", bucket, object) + } + StorageError::ObjectNamePrefixAsSlash(bucket, object) => { + s3_error!(InvalidArgument, "Object name contains forward slash as prefix: {}/{}", bucket, object) + } + StorageError::ObjectNotFound(bucket, object) => s3_error!(NoSuchKey, "{}/{}", bucket, object), + StorageError::VersionNotFound(bucket, object, version_id) => { + s3_error!(NoSuchVersion, "{}/{}/{}", bucket, object, version_id) + } + StorageError::InvalidUploadID(bucket, object, version_id) => { + s3_error!(InvalidPart, "Invalid upload id: {}/{}-{}", bucket, object, version_id) + } + StorageError::InvalidVersionID(bucket, object, version_id) => { + s3_error!(InvalidArgument, "Invalid version id: {}/{}-{}", bucket, object, version_id) + } + // extended + StorageError::DataMovementOverwriteErr(bucket, object, version_id) => s3_error!( + InvalidArgument, + "invalid data movement operation, source and destination pool are the same for : {}/{}-{}", + bucket, + object, + version_id + ), + + // extended + StorageError::ObjectExistsAsDirectory(bucket, object) => { + s3_error!(InvalidArgument, "Object exists on :{} as directory {}", bucket, object) + } + StorageError::InvalidPart(bucket, object, version_id) => { + s3_error!( + InvalidPart, + "Specified part could not be found. PartNumber {}, Expected {}, got {}", + bucket, + object, + version_id + ) + } + StorageError::DoneForNow => s3_error!(InternalError, "DoneForNow"), + }; + } + + if is_err_file_not_found(&err) { + return S3Error::with_message(S3ErrorCode::NoSuchKey, format!(" ec err {}", err)); + } + + S3Error::with_message(S3ErrorCode::InternalError, format!(" ec err {}", err)) +} + +#[cfg(test)] +mod tests { + use super::*; + use s3s::S3ErrorCode; + + #[test] + fn test_to_s3_error_not_implemented() { + let storage_err = StorageError::NotImplemented; + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::NotImplemented); + } + + #[test] + fn test_to_s3_error_invalid_argument() { + let storage_err = + StorageError::InvalidArgument("test-bucket".to_string(), "test-object".to_string(), "test-version".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); + assert!(s3_err.message().unwrap().contains("Invalid arguments provided")); + assert!(s3_err.message().unwrap().contains("test-bucket")); + assert!(s3_err.message().unwrap().contains("test-object")); + assert!(s3_err.message().unwrap().contains("test-version")); + } + + #[test] + fn test_to_s3_error_method_not_allowed() { + let storage_err = StorageError::MethodNotAllowed; + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::MethodNotAllowed); + } + + #[test] + fn test_to_s3_error_bucket_not_found() { + let storage_err = StorageError::BucketNotFound("test-bucket".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchBucket); + assert!(s3_err.message().unwrap().contains("bucket not found")); + assert!(s3_err.message().unwrap().contains("test-bucket")); + } + + #[test] + fn test_to_s3_error_bucket_not_empty() { + let storage_err = StorageError::BucketNotEmpty("test-bucket".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::BucketNotEmpty); + assert!(s3_err.message().unwrap().contains("bucket not empty")); + assert!(s3_err.message().unwrap().contains("test-bucket")); + } + + #[test] + fn test_to_s3_error_bucket_name_invalid() { + let storage_err = StorageError::BucketNameInvalid("invalid-bucket-name".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidBucketName); + assert!(s3_err.message().unwrap().contains("invalid bucket name")); + assert!(s3_err.message().unwrap().contains("invalid-bucket-name")); + } + + #[test] + fn test_to_s3_error_object_name_invalid() { + let storage_err = StorageError::ObjectNameInvalid("test-bucket".to_string(), "invalid-object".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); + assert!(s3_err.message().unwrap().contains("invalid object name")); + assert!(s3_err.message().unwrap().contains("test-bucket")); + assert!(s3_err.message().unwrap().contains("invalid-object")); + } + + #[test] + fn test_to_s3_error_bucket_exists() { + let storage_err = StorageError::BucketExists("existing-bucket".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::BucketAlreadyExists); + assert!(s3_err.message().unwrap().contains("existing-bucket")); + } + + #[test] + fn test_to_s3_error_storage_full() { + let storage_err = StorageError::StorageFull; + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::ServiceUnavailable); + assert!( + s3_err + .message() + .unwrap() + .contains("Storage reached its minimum free drive threshold") + ); + } + + #[test] + fn test_to_s3_error_slow_down() { + let storage_err = StorageError::SlowDown; + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::SlowDown); + assert!(s3_err.message().unwrap().contains("Please reduce your request rate")); + } + + #[test] + fn test_to_s3_error_prefix_access_denied() { + let storage_err = StorageError::PrefixAccessDenied("test-bucket".to_string(), "test-prefix".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::AccessDenied); + assert!(s3_err.message().unwrap().contains("PrefixAccessDenied")); + assert!(s3_err.message().unwrap().contains("test-bucket")); + assert!(s3_err.message().unwrap().contains("test-prefix")); + } + + #[test] + fn test_to_s3_error_invalid_upload_id_key_combination() { + let storage_err = StorageError::InvalidUploadIDKeyCombination("test-bucket".to_string(), "test-object".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); + assert!(s3_err.message().unwrap().contains("Invalid UploadID KeyCombination")); + assert!(s3_err.message().unwrap().contains("test-bucket")); + assert!(s3_err.message().unwrap().contains("test-object")); + } + + #[test] + fn test_to_s3_error_malformed_upload_id() { + let storage_err = StorageError::MalformedUploadID("malformed-id".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); + assert!(s3_err.message().unwrap().contains("Malformed UploadID")); + assert!(s3_err.message().unwrap().contains("malformed-id")); + } + + #[test] + fn test_to_s3_error_object_name_too_long() { + let storage_err = StorageError::ObjectNameTooLong("test-bucket".to_string(), "very-long-object-name".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); + assert!(s3_err.message().unwrap().contains("Object name too long")); + assert!(s3_err.message().unwrap().contains("test-bucket")); + assert!(s3_err.message().unwrap().contains("very-long-object-name")); + } + + #[test] + fn test_to_s3_error_object_name_prefix_as_slash() { + let storage_err = StorageError::ObjectNamePrefixAsSlash("test-bucket".to_string(), "/invalid-object".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); + assert!( + s3_err + .message() + .unwrap() + .contains("Object name contains forward slash as prefix") + ); + assert!(s3_err.message().unwrap().contains("test-bucket")); + assert!(s3_err.message().unwrap().contains("/invalid-object")); + } + + #[test] + fn test_to_s3_error_object_not_found() { + let storage_err = StorageError::ObjectNotFound("test-bucket".to_string(), "missing-object".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchKey); + assert!(s3_err.message().unwrap().contains("test-bucket")); + assert!(s3_err.message().unwrap().contains("missing-object")); + } + + #[test] + fn test_to_s3_error_version_not_found() { + let storage_err = + StorageError::VersionNotFound("test-bucket".to_string(), "test-object".to_string(), "missing-version".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchVersion); + assert!(s3_err.message().unwrap().contains("test-bucket")); + assert!(s3_err.message().unwrap().contains("test-object")); + assert!(s3_err.message().unwrap().contains("missing-version")); + } + + #[test] + fn test_to_s3_error_invalid_upload_id() { + let storage_err = + StorageError::InvalidUploadID("test-bucket".to_string(), "test-object".to_string(), "invalid-upload-id".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidPart); + assert!(s3_err.message().unwrap().contains("Invalid upload id")); + assert!(s3_err.message().unwrap().contains("test-bucket")); + assert!(s3_err.message().unwrap().contains("test-object")); + assert!(s3_err.message().unwrap().contains("invalid-upload-id")); + } + + #[test] + fn test_to_s3_error_invalid_version_id() { + let storage_err = StorageError::InvalidVersionID( + "test-bucket".to_string(), + "test-object".to_string(), + "invalid-version-id".to_string(), + ); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); + assert!(s3_err.message().unwrap().contains("Invalid version id")); + assert!(s3_err.message().unwrap().contains("test-bucket")); + assert!(s3_err.message().unwrap().contains("test-object")); + assert!(s3_err.message().unwrap().contains("invalid-version-id")); + } + + #[test] + fn test_to_s3_error_data_movement_overwrite_err() { + let storage_err = StorageError::DataMovementOverwriteErr( + "test-bucket".to_string(), + "test-object".to_string(), + "test-version".to_string(), + ); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); + assert!(s3_err.message().unwrap().contains("invalid data movement operation")); + assert!(s3_err.message().unwrap().contains("source and destination pool are the same")); + assert!(s3_err.message().unwrap().contains("test-bucket")); + assert!(s3_err.message().unwrap().contains("test-object")); + assert!(s3_err.message().unwrap().contains("test-version")); + } + + #[test] + fn test_to_s3_error_object_exists_as_directory() { + let storage_err = StorageError::ObjectExistsAsDirectory("test-bucket".to_string(), "directory-object".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); + assert!(s3_err.message().unwrap().contains("Object exists on")); + assert!(s3_err.message().unwrap().contains("as directory")); + assert!(s3_err.message().unwrap().contains("test-bucket")); + assert!(s3_err.message().unwrap().contains("directory-object")); + } + + #[test] + fn test_to_s3_error_insufficient_read_quorum() { + let storage_err = StorageError::InsufficientReadQuorum; + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::SlowDown); + assert!( + s3_err + .message() + .unwrap() + .contains("Storage resources are insufficient for the read operation") + ); + } + + #[test] + fn test_to_s3_error_insufficient_write_quorum() { + let storage_err = StorageError::InsufficientWriteQuorum; + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::SlowDown); + assert!( + s3_err + .message() + .unwrap() + .contains("Storage resources are insufficient for the write operation") + ); + } + + #[test] + fn test_to_s3_error_decommission_not_started() { + let storage_err = StorageError::DecommissionNotStarted; + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); + assert!(s3_err.message().unwrap().contains("Decommission Not Started")); + } + + #[test] + fn test_to_s3_error_decommission_already_running() { + let storage_err = StorageError::DecommissionAlreadyRunning; + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InternalError); + assert!(s3_err.message().unwrap().contains("Decommission already running")); + } + + #[test] + fn test_to_s3_error_volume_not_found() { + let storage_err = StorageError::VolumeNotFound("test-volume".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchBucket); + assert!(s3_err.message().unwrap().contains("bucket not found")); + assert!(s3_err.message().unwrap().contains("test-volume")); + } + + #[test] + fn test_to_s3_error_invalid_part() { + let storage_err = StorageError::InvalidPart(1, "expected-part".to_string(), "got-part".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidPart); + assert!(s3_err.message().unwrap().contains("Specified part could not be found")); + assert!(s3_err.message().unwrap().contains("PartNumber")); + assert!(s3_err.message().unwrap().contains("expected-part")); + assert!(s3_err.message().unwrap().contains("got-part")); + } + + #[test] + fn test_to_s3_error_done_for_now() { + let storage_err = StorageError::DoneForNow; + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InternalError); + assert!(s3_err.message().unwrap().contains("DoneForNow")); + } + + #[test] + fn test_to_s3_error_non_storage_error() { + // Test with a non-StorageError + let err = Error::from_string("Generic error message".to_string()); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InternalError); + assert!(s3_err.message().unwrap().contains("ec err")); + assert!(s3_err.message().unwrap().contains("Generic error message")); + } + + #[test] + fn test_to_s3_error_with_unicode_strings() { + let storage_err = StorageError::BucketNotFound("测试桶".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchBucket); + assert!(s3_err.message().unwrap().contains("bucket not found")); + assert!(s3_err.message().unwrap().contains("测试桶")); + } + + #[test] + fn test_to_s3_error_with_special_characters() { + let storage_err = StorageError::ObjectNameInvalid("bucket-with-@#$%".to_string(), "object-with-!@#$%^&*()".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument); + assert!(s3_err.message().unwrap().contains("invalid object name")); + assert!(s3_err.message().unwrap().contains("bucket-with-@#$%")); + assert!(s3_err.message().unwrap().contains("object-with-!@#$%^&*()")); + } + + #[test] + fn test_to_s3_error_with_empty_strings() { + let storage_err = StorageError::BucketNotFound("".to_string()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchBucket); + assert!(s3_err.message().unwrap().contains("bucket not found")); + } + + #[test] + fn test_to_s3_error_with_very_long_strings() { + let long_bucket_name = "a".repeat(1000); + let storage_err = StorageError::BucketNotFound(long_bucket_name.clone()); + let err = Error::new(storage_err); + let s3_err = to_s3_error(err); + + assert_eq!(*s3_err.code(), S3ErrorCode::NoSuchBucket); + assert!(s3_err.message().unwrap().contains("bucket not found")); + assert!(s3_err.message().unwrap().contains(&long_bucket_name)); + } +} diff --git a/s3select/query/Cargo.toml b/s3select/query/Cargo.toml index 6629927c..0240b880 100644 --- a/s3select/query/Cargo.toml +++ b/s3select/query/Cargo.toml @@ -11,7 +11,7 @@ datafusion = { workspace = true } derive_builder = { workspace = true } futures = { workspace = true } lazy_static = { workspace = true } -parking_lot = { version = "0.12.3" } +parking_lot = { workspace = true } s3s.workspace = true snafu = { workspace = true, features = ["backtrace"] } tokio = { workspace = true } diff --git a/scripts/build.py b/scripts/build.py deleted file mode 100755 index f1beb078..00000000 --- a/scripts/build.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -from dataclasses import dataclass -import argparse -import subprocess -from pathlib import Path - - -@dataclass -class CliArgs: - profile: str - target: str - glibc: str - - @staticmethod - def parse(): - parser = argparse.ArgumentParser() - parser.add_argument("--profile", type=str, required=True) - parser.add_argument("--target", type=str, required=True) - parser.add_argument("--glibc", type=str, required=True) - args = parser.parse_args() - return CliArgs(args.profile, args.target, args.glibc) - - -def shell(cmd: str): - print(cmd, flush=True) - subprocess.run(cmd, shell=True, check=True) - - -def main(args: CliArgs): - use_zigbuild = False - use_old_glibc = False - - if args.glibc and args.glibc != "default": - use_zigbuild = True - use_old_glibc = True - - if args.target and args.target != "x86_64-unknown-linux-gnu": - shell("rustup target add " + args.target) - - cmd = ["cargo", "build"] - if use_zigbuild: - cmd = ["cargo", " zigbuild"] - - cmd.extend(["--profile", args.profile]) - - if use_old_glibc: - cmd.extend(["--target", f"{args.target}.{args.glibc}"]) - else: - cmd.extend(["--target", args.target]) - - cmd.extend(["-p", "rustfs"]) - cmd.extend(["--bins"]) - - shell("touch rustfs/build.rs") # refresh build info for rustfs - shell(" ".join(cmd)) - - if args.profile == "dev": - profile_dir = "debug" - elif args.profile == "release": - profile_dir = "release" - else: - profile_dir = args.profile - - bin_path = Path(f"target/{args.target}/{profile_dir}/rustfs") - - bin_name = f"rustfs.{args.profile}.{args.target}" - if use_old_glibc: - bin_name += f".glibc{args.glibc}" - bin_name += ".bin" - - out_path = Path(f"target/artifacts/{bin_name}") - - out_path.parent.mkdir(parents=True, exist_ok=True) - out_path.hardlink_to(bin_path) - - -if __name__ == "__main__": - main(CliArgs.parse())