refactor(deps): centralize crate versions in root Cargo.toml (#448)

* chore(ci): upgrade protoc from 30.2 to 31.1

- Update protoc version in GitHub Actions setup workflow
- Use arduino/setup-protoc@v3 to install the latest protoc version
- Ensure compatibility with current project requirements
- Improve proto file compilation performance and stability

This upgrade aligns our development environment with the latest protobuf standards.
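
As a rough sketch of what this bullet refers to, the arduino/setup-protoc step ends up pinned like the following (surrounding workflow keys abbreviated; repo-token shown as an optional input):

```yaml
# Sketch: pinning protoc in a GitHub Actions step via arduino/setup-protoc
- uses: arduino/setup-protoc@v3
  with:
    version: "31.1"
    repo-token: ${{ secrets.GITHUB_TOKEN }}  # optional; avoids GitHub API rate limits
```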

* modify package version

* refactor(deps): centralize crate versions in root Cargo.toml

- Move all dependency versions into the [workspace.dependencies] section of the root Cargo.toml
- Standardize versions of the AWS SDK and related crates
- Update the tokio, bytes, and futures crates to the latest stable versions
- Ensure consistent versions across all workspace members
- Implement workspace inheritance for common dependencies

This change simplifies dependency management and ensures version consistency across the project.
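
As a minimal sketch of the workspace-inheritance pattern described above (versions and features here are illustrative, not the exact entries from this repository):

```toml
# Root Cargo.toml: declare each shared dependency once
[workspace.dependencies]
tokio = { version = "1", features = ["full"] }
serde = { version = "1", features = ["derive"] }

# A member crate's Cargo.toml: inherit the version from the workspace,
# optionally adding crate-specific features on top
[dependencies]
tokio = { workspace = true, features = ["macros"] }
serde = { workspace = true }
```

With this layout, bumping a dependency means editing a single line in the root manifest, and every member that declares `workspace = true` picks up the same version.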

* fix

* modify
This commit is contained in:
houseme
2025-06-07 21:52:59 +08:00
committed by GitHub
parent 1432ddb119
commit d66525a22f
36 changed files with 110 additions and 262 deletions

View File

@@ -7,9 +7,10 @@ RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/
RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y
# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip \
&& unzip protoc-30.2-linux-x86_64.zip -d protoc3 \
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-30.2-linux-x86_64.zip protoc3
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \

View File

@@ -13,10 +13,10 @@ RUN dnf makecache
RUN yum install wget git unzip gcc openssl-devel pkgconf-pkg-config -y
# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip \
&& unzip protoc-30.2-linux-x86_64.zip -d protoc3 \
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
&& rm -rf protoc-30.2-linux-x86_64.zip protoc3
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \

View File

@@ -7,9 +7,10 @@ RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/
RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y
# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip \
&& unzip protoc-30.2-linux-x86_64.zip -d protoc3 \
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-30.2-linux-x86_64.zip protoc3
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \

View File

@@ -22,21 +22,6 @@ docker compose -f docker-compose.yml up -d
## 配置可观测性
### 创建配置文件
1. 进入 `deploy/config` 目录
2. 复制示例配置:`cp obs.toml.example obs.toml`
3. 编辑 `obs.toml` 配置文件,修改以下关键参数:
| 配置项 | 说明 | 示例值 |
|-----------------|----------------------------|-----------------------|
| endpoint | OpenTelemetry Collector 地址 | http://localhost:4317 |
| service_name | 服务名称 | rustfs |
| service_version | 服务版本 | 1.0.0 |
| environment | 运行环境 | production |
| meter_interval | 指标导出间隔 (秒) | 30 |
| sample_ratio | 采样率 | 1.0 |
| use_stdout | 是否输出到控制台 | true/false |
| logger_level | 日志级别 | info |
```
```shell
export RUSTFS_OBS_ENDPOINT="http://localhost:4317" # OpenTelemetry Collector 地址
```

View File

@@ -1,34 +0,0 @@
[observability]
endpoint = "http://otel-collector:4317" # Default is "http://localhost:4317" if not specified
use_stdout = false # Output with stdout, true output, false no output
sample_ratio = 2.0
meter_interval = 30
service_name = "rustfs"
service_version = "0.1.0"
environments = "production"
logger_level = "debug"
local_logging_enabled = true
#[[sinks]]
#type = "Kafka"
#brokers = "localhost:9092"
#topic = "logs"
#batch_size = 100 # Default is 100 if not specified
#batch_timeout_ms = 1000 # Default is 1000ms if not specified
#
#[[sinks]]
#type = "Webhook"
#endpoint = "http://localhost:8080/webhook"
#auth_token = ""
#batch_size = 100 # Default is 3 if not specified
#batch_timeout_ms = 1000 # Default is 100ms if not specified
[[sinks]]
type = "File"
path = "/root/data/logs/rustfs.log"
buffer_size = 100 # Default is 8192 bytes if not specified
flush_interval_ms = 1000
flush_threshold = 100
[logger]
queue_capacity = 10

View File

@@ -1,34 +0,0 @@
[observability]
endpoint = "http://localhost:4317" # Default is "http://localhost:4317" if not specified
use_stdout = false # Output with stdout, true output, false no output
sample_ratio = 2.0
meter_interval = 30
service_name = "rustfs"
service_version = "0.1.0"
environments = "production"
logger_level = "debug"
local_logging_enabled = true
#[[sinks]]
#type = "Kafka"
#brokers = "localhost:9092"
#topic = "logs"
#batch_size = 100 # Default is 100 if not specified
#batch_timeout_ms = 1000 # Default is 1000ms if not specified
#
#[[sinks]]
#type = "Webhook"
#endpoint = "http://localhost:8080/webhook"
#auth_token = ""
#batch_size = 100 # Default is 3 if not specified
#batch_timeout_ms = 1000 # Default is 100ms if not specified
[[sinks]]
type = "File"
path = "/root/data/logs/rustfs.log"
buffer_size = 100 # Default is 8192 bytes if not specified
flush_interval_ms = 1000
flush_threshold = 100
[logger]
queue_capacity = 10

View File

@@ -32,11 +32,11 @@ runs:
- uses: arduino/setup-protoc@v3
with:
version: "30.2"
version: "31.1"
- uses: Nugine/setup-flatc@v1
with:
version: "24.3.25"
version: "25.2.10"
- uses: dtolnay/rust-toolchain@master
with:

View File

@@ -4,13 +4,13 @@ on:
push:
branches:
- main
paths:
paths:
- '**/Cargo.toml'
- '**/Cargo.lock'
pull_request:
branches:
- main
paths:
paths:
- '**/Cargo.toml'
- '**/Cargo.lock'
schedule:
@@ -20,6 +20,6 @@ jobs:
audit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v4.2.2
- uses: taiki-e/install-action@cargo-audit
- run: cargo audit -D warnings

View File

@@ -81,7 +81,7 @@ jobs:
uses: actions/cache@v4.2.3
with:
path: /Users/runner/hostedtoolcache/protoc
key: protoc-${{ runner.os }}-30.2
key: protoc-${{ runner.os }}-31.1
restore-keys: |
protoc-${{ runner.os }}-
@@ -89,7 +89,7 @@ jobs:
if: steps.cache-protoc.outputs.cache-hit != 'true'
uses: arduino/setup-protoc@v3
with:
version: '30.2'
version: '31.1'
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup Flatc

View File

@@ -36,7 +36,7 @@ jobs:
if: github.event_name == 'pull_request'
runs-on: self-hosted
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v4.2.2
- uses: ./.github/actions/setup
- name: Format Check
@@ -56,7 +56,7 @@ jobs:
if: needs.skip-check.outputs.should_skip != 'true'
runs-on: self-hosted
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v4.2.2
- uses: ./.github/actions/setup
- name: Format
@@ -94,7 +94,7 @@ jobs:
if: needs.skip-check.outputs.should_skip != 'true'
runs-on: self-hosted
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v4.2.2
- uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
with:

View File

@@ -7,7 +7,7 @@ jobs:
profile:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v4.2.2
- uses: dtolnay/rust-toolchain@nightly
with:

.gitignore vendored
View File

@@ -10,7 +10,6 @@ rustfs/static/*
!rustfs/static/.gitkeep
vendor
cli/rustfs-gui/embedded-rustfs/rustfs
deploy/config/obs.toml
*.log
deploy/certs/*
*jsonl

View File

@@ -54,7 +54,9 @@ rustfs-obs = { path = "crates/obs", version = "0.0.1" }
rustfs-event-notifier = { path = "crates/event-notifier", version = "0.0.1" }
rustfs-utils = { path = "crates/utils", version = "0.0.1" }
workers = { path = "./common/workers", version = "0.0.1" }
tokio-tar = "0.3.1"
aes-gcm = { version = "0.10.3", features = ["std"] }
arc-swap = "1.7.1"
argon2 = { version = "0.5.3", features = ["std"] }
atoi = "2.0.0"
async-recursion = "1.1.1"
async-trait = "0.1.88"
@@ -64,16 +66,21 @@ axum = "0.8.4"
axum-extra = "0.10.1"
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
backon = "1.5.1"
base64-simd = "0.8.0"
blake2 = "0.10.6"
bytes = "1.10.1"
bytesize = "2.0.1"
byteorder = "1.5.0"
cfg-if = "1.0.0"
chacha20poly1305 = { version = "0.10.1" }
chrono = { version = "0.4.41", features = ["serde"] }
clap = { version = "4.5.39", features = ["derive", "env"] }
config = "0.15.11"
const-str = { version = "0.6.2", features = ["std", "proc"] }
crc32fast = "1.4.2"
datafusion = "46.0.1"
derive_builder = "0.20.2"
dotenvy = "0.15.7"
dioxus = { version = "0.6.3", features = ["router"] }
dirs = "6.0.0"
flatbuffers = "25.2.10"
@@ -83,6 +90,7 @@ futures-core = "0.3.31"
futures-util = "0.3.31"
glob = "0.3.2"
hex = "0.4.3"
hex-simd = "0.8.0"
highway = { version = "1.3.0" }
hyper = "1.6.0"
hyper-util = { version = "0.1.14", features = [
@@ -94,6 +102,8 @@ http = "1.3.1"
http-body = "1.0.1"
humantime = "2.2.0"
include_dir = "0.7.4"
ipnetwork = { version = "0.21.1", features = ["serde"] }
itertools = "0.14.0"
jsonwebtoken = "9.3.1"
keyring = { version = "3.6.2", features = [
"apple-native",
@@ -129,6 +139,9 @@ opentelemetry-semantic-conventions = { version = "0.30.0", features = [
"semconv_experimental",
] }
parking_lot = "0.12.4"
path-absolutize = "3.1.1"
path-clean = "1.0.1"
pbkdf2 = "0.12.2"
percent-encoding = "2.3.1"
pin-project-lite = "0.2.16"
# pin-utils = "0.1.0"
@@ -154,6 +167,7 @@ rfd = { version = "0.15.3", default-features = false, features = [
] }
rmp = "0.8.14"
rmp-serde = "1.3.0"
rsa = "0.9.8"
rumqttc = { version = "0.24" }
rust-embed = { version = "8.7.2" }
rustfs-rsc = "2025.506.1"
@@ -162,12 +176,14 @@ rustls-pki-types = "1.12.0"
rustls-pemfile = "2.2.0"
s3s = { git = "https://github.com/Nugine/s3s.git", rev = "4733cdfb27b2713e832967232cbff413bb768c10" }
s3s-policy = { git = "https://github.com/Nugine/s3s.git", rev = "4733cdfb27b2713e832967232cbff413bb768c10" }
scopeguard = "1.2.0"
shadow-rs = { version = "1.1.1", default-features = false }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.140"
serde_urlencoded = "0.7.1"
serde_with = "3.12.0"
sha2 = "0.10.9"
siphasher = "1.0.1"
smallvec = { version = "1.15.0", features = ["serde"] }
snafu = "0.8.6"
socket2 = "0.5.10"
@@ -188,6 +204,7 @@ tonic = { version = "0.13.1", features = ["gzip"] }
tonic-build = { version = "0.13.1" }
tokio-rustls = { version = "0.26.2", default-features = false }
tokio-stream = { version = "0.1.17" }
tokio-tar = "0.3.1"
tokio-util = { version = "0.7.15", features = ["io", "compat"] }
tower = { version = "0.5.2", features = ["timeout"] }
tower-http = { version = "0.6.6", features = ["cors"] }
@@ -206,7 +223,7 @@ uuid = { version = "1.17.0", features = [
"macro-diagnostics",
] }
winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
[profile.wasm-dev]
inherits = "dev"

View File

@@ -7,7 +7,7 @@
| Package | Version | Download Link |
|---------|---------|----------------------------------------------------------------------------------------------------------------------------------|
| Rust | 1.8.5+ | [rust-lang.org/tools/install](https://www.rust-lang.org/tools/install) |
| protoc | 30.2+ | [protoc-30.2-linux-x86_64.zip](https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip) |
| protoc | 31.1+ | [protoc-31.1-linux-x86_64.zip](https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip) |
| flatc | 24.0+ | [Linux.flatc.binary.g++-13.zip](https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip) |
### Building RustFS

View File

@@ -7,7 +7,7 @@
| 软件包 | 版本 | 下载链接 |
|--------|--------|----------------------------------------------------------------------------------------------------------------------------------|
| Rust | 1.8.5+ | [rust-lang.org/tools/install](https://www.rust-lang.org/tools/install) |
| protoc | 30.2+ | [protoc-30.2-linux-x86_64.zip](https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip) |
| protoc | 31.1+ | [protoc-31.1-linux-x86_64.zip](https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip) |
| flatc | 24.0+ | [Linux.flatc.binary.g++-13.zip](https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip) |
### 构建 RustFS

View File

@@ -7,11 +7,11 @@ rust-version.workspace = true
version.workspace = true
[dependencies]
base64-simd = "0.8.0"
base64-simd = { workspace = true }
common.workspace = true
hex-simd = "0.8.0"
hex-simd = { workspace = true }
rand.workspace = true
rsa = "0.9.8"
rsa = { workspace = true }
serde.workspace = true
serde_json.workspace = true

View File

@@ -1,10 +1,10 @@
#!/bin/bash
clear
# 获取当前平台架构
# Get the current platform architecture
ARCH=$(uname -m)
# 根据架构设置 target 目录
# Set the target directory according to the schema
if [ "$ARCH" == "x86_64" ]; then
TARGET_DIR="target/x86_64"
elif [ "$ARCH" == "aarch64" ]; then
@@ -13,7 +13,7 @@ else
TARGET_DIR="target/unknown"
fi
# 设置 CARGO_TARGET_DIR 并构建项目
# Set CARGO_TARGET_DIR and build the project
CARGO_TARGET_DIR=$TARGET_DIR RUSTFLAGS="-C link-arg=-fuse-ld=mold" cargo build --package rustfs
echo -e "\a"

View File

@@ -9,7 +9,7 @@ workspace = true
[dependencies]
async-trait.workspace = true
lazy_static.workspace = true
scopeguard = "1.2.0"
scopeguard = { workspace = true }
tokio.workspace = true
tonic = { workspace = true }
tracing-error.workspace = true

View File

@@ -37,7 +37,7 @@ tokio = { workspace = true, features = ["test-util"] }
tracing-subscriber = { workspace = true }
http = { workspace = true }
axum = { workspace = true }
dotenvy = "0.15.7"
dotenvy = { workspace = true }
[lints]
workspace = true

View File

@@ -332,10 +332,8 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
let flexi_logger_result = flexi_logger::Logger::try_with_env_or_str(logger_level)
.unwrap_or_else(|e| {
eprintln!(
"Invalid logger level: {}, using default: {},failed error:{}",
logger_level,
DEFAULT_LOG_LEVEL,
e.to_string()
"Invalid logger level: {}, using default: {}, failed error: {:?}",
logger_level, DEFAULT_LOG_LEVEL, e
);
flexi_logger::Logger::with(log_spec.clone())
})

View File

@@ -19,7 +19,7 @@ async-compression = { version = "0.4.0", features = [
async_zip = { version = "0.0.17", features = ["tokio"] }
zip = "2.2.0"
tokio = { workspace = true, features = ["full"] }
tokio-stream = "0.1.17"
tokio-stream = { workspace = true }
tokio-tar = { workspace = true }
xz2 = { version = "0.1", optional = true, features = ["static"] }

View File

@@ -10,12 +10,12 @@ version.workspace = true
workspace = true
[dependencies]
aes-gcm = { version = "0.10.3", features = ["std"], optional = true }
argon2 = { version = "0.5.3", features = ["std"], optional = true }
cfg-if = "1.0.0"
chacha20poly1305 = { version = "0.10.1", optional = true }
aes-gcm = { workspace = true, features = ["std"], optional = true }
argon2 = { workspace = true, features = ["std"], optional = true }
cfg-if = { workspace = true }
chacha20poly1305 = { workspace = true, optional = true }
jsonwebtoken = { workspace = true }
pbkdf2 = { version = "0.12.2", optional = true }
pbkdf2 = { workspace = true, optional = true }
rand = { workspace = true, optional = true }
sha2 = { workspace = true, optional = true }
thiserror.workspace = true

View File

@@ -35,9 +35,7 @@ managing and monitoring the system.
| ├── rustfs_cert.pem
| └── rustfs_key.pem
|--config
| |--obs.example.yaml // example config
| |--rustfs.env // env config
| |--rustfs-zh.env // env config in Chinese
| |--.example.obs.env // example env config
| |--event.example.toml // event config
```

View File

@@ -38,13 +38,13 @@ ExecStart=/usr/local/bin/rustfs \
--volumes /data/rustfs/vol1,/data/rustfs/vol2 \
--obs-config /etc/rustfs/obs.yaml \
--console-enable \
--console-address 0.0.0.0:9002
--console-address 0.0.0.0:9001
# 定义启动命令,运行 /usr/local/bin/rustfs带参数
# --address 0.0.0.0:9000服务监听所有接口的 9000 端口。
# --volumes指定存储卷路径为 /data/rustfs/vol1 和 /data/rustfs/vol2。
# --obs-config指定配置文件路径为 /etc/rustfs/obs.yaml。
# --console-enable启用控制台功能。
# --console-address 0.0.0.0:9002:控制台监听所有接口的 9002 端口。
# --console-address 0.0.0.0:9001:控制台监听所有接口的 9001 端口。
# 定义环境变量配置,用于传递给服务程序,推荐使用且简洁
# rustfs 示例文件 详见: `../config/rustfs-zh.env`

View File

@@ -83,7 +83,7 @@ sudo journalctl -u rustfs --since today
```bash
# 检查服务端口
ss -tunlp | grep 9000
ss -tunlp | grep 9002
ss -tunlp | grep 9001
# 测试服务可用性
curl -I http://localhost:9000

View File

@@ -83,7 +83,7 @@ sudo journalctl -u rustfs --since today
```bash
# Check service ports
ss -tunlp | grep 9000
ss -tunlp | grep 9002
ss -tunlp | grep 9001
# Test service availability
curl -I http://localhost:9000

View File

@@ -24,7 +24,7 @@ ExecStart=/usr/local/bin/rustfs \
--volumes /data/rustfs/vol1,/data/rustfs/vol2 \
--obs-config /etc/rustfs/obs.yaml \
--console-enable \
--console-address 0.0.0.0:9002
--console-address 0.0.0.0:9001
# environment variable configuration (Option 2: Use environment variables)
# rustfs example file see: `../config/rustfs.env`

View File

@@ -13,11 +13,11 @@ RUSTFS_ADDRESS="0.0.0.0:9000"
# 是否启用 RustFS 控制台功能
RUSTFS_CONSOLE_ENABLE=true
# RustFS 控制台监听地址和端口
RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9002"
RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9001"
# RustFS 服务端点地址,用于客户端访问
RUSTFS_SERVER_ENDPOINT="http://127.0.0.1:9000"
# RustFS 服务域名配置
RUSTFS_SERVER_DOMAINS=127.0.0.1:9002
RUSTFS_SERVER_DOMAINS=127.0.0.1:9001
# RustFS 许可证内容
RUSTFS_LICENSE="license content"
# 可观测性配置Endpointhttp://localhost:4317

View File

@@ -13,11 +13,11 @@ RUSTFS_ADDRESS="0.0.0.0:9000"
# Enable RustFS console functionality
RUSTFS_CONSOLE_ENABLE=true
# RustFS console listen address and port
RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9002"
RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9001"
# RustFS service endpoint for client access
RUSTFS_SERVER_ENDPOINT="http://127.0.0.1:9000"
# RustFS service domain configuration
RUSTFS_SERVER_DOMAINS=127.0.0.1:9002
RUSTFS_SERVER_DOMAINS=127.0.0.1:9001
# RustFS license content
RUSTFS_LICENSE="license content"
# Observability configuration endpoint: RUSTFS_OBS_ENDPOINT

View File

@@ -1,6 +1,6 @@
services:
otel-collector:
image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.124.0
image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0
environment:
- TZ=Asia/Shanghai
volumes:
@@ -16,7 +16,7 @@ services:
networks:
- rustfs-network
jaeger:
image: jaegertracing/jaeger:2.5.0
image: jaegertracing/jaeger:2.6.0
environment:
- TZ=Asia/Shanghai
ports:
@@ -26,7 +26,7 @@ services:
networks:
- rustfs-network
prometheus:
image: prom/prometheus:v3.3.0
image: prom/prometheus:v3.4.1
environment:
- TZ=Asia/Shanghai
volumes:
@@ -36,7 +36,7 @@ services:
networks:
- rustfs-network
loki:
image: grafana/loki:3.5.0
image: grafana/loki:3.5.1
environment:
- TZ=Asia/Shanghai
volumes:
@@ -47,7 +47,7 @@ services:
networks:
- rustfs-network
grafana:
image: grafana/grafana:11.6.1
image: grafana/grafana:12.0.1
ports:
- "3000:3000" # Web UI
environment:

View File

@@ -42,22 +42,22 @@ lock.workspace = true
regex = { workspace = true }
netif = { workspace = true }
nix = { workspace = true }
path-absolutize = "3.1.1"
path-absolutize = { workspace = true }
protos.workspace = true
rmp.workspace = true
rmp-serde.workspace = true
tokio-util = { workspace = true, features = ["io", "compat"] }
crc32fast = "1.4.2"
siphasher = "1.0.1"
base64-simd = "0.8.0"
crc32fast = { workspace = true }
siphasher = { workspace = true }
base64-simd = { workspace = true }
sha2 = { version = "0.11.0-pre.4" }
hex-simd = "0.8.0"
path-clean = "1.0.1"
hex-simd = { workspace = true }
path-clean = { workspace = true }
tempfile.workspace = true
tokio = { workspace = true, features = ["io-util", "sync", "signal"] }
tokio-stream = { workspace = true }
tonic.workspace = true
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
xxhash-rust = { workspace = true, features = ["xxh64", "xxh3"] }
num_cpus = { workspace = true }
rand.workspace = true
pin-project-lite.workspace = true

View File

@@ -136,9 +136,10 @@ pub struct ReplicationPool {
mrf_worker_size: usize,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
#[repr(u8)] // 明确表示底层值为 u8
pub enum ReplicationType {
#[default]
UnsetReplicationType = 0,
ObjectReplicationType = 1,
DeleteReplicationType = 2,
@@ -149,12 +150,6 @@ pub enum ReplicationType {
AllReplicationType = 7,
}
impl Default for ReplicationType {
fn default() -> Self {
ReplicationType::UnsetReplicationType
}
}
impl ReplicationType {
/// 从 u8 转换为枚举
pub fn from_u8(value: u8) -> Option<Self> {
@@ -400,7 +395,7 @@ pub async fn check_replicate_delete(
// use crate::global::*;
fn target_reset_header(arn: &str) -> String {
format!("{}-{}", format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, REPLICATION_RESET), arn)
format!("{}{}-{}", RESERVED_METADATA_PREFIX_LOWER, REPLICATION_RESET, arn)
}
pub async fn get_heal_replicate_object_info(
@@ -461,7 +456,7 @@ pub async fn get_heal_replicate_object_info(
},
None,
)
.await
.await
} else {
// let opts: ObjectOptions = put_opts(&bucket, &key, version_id, &req.headers, Some(mt))
// .await
@@ -839,7 +834,7 @@ impl ReplicationPool {
fn get_worker_ch(&self, bucket: &str, object: &str, _sz: i64) -> Option<&Sender<Box<dyn ReplicationWorkerOperation>>> {
let h = xxh3_64(format!("{}{}", bucket, object).as_bytes()); // 计算哈希值
//need lock;
//need lock;
let workers = &self.workers_sender; // 读锁
if workers.is_empty() {
@@ -1177,7 +1172,7 @@ pub fn get_replication_action(oi1: &ObjectInfo, oi2: &ObjectInfo, op_type: &str)
let _null_version_id = "null";
// 如果是现有对象复制,判断是否需要跳过同步
if op_type == "existing" && oi1.mod_time > oi2.mod_time && oi1.version_id == None {
if op_type == "existing" && oi1.mod_time > oi2.mod_time && oi1.version_id.is_none() {
return ReplicationAction::ReplicateNone;
}
@@ -1532,7 +1527,7 @@ impl ConfigProcess for s3s::dto::ReplicationConfiguration {
continue;
}
if self.role != "" {
if !self.role.is_empty() {
debug!("rule");
arns.push(self.role.clone()); // use legacy RoleArn if present
return arns;
@@ -1559,7 +1554,7 @@ impl ConfigProcess for s3s::dto::ReplicationConfiguration {
if obj.existing_object
&& rule.existing_object_replication.is_some()
&& rule.existing_object_replication.unwrap().status
== ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
== ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
warn!("need replicate failed");
return false;
@@ -1595,7 +1590,7 @@ impl ConfigProcess for s3s::dto::ReplicationConfiguration {
return obj.replica
&& rule.source_selection_criteria.is_some()
&& rule.source_selection_criteria.unwrap().replica_modifications.unwrap().status
== ReplicaModificationsStatus::from_static(ReplicaModificationsStatus::ENABLED);
== ReplicaModificationsStatus::from_static(ReplicaModificationsStatus::ENABLED);
}
warn!("need replicate failed");
false
@@ -1869,7 +1864,7 @@ pub async fn must_replicate(bucket: &str, object: &str, mopts: &MustReplicateOpt
let replicate = cfg.replicate(&opts);
info!("need replicate {}", &replicate);
let synchronous = tgt.map_or(false, |t| t.replicate_sync);
let synchronous = tgt.is_ok_and(|t| t.replicate_sync);
//decision.set(ReplicateTargetDecision::new(replicate,synchronous));
info!("targe decision arn is:{}", tgt_arn.clone());
decision.set(ReplicateTargetDecision {
@@ -1976,7 +1971,7 @@ impl ObjectInfoExt for ObjectInfo {
}
fn is_multipart(&self) -> bool {
match &self.etag {
Some(etgval) => etgval.len() != 32 && etgval.len() > 0,
Some(etgval) => etgval.len() != 32 && etgval.is_empty(),
None => false,
}
}
@@ -2086,7 +2081,7 @@ impl ReplicationWorkerOperation for ReplicateObjectInfo {
object: self.name.clone(),
version_id: self.version_id.clone(), // 直接使用计算后的 version_id
retry_count: 0,
sz: self.size.clone(),
sz: self.size,
}
}
fn as_any(&self) -> &dyn Any {
@@ -2469,7 +2464,7 @@ pub fn get_must_replicate_options(
op: ReplicationType, // 假设 `op` 是字符串类型
opts: &ObjectOptions,
) -> MustReplicateOptions {
let mut meta = clone_mss(&user_defined);
let mut meta = clone_mss(user_defined);
if !user_tags.is_empty() {
meta.insert("xhttp.AmzObjectTagging".to_string(), user_tags.to_string());
@@ -2621,7 +2616,7 @@ pub async fn replicate_object(ri: ReplicateObjectInfo, object_api: Arc<store::EC
for tgt_arn in tgt_arns {
let tgt = bucket_targets::get_bucket_target_client(&ri.bucket, &tgt_arn).await;
if !tgt.is_ok() {
if tgt.is_err() {
// repl_log_once_if(ctx, format!("failed to get target for bucket: {} arn: {}", bucket, tgt_arn), &tgt_arn).await;
// send_event(event_args {
// event_name: "ObjectReplicationNotTracked".to_string(),

View File

@@ -275,7 +275,7 @@ impl BucketTargetSys {
targets_map.insert(bucket.to_string(), targets);
arn_remotes_map.remove(arn_str);
let targets = self.list_targets(Some(&bucket), None).await;
let targets = self.list_targets(Some(bucket), None).await;
println!("targets is {}", targets.len());
match serde_json::to_vec(&targets) {
Ok(json) => {
@@ -390,7 +390,7 @@ impl BucketTargetSys {
}
async fn is_bucket_versioned(&self, _bucket: &str) -> bool {
return true;
true
// let url_str = "http://127.0.0.1:9001";
// // 转换为 Url 类型

View File

@@ -18,19 +18,19 @@ policy.workspace = true
serde_json.workspace = true
async-trait.workspace = true
thiserror.workspace = true
strum = { version = "0.27.1", features = ["derive"] }
arc-swap = "1.7.1"
strum = { workspace = true, features = ["derive"] }
arc-swap = { workspace = true }
crypto = { path = "../crypto" }
ipnetwork = { version = "0.21.1", features = ["serde"] }
itertools = "0.14.0"
ipnetwork = { workspace = true, features = ["serde"] }
itertools = { workspace = true }
futures.workspace = true
rand.workspace = true
base64-simd = "0.8.0"
base64-simd = { workspace = true }
jsonwebtoken = { workspace = true }
tracing.workspace = true
madmin.workspace = true
lazy_static.workspace = true
regex = "1.11.1"
regex = { workspace = true }
common.workspace = true
[dev-dependencies]

View File

@@ -16,19 +16,19 @@ serde = { workspace = true, features = ["derive", "rc"] }
serde_json.workspace = true
async-trait.workspace = true
thiserror.workspace = true
strum = { version = "0.27.1", features = ["derive"] }
strum = { workspace = true, features = ["derive"] }
arc-swap = "1.7.1"
crypto = { path = "../crypto" }
ipnetwork = { version = "0.21.1", features = ["serde"] }
itertools = "0.14.0"
ipnetwork = { workspace = true, features = ["serde"] }
itertools = { workspace = true }
futures.workspace = true
rand.workspace = true
base64-simd = "0.8.0"
base64-simd = { workspace = true }
jsonwebtoken = { workspace = true }
tracing.workspace = true
madmin.workspace = true
lazy_static.workspace = true
regex = "1.11.1"
regex = { workspace = true }
common.workspace = true
[dev-dependencies]

View File

@@ -1,78 +0,0 @@
#!/usr/bin/env python
from dataclasses import dataclass
import argparse
import subprocess
from pathlib import Path


@dataclass
class CliArgs:
    profile: str
    target: str
    glibc: str

    @staticmethod
    def parse():
        parser = argparse.ArgumentParser()
        parser.add_argument("--profile", type=str, required=True)
        parser.add_argument("--target", type=str, required=True)
        parser.add_argument("--glibc", type=str, required=True)
        args = parser.parse_args()
        return CliArgs(args.profile, args.target, args.glibc)


def shell(cmd: str):
    print(cmd, flush=True)
    subprocess.run(cmd, shell=True, check=True)


def main(args: CliArgs):
    use_zigbuild = False
    use_old_glibc = False

    if args.glibc and args.glibc != "default":
        use_zigbuild = True
        use_old_glibc = True

    if args.target and args.target != "x86_64-unknown-linux-gnu":
        shell("rustup target add " + args.target)

    cmd = ["cargo", "build"]
    if use_zigbuild:
        cmd = ["cargo", " zigbuild"]
    cmd.extend(["--profile", args.profile])
    if use_old_glibc:
        cmd.extend(["--target", f"{args.target}.{args.glibc}"])
    else:
        cmd.extend(["--target", args.target])
    cmd.extend(["-p", "rustfs"])
    cmd.extend(["--bins"])

    shell("touch rustfs/build.rs")  # refresh build info for rustfs
    shell(" ".join(cmd))

    if args.profile == "dev":
        profile_dir = "debug"
    elif args.profile == "release":
        profile_dir = "release"
    else:
        profile_dir = args.profile

    bin_path = Path(f"target/{args.target}/{profile_dir}/rustfs")

    bin_name = f"rustfs.{args.profile}.{args.target}"
    if use_old_glibc:
        bin_name += f".glibc{args.glibc}"
    bin_name += ".bin"

    out_path = Path(f"target/artifacts/{bin_name}")
    out_path.parent.mkdir(parents=True, exist_ok=True)
    out_path.hardlink_to(bin_path)


if __name__ == "__main__":
    main(CliArgs.parse())