Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 09:40:32 +00:00)
Upgrade rand to 0.9.1
Cargo.lock (generated): 12 lines changed
@@ -209,7 +209,7 @@ dependencies = [
  "base64-simd",
  "common",
  "hex-simd",
- "rand 0.8.5",
+ "rand 0.9.1",
  "rsa",
  "serde",
  "serde_json",
@@ -2164,7 +2164,7 @@ dependencies = [
  "chacha20poly1305",
  "jsonwebtoken",
  "pbkdf2",
- "rand 0.8.5",
+ "rand 0.9.1",
  "serde_json",
  "sha2 0.10.9",
  "test-case",
@@ -3589,7 +3589,7 @@ dependencies = [
  "pin-project-lite",
  "policy",
  "protos",
- "rand 0.8.5",
+ "rand 0.9.1",
  "reed-solomon-erasure",
  "regex",
  "reqwest",
@@ -4877,7 +4877,7 @@ dependencies = [
  "lazy_static",
  "madmin",
  "policy",
- "rand 0.8.5",
+ "rand 0.9.1",
  "regex",
  "serde",
  "serde_json",
@@ -5619,7 +5619,7 @@ dependencies = [
  "common",
  "lazy_static",
  "protos",
- "rand 0.8.5",
+ "rand 0.9.1",
  "serde",
  "serde_json",
  "tokio",
@@ -7207,7 +7207,7 @@ dependencies = [
  "jsonwebtoken",
  "lazy_static",
  "madmin",
- "rand 0.8.5",
+ "rand 0.9.1",
  "regex",
  "serde",
  "serde_json",
@@ -149,7 +149,7 @@ pin-project-lite = "0.2.16"
 prost = "0.13.5"
 prost-build = "0.13.5"
 protobuf = "3.7"
-rand = "0.8.5"
+rand = "0.9.1"
 rdkafka = { version = "0.37.0", features = ["tokio"] }
 reed-solomon-erasure = { version = "6.0.0", features = ["simd-accel"] }
 regex = { version = "1.11.1" }
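For context, the API renames applied throughout this commit are the ones introduced by rand 0.9. A minimal, self-contained sketch of the new calls (the old 0.8 names are noted in comments); the variable names are illustrative only:

```rust
use rand::seq::SliceRandom; // shuffle() lives here in both 0.8 and 0.9
use rand::Rng;

fn main() {
    // rand 0.9: rand::rng() replaces rand::thread_rng()
    let mut rng = rand::rng();

    // gen_range -> random_range, gen -> random
    let millis: u64 = rng.random_range(10..=50);
    let byte: u8 = rng.random();

    // shuffle() is unchanged; it just takes the new handle
    let mut disks = vec!["d1", "d2", "d3", "d4"];
    disks.shuffle(&mut rng);

    println!("{millis} {byte} {disks:?}");
}
```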
@@ -2,6 +2,7 @@ use common::error::Result;
 use rsa::Pkcs1v15Encrypt;
 use rsa::{
     pkcs8::{DecodePrivateKey, DecodePublicKey},
+    rand_core::OsRng,
     RsaPrivateKey, RsaPublicKey,
 };
 use serde::{Deserialize, Serialize};
@@ -19,7 +20,7 @@ pub struct Token {
 pub fn gencode(token: &Token, key: &str) -> Result<String> {
     let data = serde_json::to_vec(token)?;
     let public_key = RsaPublicKey::from_public_key_pem(key)?;
-    let encrypted_data = public_key.encrypt(&mut rand::thread_rng(), Pkcs1v15Encrypt, &data)?;
+    let encrypted_data = public_key.encrypt(&mut OsRng, Pkcs1v15Encrypt, &data)?;
     Ok(base64_simd::URL_SAFE_NO_PAD.encode_to_string(&encrypted_data))
 }
@@ -61,7 +62,7 @@ mod tests {
     use std::time::{SystemTime, UNIX_EPOCH};
     #[test]
     fn test_gencode_and_parse() {
-        let mut rng = rand::thread_rng();
+        let mut rng = OsRng;
         let bits = 2048;
         let private_key = RsaPrivateKey::new(&mut rng, bits).expect("Failed to generate private key");
         let public_key = RsaPublicKey::from(&private_key);
@@ -84,7 +85,7 @@

     #[test]
     fn test_parse_invalid_token() {
-        let private_key_pem = RsaPrivateKey::new(&mut rand::thread_rng(), 2048)
+        let private_key_pem = RsaPrivateKey::new(&mut OsRng, 2048)
             .expect("Failed to generate private key")
             .to_pkcs8_pem(LineEnding::LF)
             .unwrap();
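The switch from rand::thread_rng() to OsRng in the token code is presumably needed because rsa 0.9 still expects an RNG implementing the older rand_core 0.6 traits, which rand 0.9's ThreadRng no longer does; OsRng, re-exported through the rsa crate, satisfies them. A hedged sketch of the resulting round trip (standalone example, not the project's module):

```rust
use rsa::{rand_core::OsRng, Pkcs1v15Encrypt, RsaPrivateKey, RsaPublicKey};

// Illustrative only: generate a keypair, encrypt, then decrypt with PKCS#1 v1.5.
fn roundtrip() -> Result<(), Box<dyn std::error::Error>> {
    let private_key = RsaPrivateKey::new(&mut OsRng, 2048)?;
    let public_key = RsaPublicKey::from(&private_key);

    let ciphertext = public_key.encrypt(&mut OsRng, Pkcs1v15Encrypt, b"data")?;
    let plaintext = private_key.decrypt(Pkcs1v15Encrypt, &ciphertext)?;

    assert_eq!(plaintext, b"data".to_vec());
    Ok(())
}
```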
@@ -77,8 +77,8 @@ impl LRWMutex {
             }
             let sleep_time: u64;
             {
-                let mut rng = rand::thread_rng();
-                sleep_time = rng.gen_range(10..=50);
+                let mut rng = rand::rng();
+                sleep_time = rng.random_range(10..=50);
             }
             sleep(Duration::from_millis(sleep_time)).await;
         }
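One detail worth keeping from this hunk: the RNG is scoped to its own block so it is dropped before the .await. ThreadRng (returned by rand::rng()) is not Send, so holding it across an await point would make the future non-Send. A small sketch of the same jittered-retry pattern, with a hypothetical try_lock closure standing in for the real lock attempt:

```rust
use rand::Rng;
use std::time::Duration;
use tokio::time::sleep;

// Hypothetical retry loop mirroring the hunk above.
async fn retry_with_jitter<F: FnMut() -> bool>(mut try_lock: F) {
    while !try_lock() {
        // Scope the RNG so the non-Send ThreadRng is dropped before `.await`.
        let sleep_time: u64 = {
            let mut rng = rand::rng();
            rng.random_range(10..=50)
        };
        sleep(Duration::from_millis(sleep_time)).await;
    }
}
```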
@@ -42,8 +42,9 @@ fn encrypt<T: aes_gcm::aead::Aead>(
     data: &[u8],
 ) -> Result<Vec<u8>, crate::Error> {
     use crate::error::Error;
+    use aes_gcm::aead::rand_core::OsRng;

-    let nonce = T::generate_nonce(rand::thread_rng());
+    let nonce = T::generate_nonce(&mut OsRng);

     let encryptor = stream.encrypt(&nonce, data).map_err(Error::ErrEncryptFailed)?;
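The nonce generation now uses OsRng from aes_gcm's re-exported rand_core, again because the aead traits are pinned to the older rand_core version. A hedged sketch of the pattern with a concrete cipher (assumes aes-gcm 0.10 with its rand_core support enabled, which the diff implies; prepending the nonce is a common convention, not necessarily what this crate does):

```rust
use aes_gcm::{
    aead::{rand_core::OsRng, Aead, AeadCore, KeyInit},
    Aes256Gcm, Key,
};

// Illustrative only: encrypt `data` with a fresh nonce drawn from the OS RNG.
fn seal(key_bytes: &[u8; 32], data: &[u8]) -> Result<Vec<u8>, aes_gcm::Error> {
    let key = Key::<Aes256Gcm>::from_slice(key_bytes);
    let cipher = Aes256Gcm::new(key);

    let nonce = Aes256Gcm::generate_nonce(&mut OsRng); // 96-bit nonce
    let mut sealed = cipher.encrypt(&nonce, data)?;

    // Prepend the nonce so a decryptor can recover it.
    let mut framed = nonce.to_vec();
    framed.append(&mut sealed);
    Ok(framed)
}
```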
@@ -754,7 +754,7 @@ impl MetaCacheEntry {
             });
         }

-        let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false)?;
+        let fi = fm.to_fileinfo(bucket, self.name.as_str(), "", false, false)?;

         return Ok(fi);
     }
@@ -762,7 +762,7 @@ impl MetaCacheEntry {
         let mut fm = FileMeta::new();
         fm.unmarshal_msg(&self.metadata)?;

-        let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false)?;
+        let fi = fm.to_fileinfo(bucket, self.name.as_str(), "", false, false)?;

         Ok(fi)
     }
@@ -541,14 +541,7 @@ impl FileMeta {
|
||||
|
||||
// read_data fill fi.dada
|
||||
#[tracing::instrument(level = "debug", skip(self))]
|
||||
pub fn into_fileinfo(
|
||||
&self,
|
||||
volume: &str,
|
||||
path: &str,
|
||||
version_id: &str,
|
||||
read_data: bool,
|
||||
all_parts: bool,
|
||||
) -> Result<FileInfo> {
|
||||
pub fn to_fileinfo(&self, volume: &str, path: &str, version_id: &str, read_data: bool, all_parts: bool) -> Result<FileInfo> {
|
||||
let has_vid = {
|
||||
if !version_id.is_empty() {
|
||||
let id = Uuid::parse_str(version_id)?;
|
||||
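The into_fileinfo → to_fileinfo rename bundled into this commit matches the standard Rust naming convention: into_* implies the conversion consumes self, while this method only borrows &self, for which to_* is the conventional prefix. A tiny hypothetical illustration:

```rust
// Hypothetical types; only the naming convention is the point here.
struct Meta(String);

impl Meta {
    fn to_info(&self) -> String {   // borrows, like FileMeta::to_fileinfo
        self.0.clone()
    }

    fn into_info(self) -> String {  // would consume self
        self.0
    }
}
```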
@@ -2116,7 +2109,7 @@ pub async fn get_file_info(buf: &[u8], volume: &str, path: &str, version_id: &st
         });
     }

-    let fi = meta.into_fileinfo(volume, path, version_id, opts.data, true)?;
+    let fi = meta.to_fileinfo(volume, path, version_id, opts.data, true)?;
     Ok(fi)
 }
@@ -2947,7 +2940,7 @@ fn test_file_meta_into_fileinfo() {
     fm.add_version(fi).unwrap();

     // Test into_fileinfo with valid version_id
-    let result = fm.into_fileinfo("test-volume", "test-path", &version_id.to_string(), false, false);
+    let result = fm.to_fileinfo("test-volume", "test-path", &version_id.to_string(), false, false);
     assert!(result.is_ok());
     let file_info = result.unwrap();
     assert_eq!(file_info.volume, "test-volume");
@@ -2955,11 +2948,11 @@ fn test_file_meta_into_fileinfo() {

     // Test into_fileinfo with invalid version_id
     let invalid_id = Uuid::new_v4();
-    let result = fm.into_fileinfo("test-volume", "test-path", &invalid_id.to_string(), false, false);
+    let result = fm.to_fileinfo("test-volume", "test-path", &invalid_id.to_string(), false, false);
     assert!(result.is_err());

     // Test into_fileinfo with empty version_id (should get latest)
-    let result = fm.into_fileinfo("test-volume", "test-path", "", false, false);
+    let result = fm.to_fileinfo("test-volume", "test-path", "", false, false);
     assert!(result.is_ok());
 }
@@ -171,9 +171,9 @@ pub async fn init_data_scanner() {

     // Calculate randomized sleep duration
     // Use random factor (0.0 to 1.0) multiplied by the scanner cycle duration
-    let random_factor: f64 = {
-        let mut rng = rand::thread_rng();
-        rng.gen_range(1.0..10.0)
+    let random_factor = {
+        let mut rng = rand::rng();
+        rng.random_range(1.0..10.0)
     };
     let base_cycle_duration = SCANNER_CYCLE.load(Ordering::SeqCst) as f64;
     let sleep_duration_secs = random_factor * base_cycle_duration;
@@ -439,8 +439,8 @@ impl DataUsageCache {
             }
             retries += 1;
             let dur = {
-                let mut rng = rand::thread_rng();
-                rng.gen_range(0..1_000)
+                let mut rng = rand::rng();
+                rng.random_range(0..1_000)
             };
             sleep(Duration::from_millis(dur)).await;
         }
@@ -61,10 +61,7 @@ use http::HeaderMap;
 use lock::{namespace_lock::NsLockMap, LockApi};
 use madmin::heal_commands::{HealDriveInfo, HealResultItem};
 use md5::{Digest as Md5Digest, Md5};
-use rand::{
-    thread_rng,
-    {seq::SliceRandom, Rng},
-};
+use rand::{seq::SliceRandom, Rng};
 use sha2::{Digest, Sha256};
 use std::hash::Hash;
 use std::time::SystemTime;
@@ -136,7 +133,7 @@ impl SetDisks {
             }
         }

-        let mut rng = thread_rng();
+        let mut rng = rand::rng();

         disks.shuffle(&mut rng);

@@ -145,7 +142,7 @@ impl SetDisks {
     async fn get_online_local_disks(&self) -> Vec<Option<DiskStore>> {
         let mut disks = self.get_online_disks().await;

-        let mut rng = thread_rng();
+        let mut rng = rand::rng();

         disks.shuffle(&mut rng);

@@ -170,10 +167,10 @@ impl SetDisks {
         let mut futures = Vec::with_capacity(disks.len());
         let mut numbers: Vec<usize> = (0..disks.len()).collect();
         {
-            let mut rng = thread_rng();
+            let mut rng = rand::rng();
             disks.shuffle(&mut rng);

-            numbers.shuffle(&mut thread_rng());
+            numbers.shuffle(&mut rng);
         }

         for &i in numbers.iter() {
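This hunk also drops the second thread_rng() call and reuses the one handle for both shuffles; rand::rng() is cheap to call, so either style works, but a single binding reads more clearly. A minimal sketch of the shared-handle form:

```rust
use rand::seq::SliceRandom;

// Illustrative only: one thread-local handle can drive several shuffles.
fn shuffle_both(disks: &mut [u32], numbers: &mut [usize]) {
    let mut rng = rand::rng();
    disks.shuffle(&mut rng);
    numbers.shuffle(&mut rng);
}
```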
@@ -247,7 +244,7 @@ impl SetDisks {
     async fn _get_local_disks(&self) -> Vec<Option<DiskStore>> {
         let mut disks = self.get_disks_internal().await;

-        let mut rng = thread_rng();
+        let mut rng = rand::rng();

         disks.shuffle(&mut rng);

@@ -1275,7 +1272,7 @@ impl SetDisks {
             ..Default::default()
         };

-        let finfo = match meta.into_fileinfo(bucket, object, "", true, true) {
+        let finfo = match meta.to_fileinfo(bucket, object, "", true, true) {
            Ok(res) => res,
            Err(err) => {
                for item in errs.iter_mut() {
@@ -1302,7 +1299,7 @@ impl SetDisks {

         for (idx, meta_op) in metadata_array.iter().enumerate() {
             if let Some(meta) = meta_op {
-                match meta.into_fileinfo(bucket, object, vid.to_string().as_str(), read_data, true) {
+                match meta.to_fileinfo(bucket, object, vid.to_string().as_str(), read_data, true) {
                     Ok(res) => meta_file_infos[idx] = res,
                     Err(err) => errs[idx] = Some(err),
                 }
@@ -2929,7 +2926,7 @@ impl SetDisks {
         // in different order per erasure set, this wider spread is needed when
         // there are lots of buckets with different order of objects in them.
         let permutes = {
-            let mut rng = thread_rng();
+            let mut rng = rand::rng();
             let mut permutes: Vec<usize> = (0..buckets.len()).collect();
             permutes.shuffle(&mut rng);
             permutes
@@ -2951,8 +2948,8 @@ impl SetDisks {

         let (buckets_results_tx, mut buckets_results_rx) = mpsc::channel::<DataUsageEntryInfo>(disks.len());
         let update_time = {
-            let mut rng = thread_rng();
-            Duration::from_secs(30) + Duration::from_secs_f64(10.0 * rng.gen_range(0.0..1.0))
+            let mut rng = rand::rng();
+            Duration::from_secs(30) + Duration::from_secs_f64(10.0 * rng.random_range(0.0..1.0))
         };
         let mut ticker = interval(update_time);

@@ -3297,7 +3294,7 @@ impl SetDisks {
         }

         {
-            let mut rng = thread_rng();
+            let mut rng = rand::rng();

             // shuffle randomly
             disks.shuffle(&mut rng);
@@ -508,8 +508,8 @@ impl ECStore {
             return None;
         }

-        let mut rng = rand::thread_rng();
-        let random_u64: u64 = rng.gen();
+        let mut rng = rand::rng();
+        let random_u64: u64 = rng.random_range(0..total);

         let choose = random_u64 % total;
         let mut at_total = 0;

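Note that random_range(0..total) already yields a uniform value in [0, total), so the `% total` kept on the next line is now a no-op (the earlier gen() over the full u64 range needed it, at the cost of a slight modulo bias). A hedged sketch of the simpler equivalent:

```rust
use rand::Rng;

// Illustrative helper: pick an index uniformly in [0, total); assumes total > 0.
fn pick_index(total: u64) -> u64 {
    let mut rng = rand::rng();
    rng.random_range(0..total)
}
```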
@@ -19,7 +19,6 @@ use crate::{store::ECStore, store_api::ListObjectsV2Info};
 use common::error::{Error, Result};
 use futures::future::join_all;
 use rand::seq::SliceRandom;
-use rand::thread_rng;
 use std::collections::HashMap;
 use std::io::ErrorKind;
 use std::sync::Arc;
@@ -712,7 +711,7 @@ impl ECStore {

         let fallback_disks = {
             if ask_disks > 0 && disks.len() > ask_disks as usize {
-                let mut rand = thread_rng();
+                let mut rand = rand::rng();
                 disks.shuffle(&mut rand);
                 disks.split_off(ask_disks as usize)
             } else {
@@ -1241,7 +1240,7 @@ impl SetDisks {
         let mut fallback_disks = Vec::new();

         if ask_disks > 0 && disks.len() > ask_disks as usize {
-            let mut rand = thread_rng();
+            let mut rand = rand::rng();
             disks.shuffle(&mut rand);

             fallback_disks = disks.split_off(ask_disks as usize);

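Both call sites use the same shuffle-then-split_off pattern: shuffle the disks, keep the first ask_disks, and treat the remainder as fallbacks. A small generic sketch of that pattern (hypothetical helper, not the project's API):

```rust
use rand::seq::SliceRandom;

// Illustrative only: returns (chosen, fallbacks).
fn pick_random<T>(mut items: Vec<T>, ask: usize) -> (Vec<T>, Vec<T>) {
    let mut rng = rand::rng();
    if ask > 0 && items.len() > ask {
        items.shuffle(&mut rng);
        let fallback = items.split_off(ask);
        (items, fallback)
    } else {
        (items, Vec::new())
    }
}
```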
@@ -28,10 +28,10 @@ pub fn gen_access_key(length: usize) -> Result<String> {
     }

     let mut result = String::with_capacity(length);
-    let mut rng = rand::thread_rng();
+    let mut rng = rand::rng();

     for _ in 0..length {
-        result.push(ALPHA_NUMERIC_TABLE[rng.gen_range(0..ALPHA_NUMERIC_TABLE.len())]);
+        result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]);
     }

     Ok(result)
@@ -57,7 +57,7 @@ pub fn gen_secret_key(length: usize) -> Result<String> {
     if length < 8 {
         return Err(Error::msg("secret key length is too short"));
     }
-    let mut rng = rand::thread_rng();
+    let mut rng = rand::rng();

     let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)];
     rng.fill_bytes(&mut key);

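Both helpers migrate cleanly because only the handle and the range call change; fill_bytes is unchanged between rand 0.8 and 0.9. A hedged, self-contained sketch of the two styles (TABLE is a stand-in for the project's ALPHA_NUMERIC_TABLE):

```rust
use rand::{Rng, RngCore};

// Stand-in for the project's ALPHA_NUMERIC_TABLE.
const TABLE: &[u8] = b"abcdefghijklmnopqrstuvwxyz0123456789";

fn demo(length: usize) -> (String, Vec<u8>) {
    let mut rng = rand::rng();

    // Access-key style: index the table with random_range (was gen_range).
    let access: String = (0..length)
        .map(|_| TABLE[rng.random_range(0..TABLE.len())] as char)
        .collect();

    // Secret-key style: fill a buffer with raw random bytes.
    let mut key = vec![0u8; length];
    rng.fill_bytes(&mut key);

    (access, key)
}
```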
@@ -14,10 +14,10 @@ pub fn gen_access_key(length: usize) -> Result<String> {
     }

     let mut result = String::with_capacity(length);
-    let mut rng = rand::thread_rng();
+    let mut rng = rand::rng();

     for _ in 0..length {
-        result.push(ALPHA_NUMERIC_TABLE[rng.gen_range(0..ALPHA_NUMERIC_TABLE.len())]);
+        result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]);
     }

     Ok(result)
@@ -29,7 +29,7 @@ pub fn gen_secret_key(length: usize) -> Result<String> {
     if length < 8 {
         return Err(Error::msg("secret key length is too short"));
     }
-    let mut rng = rand::thread_rng();
+    let mut rng = rand::rng();

     let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)];
     rng.fill_bytes(&mut key);
