fix: Fix some errors tested in mint (#507)

* refactor: replace new_object_layer_fn with get_validated_store for bucket validation

* feat: add validation for object tagging limits and uniqueness

* Apply suggestion from @Copilot

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Apply suggestion from @Copilot

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* feat: add EntityTooSmall error for multipart uploads and update error handling

* feat: validate max_parts input range for S3 multipart uploads

* Update rustfs/src/storage/ecfs.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix: optimize tag key and value length validation checks

---------

Co-authored-by: damon <damonxue2@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Author: 0xdx2 (committed by GitHub)
Date: 2025-09-10 22:22:29 +08:00
Commit: 971e74281c (parent: ca9a2b6ab9)
5 changed files with 87 additions and 23 deletions

.vscode/launch.json

@@ -20,18 +20,16 @@
       }
     },
     "env": {
-      "RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug"
+      "RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug,iam=info"
     },
     "args": [
       "--access-key",
-      "AKEXAMPLERUSTFS",
+      "rustfsadmin",
       "--secret-key",
-      "SKEXAMPLERUSTFS",
+      "rustfsadmin",
       "--address",
       "0.0.0.0:9010",
-      "--domain-name",
-      "127.0.0.1:9010",
-      "./target/volume/test{0...4}"
+      "./target/volume/test{1...4}"
     ],
     "cwd": "${workspaceFolder}"
   },
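A note on the last hunk: rustfs volume paths use MinIO-style {start...end} ellipsis expansion, so test{0...4} denotes five directories (test0 through test4) while test{1...4} denotes four, matching a 4-volume test layout. A minimal sketch of the expansion, assuming an inclusive numeric range (illustrative only, not the crate's actual parser):

// Sketch: expand "prefix{1...4}suffix" into prefix1..prefix4 plus suffix.
// Hypothetical helper; rustfs's real parser also handles zero-padding
// and multiple ranges.
fn expand_ellipsis(pattern: &str) -> Vec<String> {
    let (Some(open), Some(close)) = (pattern.find('{'), pattern.find('}')) else {
        return vec![pattern.to_string()];
    };
    let prefix = &pattern[..open];
    let suffix = &pattern[close + 1..];
    let body = &pattern[open + 1..close]; // e.g. "1...4"
    let mut bounds = body.split("...");
    let (start, end): (u32, u32) = match (bounds.next(), bounds.next()) {
        (Some(s), Some(e)) => match (s.parse(), e.parse()) {
            (Ok(s), Ok(e)) => (s, e),
            _ => return vec![pattern.to_string()],
        },
        _ => return vec![pattern.to_string()],
    };
    (start..=end).map(|i| format!("{prefix}{i}{suffix}")).collect()
}

fn main() {
    assert_eq!(
        expand_ellipsis("./target/volume/test{1...4}"),
        [
            "./target/volume/test1",
            "./target/volume/test2",
            "./target/volume/test3",
            "./target/volume/test4"
        ]
    );
}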

StorageError definitions (file path not shown in the capture)

@@ -148,6 +148,9 @@ pub enum StorageError {
     #[error("Specified part could not be found. PartNumber {0}, Expected {1}, got {2}")]
     InvalidPart(usize, String, String),
+    #[error("Your proposed upload is smaller than the minimum allowed size. Part {0} size {1} is less than minimum {2}")]
+    EntityTooSmall(usize, i64, i64),
     #[error("Invalid version id: {0}/{1}-{2}")]
     InvalidVersionID(String, String, String),
     #[error("invalid data movement operation, source and destination pool are the same for : {0}/{1}-{2}")]

@@ -408,6 +411,7 @@ impl Clone for StorageError {
             // StorageError::InsufficientWriteQuorum => StorageError::InsufficientWriteQuorum,
             StorageError::DecommissionNotStarted => StorageError::DecommissionNotStarted,
             StorageError::InvalidPart(a, b, c) => StorageError::InvalidPart(*a, b.clone(), c.clone()),
+            StorageError::EntityTooSmall(a, b, c) => StorageError::EntityTooSmall(*a, *b, *c),
             StorageError::DoneForNow => StorageError::DoneForNow,
             StorageError::DecommissionAlreadyRunning => StorageError::DecommissionAlreadyRunning,
             StorageError::ErasureReadQuorum => StorageError::ErasureReadQuorum,

@@ -486,6 +490,7 @@ impl StorageError {
             StorageError::InsufficientReadQuorum(_, _) => 0x39,
             StorageError::InsufficientWriteQuorum(_, _) => 0x3A,
             StorageError::PreconditionFailed => 0x3B,
+            StorageError::EntityTooSmall(_, _, _) => 0x3C,
         }
     }
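
The #[error(...)] attribute above is thiserror's derive syntax, so the new variant's Display output is the formatted message. A self-contained replica with made-up values, for illustration:

use thiserror::Error;

#[derive(Debug, Error)]
enum StorageError {
    #[error("Your proposed upload is smaller than the minimum allowed size. Part {0} size {1} is less than minimum {2}")]
    EntityTooSmall(usize, i64, i64),
}

fn main() {
    let err = StorageError::EntityTooSmall(3, 1024, 5 * 1024 * 1024);
    // Prints the message with the placeholders filled in:
    // "... Part 3 size 1024 is less than minimum 5242880"
    println!("{err}");
}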

SetDisks::complete_multipart_upload (file path not shown in the capture)

@@ -5268,10 +5268,16 @@ impl StorageAPI for SetDisks {
             if (i < uploaded_parts.len() - 1) && !is_min_allowed_part_size(ext_part.actual_size) {
                 error!(
-                    "complete_multipart_upload is_min_allowed_part_size err {:?}, part_id={}, bucket={}, object={}",
-                    ext_part.actual_size, p.part_num, bucket, object
+                    "complete_multipart_upload part size too small: part {} size {} is less than minimum {}",
+                    p.part_num,
+                    ext_part.actual_size,
+                    GLOBAL_MIN_PART_SIZE.as_u64()
                 );
-                return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
+                return Err(Error::EntityTooSmall(
+                    p.part_num,
+                    ext_part.actual_size,
+                    GLOBAL_MIN_PART_SIZE.as_u64() as i64,
+                ));
             }
             object_size += ext_part.size;
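
For context: S3 requires every part of a multipart upload except the last to be at least 5 MiB, which is presumably what GLOBAL_MIN_PART_SIZE and is_min_allowed_part_size encode. A stand-in sketch of that check (the real definitions may differ):

// Assumed 5 MiB floor, per the S3 multipart upload rules; rustfs's
// GLOBAL_MIN_PART_SIZE may be defined with different types elsewhere.
const MIN_PART_SIZE: i64 = 5 * 1024 * 1024;

fn is_min_allowed_part_size(actual_size: i64) -> bool {
    actual_size >= MIN_PART_SIZE
}

fn main() {
    assert!(!is_min_allowed_part_size(1024)); // a 1 KiB middle part is rejected
    assert!(is_min_allowed_part_size(MIN_PART_SIZE)); // exactly 5 MiB passes
}

With this change, an undersized part surfaces to clients as EntityTooSmall rather than the misleading InvalidPart.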

StorageError to ApiError mapping (file path not shown in the capture)

@@ -81,6 +81,7 @@ impl From<StorageError> for ApiError {
             StorageError::DataMovementOverwriteErr(_, _, _) => S3ErrorCode::InvalidArgument,
             StorageError::ObjectExistsAsDirectory(_, _) => S3ErrorCode::InvalidArgument,
             StorageError::InvalidPart(_, _, _) => S3ErrorCode::InvalidPart,
+            StorageError::EntityTooSmall(_, _, _) => S3ErrorCode::EntityTooSmall,
             StorageError::PreconditionFailed => S3ErrorCode::PreconditionFailed,
             _ => S3ErrorCode::InternalError,
         };

rustfs/src/storage/ecfs.rs

@@ -292,6 +292,21 @@ impl FS {
     }
 }
 
+/// Helper function to get store and validate bucket exists
+async fn get_validated_store(bucket: &str) -> S3Result<Arc<rustfs_ecstore::store::ECStore>> {
+    let Some(store) = new_object_layer_fn() else {
+        return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
+    };
+
+    // Validate bucket exists
+    store
+        .get_bucket_info(bucket, &BucketOptions::default())
+        .await
+        .map_err(ApiError::from)?;
+
+    Ok(store)
+}
+
 #[async_trait::async_trait]
 impl S3 for FS {
     #[tracing::instrument(
@@ -928,9 +943,7 @@ impl S3 for FS {
             .await
             .map_err(ApiError::from)?;
 
-        let Some(store) = new_object_layer_fn() else {
-            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
-        };
+        let store = get_validated_store(&bucket).await?;
 
         let reader = store
             .get_object_reader(bucket.as_str(), key.as_str(), rs.clone(), h, &opts)
@@ -1215,15 +1228,17 @@ impl S3 for FS {
         } = req.input;
 
         let prefix = prefix.unwrap_or_default();
-        let max_keys = max_keys.unwrap_or(1000);
+        let max_keys = match max_keys {
+            Some(v) if v > 0 && v <= 1000 => v,
+            None => 1000,
+            _ => return Err(s3_error!(InvalidArgument, "max-keys must be between 1 and 1000")),
+        };
         let delimiter = delimiter.filter(|v| !v.is_empty());
         let continuation_token = continuation_token.filter(|v| !v.is_empty());
         let start_after = start_after.filter(|v| !v.is_empty());
 
-        let Some(store) = new_object_layer_fn() else {
-            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
-        };
+        let store = get_validated_store(&bucket).await?;
 
         let object_infos = store
             .list_objects_v2(
@@ -1310,9 +1325,7 @@ impl S3 for FS {
         let version_id_marker = version_id_marker.filter(|v| !v.is_empty());
         let delimiter = delimiter.filter(|v| !v.is_empty());
 
-        let Some(store) = new_object_layer_fn() else {
-            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
-        };
+        let store = get_validated_store(&bucket).await?;
 
         let object_infos = store
             .list_object_versions(&bucket, &prefix, key_marker, version_id_marker, delimiter.clone(), max_keys)
@@ -1423,9 +1436,7 @@ impl S3 for FS {
         // let mut reader = PutObjReader::new(body, content_length as usize);
 
-        let Some(store) = new_object_layer_fn() else {
-            return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
-        };
+        let store = get_validated_store(&bucket).await?;
 
         let mut metadata = metadata.unwrap_or_default();
@@ -1879,7 +1890,15 @@ impl S3 for FS {
         };
 
         let part_number_marker = part_number_marker.map(|x| x as usize);
-        let max_parts = max_parts.map(|x| x as usize).unwrap_or(MAX_PARTS_COUNT);
+        let max_parts = match max_parts {
+            Some(parts) => {
+                if !(1..=1000).contains(&parts) {
+                    return Err(s3_error!(InvalidArgument, "max-parts must be between 1 and 1000"));
+                }
+                parts as usize
+            }
+            None => 1000,
+        };
 
         let res = store
             .list_object_parts(&bucket, &key, &upload_id, part_number_marker, max_parts, &ObjectOptions::default())
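
For reference, this lines up with the S3 ListParts contract: max-parts defaults to 1,000, and AWS-compatible servers commonly cap it at 1,000 as well.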
@@ -2139,6 +2158,41 @@ impl S3 for FS {
             return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
         };
 
+        // Validate tag_set length doesn't exceed 10
+        if tagging.tag_set.len() > 10 {
+            return Err(s3_error!(InvalidArgument, "Object tags cannot be greater than 10"));
+        }
+
+        let mut tag_keys = std::collections::HashSet::with_capacity(tagging.tag_set.len());
+        for tag in &tagging.tag_set {
+            let key = tag
+                .key
+                .as_ref()
+                .filter(|k| !k.is_empty())
+                .ok_or_else(|| s3_error!(InvalidTag, "Tag key cannot be empty"))?;
+            if key.len() > 128 {
+                return Err(s3_error!(InvalidTag, "Tag key is too long, maximum allowed length is 128 characters"));
+            }
+
+            let value = tag
+                .value
+                .as_ref()
+                .ok_or_else(|| s3_error!(InvalidTag, "Tag value cannot be null"))?;
+            if value.is_empty() {
+                return Err(s3_error!(InvalidTag, "Tag value cannot be empty"));
+            }
+            if value.len() > 256 {
+                return Err(s3_error!(InvalidTag, "Tag value is too long, maximum allowed length is 256 characters"));
+            }
+
+            if !tag_keys.insert(key) {
+                return Err(s3_error!(InvalidTag, "Cannot provide multiple Tags with the same key"));
+            }
+        }
+
         let tags = encode_tags(tagging.tag_set);
 
         // TODO: getOpts
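
Taken together, these checks mirror the AWS object-tagging limits (at most 10 tags per object, keys up to 128 characters, values up to 256, keys unique), with this handler additionally rejecting empty values. A standalone sketch of the same rules, decoupled from the s3s types the handler uses:

use std::collections::HashSet;

// Hypothetical standalone version of the validation; the real handler
// returns S3Error values via the s3_error! macro instead of &'static str.
fn validate_tags(tags: &[(String, String)]) -> Result<(), &'static str> {
    if tags.len() > 10 {
        return Err("Object tags cannot be greater than 10");
    }
    let mut keys = HashSet::with_capacity(tags.len());
    for (key, value) in tags {
        if key.is_empty() || key.len() > 128 {
            return Err("Tag key must be 1-128 characters");
        }
        if value.is_empty() || value.len() > 256 {
            return Err("Tag value must be 1-256 characters");
        }
        if !keys.insert(key.as_str()) {
            return Err("Cannot provide multiple Tags with the same key");
        }
    }
    Ok(())
}

fn main() {
    let dup = vec![
        ("env".to_string(), "prod".to_string()),
        ("env".to_string(), "dev".to_string()),
    ];
    assert!(validate_tags(&dup).is_err()); // duplicate keys are rejected
    assert!(validate_tags(&[("team".into(), "storage".into())]).is_ok());
}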