fix: correctly handle copy object (#1512)

Co-authored-by: loverustfs <hello@rustfs.com>
Author: LeonWang0735
Date: 2026-01-16 10:07:48 +08:00 (committed by GitHub)
Parent: 18b22eedd9
Commit: ed4329d50c


@@ -117,10 +117,9 @@ use rustfs_utils::{
         AMZ_BUCKET_REPLICATION_STATUS, AMZ_CHECKSUM_MODE, AMZ_CHECKSUM_TYPE,
         headers::{
             AMZ_DECODED_CONTENT_LENGTH, AMZ_OBJECT_TAGGING, AMZ_RESTORE_EXPIRY_DAYS, AMZ_RESTORE_REQUEST_DATE,
-            RESERVED_METADATA_PREFIX_LOWER,
+            RESERVED_METADATA_PREFIX, RESERVED_METADATA_PREFIX_LOWER,
         },
     },
-    obj::extract_user_defined_metadata,
     path::{is_dir_object, path_join_buf},
 };
 use rustfs_zip::CompressionFormat;
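
The import hunk brings in RESERVED_METADATA_PREFIX alongside its lowercase twin (the copy path below scrubs both casings, which suggests reserved entries can be persisted either way) and drops obj::extract_user_defined_metadata, whose only remaining caller is deleted in the last hunk. A minimal sketch of the dual-casing scrub, with hypothetical prefix values; the real constants live in rustfs_utils::headers and may differ:

```rust
use std::collections::HashMap;

// Hypothetical values for illustration only; the actual constants are
// defined in rustfs_utils::headers.
const RESERVED_METADATA_PREFIX: &str = "X-Rustfs-Internal-";
const RESERVED_METADATA_PREFIX_LOWER: &str = "x-rustfs-internal-";

/// Remove one reserved entry under both casings it may have been stored as.
fn remove_reserved(user_defined: &mut HashMap<String, String>, suffix: &str) {
    user_defined.remove(&format!("{RESERVED_METADATA_PREFIX}{suffix}"));
    user_defined.remove(&format!("{RESERVED_METADATA_PREFIX_LOWER}{suffix}"));
}

fn main() {
    let mut md = HashMap::from([
        ("X-Rustfs-Internal-compression".to_string(), "zstd".to_string()),
        ("x-rustfs-internal-actual-size".to_string(), "1024".to_string()),
    ]);
    remove_reserved(&mut md, "compression");
    remove_reserved(&mut md, "actual-size");
    assert!(md.is_empty());
}
```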
@@ -1075,6 +1074,7 @@ impl S3 for FS {
             metadata,
             copy_source_if_match,
             copy_source_if_none_match,
+            content_type,
             ..
         } = req.input.clone();
         let (src_bucket, src_key, version_id) = match copy_source {
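
This hunk lifts content_type out of the CopyObject input. Under AWS semantics, a REPLACE-directive copy takes its Content-Type (and user metadata) from the request rather than the source object, so the handler needs the value in scope for the REPLACE branch added further down. A toy sketch of the rest-pattern destructuring, with an abridged, hypothetical stand-in for s3s's generated CopyObjectInput:

```rust
// Abridged stand-in for the generated s3s input type; the field set here
// is hypothetical.
struct CopyObjectInput {
    metadata_directive: Option<String>,
    content_type: Option<String>,
    key: String,
    // ...dozens of other fields in the real type
}

fn handle(input: CopyObjectInput) {
    // `..` ignores every field not named, so exposing one more field to the
    // handler is a one-line change to the pattern.
    let CopyObjectInput {
        metadata_directive,
        content_type,
        key,
        ..
    } = input;
    println!("key={key}, directive={metadata_directive:?}, content-type={content_type:?}");
}

fn main() {
    handle(CopyObjectInput {
        metadata_directive: Some("REPLACE".into()),
        content_type: Some("text/plain".into()),
        key: "a.txt".into(),
    });
}
```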
@@ -1090,6 +1090,19 @@ impl S3 for FS {
         validate_object_key(&src_key, "COPY (source)")?;
         validate_object_key(&key, "COPY (dest)")?;

+        // AWS S3 allows self-copy when metadata directive is REPLACE (used to update metadata in-place).
+        // Reject only when the directive is not REPLACE.
+        if metadata_directive.as_ref().map(|d| d.as_str()) != Some(MetadataDirective::REPLACE)
+            && src_bucket == bucket
+            && src_key == key
+        {
+            error!("Rejected self-copy operation: bucket={}, key={}", bucket, key);
+            return Err(s3_error!(
+                InvalidRequest,
+                "Cannot copy an object to itself. Source and destination must be different."
+            ));
+        }
+
         // warn!("copy_object {}/{}, to {}/{}", &src_bucket, &src_key, &bucket, &key);
         let mut src_opts = copy_src_opts(&src_bucket, &src_key, &req.headers).map_err(ApiError::from)?;
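
The guard mirrors AWS behavior: CopyObject with identical source and destination is only legal as an in-place metadata rewrite, i.e. with x-amz-metadata-directive: REPLACE; otherwise S3 answers InvalidRequest. (AWS also accepts self-copies that change storage class or encryption attributes; this guard keys only on the metadata directive.) The predicate is easy to restate in isolation for unit testing — a hypothetical helper, not code from this commit:

```rust
/// Standalone restatement of the self-copy guard above, for testing.
fn is_rejected_self_copy(directive: Option<&str>, src: (&str, &str), dst: (&str, &str)) -> bool {
    directive != Some("REPLACE") && src == dst
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn self_copy_without_replace_is_rejected() {
        assert!(is_rejected_self_copy(None, ("b", "k"), ("b", "k")));
        assert!(is_rejected_self_copy(Some("COPY"), ("b", "k"), ("b", "k")));
    }

    #[test]
    fn replace_directive_allows_in_place_copy() {
        assert!(!is_rejected_self_copy(Some("REPLACE"), ("b", "k"), ("b", "k")));
    }

    #[test]
    fn distinct_destination_is_always_allowed() {
        assert!(!is_rejected_self_copy(None, ("b", "k"), ("b", "k2")));
    }
}
```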
@@ -1214,12 +1227,35 @@ impl S3 for FS {
             src_info
                 .user_defined
                 .remove(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression"));
+            src_info
+                .user_defined
+                .remove(&format!("{RESERVED_METADATA_PREFIX}compression"));
             src_info
                 .user_defined
                 .remove(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size"));
+            src_info
+                .user_defined
+                .remove(&format!("{RESERVED_METADATA_PREFIX}actual-size"));
             src_info
                 .user_defined
                 .remove(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression-size"));
+            src_info
+                .user_defined
+                .remove(&format!("{RESERVED_METADATA_PREFIX}compression-size"));
         }

+        // Handle MetadataDirective REPLACE: replace user metadata while preserving system metadata.
+        // System metadata (compression, encryption) is added after this block to ensure
+        // it's not cleared by the REPLACE operation.
+        if metadata_directive.as_ref().map(|d| d.as_str()) == Some(MetadataDirective::REPLACE) {
+            src_info.user_defined.clear();
+            if let Some(metadata) = metadata {
+                src_info.user_defined.extend(metadata);
+            }
+            if let Some(ct) = content_type {
+                src_info.content_type = Some(ct.clone());
+                src_info.user_defined.insert("content-type".to_string(), ct);
+            }
+        }
+
         let mut reader = HashReader::new(reader, length, actual_size, None, None, false).map_err(ApiError::from)?;
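
The REPLACE branch now runs before the compression and encryption bookkeeping is attached to user_defined, so the clear() can no longer wipe system entries; the old ordering (removed in the next hunk) did it the other way around. A reduced model of why the ordering matters, with hypothetical key names:

```rust
use std::collections::HashMap;

// Reduced model of the ordering fix; key names are hypothetical.
fn finalize_metadata(
    replace: bool,
    request_md: HashMap<String, String>,
    mut md: HashMap<String, String>, // metadata copied from the source object
) -> HashMap<String, String> {
    // 1. REPLACE first: drop the source's user metadata, adopt the request's.
    if replace {
        md.clear();
        md.extend(request_md);
    }
    // 2. Only then attach system metadata, so REPLACE cannot clear it.
    md.insert(
        "x-amz-server-side-encryption-customer-key-md5".to_string(),
        "base64-md5".to_string(),
    );
    md
}

fn main() {
    let out = finalize_metadata(
        true,
        HashMap::from([("x-amz-meta-owner".to_string(), "alice".to_string())]),
        HashMap::from([("x-amz-meta-old".to_string(), "stale".to_string())]),
    );
    assert!(out.contains_key("x-amz-meta-owner"));
    assert!(!out.contains_key("x-amz-meta-old"));
    // The system entry survives because it was added after the REPLACE step.
    assert!(out.contains_key("x-amz-server-side-encryption-customer-key-md5"));
}
```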
@@ -1304,16 +1340,6 @@ impl S3 for FS {
                 .insert("x-amz-server-side-encryption-customer-key-md5".to_string(), sse_md5.clone());
         }

-        if metadata_directive.as_ref().map(|d| d.as_str()) == Some(MetadataDirective::REPLACE) {
-            let src_user_defined = extract_user_defined_metadata(&src_info.user_defined);
-            src_user_defined.keys().for_each(|k| {
-                src_info.user_defined.remove(k);
-            });
-            if let Some(metadata) = metadata {
-                src_info.user_defined.extend(metadata);
-            }
-        }
-
         // check quota for copy operation
         if let Some(metadata_sys) = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get() {
             let quota_checker = QuotaChecker::new(metadata_sys.clone());
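
With the relocated REPLACE handling above, this older block is redundant, and its removal retires the last use of extract_user_defined_metadata. One way to check the fixed behavior end to end from a client, sketched with the official aws-sdk-s3 crate (bucket, key, and values are placeholders; this assumes credentials against a running rustfs endpoint and is not part of the commit):

```rust
use aws_sdk_s3::{types::MetadataDirective, Client};

// Self-copy with REPLACE: allowed by the new guard, and the new
// Content-Type and user metadata should stick.
async fn verify_in_place_replace(client: &Client) -> Result<(), aws_sdk_s3::Error> {
    client
        .copy_object()
        .bucket("demo")
        .key("a.txt")
        .copy_source("demo/a.txt") // same source and destination
        .metadata_directive(MetadataDirective::Replace)
        .content_type("text/plain")
        .metadata("owner", "alice")
        .send()
        .await?;

    let head = client.head_object().bucket("demo").key("a.txt").send().await?;
    assert_eq!(head.content_type(), Some("text/plain"));
    assert_eq!(
        head.metadata().and_then(|m| m.get("owner")).map(String::as_str),
        Some("alice")
    );
    Ok(())
}
```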