move xhttp to filemeta

weisd
2025-06-11 16:22:20 +08:00
parent e8a59d7c07
commit bdb7e8d321
8 changed files with 27 additions and 40 deletions
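
For context, a minimal, hypothetical sketch (not code from this commit) of how callers reference these header constants after the move, assuming rustfs_filemeta is available as a dependency and its headers module is public, as the diff below makes it; the metadata map and values are placeholders mirroring the lookups in ec.rs:

    use rustfs_filemeta::headers::{AMZ_OBJECT_TAGGING, AMZ_STORAGE_CLASS};
    use std::collections::HashMap;

    fn main() {
        // Hypothetical user-defined metadata map, as built from request headers.
        let mut user_defined: HashMap<String, String> = HashMap::new();
        user_defined.insert(AMZ_OBJECT_TAGGING.to_owned(), "env=prod".to_owned());
        user_defined.insert(AMZ_STORAGE_CLASS.to_owned(), "STANDARD".to_owned());

        // Storage-class lookup in the style used after this change:
        // falls back to "" when the key is absent.
        let sc = user_defined.get(AMZ_STORAGE_CLASS).cloned().unwrap_or_default();
        println!("storage class: {sc}");
    }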

View File

@@ -15,3 +15,7 @@ pub const VERSION_PURGE_STATUS_KEY: &str = "X-Rustfs-Internal-purgestatus";
 pub const X_RUSTFS_HEALING: &str = "X-Rustfs-Internal-healing";
 pub const X_RUSTFS_DATA_MOV: &str = "X-Rustfs-Internal-data-mov";
+pub const AMZ_OBJECT_TAGGING: &str = "X-Amz-Tagging";
+pub const AMZ_BUCKET_REPLICATION_STATUS: &str = "X-Amz-Replication-Status";
+pub const AMZ_STORAGE_CLASS: &str = "x-amz-storage-class";
+pub const AMZ_DECODED_CONTENT_LENGTH: &str = "X-Amz-Decoded-Content-Length";

View File

@@ -2,7 +2,7 @@ mod error;
 mod fileinfo;
 mod filemeta;
 mod filemeta_inline;
-mod headers;
+pub mod headers;
 mod metacache;
 pub mod test_data;

View File

@@ -25,7 +25,6 @@ pub mod store_api;
 mod store_init;
 pub mod store_list_objects;
 mod store_utils;
-pub mod xhttp;
 pub use global::new_object_layer_fn;
 pub use global::set_global_endpoints;

View File

@@ -39,8 +39,6 @@ use crate::{
         ObjectOptions, PartInfo, PutObjReader, StorageAPI,
     },
     store_init::load_format_erasure,
-    // utils::crypto::{base64_decode, base64_encode, hex},
-    xhttp,
 };
 use crate::{disk::STORAGE_FORMAT_FILE, heal::mrf::PartialOperation};
 use crate::{
@@ -58,7 +56,9 @@ use md5::{Digest as Md5Digest, Md5};
 use rand::{Rng, seq::SliceRandom};
 use rustfs_filemeta::{
     FileInfo, FileMeta, FileMetaShallowVersion, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams, ObjectPartInfo,
-    RawFileInfo, file_info_from_raw, merge_file_meta_versions,
+    RawFileInfo, file_info_from_raw,
+    headers::{AMZ_OBJECT_TAGGING, AMZ_STORAGE_CLASS},
+    merge_file_meta_versions,
 };
 use rustfs_rio::{EtagResolvable, HashReader};
 use rustfs_utils::{
@@ -3784,13 +3784,7 @@ impl ObjectIO for SetDisks {
         let sc_parity_drives = {
             if let Some(sc) = GLOBAL_StorageClass.get() {
-                sc.get_parity_for_sc(
-                    user_defined
-                        .get(xhttp::AMZ_STORAGE_CLASS)
-                        .cloned()
-                        .unwrap_or_default()
-                        .as_str(),
-                )
+                sc.get_parity_for_sc(user_defined.get(AMZ_STORAGE_CLASS).cloned().unwrap_or_default().as_str())
             } else {
                 None
             }
@@ -3915,9 +3909,9 @@ impl ObjectIO for SetDisks {
             // get content-type
         }
-        if let Some(sc) = user_defined.get(xhttp::AMZ_STORAGE_CLASS) {
+        if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS) {
             if sc == storageclass::STANDARD {
-                let _ = user_defined.remove(xhttp::AMZ_STORAGE_CLASS);
+                let _ = user_defined.remove(AMZ_STORAGE_CLASS);
             }
         }
@@ -4414,7 +4408,7 @@ impl StorageAPI for SetDisks {
     async fn put_object_tags(&self, bucket: &str, object: &str, tags: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
         let (mut fi, _, disks) = self.get_object_fileinfo(bucket, object, opts, false).await?;
-        fi.metadata.insert(xhttp::AMZ_OBJECT_TAGGING.to_owned(), tags.to_owned());
+        fi.metadata.insert(AMZ_OBJECT_TAGGING.to_owned(), tags.to_owned());
         // TODO: userdeefined
@@ -4779,21 +4773,15 @@ impl StorageAPI for SetDisks {
             user_defined.insert("etag".to_owned(), etag.clone());
         }
-        if let Some(sc) = user_defined.get(xhttp::AMZ_STORAGE_CLASS) {
+        if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS) {
             if sc == storageclass::STANDARD {
-                let _ = user_defined.remove(xhttp::AMZ_STORAGE_CLASS);
+                let _ = user_defined.remove(AMZ_STORAGE_CLASS);
             }
         }
         let sc_parity_drives = {
             if let Some(sc) = GLOBAL_StorageClass.get() {
-                sc.get_parity_for_sc(
-                    user_defined
-                        .get(xhttp::AMZ_STORAGE_CLASS)
-                        .cloned()
-                        .unwrap_or_default()
-                        .as_str(),
-                )
+                sc.get_parity_for_sc(user_defined.get(AMZ_STORAGE_CLASS).cloned().unwrap_or_default().as_str())
             } else {
                 None
             }
@@ -4831,9 +4819,9 @@ impl StorageAPI for SetDisks {
             // TODO: get content-type
         }
-        if let Some(sc) = user_defined.get(xhttp::AMZ_STORAGE_CLASS) {
+        if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS) {
             if sc == storageclass::STANDARD {
-                let _ = user_defined.remove(xhttp::AMZ_STORAGE_CLASS);
+                let _ = user_defined.remove(AMZ_STORAGE_CLASS);
             }
         }

View File

@@ -4,10 +4,10 @@ use crate::cmd::bucket_replication::{ReplicationStatusType, VersionPurgeStatusTy
 use crate::error::{Error, Result};
 use crate::heal::heal_ops::HealSequence;
 use crate::store_utils::clean_metadata;
-use crate::{disk::DiskStore, heal::heal_commands::HealOpts, xhttp};
+use crate::{disk::DiskStore, heal::heal_commands::HealOpts};
 use http::{HeaderMap, HeaderValue};
 use madmin::heal_commands::HealResultItem;
-use rustfs_filemeta::{FileInfo, MetaCacheEntriesSorted, ObjectPartInfo};
+use rustfs_filemeta::{FileInfo, MetaCacheEntriesSorted, ObjectPartInfo, headers::AMZ_OBJECT_TAGGING};
 use rustfs_rio::{HashReader, Reader};
 use rustfs_utils::path::decode_dir_object;
 use serde::{Deserialize, Serialize};
@@ -424,7 +424,7 @@ impl ObjectInfo {
         };
         // tags
-        let user_tags = fi.metadata.get(xhttp::AMZ_OBJECT_TAGGING).cloned().unwrap_or_default();
+        let user_tags = fi.metadata.get(AMZ_OBJECT_TAGGING).cloned().unwrap_or_default();
         let inlined = fi.inline_data();

View File

@@ -1,6 +1,6 @@
 use crate::config::storageclass::STANDARD;
-use crate::xhttp::AMZ_OBJECT_TAGGING;
-use crate::xhttp::AMZ_STORAGE_CLASS;
+use rustfs_filemeta::headers::AMZ_OBJECT_TAGGING;
+use rustfs_filemeta::headers::AMZ_STORAGE_CLASS;
 use std::collections::HashMap;
 pub fn clean_metadata(metadata: &mut HashMap<String, String>) {

View File

@@ -1,4 +0,0 @@
-pub const AMZ_OBJECT_TAGGING: &str = "X-Amz-Tagging";
-pub const AMZ_BUCKET_REPLICATION_STATUS: &str = "X-Amz-Replication-Status";
-pub const AMZ_STORAGE_CLASS: &str = "x-amz-storage-class";
-pub const AMZ_DECODED_CONTENT_LENGTH: &str = "X-Amz-Decoded-Content-Length";

View File

@@ -52,7 +52,6 @@ use ecstore::bucket::utils::serialize;
 use ecstore::cmd::bucket_replication::ReplicationStatusType;
 use ecstore::cmd::bucket_replication::ReplicationType;
 use ecstore::store_api::RESERVED_METADATA_PREFIX_LOWER;
-use ecstore::xhttp;
 use futures::pin_mut;
 use futures::{Stream, StreamExt};
 use http::HeaderMap;
@@ -64,6 +63,7 @@ use policy::policy::Validator;
 use policy::policy::action::Action;
 use policy::policy::action::S3Action;
 use query::instance::make_rustfsms;
+use rustfs_filemeta::headers::{AMZ_DECODED_CONTENT_LENGTH, AMZ_OBJECT_TAGGING};
 use rustfs_rio::HashReader;
 use rustfs_utils::path::path_join_buf;
 use rustfs_zip::CompressionFormat;
@@ -952,7 +952,7 @@ impl S3 for FS {
         let content_length = match content_length {
             Some(c) => c,
             None => {
-                if let Some(val) = req.headers.get(xhttp::AMZ_DECODED_CONTENT_LENGTH) {
+                if let Some(val) = req.headers.get(AMZ_DECODED_CONTENT_LENGTH) {
                     match atoi::atoi::<i64>(val.as_bytes()) {
                         Some(x) => x,
                         None => return Err(s3_error!(UnexpectedContent)),
@@ -981,7 +981,7 @@ impl S3 for FS {
         extract_metadata_from_mime(&req.headers, &mut metadata);
         if let Some(tags) = tagging {
-            metadata.insert(xhttp::AMZ_OBJECT_TAGGING.to_owned(), tags);
+            metadata.insert(AMZ_OBJECT_TAGGING.to_owned(), tags);
         }
         let mt = metadata.clone();
@@ -1055,7 +1055,7 @@ impl S3 for FS {
         let mut metadata = extract_metadata(&req.headers);
         if let Some(tags) = tagging {
-            metadata.insert(xhttp::AMZ_OBJECT_TAGGING.to_owned(), tags);
+            metadata.insert(AMZ_OBJECT_TAGGING.to_owned(), tags);
         }
         let opts: ObjectOptions = put_opts(&bucket, &key, version_id, &req.headers, Some(metadata))
@@ -1098,7 +1098,7 @@ impl S3 for FS {
         let content_length = match content_length {
             Some(c) => c,
             None => {
-                if let Some(val) = req.headers.get(xhttp::AMZ_DECODED_CONTENT_LENGTH) {
+                if let Some(val) = req.headers.get(AMZ_DECODED_CONTENT_LENGTH) {
                     match atoi::atoi::<i64>(val.as_bytes()) {
                         Some(x) => x,
                         None => return Err(s3_error!(UnexpectedContent)),