package s3api

import (
	"crypto/md5"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

	"github.com/pquerna/cachecontrol/cacheobject"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
	"github.com/seaweedfs/seaweedfs/weed/security"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	weed_server "github.com/seaweedfs/seaweedfs/weed/server"
	stats_collect "github.com/seaweedfs/seaweedfs/weed/stats"
)
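
// PutObjectHandler handles the S3 PutObject API: it validates the Content-MD5,
// Cache-Control and Expires headers, creates a folder entry for keys ending in
// "/", and otherwise stores the object through the filer, taking the versioned
// code path when bucket versioning is enabled.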
func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) {

	// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html

	bucket, object := s3_constants.GetBucketAndObject(r)
	glog.V(3).Infof("PutObjectHandler %s %s", bucket, object)

	_, err := validateContentMd5(r.Header)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
		return
	}

	if r.Header.Get("Cache-Control") != "" {
		if _, err = cacheobject.ParseRequestCacheControl(r.Header.Get("Cache-Control")); err != nil {
			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
			return
		}
	}

	if r.Header.Get("Expires") != "" {
		if _, err = time.Parse(http.TimeFormat, r.Header.Get("Expires")); err != nil {
			s3err.WriteErrorResponse(w, r, s3err.ErrMalformedDate)
			return
		}
	}
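
	// Obtain a reader over the request payload; errors (for example a malformed
	// chunked or signed body) are reported to the client before any data is written.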
	dataReader, s3ErrCode := getRequestDataReader(s3a, r)
	if s3ErrCode != s3err.ErrNone {
		s3err.WriteErrorResponse(w, r, s3ErrCode)
		return
	}
	defer dataReader.Close()
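
	// A key ending in "/" with a body of at most 1 KiB is treated as a folder
	// marker: mkdir creates the directory entry and any small payload is kept
	// inline as the entry content.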
	objectContentType := r.Header.Get("Content-Type")
	if strings.HasSuffix(object, "/") && r.ContentLength <= 1024 {
		if err := s3a.mkdir(
			s3a.option.BucketsPath, bucket+strings.TrimSuffix(object, "/"),
			func(entry *filer_pb.Entry) {
				if objectContentType == "" {
					objectContentType = s3_constants.FolderMimeType
				}
				if r.ContentLength > 0 {
					entry.Content, _ = io.ReadAll(r.Body)
				}
				entry.Attributes.Mime = objectContentType
			}); err != nil {
			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
			return
		}
	} else {
		// Check if versioning is enabled for the bucket
		versioningEnabled, err := s3a.isVersioningEnabled(bucket)
		if err != nil {
			if err == filer_pb.ErrNotFound {
				s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
				return
			}
			glog.Errorf("Error checking versioning status for bucket %s: %v", bucket, err)
			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
			return
		}

		glog.V(1).Infof("PutObjectHandler: bucket %s, object %s, versioningEnabled=%v", bucket, object, versioningEnabled)

		// Check object lock permissions before PUT operation (only for versioned buckets)
		bypassGovernance := r.Header.Get("x-amz-bypass-governance-retention") == "true"
		if err := s3a.checkObjectLockPermissionsForPut(r, bucket, object, bypassGovernance, versioningEnabled); err != nil {
			s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied)
			return
		}

		if versioningEnabled {
			// Handle versioned PUT
			glog.V(1).Infof("PutObjectHandler: using versioned PUT for %s/%s", bucket, object)
			versionId, etag, errCode := s3a.putVersionedObject(r, bucket, object, dataReader, objectContentType)
			if errCode != s3err.ErrNone {
				s3err.WriteErrorResponse(w, r, errCode)
				return
			}

			// Set version ID in response header
			if versionId != "" {
				w.Header().Set("x-amz-version-id", versionId)
			}

			// Set ETag in response
			setEtag(w, etag)
		} else {
			// Handle regular PUT (non-versioned)
			glog.V(1).Infof("PutObjectHandler: using regular PUT for %s/%s", bucket, object)
			uploadUrl := s3a.toFilerUrl(bucket, object)
			if objectContentType == "" {
				dataReader = mimeDetect(r, dataReader)
			}

			etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, "", bucket)

			if errCode != s3err.ErrNone {
				s3err.WriteErrorResponse(w, r, errCode)
				return
			}

			setEtag(w, etag)
		}
	}
	stats_collect.RecordBucketActiveTime(bucket)
	stats_collect.S3UploadedObjectsCounter.WithLabelValues(bucket).Inc()

	writeSuccessResponseEmpty(w, r)
}
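
// putToFiler proxies the object data to the filer at uploadUrl, forwarding the
// client's headers and, when available, a filer JWT. The MD5 of the streamed
// data is returned as the ETag, and filer-side errors are translated into S3
// error codes.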
func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader, destination string, bucket string) (etag string, code s3err.ErrorCode) {

	hash := md5.New()
	var body = io.TeeReader(dataReader, hash)

	proxyReq, err := http.NewRequest(http.MethodPut, uploadUrl, body)

	if err != nil {
		glog.Errorf("NewRequest %s: %v", uploadUrl, err)
		return "", s3err.ErrInternalError
	}

	proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
	if destination != "" {
		proxyReq.Header.Set(s3_constants.SeaweedStorageDestinationHeader, destination)
	}

	if s3a.option.FilerGroup != "" {
		query := proxyReq.URL.Query()
		query.Add("collection", s3a.getCollectionName(bucket))
		proxyReq.URL.RawQuery = query.Encode()
	}

	for header, values := range r.Header {
		for _, value := range values {
			proxyReq.Header.Add(header, value)
		}
	}
	// ensure that the Authorization header is overriding any previous
	// Authorization header which might be already present in proxyReq
	s3a.maybeAddFilerJwtAuthorization(proxyReq, true)
	resp, postErr := s3a.client.Do(proxyReq)

	if postErr != nil {
		glog.Errorf("post to filer: %v", postErr)
		if strings.Contains(postErr.Error(), s3err.ErrMsgPayloadChecksumMismatch) {
			return "", s3err.ErrInvalidDigest
		}
		return "", s3err.ErrInternalError
	}
	defer resp.Body.Close()

	etag = fmt.Sprintf("%x", hash.Sum(nil))

	resp_body, ra_err := io.ReadAll(resp.Body)
	if ra_err != nil {
		glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err)
		return etag, s3err.ErrInternalError
	}
	var ret weed_server.FilerPostResult
	unmarshal_err := json.Unmarshal(resp_body, &ret)
	if unmarshal_err != nil {
		glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body))
		return "", s3err.ErrInternalError
	}
	if ret.Error != "" {
		glog.Errorf("upload to filer error: %v", ret.Error)
		return "", filerErrorToS3Error(ret.Error)
	}
	stats_collect.RecordBucketActiveTime(bucket)
	stats_collect.S3BucketTrafficReceivedBytesCounter.WithLabelValues(bucket).Add(float64(ret.Size))
	return etag, s3err.ErrNone
}
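
// setEtag writes the ETag response header, quoting the value if it is not
// already quoted.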
func setEtag(w http.ResponseWriter, etag string) {
	if etag != "" {
		if strings.HasPrefix(etag, "\"") {
			w.Header()["ETag"] = []string{etag}
		} else {
			w.Header()["ETag"] = []string{"\"" + etag + "\""}
		}
	}
}
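
// filerErrorToS3Error maps an error string returned by the filer to the
// closest S3 error code.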
func filerErrorToS3Error(errString string) s3err.ErrorCode {
	switch {
	case strings.HasPrefix(errString, "existing ") && strings.HasSuffix(errString, "is a directory"):
		return s3err.ErrExistingObjectIsDirectory
	case strings.HasSuffix(errString, "is a file"):
		return s3err.ErrExistingObjectIsFile
	default:
		return s3err.ErrInternalError
	}
}
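
// maybeAddFilerJwtAuthorization sets the Authorization header of the proxied
// request to a filer bearer token; when no token is generated, the request is
// left unchanged.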
func (s3a *S3ApiServer) maybeAddFilerJwtAuthorization(r *http.Request, isWrite bool) {
	encodedJwt := s3a.maybeGetFilerJwtAuthorizationToken(isWrite)

	if encodedJwt == "" {
		return
	}

	r.Header.Set("Authorization", "BEARER "+string(encodedJwt))
}
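
// maybeGetFilerJwtAuthorizationToken generates a filer JWT with either the
// write or the read signing key, depending on isWrite.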
func (s3a *S3ApiServer) maybeGetFilerJwtAuthorizationToken(isWrite bool) string {
	var encodedJwt security.EncodedJwt
	if isWrite {
		encodedJwt = security.GenJwtForFilerServer(s3a.filerGuard.SigningKey, s3a.filerGuard.ExpiresAfterSec)
	} else {
		encodedJwt = security.GenJwtForFilerServer(s3a.filerGuard.ReadSigningKey, s3a.filerGuard.ReadExpiresAfterSec)
	}
	return string(encodedJwt)
}

// putVersionedObject handles PUT operations for versioned buckets using the new layout
// where all versions (including latest) are stored in the .versions directory
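// As a sketch of the assumed layout: for an object key "photos/cat.jpg", the
// version data is written to "photos/cat.jpg.versions/<version file name>"
// inside the bucket directory, and the .versions entry's extended attributes
// record which version is currently the latest.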
func (s3a *S3ApiServer) putVersionedObject(r *http.Request, bucket, object string, dataReader io.Reader, objectContentType string) (versionId string, etag string, errCode s3err.ErrorCode) {
	// Generate version ID
	versionId = generateVersionId()

	glog.V(2).Infof("putVersionedObject: creating version %s for %s/%s", versionId, bucket, object)

	// Create the version file name
	versionFileName := s3a.getVersionFileName(versionId)

	// Upload directly to the versions directory
	// We need to construct the object path relative to the bucket
	versionObjectPath := object + ".versions/" + versionFileName
	versionUploadUrl := s3a.toFilerUrl(bucket, versionObjectPath)

	hash := md5.New()
	var body = io.TeeReader(dataReader, hash)
	if objectContentType == "" {
		body = mimeDetect(r, body)
	}

	glog.V(2).Infof("putVersionedObject: uploading %s/%s version %s to %s", bucket, object, versionId, versionUploadUrl)

	etag, errCode = s3a.putToFiler(r, versionUploadUrl, body, "", bucket)
	if errCode != s3err.ErrNone {
		glog.Errorf("putVersionedObject: failed to upload version: %v", errCode)
		return "", "", errCode
	}

	// Get the uploaded entry to add versioning metadata
	bucketDir := s3a.option.BucketsPath + "/" + bucket
	versionEntry, err := s3a.getEntry(bucketDir, versionObjectPath)
	if err != nil {
		glog.Errorf("putVersionedObject: failed to get version entry: %v", err)
		return "", "", s3err.ErrInternalError
	}

	// Add versioning metadata to this version
	if versionEntry.Extended == nil {
		versionEntry.Extended = make(map[string][]byte)
	}
	versionEntry.Extended[s3_constants.ExtVersionIdKey] = []byte(versionId)

	// Store ETag with quotes for S3 compatibility
	if !strings.HasPrefix(etag, "\"") {
		etag = "\"" + etag + "\""
	}
	versionEntry.Extended[s3_constants.ExtETagKey] = []byte(etag)

	// Update the version entry with metadata
	err = s3a.mkFile(bucketDir, versionObjectPath, versionEntry.Chunks, func(updatedEntry *filer_pb.Entry) {
		updatedEntry.Extended = versionEntry.Extended
		updatedEntry.Attributes = versionEntry.Attributes
		updatedEntry.Chunks = versionEntry.Chunks
	})
	if err != nil {
		glog.Errorf("putVersionedObject: failed to update version metadata: %v", err)
		return "", "", s3err.ErrInternalError
	}

	// Update the .versions directory metadata to indicate this is the latest version
	err = s3a.updateLatestVersionInDirectory(bucket, object, versionId, versionFileName)
	if err != nil {
		glog.Errorf("putVersionedObject: failed to update latest version in directory: %v", err)
		return "", "", s3err.ErrInternalError
	}

	glog.V(2).Infof("putVersionedObject: successfully created version %s for %s/%s", versionId, bucket, object)
	return versionId, etag, s3err.ErrNone
}

// updateLatestVersionInDirectory updates the .versions directory metadata to indicate the latest version
func (s3a *S3ApiServer) updateLatestVersionInDirectory(bucket, object, versionId, versionFileName string) error {
	bucketDir := s3a.option.BucketsPath + "/" + bucket
	versionsObjectPath := object + ".versions"

	// Get the current .versions directory entry
	versionsEntry, err := s3a.getEntry(bucketDir, versionsObjectPath)
	if err != nil {
		glog.Errorf("updateLatestVersionInDirectory: failed to get .versions entry: %v", err)
		return fmt.Errorf("failed to get .versions entry: %v", err)
	}

	// Add or update the latest version metadata
	if versionsEntry.Extended == nil {
		versionsEntry.Extended = make(map[string][]byte)
	}
	versionsEntry.Extended[s3_constants.ExtLatestVersionIdKey] = []byte(versionId)
	versionsEntry.Extended[s3_constants.ExtLatestVersionFileNameKey] = []byte(versionFileName)

	// Update the .versions directory entry with metadata
	err = s3a.mkFile(bucketDir, versionsObjectPath, versionsEntry.Chunks, func(updatedEntry *filer_pb.Entry) {
		updatedEntry.Extended = versionsEntry.Extended
		updatedEntry.Attributes = versionsEntry.Attributes
		updatedEntry.Chunks = versionsEntry.Chunks
	})
	if err != nil {
		glog.Errorf("updateLatestVersionInDirectory: failed to update .versions directory metadata: %v", err)
		return fmt.Errorf("failed to update .versions directory metadata: %v", err)
	}

	return nil
}