fix parsing s3 tag (#7069)
* fix parsing s3 tag
  fix https://github.com/seaweedfs/seaweedfs/issues/7040#issuecomment-3145615630
* url.ParseQuery
parent f1eb4dd427
commit fd447465c2
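As background (not part of the commit), here is a minimal sketch of what Go's url.ParseQuery does with a raw X-Amz-Tagging value of the kind exercised by the new test: percent-escapes and '+' are decoded, a trailing '=' produces an empty-string value, and a value containing '=' survives because only the first '=' of each pair separates key from value. The tag names and values below are made up for illustration.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A raw X-Amz-Tagging header value as it might arrive percent-encoded on the wire.
	raw := "Timestamp=2025-07-16+14%3A40%3A39&Path=%2Ftmp%2Ffile.txt&Equation=x=y%2B1&EmptyValue="

	tags, err := url.ParseQuery(raw)
	if err != nil {
		panic(err)
	}

	fmt.Println(tags.Get("Timestamp"))         // 2025-07-16 14:40:39
	fmt.Println(tags.Get("Path"))              // /tmp/file.txt
	fmt.Println(tags.Get("Equation"))          // x=y+1
	fmt.Printf("%q\n", tags.Get("EmptyValue")) // ""
}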
@@ -2,6 +2,7 @@ package basic
 
 import (
 	"fmt"
+	"strings"
 	"testing"
 
 	"github.com/aws/aws-sdk-go/aws"
@@ -147,3 +148,81 @@ func TestObjectTaggingWithEncodedValues(t *testing.T) {
 		Key:    aws.String("testDir/testObjectWithEncodedTags"),
 	})
 }
+
+// TestObjectUploadWithEncodedTags tests the specific issue reported in GitHub issue #7040
+// where tags sent via X-Amz-Tagging header during object upload are not URL decoded properly
+func TestObjectUploadWithEncodedTags(t *testing.T) {
+	// This test specifically addresses the issue where tags with special characters
+	// (like spaces, colons, slashes) sent during object upload are not URL decoded
+	// This tests the fix in filer_server_handlers_write_autochunk.go
+
+	objectKey := "testDir/testObjectUploadWithTags"
+
+	// Upload object with tags that contain special characters that would be URL encoded
+	// The AWS SDK will automatically URL encode these when sending the X-Amz-Tagging header
+	// Test edge cases that url.ParseQuery handles better than manual parsing:
+	// - Values containing "=" characters
+	// - Empty values
+	// - Complex special characters
+	_, err := svc.PutObject(&s3.PutObjectInput{
+		Bucket:  aws.String("theBucket"),
+		Key:     aws.String(objectKey),
+		Body:    aws.ReadSeekCloser(strings.NewReader("test content")),
+		Tagging: aws.String("Timestamp=2025-07-16 14:40:39&Path=/tmp/file.txt&Description=A test file with spaces&Equation=x=y+1&EmptyValue=&Complex=A%20tag%20with%20%26%20%3D%20chars"),
+	})
+
+	if err != nil {
+		t.Fatalf("Failed to upload object with tags: %v", err)
+	}
+
+	// Get the tags back to verify they were properly URL decoded during upload
+	response, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
+		Bucket: aws.String("theBucket"),
+		Key:    aws.String(objectKey),
+	})
+
+	if err != nil {
+		t.Fatalf("Failed to get object tags: %v", err)
+	}
+
+	// Verify that the tags are properly decoded (not URL encoded)
+	tagMap := make(map[string]string)
+	for _, tag := range response.TagSet {
+		tagMap[*tag.Key] = *tag.Value
+	}
+
+	// Test cases for values that would be URL encoded in the X-Amz-Tagging header
+	testCases := []struct {
+		key           string
+		expectedValue string
+		description   string
+	}{
+		{"Timestamp", "2025-07-16 14:40:39", "timestamp with spaces and colons"},
+		{"Path", "/tmp/file.txt", "file path with slashes"},
+		{"Description", "A test file with spaces", "description with spaces"},
+	}
+
+	for _, tc := range testCases {
+		actualValue, exists := tagMap[tc.key]
+		if !exists {
+			t.Errorf("Expected tag key '%s' not found", tc.key)
+			continue
+		}
+
+		if actualValue != tc.expectedValue {
+			t.Errorf("Tag '%s' (%s): expected '%s', got '%s'",
+				tc.key, tc.description, tc.expectedValue, actualValue)
+		} else {
+			fmt.Printf("✓ Tag '%s' correctly decoded: '%s'\n", tc.key, actualValue)
+		}
+	}
+
+	// Clean up
+	_, err = svc.DeleteObject(&s3.DeleteObjectInput{
+		Bucket: aws.String("theBucket"),
+		Key:    aws.String(objectKey),
+	})
+	if err != nil {
+		t.Logf("Warning: Failed to clean up test object: %v", err)
+	}
+}
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"net/url"
 	"os"
 	"path"
 	"strconv"
@@ -462,12 +463,19 @@ func SaveAmzMetaData(r *http.Request, existing map[string][]byte, isReplace bool
 	}
 
 	if tags := r.Header.Get(s3_constants.AmzObjectTagging); tags != "" {
-		for _, v := range strings.Split(tags, "&") {
-			tag := strings.Split(v, "=")
-			if len(tag) == 2 {
-				metadata[s3_constants.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1])
-			} else if len(tag) == 1 {
-				metadata[s3_constants.AmzObjectTagging+"-"+tag[0]] = nil
+		// Use url.ParseQuery for robust parsing and automatic URL decoding
+		parsedTags, err := url.ParseQuery(tags)
+		if err != nil {
+			glog.Errorf("Failed to parse S3 tags '%s': %v", tags, err)
+		} else {
+			for key, values := range parsedTags {
+				// According to S3 spec, if a key is provided multiple times, the last value is used.
+				// A tag value can be an empty string but not nil.
+				value := ""
+				if len(values) > 0 {
+					value = values[len(values)-1]
+				}
+				metadata[s3_constants.AmzObjectTagging+"-"+key] = []byte(value)
 			}
 		}
 	}
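The hunk above is in the filer write handler (filer_server_handlers_write_autochunk.go, the file named in the new test's comment). To show why the manual parser was replaced: it split each pair on every '=' and never URL-decoded, so a value containing '=' produced a three-element slice and was silently dropped, while percent-escapes such as %20 were stored verbatim. A rough sketch of that failure mode, using hypothetical helper names rather than the server's actual code:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseTagsManually mirrors the pre-fix approach: split on "&" and "=", with no URL decoding.
func parseTagsManually(tags string) map[string]string {
	out := map[string]string{}
	for _, v := range strings.Split(tags, "&") {
		tag := strings.Split(v, "=")
		if len(tag) == 2 {
			out[tag[0]] = tag[1]
		} else if len(tag) == 1 {
			out[tag[0]] = ""
		}
		// len(tag) > 2 (the value itself contains "=") matches neither branch, so the tag is lost
	}
	return out
}

func main() {
	raw := "Equation=x=y1&Complex=A%20tag%20with%20%26%20%3D%20chars"

	fmt.Println(parseTagsManually(raw))
	// map[Complex:A%20tag%20with%20%26%20%3D%20chars]   (Equation dropped, Complex left encoded)

	parsed, _ := url.ParseQuery(raw)
	fmt.Println(parsed.Get("Equation"), "|", parsed.Get("Complex"))
	// x=y1 | A tag with & = chars
}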