mirror of
https://github.com/seaweedfs/seaweedfs.git
synced 2025-09-19 02:11:34 +08:00
implement PubObjectRetention and WORM (#6969)
* implement PubObjectRetention and WORM
* Update s3_worm_integration_test.go
* avoid previous buckets
* Update s3-versioning-tests.yml
* address comments
* address comments
* rename to ExtObjectLockModeKey
* only checkObjectLockPermissions if versioningEnabled
* address comments
* comments
* Revert "comments"
This reverts commit 6736434176
.
* Update s3api_object_handlers_skip.go
* Update s3api_object_retention_test.go
* add version id to ObjectIdentifier
* address comments
* add comments
* Add proper error logging for timestamp parsing failures
* address comments
* add version id to the error
* Update weed/s3api/s3api_object_retention_test.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* Update weed/s3api/s3api_object_retention.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* constants
* fix comments
* address comments
* address comment
* refactor out handleObjectLockAvailabilityCheck
* errors.Is ErrBucketNotFound
* better error checking
* address comments
---------
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
This commit is contained in:
120
.github/workflows/s3-versioning-tests.yml
vendored
120
.github/workflows/s3-versioning-tests.yml
vendored
@@ -1,10 +1,10 @@
|
||||
name: "S3 Versioning Tests (Go)"
|
||||
name: "S3 Versioning and Retention Tests (Go)"
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.head_ref }}/s3-versioning
|
||||
group: ${{ github.head_ref }}/s3-versioning-retention
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
@@ -130,6 +130,122 @@ jobs:
|
||||
path: test/s3/versioning/weed-test*.log
|
||||
retention-days: 3
|
||||
|
||||
s3-retention-tests:
|
||||
name: S3 Retention Tests
|
||||
runs-on: ubuntu-22.04
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
matrix:
|
||||
test-type: ["quick", "comprehensive"]
|
||||
|
||||
steps:
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
id: go
|
||||
|
||||
- name: Install SeaweedFS
|
||||
run: |
|
||||
go install -buildvcs=false
|
||||
|
||||
- name: Run S3 Retention Tests - ${{ matrix.test-type }}
|
||||
timeout-minutes: 25
|
||||
working-directory: test/s3/retention
|
||||
run: |
|
||||
set -x
|
||||
echo "=== System Information ==="
|
||||
uname -a
|
||||
free -h
|
||||
df -h
|
||||
echo "=== Starting Tests ==="
|
||||
|
||||
# Run tests with automatic server management
|
||||
# The test-with-server target handles server startup/shutdown automatically
|
||||
if [ "${{ matrix.test-type }}" = "quick" ]; then
|
||||
# Override TEST_PATTERN for quick tests only
|
||||
make test-with-server TEST_PATTERN="TestBasicRetentionWorkflow|TestRetentionModeCompliance|TestLegalHoldWorkflow"
|
||||
else
|
||||
# Run all retention tests
|
||||
make test-with-server
|
||||
fi
|
||||
|
||||
- name: Show server logs on failure
|
||||
if: failure()
|
||||
working-directory: test/s3/retention
|
||||
run: |
|
||||
echo "=== Server Logs ==="
|
||||
if [ -f weed-test.log ]; then
|
||||
echo "Last 100 lines of server logs:"
|
||||
tail -100 weed-test.log
|
||||
else
|
||||
echo "No server log file found"
|
||||
fi
|
||||
|
||||
echo "=== Test Environment ==="
|
||||
ps aux | grep -E "(weed|test)" || true
|
||||
netstat -tlnp | grep -E "(8333|9333|8080)" || true
|
||||
|
||||
- name: Upload test logs on failure
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: s3-retention-test-logs-${{ matrix.test-type }}
|
||||
path: test/s3/retention/weed-test*.log
|
||||
retention-days: 3
|
||||
|
||||
s3-retention-worm:
|
||||
name: S3 Retention WORM Integration Test
|
||||
runs-on: ubuntu-22.04
|
||||
timeout-minutes: 20
|
||||
|
||||
steps:
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
id: go
|
||||
|
||||
- name: Install SeaweedFS
|
||||
run: |
|
||||
go install -buildvcs=false
|
||||
|
||||
- name: Run WORM Integration Tests
|
||||
timeout-minutes: 15
|
||||
working-directory: test/s3/retention
|
||||
run: |
|
||||
set -x
|
||||
echo "=== System Information ==="
|
||||
uname -a
|
||||
free -h
|
||||
|
||||
# Run the WORM integration tests with automatic server management
|
||||
# The test-with-server target handles server startup/shutdown automatically
|
||||
make test-with-server TEST_PATTERN="TestWORM|TestRetentionExtendedAttributes|TestRetentionConcurrentOperations" || {
|
||||
echo "❌ WORM integration test failed, checking logs..."
|
||||
if [ -f weed-test.log ]; then
|
||||
echo "=== Server logs ==="
|
||||
tail -100 weed-test.log
|
||||
fi
|
||||
echo "=== Process information ==="
|
||||
ps aux | grep -E "(weed|test)" || true
|
||||
exit 1
|
||||
}
|
||||
|
||||
- name: Upload server logs on failure
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: s3-retention-worm-logs
|
||||
path: test/s3/retention/weed-test*.log
|
||||
retention-days: 3
|
||||
|
||||
s3-versioning-stress:
|
||||
name: S3 Versioning Stress Test
|
||||
runs-on: ubuntu-22.04
|
||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@@ -102,3 +102,4 @@ bin/weed
|
||||
weed_binary
|
||||
/test/s3/copying/filerldb2
|
||||
/filerldb2
|
||||
/test/s3/retention/test-volume-data
|
||||
|
360
test/s3/retention/Makefile
Normal file
360
test/s3/retention/Makefile
Normal file
@@ -0,0 +1,360 @@
|
||||
# S3 API Retention Test Makefile
|
||||
# This Makefile provides comprehensive targets for running S3 retention tests
|
||||
|
||||
.PHONY: help build-weed setup-server start-server stop-server test-retention test-retention-quick test-retention-comprehensive test-retention-worm test-all clean logs check-deps
|
||||
|
||||
# Configuration
|
||||
WEED_BINARY := ../../../weed/weed_binary
|
||||
S3_PORT := 8333
|
||||
MASTER_PORT := 9333
|
||||
VOLUME_PORT := 8080
|
||||
FILER_PORT := 8888
|
||||
TEST_TIMEOUT := 15m
|
||||
TEST_PATTERN := TestRetention
|
||||
|
||||
# Default target
|
||||
help:
|
||||
@echo "S3 API Retention Test Makefile"
|
||||
@echo ""
|
||||
@echo "Available targets:"
|
||||
@echo " help - Show this help message"
|
||||
@echo " build-weed - Build the SeaweedFS binary"
|
||||
@echo " check-deps - Check dependencies and build binary if needed"
|
||||
@echo " start-server - Start SeaweedFS server for testing"
|
||||
@echo " start-server-simple - Start server without process cleanup (for CI)"
|
||||
@echo " stop-server - Stop SeaweedFS server"
|
||||
@echo " test-retention - Run all retention tests"
|
||||
@echo " test-retention-quick - Run core retention tests only"
|
||||
@echo " test-retention-simple - Run tests without server management"
|
||||
@echo " test-retention-comprehensive - Run comprehensive retention tests"
|
||||
@echo " test-retention-worm - Run WORM integration tests"
|
||||
@echo " test-all - Run all S3 API retention tests"
|
||||
@echo " test-with-server - Start server, run tests, stop server"
|
||||
@echo " logs - Show server logs"
|
||||
@echo " clean - Clean up test artifacts and stop server"
|
||||
@echo " health-check - Check if server is accessible"
|
||||
@echo ""
|
||||
@echo "Configuration:"
|
||||
@echo " S3_PORT=${S3_PORT}"
|
||||
@echo " TEST_TIMEOUT=${TEST_TIMEOUT}"
|
||||
|
||||
# Build the SeaweedFS binary
|
||||
build-weed:
|
||||
@echo "Building SeaweedFS binary..."
|
||||
@cd ../../../weed && go build -o weed_binary .
|
||||
@chmod +x $(WEED_BINARY)
|
||||
@echo "✅ SeaweedFS binary built at $(WEED_BINARY)"
|
||||
|
||||
check-deps: build-weed
|
||||
@echo "Checking dependencies..."
|
||||
@echo "🔍 DEBUG: Checking Go installation..."
|
||||
@command -v go >/dev/null 2>&1 || (echo "Go is required but not installed" && exit 1)
|
||||
@echo "🔍 DEBUG: Go version: $$(go version)"
|
||||
@echo "🔍 DEBUG: Checking binary at $(WEED_BINARY)..."
|
||||
@test -f $(WEED_BINARY) || (echo "SeaweedFS binary not found at $(WEED_BINARY)" && exit 1)
|
||||
@echo "🔍 DEBUG: Binary size: $$(ls -lh $(WEED_BINARY) | awk '{print $$5}')"
|
||||
@echo "🔍 DEBUG: Binary permissions: $$(ls -la $(WEED_BINARY) | awk '{print $$1}')"
|
||||
@echo "🔍 DEBUG: Checking Go module dependencies..."
|
||||
@go list -m github.com/aws/aws-sdk-go-v2 >/dev/null 2>&1 || (echo "AWS SDK Go v2 not found. Run 'go mod tidy'." && exit 1)
|
||||
@go list -m github.com/stretchr/testify >/dev/null 2>&1 || (echo "Testify not found. Run 'go mod tidy'." && exit 1)
|
||||
@echo "✅ All dependencies are available"
|
||||
|
||||
# Start SeaweedFS server for testing
|
||||
start-server: check-deps
|
||||
@echo "Starting SeaweedFS server..."
|
||||
@echo "🔍 DEBUG: Current working directory: $$(pwd)"
|
||||
@echo "🔍 DEBUG: Checking for existing weed processes..."
|
||||
@ps aux | grep weed | grep -v grep || echo "No existing weed processes found"
|
||||
@echo "🔍 DEBUG: Cleaning up any existing PID file..."
|
||||
@rm -f weed-server.pid
|
||||
@echo "🔍 DEBUG: Checking for port conflicts..."
|
||||
@if netstat -tlnp 2>/dev/null | grep $(S3_PORT) >/dev/null; then \
|
||||
echo "⚠️ Port $(S3_PORT) is already in use, trying to find the process..."; \
|
||||
netstat -tlnp 2>/dev/null | grep $(S3_PORT) || true; \
|
||||
else \
|
||||
echo "✅ Port $(S3_PORT) is available"; \
|
||||
fi
|
||||
@echo "🔍 DEBUG: Checking binary at $(WEED_BINARY)"
|
||||
@ls -la $(WEED_BINARY) || (echo "❌ Binary not found!" && exit 1)
|
||||
@echo "🔍 DEBUG: Checking config file at ../../../docker/compose/s3.json"
|
||||
@ls -la ../../../docker/compose/s3.json || echo "⚠️ Config file not found, continuing without it"
|
||||
@echo "🔍 DEBUG: Creating volume directory..."
|
||||
@mkdir -p ./test-volume-data
|
||||
@echo "🔍 DEBUG: Launching SeaweedFS server in background..."
|
||||
@echo "🔍 DEBUG: Command: $(WEED_BINARY) server -debug -s3 -s3.port=$(S3_PORT) -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../../../docker/compose/s3.json -filer -filer.maxMB=64 -master.volumeSizeLimitMB=50 -volume.max=100 -dir=./test-volume-data -volume.preStopSeconds=1 -metricsPort=9324"
|
||||
@$(WEED_BINARY) server \
|
||||
-debug \
|
||||
-s3 \
|
||||
-s3.port=$(S3_PORT) \
|
||||
-s3.allowEmptyFolder=false \
|
||||
-s3.allowDeleteBucketNotEmpty=true \
|
||||
-s3.config=../../../docker/compose/s3.json \
|
||||
-filer \
|
||||
-filer.maxMB=64 \
|
||||
-master.volumeSizeLimitMB=50 \
|
||||
-volume.max=100 \
|
||||
-dir=./test-volume-data \
|
||||
-volume.preStopSeconds=1 \
|
||||
-metricsPort=9324 \
|
||||
> weed-test.log 2>&1 & echo $$! > weed-server.pid
|
||||
@echo "🔍 DEBUG: Server PID: $$(cat weed-server.pid 2>/dev/null || echo 'PID file not found')"
|
||||
@echo "🔍 DEBUG: Checking if PID is still running..."
|
||||
@sleep 2
|
||||
@if [ -f weed-server.pid ]; then \
|
||||
SERVER_PID=$$(cat weed-server.pid); \
|
||||
ps -p $$SERVER_PID || echo "⚠️ Server PID $$SERVER_PID not found after 2 seconds"; \
|
||||
else \
|
||||
echo "⚠️ PID file not found"; \
|
||||
fi
|
||||
@echo "🔍 DEBUG: Waiting for server to start (up to 90 seconds)..."
|
||||
@for i in $$(seq 1 90); do \
|
||||
echo "🔍 DEBUG: Attempt $$i/90 - checking port $(S3_PORT)"; \
|
||||
if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \
|
||||
echo "✅ SeaweedFS server started successfully on port $(S3_PORT) after $$i seconds"; \
|
||||
exit 0; \
|
||||
fi; \
|
||||
if [ $$i -eq 5 ]; then \
|
||||
echo "🔍 DEBUG: After 5 seconds, checking process and logs..."; \
|
||||
ps aux | grep weed | grep -v grep || echo "No weed processes found"; \
|
||||
if [ -f weed-test.log ]; then \
|
||||
echo "=== First server logs ==="; \
|
||||
head -20 weed-test.log; \
|
||||
fi; \
|
||||
fi; \
|
||||
if [ $$i -eq 15 ]; then \
|
||||
echo "🔍 DEBUG: After 15 seconds, checking port bindings..."; \
|
||||
netstat -tlnp 2>/dev/null | grep $(S3_PORT) || echo "Port $(S3_PORT) not bound"; \
|
||||
netstat -tlnp 2>/dev/null | grep 9333 || echo "Port 9333 not bound"; \
|
||||
netstat -tlnp 2>/dev/null | grep 8080 || echo "Port 8080 not bound"; \
|
||||
fi; \
|
||||
if [ $$i -eq 30 ]; then \
|
||||
echo "⚠️ Server taking longer than expected (30s), checking logs..."; \
|
||||
if [ -f weed-test.log ]; then \
|
||||
echo "=== Recent server logs ==="; \
|
||||
tail -20 weed-test.log; \
|
||||
fi; \
|
||||
fi; \
|
||||
sleep 1; \
|
||||
done; \
|
||||
echo "❌ Server failed to start within 90 seconds"; \
|
||||
echo "🔍 DEBUG: Final process check:"; \
|
||||
ps aux | grep weed | grep -v grep || echo "No weed processes found"; \
|
||||
echo "🔍 DEBUG: Final port check:"; \
|
||||
netstat -tlnp 2>/dev/null | grep -E "(8333|9333|8080)" || echo "No ports bound"; \
|
||||
echo "=== Full server logs ==="; \
|
||||
if [ -f weed-test.log ]; then \
|
||||
cat weed-test.log; \
|
||||
else \
|
||||
echo "No log file found"; \
|
||||
fi; \
|
||||
exit 1
|
||||
|
||||
# Stop SeaweedFS server
|
||||
stop-server:
|
||||
@echo "Stopping SeaweedFS server..."
|
||||
@if [ -f weed-server.pid ]; then \
|
||||
SERVER_PID=$$(cat weed-server.pid); \
|
||||
echo "Killing server PID $$SERVER_PID"; \
|
||||
if ps -p $$SERVER_PID >/dev/null 2>&1; then \
|
||||
kill -TERM $$SERVER_PID 2>/dev/null || true; \
|
||||
sleep 2; \
|
||||
if ps -p $$SERVER_PID >/dev/null 2>&1; then \
|
||||
echo "Process still running, sending KILL signal..."; \
|
||||
kill -KILL $$SERVER_PID 2>/dev/null || true; \
|
||||
sleep 1; \
|
||||
fi; \
|
||||
else \
|
||||
echo "Process $$SERVER_PID not found (already stopped)"; \
|
||||
fi; \
|
||||
rm -f weed-server.pid; \
|
||||
else \
|
||||
echo "No PID file found, checking for running processes..."; \
|
||||
echo "⚠️ Skipping automatic process cleanup to avoid CI issues"; \
|
||||
echo "Note: Any remaining weed processes should be cleaned up by the CI environment"; \
|
||||
fi
|
||||
@echo "✅ SeaweedFS server stopped"
|
||||
|
||||
# Show server logs
|
||||
logs:
|
||||
@if test -f weed-test.log; then \
|
||||
echo "=== SeaweedFS Server Logs ==="; \
|
||||
tail -f weed-test.log; \
|
||||
else \
|
||||
echo "No log file found. Server may not be running."; \
|
||||
fi
|
||||
|
||||
# Core retention tests (basic functionality)
|
||||
test-retention-quick: check-deps
|
||||
@echo "Running core S3 retention tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasicRetentionWorkflow|TestRetentionModeCompliance|TestLegalHoldWorkflow" .
|
||||
@echo "✅ Core retention tests completed"
|
||||
|
||||
# All retention tests (comprehensive)
|
||||
test-retention: check-deps
|
||||
@echo "Running all S3 retention tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" .
|
||||
@echo "✅ All retention tests completed"
|
||||
|
||||
# WORM integration tests
|
||||
test-retention-worm: check-deps
|
||||
@echo "Running WORM integration tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestWORM|TestRetentionExtendedAttributes|TestRetentionConcurrentOperations" .
|
||||
@echo "✅ WORM integration tests completed"
|
||||
|
||||
# Comprehensive retention tests (all features)
|
||||
test-retention-comprehensive: check-deps
|
||||
@echo "Running comprehensive S3 retention tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetention|TestObjectLock|TestLegalHold|TestWORM" .
|
||||
@echo "✅ Comprehensive retention tests completed"
|
||||
|
||||
# All tests without server management
|
||||
test-retention-simple: check-deps
|
||||
@echo "Running retention tests (assuming server is already running)..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) .
|
||||
@echo "✅ All retention tests completed"
|
||||
|
||||
# Start server, run tests, stop server
|
||||
test-with-server: start-server
|
||||
@echo "Running retention tests with managed server..."
|
||||
@sleep 5 # Give server time to fully start
|
||||
@make test-retention-comprehensive || (echo "Tests failed, stopping server..." && make stop-server && exit 1)
|
||||
@make stop-server
|
||||
@echo "✅ All tests completed with managed server"
|
||||
|
||||
# Health check
|
||||
health-check:
|
||||
@echo "Checking server health..."
|
||||
@if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \
|
||||
echo "✅ Server is accessible on port $(S3_PORT)"; \
|
||||
else \
|
||||
echo "❌ Server is not accessible on port $(S3_PORT)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
# Clean up
|
||||
clean:
|
||||
@echo "Cleaning up test artifacts..."
|
||||
@make stop-server
|
||||
@rm -f weed-test.log
|
||||
@rm -f weed-server.pid
|
||||
@rm -rf ./test-volume-data
|
||||
@echo "✅ Cleanup completed"
|
||||
|
||||
# Individual test targets for specific functionality
|
||||
test-basic-retention:
|
||||
@echo "Running basic retention tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasicRetentionWorkflow" .
|
||||
|
||||
test-compliance-retention:
|
||||
@echo "Running compliance retention tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionModeCompliance" .
|
||||
|
||||
test-legal-hold:
|
||||
@echo "Running legal hold tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestLegalHoldWorkflow" .
|
||||
|
||||
test-object-lock-config:
|
||||
@echo "Running object lock configuration tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestObjectLockConfiguration" .
|
||||
|
||||
test-retention-versions:
|
||||
@echo "Running retention with versions tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionWithVersions" .
|
||||
|
||||
test-retention-combination:
|
||||
@echo "Running retention and legal hold combination tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionAndLegalHoldCombination" .
|
||||
|
||||
test-expired-retention:
|
||||
@echo "Running expired retention tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestExpiredRetention" .
|
||||
|
||||
test-retention-errors:
|
||||
@echo "Running retention error case tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionErrorCases" .
|
||||
|
||||
# WORM-specific test targets
|
||||
test-worm-integration:
|
||||
@echo "Running WORM integration tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestWORMRetentionIntegration" .
|
||||
|
||||
test-worm-legacy:
|
||||
@echo "Running WORM legacy compatibility tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestWORMLegacyCompatibility" .
|
||||
|
||||
test-retention-overwrite:
|
||||
@echo "Running retention overwrite protection tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionOverwriteProtection" .
|
||||
|
||||
test-retention-bulk:
|
||||
@echo "Running retention bulk operations tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionBulkOperations" .
|
||||
|
||||
test-retention-multipart:
|
||||
@echo "Running retention multipart upload tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionWithMultipartUpload" .
|
||||
|
||||
test-retention-extended-attrs:
|
||||
@echo "Running retention extended attributes tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionExtendedAttributes" .
|
||||
|
||||
test-retention-defaults:
|
||||
@echo "Running retention bucket defaults tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionBucketDefaults" .
|
||||
|
||||
test-retention-concurrent:
|
||||
@echo "Running retention concurrent operations tests..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionConcurrentOperations" .
|
||||
|
||||
# Development targets
|
||||
dev-start: start-server
|
||||
@echo "Development server started. Access S3 API at http://localhost:$(S3_PORT)"
|
||||
@echo "To stop: make stop-server"
|
||||
|
||||
dev-test: check-deps
|
||||
@echo "Running tests in development mode..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasicRetentionWorkflow" .
|
||||
|
||||
# CI targets
|
||||
ci-test: check-deps
|
||||
@echo "Running tests in CI mode..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -race .
|
||||
|
||||
# All targets
|
||||
test-all: test-retention test-retention-worm
|
||||
@echo "✅ All S3 retention tests completed"
|
||||
|
||||
# Benchmark targets
|
||||
benchmark-retention:
|
||||
@echo "Running retention performance benchmarks..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -bench=. -benchmem .
|
||||
|
||||
# Coverage targets
|
||||
coverage:
|
||||
@echo "Running tests with coverage..."
|
||||
@go test -v -timeout=$(TEST_TIMEOUT) -coverprofile=coverage.out .
|
||||
@go tool cover -html=coverage.out -o coverage.html
|
||||
@echo "Coverage report generated: coverage.html"
|
||||
|
||||
# Format and lint
|
||||
fmt:
|
||||
@echo "Formatting Go code..."
|
||||
@go fmt .
|
||||
|
||||
lint:
|
||||
@echo "Running linter..."
|
||||
@golint . || echo "golint not available, skipping..."
|
||||
|
||||
# Install dependencies for development
|
||||
install-deps:
|
||||
@echo "Installing Go dependencies..."
|
||||
@go mod tidy
|
||||
@go mod download
|
||||
|
||||
# Show current configuration
|
||||
show-config:
|
||||
@echo "Current configuration:"
|
||||
@echo " WEED_BINARY: $(WEED_BINARY)"
|
||||
@echo " S3_PORT: $(S3_PORT)"
|
||||
@echo " TEST_TIMEOUT: $(TEST_TIMEOUT)"
|
||||
@echo " TEST_PATTERN: $(TEST_PATTERN)"
|
264
test/s3/retention/README.md
Normal file
264
test/s3/retention/README.md
Normal file
@@ -0,0 +1,264 @@
|
||||
# SeaweedFS S3 Object Retention Tests
|
||||
|
||||
This directory contains comprehensive tests for SeaweedFS S3 Object Retention functionality, including Object Lock, Legal Hold, and WORM (Write Once Read Many) capabilities.
|
||||
|
||||
## Overview
|
||||
|
||||
The test suite validates AWS S3-compatible object retention features including:
|
||||
|
||||
- **Object Retention**: GOVERNANCE and COMPLIANCE modes with retain-until-date
|
||||
- **Legal Hold**: Independent protection that can be applied/removed
|
||||
- **Object Lock Configuration**: Bucket-level default retention policies
|
||||
- **WORM Integration**: Compatibility with legacy WORM functionality
|
||||
- **Version-specific Retention**: Different retention policies per object version
|
||||
- **Enforcement**: Protection against deletion and overwriting
|
||||
|
||||
## Test Files
|
||||
|
||||
- `s3_retention_test.go` - Core retention functionality tests
|
||||
- `s3_worm_integration_test.go` - WORM integration and advanced scenarios
|
||||
- `test_config.json` - Test configuration (endpoints, credentials)
|
||||
- `Makefile` - Comprehensive test automation
|
||||
- `go.mod` - Go module dependencies
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Go 1.21 or later
|
||||
- SeaweedFS binary built (`make build-weed`)
|
||||
- AWS SDK Go v2
|
||||
- Testify testing framework
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Build and Start Server
|
||||
```bash
|
||||
# Build SeaweedFS and start test server
|
||||
make start-server
|
||||
```
|
||||
|
||||
### 2. Run Tests
|
||||
```bash
|
||||
# Run core retention tests
|
||||
make test-retention-quick
|
||||
|
||||
# Run all retention tests
|
||||
make test-retention
|
||||
|
||||
# Run WORM integration tests
|
||||
make test-retention-worm
|
||||
|
||||
# Run all tests with managed server
|
||||
make test-with-server
|
||||
```
|
||||
|
||||
### 3. Cleanup
|
||||
```bash
|
||||
make clean
|
||||
```
|
||||
|
||||
## Test Categories
|
||||
|
||||
### Core Retention Tests
|
||||
- `TestBasicRetentionWorkflow` - Basic GOVERNANCE mode retention
|
||||
- `TestRetentionModeCompliance` - COMPLIANCE mode (immutable)
|
||||
- `TestLegalHoldWorkflow` - Legal hold on/off functionality
|
||||
- `TestObjectLockConfiguration` - Bucket object lock settings
|
||||
|
||||
### Advanced Tests
|
||||
- `TestRetentionWithVersions` - Version-specific retention policies
|
||||
- `TestRetentionAndLegalHoldCombination` - Multiple protection types
|
||||
- `TestExpiredRetention` - Post-expiration behavior
|
||||
- `TestRetentionErrorCases` - Error handling and edge cases
|
||||
|
||||
### WORM Integration Tests
|
||||
- `TestWORMRetentionIntegration` - New retention + legacy WORM
|
||||
- `TestWORMLegacyCompatibility` - Backward compatibility
|
||||
- `TestRetentionOverwriteProtection` - Prevent overwrites
|
||||
- `TestRetentionBulkOperations` - Bulk delete with retention
|
||||
- `TestRetentionWithMultipartUpload` - Multipart upload retention
|
||||
- `TestRetentionExtendedAttributes` - Extended attribute storage
|
||||
- `TestRetentionBucketDefaults` - Default retention application
|
||||
- `TestRetentionConcurrentOperations` - Concurrent operation safety
|
||||
|
||||
## Individual Test Targets
|
||||
|
||||
Run specific test categories:
|
||||
|
||||
```bash
|
||||
# Basic functionality
|
||||
make test-basic-retention
|
||||
make test-compliance-retention
|
||||
make test-legal-hold
|
||||
|
||||
# Advanced features
|
||||
make test-retention-versions
|
||||
make test-retention-combination
|
||||
make test-expired-retention
|
||||
|
||||
# WORM integration
|
||||
make test-worm-integration
|
||||
make test-worm-legacy
|
||||
make test-retention-bulk
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Server Configuration
|
||||
The tests use these default settings:
|
||||
- S3 Port: 8333
|
||||
- Test timeout: 15 minutes
|
||||
- Volume directory: `./test-volume-data`
|
||||
|
||||
### Test Configuration (`test_config.json`)
|
||||
```json
|
||||
{
|
||||
"endpoint": "http://localhost:8333",
|
||||
"access_key": "some_access_key1",
|
||||
"secret_key": "some_secret_key1",
|
||||
"region": "us-east-1",
|
||||
"bucket_prefix": "test-retention-",
|
||||
"use_ssl": false,
|
||||
"skip_verify_ssl": true
|
||||
}
|
||||
```
|
||||
|
||||
## Expected Behavior
|
||||
|
||||
### GOVERNANCE Mode
|
||||
- Objects protected until retain-until-date
|
||||
- Can be bypassed with `x-amz-bypass-governance-retention` header
|
||||
- Supports time extension (not reduction)
|
||||
|
||||
### COMPLIANCE Mode
|
||||
- Objects immutably protected until retain-until-date
|
||||
- Cannot be bypassed or shortened
|
||||
- Strictest protection level
|
||||
|
||||
### Legal Hold
|
||||
- Independent ON/OFF protection
|
||||
- Can coexist with retention policies
|
||||
- Must be explicitly removed to allow deletion
|
||||
|
||||
### Version Support
|
||||
- Each object version can have individual retention
|
||||
- Applies to both versioned and non-versioned buckets
|
||||
- Version-specific retention retrieval
|
||||
|
||||
## Development
|
||||
|
||||
### Running in Development Mode
|
||||
```bash
|
||||
# Start server for development
|
||||
make dev-start
|
||||
|
||||
# Run quick test
|
||||
make dev-test
|
||||
```
|
||||
|
||||
### Code Quality
|
||||
```bash
|
||||
# Format code
|
||||
make fmt
|
||||
|
||||
# Run linter
|
||||
make lint
|
||||
|
||||
# Generate coverage report
|
||||
make coverage
|
||||
```
|
||||
|
||||
### Performance Testing
|
||||
```bash
|
||||
# Run benchmarks
|
||||
make benchmark-retention
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Server Won't Start
|
||||
```bash
|
||||
# Check if port is in use
|
||||
netstat -tlnp | grep 8333
|
||||
|
||||
# View server logs
|
||||
make logs
|
||||
|
||||
# Force cleanup
|
||||
make clean
|
||||
```
|
||||
|
||||
### Test Failures
|
||||
```bash
|
||||
# Run with verbose output
|
||||
go test -v -timeout=15m .
|
||||
|
||||
# Run specific test
|
||||
go test -v -run TestBasicRetentionWorkflow .
|
||||
|
||||
# Check server health
|
||||
make health-check
|
||||
```
|
||||
|
||||
### Dependencies
|
||||
```bash
|
||||
# Install/update dependencies
|
||||
make install-deps
|
||||
|
||||
# Check dependency status
|
||||
make check-deps
|
||||
```
|
||||
|
||||
## Integration with SeaweedFS
|
||||
|
||||
These tests validate the retention implementation in:
|
||||
- `weed/s3api/s3api_object_retention.go` - Core retention logic
|
||||
- `weed/s3api/s3api_object_handlers_retention.go` - HTTP handlers
|
||||
- `weed/s3api/s3_constants/extend_key.go` - Extended attribute keys
|
||||
- `weed/s3api/s3err/s3api_errors.go` - Error definitions
|
||||
- `weed/s3api/s3api_object_handlers_delete.go` - Deletion enforcement
|
||||
- `weed/s3api/s3api_object_handlers_put.go` - Upload enforcement
|
||||
|
||||
## AWS CLI Compatibility
|
||||
|
||||
The retention implementation supports standard AWS CLI commands:
|
||||
|
||||
```bash
|
||||
# Set object retention
|
||||
aws s3api put-object-retention \
|
||||
--bucket mybucket \
|
||||
--key myobject \
|
||||
--retention Mode=GOVERNANCE,RetainUntilDate=2024-12-31T23:59:59Z
|
||||
|
||||
# Get object retention
|
||||
aws s3api get-object-retention \
|
||||
--bucket mybucket \
|
||||
--key myobject
|
||||
|
||||
# Set legal hold
|
||||
aws s3api put-object-legal-hold \
|
||||
--bucket mybucket \
|
||||
--key myobject \
|
||||
--legal-hold Status=ON
|
||||
|
||||
# Configure bucket object lock
|
||||
aws s3api put-object-lock-configuration \
|
||||
--bucket mybucket \
|
||||
--object-lock-configuration ObjectLockEnabled=Enabled,Rule='{DefaultRetention={Mode=GOVERNANCE,Days=30}}'
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
When adding new retention tests:
|
||||
|
||||
1. Follow existing test patterns
|
||||
2. Use descriptive test names
|
||||
3. Include both positive and negative test cases
|
||||
4. Test error conditions
|
||||
5. Update this README with new test descriptions
|
||||
6. Add appropriate Makefile targets for new test categories
|
||||
|
||||
## References
|
||||
|
||||
- [AWS S3 Object Lock Documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html)
|
||||
- [AWS S3 API Reference - Object Retention](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html)
|
||||
- [SeaweedFS S3 API Documentation](https://github.com/seaweedfs/seaweedfs/wiki/Amazon-S3-API)
|
31
test/s3/retention/go.mod
Normal file
31
test/s3/retention/go.mod
Normal file
@@ -0,0 +1,31 @@
|
||||
module github.com/seaweedfs/seaweedfs/test/s3/retention
|
||||
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/aws/aws-sdk-go-v2 v1.21.2
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.45
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.43
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0
|
||||
github.com/stretchr/testify v1.8.4
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect
|
||||
github.com/aws/smithy-go v1.15.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
62
test/s3/retention/go.sum
Normal file
62
test/s3/retention/go.sum
Normal file
@@ -0,0 +1,62 @@
|
||||
github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M=
|
||||
github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA=
|
||||
github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.45 h1:Aka9bI7n8ysuwPeFdm77nfbyHCAKQ3z9ghB3S/38zes=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.45/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 h1:nFBQlGtkbPzp/NjZLuFxRqmT91rLJkgvsEQs68h962Y=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7E0LDl8K9/G36gjlqca5iQbaqc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4/go.mod h1:1PrKYwxTM+zjpw9Y41KFtoJCQrJ34Z47Y4VgVbfndjo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6 h1:wmGLw2i8ZTlHLw7a9ULGfQbuccw8uIiNr6sol5bFzc8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6/go.mod h1:Q0Hq2X/NuL7z8b1Dww8rmOFl+jzusKEcyvkKspwdpyc=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14/go.mod h1:dDilntgHy9WnHXsh7dDtUPgHKEfTJIBUTHM8OWm0f/0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15 h1:7R8uRYyXzdD71KWVCL78lJZltah6VVznXBazvKjfH58=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15/go.mod h1:26SQUPcTNgV1Tapwdt4a1rOsYRsnBsJHLMPoxK2b0d8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36/go.mod h1:lGnOkH9NJATw0XEPcAknFBj3zzNTEGRHtSw+CwC1YTg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38 h1:skaFGzv+3kA+v2BPKhuekeb1Hbb105+44r8ASC+q5SE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38/go.mod h1:epIZoRSSbRIwLPJU5F+OldHhwZPBdpDeQkRdCeY3+00=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35/go.mod h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4/go.mod h1:LhTyt8J04LL+9cIt7pYJ5lbS/U98ZmXovLOR/4LUsk8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6 h1:9ulSU5ClouoPIYhDQdg9tpl83d5Yb91PXTKK+17q+ow=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6/go.mod h1:lnc2taBsR9nTlz9meD+lhFZZ9EWY712QHrRflWpTcOA=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0 h1:wl5dxN1NONhTDQD9uaEvNsDRX29cBmGED/nl0jkWlt4=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0/go.mod h1:rDGMZA7f4pbmTtPOk5v5UM2lmX6UAbRnMDJeDvnH7AM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ=
|
||||
github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8=
|
||||
github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
694
test/s3/retention/s3_retention_test.go
Normal file
694
test/s3/retention/s3_retention_test.go
Normal file
@@ -0,0 +1,694 @@
|
||||
package s3api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// S3TestConfig holds configuration for S3 tests
|
||||
type S3TestConfig struct {
|
||||
Endpoint string
|
||||
AccessKey string
|
||||
SecretKey string
|
||||
Region string
|
||||
BucketPrefix string
|
||||
UseSSL bool
|
||||
SkipVerifySSL bool
|
||||
}
|
||||
|
||||
// Default test configuration - should match test_config.json
|
||||
var defaultConfig = &S3TestConfig{
|
||||
Endpoint: "http://localhost:8333", // Default SeaweedFS S3 port
|
||||
AccessKey: "some_access_key1",
|
||||
SecretKey: "some_secret_key1",
|
||||
Region: "us-east-1",
|
||||
BucketPrefix: "test-retention-",
|
||||
UseSSL: false,
|
||||
SkipVerifySSL: true,
|
||||
}
|
||||
|
||||
// getS3Client creates an AWS S3 client for testing
|
||||
func getS3Client(t *testing.T) *s3.Client {
|
||||
cfg, err := config.LoadDefaultConfig(context.TODO(),
|
||||
config.WithRegion(defaultConfig.Region),
|
||||
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
|
||||
defaultConfig.AccessKey,
|
||||
defaultConfig.SecretKey,
|
||||
"",
|
||||
)),
|
||||
config.WithEndpointResolverWithOptions(aws.EndpointResolverWithOptionsFunc(
|
||||
func(service, region string, options ...interface{}) (aws.Endpoint, error) {
|
||||
return aws.Endpoint{
|
||||
URL: defaultConfig.Endpoint,
|
||||
SigningRegion: defaultConfig.Region,
|
||||
HostnameImmutable: true,
|
||||
}, nil
|
||||
})),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
return s3.NewFromConfig(cfg, func(o *s3.Options) {
|
||||
o.UsePathStyle = true // Important for SeaweedFS
|
||||
})
|
||||
}
|
||||
|
||||
// getNewBucketName generates a unique bucket name
|
||||
func getNewBucketName() string {
|
||||
timestamp := time.Now().UnixNano()
|
||||
return fmt.Sprintf("%s%d", defaultConfig.BucketPrefix, timestamp)
|
||||
}
|
||||
|
||||
// createBucket creates a new bucket for testing
|
||||
func createBucket(t *testing.T, client *s3.Client, bucketName string) {
|
||||
_, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// deleteBucket deletes a bucket and all its contents
|
||||
func deleteBucket(t *testing.T, client *s3.Client, bucketName string) {
|
||||
// First, try to delete all objects and versions
|
||||
err := deleteAllObjectVersions(t, client, bucketName)
|
||||
if err != nil {
|
||||
t.Logf("Warning: failed to delete all object versions in first attempt: %v", err)
|
||||
// Try once more in case of transient errors
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
err = deleteAllObjectVersions(t, client, bucketName)
|
||||
if err != nil {
|
||||
t.Logf("Warning: failed to delete all object versions in second attempt: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Wait a bit for eventual consistency
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Try to delete the bucket multiple times in case of eventual consistency issues
|
||||
for retries := 0; retries < 3; retries++ {
|
||||
_, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
})
|
||||
if err == nil {
|
||||
t.Logf("Successfully deleted bucket %s", bucketName)
|
||||
return
|
||||
}
|
||||
|
||||
t.Logf("Warning: failed to delete bucket %s (attempt %d): %v", bucketName, retries+1, err)
|
||||
if retries < 2 {
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// deleteAllObjectVersions deletes all object versions in a bucket
|
||||
func deleteAllObjectVersions(t *testing.T, client *s3.Client, bucketName string) error {
|
||||
// List all object versions
|
||||
paginator := s3.NewListObjectVersionsPaginator(client, &s3.ListObjectVersionsInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
})
|
||||
|
||||
for paginator.HasMorePages() {
|
||||
page, err := paginator.NextPage(context.TODO())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var objectsToDelete []types.ObjectIdentifier
|
||||
|
||||
// Add versions - first try to remove retention/legal hold
|
||||
for _, version := range page.Versions {
|
||||
// Try to remove legal hold if present
|
||||
_, err := client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: version.Key,
|
||||
VersionId: version.VersionId,
|
||||
LegalHold: &types.ObjectLockLegalHold{
|
||||
Status: types.ObjectLockLegalHoldStatusOff,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
// Legal hold might not be set, ignore error
|
||||
t.Logf("Note: could not remove legal hold for %s@%s: %v", *version.Key, *version.VersionId, err)
|
||||
}
|
||||
|
||||
objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{
|
||||
Key: version.Key,
|
||||
VersionId: version.VersionId,
|
||||
})
|
||||
}
|
||||
|
||||
// Add delete markers
|
||||
for _, deleteMarker := range page.DeleteMarkers {
|
||||
objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{
|
||||
Key: deleteMarker.Key,
|
||||
VersionId: deleteMarker.VersionId,
|
||||
})
|
||||
}
|
||||
|
||||
// Delete objects in batches with bypass governance retention
|
||||
if len(objectsToDelete) > 0 {
|
||||
_, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
BypassGovernanceRetention: true,
|
||||
Delete: &types.Delete{
|
||||
Objects: objectsToDelete,
|
||||
Quiet: true,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Logf("Warning: batch delete failed, trying individual deletion: %v", err)
|
||||
// Try individual deletion for each object
|
||||
for _, obj := range objectsToDelete {
|
||||
_, delErr := client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: obj.Key,
|
||||
VersionId: obj.VersionId,
|
||||
BypassGovernanceRetention: true,
|
||||
})
|
||||
if delErr != nil {
|
||||
t.Logf("Warning: failed to delete object %s@%s: %v", *obj.Key, *obj.VersionId, delErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// enableVersioning enables versioning on a bucket
|
||||
func enableVersioning(t *testing.T, client *s3.Client, bucketName string) {
|
||||
_, err := client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
VersioningConfiguration: &types.VersioningConfiguration{
|
||||
Status: types.BucketVersioningStatusEnabled,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// putObject puts an object into a bucket
|
||||
func putObject(t *testing.T, client *s3.Client, bucketName, key, content string) *s3.PutObjectOutput {
|
||||
resp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
Body: strings.NewReader(content),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return resp
|
||||
}
|
||||
|
||||
// cleanupAllTestBuckets cleans up any leftover test buckets
|
||||
func cleanupAllTestBuckets(t *testing.T, client *s3.Client) {
|
||||
// List all buckets
|
||||
listResp, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
|
||||
if err != nil {
|
||||
t.Logf("Warning: failed to list buckets for cleanup: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete buckets that match our test prefix
|
||||
for _, bucket := range listResp.Buckets {
|
||||
if bucket.Name != nil && strings.HasPrefix(*bucket.Name, defaultConfig.BucketPrefix) {
|
||||
t.Logf("Cleaning up leftover test bucket: %s", *bucket.Name)
|
||||
deleteBucket(t, client, *bucket.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestBasicRetentionWorkflow tests the basic retention functionality
|
||||
func TestBasicRetentionWorkflow(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
|
||||
// Enable versioning (required for retention)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Create object
|
||||
key := "test-object"
|
||||
content := "test content for retention"
|
||||
putResp := putObject(t, client, bucketName, key, content)
|
||||
require.NotNil(t, putResp.VersionId)
|
||||
|
||||
// Set retention with GOVERNANCE mode
|
||||
retentionUntil := time.Now().Add(24 * time.Hour)
|
||||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
RetainUntilDate: aws.Time(retentionUntil),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get retention and verify it was set correctly
|
||||
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode)
|
||||
assert.WithinDuration(t, retentionUntil, *retentionResp.Retention.RetainUntilDate, time.Second)
|
||||
|
||||
// Try to delete object without bypass - should fail
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.Error(t, err)
|
||||
|
||||
// Delete object with bypass governance - should succeed
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
BypassGovernanceRetention: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// TestRetentionModeCompliance tests COMPLIANCE mode retention
|
||||
func TestRetentionModeCompliance(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Create object
|
||||
key := "compliance-test-object"
|
||||
content := "compliance test content"
|
||||
putResp := putObject(t, client, bucketName, key, content)
|
||||
require.NotNil(t, putResp.VersionId)
|
||||
|
||||
// Set retention with COMPLIANCE mode
|
||||
retentionUntil := time.Now().Add(1 * time.Hour)
|
||||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeCompliance,
|
||||
RetainUntilDate: aws.Time(retentionUntil),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get retention and verify
|
||||
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, types.ObjectLockRetentionModeCompliance, retentionResp.Retention.Mode)
|
||||
|
||||
// Try to delete object with bypass - should still fail (compliance mode)
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
BypassGovernanceRetention: true,
|
||||
})
|
||||
require.Error(t, err)
|
||||
|
||||
// Try to delete object without bypass - should also fail
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// TestLegalHoldWorkflow tests legal hold functionality
|
||||
func TestLegalHoldWorkflow(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Create object
|
||||
key := "legal-hold-test-object"
|
||||
content := "legal hold test content"
|
||||
putResp := putObject(t, client, bucketName, key, content)
|
||||
require.NotNil(t, putResp.VersionId)
|
||||
|
||||
// Set legal hold ON
|
||||
_, err := client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
LegalHold: &types.ObjectLockLegalHold{
|
||||
Status: types.ObjectLockLegalHoldStatusOn,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get legal hold and verify
|
||||
legalHoldResp, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, types.ObjectLockLegalHoldStatusOn, legalHoldResp.LegalHold.Status)
|
||||
|
||||
// Try to delete object - should fail due to legal hold
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.Error(t, err)
|
||||
|
||||
// Remove legal hold
|
||||
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
LegalHold: &types.ObjectLockLegalHold{
|
||||
Status: types.ObjectLockLegalHoldStatusOff,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify legal hold is off
|
||||
legalHoldResp, err = client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, types.ObjectLockLegalHoldStatusOff, legalHoldResp.LegalHold.Status)
|
||||
|
||||
// Now delete should succeed
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// TestObjectLockConfiguration tests bucket object lock configuration
|
||||
func TestObjectLockConfiguration(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
// Use a more unique bucket name to avoid conflicts
|
||||
bucketName := fmt.Sprintf("object-lock-config-%d-%d", time.Now().UnixNano(), time.Now().UnixMilli()%10000)
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Set object lock configuration
|
||||
_, err := client.PutObjectLockConfiguration(context.TODO(), &s3.PutObjectLockConfigurationInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
ObjectLockConfiguration: &types.ObjectLockConfiguration{
|
||||
ObjectLockEnabled: types.ObjectLockEnabledEnabled,
|
||||
Rule: &types.ObjectLockRule{
|
||||
DefaultRetention: &types.DefaultRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
Days: 30,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Logf("PutObjectLockConfiguration failed (may not be supported): %v", err)
|
||||
t.Skip("Object lock configuration not supported, skipping test")
|
||||
return
|
||||
}
|
||||
|
||||
// Get object lock configuration and verify
|
||||
configResp, err := client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, types.ObjectLockEnabledEnabled, configResp.ObjectLockConfiguration.ObjectLockEnabled)
|
||||
assert.Equal(t, types.ObjectLockRetentionModeGovernance, configResp.ObjectLockConfiguration.Rule.DefaultRetention.Mode)
|
||||
assert.Equal(t, int32(30), configResp.ObjectLockConfiguration.Rule.DefaultRetention.Days)
|
||||
}
|
||||
|
||||
// TestRetentionWithVersions tests retention with specific object versions
|
||||
func TestRetentionWithVersions(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Create multiple versions of the same object
|
||||
key := "versioned-retention-test"
|
||||
content1 := "version 1 content"
|
||||
content2 := "version 2 content"
|
||||
|
||||
putResp1 := putObject(t, client, bucketName, key, content1)
|
||||
require.NotNil(t, putResp1.VersionId)
|
||||
|
||||
putResp2 := putObject(t, client, bucketName, key, content2)
|
||||
require.NotNil(t, putResp2.VersionId)
|
||||
|
||||
// Set retention on first version only
|
||||
retentionUntil := time.Now().Add(1 * time.Hour)
|
||||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
VersionId: putResp1.VersionId,
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
RetainUntilDate: aws.Time(retentionUntil),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get retention for first version
|
||||
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
VersionId: putResp1.VersionId,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode)
|
||||
|
||||
// Try to get retention for second version - should fail (no retention set)
|
||||
_, err = client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
VersionId: putResp2.VersionId,
|
||||
})
|
||||
require.Error(t, err)
|
||||
|
||||
// Delete second version should succeed (no retention)
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
VersionId: putResp2.VersionId,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Delete first version should fail (has retention)
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
VersionId: putResp1.VersionId,
|
||||
})
|
||||
require.Error(t, err)
|
||||
|
||||
// Delete first version with bypass should succeed
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
VersionId: putResp1.VersionId,
|
||||
BypassGovernanceRetention: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// TestRetentionAndLegalHoldCombination tests retention and legal hold together
|
||||
func TestRetentionAndLegalHoldCombination(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Create object
|
||||
key := "combined-protection-test"
|
||||
content := "combined protection test content"
|
||||
putResp := putObject(t, client, bucketName, key, content)
|
||||
require.NotNil(t, putResp.VersionId)
|
||||
|
||||
// Set both retention and legal hold
|
||||
retentionUntil := time.Now().Add(1 * time.Hour)
|
||||
|
||||
// Set retention
|
||||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
RetainUntilDate: aws.Time(retentionUntil),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set legal hold
|
||||
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
LegalHold: &types.ObjectLockLegalHold{
|
||||
Status: types.ObjectLockLegalHoldStatusOn,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Try to delete with bypass governance - should still fail due to legal hold
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
BypassGovernanceRetention: true,
|
||||
})
|
||||
require.Error(t, err)
|
||||
|
||||
// Remove legal hold
|
||||
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
LegalHold: &types.ObjectLockLegalHold{
|
||||
Status: types.ObjectLockLegalHoldStatusOff,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Now delete with bypass governance should succeed
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
BypassGovernanceRetention: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// TestExpiredRetention tests that objects can be deleted after retention expires
|
||||
func TestExpiredRetention(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Create object
|
||||
key := "expired-retention-test"
|
||||
content := "expired retention test content"
|
||||
putResp := putObject(t, client, bucketName, key, content)
|
||||
require.NotNil(t, putResp.VersionId)
|
||||
|
||||
// Set retention for a very short time (2 seconds)
|
||||
retentionUntil := time.Now().Add(2 * time.Second)
|
||||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
RetainUntilDate: aws.Time(retentionUntil),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Try to delete immediately - should fail
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.Error(t, err)
|
||||
|
||||
// Wait for retention to expire
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// Now delete should succeed
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// TestRetentionErrorCases tests various error conditions
|
||||
func TestRetentionErrorCases(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Test setting retention on non-existent object
|
||||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String("non-existent-key"),
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
RetainUntilDate: aws.Time(time.Now().Add(1 * time.Hour)),
|
||||
},
|
||||
})
|
||||
require.Error(t, err)
|
||||
|
||||
// Test getting retention on non-existent object
|
||||
_, err = client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String("non-existent-key"),
|
||||
})
|
||||
require.Error(t, err)
|
||||
|
||||
// Test setting legal hold on non-existent object
|
||||
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String("non-existent-key"),
|
||||
LegalHold: &types.ObjectLockLegalHold{
|
||||
Status: types.ObjectLockLegalHoldStatusOn,
|
||||
},
|
||||
})
|
||||
require.Error(t, err)
|
||||
|
||||
// Test getting legal hold on non-existent object
|
||||
_, err = client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String("non-existent-key"),
|
||||
})
|
||||
require.Error(t, err)
|
||||
|
||||
// Test setting retention with past date
|
||||
key := "retention-past-date-test"
|
||||
content := "test content"
|
||||
putObject(t, client, bucketName, key, content)
|
||||
|
||||
pastDate := time.Now().Add(-1 * time.Hour)
|
||||
_, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
RetainUntilDate: aws.Time(pastDate),
|
||||
},
|
||||
})
|
||||
require.Error(t, err)
|
||||
}
|
519	test/s3/retention/s3_worm_integration_test.go	Normal file
@@ -0,0 +1,519 @@
|
||||
package s3api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestWORMRetentionIntegration tests that both retention and legacy WORM work together
|
||||
func TestWORMRetentionIntegration(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Create object
|
||||
key := "worm-retention-integration-test"
|
||||
content := "worm retention integration test content"
|
||||
putResp := putObject(t, client, bucketName, key, content)
|
||||
require.NotNil(t, putResp.VersionId)
|
||||
|
||||
// Set retention (new system)
|
||||
retentionUntil := time.Now().Add(1 * time.Hour)
|
||||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
RetainUntilDate: aws.Time(retentionUntil),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Try to delete - should fail due to retention
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.Error(t, err)
|
||||
|
||||
// Delete with bypass should succeed
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
BypassGovernanceRetention: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// TestWORMLegacyCompatibility tests that legacy WORM functionality still works
|
||||
func TestWORMLegacyCompatibility(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Create object with legacy WORM headers (if supported)
|
||||
key := "legacy-worm-test"
|
||||
content := "legacy worm test content"
|
||||
|
||||
// Try to create object with legacy WORM TTL header
|
||||
putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
Body: strings.NewReader(content),
|
||||
// Add legacy WORM headers if supported
|
||||
Metadata: map[string]string{
|
||||
"x-amz-meta-worm-ttl": fmt.Sprintf("%d", time.Now().Add(1*time.Hour).Unix()),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, putResp.VersionId)
|
||||
|
||||
// Object should be created successfully
|
||||
resp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp.Metadata)
|
||||
}
|
||||
|
||||
// TestRetentionOverwriteProtection tests that retention prevents overwrites
|
||||
func TestRetentionOverwriteProtection(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Create object
|
||||
key := "overwrite-protection-test"
|
||||
content := "original content"
|
||||
putResp := putObject(t, client, bucketName, key, content)
|
||||
require.NotNil(t, putResp.VersionId)
|
||||
|
||||
// Verify object exists before setting retention
|
||||
_, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.NoError(t, err, "Object should exist before setting retention")
|
||||
|
||||
// Set retention with specific version ID
|
||||
retentionUntil := time.Now().Add(1 * time.Hour)
|
||||
_, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
VersionId: putResp.VersionId,
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
RetainUntilDate: aws.Time(retentionUntil),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Try to overwrite the object - with versioning enabled this may create a new version; without versioning, retention should block the overwrite
|
||||
content2 := "new content"
|
||||
_, err = client.PutObject(context.TODO(), &s3.PutObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
Body: strings.NewReader(content2),
|
||||
})
|
||||
// Note: In a real scenario, this might fail or create a new version
|
||||
// The actual behavior depends on the implementation
|
||||
if err != nil {
|
||||
t.Logf("Expected behavior: overwrite blocked due to retention: %v", err)
|
||||
} else {
|
||||
t.Logf("Overwrite allowed, likely created new version")
|
||||
}
|
||||
}
|
||||
|
||||
// TestRetentionBulkOperations tests retention with bulk operations
|
||||
func TestRetentionBulkOperations(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Create multiple objects with retention
|
||||
var objectsToDelete []types.ObjectIdentifier
|
||||
retentionUntil := time.Now().Add(1 * time.Hour)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
key := fmt.Sprintf("bulk-test-object-%d", i)
|
||||
content := fmt.Sprintf("bulk test content %d", i)
|
||||
|
||||
putResp := putObject(t, client, bucketName, key, content)
|
||||
require.NotNil(t, putResp.VersionId)
|
||||
|
||||
// Set retention on each object with version ID
|
||||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
VersionId: putResp.VersionId,
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
RetainUntilDate: aws.Time(retentionUntil),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{
|
||||
Key: aws.String(key),
|
||||
VersionId: putResp.VersionId,
|
||||
})
|
||||
}
|
||||
|
||||
// Try bulk delete without bypass - should fail or have errors
|
||||
deleteResp, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Delete: &types.Delete{
|
||||
Objects: objectsToDelete,
|
||||
Quiet: false,
|
||||
},
|
||||
})
|
||||
|
||||
// Check if operation failed or returned errors for protected objects
|
||||
if err != nil {
|
||||
t.Logf("Expected: bulk delete failed due to retention: %v", err)
|
||||
} else if deleteResp != nil && len(deleteResp.Errors) > 0 {
|
||||
t.Logf("Expected: bulk delete returned %d errors due to retention", len(deleteResp.Errors))
|
||||
for _, delErr := range deleteResp.Errors {
|
||||
t.Logf("Delete error: %s - %s", *delErr.Code, *delErr.Message)
|
||||
}
|
||||
} else {
|
||||
t.Logf("Warning: bulk delete succeeded - retention may not be enforced for bulk operations")
|
||||
}
|
||||
|
||||
// Try bulk delete with bypass - should succeed
|
||||
_, err = client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
BypassGovernanceRetention: true,
|
||||
Delete: &types.Delete{
|
||||
Objects: objectsToDelete,
|
||||
Quiet: false,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Logf("Bulk delete with bypass failed (may not be supported): %v", err)
|
||||
} else {
|
||||
t.Logf("Bulk delete with bypass succeeded")
|
||||
}
|
||||
}
|
||||
|
||||
// TestRetentionWithMultipartUpload tests retention with multipart uploads
|
||||
func TestRetentionWithMultipartUpload(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Start multipart upload
|
||||
key := "multipart-retention-test"
|
||||
createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
uploadId := createResp.UploadId
|
||||
|
||||
// Upload a part
|
||||
partContent := "This is a test part for multipart upload"
|
||||
uploadResp, err := client.UploadPart(context.TODO(), &s3.UploadPartInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
PartNumber: 1,
|
||||
UploadId: uploadId,
|
||||
Body: strings.NewReader(partContent),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Complete multipart upload
|
||||
completeResp, err := client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
UploadId: uploadId,
|
||||
MultipartUpload: &types.CompletedMultipartUpload{
|
||||
Parts: []types.CompletedPart{
|
||||
{
|
||||
ETag: uploadResp.ETag,
|
||||
PartNumber: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Add a small delay to ensure the object is fully created
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
// Verify object exists after multipart upload - retry if needed
|
||||
var headErr error
|
||||
for retries := 0; retries < 10; retries++ {
|
||||
_, headErr = client.HeadObject(context.TODO(), &s3.HeadObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
if headErr == nil {
|
||||
break
|
||||
}
|
||||
t.Logf("HeadObject attempt %d failed: %v", retries+1, headErr)
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
}
|
||||
|
||||
if headErr != nil {
|
||||
t.Logf("Object not found after multipart upload completion, checking if multipart upload is fully supported")
|
||||
// Check if the object exists by trying to list it
|
||||
listResp, listErr := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(bucketName),
|
||||
Prefix: aws.String(key),
|
||||
})
|
||||
if listErr != nil || len(listResp.Contents) == 0 {
|
||||
t.Skip("Multipart upload may not be fully supported, skipping test")
|
||||
return
|
||||
}
|
||||
// If object exists in listing but not accessible via HeadObject, skip test
|
||||
t.Skip("Object exists in listing but not accessible via HeadObject, multipart upload may not be fully supported")
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, headErr, "Object should exist after multipart upload")
|
||||
|
||||
// Set retention on the completed multipart object with version ID
|
||||
retentionUntil := time.Now().Add(1 * time.Hour)
|
||||
_, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
VersionId: completeResp.VersionId,
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
RetainUntilDate: aws.Time(retentionUntil),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Try to delete - should fail
|
||||
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// TestRetentionExtendedAttributes tests that retention uses extended attributes correctly
|
||||
func TestRetentionExtendedAttributes(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Create object
|
||||
key := "extended-attrs-test"
|
||||
content := "extended attributes test content"
|
||||
putResp := putObject(t, client, bucketName, key, content)
|
||||
require.NotNil(t, putResp.VersionId)
|
||||
|
||||
// Set retention
|
||||
retentionUntil := time.Now().Add(1 * time.Hour)
|
||||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
VersionId: putResp.VersionId,
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
RetainUntilDate: aws.Time(retentionUntil),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set legal hold
|
||||
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
VersionId: putResp.VersionId,
|
||||
LegalHold: &types.ObjectLockLegalHold{
|
||||
Status: types.ObjectLockLegalHoldStatusOn,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get object metadata to verify extended attributes are set
|
||||
resp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the object has metadata (may be empty in some implementations)
|
||||
// Note: The actual metadata keys depend on the implementation
|
||||
if resp.Metadata != nil && len(resp.Metadata) > 0 {
|
||||
t.Logf("Object metadata: %+v", resp.Metadata)
|
||||
} else {
|
||||
t.Logf("Object metadata: empty (extended attributes may be stored internally)")
|
||||
}
|
||||
|
||||
// Verify retention can be retrieved
|
||||
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode)
|
||||
|
||||
// Verify legal hold can be retrieved
|
||||
legalHoldResp, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, types.ObjectLockLegalHoldStatusOn, legalHoldResp.LegalHold.Status)
|
||||
}
|
||||
|
||||
// TestRetentionBucketDefaults tests object lock configuration defaults
|
||||
func TestRetentionBucketDefaults(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
// Use a timestamp-based bucket name to reduce the chance of conflicts
|
||||
bucketName := fmt.Sprintf("bucket-defaults-%d-%d", time.Now().UnixNano(), time.Now().UnixMilli()%10000)
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Set bucket object lock configuration with default retention
|
||||
_, err := client.PutObjectLockConfiguration(context.TODO(), &s3.PutObjectLockConfigurationInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
ObjectLockConfiguration: &types.ObjectLockConfiguration{
|
||||
ObjectLockEnabled: types.ObjectLockEnabledEnabled,
|
||||
Rule: &types.ObjectLockRule{
|
||||
DefaultRetention: &types.DefaultRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
Days: 1, // 1 day default
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Logf("PutObjectLockConfiguration failed (may not be supported): %v", err)
|
||||
t.Skip("Object lock configuration not supported, skipping test")
|
||||
return
|
||||
}
|
||||
|
||||
// Create object (should inherit default retention)
|
||||
key := "bucket-defaults-test"
|
||||
content := "bucket defaults test content"
|
||||
putResp := putObject(t, client, bucketName, key, content)
|
||||
require.NotNil(t, putResp.VersionId)
|
||||
|
||||
// Check if object has default retention applied
|
||||
// Note: This depends on the implementation - some S3 services apply
|
||||
// default retention automatically, others require explicit setting
|
||||
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
if err != nil {
|
||||
t.Logf("No automatic default retention applied: %v", err)
|
||||
} else {
|
||||
t.Logf("Default retention applied: %+v", retentionResp.Retention)
|
||||
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRetentionConcurrentOperations tests concurrent retention operations
|
||||
func TestRetentionConcurrentOperations(t *testing.T) {
|
||||
client := getS3Client(t)
|
||||
bucketName := getNewBucketName()
|
||||
|
||||
// Create bucket and enable versioning
|
||||
createBucket(t, client, bucketName)
|
||||
defer deleteBucket(t, client, bucketName)
|
||||
enableVersioning(t, client, bucketName)
|
||||
|
||||
// Create object
|
||||
key := "concurrent-ops-test"
|
||||
content := "concurrent operations test content"
|
||||
putResp := putObject(t, client, bucketName, key, content)
|
||||
require.NotNil(t, putResp.VersionId)
|
||||
|
||||
// Test concurrent retention and legal hold operations
|
||||
retentionUntil := time.Now().Add(1 * time.Hour)
|
||||
|
||||
// Set retention and legal hold concurrently
|
||||
errChan := make(chan error, 2)
|
||||
|
||||
go func() {
|
||||
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
Retention: &types.ObjectLockRetention{
|
||||
Mode: types.ObjectLockRetentionModeGovernance,
|
||||
RetainUntilDate: aws.Time(retentionUntil),
|
||||
},
|
||||
})
|
||||
errChan <- err
|
||||
}()
|
||||
|
||||
go func() {
|
||||
_, err := client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
LegalHold: &types.ObjectLockLegalHold{
|
||||
Status: types.ObjectLockLegalHoldStatusOn,
|
||||
},
|
||||
})
|
||||
errChan <- err
|
||||
}()
|
||||
|
||||
// Wait for both operations to complete
|
||||
for i := 0; i < 2; i++ {
|
||||
err := <-errChan
|
||||
if err != nil {
|
||||
t.Logf("Concurrent operation failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify both settings are applied
|
||||
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
if err == nil {
|
||||
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode)
|
||||
}
|
||||
|
||||
legalHoldResp, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
if err == nil {
|
||||
assert.Equal(t, types.ObjectLockLegalHoldStatusOn, legalHoldResp.LegalHold.Status)
|
||||
}
|
||||
}
|
9	test/s3/retention/test_config.json	Normal file
@@ -0,0 +1,9 @@
{
  "endpoint": "http://localhost:8333",
  "access_key": "some_access_key1",
  "secret_key": "some_secret_key1",
  "region": "us-east-1",
  "bucket_prefix": "test-retention-",
  "use_ssl": false,
  "skip_verify_ssl": true
}
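This file presumably supplies the endpoint and credentials used by the retention test suite. As a rough sketch of how such a loader could look in the test package (the helper name and struct below are illustrative, not taken from the commit):

package s3api

import (
	"encoding/json"
	"os"
)

// testConfig mirrors the fields of test_config.json; illustrative only.
type testConfig struct {
	Endpoint      string `json:"endpoint"`
	AccessKey     string `json:"access_key"`
	SecretKey     string `json:"secret_key"`
	Region        string `json:"region"`
	BucketPrefix  string `json:"bucket_prefix"`
	UseSSL        bool   `json:"use_ssl"`
	SkipVerifySSL bool   `json:"skip_verify_ssl"`
}

// loadTestConfig is a hypothetical helper; the actual test harness may wire this up differently.
func loadTestConfig(path string) (*testConfig, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var cfg testConfig
	if err := json.Unmarshal(data, &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}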
@@ -11,4 +11,25 @@ const (
	ExtETagKey                  = "Seaweed-X-Amz-ETag"
	ExtLatestVersionIdKey       = "Seaweed-X-Amz-Latest-Version-Id"
	ExtLatestVersionFileNameKey = "Seaweed-X-Amz-Latest-Version-File-Name"

	// Object Retention and Legal Hold
	ExtObjectLockModeKey     = "Seaweed-X-Amz-Object-Lock-Mode"
	ExtRetentionUntilDateKey = "Seaweed-X-Amz-Retention-Until-Date"
	ExtLegalHoldKey          = "Seaweed-X-Amz-Legal-Hold"
	ExtObjectLockEnabledKey  = "Seaweed-X-Amz-Object-Lock-Enabled"
	ExtObjectLockConfigKey   = "Seaweed-X-Amz-Object-Lock-Config"
)

// Object Lock and Retention Constants
const (
	// Retention modes
	RetentionModeGovernance = "GOVERNANCE"
	RetentionModeCompliance = "COMPLIANCE"

	// Legal hold status
	LegalHoldOn  = "ON"
	LegalHoldOff = "OFF"

	// Object lock enabled status
	ObjectLockEnabled = "Enabled"
)
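// Illustrative only (not part of the commit): reading the mode key defined
// above back from an entry's extended attributes. The exact value encoding
// used by the implementation is not shown here; a plain mode string is assumed.
//
//	func retentionMode(extended map[string][]byte) string {
//		if v, ok := extended[ExtObjectLockModeKey]; ok {
//			return string(v) // RetentionModeGovernance or RetentionModeCompliance
//		}
//		return ""
//	}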
@@ -49,6 +49,16 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque
		auditLog = s3err.GetAccessLog(r, http.StatusNoContent, s3err.ErrNone)
	}

	// Check object lock permissions before deletion (only for versioned buckets)
	if versioningEnabled {
		bypassGovernance := r.Header.Get("x-amz-bypass-governance-retention") == "true"
		if err := s3a.checkObjectLockPermissions(bucket, object, versionId, bypassGovernance); err != nil {
			glog.V(2).Infof("DeleteObjectHandler: object lock check failed for %s/%s: %v", bucket, object, err)
			s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied)
			return
		}
	}

	if versioningEnabled {
		// Handle versioned delete
		if versionId != "" {
@@ -117,9 +127,10 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque
	w.WriteHeader(http.StatusNoContent)
}

// / ObjectIdentifier carries key name for the object to delete.
// ObjectIdentifier represents an object to be deleted with its key name and optional version ID.
type ObjectIdentifier struct {
	ObjectName string `xml:"Key"`
	Key        string `xml:"Key"`
	VersionId  string `xml:"VersionId,omitempty"`
}

// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted.
@@ -132,9 +143,10 @@ type DeleteObjectsRequest struct {

// DeleteError structure.
type DeleteError struct {
	Code    string
	Message string
	Key     string
	Code      string `xml:"Code"`
	Message   string `xml:"Message"`
	Key       string `xml:"Key"`
	VersionId string `xml:"VersionId,omitempty"`
}
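// Illustrative only: marshalled on its own with encoding/xml, an entry using
// the tags above would serialize roughly as
//
//	<DeleteError>
//	  <Code>AccessDenied</Code>
//	  <Message>object is locked</Message>
//	  <Key>locked-object</Key>
//	  <VersionId>v1</VersionId>
//	</DeleteError>
//
// with VersionId omitted entirely when it is empty (omitempty).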

// DeleteObjectsResponse container for multiple object deletes.
@@ -180,18 +192,48 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
|
||||
if s3err.Logger != nil {
|
||||
auditLog = s3err.GetAccessLog(r, http.StatusNoContent, s3err.ErrNone)
|
||||
}
|
||||
|
||||
// Check for bypass governance retention header
|
||||
bypassGovernance := r.Header.Get("x-amz-bypass-governance-retention") == "true"
|
||||
|
||||
// Check if versioning is enabled for the bucket (needed for object lock checks)
|
||||
versioningEnabled, err := s3a.isVersioningEnabled(bucket)
|
||||
if err != nil {
|
||||
if err == filer_pb.ErrNotFound {
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
|
||||
return
|
||||
}
|
||||
glog.Errorf("Error checking versioning status for bucket %s: %v", bucket, err)
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
|
||||
return
|
||||
}
|
||||
|
||||
s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
|
||||
|
||||
// delete file entries
|
||||
for _, object := range deleteObjects.Objects {
|
||||
if object.ObjectName == "" {
|
||||
if object.Key == "" {
|
||||
continue
|
||||
}
|
||||
lastSeparator := strings.LastIndex(object.ObjectName, "/")
|
||||
parentDirectoryPath, entryName, isDeleteData, isRecursive := "", object.ObjectName, true, false
|
||||
if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) {
|
||||
entryName = object.ObjectName[lastSeparator+1:]
|
||||
parentDirectoryPath = "/" + object.ObjectName[:lastSeparator]
|
||||
|
||||
// Check object lock permissions before deletion (only for versioned buckets)
|
||||
if versioningEnabled {
|
||||
if err := s3a.checkObjectLockPermissions(bucket, object.Key, object.VersionId, bypassGovernance); err != nil {
|
||||
glog.V(2).Infof("DeleteMultipleObjectsHandler: object lock check failed for %s/%s (version: %s): %v", bucket, object.Key, object.VersionId, err)
|
||||
deleteErrors = append(deleteErrors, DeleteError{
|
||||
Code: s3err.GetAPIError(s3err.ErrAccessDenied).Code,
|
||||
Message: s3err.GetAPIError(s3err.ErrAccessDenied).Description,
|
||||
Key: object.Key,
|
||||
VersionId: object.VersionId,
|
||||
})
|
||||
continue
|
||||
}
|
||||
}
|
||||
lastSeparator := strings.LastIndex(object.Key, "/")
|
||||
parentDirectoryPath, entryName, isDeleteData, isRecursive := "", object.Key, true, false
|
||||
if lastSeparator > 0 && lastSeparator+1 < len(object.Key) {
|
||||
entryName = object.Key[lastSeparator+1:]
|
||||
parentDirectoryPath = "/" + object.Key[:lastSeparator]
|
||||
}
|
||||
parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath)
|
||||
|
||||
@@ -204,9 +246,10 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
|
||||
} else {
|
||||
delete(directoriesWithDeletion, parentDirectoryPath)
|
||||
deleteErrors = append(deleteErrors, DeleteError{
|
||||
Code: "",
|
||||
Message: err.Error(),
|
||||
Key: object.ObjectName,
|
||||
Code: "",
|
||||
Message: err.Error(),
|
||||
Key: object.Key,
|
||||
VersionId: object.VersionId,
|
||||
})
|
||||
}
|
||||
if auditLog != nil {
|
||||
|
@@ -85,6 +85,13 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
|
||||
|
||||
glog.V(1).Infof("PutObjectHandler: bucket %s, object %s, versioningEnabled=%v", bucket, object, versioningEnabled)
|
||||
|
||||
// Check object lock permissions before PUT operation (only for versioned buckets)
|
||||
bypassGovernance := r.Header.Get("x-amz-bypass-governance-retention") == "true"
|
||||
if err := s3a.checkObjectLockPermissionsForPut(bucket, object, bypassGovernance, versioningEnabled); err != nil {
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied)
|
||||
return
|
||||
}
|
||||
|
||||
if versioningEnabled {
|
||||
// Handle versioned PUT
|
||||
glog.V(1).Infof("PutObjectHandler: using versioned PUT for %s/%s", bucket, object)
|
||||
|
356	weed/s3api/s3api_object_handlers_retention.go	Normal file
@@ -0,0 +1,356 @@
|
||||
package s3api
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/glog"
|
||||
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
|
||||
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
|
||||
stats_collect "github.com/seaweedfs/seaweedfs/weed/stats"
|
||||
)
|
||||
|
||||
// PutObjectRetentionHandler Put object Retention
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html
|
||||
func (s3a *S3ApiServer) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
|
||||
bucket, object := s3_constants.GetBucketAndObject(r)
|
||||
glog.V(3).Infof("PutObjectRetentionHandler %s %s", bucket, object)
|
||||
|
||||
// Check if Object Lock is available for this bucket (requires versioning)
|
||||
if !s3a.handleObjectLockAvailabilityCheck(w, r, bucket, "PutObjectRetentionHandler") {
|
||||
return
|
||||
}
|
||||
|
||||
// Get version ID from query parameters
|
||||
versionId := r.URL.Query().Get("versionId")
|
||||
|
||||
// Check for bypass governance retention header
|
||||
bypassGovernance := r.Header.Get("x-amz-bypass-governance-retention") == "true"
|
||||
|
||||
// Parse retention configuration from request body
|
||||
retention, err := parseObjectRetention(r)
|
||||
if err != nil {
|
||||
glog.Errorf("PutObjectRetentionHandler: failed to parse retention config: %v", err)
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
|
||||
return
|
||||
}
|
||||
|
||||
// Validate retention configuration
|
||||
if err := validateRetention(retention); err != nil {
|
||||
glog.Errorf("PutObjectRetentionHandler: invalid retention config: %v", err)
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Set retention on the object
|
||||
if err := s3a.setObjectRetention(bucket, object, versionId, retention, bypassGovernance); err != nil {
|
||||
glog.Errorf("PutObjectRetentionHandler: failed to set retention: %v", err)
|
||||
|
||||
// Handle specific error cases
|
||||
if errors.Is(err, ErrObjectNotFound) || errors.Is(err, ErrVersionNotFound) || errors.Is(err, ErrLatestVersionNotFound) {
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
|
||||
return
|
||||
}
|
||||
|
||||
if errors.Is(err, ErrComplianceModeActive) || errors.Is(err, ErrGovernanceModeActive) {
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied)
|
||||
return
|
||||
}
|
||||
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
|
||||
return
|
||||
}
|
||||
|
||||
// Record metrics
|
||||
stats_collect.RecordBucketActiveTime(bucket)
|
||||
|
||||
// Return success (HTTP 200 with no body)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
glog.V(3).Infof("PutObjectRetentionHandler: successfully set retention for %s/%s", bucket, object)
|
||||
}
|
||||
|
||||
// GetObjectRetentionHandler Get object Retention
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html
|
||||
func (s3a *S3ApiServer) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
|
||||
bucket, object := s3_constants.GetBucketAndObject(r)
|
||||
glog.V(3).Infof("GetObjectRetentionHandler %s %s", bucket, object)
|
||||
|
||||
// Check if Object Lock is available for this bucket (requires versioning)
|
||||
if !s3a.handleObjectLockAvailabilityCheck(w, r, bucket, "GetObjectRetentionHandler") {
|
||||
return
|
||||
}
|
||||
|
||||
// Get version ID from query parameters
|
||||
versionId := r.URL.Query().Get("versionId")
|
||||
|
||||
// Get retention configuration for the object
|
||||
retention, err := s3a.getObjectRetention(bucket, object, versionId)
|
||||
if err != nil {
|
||||
glog.Errorf("GetObjectRetentionHandler: failed to get retention: %v", err)
|
||||
|
||||
// Handle specific error cases
|
||||
if errors.Is(err, ErrObjectNotFound) || errors.Is(err, ErrVersionNotFound) {
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
|
||||
return
|
||||
}
|
||||
|
||||
if errors.Is(err, ErrNoRetentionConfiguration) {
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchObjectLockConfiguration)
|
||||
return
|
||||
}
|
||||
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
|
||||
return
|
||||
}
|
||||
|
||||
// Marshal retention configuration to XML
|
||||
retentionXML, err := xml.Marshal(retention)
|
||||
if err != nil {
|
||||
glog.Errorf("GetObjectRetentionHandler: failed to marshal retention: %v", err)
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
|
||||
return
|
||||
}
|
||||
|
||||
// Set response headers
|
||||
w.Header().Set("Content-Type", "application/xml")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
// Write XML response
|
||||
if _, err := w.Write([]byte(xml.Header)); err != nil {
|
||||
glog.Errorf("GetObjectRetentionHandler: failed to write XML header: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := w.Write(retentionXML); err != nil {
|
||||
glog.Errorf("GetObjectRetentionHandler: failed to write retention XML: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Record metrics
|
||||
stats_collect.RecordBucketActiveTime(bucket)
|
||||
|
||||
glog.V(3).Infof("GetObjectRetentionHandler: successfully retrieved retention for %s/%s", bucket, object)
|
||||
}
|
||||
|
||||
// PutObjectLegalHoldHandler Put object Legal Hold
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLegalHold.html
|
||||
func (s3a *S3ApiServer) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
|
||||
bucket, object := s3_constants.GetBucketAndObject(r)
|
||||
glog.V(3).Infof("PutObjectLegalHoldHandler %s %s", bucket, object)
|
||||
|
||||
// Check if Object Lock is available for this bucket (requires versioning)
|
||||
if !s3a.handleObjectLockAvailabilityCheck(w, r, bucket, "PutObjectLegalHoldHandler") {
|
||||
return
|
||||
}
|
||||
|
||||
// Get version ID from query parameters
|
||||
versionId := r.URL.Query().Get("versionId")
|
||||
|
||||
// Parse legal hold configuration from request body
|
||||
legalHold, err := parseObjectLegalHold(r)
|
||||
if err != nil {
|
||||
glog.Errorf("PutObjectLegalHoldHandler: failed to parse legal hold config: %v", err)
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
|
||||
return
|
||||
}
|
||||
|
||||
// Validate legal hold configuration
|
||||
if err := validateLegalHold(legalHold); err != nil {
|
||||
glog.Errorf("PutObjectLegalHoldHandler: invalid legal hold config: %v", err)
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Set legal hold on the object
|
||||
if err := s3a.setObjectLegalHold(bucket, object, versionId, legalHold); err != nil {
|
||||
glog.Errorf("PutObjectLegalHoldHandler: failed to set legal hold: %v", err)
|
||||
|
||||
// Handle specific error cases
|
||||
if errors.Is(err, ErrObjectNotFound) || errors.Is(err, ErrVersionNotFound) {
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
|
||||
return
|
||||
}
|
||||
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
|
||||
return
|
||||
}
|
||||
|
||||
// Record metrics
|
||||
stats_collect.RecordBucketActiveTime(bucket)
|
||||
|
||||
// Return success (HTTP 200 with no body)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
glog.V(3).Infof("PutObjectLegalHoldHandler: successfully set legal hold for %s/%s", bucket, object)
|
||||
}
|
||||
|
||||
// GetObjectLegalHoldHandler Get object Legal Hold
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html
|
||||
func (s3a *S3ApiServer) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
|
||||
bucket, object := s3_constants.GetBucketAndObject(r)
|
||||
glog.V(3).Infof("GetObjectLegalHoldHandler %s %s", bucket, object)
|
||||
|
||||
// Check if Object Lock is available for this bucket (requires versioning)
|
||||
if !s3a.handleObjectLockAvailabilityCheck(w, r, bucket, "GetObjectLegalHoldHandler") {
|
||||
return
|
||||
}
|
||||
|
||||
// Get version ID from query parameters
|
||||
versionId := r.URL.Query().Get("versionId")
|
||||
|
||||
// Get legal hold configuration for the object
|
||||
legalHold, err := s3a.getObjectLegalHold(bucket, object, versionId)
|
||||
if err != nil {
|
||||
glog.Errorf("GetObjectLegalHoldHandler: failed to get legal hold: %v", err)
|
||||
|
||||
// Handle specific error cases
|
||||
if errors.Is(err, ErrObjectNotFound) || errors.Is(err, ErrVersionNotFound) {
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
|
||||
return
|
||||
}
|
||||
|
||||
if errors.Is(err, ErrNoLegalHoldConfiguration) {
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchObjectLegalHold)
|
||||
return
|
||||
}
|
||||
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
|
||||
return
|
||||
}
|
||||
|
||||
// Marshal legal hold configuration to XML
|
||||
legalHoldXML, err := xml.Marshal(legalHold)
|
||||
if err != nil {
|
||||
glog.Errorf("GetObjectLegalHoldHandler: failed to marshal legal hold: %v", err)
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
|
||||
return
|
||||
}
|
||||
|
||||
// Set response headers
|
||||
w.Header().Set("Content-Type", "application/xml")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
// Write XML response
|
||||
if _, err := w.Write([]byte(xml.Header)); err != nil {
|
||||
glog.Errorf("GetObjectLegalHoldHandler: failed to write XML header: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := w.Write(legalHoldXML); err != nil {
|
||||
glog.Errorf("GetObjectLegalHoldHandler: failed to write legal hold XML: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Record metrics
|
||||
stats_collect.RecordBucketActiveTime(bucket)
|
||||
|
||||
glog.V(3).Infof("GetObjectLegalHoldHandler: successfully retrieved legal hold for %s/%s", bucket, object)
|
||||
}
|
||||
|
||||
// PutObjectLockConfigurationHandler Put object Lock configuration
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLockConfiguration.html
|
||||
func (s3a *S3ApiServer) PutObjectLockConfigurationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
bucket, _ := s3_constants.GetBucketAndObject(r)
|
||||
glog.V(3).Infof("PutObjectLockConfigurationHandler %s", bucket)
|
||||
|
||||
// Check if Object Lock is available for this bucket (requires versioning)
|
||||
if !s3a.handleObjectLockAvailabilityCheck(w, r, bucket, "PutObjectLockConfigurationHandler") {
|
||||
return
|
||||
}
|
||||
|
||||
// Parse object lock configuration from request body
|
||||
config, err := parseObjectLockConfiguration(r)
|
||||
if err != nil {
|
||||
glog.Errorf("PutObjectLockConfigurationHandler: failed to parse object lock config: %v", err)
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
|
||||
return
|
||||
}
|
||||
|
||||
// Validate object lock configuration
|
||||
if err := validateObjectLockConfiguration(config); err != nil {
|
||||
glog.Errorf("PutObjectLockConfigurationHandler: invalid object lock config: %v", err)
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Set object lock configuration on the bucket
|
||||
errCode := s3a.updateBucketConfig(bucket, func(bucketConfig *BucketConfig) error {
|
||||
if bucketConfig.Entry.Extended == nil {
|
||||
bucketConfig.Entry.Extended = make(map[string][]byte)
|
||||
}
|
||||
|
||||
// Store the configuration as XML in extended attributes
|
||||
configXML, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucketConfig.Entry.Extended[s3_constants.ExtObjectLockConfigKey] = configXML
|
||||
|
||||
if config.ObjectLockEnabled != "" {
|
||||
bucketConfig.Entry.Extended[s3_constants.ExtObjectLockEnabledKey] = []byte(config.ObjectLockEnabled)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if errCode != s3err.ErrNone {
|
||||
glog.Errorf("PutObjectLockConfigurationHandler: failed to set object lock config: %v", errCode)
|
||||
s3err.WriteErrorResponse(w, r, errCode)
|
||||
return
|
||||
}
|
||||
|
||||
// Record metrics
|
||||
stats_collect.RecordBucketActiveTime(bucket)
|
||||
|
||||
// Return success (HTTP 200 with no body)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
glog.V(3).Infof("PutObjectLockConfigurationHandler: successfully set object lock config for %s", bucket)
|
||||
}
|
||||
|
||||
// GetObjectLockConfigurationHandler Get object Lock configuration
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html
|
||||
func (s3a *S3ApiServer) GetObjectLockConfigurationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
bucket, _ := s3_constants.GetBucketAndObject(r)
|
||||
glog.V(3).Infof("GetObjectLockConfigurationHandler %s", bucket)
|
||||
|
||||
// Get bucket configuration
|
||||
bucketConfig, errCode := s3a.getBucketConfig(bucket)
|
||||
if errCode != s3err.ErrNone {
|
||||
glog.Errorf("GetObjectLockConfigurationHandler: failed to get bucket config: %v", errCode)
|
||||
s3err.WriteErrorResponse(w, r, errCode)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if object lock configuration exists
|
||||
if bucketConfig.Entry.Extended == nil {
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchObjectLockConfiguration)
|
||||
return
|
||||
}
|
||||
|
||||
configXML, exists := bucketConfig.Entry.Extended[s3_constants.ExtObjectLockConfigKey]
|
||||
if !exists {
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchObjectLockConfiguration)
|
||||
return
|
||||
}
|
||||
|
||||
// Set response headers
|
||||
w.Header().Set("Content-Type", "application/xml")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
// Write XML response
|
||||
if _, err := w.Write([]byte(xml.Header)); err != nil {
|
||||
glog.Errorf("GetObjectLockConfigurationHandler: failed to write XML header: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := w.Write(configXML); err != nil {
|
||||
glog.Errorf("GetObjectLockConfigurationHandler: failed to write config XML: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Record metrics
|
||||
stats_collect.RecordBucketActiveTime(bucket)
|
||||
|
||||
glog.V(3).Infof("GetObjectLockConfigurationHandler: successfully retrieved object lock config for %s", bucket)
|
||||
}
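// Illustrative only: the bucket-level document that PutObjectLockConfigurationHandler
// parses and GetObjectLockConfigurationHandler returns, in the XML shape defined by
// ObjectLockConfiguration (see s3api_object_retention.go below):
//
//	<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
//	  <ObjectLockEnabled>Enabled</ObjectLockEnabled>
//	  <Rule>
//	    <DefaultRetention>
//	      <Mode>GOVERNANCE</Mode>
//	      <Days>30</Days>
//	    </DefaultRetention>
//	  </Rule>
//	</ObjectLockConfiguration>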
|
@@ -4,7 +4,7 @@ import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// GetObjectAclHandler Put object ACL
|
||||
// GetObjectAclHandler Get object ACL
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
|
||||
func (s3a *S3ApiServer) GetObjectAclHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
@@ -19,27 +19,3 @@ func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Reque
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
||||
}
|
||||
|
||||
// PutObjectRetentionHandler Put object Retention
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html
|
||||
func (s3a *S3ApiServer) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
||||
}
|
||||
|
||||
// PutObjectLegalHoldHandler Put object Legal Hold
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLegalHold.html
|
||||
func (s3a *S3ApiServer) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
||||
}
|
||||
|
||||
// PutObjectLockConfigurationHandler Put object Lock configuration
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLockConfiguration.html
|
||||
func (s3a *S3ApiServer) PutObjectLockConfigurationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
||||
}
|
||||
|
598	weed/s3api/s3api_object_retention.go	Normal file
@@ -0,0 +1,598 @@
|
||||
package s3api
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/glog"
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
|
||||
"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
|
||||
)
|
||||
|
||||
// Sentinel errors for proper error handling instead of string matching
|
||||
var (
|
||||
ErrNoRetentionConfiguration = errors.New("no retention configuration found")
|
||||
ErrNoLegalHoldConfiguration = errors.New("no legal hold configuration found")
|
||||
ErrBucketNotFound = errors.New("bucket not found")
|
||||
ErrObjectNotFound = errors.New("object not found")
|
||||
ErrVersionNotFound = errors.New("version not found")
|
||||
ErrLatestVersionNotFound = errors.New("latest version not found")
|
||||
ErrComplianceModeActive = errors.New("object is under COMPLIANCE mode retention and cannot be deleted or modified")
|
||||
ErrGovernanceModeActive = errors.New("object is under GOVERNANCE mode retention and cannot be deleted or modified without bypass")
|
||||
)
|
||||
|
||||
const (
|
||||
// Maximum retention period limits according to AWS S3 specifications
|
||||
MaxRetentionDays = 36500 // Maximum number of days for object retention (100 years)
|
||||
MaxRetentionYears = 100 // Maximum number of years for object retention
|
||||
)
|
||||
|
||||
// ObjectRetention represents S3 Object Retention configuration
|
||||
type ObjectRetention struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Retention"`
|
||||
Mode string `xml:"Mode,omitempty"`
|
||||
RetainUntilDate *time.Time `xml:"RetainUntilDate,omitempty"`
|
||||
}
|
||||
|
||||
// ObjectLegalHold represents S3 Object Legal Hold configuration
|
||||
type ObjectLegalHold struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LegalHold"`
|
||||
Status string `xml:"Status,omitempty"`
|
||||
}
|
||||
|
||||
// ObjectLockConfiguration represents S3 Object Lock Configuration
|
||||
type ObjectLockConfiguration struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ObjectLockConfiguration"`
|
||||
ObjectLockEnabled string `xml:"ObjectLockEnabled,omitempty"`
|
||||
Rule *ObjectLockRule `xml:"Rule,omitempty"`
|
||||
}
|
||||
|
||||
// ObjectLockRule represents an Object Lock Rule
|
||||
type ObjectLockRule struct {
|
||||
XMLName xml.Name `xml:"Rule"`
|
||||
DefaultRetention *DefaultRetention `xml:"DefaultRetention,omitempty"`
|
||||
}
|
||||
|
||||
// DefaultRetention represents default retention settings
|
||||
type DefaultRetention struct {
|
||||
XMLName xml.Name `xml:"DefaultRetention"`
|
||||
Mode string `xml:"Mode,omitempty"`
|
||||
Days int `xml:"Days,omitempty"`
|
||||
Years int `xml:"Years,omitempty"`
|
||||
}
|
||||
|
||||
// Custom time unmarshalling for AWS S3 ISO8601 format
|
||||
func (or *ObjectRetention) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
type Alias ObjectRetention
|
||||
aux := &struct {
|
||||
*Alias
|
||||
RetainUntilDate *string `xml:"RetainUntilDate,omitempty"`
|
||||
}{
|
||||
Alias: (*Alias)(or),
|
||||
}
|
||||
|
||||
if err := d.DecodeElement(aux, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if aux.RetainUntilDate != nil {
|
||||
t, err := time.Parse(time.RFC3339, *aux.RetainUntilDate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
or.RetainUntilDate = &t
|
||||
}
|
||||
|
||||
return nil
|
||||
}
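// Illustrative only: with the custom unmarshalling above, a request body such as
//
//	<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
//	  <Mode>GOVERNANCE</Mode>
//	  <RetainUntilDate>2030-01-01T00:00:00Z</RetainUntilDate>
//	</Retention>
//
// decodes into Mode == "GOVERNANCE" and a RetainUntilDate equal to
// time.Date(2030, 1, 1, 0, 0, 0, 0, time.UTC); a timestamp that is not valid
// RFC3339 makes time.Parse fail and the decode returns that error.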
|
||||
|
||||
// parseXML is a generic helper function to parse XML from an HTTP request body.
|
||||
// It uses xml.Decoder for streaming XML parsing, which is more memory-efficient
|
||||
// and avoids loading the entire request body into memory.
|
||||
//
|
||||
// The function assumes:
|
||||
// - The request body is not nil (returns error if it is)
|
||||
// - The request body will be closed after parsing (deferred close)
|
||||
// - The XML content matches the structure of the provided result type T
|
||||
//
|
||||
// This approach is optimized for small XML payloads typical in S3 API requests
|
||||
// (retention configurations, legal hold settings, etc.) where the overhead of
|
||||
// streaming parsing is acceptable for the memory efficiency benefits.
|
||||
func parseXML[T any](r *http.Request, result *T) error {
|
||||
if r.Body == nil {
|
||||
return fmt.Errorf("error parsing XML: empty request body")
|
||||
}
|
||||
defer r.Body.Close()
|
||||
|
||||
decoder := xml.NewDecoder(r.Body)
|
||||
if err := decoder.Decode(result); err != nil {
|
||||
return fmt.Errorf("error parsing XML: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
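// Illustrative only: exercising parseXML with a synthetic request, mirroring
// how the parse* helpers below use it (httptest is from net/http/httptest):
//
//	req := httptest.NewRequest(http.MethodPut, "/bucket/key?legal-hold",
//		strings.NewReader(`<LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>ON</Status></LegalHold>`))
//	var legalHold ObjectLegalHold
//	if err := parseXML(req, &legalHold); err != nil {
//		// malformed XML - the handlers map this to s3err.ErrMalformedXML
//	}
//	// legalHold.Status == "ON"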
|
||||
|
||||
// parseObjectRetention parses XML retention configuration from request body
|
||||
func parseObjectRetention(r *http.Request) (*ObjectRetention, error) {
|
||||
var retention ObjectRetention
|
||||
if err := parseXML(r, &retention); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &retention, nil
|
||||
}
|
||||
|
||||
// parseObjectLegalHold parses XML legal hold configuration from request body
|
||||
func parseObjectLegalHold(r *http.Request) (*ObjectLegalHold, error) {
|
||||
var legalHold ObjectLegalHold
|
||||
if err := parseXML(r, &legalHold); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &legalHold, nil
|
||||
}
|
||||
|
||||
// parseObjectLockConfiguration parses XML object lock configuration from request body
|
||||
func parseObjectLockConfiguration(r *http.Request) (*ObjectLockConfiguration, error) {
|
||||
var config ObjectLockConfiguration
|
||||
if err := parseXML(r, &config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
// validateRetention validates retention configuration
func validateRetention(retention *ObjectRetention) error {
	// AWS requires both Mode and RetainUntilDate for PutObjectRetention
	if retention.Mode == "" {
		return fmt.Errorf("retention configuration must specify Mode")
	}

	if retention.RetainUntilDate == nil {
		return fmt.Errorf("retention configuration must specify RetainUntilDate")
	}

	if retention.Mode != s3_constants.RetentionModeGovernance && retention.Mode != s3_constants.RetentionModeCompliance {
		return fmt.Errorf("invalid retention mode: %s", retention.Mode)
	}

	if retention.RetainUntilDate.Before(time.Now()) {
		return fmt.Errorf("retain until date must be in the future")
	}

	return nil
}

// validateLegalHold validates legal hold configuration
func validateLegalHold(legalHold *ObjectLegalHold) error {
	if legalHold.Status != s3_constants.LegalHoldOn && legalHold.Status != s3_constants.LegalHoldOff {
		return fmt.Errorf("invalid legal hold status: %s", legalHold.Status)
	}

	return nil
}

// validateObjectLockConfiguration validates object lock configuration
func validateObjectLockConfiguration(config *ObjectLockConfiguration) error {
	// ObjectLockEnabled is required for bucket-level configuration
	if config.ObjectLockEnabled == "" {
		return fmt.Errorf("object lock configuration must specify ObjectLockEnabled")
	}

	// Validate ObjectLockEnabled value
	if config.ObjectLockEnabled != s3_constants.ObjectLockEnabled {
		return fmt.Errorf("invalid object lock enabled value: %s", config.ObjectLockEnabled)
	}

	// Validate Rule if present
	if config.Rule != nil {
		if config.Rule.DefaultRetention == nil {
			return fmt.Errorf("rule configuration must specify DefaultRetention")
		}
		return validateDefaultRetention(config.Rule.DefaultRetention)
	}

	return nil
}

// validateDefaultRetention validates default retention configuration
func validateDefaultRetention(retention *DefaultRetention) error {
	// Mode is required
	if retention.Mode == "" {
		return fmt.Errorf("default retention must specify Mode")
	}

	// Mode must be valid
	if retention.Mode != s3_constants.RetentionModeGovernance && retention.Mode != s3_constants.RetentionModeCompliance {
		return fmt.Errorf("invalid default retention mode: %s", retention.Mode)
	}

	// Exactly one of Days or Years must be specified
	if retention.Days == 0 && retention.Years == 0 {
		return fmt.Errorf("default retention must specify either Days or Years")
	}

	if retention.Days > 0 && retention.Years > 0 {
		return fmt.Errorf("default retention cannot specify both Days and Years")
	}

	// Validate ranges
	if retention.Days < 0 || retention.Days > MaxRetentionDays {
		return fmt.Errorf("default retention days must be between 0 and %d", MaxRetentionDays)
	}

	if retention.Years < 0 || retention.Years > MaxRetentionYears {
		return fmt.Errorf("default retention years must be between 0 and %d", MaxRetentionYears)
	}

	return nil
}

// getObjectEntry retrieves the appropriate object entry based on versioning and versionId
func (s3a *S3ApiServer) getObjectEntry(bucket, object, versionId string) (*filer_pb.Entry, error) {
	var entry *filer_pb.Entry
	var err error

	if versionId != "" {
		entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId)
	} else {
		// Check if versioning is enabled
		versioningEnabled, vErr := s3a.isVersioningEnabled(bucket)
		if vErr != nil {
			return nil, fmt.Errorf("error checking versioning: %v", vErr)
		}

		if versioningEnabled {
			entry, err = s3a.getLatestObjectVersion(bucket, object)
		} else {
			bucketDir := s3a.option.BucketsPath + "/" + bucket
			entry, err = s3a.getEntry(bucketDir, object)
		}
	}

	if err != nil {
		return nil, fmt.Errorf("failed to retrieve object %s/%s: %w", bucket, object, ErrObjectNotFound)
	}

	return entry, nil
}

// getObjectRetention retrieves retention configuration from object metadata
func (s3a *S3ApiServer) getObjectRetention(bucket, object, versionId string) (*ObjectRetention, error) {
	entry, err := s3a.getObjectEntry(bucket, object, versionId)
	if err != nil {
		return nil, err
	}

	if entry.Extended == nil {
		return nil, ErrNoRetentionConfiguration
	}

	retention := &ObjectRetention{}

	if modeBytes, exists := entry.Extended[s3_constants.ExtObjectLockModeKey]; exists {
		retention.Mode = string(modeBytes)
	}

	if dateBytes, exists := entry.Extended[s3_constants.ExtRetentionUntilDateKey]; exists {
		if timestamp, err := strconv.ParseInt(string(dateBytes), 10, 64); err == nil {
			t := time.Unix(timestamp, 0)
			retention.RetainUntilDate = &t
		} else {
			return nil, fmt.Errorf("failed to parse retention timestamp for %s/%s: corrupted timestamp data", bucket, object)
		}
	}

	if retention.Mode == "" || retention.RetainUntilDate == nil {
		return nil, ErrNoRetentionConfiguration
	}

	return retention, nil
}

// setObjectRetention sets retention configuration on object metadata
func (s3a *S3ApiServer) setObjectRetention(bucket, object, versionId string, retention *ObjectRetention, bypassGovernance bool) error {
	var entry *filer_pb.Entry
	var err error
	var entryPath string

	if versionId != "" {
		entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId)
		if err != nil {
			return fmt.Errorf("failed to get version %s for object %s/%s: %w", versionId, bucket, object, ErrVersionNotFound)
		}
		entryPath = object + ".versions/" + s3a.getVersionFileName(versionId)
	} else {
		// Check if versioning is enabled
		versioningEnabled, vErr := s3a.isVersioningEnabled(bucket)
		if vErr != nil {
			return fmt.Errorf("error checking versioning: %v", vErr)
		}

		if versioningEnabled {
			entry, err = s3a.getLatestObjectVersion(bucket, object)
			if err != nil {
				return fmt.Errorf("failed to get latest version for object %s/%s: %w", bucket, object, ErrLatestVersionNotFound)
			}
			// Extract version ID from entry metadata
			if entry.Extended != nil {
				if versionIdBytes, exists := entry.Extended[s3_constants.ExtVersionIdKey]; exists {
					versionId = string(versionIdBytes)
					entryPath = object + ".versions/" + s3a.getVersionFileName(versionId)
				}
			}
		} else {
			bucketDir := s3a.option.BucketsPath + "/" + bucket
			entry, err = s3a.getEntry(bucketDir, object)
			if err != nil {
				return fmt.Errorf("failed to get object %s/%s: %w", bucket, object, ErrObjectNotFound)
			}
			entryPath = object
		}
	}

	// Check if object is already under retention
	if entry.Extended != nil {
		if existingMode, exists := entry.Extended[s3_constants.ExtObjectLockModeKey]; exists {
			if string(existingMode) == s3_constants.RetentionModeCompliance && !bypassGovernance {
				return fmt.Errorf("cannot modify retention on object under COMPLIANCE mode")
			}

			if existingDateBytes, dateExists := entry.Extended[s3_constants.ExtRetentionUntilDateKey]; dateExists {
				if timestamp, err := strconv.ParseInt(string(existingDateBytes), 10, 64); err == nil {
					existingDate := time.Unix(timestamp, 0)
					if existingDate.After(time.Now()) && string(existingMode) == s3_constants.RetentionModeGovernance && !bypassGovernance {
						return fmt.Errorf("cannot modify retention on object under GOVERNANCE mode without bypass")
					}
				}
			}
		}
	}

	// Update retention metadata
	if entry.Extended == nil {
		entry.Extended = make(map[string][]byte)
	}

	if retention.Mode != "" {
		entry.Extended[s3_constants.ExtObjectLockModeKey] = []byte(retention.Mode)
	}

	if retention.RetainUntilDate != nil {
		entry.Extended[s3_constants.ExtRetentionUntilDateKey] = []byte(strconv.FormatInt(retention.RetainUntilDate.Unix(), 10))

		// Also update the existing WORM fields for compatibility
		entry.WormEnforcedAtTsNs = time.Now().UnixNano()
	}

	// Update the entry
	// NOTE: Potential race condition exists if concurrent calls to PutObjectRetention
	// and PutObjectLegalHold update the same object simultaneously, as they might
	// overwrite each other's Extended map changes. This is mitigated by the fact
	// that mkFile operations are typically serialized at the filer level, but
	// future implementations might consider using atomic update operations or
	// entry-level locking for complete safety.
	bucketDir := s3a.option.BucketsPath + "/" + bucket
	return s3a.mkFile(bucketDir, entryPath, entry.Chunks, func(updatedEntry *filer_pb.Entry) {
		updatedEntry.Extended = entry.Extended
		updatedEntry.WormEnforcedAtTsNs = entry.WormEnforcedAtTsNs
	})
}

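The retain-until date is persisted in the Extended map as a Unix-seconds string (strconv.FormatInt on write, strconv.ParseInt plus time.Unix on read), as setObjectRetention and getObjectRetention show above. A minimal, self-contained sketch of that round trip; the map key used here is an illustrative stand-in for ExtRetentionUntilDateKey, not the real constant:

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	extended := map[string][]byte{}

	// Store: encode the retain-until date as Unix seconds, as setObjectRetention does.
	retainUntil := time.Now().Add(24 * time.Hour)
	extended["retention-until-date"] = []byte(strconv.FormatInt(retainUntil.Unix(), 10))

	// Load: parse the stored string back into a time.Time, as getObjectRetention does.
	ts, err := strconv.ParseInt(string(extended["retention-until-date"]), 10, 64)
	if err != nil {
		panic(err) // corrupted timestamp data
	}
	restored := time.Unix(ts, 0)

	// Sub-second precision is dropped by the Unix-seconds encoding.
	fmt.Println(restored.Equal(retainUntil.Truncate(time.Second)))
}
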
// getObjectLegalHold retrieves legal hold configuration from object metadata
func (s3a *S3ApiServer) getObjectLegalHold(bucket, object, versionId string) (*ObjectLegalHold, error) {
	entry, err := s3a.getObjectEntry(bucket, object, versionId)
	if err != nil {
		return nil, err
	}

	if entry.Extended == nil {
		return nil, ErrNoLegalHoldConfiguration
	}

	legalHold := &ObjectLegalHold{}

	if statusBytes, exists := entry.Extended[s3_constants.ExtLegalHoldKey]; exists {
		legalHold.Status = string(statusBytes)
	} else {
		return nil, ErrNoLegalHoldConfiguration
	}

	return legalHold, nil
}

// setObjectLegalHold sets legal hold configuration on object metadata
func (s3a *S3ApiServer) setObjectLegalHold(bucket, object, versionId string, legalHold *ObjectLegalHold) error {
	var entry *filer_pb.Entry
	var err error
	var entryPath string

	if versionId != "" {
		entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId)
		if err != nil {
			return fmt.Errorf("failed to get version %s for object %s/%s: %w", versionId, bucket, object, ErrVersionNotFound)
		}
		entryPath = object + ".versions/" + s3a.getVersionFileName(versionId)
	} else {
		// Check if versioning is enabled
		versioningEnabled, vErr := s3a.isVersioningEnabled(bucket)
		if vErr != nil {
			return fmt.Errorf("error checking versioning: %v", vErr)
		}

		if versioningEnabled {
			entry, err = s3a.getLatestObjectVersion(bucket, object)
			if err != nil {
				return fmt.Errorf("failed to get latest version for object %s/%s: %w", bucket, object, ErrLatestVersionNotFound)
			}
			// Extract version ID from entry metadata
			if entry.Extended != nil {
				if versionIdBytes, exists := entry.Extended[s3_constants.ExtVersionIdKey]; exists {
					versionId = string(versionIdBytes)
					entryPath = object + ".versions/" + s3a.getVersionFileName(versionId)
				}
			}
		} else {
			bucketDir := s3a.option.BucketsPath + "/" + bucket
			entry, err = s3a.getEntry(bucketDir, object)
			if err != nil {
				return fmt.Errorf("failed to get object %s/%s: %w", bucket, object, ErrObjectNotFound)
			}
			entryPath = object
		}
	}

	// Update legal hold metadata
	if entry.Extended == nil {
		entry.Extended = make(map[string][]byte)
	}

	entry.Extended[s3_constants.ExtLegalHoldKey] = []byte(legalHold.Status)

	// Update the entry
	// NOTE: Potential race condition exists if concurrent calls to PutObjectRetention
	// and PutObjectLegalHold update the same object simultaneously, as they might
	// overwrite each other's Extended map changes. This is mitigated by the fact
	// that mkFile operations are typically serialized at the filer level, but
	// future implementations might consider using atomic update operations or
	// entry-level locking for complete safety.
	bucketDir := s3a.option.BucketsPath + "/" + bucket
	return s3a.mkFile(bucketDir, entryPath, entry.Chunks, func(updatedEntry *filer_pb.Entry) {
		updatedEntry.Extended = entry.Extended
	})
}

// isObjectRetentionActive checks if an object is currently under retention
func (s3a *S3ApiServer) isObjectRetentionActive(bucket, object, versionId string) (bool, error) {
	retention, err := s3a.getObjectRetention(bucket, object, versionId)
	if err != nil {
		// If no retention found, object is not under retention
		if errors.Is(err, ErrNoRetentionConfiguration) {
			return false, nil
		}
		return false, err
	}

	if retention.RetainUntilDate != nil && retention.RetainUntilDate.After(time.Now()) {
		return true, nil
	}

	return false, nil
}

// getObjectRetentionWithStatus retrieves retention configuration and returns both the data and active status
// This is an optimization to avoid duplicate fetches when both retention data and status are needed
func (s3a *S3ApiServer) getObjectRetentionWithStatus(bucket, object, versionId string) (*ObjectRetention, bool, error) {
	retention, err := s3a.getObjectRetention(bucket, object, versionId)
	if err != nil {
		// If no retention found, object is not under retention
		if errors.Is(err, ErrNoRetentionConfiguration) {
			return nil, false, nil
		}
		return nil, false, err
	}

	// Check if retention is currently active
	isActive := retention.RetainUntilDate != nil && retention.RetainUntilDate.After(time.Now())
	return retention, isActive, nil
}

// isObjectLegalHoldActive checks if an object is currently under legal hold
func (s3a *S3ApiServer) isObjectLegalHoldActive(bucket, object, versionId string) (bool, error) {
	legalHold, err := s3a.getObjectLegalHold(bucket, object, versionId)
	if err != nil {
		// If no legal hold found, object is not under legal hold
		if errors.Is(err, ErrNoLegalHoldConfiguration) {
			return false, nil
		}
		return false, err
	}

	return legalHold.Status == s3_constants.LegalHoldOn, nil
}

// checkObjectLockPermissions checks if an object can be deleted or modified
func (s3a *S3ApiServer) checkObjectLockPermissions(bucket, object, versionId string, bypassGovernance bool) error {
	// Get retention configuration and status in a single call to avoid duplicate fetches
	retention, retentionActive, err := s3a.getObjectRetentionWithStatus(bucket, object, versionId)
	if err != nil {
		glog.Warningf("Error checking retention for %s/%s: %v", bucket, object, err)
	}

	// Check if object is under legal hold
	legalHoldActive, err := s3a.isObjectLegalHoldActive(bucket, object, versionId)
	if err != nil {
		glog.Warningf("Error checking legal hold for %s/%s: %v", bucket, object, err)
	}

	// If object is under legal hold, it cannot be deleted or modified
	if legalHoldActive {
		return fmt.Errorf("object is under legal hold and cannot be deleted or modified")
	}

	// If object is under retention, check the mode
	if retentionActive && retention != nil {
		if retention.Mode == s3_constants.RetentionModeCompliance {
			return ErrComplianceModeActive
		}

		if retention.Mode == s3_constants.RetentionModeGovernance && !bypassGovernance {
			return ErrGovernanceModeActive
		}
	}

	return nil
}

// isObjectLockAvailable checks if Object Lock features are available for the bucket
// Object Lock requires versioning to be enabled (AWS S3 requirement)
func (s3a *S3ApiServer) isObjectLockAvailable(bucket string) error {
	versioningEnabled, err := s3a.isVersioningEnabled(bucket)
	if err != nil {
		if errors.Is(err, filer_pb.ErrNotFound) {
			return ErrBucketNotFound
		}
		return fmt.Errorf("error checking versioning status: %v", err)
	}

	if !versioningEnabled {
		return fmt.Errorf("object lock requires versioning to be enabled")
	}

	return nil
}

// checkObjectLockPermissionsForPut checks object lock permissions for PUT operations
// This is a shared helper to avoid code duplication in PUT handlers
func (s3a *S3ApiServer) checkObjectLockPermissionsForPut(bucket, object string, bypassGovernance bool, versioningEnabled bool) error {
	// Object Lock only applies to versioned buckets (AWS S3 requirement)
	if !versioningEnabled {
		return nil
	}

	// For PUT operations, we check permissions on the current object (empty versionId)
	if err := s3a.checkObjectLockPermissions(bucket, object, "", bypassGovernance); err != nil {
		glog.V(2).Infof("checkObjectLockPermissionsForPut: object lock check failed for %s/%s: %v", bucket, object, err)
		return err
	}
	return nil
}

// handleObjectLockAvailabilityCheck is a helper function to check object lock availability
// and write the appropriate error response if not available. This reduces code duplication
// across all retention handlers.
func (s3a *S3ApiServer) handleObjectLockAvailabilityCheck(w http.ResponseWriter, r *http.Request, bucket, handlerName string) bool {
	if err := s3a.isObjectLockAvailable(bucket); err != nil {
		glog.Errorf("%s: object lock not available for bucket %s: %v", handlerName, bucket, err)
		if errors.Is(err, ErrBucketNotFound) {
			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
		} else {
			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
		}
		return false
	}
	return true
}

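The boolean return lets each retention handler guard itself with a single early-return. The standalone sketch below shows that guard pattern only; checkAvailable, guard, and the /retention route are hypothetical stand-ins for isObjectLockAvailable and the real handlers, and plain net/http error responses replace the s3err response writer.

package main

import (
	"errors"
	"fmt"
	"net/http"
)

var errBucketNotFound = errors.New("bucket not found")

// checkAvailable is a hypothetical stand-in for isObjectLockAvailable.
func checkAvailable(bucket string) error {
	if bucket == "" {
		return errBucketNotFound
	}
	return nil
}

// guard mirrors handleObjectLockAvailabilityCheck: write the error response and
// report false so the caller can return immediately.
func guard(w http.ResponseWriter, bucket string) bool {
	if err := checkAvailable(bucket); err != nil {
		status := http.StatusBadRequest
		if errors.Is(err, errBucketNotFound) {
			status = http.StatusNotFound
		}
		http.Error(w, err.Error(), status)
		return false
	}
	return true
}

func retentionHandler(w http.ResponseWriter, r *http.Request) {
	bucket := r.URL.Query().Get("bucket")
	if !guard(w, bucket) {
		return // response already written
	}
	fmt.Fprintln(w, "object lock is available for", bucket)
}

func main() {
	http.HandleFunc("/retention", retentionHandler)
	_ = http.ListenAndServe("127.0.0.1:8081", nil)
}
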
726
weed/s3api/s3api_object_retention_test.go
Normal file
@@ -0,0 +1,726 @@
package s3api

import (
	"fmt"
	"io"
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
)

// TODO: If needed, re-implement TestPutObjectRetention with proper setup for buckets, objects, and versioning.

func TestValidateRetention(t *testing.T) {
	tests := []struct {
		name        string
		retention   *ObjectRetention
		expectError bool
		errorMsg    string
	}{
		{
			name: "Valid GOVERNANCE retention",
			retention: &ObjectRetention{
				Mode:            s3_constants.RetentionModeGovernance,
				RetainUntilDate: timePtr(time.Now().Add(24 * time.Hour)),
			},
			expectError: false,
		},
		{
			name: "Valid COMPLIANCE retention",
			retention: &ObjectRetention{
				Mode:            s3_constants.RetentionModeCompliance,
				RetainUntilDate: timePtr(time.Now().Add(24 * time.Hour)),
			},
			expectError: false,
		},
		{
			name: "Missing Mode",
			retention: &ObjectRetention{
				RetainUntilDate: timePtr(time.Now().Add(24 * time.Hour)),
			},
			expectError: true,
			errorMsg:    "retention configuration must specify Mode",
		},
		{
			name: "Missing RetainUntilDate",
			retention: &ObjectRetention{
				Mode: s3_constants.RetentionModeGovernance,
			},
			expectError: true,
			errorMsg:    "retention configuration must specify RetainUntilDate",
		},
		{
			name: "Invalid Mode",
			retention: &ObjectRetention{
				Mode:            "INVALID_MODE",
				RetainUntilDate: timePtr(time.Now().Add(24 * time.Hour)),
			},
			expectError: true,
			errorMsg:    "invalid retention mode",
		},
		{
			name: "Past RetainUntilDate",
			retention: &ObjectRetention{
				Mode:            s3_constants.RetentionModeGovernance,
				RetainUntilDate: timePtr(time.Now().Add(-24 * time.Hour)),
			},
			expectError: true,
			errorMsg:    "retain until date must be in the future",
		},
		{
			name:        "Empty retention",
			retention:   &ObjectRetention{},
			expectError: true,
			errorMsg:    "retention configuration must specify Mode",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateRetention(tt.retention)

			if tt.expectError {
				if err == nil {
					t.Errorf("Expected error but got none")
				} else if !strings.Contains(err.Error(), tt.errorMsg) {
					t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err)
				}
			} else {
				if err != nil {
					t.Errorf("Unexpected error: %v", err)
				}
			}
		})
	}
}

func TestValidateLegalHold(t *testing.T) {
	tests := []struct {
		name        string
		legalHold   *ObjectLegalHold
		expectError bool
		errorMsg    string
	}{
		{
			name: "Valid ON status",
			legalHold: &ObjectLegalHold{
				Status: s3_constants.LegalHoldOn,
			},
			expectError: false,
		},
		{
			name: "Valid OFF status",
			legalHold: &ObjectLegalHold{
				Status: s3_constants.LegalHoldOff,
			},
			expectError: false,
		},
		{
			name: "Invalid status",
			legalHold: &ObjectLegalHold{
				Status: "INVALID_STATUS",
			},
			expectError: true,
			errorMsg:    "invalid legal hold status",
		},
		{
			name: "Empty status",
			legalHold: &ObjectLegalHold{
				Status: "",
			},
			expectError: true,
			errorMsg:    "invalid legal hold status",
		},
		{
			name: "Lowercase on",
			legalHold: &ObjectLegalHold{
				Status: "on",
			},
			expectError: true,
			errorMsg:    "invalid legal hold status",
		},
		{
			name: "Lowercase off",
			legalHold: &ObjectLegalHold{
				Status: "off",
			},
			expectError: true,
			errorMsg:    "invalid legal hold status",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateLegalHold(tt.legalHold)

			if tt.expectError {
				if err == nil {
					t.Errorf("Expected error but got none")
				} else if !strings.Contains(err.Error(), tt.errorMsg) {
					t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err)
				}
			} else {
				if err != nil {
					t.Errorf("Unexpected error: %v", err)
				}
			}
		})
	}
}

func TestParseObjectRetention(t *testing.T) {
	tests := []struct {
		name           string
		xmlBody        string
		expectError    bool
		errorMsg       string
		expectedResult *ObjectRetention
	}{
		{
			name: "Valid retention XML",
			xmlBody: `<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
				<Mode>GOVERNANCE</Mode>
				<RetainUntilDate>2024-12-31T23:59:59Z</RetainUntilDate>
			</Retention>`,
			expectError: false,
			expectedResult: &ObjectRetention{
				Mode:            "GOVERNANCE",
				RetainUntilDate: timePtr(time.Date(2024, 12, 31, 23, 59, 59, 0, time.UTC)),
			},
		},
		{
			name: "Valid compliance retention XML",
			xmlBody: `<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
				<Mode>COMPLIANCE</Mode>
				<RetainUntilDate>2025-01-01T00:00:00Z</RetainUntilDate>
			</Retention>`,
			expectError: false,
			expectedResult: &ObjectRetention{
				Mode:            "COMPLIANCE",
				RetainUntilDate: timePtr(time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)),
			},
		},
		{
			name:        "Empty XML body",
			xmlBody:     "",
			expectError: true,
			errorMsg:    "error parsing XML",
		},
		{
			name:        "Invalid XML",
			xmlBody:     `<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Mode>GOVERNANCE</Mode><RetainUntilDate>invalid-date</RetainUntilDate></Retention>`,
			expectError: true,
			errorMsg:    "cannot parse",
		},
		{
			name:        "Malformed XML",
			xmlBody:     "<Retention><Mode>GOVERNANCE</Mode><RetainUntilDate>2024-12-31T23:59:59Z</Retention>",
			expectError: true,
			errorMsg:    "error parsing XML",
		},
		{
			name: "Missing Mode",
			xmlBody: `<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
				<RetainUntilDate>2024-12-31T23:59:59Z</RetainUntilDate>
			</Retention>`,
			expectError: false,
			expectedResult: &ObjectRetention{
				Mode:            "",
				RetainUntilDate: timePtr(time.Date(2024, 12, 31, 23, 59, 59, 0, time.UTC)),
			},
		},
		{
			name: "Missing RetainUntilDate",
			xmlBody: `<Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
				<Mode>GOVERNANCE</Mode>
			</Retention>`,
			expectError: false,
			expectedResult: &ObjectRetention{
				Mode:            "GOVERNANCE",
				RetainUntilDate: nil,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create a mock HTTP request with XML body
			req := &http.Request{
				Body: io.NopCloser(strings.NewReader(tt.xmlBody)),
			}

			result, err := parseObjectRetention(req)

			if tt.expectError {
				if err == nil {
					t.Errorf("Expected error but got none")
				} else if !strings.Contains(err.Error(), tt.errorMsg) {
					t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err)
				}
			} else {
				if err != nil {
					t.Errorf("Unexpected error: %v", err)
				}
				if result == nil {
					t.Errorf("Expected result but got nil")
				} else {
					if result.Mode != tt.expectedResult.Mode {
						t.Errorf("Expected Mode %s, got %s", tt.expectedResult.Mode, result.Mode)
					}
					if tt.expectedResult.RetainUntilDate == nil {
						if result.RetainUntilDate != nil {
							t.Errorf("Expected RetainUntilDate to be nil, got %v", result.RetainUntilDate)
						}
					} else if result.RetainUntilDate == nil {
						t.Errorf("Expected RetainUntilDate to be %v, got nil", tt.expectedResult.RetainUntilDate)
					} else if !result.RetainUntilDate.Equal(*tt.expectedResult.RetainUntilDate) {
						t.Errorf("Expected RetainUntilDate %v, got %v", tt.expectedResult.RetainUntilDate, result.RetainUntilDate)
					}
				}
			}
		})
	}
}

func TestParseObjectLegalHold(t *testing.T) {
	tests := []struct {
		name           string
		xmlBody        string
		expectError    bool
		errorMsg       string
		expectedResult *ObjectLegalHold
	}{
		{
			name: "Valid legal hold ON",
			xmlBody: `<LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
				<Status>ON</Status>
			</LegalHold>`,
			expectError: false,
			expectedResult: &ObjectLegalHold{
				Status: "ON",
			},
		},
		{
			name: "Valid legal hold OFF",
			xmlBody: `<LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
				<Status>OFF</Status>
			</LegalHold>`,
			expectError: false,
			expectedResult: &ObjectLegalHold{
				Status: "OFF",
			},
		},
		{
			name:        "Empty XML body",
			xmlBody:     "",
			expectError: true,
			errorMsg:    "error parsing XML",
		},
		{
			name:        "Invalid XML",
			xmlBody:     "<LegalHold><Status>ON</Status>",
			expectError: true,
			errorMsg:    "error parsing XML",
		},
		{
			name: "Missing Status",
			xmlBody: `<LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
			</LegalHold>`,
			expectError: false,
			expectedResult: &ObjectLegalHold{
				Status: "",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create a mock HTTP request with XML body
			req := &http.Request{
				Body: io.NopCloser(strings.NewReader(tt.xmlBody)),
			}

			result, err := parseObjectLegalHold(req)

			if tt.expectError {
				if err == nil {
					t.Errorf("Expected error but got none")
				} else if !strings.Contains(err.Error(), tt.errorMsg) {
					t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err)
				}
			} else {
				if err != nil {
					t.Errorf("Unexpected error: %v", err)
				}
				if result == nil {
					t.Errorf("Expected result but got nil")
				} else {
					if result.Status != tt.expectedResult.Status {
						t.Errorf("Expected Status %s, got %s", tt.expectedResult.Status, result.Status)
					}
				}
			}
		})
	}
}

func TestParseObjectLockConfiguration(t *testing.T) {
	tests := []struct {
		name           string
		xmlBody        string
		expectError    bool
		errorMsg       string
		expectedResult *ObjectLockConfiguration
	}{
		{
			name: "Valid object lock configuration",
			xmlBody: `<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
				<ObjectLockEnabled>Enabled</ObjectLockEnabled>
			</ObjectLockConfiguration>`,
			expectError: false,
			expectedResult: &ObjectLockConfiguration{
				ObjectLockEnabled: "Enabled",
			},
		},
		{
			name: "Valid object lock configuration with rule",
			xmlBody: `<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
				<ObjectLockEnabled>Enabled</ObjectLockEnabled>
				<Rule>
					<DefaultRetention>
						<Mode>GOVERNANCE</Mode>
						<Days>30</Days>
					</DefaultRetention>
				</Rule>
			</ObjectLockConfiguration>`,
			expectError: false,
			expectedResult: &ObjectLockConfiguration{
				ObjectLockEnabled: "Enabled",
				Rule: &ObjectLockRule{
					DefaultRetention: &DefaultRetention{
						Mode: "GOVERNANCE",
						Days: 30,
					},
				},
			},
		},
		{
			name:        "Empty XML body",
			xmlBody:     "",
			expectError: true,
			errorMsg:    "error parsing XML",
		},
		{
			name:        "Invalid XML",
			xmlBody:     "<ObjectLockConfiguration><ObjectLockEnabled>Enabled</ObjectLockEnabled>",
			expectError: true,
			errorMsg:    "error parsing XML",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create a mock HTTP request with XML body
			req := &http.Request{
				Body: io.NopCloser(strings.NewReader(tt.xmlBody)),
			}

			result, err := parseObjectLockConfiguration(req)

			if tt.expectError {
				if err == nil {
					t.Errorf("Expected error but got none")
				} else if !strings.Contains(err.Error(), tt.errorMsg) {
					t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err)
				}
			} else {
				if err != nil {
					t.Errorf("Unexpected error: %v", err)
				}
				if result == nil {
					t.Errorf("Expected result but got nil")
				} else {
					if result.ObjectLockEnabled != tt.expectedResult.ObjectLockEnabled {
						t.Errorf("Expected ObjectLockEnabled %s, got %s", tt.expectedResult.ObjectLockEnabled, result.ObjectLockEnabled)
					}
					if tt.expectedResult.Rule == nil {
						if result.Rule != nil {
							t.Errorf("Expected Rule to be nil, got %v", result.Rule)
						}
					} else if result.Rule == nil {
						t.Errorf("Expected Rule to be non-nil")
					} else {
						if result.Rule.DefaultRetention == nil {
							t.Errorf("Expected DefaultRetention to be non-nil")
						} else {
							if result.Rule.DefaultRetention.Mode != tt.expectedResult.Rule.DefaultRetention.Mode {
								t.Errorf("Expected DefaultRetention Mode %s, got %s", tt.expectedResult.Rule.DefaultRetention.Mode, result.Rule.DefaultRetention.Mode)
							}
							if result.Rule.DefaultRetention.Days != tt.expectedResult.Rule.DefaultRetention.Days {
								t.Errorf("Expected DefaultRetention Days %d, got %d", tt.expectedResult.Rule.DefaultRetention.Days, result.Rule.DefaultRetention.Days)
							}
						}
					}
				}
			}
		})
	}
}

func TestValidateObjectLockConfiguration(t *testing.T) {
	tests := []struct {
		name        string
		config      *ObjectLockConfiguration
		expectError bool
		errorMsg    string
	}{
		{
			name: "Valid config with ObjectLockEnabled only",
			config: &ObjectLockConfiguration{
				ObjectLockEnabled: "Enabled",
			},
			expectError: false,
		},
		{
			name: "Missing ObjectLockEnabled",
			config: &ObjectLockConfiguration{
				ObjectLockEnabled: "",
			},
			expectError: true,
			errorMsg:    "object lock configuration must specify ObjectLockEnabled",
		},
		{
			name: "Valid config with rule and days",
			config: &ObjectLockConfiguration{
				ObjectLockEnabled: "Enabled",
				Rule: &ObjectLockRule{
					DefaultRetention: &DefaultRetention{
						Mode: "GOVERNANCE",
						Days: 30,
					},
				},
			},
			expectError: false,
		},
		{
			name: "Valid config with rule and years",
			config: &ObjectLockConfiguration{
				ObjectLockEnabled: "Enabled",
				Rule: &ObjectLockRule{
					DefaultRetention: &DefaultRetention{
						Mode:  "COMPLIANCE",
						Years: 1,
					},
				},
			},
			expectError: false,
		},
		{
			name: "Invalid ObjectLockEnabled value",
			config: &ObjectLockConfiguration{
				ObjectLockEnabled: "InvalidValue",
			},
			expectError: true,
			errorMsg:    "invalid object lock enabled value",
		},
		{
			name: "Invalid rule - missing mode",
			config: &ObjectLockConfiguration{
				ObjectLockEnabled: "Enabled",
				Rule: &ObjectLockRule{
					DefaultRetention: &DefaultRetention{
						Days: 30,
					},
				},
			},
			expectError: true,
			errorMsg:    "default retention must specify Mode",
		},
		{
			name: "Invalid rule - both days and years",
			config: &ObjectLockConfiguration{
				ObjectLockEnabled: "Enabled",
				Rule: &ObjectLockRule{
					DefaultRetention: &DefaultRetention{
						Mode:  "GOVERNANCE",
						Days:  30,
						Years: 1,
					},
				},
			},
			expectError: true,
			errorMsg:    "default retention cannot specify both Days and Years",
		},
		{
			name: "Invalid rule - neither days nor years",
			config: &ObjectLockConfiguration{
				ObjectLockEnabled: "Enabled",
				Rule: &ObjectLockRule{
					DefaultRetention: &DefaultRetention{
						Mode: "GOVERNANCE",
					},
				},
			},
			expectError: true,
			errorMsg:    "default retention must specify either Days or Years",
		},
		{
			name: "Invalid rule - invalid mode",
			config: &ObjectLockConfiguration{
				ObjectLockEnabled: "Enabled",
				Rule: &ObjectLockRule{
					DefaultRetention: &DefaultRetention{
						Mode: "INVALID_MODE",
						Days: 30,
					},
				},
			},
			expectError: true,
			errorMsg:    "invalid default retention mode",
		},
		{
			name: "Invalid rule - days out of range",
			config: &ObjectLockConfiguration{
				ObjectLockEnabled: "Enabled",
				Rule: &ObjectLockRule{
					DefaultRetention: &DefaultRetention{
						Mode: "GOVERNANCE",
						Days: 50000,
					},
				},
			},
			expectError: true,
			errorMsg:    fmt.Sprintf("default retention days must be between 0 and %d", MaxRetentionDays),
		},
		{
			name: "Invalid rule - years out of range",
			config: &ObjectLockConfiguration{
				ObjectLockEnabled: "Enabled",
				Rule: &ObjectLockRule{
					DefaultRetention: &DefaultRetention{
						Mode:  "GOVERNANCE",
						Years: 200,
					},
				},
			},
			expectError: true,
			errorMsg:    fmt.Sprintf("default retention years must be between 0 and %d", MaxRetentionYears),
		},
		{
			name: "Invalid rule - missing DefaultRetention",
			config: &ObjectLockConfiguration{
				ObjectLockEnabled: "Enabled",
				Rule: &ObjectLockRule{
					DefaultRetention: nil,
				},
			},
			expectError: true,
			errorMsg:    "rule configuration must specify DefaultRetention",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateObjectLockConfiguration(tt.config)

			if tt.expectError {
				if err == nil {
					t.Errorf("Expected error but got none")
				} else if !strings.Contains(err.Error(), tt.errorMsg) {
					t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err)
				}
			} else {
				if err != nil {
					t.Errorf("Unexpected error: %v", err)
				}
			}
		})
	}
}

func TestValidateDefaultRetention(t *testing.T) {
	tests := []struct {
		name        string
		retention   *DefaultRetention
		expectError bool
		errorMsg    string
	}{
		{
			name: "Valid retention with days",
			retention: &DefaultRetention{
				Mode: "GOVERNANCE",
				Days: 30,
			},
			expectError: false,
		},
		{
			name: "Valid retention with years",
			retention: &DefaultRetention{
				Mode:  "COMPLIANCE",
				Years: 1,
			},
			expectError: false,
		},
		{
			name: "Missing mode",
			retention: &DefaultRetention{
				Days: 30,
			},
			expectError: true,
			errorMsg:    "default retention must specify Mode",
		},
		{
			name: "Invalid mode",
			retention: &DefaultRetention{
				Mode: "INVALID",
				Days: 30,
			},
			expectError: true,
			errorMsg:    "invalid default retention mode",
		},
		{
			name: "Both days and years specified",
			retention: &DefaultRetention{
				Mode:  "GOVERNANCE",
				Days:  30,
				Years: 1,
			},
			expectError: true,
			errorMsg:    "default retention cannot specify both Days and Years",
		},
		{
			name: "Neither days nor years specified",
			retention: &DefaultRetention{
				Mode: "GOVERNANCE",
			},
			expectError: true,
			errorMsg:    "default retention must specify either Days or Years",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateDefaultRetention(tt.retention)

			if tt.expectError {
				if err == nil {
					t.Errorf("Expected error but got none")
				} else if !strings.Contains(err.Error(), tt.errorMsg) {
					t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err)
				}
			} else {
				if err != nil {
					t.Errorf("Unexpected error: %v", err)
				}
			}
		})
	}
}

// Helper function to create a time pointer
func timePtr(t time.Time) *time.Time {
	return &t
}

@@ -206,11 +206,13 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
	bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectRetentionHandler, ACTION_WRITE)), "PUT")).Queries("retention", "")
	// PutObjectLegalHold
	bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLegalHoldHandler, ACTION_WRITE)), "PUT")).Queries("legal-hold", "")
	// PutObjectLockConfiguration
	bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("object-lock", "")

	// GetObjectACL
	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectAclHandler, ACTION_READ_ACP)), "GET")).Queries("acl", "")
	// GetObjectRetention
	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectRetentionHandler, ACTION_READ)), "GET")).Queries("retention", "")
	// GetObjectLegalHold
	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectLegalHoldHandler, ACTION_READ)), "GET")).Queries("legal-hold", "")

	// objects with query

@@ -272,6 +274,10 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
	bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketVersioningHandler, ACTION_READ)), "GET")).Queries("versioning", "")
	bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketVersioningHandler, ACTION_WRITE)), "PUT")).Queries("versioning", "")

	// GetObjectLockConfiguration / PutObjectLockConfiguration (bucket-level operations)
	bucket.Methods(http.MethodGet).Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectLockConfigurationHandler, ACTION_READ)), "GET")).Queries("object-lock", "")
	bucket.Methods(http.MethodPut).Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("object-lock", "")

	// GetBucketTagging
	bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketTaggingHandler, ACTION_TAGGING)), "GET")).Queries("tagging", "")
	bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketTaggingHandler, ACTION_TAGGING)), "PUT")).Queries("tagging", "")

@@ -110,6 +110,8 @@ const (
	OwnershipControlsNotFoundError
	ErrNoSuchTagSet
	ErrNoSuchObjectLockConfiguration
	ErrNoSuchObjectLegalHold
)

// Error message constants for checksum validation

@@ -197,6 +199,16 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "The TagSet does not exist",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNoSuchObjectLockConfiguration: {
		Code:           "NoSuchObjectLockConfiguration",
		Description:    "The specified object does not have an ObjectLock configuration",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNoSuchObjectLegalHold: {
		Code:           "NoSuchObjectLegalHold",
		Description:    "The specified object does not have a legal hold configuration",
		HTTPStatusCode: http.StatusNotFound,
	},
	ErrNoSuchCORSConfiguration: {
		Code:           "NoSuchCORSConfiguration",
		Description:    "The CORS configuration does not exist",