implement PutObjectRetention and WORM (#6969)

* implement PutObjectRetention and WORM

* Update s3_worm_integration_test.go

* avoid previous buckets

* Update s3-versioning-tests.yml

* address comments

* address comments

* rename to ExtObjectLockModeKey

* only checkObjectLockPermissions if versioningEnabled

* address comments

* comments

* Revert "comments"

This reverts commit 6736434176.

* Update s3api_object_handlers_skip.go

* Update s3api_object_retention_test.go

* add version id to ObjectIdentifier

* address comments

* add comments

* Add proper error logging for timestamp parsing failures

* address comments

* add version id to the error

* Update weed/s3api/s3api_object_retention_test.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/s3api/s3api_object_retention.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* constants

* fix comments

* address comments

* address comment

* refactor out handleObjectLockAvailabilityCheck

* errors.Is ErrBucketNotFound

* better error checking

* address comments

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Author: Chris Lu
Date: 2025-07-12 21:58:55 -07:00
Committed by: GitHub
Parent: 687a6a6c1d
Commit: 1549ee2e15
18 changed files with 3844 additions and 43 deletions

test/s3/retention/Makefile (new file, 360 lines)

@@ -0,0 +1,360 @@
# S3 API Retention Test Makefile
# This Makefile provides comprehensive targets for running S3 retention tests
.PHONY: help build-weed setup-server start-server stop-server test-retention test-retention-quick test-retention-comprehensive test-retention-worm test-all clean logs check-deps
# Configuration
WEED_BINARY := ../../../weed/weed_binary
S3_PORT := 8333
MASTER_PORT := 9333
VOLUME_PORT := 8080
FILER_PORT := 8888
TEST_TIMEOUT := 15m
TEST_PATTERN := TestRetention
# Default target
help:
@echo "S3 API Retention Test Makefile"
@echo ""
@echo "Available targets:"
@echo " help - Show this help message"
@echo " build-weed - Build the SeaweedFS binary"
@echo " check-deps - Check dependencies and build binary if needed"
@echo " start-server - Start SeaweedFS server for testing"
@echo " start-server-simple - Start server without process cleanup (for CI)"
@echo " stop-server - Stop SeaweedFS server"
@echo " test-retention - Run all retention tests"
@echo " test-retention-quick - Run core retention tests only"
@echo " test-retention-simple - Run tests without server management"
@echo " test-retention-comprehensive - Run comprehensive retention tests"
@echo " test-retention-worm - Run WORM integration tests"
@echo " test-all - Run all S3 API retention tests"
@echo " test-with-server - Start server, run tests, stop server"
@echo " logs - Show server logs"
@echo " clean - Clean up test artifacts and stop server"
@echo " health-check - Check if server is accessible"
@echo ""
@echo "Configuration:"
@echo " S3_PORT=${S3_PORT}"
@echo " TEST_TIMEOUT=${TEST_TIMEOUT}"
# Build the SeaweedFS binary
build-weed:
@echo "Building SeaweedFS binary..."
@cd ../../../weed && go build -o weed_binary .
@chmod +x $(WEED_BINARY)
@echo "✅ SeaweedFS binary built at $(WEED_BINARY)"
check-deps: build-weed
@echo "Checking dependencies..."
@echo "🔍 DEBUG: Checking Go installation..."
@command -v go >/dev/null 2>&1 || (echo "Go is required but not installed" && exit 1)
@echo "🔍 DEBUG: Go version: $$(go version)"
@echo "🔍 DEBUG: Checking binary at $(WEED_BINARY)..."
@test -f $(WEED_BINARY) || (echo "SeaweedFS binary not found at $(WEED_BINARY)" && exit 1)
@echo "🔍 DEBUG: Binary size: $$(ls -lh $(WEED_BINARY) | awk '{print $$5}')"
@echo "🔍 DEBUG: Binary permissions: $$(ls -la $(WEED_BINARY) | awk '{print $$1}')"
@echo "🔍 DEBUG: Checking Go module dependencies..."
@go list -m github.com/aws/aws-sdk-go-v2 >/dev/null 2>&1 || (echo "AWS SDK Go v2 not found. Run 'go mod tidy'." && exit 1)
@go list -m github.com/stretchr/testify >/dev/null 2>&1 || (echo "Testify not found. Run 'go mod tidy'." && exit 1)
@echo "✅ All dependencies are available"
# Start SeaweedFS server for testing
start-server: check-deps
@echo "Starting SeaweedFS server..."
@echo "🔍 DEBUG: Current working directory: $$(pwd)"
@echo "🔍 DEBUG: Checking for existing weed processes..."
@ps aux | grep weed | grep -v grep || echo "No existing weed processes found"
@echo "🔍 DEBUG: Cleaning up any existing PID file..."
@rm -f weed-server.pid
@echo "🔍 DEBUG: Checking for port conflicts..."
@if netstat -tlnp 2>/dev/null | grep $(S3_PORT) >/dev/null; then \
echo "⚠️ Port $(S3_PORT) is already in use, trying to find the process..."; \
netstat -tlnp 2>/dev/null | grep $(S3_PORT) || true; \
else \
echo "✅ Port $(S3_PORT) is available"; \
fi
@echo "🔍 DEBUG: Checking binary at $(WEED_BINARY)"
@ls -la $(WEED_BINARY) || (echo "❌ Binary not found!" && exit 1)
@echo "🔍 DEBUG: Checking config file at ../../../docker/compose/s3.json"
@ls -la ../../../docker/compose/s3.json || echo "⚠️ Config file not found, continuing without it"
@echo "🔍 DEBUG: Creating volume directory..."
@mkdir -p ./test-volume-data
@echo "🔍 DEBUG: Launching SeaweedFS server in background..."
@echo "🔍 DEBUG: Command: $(WEED_BINARY) server -debug -s3 -s3.port=$(S3_PORT) -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../../../docker/compose/s3.json -filer -filer.maxMB=64 -master.volumeSizeLimitMB=50 -volume.max=100 -dir=./test-volume-data -volume.preStopSeconds=1 -metricsPort=9324"
@$(WEED_BINARY) server \
-debug \
-s3 \
-s3.port=$(S3_PORT) \
-s3.allowEmptyFolder=false \
-s3.allowDeleteBucketNotEmpty=true \
-s3.config=../../../docker/compose/s3.json \
-filer \
-filer.maxMB=64 \
-master.volumeSizeLimitMB=50 \
-volume.max=100 \
-dir=./test-volume-data \
-volume.preStopSeconds=1 \
-metricsPort=9324 \
> weed-test.log 2>&1 & echo $$! > weed-server.pid
@echo "🔍 DEBUG: Server PID: $$(cat weed-server.pid 2>/dev/null || echo 'PID file not found')"
@echo "🔍 DEBUG: Checking if PID is still running..."
@sleep 2
@if [ -f weed-server.pid ]; then \
SERVER_PID=$$(cat weed-server.pid); \
ps -p $$SERVER_PID || echo "⚠️ Server PID $$SERVER_PID not found after 2 seconds"; \
else \
echo "⚠️ PID file not found"; \
fi
@echo "🔍 DEBUG: Waiting for server to start (up to 90 seconds)..."
@for i in $$(seq 1 90); do \
echo "🔍 DEBUG: Attempt $$i/90 - checking port $(S3_PORT)"; \
if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \
echo "✅ SeaweedFS server started successfully on port $(S3_PORT) after $$i seconds"; \
exit 0; \
fi; \
if [ $$i -eq 5 ]; then \
echo "🔍 DEBUG: After 5 seconds, checking process and logs..."; \
ps aux | grep weed | grep -v grep || echo "No weed processes found"; \
if [ -f weed-test.log ]; then \
echo "=== First server logs ==="; \
head -20 weed-test.log; \
fi; \
fi; \
if [ $$i -eq 15 ]; then \
echo "🔍 DEBUG: After 15 seconds, checking port bindings..."; \
netstat -tlnp 2>/dev/null | grep $(S3_PORT) || echo "Port $(S3_PORT) not bound"; \
netstat -tlnp 2>/dev/null | grep 9333 || echo "Port 9333 not bound"; \
netstat -tlnp 2>/dev/null | grep 8080 || echo "Port 8080 not bound"; \
fi; \
if [ $$i -eq 30 ]; then \
echo "⚠️ Server taking longer than expected (30s), checking logs..."; \
if [ -f weed-test.log ]; then \
echo "=== Recent server logs ==="; \
tail -20 weed-test.log; \
fi; \
fi; \
sleep 1; \
done; \
echo "❌ Server failed to start within 90 seconds"; \
echo "🔍 DEBUG: Final process check:"; \
ps aux | grep weed | grep -v grep || echo "No weed processes found"; \
echo "🔍 DEBUG: Final port check:"; \
netstat -tlnp 2>/dev/null | grep -E "(8333|9333|8080)" || echo "No ports bound"; \
echo "=== Full server logs ==="; \
if [ -f weed-test.log ]; then \
cat weed-test.log; \
else \
echo "No log file found"; \
fi; \
exit 1
# Stop SeaweedFS server
stop-server:
@echo "Stopping SeaweedFS server..."
@if [ -f weed-server.pid ]; then \
SERVER_PID=$$(cat weed-server.pid); \
echo "Killing server PID $$SERVER_PID"; \
if ps -p $$SERVER_PID >/dev/null 2>&1; then \
kill -TERM $$SERVER_PID 2>/dev/null || true; \
sleep 2; \
if ps -p $$SERVER_PID >/dev/null 2>&1; then \
echo "Process still running, sending KILL signal..."; \
kill -KILL $$SERVER_PID 2>/dev/null || true; \
sleep 1; \
fi; \
else \
echo "Process $$SERVER_PID not found (already stopped)"; \
fi; \
rm -f weed-server.pid; \
else \
echo "No PID file found, checking for running processes..."; \
echo "⚠️ Skipping automatic process cleanup to avoid CI issues"; \
echo "Note: Any remaining weed processes should be cleaned up by the CI environment"; \
fi
@echo "✅ SeaweedFS server stopped"
# Show server logs
logs:
@if test -f weed-test.log; then \
echo "=== SeaweedFS Server Logs ==="; \
tail -f weed-test.log; \
else \
echo "No log file found. Server may not be running."; \
fi
# Core retention tests (basic functionality)
test-retention-quick: check-deps
@echo "Running core S3 retention tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasicRetentionWorkflow|TestRetentionModeCompliance|TestLegalHoldWorkflow" .
@echo "✅ Core retention tests completed"
# All retention tests (comprehensive)
test-retention: check-deps
@echo "Running all S3 retention tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" .
@echo "✅ All retention tests completed"
# WORM integration tests
test-retention-worm: check-deps
@echo "Running WORM integration tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestWORM|TestRetentionExtendedAttributes|TestRetentionConcurrentOperations" .
@echo "✅ WORM integration tests completed"
# Comprehensive retention tests (all features)
test-retention-comprehensive: check-deps
@echo "Running comprehensive S3 retention tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetention|TestObjectLock|TestLegalHold|TestWORM" .
@echo "✅ Comprehensive retention tests completed"
# All tests without server management
test-retention-simple: check-deps
@echo "Running retention tests (assuming server is already running)..."
@go test -v -timeout=$(TEST_TIMEOUT) .
@echo "✅ All retention tests completed"
# Start server, run tests, stop server
test-with-server: start-server
@echo "Running retention tests with managed server..."
@sleep 5 # Give server time to fully start
@make test-retention-comprehensive || (echo "Tests failed, stopping server..." && make stop-server && exit 1)
@make stop-server
@echo "✅ All tests completed with managed server"
# Health check
health-check:
@echo "Checking server health..."
@if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \
echo "✅ Server is accessible on port $(S3_PORT)"; \
else \
echo "❌ Server is not accessible on port $(S3_PORT)"; \
exit 1; \
fi
# Clean up
clean:
@echo "Cleaning up test artifacts..."
@make stop-server
@rm -f weed-test.log
@rm -f weed-server.pid
@rm -rf ./test-volume-data
@echo "✅ Cleanup completed"
# Individual test targets for specific functionality
test-basic-retention:
@echo "Running basic retention tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasicRetentionWorkflow" .
test-compliance-retention:
@echo "Running compliance retention tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionModeCompliance" .
test-legal-hold:
@echo "Running legal hold tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestLegalHoldWorkflow" .
test-object-lock-config:
@echo "Running object lock configuration tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestObjectLockConfiguration" .
test-retention-versions:
@echo "Running retention with versions tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionWithVersions" .
test-retention-combination:
@echo "Running retention and legal hold combination tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionAndLegalHoldCombination" .
test-expired-retention:
@echo "Running expired retention tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestExpiredRetention" .
test-retention-errors:
@echo "Running retention error case tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionErrorCases" .
# WORM-specific test targets
test-worm-integration:
@echo "Running WORM integration tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestWORMRetentionIntegration" .
test-worm-legacy:
@echo "Running WORM legacy compatibility tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestWORMLegacyCompatibility" .
test-retention-overwrite:
@echo "Running retention overwrite protection tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionOverwriteProtection" .
test-retention-bulk:
@echo "Running retention bulk operations tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionBulkOperations" .
test-retention-multipart:
@echo "Running retention multipart upload tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionWithMultipartUpload" .
test-retention-extended-attrs:
@echo "Running retention extended attributes tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionExtendedAttributes" .
test-retention-defaults:
@echo "Running retention bucket defaults tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionBucketDefaults" .
test-retention-concurrent:
@echo "Running retention concurrent operations tests..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionConcurrentOperations" .
# Development targets
dev-start: start-server
@echo "Development server started. Access S3 API at http://localhost:$(S3_PORT)"
@echo "To stop: make stop-server"
dev-test: check-deps
@echo "Running tests in development mode..."
@go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasicRetentionWorkflow" .
# CI targets
ci-test: check-deps
@echo "Running tests in CI mode..."
@go test -v -timeout=$(TEST_TIMEOUT) -race .
# All targets
test-all: test-retention test-retention-worm
@echo "✅ All S3 retention tests completed"
# Benchmark targets
benchmark-retention:
@echo "Running retention performance benchmarks..."
@go test -v -timeout=$(TEST_TIMEOUT) -bench=. -benchmem .
# Coverage targets
coverage:
@echo "Running tests with coverage..."
@go test -v -timeout=$(TEST_TIMEOUT) -coverprofile=coverage.out .
@go tool cover -html=coverage.out -o coverage.html
@echo "Coverage report generated: coverage.html"
# Format and lint
fmt:
@echo "Formatting Go code..."
@go fmt .
lint:
@echo "Running linter..."
@golint . || echo "golint not available, skipping..."
# Install dependencies for development
install-deps:
@echo "Installing Go dependencies..."
@go mod tidy
@go mod download
# Show current configuration
show-config:
@echo "Current configuration:"
@echo " WEED_BINARY: $(WEED_BINARY)"
@echo " S3_PORT: $(S3_PORT)"
@echo " TEST_TIMEOUT: $(TEST_TIMEOUT)"
@echo " TEST_PATTERN: $(TEST_PATTERN)"

test/s3/retention/README.md (new file, 264 lines)

@@ -0,0 +1,264 @@
# SeaweedFS S3 Object Retention Tests
This directory contains comprehensive tests for SeaweedFS S3 Object Retention functionality, including Object Lock, Legal Hold, and WORM (Write Once Read Many) capabilities.
## Overview
The test suite validates AWS S3-compatible object retention features including:
- **Object Retention**: GOVERNANCE and COMPLIANCE modes with retain-until-date
- **Legal Hold**: Independent protection that can be applied/removed
- **Object Lock Configuration**: Bucket-level default retention policies
- **WORM Integration**: Compatibility with legacy WORM functionality
- **Version-specific Retention**: Different retention policies per object version
- **Enforcement**: Protection against deletion and overwriting
## Test Files
- `s3_retention_test.go` - Core retention functionality tests
- `s3_worm_integration_test.go` - WORM integration and advanced scenarios
- `test_config.json` - Test configuration (endpoints, credentials)
- `Makefile` - Comprehensive test automation
- `go.mod` - Go module dependencies
## Prerequisites
- Go 1.21 or later
- SeaweedFS binary built (`make build-weed`)
- AWS SDK Go v2
- Testify testing framework
## Quick Start
### 1. Build and Start Server
```bash
# Build SeaweedFS and start test server
make start-server
```
### 2. Run Tests
```bash
# Run core retention tests
make test-retention-quick
# Run all retention tests
make test-retention
# Run WORM integration tests
make test-retention-worm
# Run all tests with managed server
make test-with-server
```
### 3. Cleanup
```bash
make clean
```
## Test Categories
### Core Retention Tests
- `TestBasicRetentionWorkflow` - Basic GOVERNANCE mode retention
- `TestRetentionModeCompliance` - COMPLIANCE mode (immutable)
- `TestLegalHoldWorkflow` - Legal hold on/off functionality
- `TestObjectLockConfiguration` - Bucket object lock settings
### Advanced Tests
- `TestRetentionWithVersions` - Version-specific retention policies
- `TestRetentionAndLegalHoldCombination` - Multiple protection types
- `TestExpiredRetention` - Post-expiration behavior
- `TestRetentionErrorCases` - Error handling and edge cases
### WORM Integration Tests
- `TestWORMRetentionIntegration` - New retention + legacy WORM
- `TestWORMLegacyCompatibility` - Backward compatibility
- `TestRetentionOverwriteProtection` - Prevent overwrites
- `TestRetentionBulkOperations` - Bulk delete with retention
- `TestRetentionWithMultipartUpload` - Multipart upload retention
- `TestRetentionExtendedAttributes` - Extended attribute storage
- `TestRetentionBucketDefaults` - Default retention application
- `TestRetentionConcurrentOperations` - Concurrent operation safety
## Individual Test Targets
Run specific test categories:
```bash
# Basic functionality
make test-basic-retention
make test-compliance-retention
make test-legal-hold
# Advanced features
make test-retention-versions
make test-retention-combination
make test-expired-retention
# WORM integration
make test-worm-integration
make test-worm-legacy
make test-retention-bulk
```
## Configuration
### Server Configuration
The tests use these default settings:
- S3 Port: 8333
- Test timeout: 15 minutes
- Volume directory: `./test-volume-data`
### Test Configuration (`test_config.json`)
```json
{
"endpoint": "http://localhost:8333",
"access_key": "some_access_key1",
"secret_key": "some_secret_key1",
"region": "us-east-1",
"bucket_prefix": "test-retention-",
"use_ssl": false,
"skip_verify_ssl": true
}
```
## Expected Behavior
### GOVERNANCE Mode
- Objects protected until retain-until-date
- Can be bypassed with `x-amz-bypass-governance-retention` header
- Supports time extension (not reduction)
### COMPLIANCE Mode
- Objects immutably protected until retain-until-date
- Cannot be bypassed or shortened
- Strictest protection level
### Legal Hold
- Independent ON/OFF protection
- Can coexist with retention policies
- Must be explicitly removed to allow deletion
### Version Support
- Each object version can have individual retention
- Applies to both versioned and non-versioned buckets
- Version-specific retention retrieval
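The sketch below is not part of the test suite; it is a minimal illustration of how the GOVERNANCE behavior described above maps onto the AWS SDK Go v2 calls these tests use. The package and function names are illustrative, and the client is assumed to be configured as in `getS3Client` (test endpoint, path-style addressing) against a versioning-enabled bucket.

```go
package example // illustrative package name

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// governanceBypassDemo locks one object version under GOVERNANCE, shows that
// a plain delete is rejected, then deletes with the bypass flag. Under
// COMPLIANCE mode the bypassed delete would fail as well.
func governanceBypassDemo(ctx context.Context, client *s3.Client, bucket, key string, versionId *string) error {
	// Protect the version for 24 hours in GOVERNANCE mode.
	if _, err := client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{
		Bucket:    aws.String(bucket),
		Key:       aws.String(key),
		VersionId: versionId,
		Retention: &types.ObjectLockRetention{
			Mode:            types.ObjectLockRetentionModeGovernance,
			RetainUntilDate: aws.Time(time.Now().Add(24 * time.Hour)),
		},
	}); err != nil {
		return err
	}

	// A plain delete must be rejected while retention is in effect.
	if _, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket:    aws.String(bucket),
		Key:       aws.String(key),
		VersionId: versionId,
	}); err == nil {
		return fmt.Errorf("delete unexpectedly succeeded despite GOVERNANCE retention")
	}

	// GOVERNANCE retention can be bypassed explicitly with the bypass flag
	// (the x-amz-bypass-governance-retention header on the wire).
	_, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket:                    aws.String(bucket),
		Key:                       aws.String(key),
		VersionId:                 versionId,
		BypassGovernanceRetention: true,
	})
	return err
}
```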
## Development
### Running in Development Mode
```bash
# Start server for development
make dev-start
# Run quick test
make dev-test
```
### Code Quality
```bash
# Format code
make fmt
# Run linter
make lint
# Generate coverage report
make coverage
```
### Performance Testing
```bash
# Run benchmarks
make benchmark-retention
```
## Troubleshooting
### Server Won't Start
```bash
# Check if port is in use
netstat -tlnp | grep 8333
# View server logs
make logs
# Force cleanup
make clean
```
### Test Failures
```bash
# Run with verbose output
go test -v -timeout=15m .
# Run specific test
go test -v -run TestBasicRetentionWorkflow .
# Check server health
make health-check
```
### Dependencies
```bash
# Install/update dependencies
make install-deps
# Check dependency status
make check-deps
```
## Integration with SeaweedFS
These tests validate the retention implementation in:
- `weed/s3api/s3api_object_retention.go` - Core retention logic
- `weed/s3api/s3api_object_handlers_retention.go` - HTTP handlers
- `weed/s3api/s3_constants/extend_key.go` - Extended attribute keys
- `weed/s3api/s3err/s3api_errors.go` - Error definitions
- `weed/s3api/s3api_object_handlers_delete.go` - Deletion enforcement
- `weed/s3api/s3api_object_handlers_put.go` - Upload enforcement
## AWS CLI Compatibility
The retention implementation supports standard AWS CLI commands:
```bash
# Set object retention
aws s3api put-object-retention \
--bucket mybucket \
--key myobject \
--retention Mode=GOVERNANCE,RetainUntilDate=2024-12-31T23:59:59Z
# Get object retention
aws s3api get-object-retention \
--bucket mybucket \
--key myobject
# Set legal hold
aws s3api put-object-legal-hold \
--bucket mybucket \
--key myobject \
--legal-hold Status=ON
# Configure bucket object lock
aws s3api put-object-lock-configuration \
--bucket mybucket \
--object-lock-configuration ObjectLockEnabled=Enabled,Rule='{DefaultRetention={Mode=GOVERNANCE,Days=30}}'
```
## Contributing
When adding new retention tests:
1. Follow existing test patterns (see the skeleton after this list)
2. Use descriptive test names
3. Include both positive and negative test cases
4. Test error conditions
5. Update this README with new test descriptions
6. Add appropriate Makefile targets for new test categories
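For example, a new test in this directory can reuse the shared helpers from `s3_retention_test.go`; the test name below is a hypothetical skeleton, not an existing test:

```go
package s3api

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestExampleRetentionScenario is a hypothetical skeleton showing the
// structure new tests are expected to follow.
func TestExampleRetentionScenario(t *testing.T) {
	client := getS3Client(t)
	bucketName := getNewBucketName()

	// Isolated, versioned bucket with guaranteed cleanup.
	createBucket(t, client, bucketName)
	defer deleteBucket(t, client, bucketName)
	enableVersioning(t, client, bucketName)

	// Arrange: create an object; apply retention or legal hold as needed.
	putResp := putObject(t, client, bucketName, "example-object", "example content")
	require.NotNil(t, putResp.VersionId)

	// Act + assert: exercise both the allowed and the rejected code paths,
	// including error cases; deleteBucket already bypasses GOVERNANCE
	// retention during cleanup.
}
```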
## References
- [AWS S3 Object Lock Documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html)
- [AWS S3 API Reference - Object Retention](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html)
- [SeaweedFS S3 API Documentation](https://github.com/seaweedfs/seaweedfs/wiki/Amazon-S3-API)

test/s3/retention/go.mod (new file, 31 lines)

@@ -0,0 +1,31 @@
module github.com/seaweedfs/seaweedfs/test/s3/retention
go 1.21
require (
github.com/aws/aws-sdk-go-v2 v1.21.2
github.com/aws/aws-sdk-go-v2/config v1.18.45
github.com/aws/aws-sdk-go-v2/credentials v1.13.43
github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0
github.com/stretchr/testify v1.8.4
)
require (
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect
github.com/aws/smithy-go v1.15.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

test/s3/retention/go.sum (new file, 62 lines)

@@ -0,0 +1,62 @@
github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M=
github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA=
github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM=
github.com/aws/aws-sdk-go-v2/config v1.18.45 h1:Aka9bI7n8ysuwPeFdm77nfbyHCAKQ3z9ghB3S/38zes=
github.com/aws/aws-sdk-go-v2/config v1.18.45/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE=
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8=
github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 h1:nFBQlGtkbPzp/NjZLuFxRqmT91rLJkgvsEQs68h962Y=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7E0LDl8K9/G36gjlqca5iQbaqc=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4/go.mod h1:1PrKYwxTM+zjpw9Y41KFtoJCQrJ34Z47Y4VgVbfndjo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6 h1:wmGLw2i8ZTlHLw7a9ULGfQbuccw8uIiNr6sol5bFzc8=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6/go.mod h1:Q0Hq2X/NuL7z8b1Dww8rmOFl+jzusKEcyvkKspwdpyc=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14/go.mod h1:dDilntgHy9WnHXsh7dDtUPgHKEfTJIBUTHM8OWm0f/0=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15 h1:7R8uRYyXzdD71KWVCL78lJZltah6VVznXBazvKjfH58=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15/go.mod h1:26SQUPcTNgV1Tapwdt4a1rOsYRsnBsJHLMPoxK2b0d8=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36/go.mod h1:lGnOkH9NJATw0XEPcAknFBj3zzNTEGRHtSw+CwC1YTg=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38 h1:skaFGzv+3kA+v2BPKhuekeb1Hbb105+44r8ASC+q5SE=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38/go.mod h1:epIZoRSSbRIwLPJU5F+OldHhwZPBdpDeQkRdCeY3+00=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35/go.mod h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4/go.mod h1:LhTyt8J04LL+9cIt7pYJ5lbS/U98ZmXovLOR/4LUsk8=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6 h1:9ulSU5ClouoPIYhDQdg9tpl83d5Yb91PXTKK+17q+ow=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6/go.mod h1:lnc2taBsR9nTlz9meD+lhFZZ9EWY712QHrRflWpTcOA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0 h1:wl5dxN1NONhTDQD9uaEvNsDRX29cBmGED/nl0jkWlt4=
github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0/go.mod h1:rDGMZA7f4pbmTtPOk5v5UM2lmX6UAbRnMDJeDvnH7AM=
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k=
github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg=
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU=
github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ=
github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8=
github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

test/s3/retention/s3_retention_test.go (new file, 694 lines)

@@ -0,0 +1,694 @@
package s3api
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// S3TestConfig holds configuration for S3 tests
type S3TestConfig struct {
Endpoint string
AccessKey string
SecretKey string
Region string
BucketPrefix string
UseSSL bool
SkipVerifySSL bool
}
// Default test configuration - should match test_config.json
var defaultConfig = &S3TestConfig{
Endpoint: "http://localhost:8333", // Default SeaweedFS S3 port
AccessKey: "some_access_key1",
SecretKey: "some_secret_key1",
Region: "us-east-1",
BucketPrefix: "test-retention-",
UseSSL: false,
SkipVerifySSL: true,
}
// getS3Client creates an AWS S3 client for testing
func getS3Client(t *testing.T) *s3.Client {
cfg, err := config.LoadDefaultConfig(context.TODO(),
config.WithRegion(defaultConfig.Region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
defaultConfig.AccessKey,
defaultConfig.SecretKey,
"",
)),
config.WithEndpointResolverWithOptions(aws.EndpointResolverWithOptionsFunc(
func(service, region string, options ...interface{}) (aws.Endpoint, error) {
return aws.Endpoint{
URL: defaultConfig.Endpoint,
SigningRegion: defaultConfig.Region,
HostnameImmutable: true,
}, nil
})),
)
require.NoError(t, err)
return s3.NewFromConfig(cfg, func(o *s3.Options) {
o.UsePathStyle = true // Important for SeaweedFS
})
}
// getNewBucketName generates a unique bucket name
func getNewBucketName() string {
timestamp := time.Now().UnixNano()
return fmt.Sprintf("%s%d", defaultConfig.BucketPrefix, timestamp)
}
// createBucket creates a new bucket for testing
func createBucket(t *testing.T, client *s3.Client, bucketName string) {
_, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
}
// deleteBucket deletes a bucket and all its contents
func deleteBucket(t *testing.T, client *s3.Client, bucketName string) {
// First, try to delete all objects and versions
err := deleteAllObjectVersions(t, client, bucketName)
if err != nil {
t.Logf("Warning: failed to delete all object versions in first attempt: %v", err)
// Try once more in case of transient errors
time.Sleep(500 * time.Millisecond)
err = deleteAllObjectVersions(t, client, bucketName)
if err != nil {
t.Logf("Warning: failed to delete all object versions in second attempt: %v", err)
}
}
// Wait a bit for eventual consistency
time.Sleep(100 * time.Millisecond)
// Try to delete the bucket multiple times in case of eventual consistency issues
for retries := 0; retries < 3; retries++ {
_, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{
Bucket: aws.String(bucketName),
})
if err == nil {
t.Logf("Successfully deleted bucket %s", bucketName)
return
}
t.Logf("Warning: failed to delete bucket %s (attempt %d): %v", bucketName, retries+1, err)
if retries < 2 {
time.Sleep(200 * time.Millisecond)
}
}
}
// deleteAllObjectVersions deletes all object versions in a bucket
func deleteAllObjectVersions(t *testing.T, client *s3.Client, bucketName string) error {
// List all object versions
paginator := s3.NewListObjectVersionsPaginator(client, &s3.ListObjectVersionsInput{
Bucket: aws.String(bucketName),
})
for paginator.HasMorePages() {
page, err := paginator.NextPage(context.TODO())
if err != nil {
return err
}
var objectsToDelete []types.ObjectIdentifier
// Add versions - first try to remove retention/legal hold
for _, version := range page.Versions {
// Try to remove legal hold if present
_, err := client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: version.Key,
VersionId: version.VersionId,
LegalHold: &types.ObjectLockLegalHold{
Status: types.ObjectLockLegalHoldStatusOff,
},
})
if err != nil {
// Legal hold might not be set, ignore error
t.Logf("Note: could not remove legal hold for %s@%s: %v", *version.Key, *version.VersionId, err)
}
objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{
Key: version.Key,
VersionId: version.VersionId,
})
}
// Add delete markers
for _, deleteMarker := range page.DeleteMarkers {
objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{
Key: deleteMarker.Key,
VersionId: deleteMarker.VersionId,
})
}
// Delete objects in batches with bypass governance retention
if len(objectsToDelete) > 0 {
_, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
Bucket: aws.String(bucketName),
BypassGovernanceRetention: true,
Delete: &types.Delete{
Objects: objectsToDelete,
Quiet: true,
},
})
if err != nil {
t.Logf("Warning: batch delete failed, trying individual deletion: %v", err)
// Try individual deletion for each object
for _, obj := range objectsToDelete {
_, delErr := client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: obj.Key,
VersionId: obj.VersionId,
BypassGovernanceRetention: true,
})
if delErr != nil {
t.Logf("Warning: failed to delete object %s@%s: %v", *obj.Key, *obj.VersionId, delErr)
}
}
}
}
}
return nil
}
// enableVersioning enables versioning on a bucket
func enableVersioning(t *testing.T, client *s3.Client, bucketName string) {
_, err := client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
Bucket: aws.String(bucketName),
VersioningConfiguration: &types.VersioningConfiguration{
Status: types.BucketVersioningStatusEnabled,
},
})
require.NoError(t, err)
}
// putObject puts an object into a bucket
func putObject(t *testing.T, client *s3.Client, bucketName, key, content string) *s3.PutObjectOutput {
resp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Body: strings.NewReader(content),
})
require.NoError(t, err)
return resp
}
// cleanupAllTestBuckets cleans up any leftover test buckets
func cleanupAllTestBuckets(t *testing.T, client *s3.Client) {
// List all buckets
listResp, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
if err != nil {
t.Logf("Warning: failed to list buckets for cleanup: %v", err)
return
}
// Delete buckets that match our test prefix
for _, bucket := range listResp.Buckets {
if bucket.Name != nil && strings.HasPrefix(*bucket.Name, defaultConfig.BucketPrefix) {
t.Logf("Cleaning up leftover test bucket: %s", *bucket.Name)
deleteBucket(t, client, *bucket.Name)
}
}
}
// TestBasicRetentionWorkflow tests the basic retention functionality
func TestBasicRetentionWorkflow(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
// Enable versioning (required for retention)
enableVersioning(t, client, bucketName)
// Create object
key := "test-object"
content := "test content for retention"
putResp := putObject(t, client, bucketName, key, content)
require.NotNil(t, putResp.VersionId)
// Set retention with GOVERNANCE mode
retentionUntil := time.Now().Add(24 * time.Hour)
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeGovernance,
RetainUntilDate: aws.Time(retentionUntil),
},
})
require.NoError(t, err)
// Get retention and verify it was set correctly
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err)
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode)
assert.WithinDuration(t, retentionUntil, *retentionResp.Retention.RetainUntilDate, time.Second)
// Try to delete object without bypass - should fail
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.Error(t, err)
// Delete object with bypass governance - should succeed
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
BypassGovernanceRetention: true,
})
require.NoError(t, err)
}
// TestRetentionModeCompliance tests COMPLIANCE mode retention
func TestRetentionModeCompliance(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Create object
key := "compliance-test-object"
content := "compliance test content"
putResp := putObject(t, client, bucketName, key, content)
require.NotNil(t, putResp.VersionId)
// Set retention with COMPLIANCE mode
retentionUntil := time.Now().Add(1 * time.Hour)
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeCompliance,
RetainUntilDate: aws.Time(retentionUntil),
},
})
require.NoError(t, err)
// Get retention and verify
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err)
assert.Equal(t, types.ObjectLockRetentionModeCompliance, retentionResp.Retention.Mode)
// Try to delete object with bypass - should still fail (compliance mode)
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
BypassGovernanceRetention: true,
})
require.Error(t, err)
// Try to delete object without bypass - should also fail
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.Error(t, err)
}
// TestLegalHoldWorkflow tests legal hold functionality
func TestLegalHoldWorkflow(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Create object
key := "legal-hold-test-object"
content := "legal hold test content"
putResp := putObject(t, client, bucketName, key, content)
require.NotNil(t, putResp.VersionId)
// Set legal hold ON
_, err := client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
LegalHold: &types.ObjectLockLegalHold{
Status: types.ObjectLockLegalHoldStatusOn,
},
})
require.NoError(t, err)
// Get legal hold and verify
legalHoldResp, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err)
assert.Equal(t, types.ObjectLockLegalHoldStatusOn, legalHoldResp.LegalHold.Status)
// Try to delete object - should fail due to legal hold
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.Error(t, err)
// Remove legal hold
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
LegalHold: &types.ObjectLockLegalHold{
Status: types.ObjectLockLegalHoldStatusOff,
},
})
require.NoError(t, err)
// Verify legal hold is off
legalHoldResp, err = client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err)
assert.Equal(t, types.ObjectLockLegalHoldStatusOff, legalHoldResp.LegalHold.Status)
// Now delete should succeed
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err)
}
// TestObjectLockConfiguration tests bucket object lock configuration
func TestObjectLockConfiguration(t *testing.T) {
client := getS3Client(t)
// Use a more unique bucket name to avoid conflicts
bucketName := fmt.Sprintf("object-lock-config-%d-%d", time.Now().UnixNano(), time.Now().UnixMilli()%10000)
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Set object lock configuration
_, err := client.PutObjectLockConfiguration(context.TODO(), &s3.PutObjectLockConfigurationInput{
Bucket: aws.String(bucketName),
ObjectLockConfiguration: &types.ObjectLockConfiguration{
ObjectLockEnabled: types.ObjectLockEnabledEnabled,
Rule: &types.ObjectLockRule{
DefaultRetention: &types.DefaultRetention{
Mode: types.ObjectLockRetentionModeGovernance,
Days: 30,
},
},
},
})
if err != nil {
t.Logf("PutObjectLockConfiguration failed (may not be supported): %v", err)
t.Skip("Object lock configuration not supported, skipping test")
return
}
// Get object lock configuration and verify
configResp, err := client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
assert.Equal(t, types.ObjectLockEnabledEnabled, configResp.ObjectLockConfiguration.ObjectLockEnabled)
assert.Equal(t, types.ObjectLockRetentionModeGovernance, configResp.ObjectLockConfiguration.Rule.DefaultRetention.Mode)
assert.Equal(t, int32(30), configResp.ObjectLockConfiguration.Rule.DefaultRetention.Days)
}
// TestRetentionWithVersions tests retention with specific object versions
func TestRetentionWithVersions(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Create multiple versions of the same object
key := "versioned-retention-test"
content1 := "version 1 content"
content2 := "version 2 content"
putResp1 := putObject(t, client, bucketName, key, content1)
require.NotNil(t, putResp1.VersionId)
putResp2 := putObject(t, client, bucketName, key, content2)
require.NotNil(t, putResp2.VersionId)
// Set retention on first version only
retentionUntil := time.Now().Add(1 * time.Hour)
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
VersionId: putResp1.VersionId,
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeGovernance,
RetainUntilDate: aws.Time(retentionUntil),
},
})
require.NoError(t, err)
// Get retention for first version
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
VersionId: putResp1.VersionId,
})
require.NoError(t, err)
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode)
// Try to get retention for second version - should fail (no retention set)
_, err = client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
VersionId: putResp2.VersionId,
})
require.Error(t, err)
// Delete second version should succeed (no retention)
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
VersionId: putResp2.VersionId,
})
require.NoError(t, err)
// Delete first version should fail (has retention)
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
VersionId: putResp1.VersionId,
})
require.Error(t, err)
// Delete first version with bypass should succeed
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
VersionId: putResp1.VersionId,
BypassGovernanceRetention: true,
})
require.NoError(t, err)
}
// TestRetentionAndLegalHoldCombination tests retention and legal hold together
func TestRetentionAndLegalHoldCombination(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Create object
key := "combined-protection-test"
content := "combined protection test content"
putResp := putObject(t, client, bucketName, key, content)
require.NotNil(t, putResp.VersionId)
// Set both retention and legal hold
retentionUntil := time.Now().Add(1 * time.Hour)
// Set retention
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeGovernance,
RetainUntilDate: aws.Time(retentionUntil),
},
})
require.NoError(t, err)
// Set legal hold
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
LegalHold: &types.ObjectLockLegalHold{
Status: types.ObjectLockLegalHoldStatusOn,
},
})
require.NoError(t, err)
// Try to delete with bypass governance - should still fail due to legal hold
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
BypassGovernanceRetention: true,
})
require.Error(t, err)
// Remove legal hold
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
LegalHold: &types.ObjectLockLegalHold{
Status: types.ObjectLockLegalHoldStatusOff,
},
})
require.NoError(t, err)
// Now delete with bypass governance should succeed
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
BypassGovernanceRetention: true,
})
require.NoError(t, err)
}
// TestExpiredRetention tests that objects can be deleted after retention expires
func TestExpiredRetention(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Create object
key := "expired-retention-test"
content := "expired retention test content"
putResp := putObject(t, client, bucketName, key, content)
require.NotNil(t, putResp.VersionId)
// Set retention for a very short time (2 seconds)
retentionUntil := time.Now().Add(2 * time.Second)
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeGovernance,
RetainUntilDate: aws.Time(retentionUntil),
},
})
require.NoError(t, err)
// Try to delete immediately - should fail
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.Error(t, err)
// Wait for retention to expire
time.Sleep(3 * time.Second)
// Now delete should succeed
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err)
}
// TestRetentionErrorCases tests various error conditions
func TestRetentionErrorCases(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Test setting retention on non-existent object
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String("non-existent-key"),
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeGovernance,
RetainUntilDate: aws.Time(time.Now().Add(1 * time.Hour)),
},
})
require.Error(t, err)
// Test getting retention on non-existent object
_, err = client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String("non-existent-key"),
})
require.Error(t, err)
// Test setting legal hold on non-existent object
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String("non-existent-key"),
LegalHold: &types.ObjectLockLegalHold{
Status: types.ObjectLockLegalHoldStatusOn,
},
})
require.Error(t, err)
// Test getting legal hold on non-existent object
_, err = client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String("non-existent-key"),
})
require.Error(t, err)
// Test setting retention with past date
key := "retention-past-date-test"
content := "test content"
putObject(t, client, bucketName, key, content)
pastDate := time.Now().Add(-1 * time.Hour)
_, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeGovernance,
RetainUntilDate: aws.Time(pastDate),
},
})
require.Error(t, err)
}

test/s3/retention/s3_worm_integration_test.go (new file, 519 lines)

@@ -0,0 +1,519 @@
package s3api
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestWORMRetentionIntegration tests that both retention and legacy WORM work together
func TestWORMRetentionIntegration(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Create object
key := "worm-retention-integration-test"
content := "worm retention integration test content"
putResp := putObject(t, client, bucketName, key, content)
require.NotNil(t, putResp.VersionId)
// Set retention (new system)
retentionUntil := time.Now().Add(1 * time.Hour)
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeGovernance,
RetainUntilDate: aws.Time(retentionUntil),
},
})
require.NoError(t, err)
// Try to delete - should fail due to retention
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.Error(t, err)
// Delete with bypass should succeed
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
BypassGovernanceRetention: true,
})
require.NoError(t, err)
}
// TestWORMLegacyCompatibility tests that legacy WORM functionality still works
func TestWORMLegacyCompatibility(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Create object with legacy WORM headers (if supported)
key := "legacy-worm-test"
content := "legacy worm test content"
// Try to create object with legacy WORM TTL header
putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Body: strings.NewReader(content),
// Add legacy WORM headers if supported
Metadata: map[string]string{
"x-amz-meta-worm-ttl": fmt.Sprintf("%d", time.Now().Add(1*time.Hour).Unix()),
},
})
require.NoError(t, err)
require.NotNil(t, putResp.VersionId)
// Object should be created successfully
resp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err)
assert.NotNil(t, resp.Metadata)
}
// TestRetentionOverwriteProtection tests that retention prevents overwrites
func TestRetentionOverwriteProtection(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Create object
key := "overwrite-protection-test"
content := "original content"
putResp := putObject(t, client, bucketName, key, content)
require.NotNil(t, putResp.VersionId)
// Verify object exists before setting retention
_, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err, "Object should exist before setting retention")
// Set retention with specific version ID
retentionUntil := time.Now().Add(1 * time.Hour)
_, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
VersionId: putResp.VersionId,
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeGovernance,
RetainUntilDate: aws.Time(retentionUntil),
},
})
require.NoError(t, err)
// Try to overwrite object - should fail in non-versioned bucket context
content2 := "new content"
_, err = client.PutObject(context.TODO(), &s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Body: strings.NewReader(content2),
})
// Note: In a real scenario, this might fail or create a new version
// The actual behavior depends on the implementation
if err != nil {
t.Logf("Expected behavior: overwrite blocked due to retention: %v", err)
} else {
t.Logf("Overwrite allowed, likely created new version")
}
}
// TestRetentionBulkOperations tests retention with bulk operations
func TestRetentionBulkOperations(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Create multiple objects with retention
var objectsToDelete []types.ObjectIdentifier
retentionUntil := time.Now().Add(1 * time.Hour)
for i := 0; i < 3; i++ {
key := fmt.Sprintf("bulk-test-object-%d", i)
content := fmt.Sprintf("bulk test content %d", i)
putResp := putObject(t, client, bucketName, key, content)
require.NotNil(t, putResp.VersionId)
// Set retention on each object with version ID
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
VersionId: putResp.VersionId,
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeGovernance,
RetainUntilDate: aws.Time(retentionUntil),
},
})
require.NoError(t, err)
objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{
Key: aws.String(key),
VersionId: putResp.VersionId,
})
}
// Try bulk delete without bypass - should fail or have errors
deleteResp, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
Bucket: aws.String(bucketName),
Delete: &types.Delete{
Objects: objectsToDelete,
Quiet: false,
},
})
// Check if operation failed or returned errors for protected objects
if err != nil {
t.Logf("Expected: bulk delete failed due to retention: %v", err)
} else if deleteResp != nil && len(deleteResp.Errors) > 0 {
t.Logf("Expected: bulk delete returned %d errors due to retention", len(deleteResp.Errors))
for _, delErr := range deleteResp.Errors {
t.Logf("Delete error: %s - %s", *delErr.Code, *delErr.Message)
}
} else {
t.Logf("Warning: bulk delete succeeded - retention may not be enforced for bulk operations")
}
// Try bulk delete with bypass - should succeed
_, err = client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
Bucket: aws.String(bucketName),
BypassGovernanceRetention: true,
Delete: &types.Delete{
Objects: objectsToDelete,
Quiet: false,
},
})
if err != nil {
t.Logf("Bulk delete with bypass failed (may not be supported): %v", err)
} else {
t.Logf("Bulk delete with bypass succeeded")
}
}
// TestRetentionWithMultipartUpload tests retention with multipart uploads
func TestRetentionWithMultipartUpload(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Start multipart upload
key := "multipart-retention-test"
createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err)
uploadId := createResp.UploadId
// Upload a part
partContent := "This is a test part for multipart upload"
uploadResp, err := client.UploadPart(context.TODO(), &s3.UploadPartInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
PartNumber: 1,
UploadId: uploadId,
Body: strings.NewReader(partContent),
})
require.NoError(t, err)
// Complete multipart upload
completeResp, err := client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
UploadId: uploadId,
MultipartUpload: &types.CompletedMultipartUpload{
Parts: []types.CompletedPart{
{
ETag: uploadResp.ETag,
PartNumber: 1,
},
},
},
})
require.NoError(t, err)
// Add a small delay to give the completed object time to become visible
time.Sleep(500 * time.Millisecond)
// Verify object exists after multipart upload - retry if needed
var headErr error
for retries := 0; retries < 10; retries++ {
_, headErr = client.HeadObject(context.TODO(), &s3.HeadObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
if headErr == nil {
break
}
t.Logf("HeadObject attempt %d failed: %v", retries+1, headErr)
time.Sleep(200 * time.Millisecond)
}
if headErr != nil {
t.Logf("Object not found after multipart upload completion, checking if multipart upload is fully supported")
// Check if the object exists by trying to list it
listResp, listErr := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
Bucket: aws.String(bucketName),
Prefix: aws.String(key),
})
if listErr != nil || len(listResp.Contents) == 0 {
t.Skip("Multipart upload may not be fully supported, skipping test")
return
}
// If object exists in listing but not accessible via HeadObject, skip test
t.Skip("Object exists in listing but not accessible via HeadObject, multipart upload may not be fully supported")
return
}
require.NoError(t, headErr, "Object should exist after multipart upload")
// Set retention on the completed multipart object with version ID
retentionUntil := time.Now().Add(1 * time.Hour)
_, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
VersionId: completeResp.VersionId,
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeGovernance,
RetainUntilDate: aws.Time(retentionUntil),
},
})
require.NoError(t, err)
// Try to delete - should fail
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.Error(t, err)
}
// TestRetentionExtendedAttributes tests that retention uses extended attributes correctly
func TestRetentionExtendedAttributes(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Create object
key := "extended-attrs-test"
content := "extended attributes test content"
putResp := putObject(t, client, bucketName, key, content)
require.NotNil(t, putResp.VersionId)
// Set retention
retentionUntil := time.Now().Add(1 * time.Hour)
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
VersionId: putResp.VersionId,
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeGovernance,
RetainUntilDate: aws.Time(retentionUntil),
},
})
require.NoError(t, err)
// Set legal hold
_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
VersionId: putResp.VersionId,
LegalHold: &types.ObjectLockLegalHold{
Status: types.ObjectLockLegalHoldStatusOn,
},
})
require.NoError(t, err)
// Get object metadata to verify extended attributes are set
resp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err)
// Check that the object has metadata (may be empty in some implementations)
// Note: The actual metadata keys depend on the implementation
if len(resp.Metadata) > 0 {
t.Logf("Object metadata: %+v", resp.Metadata)
} else {
t.Logf("Object metadata: empty (extended attributes may be stored internally)")
}
// Verify retention can be retrieved
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err)
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode)
// Verify legal hold can be retrieved
legalHoldResp, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
require.NoError(t, err)
assert.Equal(t, types.ObjectLockLegalHoldStatusOn, legalHoldResp.LegalHold.Status)
}
// TestRetentionBucketDefaults tests object lock configuration defaults
func TestRetentionBucketDefaults(t *testing.T) {
client := getS3Client(t)
// Use a highly distinctive bucket name to avoid conflicts with buckets left over from earlier runs
bucketName := fmt.Sprintf("bucket-defaults-%d-%d", time.Now().UnixNano(), time.Now().UnixMilli()%10000)
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Set bucket object lock configuration with default retention
_, err := client.PutObjectLockConfiguration(context.TODO(), &s3.PutObjectLockConfigurationInput{
Bucket: aws.String(bucketName),
ObjectLockConfiguration: &types.ObjectLockConfiguration{
ObjectLockEnabled: types.ObjectLockEnabledEnabled,
Rule: &types.ObjectLockRule{
DefaultRetention: &types.DefaultRetention{
Mode: types.ObjectLockRetentionModeGovernance,
Days: 1, // 1 day default
},
},
},
})
if err != nil {
t.Logf("PutObjectLockConfiguration failed (may not be supported): %v", err)
t.Skip("Object lock configuration not supported, skipping test")
return
}
// Create object (should inherit default retention)
key := "bucket-defaults-test"
content := "bucket defaults test content"
putResp := putObject(t, client, bucketName, key, content)
require.NotNil(t, putResp.VersionId)
// Check if object has default retention applied
// Note: This depends on the implementation - some S3 services apply
// default retention automatically, others require explicit setting
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
if err != nil {
t.Logf("No automatic default retention applied: %v", err)
} else {
t.Logf("Default retention applied: %+v", retentionResp.Retention)
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode)
}
}
// TestRetentionConcurrentOperations tests concurrent retention operations
func TestRetentionConcurrentOperations(t *testing.T) {
client := getS3Client(t)
bucketName := getNewBucketName()
// Create bucket and enable versioning
createBucket(t, client, bucketName)
defer deleteBucket(t, client, bucketName)
enableVersioning(t, client, bucketName)
// Create object
key := "concurrent-ops-test"
content := "concurrent operations test content"
putResp := putObject(t, client, bucketName, key, content)
require.NotNil(t, putResp.VersionId)
// Test concurrent retention and legal hold operations
retentionUntil := time.Now().Add(1 * time.Hour)
// Set retention and legal hold concurrently
errChan := make(chan error, 2)
go func() {
_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
Retention: &types.ObjectLockRetention{
Mode: types.ObjectLockRetentionModeGovernance,
RetainUntilDate: aws.Time(retentionUntil),
},
})
errChan <- err
}()
go func() {
_, err := client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
LegalHold: &types.ObjectLockLegalHold{
Status: types.ObjectLockLegalHoldStatusOn,
},
})
errChan <- err
}()
// Wait for both operations to complete
for i := 0; i < 2; i++ {
err := <-errChan
if err != nil {
t.Logf("Concurrent operation failed: %v", err)
}
}
// Verify both settings are applied
retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
if err == nil {
assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode)
}
legalHoldResp, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
if err == nil {
assert.Equal(t, types.ObjectLockLegalHoldStatusOn, legalHoldResp.LegalHold.Status)
}
}

View File

@@ -0,0 +1,9 @@
{
"endpoint": "http://localhost:8333",
"access_key": "some_access_key1",
"secret_key": "some_secret_key1",
"region": "us-east-1",
"bucket_prefix": "test-retention-",
"use_ssl": false,
"skip_verify_ssl": true
}
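
The tests presumably read this JSON file to configure their S3 client. A minimal sketch of such a loader is shown below; the Config struct and loadTestConfig helper are illustrative assumptions for this sketch, not code from the PR.

// Illustrative loader for the JSON config above. Field names mirror the JSON keys;
// the Config struct and loadTestConfig helper are assumptions, not code from this PR.
package retention

import (
	"encoding/json"
	"os"
)

type Config struct {
	Endpoint      string `json:"endpoint"`
	AccessKey     string `json:"access_key"`
	SecretKey     string `json:"secret_key"`
	Region        string `json:"region"`
	BucketPrefix  string `json:"bucket_prefix"`
	UseSSL        bool   `json:"use_ssl"`
	SkipVerifySSL bool   `json:"skip_verify_ssl"`
}

func loadTestConfig(path string) (*Config, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var cfg Config
	if err := json.Unmarshal(data, &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}

With the values above, a helper like getS3Client would presumably point the AWS SDK at the local S3 gateway on port 8333 using the test credentials, keeping connection details out of the test code itself.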