seaweedfs/weed/s3api/filer_util.go
Konstantin Lebedev 084b377f87 do delete expired entries on s3 list request (#7426)
* do delete expired entries on s3 list request
https://github.com/seaweedfs/seaweedfs/issues/6837

* disable deleting expired s3 entries in the filer

* pass opt allowDeleteObjectsByTTL to all servers

* delete on get and head

* add lifecycle expiration s3 tests

* fix opt allowDeleteObjectsByTTL for server

* fix test lifecycle expiration

* fix IsExpired

* fix locationPrefix for updateEntriesTTL

* fix s3tests

* resolve coderabbitai comments

* GetS3ExpireTime on filer

* go mod

* clear TtlSeconds for volume

* move s3 delete expired entry to filer

* filer delete meta and data

* delete unused func removeExpiredObject

* test s3 put

* test s3 put multipart

* allowDeleteObjectsByTTL by default

* fix pipeline tests

* rm duplicate SeaweedFSExpiresS3

* revert expiration tests

* fix updateTTL

* rm log

* resolve comment

* fix delete version object

* fix S3Versioning

* fix delete on FindEntry

* fix delete chunks

* fix sqlite not support concurrent writes/reads

* move deletion out of listing transaction; delete entries and empty folders

* Revert "fix sqlite not support concurrent writes/reads"

This reverts commit 5d5da14e0e.

* clearer handling of recursive empty directory deletion

* handle listing errors

* struct copying

* reuse code to delete empty folders

* use iterative approach with a queue to avoid recursive WithFilerClient calls

* to stop a gRPC stream from the client-side callback, return a specific error, e.g., io.EOF

* still issue UpdateEntry when the flag must be added

* errors join

* join path

* cleaner

* add context, sort directories by depth (deepest first) to avoid redundant checks

* batched operation, refactoring

* prevent deleting bucket

* constant

* reuse code

* more logging

* refactoring

* s3 TTL time

* Safety check

---------

Co-authored-by: chrislu <chris.lu@gmail.com>
2025-11-05 22:05:54 -08:00

232 lines
7.0 KiB
Go

package s3api

import (
	"context"
	"errors"
	"fmt"
	"strings"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error {
	return filer_pb.Mkdir(context.Background(), s3a, parentDirectoryPath, dirName, fn)
}

func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk, fn func(entry *filer_pb.Entry)) error {
	return filer_pb.MkFile(context.Background(), s3a, parentDirectoryPath, fileName, chunks, fn)
}
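
// A hedged usage sketch (not part of the original file): the fn callback lets
// the caller mutate the entry before creation, e.g. to attach extended
// metadata. The directory name and metadata key below are hypothetical.
//
//	err := s3a.mkdir("/buckets/mybucket", "uploads", func(entry *filer_pb.Entry) {
//		if entry.Extended == nil {
//			entry.Extended = make(map[string][]byte)
//		}
//		entry.Extended["x-amz-meta-purpose"] = []byte("multipart")
//	})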

// list collects up to limit entries under parentDirectoryPath that match
// prefix, starting at startFrom (inclusive or not). isLast reports whether the
// listing reached the end of the directory.
func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit uint32) (entries []*filer_pb.Entry, isLast bool, err error) {
	err = filer_pb.List(context.Background(), s3a, parentDirectoryPath, prefix, func(entry *filer_pb.Entry, isLastEntry bool) error {
		entries = append(entries, entry)
		if isLastEntry {
			isLast = true
		}
		return nil
	}, startFrom, inclusive, limit)
	if len(entries) == 0 {
		isLast = true
	}
	return
}
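
// A minimal paging sketch (bucket path and page size are hypothetical):
// startFrom carries the last entry name of the previous page, and
// inclusive=false skips that entry on the next call.
//
//	var last string
//	for {
//		entries, isLast, err := s3a.list("/buckets/mybucket", "", last, false, 1000)
//		if err != nil || len(entries) == 0 {
//			break
//		}
//		last = entries[len(entries)-1].Name
//		if isLast {
//			break
//		}
//	}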

// rm deletes an entry via the filer, optionally removing its data chunks and
// recursing into directories.
func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDeleteData, isRecursive bool) error {
	return s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		return doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
	})
}

func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath string, entryName string, isDeleteData bool, isRecursive bool) error {
	request := &filer_pb.DeleteEntryRequest{
		Directory:            parentDirectoryPath,
		Name:                 entryName,
		IsDeleteData:         isDeleteData,
		IsRecursive:          isRecursive,
		IgnoreRecursiveError: true,
	}

	glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request)
	if resp, err := client.DeleteEntry(context.Background(), request); err != nil {
		glog.V(0).Infof("delete entry %v: %v", request, err)
		return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err)
	} else if resp.Error != "" {
		return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, resp.Error)
	}
	return nil
}

func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
	return filer_pb.Exists(context.Background(), s3a, parentDirectoryPath, entryName, isDirectory)
}

func (s3a *S3ApiServer) touch(parentDirectoryPath string, entryName string, entry *filer_pb.Entry) (err error) {
	return filer_pb.Touch(context.Background(), s3a, parentDirectoryPath, entryName, entry)
}

func (s3a *S3ApiServer) getEntry(parentDirectoryPath, entryName string) (entry *filer_pb.Entry, err error) {
	fullPath := util.NewFullPath(parentDirectoryPath, entryName)
	return filer_pb.GetEntry(context.Background(), s3a, fullPath)
}

// updateEntry writes newEntry back to the filer under parentDirectoryPath.
func (s3a *S3ApiServer) updateEntry(parentDirectoryPath string, newEntry *filer_pb.Entry) error {
	updateEntryRequest := &filer_pb.UpdateEntryRequest{
		Directory: parentDirectoryPath,
		Entry:     newEntry,
	}

	return s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		return filer_pb.UpdateEntry(context.Background(), client, updateEntryRequest)
	})
}
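
// A hedged read-modify-write sketch (not in the original file) pairing getEntry
// with updateEntry; the path, file name, and mutation are illustrative only.
//
//	entry, err := s3a.getEntry("/buckets/mybucket", "object.txt")
//	if err == nil && entry != nil && entry.Extended != nil {
//		entry.Extended[s3_constants.SeaweedFSExpiresS3] = []byte("true")
//		err = s3a.updateEntry("/buckets/mybucket", entry)
//	}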

func (s3a *S3ApiServer) updateEntriesTTL(parentDirectoryPath string, ttlSec int32) error {
	// Use an iterative approach with a queue to avoid recursive WithFilerClient calls,
	// which would create a new connection for each subdirectory.
	return s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		ctx := context.Background()
		var updateErrors []error

		dirsToProcess := []string{parentDirectoryPath}
		for len(dirsToProcess) > 0 {
			dir := dirsToProcess[0]
			dirsToProcess = dirsToProcess[1:]

			// Process directory in paginated batches
			if err := s3a.processDirectoryTTL(ctx, client, dir, ttlSec, &dirsToProcess, &updateErrors); err != nil {
				updateErrors = append(updateErrors, err)
			}
		}

		if len(updateErrors) > 0 {
			return errors.Join(updateErrors...)
		}
		return nil
	})
}
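
// A usage sketch (hypothetical bucket path; one-day TTL) applying a lifecycle
// expiration to everything under a prefix:
//
//	const daySeconds = 24 * 60 * 60
//	if err := s3a.updateEntriesTTL("/buckets/mybucket/logs", daySeconds); err != nil {
//		glog.Errorf("apply lifecycle TTL: %v", err)
//	}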

// processDirectoryTTL processes a single directory in paginated batches.
func (s3a *S3ApiServer) processDirectoryTTL(ctx context.Context, client filer_pb.SeaweedFilerClient,
	dir string, ttlSec int32, dirsToProcess *[]string, updateErrors *[]error) error {
	const batchSize = filer.PaginationSize

	startFrom := ""
	for {
		lastEntryName, entryCount, err := s3a.processTTLBatch(ctx, client, dir, ttlSec, startFrom, batchSize, dirsToProcess, updateErrors)
		if err != nil {
			return fmt.Errorf("list entries in %s: %w", dir, err)
		}

		// If we got fewer entries than the batch size, we've reached the end
		if entryCount < batchSize {
			break
		}
		startFrom = lastEntryName
	}
	return nil
}

// processTTLBatch processes a single batch of entries.
func (s3a *S3ApiServer) processTTLBatch(ctx context.Context, client filer_pb.SeaweedFilerClient,
	dir string, ttlSec int32, startFrom string, batchSize uint32,
	dirsToProcess *[]string, updateErrors *[]error) (lastEntry string, count int, err error) {

	err = filer_pb.SeaweedList(ctx, client, dir, "", func(entry *filer_pb.Entry, isLast bool) error {
		lastEntry = entry.Name
		count++

		if entry.IsDirectory {
			// Queue subdirectories for later instead of recursing into them here
			*dirsToProcess = append(*dirsToProcess, string(util.NewFullPath(dir, entry.Name)))
			return nil
		}

		// Update entry TTL and S3 expiry flag
		if updateErr := s3a.updateEntryTTL(ctx, client, dir, entry, ttlSec); updateErr != nil {
			*updateErrors = append(*updateErrors, updateErr)
		}
		return nil
	}, startFrom, false, batchSize)

	return lastEntry, count, err
}
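
// As the change log notes, a client-side callback can stop the underlying gRPC
// list stream early by returning a sentinel error such as io.EOF. A hedged
// sketch (the target check is an assumption, not part of this file); the
// caller would treat io.EOF as a clean stop rather than a failure:
//
//	err := filer_pb.SeaweedList(ctx, client, dir, "", func(entry *filer_pb.Entry, isLast bool) error {
//		if entry.Name == target {
//			return io.EOF // ends the stream after the first match
//		}
//		return nil
//	}, "", false, batchSize)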

// updateEntryTTL updates a single entry's TTL and S3 expiry flag.
func (s3a *S3ApiServer) updateEntryTTL(ctx context.Context, client filer_pb.SeaweedFilerClient,
	dir string, entry *filer_pb.Entry, ttlSec int32) error {

	if entry.Attributes == nil {
		entry.Attributes = &filer_pb.FuseAttributes{}
	}
	if entry.Extended == nil {
		entry.Extended = make(map[string][]byte)
	}

	// Check if both TTL and S3 expiry flag are already set correctly
	flagAlreadySet := string(entry.Extended[s3_constants.SeaweedFSExpiresS3]) == "true"
	if entry.Attributes.TtlSec == ttlSec && flagAlreadySet {
		return nil // Already up to date
	}

	// Set the S3 expiry flag
	entry.Extended[s3_constants.SeaweedFSExpiresS3] = []byte("true")

	// Update TTL if needed
	if entry.Attributes.TtlSec != ttlSec {
		entry.Attributes.TtlSec = ttlSec
	}

	if err := filer_pb.UpdateEntry(ctx, client, &filer_pb.UpdateEntryRequest{
		Directory: dir,
		Entry:     entry,
	}); err != nil {
		return fmt.Errorf("file %s/%s: %w", dir, entry.Name, err)
	}
	return nil
}

// getCollectionName derives the volume collection name for a bucket, prefixing
// it with the filer group when one is configured.
func (s3a *S3ApiServer) getCollectionName(bucket string) string {
	if s3a.option.FilerGroup != "" {
		return fmt.Sprintf("%s_%s", s3a.option.FilerGroup, bucket)
	}
	return bucket
}
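
// For example (values hypothetical): with FilerGroup "east", bucket "photos"
// maps to collection "east_photos"; with no filer group it remains "photos".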

// objectKey strips a leading "/" from the key, if present.
func objectKey(key *string) *string {
	if strings.HasPrefix(*key, "/") {
		t := (*key)[1:]
		return &t
	}
	return key
}
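
// For example: an incoming key "/photos/cat.jpg" becomes "photos/cat.jpg",
// matching how object keys are stored without a leading slash.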