2018-09-03 13:03:16 -07:00
|
|
|
package s3api
|
|
|
|
|
|
|
|
|
|
import (
|
|
|
|
|
"context"
|
2025-11-06 11:05:54 +05:00
|
|
|
"errors"
|
2018-09-03 16:47:00 -07:00
|
|
|
"fmt"
|
2025-07-09 01:51:45 -07:00
|
|
|
"strings"
|
|
|
|
|
|
2025-11-06 11:05:54 +05:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/filer"
|
2022-07-29 00:17:28 -07:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/glog"
|
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
2025-11-06 11:05:54 +05:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
|
2022-07-29 00:17:28 -07:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/util"
|
2018-09-03 13:03:16 -07:00
|
|
|
)
|
|
|
|
|
|
2020-02-25 22:23:59 -08:00
|
|
|
func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error {
|
2018-09-03 13:03:16 -07:00
|
|
|
|
2025-05-22 19:46:49 +03:00
|
|
|
return filer_pb.Mkdir(context.Background(), s3a, parentDirectoryPath, dirName, fn)
|
2018-09-03 13:03:16 -07:00
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
2021-07-02 11:00:42 +08:00
|
|
|
func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk, fn func(entry *filer_pb.Entry)) error {
|
2018-09-09 16:25:43 -07:00
|
|
|
|
2025-05-22 19:46:49 +03:00
|
|
|
return filer_pb.MkFile(context.Background(), s3a, parentDirectoryPath, fileName, chunks, fn)
|
2018-09-09 16:25:43 -07:00
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
2020-09-11 14:53:50 -07:00
|
|
|
func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit uint32) (entries []*filer_pb.Entry, isLast bool, err error) {
|
2018-09-03 13:03:16 -07:00
|
|
|
|
2025-05-22 19:46:49 +03:00
|
|
|
err = filer_pb.List(context.Background(), s3a, parentDirectoryPath, prefix, func(entry *filer_pb.Entry, isLastEntry bool) error {
|
2020-03-23 00:30:02 -07:00
|
|
|
entries = append(entries, entry)
|
2020-09-11 14:53:50 -07:00
|
|
|
if isLastEntry {
|
|
|
|
|
isLast = true
|
|
|
|
|
}
|
2020-04-29 17:40:08 -07:00
|
|
|
return nil
|
2020-03-23 00:30:02 -07:00
|
|
|
}, startFrom, inclusive, limit)
|
2018-09-03 13:03:16 -07:00
|
|
|
|
2021-03-11 15:20:50 +08:00
|
|
|
if len(entries) == 0 {
|
|
|
|
|
isLast = true
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-03 13:03:16 -07:00
|
|
|
return
|
|
|
|
|
|
|
|
|
|
}
|
2018-09-03 13:16:26 -07:00
|
|
|
|
2020-03-20 14:17:31 -07:00
|
|
|
func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDeleteData, isRecursive bool) error {
|
2018-09-03 13:16:26 -07:00
|
|
|
|
2021-12-26 00:15:03 -08:00
|
|
|
return s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
|
2018-09-03 13:16:26 -07:00
|
|
|
|
2020-03-20 14:17:31 -07:00
|
|
|
err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
2018-09-03 13:16:26 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return nil
|
|
|
|
|
})
|
|
|
|
|
|
|
|
|
|
}
|
2018-09-04 00:42:44 -07:00
|
|
|
|
2020-03-20 14:17:31 -07:00
|
|
|
func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath string, entryName string, isDeleteData bool, isRecursive bool) error {
|
|
|
|
|
request := &filer_pb.DeleteEntryRequest{
|
2022-03-23 01:03:51 -07:00
|
|
|
Directory: parentDirectoryPath,
|
|
|
|
|
Name: entryName,
|
|
|
|
|
IsDeleteData: isDeleteData,
|
|
|
|
|
IsRecursive: isRecursive,
|
|
|
|
|
IgnoreRecursiveError: true,
|
2020-03-20 14:17:31 -07:00
|
|
|
}
|
2020-02-25 14:38:36 -08:00
|
|
|
|
2020-03-20 14:17:31 -07:00
|
|
|
glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request)
|
|
|
|
|
if resp, err := client.DeleteEntry(context.Background(), request); err != nil {
|
|
|
|
|
glog.V(0).Infof("delete entry %v: %v", request, err)
|
|
|
|
|
return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err)
|
|
|
|
|
} else {
|
|
|
|
|
if resp.Error != "" {
|
|
|
|
|
return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, resp.Error)
|
2020-02-25 14:38:36 -08:00
|
|
|
}
|
2020-03-20 14:17:31 -07:00
|
|
|
}
|
|
|
|
|
return nil
|
2020-02-25 14:38:36 -08:00
|
|
|
}
|
|
|
|
|
|
2020-02-25 22:23:59 -08:00
|
|
|
func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
|
2018-09-04 00:42:44 -07:00
|
|
|
|
2025-05-22 19:46:49 +03:00
|
|
|
return filer_pb.Exists(context.Background(), s3a, parentDirectoryPath, entryName, isDirectory)
|
2018-09-04 00:42:44 -07:00
|
|
|
|
|
|
|
|
}
|
2019-07-08 12:37:20 -07:00
|
|
|
|
2021-03-19 01:31:49 -07:00
|
|
|
func (s3a *S3ApiServer) touch(parentDirectoryPath string, entryName string, entry *filer_pb.Entry) (err error) {
|
|
|
|
|
|
2025-05-22 19:46:49 +03:00
|
|
|
return filer_pb.Touch(context.Background(), s3a, parentDirectoryPath, entryName, entry)
|
2021-03-19 01:31:49 -07:00
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
2020-11-12 13:30:08 -08:00
|
|
|
func (s3a *S3ApiServer) getEntry(parentDirectoryPath, entryName string) (entry *filer_pb.Entry, err error) {
|
2020-11-12 16:15:59 +08:00
|
|
|
fullPath := util.NewFullPath(parentDirectoryPath, entryName)
|
2025-05-22 19:46:49 +03:00
|
|
|
return filer_pb.GetEntry(context.Background(), s3a, fullPath)
|
2020-11-12 16:15:59 +08:00
|
|
|
}
|
|
|
|
|
|
2022-10-02 10:18:00 +08:00
|
|
|
func (s3a *S3ApiServer) updateEntry(parentDirectoryPath string, newEntry *filer_pb.Entry) error {
|
|
|
|
|
updateEntryRequest := &filer_pb.UpdateEntryRequest{
|
|
|
|
|
Directory: parentDirectoryPath,
|
|
|
|
|
Entry: newEntry,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
|
2025-05-22 19:46:49 +03:00
|
|
|
err := filer_pb.UpdateEntry(context.Background(), client, updateEntryRequest)
|
2022-10-02 10:18:00 +08:00
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
})
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
2025-11-06 11:05:54 +05:00
|
|
|
func (s3a *S3ApiServer) updateEntriesTTL(parentDirectoryPath string, ttlSec int32) error {
|
|
|
|
|
// Use iterative approach with a queue to avoid recursive WithFilerClient calls
|
|
|
|
|
// which would create a new connection for each subdirectory
|
|
|
|
|
return s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
|
|
|
|
|
ctx := context.Background()
|
|
|
|
|
var updateErrors []error
|
|
|
|
|
dirsToProcess := []string{parentDirectoryPath}
|
|
|
|
|
|
|
|
|
|
for len(dirsToProcess) > 0 {
|
|
|
|
|
dir := dirsToProcess[0]
|
|
|
|
|
dirsToProcess = dirsToProcess[1:]
|
|
|
|
|
|
|
|
|
|
// Process directory in paginated batches
|
|
|
|
|
if err := s3a.processDirectoryTTL(ctx, client, dir, ttlSec, &dirsToProcess, &updateErrors); err != nil {
|
|
|
|
|
updateErrors = append(updateErrors, err)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if len(updateErrors) > 0 {
|
|
|
|
|
return errors.Join(updateErrors...)
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// processDirectoryTTL processes a single directory in paginated batches
|
|
|
|
|
func (s3a *S3ApiServer) processDirectoryTTL(ctx context.Context, client filer_pb.SeaweedFilerClient,
|
|
|
|
|
dir string, ttlSec int32, dirsToProcess *[]string, updateErrors *[]error) error {
|
|
|
|
|
|
|
|
|
|
const batchSize = filer.PaginationSize
|
|
|
|
|
startFrom := ""
|
|
|
|
|
|
|
|
|
|
for {
|
|
|
|
|
lastEntryName, entryCount, err := s3a.processTTLBatch(ctx, client, dir, ttlSec, startFrom, batchSize, dirsToProcess, updateErrors)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return fmt.Errorf("list entries in %s: %w", dir, err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// If we got fewer entries than batch size, we've reached the end
|
|
|
|
|
if entryCount < batchSize {
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
startFrom = lastEntryName
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// processTTLBatch processes a single batch of entries
|
|
|
|
|
func (s3a *S3ApiServer) processTTLBatch(ctx context.Context, client filer_pb.SeaweedFilerClient,
|
|
|
|
|
dir string, ttlSec int32, startFrom string, batchSize uint32,
|
|
|
|
|
dirsToProcess *[]string, updateErrors *[]error) (lastEntry string, count int, err error) {
|
|
|
|
|
|
|
|
|
|
err = filer_pb.SeaweedList(ctx, client, dir, "", func(entry *filer_pb.Entry, isLast bool) error {
|
|
|
|
|
lastEntry = entry.Name
|
|
|
|
|
count++
|
|
|
|
|
|
|
|
|
|
if entry.IsDirectory {
|
|
|
|
|
*dirsToProcess = append(*dirsToProcess, string(util.NewFullPath(dir, entry.Name)))
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Update entry TTL and S3 expiry flag
|
|
|
|
|
if updateErr := s3a.updateEntryTTL(ctx, client, dir, entry, ttlSec); updateErr != nil {
|
|
|
|
|
*updateErrors = append(*updateErrors, updateErr)
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}, startFrom, false, batchSize)
|
|
|
|
|
|
|
|
|
|
return lastEntry, count, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// updateEntryTTL updates a single entry's TTL and S3 expiry flag
|
|
|
|
|
func (s3a *S3ApiServer) updateEntryTTL(ctx context.Context, client filer_pb.SeaweedFilerClient,
|
|
|
|
|
dir string, entry *filer_pb.Entry, ttlSec int32) error {
|
|
|
|
|
|
|
|
|
|
if entry.Attributes == nil {
|
|
|
|
|
entry.Attributes = &filer_pb.FuseAttributes{}
|
|
|
|
|
}
|
|
|
|
|
if entry.Extended == nil {
|
|
|
|
|
entry.Extended = make(map[string][]byte)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Check if both TTL and S3 expiry flag are already set correctly
|
|
|
|
|
flagAlreadySet := string(entry.Extended[s3_constants.SeaweedFSExpiresS3]) == "true"
|
|
|
|
|
if entry.Attributes.TtlSec == ttlSec && flagAlreadySet {
|
|
|
|
|
return nil // Already up to date
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Set the S3 expiry flag
|
|
|
|
|
entry.Extended[s3_constants.SeaweedFSExpiresS3] = []byte("true")
|
|
|
|
|
// Update TTL if needed
|
|
|
|
|
if entry.Attributes.TtlSec != ttlSec {
|
|
|
|
|
entry.Attributes.TtlSec = ttlSec
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if err := filer_pb.UpdateEntry(ctx, client, &filer_pb.UpdateEntryRequest{
|
|
|
|
|
Directory: dir,
|
|
|
|
|
Entry: entry,
|
|
|
|
|
}); err != nil {
|
|
|
|
|
return fmt.Errorf("file %s/%s: %w", dir, entry.Name, err)
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
2023-05-16 20:09:43 +03:30
|
|
|
func (s3a *S3ApiServer) getCollectionName(bucket string) string {
|
|
|
|
|
if s3a.option.FilerGroup != "" {
|
|
|
|
|
return fmt.Sprintf("%s_%s", s3a.option.FilerGroup, bucket)
|
|
|
|
|
}
|
|
|
|
|
return bucket
|
|
|
|
|
}
|
|
|
|
|
|
2019-07-08 12:37:20 -07:00
|
|
|
// objectKey strips a single leading "/" from the key, returning a pointer to
// the trimmed copy; keys without the prefix are returned unchanged.
func objectKey(key *string) *string {
	trimmed := strings.TrimPrefix(*key, "/")
	if trimmed != *key {
		return &trimmed
	}
	return key
}
|