Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2025-11-24 16:53:14 +08:00)
data sink: add incremental mode
The name-based special case for the local incremental sink is replaced by an IsIncremental() method on the ReplicationSink interface: every sink now reports whether it runs in incremental mode, and the cloud sinks read a new is_incremental flag from their configuration section. The S3 sink additionally gains clearer error reporting in its multipart upload path.
@@ -42,7 +42,7 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
         return nil
     }
     var dateKey string
-    if r.sink.GetName() == "local_incremental" {
+    if r.sink.IsIncremental() {
         var mTime int64
         if message.NewEntry != nil {
             mTime = message.NewEntry.Attributes.Mtime
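The hunk above only shows the start of the incremental branch; the rest of Replicate is not part of this diff. As a rough illustration of the idea (not the repository's exact code), an incremental sink can bucket replicated entries under a per-day prefix derived from the entry's mtime (the dateKey variable above), so the target accumulates a dated, append-only history instead of being mirrored in place. The helper below is a hypothetical sketch of that keying scheme:

package main

import (
	"fmt"
	"path"
	"time"
)

// incrementalKey is a hypothetical helper illustrating the idea behind the
// dateKey variable in Replicate: entries handled by an incremental sink are
// grouped under a YYYY-MM-DD prefix taken from the entry's mtime, so nothing
// at the destination is overwritten or deleted in place.
func incrementalKey(key string, mtime int64) string {
	if mtime == 0 {
		mtime = time.Now().Unix()
	}
	dateKey := time.Unix(mtime, 0).Format("2006-01-02")
	return path.Join(dateKey, key)
}

func main() {
	// prints a key such as "2021-03-15/buckets/docs/report.pdf"
	// (the exact date depends on the local time zone)
	fmt.Println(incrementalKey("buckets/docs/report.pdf", 1615790000))
}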
@@ -18,10 +18,11 @@ import (
 )
 
 type AzureSink struct {
-    containerURL azblob.ContainerURL
-    container    string
-    dir          string
-    filerSource  *source.FilerSource
+    containerURL  azblob.ContainerURL
+    container     string
+    dir           string
+    filerSource   *source.FilerSource
+    isIncremental bool
 }
 
 func init() {
@@ -36,7 +37,12 @@ func (g *AzureSink) GetSinkToDirectory() string {
     return g.dir
 }
 
+func (g *AzureSink) IsIncremental() bool {
+    return g.isIncremental
+}
+
 func (g *AzureSink) Initialize(configuration util.Configuration, prefix string) error {
+    g.isIncremental = configuration.GetBool(prefix+"is_incremental")
     return g.initialize(
         configuration.GetString(prefix+"account_name"),
         configuration.GetString(prefix+"account_key"),
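The same pattern repeats for the B2, filer, GCS, and S3 sinks below: each struct gains an isIncremental field, Initialize reads an is_incremental flag from the sink's configuration section, and IsIncremental() exposes it to the replicator. (The local sink, by contrast, hard-codes true further down.) The snippet below is a self-contained sketch of that pattern; the tiny config interface and demoSink type are stand-ins for illustration, not SeaweedFS's actual util.Configuration or sink types, though the "sink.s3." key names match the glog messages in the S3 hunk.

package main

import "fmt"

// config is a stand-in for the configuration accessors the sinks use;
// only the two methods the diff relies on are modeled here.
type config interface {
	GetString(key string) string
	GetBool(key string) bool
}

// mapConfig is a toy map-backed implementation for the example.
type mapConfig map[string]interface{}

func (m mapConfig) GetString(key string) string { s, _ := m[key].(string); return s }
func (m mapConfig) GetBool(key string) bool     { b, _ := m[key].(bool); return b }

// demoSink mirrors the shape of the cloud sinks in the diff.
type demoSink struct {
	bucket        string
	isIncremental bool
}

func (s *demoSink) Initialize(conf config, prefix string) {
	// The prefix selects the sink's section, e.g. "sink.s3.".
	s.bucket = conf.GetString(prefix + "bucket")
	s.isIncremental = conf.GetBool(prefix + "is_incremental")
}

func (s *demoSink) IsIncremental() bool { return s.isIncremental }

func main() {
	conf := mapConfig{
		"sink.s3.bucket":         "backups",
		"sink.s3.is_incremental": true,
	}
	var s demoSink
	s.Initialize(conf, "sink.s3.")
	fmt.Println(s.bucket, s.IsIncremental()) // backups true
}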
@@ -18,6 +18,7 @@ type B2Sink struct {
     bucket      string
     dir         string
     filerSource *source.FilerSource
+    isIncremental bool
 }
 
 func init() {
@@ -32,7 +33,12 @@ func (g *B2Sink) GetSinkToDirectory() string {
     return g.dir
 }
 
+func (g *B2Sink) IsIncremental() bool {
+    return g.isIncremental
+}
+
 func (g *B2Sink) Initialize(configuration util.Configuration, prefix string) error {
+    g.isIncremental = configuration.GetBool(prefix+"is_incremental")
     return g.initialize(
         configuration.GetString(prefix+"b2_account_id"),
         configuration.GetString(prefix+"b2_master_application_key"),
@@ -30,6 +30,7 @@ type FilerSink struct {
     grpcDialOption    grpc.DialOption
     address           string
     writeChunkByFiler bool
+    isIncremental     bool
 }
 
 func init() {
@@ -44,7 +45,12 @@ func (fs *FilerSink) GetSinkToDirectory() string {
     return fs.dir
 }
 
+func (fs *FilerSink) IsIncremental() bool {
+    return fs.isIncremental
+}
+
 func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error {
+    fs.isIncremental = configuration.GetBool(prefix+"is_incremental")
     return fs.DoInitialize(
         "",
         configuration.GetString(prefix+"grpcAddress"),
@@ -22,6 +22,7 @@ type GcsSink struct {
     bucket      string
     dir         string
     filerSource *source.FilerSource
+    isIncremental bool
 }
 
 func init() {
@@ -36,7 +37,12 @@ func (g *GcsSink) GetSinkToDirectory() string {
     return g.dir
 }
 
+func (g *GcsSink) IsIncremental() bool {
+    return g.isIncremental
+}
+
 func (g *GcsSink) Initialize(configuration util.Configuration, prefix string) error {
+    g.isIncremental = configuration.GetBool(prefix+"is_incremental")
     return g.initialize(
         configuration.GetString(prefix+"google_application_credentials"),
         configuration.GetString(prefix+"bucket"),
@@ -50,6 +50,10 @@ func (localsink *LocalSink) GetSinkToDirectory() string {
     return localsink.Dir
 }
 
+func (localsink *LocalSink) IsIncremental() bool {
+    return true
+}
+
 func (localsink *LocalSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
     if localsink.isMultiPartEntry(key) {
         return nil
@@ -14,6 +14,7 @@ type ReplicationSink interface {
     UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error)
     GetSinkToDirectory() string
     SetSourceFiler(s *source.FilerSource)
+    IsIncremental() bool
 }
 
 var (
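With IsIncremental() added to the ReplicationSink interface, every sink in the package must implement it, and the replicator can query the capability instead of matching on sink names. The sketch below is illustrative, not code from this commit: it shows the common Go idiom of a compile-time assertion that a concrete sink still satisfies the interface after such a change, using trimmed-down stand-in types.

package main

import "fmt"

// replicationSink is a trimmed-down stand-in for the ReplicationSink
// interface above, reduced to the methods this example needs.
type replicationSink interface {
	GetSinkToDirectory() string
	IsIncremental() bool
}

type localSink struct{ dir string }

func (s *localSink) GetSinkToDirectory() string { return s.dir }

// LocalSink in the diff always reports incremental mode.
func (s *localSink) IsIncremental() bool { return true }

// Compile-time check: if localSink stops satisfying replicationSink
// (for example, after the interface grows another method), the build fails here.
var _ replicationSink = (*localSink)(nil)

func main() {
	var sink replicationSink = &localSink{dir: "/data/backup"}
	// The replicator can branch on the capability instead of the sink name.
	if sink.IsIncremental() {
		fmt.Println("replicating into dated, append-only keys under", sink.GetSinkToDirectory())
	}
}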
@@ -21,12 +21,13 @@ import (
 )
 
 type S3Sink struct {
-    conn        s3iface.S3API
-    region      string
-    bucket      string
-    dir         string
-    endpoint    string
-    filerSource *source.FilerSource
+    conn          s3iface.S3API
+    region        string
+    bucket        string
+    dir           string
+    endpoint      string
+    filerSource   *source.FilerSource
+    isIncremental bool
 }
 
 func init() {
@@ -41,11 +42,17 @@ func (s3sink *S3Sink) GetSinkToDirectory() string {
     return s3sink.dir
 }
 
+func (s3sink *S3Sink) IsIncremental() bool {
+    return s3sink.isIncremental
+}
+
 func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error {
     glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region"))
     glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket"))
     glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory"))
     glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint"))
+    glog.V(0).Infof("sink.s3.is_incremental: %v", configuration.GetString(prefix+"is_incremental"))
+    s3sink.isIncremental = configuration.GetBool(prefix + "is_incremental")
     return s3sink.initialize(
         configuration.GetString(prefix+"aws_access_key_id"),
         configuration.GetString(prefix+"aws_secret_access_key"),
@@ -67,8 +74,8 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, buc
     s3sink.endpoint = endpoint
 
     config := &aws.Config{
-        Region:   aws.String(s3sink.region),
-        Endpoint: aws.String(s3sink.endpoint),
+        Region:   aws.String(s3sink.region),
+        Endpoint: aws.String(s3sink.endpoint),
     }
     if awsAccessKeyId != "" && awsSecretAccessKey != "" {
         config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
@@ -104,7 +111,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures
 
     uploadId, err := s3sink.createMultipartUpload(key, entry)
     if err != nil {
-        return err
+        return fmt.Errorf("createMultipartUpload: %v", err)
     }
 
     totalSize := filer.FileSize(entry)
@@ -120,6 +127,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures
             defer wg.Done()
             if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
                 err = uploadErr
+                glog.Errorf("uploadPart: %v", uploadErr)
             } else {
                 parts[index] = part
             }
@@ -129,7 +137,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures
 
     if err != nil {
         s3sink.abortMultipartUpload(key, uploadId)
-        return err
+        return fmt.Errorf("uploadPart: %v", err)
     }
 
     return s3sink.completeMultipartUpload(context.Background(), key, uploadId, parts)
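The last three hunks are small robustness fixes in the S3 sink's multipart upload path: a failed part upload is now logged, and bare return err statements gain a prefix naming the failed step. Below is a minimal, self-contained sketch of that wrapping pattern; the function names are placeholders, not the sink's real helpers.

package main

import (
	"errors"
	"fmt"
	"log"
)

// startUpload is a placeholder for a step such as createMultipartUpload.
func startUpload(key string) (string, error) {
	return "", errors.New("access denied")
}

// createEntry mirrors the wrapping style used in the diff: each failing step
// is reported with a prefix naming the operation, so the error that finally
// surfaces says which call failed, not just the raw message.
func createEntry(key string) error {
	uploadId, err := startUpload(key)
	if err != nil {
		return fmt.Errorf("createMultipartUpload: %v", err)
	}
	_ = uploadId
	return nil
}

func main() {
	if err := createEntry("buckets/docs/report.pdf"); err != nil {
		// logs: replicate failed: createMultipartUpload: access denied
		log.Printf("replicate failed: %v", err)
	}
}

Using %w instead of %v would additionally keep the original error available to errors.Is and errors.As; the commit uses %v, which produces the same message but does not support unwrapping.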