avoid reusing context object

fix https://github.com/chrislusf/seaweedfs/issues/1182
Chris Lu
2020-02-25 21:50:12 -08:00
parent bd3254b53f
commit 892e726eb9
86 changed files with 501 additions and 568 deletions
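
In short, this change removes the ctx context.Context parameter from the replication source and sink APIs (DeleteEntry, CreateEntry, UpdateEntry, LookupFileId, ReadPart, withFilerClient) and has each gRPC or cloud-SDK call start from a fresh context.Background(), so a single context object is no longer shared and reused across replication operations. A minimal sketch of the pattern in Go (demoSink and doDelete are hypothetical stand-ins for illustration, not the actual SeaweedFS types):

package main

import (
	"context"
	"fmt"
)

type demoSink struct{}

// Old shape: DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error
// New shape: no ctx parameter; each operation derives its own context.
func (s *demoSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
	return s.doDelete(context.Background(), key)
}

// doDelete stands in for an SDK or gRPC call that still needs a context.
func (s *demoSink) doDelete(ctx context.Context, key string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	fmt.Println("deleted", key)
	return nil
}

func main() {
	_ = (&demoSink{}).DeleteEntry("/buckets/demo/file.txt", false, true)
}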

View File

@@ -41,28 +41,28 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
key = newKey
if message.OldEntry != nil && message.NewEntry == nil {
glog.V(4).Infof("deleting %v", key)
-return r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, message.DeleteChunks)
+return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks)
}
if message.OldEntry == nil && message.NewEntry != nil {
glog.V(4).Infof("creating %v", key)
-return r.sink.CreateEntry(ctx, key, message.NewEntry)
+return r.sink.CreateEntry(key, message.NewEntry)
}
if message.OldEntry == nil && message.NewEntry == nil {
glog.V(0).Infof("weird message %+v", message)
return nil
}
-foundExisting, err := r.sink.UpdateEntry(ctx, key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks)
+foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks)
if foundExisting {
glog.V(4).Infof("updated %v", key)
return err
}
-err = r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, false)
+err = r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, false)
if err != nil {
return fmt.Errorf("delete old entry %v: %v", key, err)
}
glog.V(4).Infof("creating missing %v", key)
-return r.sink.CreateEntry(ctx, key, message.NewEntry)
+return r.sink.CreateEntry(key, message.NewEntry)
}

View File

@@ -70,7 +70,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e
return nil
}
-func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
key = cleanKey(key)
@@ -78,7 +78,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de
key = key + "/"
}
-if _, err := g.containerURL.NewBlobURL(key).Delete(ctx,
+if _, err := g.containerURL.NewBlobURL(key).Delete(context.Background(),
azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil {
return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err)
}
@@ -87,7 +87,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de
}
-func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
key = cleanKey(key)
@@ -102,21 +102,21 @@ func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb
// Azure Storage account's container.
appendBlobURL := g.containerURL.NewAppendBlobURL(key)
-_, err := appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
+_, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
if err != nil {
return err
}
for _, chunk := range chunkViews {
-fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
+fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return err
}
var writeErr error
_, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) {
-_, writeErr = appendBlobURL.AppendBlock(ctx, bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil)
+_, writeErr = appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil)
})
if readErr != nil {
@@ -132,7 +132,7 @@ func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb
}
-func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *AzureSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
key = cleanKey(key)
// TODO improve efficiency
return false, nil

View File

@@ -58,7 +58,7 @@ func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error {
return nil
}
-func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
key = cleanKey(key)
@@ -66,18 +66,18 @@ func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, delet
key = key + "/"
}
-bucket, err := g.client.Bucket(ctx, g.bucket)
+bucket, err := g.client.Bucket(context.Background(), g.bucket)
if err != nil {
return err
}
targetObject := bucket.Object(key)
-return targetObject.Delete(ctx)
+return targetObject.Delete(context.Background())
}
-func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
key = cleanKey(key)
@@ -88,17 +88,17 @@ func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.En
totalSize := filer2.TotalSize(entry.Chunks)
chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
-bucket, err := g.client.Bucket(ctx, g.bucket)
+bucket, err := g.client.Bucket(context.Background(), g.bucket)
if err != nil {
return err
}
targetObject := bucket.Object(key)
-writer := targetObject.NewWriter(ctx)
+writer := targetObject.NewWriter(context.Background())
for _, chunk := range chunkViews {
-fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
+fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return err
}
@@ -124,7 +124,7 @@ func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.En
}
-func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *B2Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
key = cleanKey(key)

View File

@@ -15,7 +15,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
)
-func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) {
+func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) {
if len(sourceChunks) == 0 {
return
}
@@ -24,7 +24,7 @@ func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_
wg.Add(1)
go func(chunk *filer_pb.FileChunk) {
defer wg.Done()
-replicatedChunk, e := fs.replicateOneChunk(ctx, chunk, dir)
+replicatedChunk, e := fs.replicateOneChunk(chunk, dir)
if e != nil {
err = e
}
@@ -36,9 +36,9 @@ func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_
return
}
-func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk, dir string) (*filer_pb.FileChunk, error) {
+func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, dir string) (*filer_pb.FileChunk, error) {
-fileId, err := fs.fetchAndWrite(ctx, sourceChunk, dir)
+fileId, err := fs.fetchAndWrite(sourceChunk, dir)
if err != nil {
return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err)
}
@@ -53,9 +53,9 @@ func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_p
}, nil
}
-func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk, dir string) (fileId string, err error) {
+func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, dir string) (fileId string, err error) {
-filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString())
+filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString())
if err != nil {
return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
}
@@ -64,7 +64,7 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
var host string
var auth security.EncodedJwt
-if err := fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
@@ -75,7 +75,7 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
ParentPath: dir,
}
-resp, err := client.AssignVolume(ctx, request)
+resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
return err
@@ -109,11 +109,11 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
return
}
-func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error {
+func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
-return util.WithCachedGrpcClient(ctx, func(ctx context.Context, grpcConnection *grpc.ClientConn) error {
+return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
-return fn(ctx, client)
+return fn(client)
}, fs.grpcAddress, fs.grpcDialOption)
}

View File

@@ -64,8 +64,8 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string,
return nil
}
-func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
-return fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
+return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
dir, name := filer2.FullPath(key).DirAndName()
@@ -76,7 +76,7 @@ func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, d
}
glog.V(1).Infof("delete entry: %v", request)
-_, err := client.DeleteEntry(ctx, request)
+_, err := client.DeleteEntry(context.Background(), request)
if err != nil {
glog.V(0).Infof("delete entry %s: %v", key, err)
return fmt.Errorf("delete entry %s: %v", key, err)
@@ -86,9 +86,9 @@ func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, d
})
}
-func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
-return fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
dir, name := filer2.FullPath(key).DirAndName()
@@ -98,14 +98,14 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p
Name: name,
}
glog.V(1).Infof("lookup: %v", lookupRequest)
-if resp, err := client.LookupDirectoryEntry(ctx, lookupRequest); err == nil && resp.Entry != nil {
+if resp, err := client.LookupDirectoryEntry(context.Background(), lookupRequest); err == nil && resp.Entry != nil {
if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) {
glog.V(0).Infof("already replicated %s", key)
return nil
}
}
-replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks, dir)
+replicatedChunks, err := fs.replicateChunks(entry.Chunks, dir)
if err != nil {
glog.V(0).Infof("replicate entry chunks %s: %v", key, err)
@@ -125,7 +125,7 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p
}
glog.V(1).Infof("create: %v", request)
-if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
+if err := filer_pb.CreateEntry(client, request); err != nil {
glog.V(0).Infof("create entry %s: %v", key, err)
return fmt.Errorf("create entry %s: %v", key, err)
}
@@ -134,13 +134,13 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p
})
}
-func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
dir, name := filer2.FullPath(key).DirAndName()
// read existing entry
var existingEntry *filer_pb.Entry
-err = fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: dir,
@@ -148,7 +148,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
}
glog.V(4).Infof("lookup entry: %v", request)
-resp, err := client.LookupDirectoryEntry(ctx, request)
+resp, err := client.LookupDirectoryEntry(context.Background(), request)
if err != nil {
glog.V(0).Infof("lookup %s: %v", key, err)
return err
@@ -187,7 +187,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
}
// replicate the chunks that are new in the source
-replicatedChunks, err := fs.replicateChunks(ctx, newChunks, newParentPath)
+replicatedChunks, err := fs.replicateChunks(newChunks, newParentPath)
if err != nil {
return true, fmt.Errorf("replicte %s chunks error: %v", key, err)
}
@@ -195,14 +195,14 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
}
// save updated meta data
-return true, fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+return true, fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.UpdateEntryRequest{
Directory: newParentPath,
Entry: existingEntry,
}
-if _, err := client.UpdateEntry(ctx, request); err != nil {
+if _, err := client.UpdateEntry(context.Background(), request); err != nil {
return fmt.Errorf("update existingEntry %s: %v", key, err)
}

View File

@@ -69,13 +69,13 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
return nil
}
-func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
if isDirectory {
key = key + "/"
}
-if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != nil {
+if err := g.client.Bucket(g.bucket).Object(key).Delete(context.Background()); err != nil {
return fmt.Errorf("gcs delete %s%s: %v", g.bucket, key, err)
}
@@ -83,7 +83,7 @@ func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, dele
}
-func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
if entry.IsDirectory {
return nil
@@ -92,11 +92,11 @@ func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.E
totalSize := filer2.TotalSize(entry.Chunks)
chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
-wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx)
+wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())
for _, chunk := range chunkViews {
-fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
+fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return err
}
@@ -119,7 +119,7 @@ func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.E
}
-func (g *GcsSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *GcsSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
// TODO improve efficiency
return false, nil
}

View File

@@ -1,7 +1,6 @@
package sink
import (
"context"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/util"
@@ -10,9 +9,9 @@ import (
type ReplicationSink interface {
GetName() string
Initialize(configuration util.Configuration, prefix string) error
-DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error
-CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error
-UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
+DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error
+CreateEntry(key string, entry *filer_pb.Entry) error
+UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
GetSinkToDirectory() string
SetSourceFiler(s *source.FilerSource)
}
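
Put together, the sink interface after this hunk reads as follows; this is reassembled from the removed and added lines above, with the package clause and imports exactly as shown for this file:

package sink

import (
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/replication/source"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// ReplicationSink no longer receives a context.Context from the caller;
// each sink implementation builds its own context per operation.
type ReplicationSink interface {
	GetName() string
	Initialize(configuration util.Configuration, prefix string) error
	DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error
	CreateEntry(key string, entry *filer_pb.Entry) error
	UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
	GetSinkToDirectory() string
	SetSourceFiler(s *source.FilerSource)
}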

View File

@@ -11,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -77,7 +78,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, buc
return nil
}
-func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
key = cleanKey(key)
@@ -89,7 +90,7 @@ func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory,
}
-func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
key = cleanKey(key)
@@ -112,7 +113,7 @@ func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_
wg.Add(1)
go func(chunk *filer2.ChunkView) {
defer wg.Done()
-if part, uploadErr := s3sink.uploadPart(ctx, key, uploadId, partId, chunk); uploadErr != nil {
+if part, uploadErr := s3sink.uploadPart(context.Background(), key, uploadId, partId, chunk); uploadErr != nil {
err = uploadErr
} else {
parts = append(parts, part)
@@ -126,11 +127,11 @@ func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_
return err
}
-return s3sink.completeMultipartUpload(ctx, key, uploadId, parts)
+return s3sink.completeMultipartUpload(context.Background(), key, uploadId, parts)
}
-func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (s3sink *S3Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
key = cleanKey(key)
// TODO improve efficiency
return false, nil

View File

@@ -157,7 +157,7 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
}
func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) {
-fileUrl, err := s3sink.filerSource.LookupFileId(ctx, chunk.FileId)
+fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId)
if err != nil {
return nil, err
}

View File

@@ -40,16 +40,16 @@ func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) {
return nil
}
-func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl string, err error) {
+func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) {
vid2Locations := make(map[string]*filer_pb.Locations)
vid := volumeId(part)
-err = fs.withFilerClient(ctx, fs.grpcDialOption, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
glog.V(4).Infof("read lookup volume id locations: %v", vid)
-resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
+resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: []string{vid},
})
if err != nil {
@@ -78,9 +78,9 @@ func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl s
return
}
-func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) {
+func (fs *FilerSource) ReadPart(part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) {
-fileUrl, err := fs.LookupFileId(ctx, part)
+fileUrl, err := fs.LookupFileId(part)
if err != nil {
return "", nil, nil, err
}
@@ -90,11 +90,11 @@ func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename stri
return filename, header, readCloser, err
}
-func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error {
+func (fs *FilerSource) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
-return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error {
+return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
-return fn(ctx2, client)
+return fn(client)
}, fs.grpcAddress, fs.grpcDialOption)
}