avoid reusing context object

fix https://github.com/chrislusf/seaweedfs/issues/1182
Chris Lu
2020-02-25 21:50:12 -08:00
parent bd3254b53f
commit 892e726eb9
86 changed files with 501 additions and 568 deletions
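
The change drops the context.Context parameter that was threaded through the replication sink and instead builds a fresh context.Background() at each gRPC call site. As an illustration of why reusing one long-lived context across many calls is fragile, here is a minimal, hypothetical Go sketch (doCall and the timings are made up for this example and are not SeaweedFS code): once a shared context is canceled or hits its deadline, every later call that reuses it fails immediately, while a fresh context per call is unaffected.

package main

import (
	"context"
	"fmt"
	"time"
)

// doCall stands in for a single gRPC request; it only inspects the context.
func doCall(ctx context.Context, name string) error {
	if err := ctx.Err(); err != nil {
		return fmt.Errorf("%s: %w", name, err) // context already canceled or expired
	}
	return nil
}

func main() {
	// One shared context reused for every call: after a slow first step
	// exhausts the deadline, all later calls fail with "context deadline exceeded".
	shared, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	time.Sleep(20 * time.Millisecond) // simulate a slow first replication step
	fmt.Println(doCall(shared, "reused ctx"))

	// A fresh context per call, which is what this commit switches to.
	fmt.Println(doCall(context.Background(), "fresh ctx"))
}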

View File

@@ -15,7 +15,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
-func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) {
+func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) {
 	if len(sourceChunks) == 0 {
 		return
 	}
@@ -24,7 +24,7 @@ func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_
 		wg.Add(1)
 		go func(chunk *filer_pb.FileChunk) {
 			defer wg.Done()
-			replicatedChunk, e := fs.replicateOneChunk(ctx, chunk, dir)
+			replicatedChunk, e := fs.replicateOneChunk(chunk, dir)
 			if e != nil {
 				err = e
 			}
@@ -36,9 +36,9 @@ func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_
 	return
 }
-func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk, dir string) (*filer_pb.FileChunk, error) {
+func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, dir string) (*filer_pb.FileChunk, error) {
-	fileId, err := fs.fetchAndWrite(ctx, sourceChunk, dir)
+	fileId, err := fs.fetchAndWrite(sourceChunk, dir)
 	if err != nil {
 		return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err)
 	}
@@ -53,9 +53,9 @@ func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_p
 	}, nil
 }
-func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk, dir string) (fileId string, err error) {
+func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, dir string) (fileId string, err error) {
-	filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString())
+	filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString())
 	if err != nil {
 		return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
 	}
@@ -64,7 +64,7 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
 	var host string
 	var auth security.EncodedJwt
-	if err := fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+	if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 		request := &filer_pb.AssignVolumeRequest{
 			Count: 1,
@@ -75,7 +75,7 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
 			ParentPath: dir,
 		}
-		resp, err := client.AssignVolume(ctx, request)
+		resp, err := client.AssignVolume(context.Background(), request)
 		if err != nil {
 			glog.V(0).Infof("assign volume failure %v: %v", request, err)
 			return err
@@ -109,11 +109,11 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
 	return
 }
-func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error {
+func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
-	return util.WithCachedGrpcClient(ctx, func(ctx context.Context, grpcConnection *grpc.ClientConn) error {
+	return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
 		client := filer_pb.NewSeaweedFilerClient(grpcConnection)
-		return fn(ctx, client)
+		return fn(client)
 	}, fs.grpcAddress, fs.grpcDialOption)
 }
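
With this change, withFilerClient no longer accepts or forwards a caller-supplied context; each RPC builds its own context.Background() inside the callback. A rough usage sketch of the new shape, assuming it sits in the same package as FilerSink — lookupExample is a hypothetical helper, and only the signatures of withFilerClient, LookupDirectoryEntry, and LookupDirectoryEntryRequest are taken from this diff:

// Hypothetical helper showing the post-change call pattern.
func (fs *FilerSink) lookupExample(dir, name string) error {
	return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
		// A fresh, independent context per RPC instead of one
		// long-lived context threaded in from the caller.
		resp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{
			Directory: dir,
			Name:      name,
		})
		if err != nil {
			return err
		}
		glog.V(3).Infof("found entry: %v", resp.Entry)
		return nil
	})
}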

View File

@@ -64,8 +64,8 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string,
 	return nil
 }
-func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
-	return fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
+	return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 		dir, name := filer2.FullPath(key).DirAndName()
@@ -76,7 +76,7 @@ func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, d
 		}
 		glog.V(1).Infof("delete entry: %v", request)
-		_, err := client.DeleteEntry(ctx, request)
+		_, err := client.DeleteEntry(context.Background(), request)
 		if err != nil {
 			glog.V(0).Infof("delete entry %s: %v", key, err)
 			return fmt.Errorf("delete entry %s: %v", key, err)
@@ -86,9 +86,9 @@ func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, d
 	})
 }
-func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
+func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
-	return fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+	return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 		dir, name := filer2.FullPath(key).DirAndName()
@@ -98,14 +98,14 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p
 			Name: name,
 		}
 		glog.V(1).Infof("lookup: %v", lookupRequest)
-		if resp, err := client.LookupDirectoryEntry(ctx, lookupRequest); err == nil && resp.Entry != nil {
+		if resp, err := client.LookupDirectoryEntry(context.Background(), lookupRequest); err == nil && resp.Entry != nil {
 			if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) {
 				glog.V(0).Infof("already replicated %s", key)
 				return nil
 			}
 		}
-		replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks, dir)
+		replicatedChunks, err := fs.replicateChunks(entry.Chunks, dir)
 		if err != nil {
 			glog.V(0).Infof("replicate entry chunks %s: %v", key, err)
@@ -125,7 +125,7 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p
 		}
 		glog.V(1).Infof("create: %v", request)
-		if err := filer_pb.CreateEntry(ctx, client, request); err != nil {
+		if err := filer_pb.CreateEntry(client, request); err != nil {
 			glog.V(0).Infof("create entry %s: %v", key, err)
 			return fmt.Errorf("create entry %s: %v", key, err)
 		}
@@ -134,13 +134,13 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p
 	})
 }
-func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
 	dir, name := filer2.FullPath(key).DirAndName()
 	// read existing entry
 	var existingEntry *filer_pb.Entry
-	err = fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+	err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 		request := &filer_pb.LookupDirectoryEntryRequest{
 			Directory: dir,
@@ -148,7 +148,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
 		}
 		glog.V(4).Infof("lookup entry: %v", request)
-		resp, err := client.LookupDirectoryEntry(ctx, request)
+		resp, err := client.LookupDirectoryEntry(context.Background(), request)
 		if err != nil {
 			glog.V(0).Infof("lookup %s: %v", key, err)
 			return err
@@ -187,7 +187,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
 	}
 	// replicate the chunks that are new in the source
-	replicatedChunks, err := fs.replicateChunks(ctx, newChunks, newParentPath)
+	replicatedChunks, err := fs.replicateChunks(newChunks, newParentPath)
 	if err != nil {
 		return true, fmt.Errorf("replicte %s chunks error: %v", key, err)
 	}
@@ -195,14 +195,14 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file
 	}
 	// save updated meta data
-	return true, fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
+	return true, fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 		request := &filer_pb.UpdateEntryRequest{
 			Directory: newParentPath,
 			Entry: existingEntry,
 		}
-		if _, err := client.UpdateEntry(ctx, request); err != nil {
+		if _, err := client.UpdateEntry(context.Background(), request); err != nil {
 			return fmt.Errorf("update existingEntry %s: %v", key, err)
 		}
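
Call sites change in the same way: the sink's DeleteEntry, CreateEntry, and UpdateEntry are now invoked without a context argument. A hypothetical caller-side sketch (replicateOne, the fallback logic, and the boolean arguments chosen here are illustrative assumptions; only the method signatures come from this diff, and filersink is assumed to be the package name):

// Hypothetical replication step against the new, context-free sink API.
func replicateOne(sink *filersink.FilerSink, key string, oldEntry, newEntry *filer_pb.Entry) error {
	if newEntry == nil {
		// The entry disappeared on the source; mirror the deletion.
		return sink.DeleteEntry(key, false, true)
	}
	if oldEntry == nil {
		// Brand-new entry on the source; copy its chunks and create it.
		return sink.CreateEntry(key, newEntry)
	}
	// The entry changed; let the sink reconcile chunks and metadata.
	newParentPath, _ := filer2.FullPath(key).DirAndName()
	found, err := sink.UpdateEntry(key, oldEntry, newParentPath, newEntry, true)
	if err != nil {
		return err
	}
	if !found {
		// Nothing to update on the target; fall back to creating the entry.
		return sink.CreateEntry(key, newEntry)
	}
	return nil
}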