parallelize remote content fetching
parent 00ffbb4c9a
commit f365af81c2
@@ -78,25 +78,32 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
 	dest := util.FullPath(remoteStorageMountedLocation.Path).Child(string(util.FullPath(req.Directory).Child(req.Name))[len(localMountedDir):])
 
 	var chunks []*filer_pb.FileChunk
+	var fetchAndWriteErr error
 
+	// FIXME limit on parallel
+	limitedConcurrentExecutor := util.NewLimitedConcurrentExecutor(8)
 	for offset := int64(0); offset < entry.Remote.RemoteSize; offset += chunkSize {
+		localOffset := offset
 
+		limitedConcurrentExecutor.Execute(func() {
 		size := chunkSize
-		if offset+chunkSize > entry.Remote.RemoteSize {
-			size = entry.Remote.RemoteSize - offset
+		if localOffset+chunkSize > entry.Remote.RemoteSize {
+			size = entry.Remote.RemoteSize - localOffset
 		}
 
 		// assign one volume server
 		assignResult, err := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest)
 		if err != nil {
-			return resp, err
+			fetchAndWriteErr = err
+			return
 		}
 		if assignResult.Error != "" {
-			return resp, fmt.Errorf("assign: %v", assignResult.Error)
+			fetchAndWriteErr = fmt.Errorf("assign: %v", assignResult.Error)
+			return
 		}
 		fileId, parseErr := needle.ParseFileIdFromString(assignResult.Fid)
 		if assignResult.Error != "" {
-			return resp, fmt.Errorf("unrecognized file id %s: %v", assignResult.Fid, parseErr)
+			fetchAndWriteErr = fmt.Errorf("unrecognized file id %s: %v", assignResult.Fid, parseErr)
+			return
 		}
 
 		// tell filer to tell volume server to download into needles
@@ -105,7 +112,7 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
 			VolumeId:   uint32(fileId.VolumeId),
 			NeedleId:   uint64(fileId.Key),
 			Cookie:     uint32(fileId.Cookie),
-			Offset:     offset,
+			Offset:     localOffset,
 			Size:       size,
 			RemoteType: storageConf.Type,
 			RemoteName: storageConf.Name,
@@ -123,12 +130,13 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
 		})
 
 		if err != nil {
-			return nil, err
+			fetchAndWriteErr = err
+			return
 		}
 
 		chunks = append(chunks, &filer_pb.FileChunk{
 			FileId: assignResult.Fid,
-			Offset: offset,
+			Offset: localOffset,
 			Size:   uint64(size),
 			Mtime:  time.Now().Unix(),
 			Fid: &filer_pb.FileId{
@@ -137,7 +145,7 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
 				Cookie: uint32(fileId.Cookie),
 			},
 		})
-
+		})
 	}
 
 	garbage := entry.Chunks
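
What the change does: each chunk download is wrapped in limitedConcurrentExecutor.Execute(...), created with util.NewLimitedConcurrentExecutor(8), so up to eight chunks are fetched from remote storage in parallel instead of one at a time. localOffset copies the loop variable so each closure sees its own offset, and the early error returns become writes to the shared fetchAndWriteErr, since a goroutine cannot return an error to the gRPC caller directly.

Below is a minimal, self-contained sketch of the same bounded-concurrency pattern using only the Go standard library (a buffered-channel semaphore plus sync.WaitGroup). It is illustrative only: fetchChunk, remoteSize, and chunkSize are hypothetical stand-ins, and unlike the hunks above it guards the shared error with a mutex and joins all workers before returning.

	package main

	import (
		"fmt"
		"sync"
	)

	// fetchAll downloads [0, remoteSize) in chunkSize pieces, at most 8 in flight.
	func fetchAll(remoteSize, chunkSize int64, fetchChunk func(offset, size int64) error) error {
		var (
			wg       sync.WaitGroup
			mu       sync.Mutex
			firstErr error
		)
		sem := make(chan struct{}, 8) // semaphore: 8 concurrent fetches, like NewLimitedConcurrentExecutor(8)

		for offset := int64(0); offset < remoteSize; offset += chunkSize {
			localOffset := offset // per-iteration copy, as in the commit
			size := chunkSize
			if localOffset+size > remoteSize {
				size = remoteSize - localOffset // the last chunk may be short
			}

			wg.Add(1)
			sem <- struct{}{} // acquire a slot (blocks while 8 fetches are busy)
			go func() {
				defer wg.Done()
				defer func() { <-sem }() // release the slot
				if err := fetchChunk(localOffset, size); err != nil {
					mu.Lock()
					if firstErr == nil {
						firstErr = err // keep the first failure, mirroring fetchAndWriteErr
					}
					mu.Unlock()
				}
			}()
		}

		wg.Wait() // join all workers before reporting
		return firstErr
	}

	func main() {
		err := fetchAll(100, 32, func(offset, size int64) error {
			fmt.Printf("fetch [%d, %d)\n", offset, offset+size)
			return nil
		})
		fmt.Println("done, err =", err)
	}

The // FIXME limit on parallel comment flags the concurrency handling as provisional: in the hunks shown, fetchAndWriteErr and the chunks slice are written from multiple goroutines without locking, and nothing visible here waits for the in-flight fetches to finish, so follow-up synchronization is still needed.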