Mirror of https://github.com/seaweedfs/seaweedfs.git
refactor filer_pb.Entry and filer.Entry to use GetChunks(), so that reads of the chunk list can later be guarded by a lock
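For context, GetChunks() is the nil-safe accessor that protoc-gen-go already generates for the repeated Chunks field, so swapping call sites from the field to the getter is behavior-neutral today. A sketch of the generated shape (the real definition lives in the generated filer_pb package):

// Sketch of the accessor protoc-gen-go emits for the repeated Chunks
// field: it tolerates a nil receiver, returning a nil slice.
func (x *Entry) GetChunks() []*FileChunk {
	if x != nil {
		return x.Chunks
	}
	return nil
}

Funneling every read through one accessor leaves a single seam where a later commit can add synchronization without revisiting each call site, which is what the hunks below prepare for.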
@@ -103,7 +103,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []
 	}
 
 	totalSize := filer.FileSize(entry)
-	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
 
 	// Create a URL that references a to-be-created blob in your
 	// Azure Storage account's container.
@@ -92,7 +92,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int
 	}
 
 	totalSize := filer.FileSize(entry)
-	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
 
 	bucket, err := g.client.Bucket(context.Background(), g.bucket)
 	if err != nil {
@@ -120,14 +120,14 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
 		}
 	}
 
-	replicatedChunks, err := fs.replicateChunks(entry.Chunks, key)
+	replicatedChunks, err := fs.replicateChunks(entry.GetChunks(), key)
 
 	if err != nil {
 		// only warning here since the source chunk may have been deleted already
 		glog.Warningf("replicate entry chunks %s: %v", key, err)
 	}
 
-	glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
+	glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.GetChunks(), replicatedChunks)
 
 	request := &filer_pb.CreateEntryRequest{
 		Directory: dir,
@@ -199,7 +199,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 	// delete the chunks that are deleted from the source
 	if deleteIncludeChunks {
 		// remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
-		existingEntry.Chunks = filer.DoMinusChunksBySourceFileId(existingEntry.Chunks, deletedChunks)
+		existingEntry.Chunks = filer.DoMinusChunksBySourceFileId(existingEntry.GetChunks(), deletedChunks)
 	}
 
 	// replicate the chunks that are new in the source
@@ -207,7 +207,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 	if err != nil {
 		return true, fmt.Errorf("replicate %s chunks error: %v", key, err)
 	}
-	existingEntry.Chunks = append(existingEntry.Chunks, replicatedChunks...)
+	existingEntry.Chunks = append(existingEntry.GetChunks(), replicatedChunks...)
 	existingEntry.Attributes = newEntry.Attributes
 	existingEntry.Extended = newEntry.Extended
 	existingEntry.HardLinkId = newEntry.HardLinkId
@@ -235,11 +235,11 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
 
 }
 func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
-	aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks, 0, math.MaxInt64)
+	aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.GetChunks(), 0, math.MaxInt64)
 	if aErr != nil {
 		return nil, nil, aErr
 	}
-	bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks, 0, math.MaxInt64)
+	bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.GetChunks(), 0, math.MaxInt64)
 	if bErr != nil {
 		return nil, nil, bErr
 	}
@@ -97,7 +97,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []in
 	}
 
 	totalSize := filer.FileSize(entry)
-	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
 
 	wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())
 	defer wc.Close()
@@ -75,7 +75,7 @@ func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signa
 	glog.V(4).Infof("Create Entry key: %s", key)
 
 	totalSize := filer.FileSize(entry)
-	chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize))
+	chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize))
 
 	dir := filepath.Dir(key)
 
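The "later locking" the title refers to is not part of this diff. A minimal sketch of one plausible follow-up, assuming a filer-side Entry that owns its chunk list (the type and field names below are illustrative, not SeaweedFS's actual ones):

package filer

import "sync"

// FileChunk is a stand-in for the generated filer_pb.FileChunk message.
type FileChunk struct {
	FileId string
	Offset int64
	Size   uint64
}

// Entry guards its chunk list with a read-write mutex (hypothetical).
type Entry struct {
	chunksLock sync.RWMutex
	chunks     []*FileChunk
}

// GetChunks reads the chunk list under the read lock. Because the
// refactor above routed every reader through GetChunks(), no call
// site needs to change when this lock appears.
func (e *Entry) GetChunks() []*FileChunk {
	e.chunksLock.RLock()
	defer e.chunksLock.RUnlock()
	return e.chunks
}

// SetChunks replaces the chunk list under the write lock.
func (e *Entry) SetChunks(chunks []*FileChunk) {
	e.chunksLock.Lock()
	defer e.chunksLock.Unlock()
	e.chunks = chunks
}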