refactor filer_pb.Entry and filer.Entry to use GetChunks()

so that locking can later be added when reading chunks
This commit is contained in:
chrislu
2022-11-15 06:33:36 -08:00
parent 371972a1c2
commit 70a4c98b00
56 changed files with 107 additions and 103 deletions
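The point of routing every read through an accessor is that a lock can later be taken in one place instead of at each of the roughly one hundred call sites touched below; in this commit the accessor itself just forwards the field, the locking is the later step the message refers to. A minimal sketch of that follow-up, assuming a hypothetical chunksLock guard and the current seaweedfs module path, neither of which is part of this commit:

package filer

import (
	"sync"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

// Sketch only: the chunksLock mutex is an assumption illustrating the
// "later locking" the commit message mentions, not existing code.
type Entry struct {
	chunksLock sync.RWMutex
	Chunks     []*filer_pb.FileChunk
}

// GetChunks is the single place chunk reads go through, so a lock can be
// added here later without revisiting every caller again.
func (entry *Entry) GetChunks() []*filer_pb.FileChunk {
	entry.chunksLock.RLock()
	defer entry.chunksLock.RUnlock()
	return entry.Chunks
}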

View File

@@ -55,7 +55,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write
return err
}
- return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, int64(filer.FileSize(respLookupEntry.Entry)))
+ return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.GetChunks(), 0, int64(filer.FileSize(respLookupEntry.Entry)))
})

View File

@@ -69,7 +69,7 @@ func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir
byteCount += numByte
}
} else {
- fileBlockCount = uint64(len(entry.Chunks))
+ fileBlockCount = uint64(len(entry.GetChunks()))
fileByteCount = filer.FileSize(entry)
blockCount += fileBlockCount
byteCount += fileByteCount

View File

@@ -93,7 +93,7 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer
dir = dir[:len(dir)-1]
}
fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n",
- fileMode, len(entry.Chunks),
+ fileMode, len(entry.GetChunks()),
userName, groupName,
filer.FileSize(entry), dir, entry.Name)
} else {

View File

@@ -54,8 +54,8 @@ func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.W
bytes, _ := proto.Marshal(respLookupEntry.Entry)
gzippedBytes, _ := util.GzipData(bytes)
// zstdBytes, _ := util.ZstdData(bytes)
- // fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d zstd:%d\n", len(respLookupEntry.Entry.Chunks), len(bytes), len(gzippedBytes), len(zstdBytes))
- fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d\n", len(respLookupEntry.Entry.Chunks), len(bytes), len(gzippedBytes))
+ // fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d zstd:%d\n", len(respLookupEntry.Entry.GetChunks()), len(bytes), len(gzippedBytes), len(zstdBytes))
+ fmt.Fprintf(writer, "chunks %d meta size: %d gzip:%d\n", len(respLookupEntry.Entry.GetChunks()), len(bytes), len(gzippedBytes))
return nil

View File

@@ -216,7 +216,7 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo m
if *c.verbose && entry.Entry.IsDirectory {
fmt.Fprintf(c.writer, "checking directory %s\n", util.NewFullPath(entry.Dir, entry.Entry.Name))
}
- dataChunks, manifestChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.Chunks, 0, math.MaxInt64)
+ dataChunks, manifestChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.GetChunks(), 0, math.MaxInt64)
if resolveErr != nil {
return fmt.Errorf("failed to ResolveChunkManifest: %+v", resolveErr)
}
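On the filer_pb side, GetChunks() is the standard protoc-gen-go getter, which tolerates a nil receiver, so rewrites like len(entry.GetChunks()) above behave the same as the direct field access for non-nil entries and additionally avoid a panic when the entry is nil. Roughly the generated shape, with stand-in types so the sketch compiles (the repository's generated filer.pb.go is the authoritative version):

package filer_pb

// Stand-in types for the sketch; the real definitions are generated
// from the protobuf schema.
type FileChunk struct{}

type Entry struct {
	Chunks []*FileChunk
}

// GetChunks follows the protoc-gen-go getter pattern: it returns nil for a
// nil receiver, so len(entry.GetChunks()) is safe even when entry is nil.
func (x *Entry) GetChunks() []*FileChunk {
	if x != nil {
		return x.Chunks
	}
	return nil
}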