Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2025-09-19 09:27:56 +08:00
refactoring to typed Size
Go is amazing with refactoring!
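The commit replaces bare uint32 needle sizes with a named types.Size throughout the erasure_coding package. The actual definition of types.Size is not part of this diff; the sketch below assumes it is a defined type over a 32-bit unsigned integer, and the tombstone sentinel value is likewise an assumption, used only to show what a defined type buys over a raw uint32:

```go
package main

import "fmt"

// Size sketches the defined type that the commit threads through the
// erasure_coding package; the real definition lives in weed/storage/types
// and is not shown in this diff, so the underlying uint32 is an assumption.
type Size uint32

// TombstoneFileSize marks a deleted needle. The concrete sentinel value here
// is an assumption for this sketch, not the constant defined by seaweedfs.
const TombstoneFileSize = Size(0xFFFFFFFF)

// IsDeleted shows the kind of helper a defined type makes possible: size
// semantics can live on methods instead of being re-derived at call sites.
func (s Size) IsDeleted() bool {
	return s == TombstoneFileSize
}

func main() {
	var raw uint32 = 4096
	// A bare uint32 no longer satisfies a Size parameter without an explicit
	// conversion, which is why the diff adds casts such as
	// types.Size(needle.GetActualSize(size, version)) at the boundaries.
	s := Size(raw)
	fmt.Println(s, s.IsDeleted())
}
```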
@@ -52,7 +52,7 @@ func FindDatFileSize(baseFileName string) (datSize int64, err error) {
         return 0, fmt.Errorf("read ec volume %s version: %v", baseFileName, err)
     }

-    err = iterateEcxFile(baseFileName, func(key types.NeedleId, offset types.Offset, size uint32) error {
+    err = iterateEcxFile(baseFileName, func(key types.NeedleId, offset types.Offset, size types.Size) error {

         if size == types.TombstoneFileSize {
             return nil
@@ -88,7 +88,7 @@ func readEcVolumeVersion(baseFileName string) (version needle.Version, err error

 }

-func iterateEcxFile(baseFileName string, processNeedleFn func(key types.NeedleId, offset types.Offset, size uint32) error) error {
+func iterateEcxFile(baseFileName string, processNeedleFn func(key types.NeedleId, offset types.Offset, size types.Size) error) error {
     ecxFile, openErr := os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644)
     if openErr != nil {
         return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr)
@@ -294,7 +294,7 @@ func readNeedleMap(baseFileName string) (*needle_map.MemDb, error) {
     defer indexFile.Close()

     cm := needle_map.NewMemDb()
-    err = idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
+    err = idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size types.Size) error {
         if !offset.IsZero() && size != types.TombstoneFileSize {
             cm.Set(key, offset, size)
         } else {
@@ -1,14 +1,18 @@
 package erasure_coding

+import (
+    "github.com/chrislusf/seaweedfs/weed/storage/types"
+)
+
 type Interval struct {
     BlockIndex          int
     InnerBlockOffset    int64
-    Size                uint32
+    Size                types.Size
     IsLargeBlock        bool
     LargeBlockRowsCount int
 }

-func LocateData(largeBlockLength, smallBlockLength int64, datSize int64, offset int64, size uint32) (intervals []Interval) {
+func LocateData(largeBlockLength, smallBlockLength int64, datSize int64, offset int64, size types.Size) (intervals []Interval) {
     blockIndex, isLargeBlock, innerBlockOffset := locateOffset(largeBlockLength, smallBlockLength, datSize, offset)

     // adding DataShardsCount*smallBlockLength to ensure we can derive the number of large block size from a shard size
@@ -32,7 +36,7 @@ func LocateData(largeBlockLength, smallBlockLength int64, datSize int64, offset
             intervals = append(intervals, interval)
             return
         }
-        interval.Size = uint32(blockRemaining)
+        interval.Size = types.Size(blockRemaining)
         intervals = append(intervals, interval)

         size -= interval.Size
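In the Interval/LocateData hunks, both the Interval.Size field and the running remainder become types.Size, so expressions like `size -= interval.Size` stay within one type and the only cast happens at the block boundary. A simplified standalone sketch of that arithmetic follows; splitIntoBlocks is a hypothetical stand-in for LocateData that ignores the large/small block distinction, and Size wrapping uint32 is an assumption:

```go
package main

import "fmt"

// Size mirrors the defined type from the diff (underlying uint32 is an assumption).
type Size uint32

// Interval is a trimmed-down version of the struct in the hunk above; only
// the fields relevant to the Size change are kept.
type Interval struct {
	BlockIndex       int
	InnerBlockOffset int64
	Size             Size
}

// splitIntoBlocks cuts a read of `size` bytes into fixed-length blocks,
// keeping the per-interval sizes and the running remainder in the same
// defined type so that `size -= interval.Size` type-checks.
func splitIntoBlocks(blockLength int64, offset int64, size Size) (intervals []Interval) {
	for size > 0 {
		blockIndex := int(offset / blockLength)
		inner := offset % blockLength
		blockRemaining := Size(blockLength - inner) // the one cast, at the block boundary
		if size <= blockRemaining {
			intervals = append(intervals, Interval{BlockIndex: blockIndex, InnerBlockOffset: inner, Size: size})
			return
		}
		interval := Interval{BlockIndex: blockIndex, InnerBlockOffset: inner, Size: blockRemaining}
		intervals = append(intervals, interval)
		size -= interval.Size
		offset += int64(interval.Size)
	}
	return
}

func main() {
	// A 3000-byte read starting at offset 1000 with 1024-byte blocks.
	for _, interval := range splitIntoBlocks(1024, 1000, Size(3000)) {
		fmt.Printf("block %d, inner offset %d, size %d\n", interval.BlockIndex, interval.InnerBlockOffset, interval.Size)
	}
}
```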
@@ -71,7 +71,7 @@ func validateFiles(baseFileName string) error {
     return nil
 }

-func assertSame(datFile *os.File, datSize int64, ecFiles []*os.File, offset types.Offset, size uint32) error {
+func assertSame(datFile *os.File, datSize int64, ecFiles []*os.File, offset types.Offset, size types.Size) error {

     data, err := readDatFile(datFile, offset, size)
     if err != nil {
@@ -90,7 +90,7 @@ func assertSame(datFile *os.File, datSize int64, ecFiles []*os.File, offset type
     return nil
 }

-func readDatFile(datFile *os.File, offset types.Offset, size uint32) ([]byte, error) {
+func readDatFile(datFile *os.File, offset types.Offset, size types.Size) ([]byte, error) {

     data := make([]byte, size)
     n, err := datFile.ReadAt(data, offset.ToAcutalOffset())
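readDatFile can keep `data := make([]byte, size)` unchanged because make accepts any integer type, including a defined one. A small sketch of that pattern follows; readAt and the file name are hypothetical, and Size wrapping uint32 is an assumption:

```go
package main

import (
	"fmt"
	"os"
)

// Size mirrors the defined type from the diff (underlying uint32 is an assumption).
type Size uint32

// readAt is a minimal sketch of the readDatFile pattern: make() takes any
// integer type, so a defined Size can size the buffer without a cast, while
// the read length n stays a plain int.
func readAt(f *os.File, offset int64, size Size) ([]byte, error) {
	data := make([]byte, size)
	n, err := f.ReadAt(data, offset)
	if err != nil {
		return nil, fmt.Errorf("read %s at offset %d: %v", f.Name(), offset, err)
	}
	return data[:n], nil
}

func main() {
	// "example.dat" is a placeholder path for the sketch.
	f, err := os.Open("example.dat")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	data, err := readAt(f, 0, Size(16))
	fmt.Println(len(data), err)
}
```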
@@ -103,7 +103,7 @@ func readDatFile(datFile *os.File, offset types.Offset, size uint32) ([]byte, er
     return data, nil
 }

-func readEcFile(datSize int64, ecFiles []*os.File, offset types.Offset, size uint32) (data []byte, err error) {
+func readEcFile(datSize int64, ecFiles []*os.File, offset types.Offset, size types.Size) (data []byte, err error) {

     intervals := LocateData(largeBlockSize, smallBlockSize, datSize, offset.ToAcutalOffset(), size)

@@ -140,7 +140,7 @@ func readOneInterval(interval Interval, ecFiles []*os.File) (data []byte, err er
     return
 }

-func readFromOtherEcFiles(ecFiles []*os.File, ecFileIndex int, ecFileOffset int64, size uint32) (data []byte, err error) {
+func readFromOtherEcFiles(ecFiles []*os.File, ecFileIndex int, ecFileOffset int64, size types.Size) (data []byte, err error) {
     enc, err := reedsolomon.New(DataShardsCount, ParityShardsCount)
     if err != nil {
         return nil, fmt.Errorf("failed to create encoder: %v", err)
@@ -187,7 +187,7 @@ func (ev *EcVolume) ToVolumeEcShardInformationMessage() (messages []*master_pb.V
     return
 }

-func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle.Version) (offset types.Offset, size uint32, intervals []Interval, err error) {
+func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle.Version) (offset types.Offset, size types.Size, intervals []Interval, err error) {

     // find the needle from ecx file
     offset, size, err = ev.FindNeedleFromEcx(needleId)
@@ -198,16 +198,16 @@ func (ev *EcVolume) LocateEcShardNeedle(needleId types.NeedleId, version needle.
     shard := ev.Shards[0]

     // calculate the locations in the ec shards
-    intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset.ToAcutalOffset(), uint32(needle.GetActualSize(size, version)))
+    intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset.ToAcutalOffset(), types.Size(needle.GetActualSize(size, version)))

     return
 }

-func (ev *EcVolume) FindNeedleFromEcx(needleId types.NeedleId) (offset types.Offset, size uint32, err error) {
+func (ev *EcVolume) FindNeedleFromEcx(needleId types.NeedleId) (offset types.Offset, size types.Size, err error) {
     return SearchNeedleFromSortedIndex(ev.ecxFile, ev.ecxFileSize, needleId, nil)
 }

-func SearchNeedleFromSortedIndex(ecxFile *os.File, ecxFileSize int64, needleId types.NeedleId, processNeedleFn func(file *os.File, offset int64) error) (offset types.Offset, size uint32, err error) {
+func SearchNeedleFromSortedIndex(ecxFile *os.File, ecxFileSize int64, needleId types.NeedleId, processNeedleFn func(file *os.File, offset int64) error) (offset types.Offset, size types.Size, err error) {
     var key types.NeedleId
     buf := make([]byte, types.NeedleMapEntrySize)
     l, h := int64(0), ecxFileSize/types.NeedleMapEntrySize
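FindNeedleFromEcx and SearchNeedleFromSortedIndex now return types.Size, which means the raw bytes read from the .ecx index are converted to Size once, at the point where an entry is decoded. A sketch of that single conversion point follows; the 8/4/4-byte entry layout and decodeEntry are assumptions for illustration, not the actual .ecx format:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Size mirrors the defined type from the diff (underlying uint32 is an assumption).
type Size uint32

// The 8-byte key / 4-byte offset / 4-byte size layout below is purely
// illustrative; it is not the actual .ecx entry format.
const entryLen = 8 + 4 + 4

// decodeEntry shows the single point where raw index bytes become a typed
// Size; everything upstream of this conversion can then pass Size around
// without further casts.
func decodeEntry(buf []byte) (key uint64, offset uint32, size Size) {
	key = binary.BigEndian.Uint64(buf[0:8])
	offset = binary.BigEndian.Uint32(buf[8:12])
	size = Size(binary.BigEndian.Uint32(buf[12:16]))
	return
}

func main() {
	buf := make([]byte, entryLen)
	binary.BigEndian.PutUint64(buf[0:8], 42)     // needle key
	binary.BigEndian.PutUint32(buf[8:12], 7)     // offset
	binary.BigEndian.PutUint32(buf[12:16], 4096) // size
	key, offset, size := decodeEntry(buf)
	fmt.Printf("key %d, offset %d, size %d\n", key, offset, size)
}
```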
@@ -44,7 +44,7 @@ func TestPositioning(t *testing.T) {
         fmt.Printf("offset: %d size: %d\n", offset.ToAcutalOffset(), size)

         var shardEcdFileSize int64 = 1118830592 // 1024*1024*1024*3
-        intervals := LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shardEcdFileSize, offset.ToAcutalOffset(), uint32(needle.GetActualSize(size, needle.CurrentVersion)))
+        intervals := LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shardEcdFileSize, offset.ToAcutalOffset(), types.Size(needle.GetActualSize(size, needle.CurrentVersion)))

         for _, interval := range intervals {
             shardId, shardOffset := interval.ToShardIdAndOffset(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize)