Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2025-10-21 16:47:24 +08:00
volume: load ec shards during heartbeats to master
weed/storage/disk_location.go

@@ -9,6 +9,7 @@ import (
 	"fmt"

 	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 )

@@ -17,43 +18,52 @@ type DiskLocation struct {
 	MaxVolumeCount int
 	volumes        map[needle.VolumeId]*Volume
 	sync.RWMutex
+
+	// erasure coding
+	ecShards     map[needle.VolumeId]erasure_coding.EcVolumeShards
+	ecShardsLock sync.RWMutex
 }

 func NewDiskLocation(dir string, maxVolumeCount int) *DiskLocation {
 	location := &DiskLocation{Directory: dir, MaxVolumeCount: maxVolumeCount}
 	location.volumes = make(map[needle.VolumeId]*Volume)
+	location.ecShards = make(map[needle.VolumeId]erasure_coding.EcVolumeShards)
 	return location
 }

 func (l *DiskLocation) volumeIdFromPath(dir os.FileInfo) (needle.VolumeId, string, error) {
 	name := dir.Name()
 	if !dir.IsDir() && strings.HasSuffix(name, ".dat") {
-		collection := ""
 		base := name[:len(name)-len(".dat")]
-		i := strings.LastIndex(base, "_")
-		if i > 0 {
-			collection, base = base[0:i], base[i+1:]
-		}
-		vol, err := needle.NewVolumeId(base)
-		return vol, collection, err
+		collection, volumeId, err := parseCollectionVolumeId(base)
+		return volumeId, collection, err
 	}

 	return 0, "", fmt.Errorf("Path is not a volume: %s", name)
 }

-func (l *DiskLocation) loadExistingVolume(dir os.FileInfo, needleMapKind NeedleMapType, mutex *sync.RWMutex) {
-	name := dir.Name()
-	if !dir.IsDir() && strings.HasSuffix(name, ".dat") {
-		vid, collection, err := l.volumeIdFromPath(dir)
+func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeId, err error) {
+	i := strings.LastIndex(base, "_")
+	if i > 0 {
+		collection, base = base[0:i], base[i+1:]
+	}
+	vol, err := needle.NewVolumeId(base)
+	return collection, vol, err
+}
+
+func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapType) {
+	name := fileInfo.Name()
+	if !fileInfo.IsDir() && strings.HasSuffix(name, ".dat") {
+		vid, collection, err := l.volumeIdFromPath(fileInfo)
 		if err == nil {
-			mutex.RLock()
+			l.RLock()
 			_, found := l.volumes[vid]
-			mutex.RUnlock()
+			l.RUnlock()
 			if !found {
 				if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0); e == nil {
-					mutex.Lock()
+					l.Lock()
 					l.volumes[vid] = v
-					mutex.Unlock()
+					l.Unlock()
 					size, _, _ := v.FileStat()
 					glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s",
 						l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
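
The extracted parseCollectionVolumeId helper treats everything before the last underscore as the collection name and the remainder as the numeric volume id. A standalone sketch of the same rule (illustrative demo, not part of the commit; parse stands in for parseCollectionVolumeId, with strconv replacing needle.NewVolumeId so the snippet is self-contained):

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	func parse(base string) (collection string, vid uint32, err error) {
		// Split on the LAST "_" so collection names may themselves contain underscores.
		if i := strings.LastIndex(base, "_"); i > 0 {
			collection, base = base[:i], base[i+1:]
		}
		id, err := strconv.ParseUint(base, 10, 32)
		return collection, uint32(id), err
	}

	func main() {
		for _, base := range []string{"3", "pictures_7", "a_b_12"} {
			c, v, err := parse(base)
			fmt.Printf("%q -> collection=%q vid=%d err=%v\n", base, c, v, err)
		}
	}

So "pictures_7.dat" maps to collection "pictures", volume 7, and "a_b_12.dat" to collection "a_b", volume 12.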
@@ -80,13 +90,12 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, con
 	}()

 	var wg sync.WaitGroup
-	var mutex sync.RWMutex
 	for workerNum := 0; workerNum < concurrency; workerNum++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
 			for dir := range task_queue {
-				l.loadExistingVolume(dir, needleMapKind, &mutex)
+				l.loadExistingVolume(dir, needleMapKind)
 			}
 		}()
 	}
@@ -95,12 +104,13 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, con
 }

 func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) {
 	l.Lock()
 	defer l.Unlock()

 	l.concurrentLoadingVolumes(needleMapKind, 10)
+	glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)

-	glog.V(0).Infoln("Store started on dir:", l.Directory, "with", len(l.volumes), "volumes", "max", l.MaxVolumeCount)
+	l.loadAllEcShards()
+	glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecShards))
 }

 func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) {
@@ -132,12 +142,11 @@ func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (e error) {
 }

 func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapType) bool {
-	if dirs, err := ioutil.ReadDir(l.Directory); err == nil {
-		for _, dir := range dirs {
-			volId, _, err := l.volumeIdFromPath(dir)
+	if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil {
+		for _, fileInfo := range fileInfos {
+			volId, _, err := l.volumeIdFromPath(fileInfo)
 			if vid == volId && err == nil {
-				var mutex sync.RWMutex
-				l.loadExistingVolume(dir, needleMapKind, &mutex)
+				l.loadExistingVolume(fileInfo, needleMapKind)
 				return true
 			}
 		}
@@ -194,10 +203,16 @@ func (l *DiskLocation) VolumesLen() int {

 func (l *DiskLocation) Close() {
 	l.Lock()
-	defer l.Unlock()
-
 	for _, v := range l.volumes {
 		v.Close()
 	}
+	l.Unlock()
+
+	l.ecShardsLock.Lock()
+	for _, shards := range l.ecShards {
+		shards.Close()
+	}
+	l.ecShardsLock.Unlock()
+
 	return
 }
weed/storage/disk_location_ec.go (new file, 84 lines)

@@ -0,0 +1,84 @@
+package storage
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path"
+	"regexp"
+	"sort"
+	"strconv"
+
+	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+var (
+	re = regexp.MustCompile("\\.ec[0-9][0-9]")
+)
+
+func (l *DiskLocation) loadEcShards(baseName string, shards []string, collection string, vid needle.VolumeId) (err error) {
+	for _, shard := range shards {
+		shardId, err := strconv.ParseInt(path.Ext(shard)[3:], 10, 64)
+		if err != nil {
+			return fmt.Errorf("failed to parse ec shard name %v: %v", shard, err)
+		}
+		ecVolumeShard, err := erasure_coding.NewEcVolumeShard(l.Directory, collection, vid, int(shardId))
+		if err != nil {
+			return fmt.Errorf("failed to create ec shard %v: %v", shard, err)
+		}
+		l.ecShardsLock.Lock()
+		l.ecShards[vid] = append(l.ecShards[vid], ecVolumeShard)
+		l.ecShardsLock.Unlock()
+	}
+
+	return nil
+}
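
A note on the shardId parse above: path.Ext returns the extension including the dot, so for "3.ec07" it yields ".ec07", and the [3:] slice drops ".ec" to leave "07". A quick check of that indexing (illustrative snippet, not from the commit):

	package main

	import (
		"fmt"
		"path"
		"strconv"
	)

	func main() {
		ext := path.Ext("3.ec07")                    // ".ec07"
		id, err := strconv.ParseInt(ext[3:], 10, 64) // "07" -> 7
		fmt.Println(id, err)                         // 7 <nil>
	}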
+
+func (l *DiskLocation) loadAllEcShards() (err error) {
+	fileInfos, err := ioutil.ReadDir(l.Directory)
+	if err != nil {
+		return fmt.Errorf("load all ec shards in dir %s: %v", l.Directory, err)
+	}
+
+	sort.Slice(fileInfos, func(i, j int) bool {
+		return fileInfos[i].Name() < fileInfos[j].Name()
+	})
+
+	var sameVolumeShards []string
+	var prevVolumeId needle.VolumeId
+	for _, fileInfo := range fileInfos {
+		if fileInfo.IsDir() {
+			continue
+		}
+		ext := path.Ext(fileInfo.Name())
+		name := fileInfo.Name()
+		baseName := name[:len(name)-len(ext)]
+
+		collection, volumeId, err := parseCollectionVolumeId(baseName)
+		if err != nil {
+			continue
+		}
+
+		if re.MatchString(ext) {
+			if prevVolumeId == 0 || volumeId == prevVolumeId {
+				sameVolumeShards = append(sameVolumeShards, fileInfo.Name())
+			} else {
+				sameVolumeShards = []string{fileInfo.Name()}
+			}
+			prevVolumeId = volumeId
+			continue
+		}
+
+		if ext == ".ecx" && volumeId == prevVolumeId {
+			if err = l.loadEcShards(baseName, sameVolumeShards, collection, volumeId); err != nil {
+				return fmt.Errorf("loadEcShards collection:%v volumeId:%d : %v", collection, volumeId, err)
+			}
+			prevVolumeId = volumeId
+			continue
+		}
+	}
+	return nil
+}
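
The loop above relies on the sorted directory listing: all ".ecNN" shard files of a volume sort immediately together, and once a matching ".ecx" index file for the same volume id shows up, the accumulated batch is loaded. A toy simulation of that grouping (assumed file names; the reset-on-new-volume branch is simplified away):

	package main

	import (
		"fmt"
		"path"
		"regexp"
		"sort"
	)

	var ecRe = regexp.MustCompile("\\.ec[0-9][0-9]")

	func main() {
		files := []string{"3.ec01", "3.ec00", "3.ecx", "7.dat", "7.ec00", "7.ecx"}
		sort.Strings(files)

		var pending []string // plays the role of sameVolumeShards
		for _, name := range files {
			ext := path.Ext(name)
			switch {
			case ecRe.MatchString(ext):
				pending = append(pending, name)
			case ext == ".ecx":
				fmt.Printf("load %v with index %s\n", pending, name)
				pending = nil
			}
		}
		// load [3.ec00 3.ec01] with index 3.ecx
		// load [7.ec00] with index 7.ecx
	}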
weed/storage/disk_location_ec_test.go (new file, 17 lines)

@@ -0,0 +1,17 @@
+package storage
+
+import (
+	"testing"
+)
+
+func TestLoadingEcShards(t *testing.T) {
+	dl := NewDiskLocation("./erasure_coding", 100)
+	err := dl.loadAllEcShards()
+	if err != nil {
+		t.Errorf("load all ec shards: %v", err)
+	}
+
+	if len(dl.ecShards) != 1 {
+		t.Errorf("expected 1 ec volume loaded, found %d", len(dl.ecShards))
+	}
+}
weed/storage/erasure_coding/3.ec07 (new empty file, test fixture)
weed/storage/erasure_coding/3.ecx (new empty file, test fixture)
@@ -6,7 +6,7 @@ import (
 	"os"

 	"github.com/chrislusf/seaweedfs/weed/glog"
-	"github.com/chrislusf/seaweedfs/weed/storage"
+	"github.com/chrislusf/seaweedfs/weed/storage/idx"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"github.com/klauspost/reedsolomon"
@@ -190,7 +190,7 @@ func readCompactMap(baseFileName string) (*needle_map.CompactMap, error) {
 	defer indexFile.Close()

 	cm := needle_map.NewCompactMap()
-	err = storage.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
+	err = idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
 		if !offset.IsZero() && size != types.TombstoneFileSize {
 			cm.Set(key, offset, size)
 		} else {
weed/storage/erasure_coding/ec_volume.go (new file, 108 lines)

@@ -0,0 +1,108 @@
+package erasure_coding
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"strconv"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+type EcVolumeShard struct {
+	VolumeId   needle.VolumeId
+	ShardId    uint8
+	Collection string
+	dir        string
+	ecdFile    *os.File
+	ecxFile    *os.File
+}
+
+type EcVolumeShards []*EcVolumeShard
+
+func NewEcVolumeShard(dirname string, collection string, id needle.VolumeId, shardId int) (v *EcVolumeShard, e error) {
+	v = &EcVolumeShard{dir: dirname, Collection: collection, VolumeId: id, ShardId: uint8(shardId)}
+
+	baseFileName := v.FileName()
+	if v.ecxFile, e = os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644); e != nil {
+		return nil, fmt.Errorf("cannot read ec volume index %s.ecx: %v", baseFileName, e)
+	}
+	if v.ecdFile, e = os.OpenFile(baseFileName+ToExt(shardId), os.O_RDONLY, 0644); e != nil {
+		return nil, fmt.Errorf("cannot read ec volume shard %s%s: %v", baseFileName, ToExt(shardId), e)
+	}
+
+	return
+}
+
+func (shards *EcVolumeShards) AddEcVolumeShard(ecVolumeShard *EcVolumeShard) bool {
+	for _, s := range *shards {
+		if s.ShardId == ecVolumeShard.ShardId {
+			return false
+		}
+	}
+	*shards = append(*shards, ecVolumeShard)
+	return true
+}
+
+func (shards *EcVolumeShards) DeleteEcVolumeShard(ecVolumeShard *EcVolumeShard) bool {
+	foundPosition := -1
+	for i, s := range *shards {
+		if s.ShardId == ecVolumeShard.ShardId {
+			foundPosition = i
+		}
+	}
+	if foundPosition < 0 {
+		return false
+	}
+
+	*shards = append((*shards)[:foundPosition], (*shards)[foundPosition+1:]...)
+	return true
+}
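
Both methods key purely on ShardId, giving EcVolumeShards set-like semantics within one volume. If dropped into the package as an example test, the expected behavior would be (a hedged sketch, not part of the commit):

	package erasure_coding

	import "fmt"

	func ExampleEcVolumeShards() {
		var shards EcVolumeShards
		s := &EcVolumeShard{ShardId: 1}
		fmt.Println(shards.AddEcVolumeShard(s))    // first shard with id 1
		fmt.Println(shards.AddEcVolumeShard(s))    // duplicate shard id is refused
		fmt.Println(shards.DeleteEcVolumeShard(s)) // removed by shard id
		fmt.Println(shards.DeleteEcVolumeShard(s)) // already gone
		// Output:
		// true
		// false
		// true
		// false
	}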
+
+func (shards *EcVolumeShards) Close() {
+	for _, s := range *shards {
+		s.Close()
+	}
+}
+
+func (shards *EcVolumeShards) ToVolumeInformationMessage() (messages []*master_pb.VolumeEcShardInformationMessage) {
+	for _, s := range *shards {
+		m := &master_pb.VolumeEcShardInformationMessage{
+			Id:         uint32(s.VolumeId),
+			Collection: s.Collection,
+			EcIndex:    uint32(s.ShardId),
+		}
+		messages = append(messages, m)
+	}
+	return
+}
+
+func (v *EcVolumeShard) String() string {
+	return fmt.Sprintf("ec shard %v:%v, dir:%s, Collection:%s", v.VolumeId, v.ShardId, v.dir, v.Collection)
+}
+
+func (v *EcVolumeShard) FileName() (fileName string) {
+	return EcShardFileName(v.Collection, v.dir, int(v.VolumeId))
+}
+
+func EcShardFileName(collection string, dir string, id int) (fileName string) {
+	idString := strconv.Itoa(id)
+	if collection == "" {
+		fileName = path.Join(dir, idString)
+	} else {
+		fileName = path.Join(dir, collection+"_"+idString)
+	}
+	return
+}
+
+func (v *EcVolumeShard) Close() {
+	if v.ecdFile != nil {
+		_ = v.ecdFile.Close()
+		v.ecdFile = nil
+	}
+	if v.ecxFile != nil {
+		_ = v.ecxFile.Close()
+		v.ecxFile = nil
+	}
+}
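
EcShardFileName composes the extension-less base path that NewEcVolumeShard appends ".ecx" or ToExt(shardId) to; judging by the "\.ec[0-9][0-9]" pattern and the 3.ec07 fixture in this commit, ToExt renders two-digit shard extensions. An example-test sketch of the naming (illustrative, not from the commit):

	package erasure_coding

	import "fmt"

	func ExampleEcShardFileName() {
		fmt.Println(EcShardFileName("", "/data", 3))
		fmt.Println(EcShardFileName("pictures", "/data", 3))
		// shard 7 of volume 3 in collection "pictures" would then live at
		// "/data/pictures_3" + ToExt(7), i.e. "/data/pictures_3.ec07"
		// Output:
		// /data/3
		// /data/pictures_3
	}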
weed/storage/idx/walk.go (new file, 54 lines)

@@ -0,0 +1,54 @@
+package idx
+
+import (
+	"io"
+	"os"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// walks through the index file, calls fn function with each key, offset, size
+// stops with the error returned by the fn function
+func WalkIndexFile(r *os.File, fn func(key types.NeedleId, offset types.Offset, size uint32) error) error {
+	var readerOffset int64
+	bytes := make([]byte, types.NeedleMapEntrySize*RowsToRead)
+	count, e := r.ReadAt(bytes, readerOffset)
+	glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
+	readerOffset += int64(count)
+	var (
+		key    types.NeedleId
+		offset types.Offset
+		size   uint32
+		i      int
+	)
+
+	for count > 0 && e == nil || e == io.EOF {
+		for i = 0; i+types.NeedleMapEntrySize <= count; i += types.NeedleMapEntrySize {
+			key, offset, size = IdxFileEntry(bytes[i : i+types.NeedleMapEntrySize])
+			if e = fn(key, offset, size); e != nil {
+				return e
+			}
+		}
+		if e == io.EOF {
+			return nil
+		}
+		count, e = r.ReadAt(bytes, readerOffset)
+		glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
+		readerOffset += int64(count)
+	}
+	return e
+}
+
+func IdxFileEntry(bytes []byte) (key types.NeedleId, offset types.Offset, size uint32) {
+	key = types.BytesToNeedleId(bytes[:types.NeedleIdSize])
+	offset = types.BytesToOffset(bytes[types.NeedleIdSize : types.NeedleIdSize+types.OffsetSize])
+	size = util.BytesToUint32(bytes[types.NeedleIdSize+types.OffsetSize : types.NeedleIdSize+types.OffsetSize+types.SizeSize])
+	return
+}
+
+const (
+	RowsToRead = 1024
+)
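
With WalkIndexFile now living in its own idx package, any component (including the erasure-coding code above) can scan a needle index without importing the whole storage package. A hedged usage sketch, counting live entries in a hypothetical "3.idx" file:

	package main

	import (
		"fmt"
		"os"

		"github.com/chrislusf/seaweedfs/weed/storage/idx"
		"github.com/chrislusf/seaweedfs/weed/storage/types"
	)

	func main() {
		f, err := os.Open("3.idx") // hypothetical index file
		if err != nil {
			panic(err)
		}
		defer f.Close()

		live := 0
		err = idx.WalkIndexFile(f, func(key types.NeedleId, offset types.Offset, size uint32) error {
			if !offset.IsZero() && size != types.TombstoneFileSize {
				live++ // deletions are recorded as tombstones, not removed
			}
			return nil
		})
		fmt.Println("live entries:", live, "err:", err)
	}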
weed/storage/needle_map.go

@@ -8,7 +8,6 @@ import (

 	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
 	. "github.com/chrislusf/seaweedfs/weed/storage/types"
-	"github.com/chrislusf/seaweedfs/weed/util"
 )

 type NeedleMapType int
@@ -55,12 +54,6 @@ func (nm *baseNeedleMapper) IndexFileName() string {
 	return nm.indexFile.Name()
 }

-func IdxFileEntry(bytes []byte) (key NeedleId, offset Offset, size uint32) {
-	key = BytesToNeedleId(bytes[:NeedleIdSize])
-	offset = BytesToOffset(bytes[NeedleIdSize : NeedleIdSize+OffsetSize])
-	size = util.BytesToUint32(bytes[NeedleIdSize+OffsetSize : NeedleIdSize+OffsetSize+SizeSize])
-	return
-}
 func (nm *baseNeedleMapper) appendToIndexFile(key NeedleId, offset Offset, size uint32) error {
 	bytes := needle_map.ToBytes(key, offset, size)
weed/storage/needle_map_leveldb.go

@@ -2,10 +2,12 @@ package storage

 import (
 	"fmt"
-	"github.com/syndtr/goleveldb/leveldb/opt"
 	"os"
 	"path/filepath"

+	"github.com/chrislusf/seaweedfs/weed/storage/idx"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
 	. "github.com/chrislusf/seaweedfs/weed/storage/types"
@@ -64,7 +66,7 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error {
 		return err
 	}
 	defer db.Close()
-	return WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error {
+	return idx.WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error {
 		if !offset.IsZero() && size != TombstoneFileSize {
 			levelDbWrite(db, key, offset, size)
 		} else {
weed/storage/needle_map_memory.go

@@ -1,10 +1,10 @@
 package storage

 import (
-	"io"
 	"os"

 	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/storage/idx"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
 	. "github.com/chrislusf/seaweedfs/weed/storage/types"
 )
@@ -30,10 +30,6 @@ func NewBtreeNeedleMap(file *os.File) *NeedleMap {
 	return nm
 }

-const (
-	RowsToRead = 1024
-)
-
 func LoadCompactNeedleMap(file *os.File) (*NeedleMap, error) {
 	nm := NewCompactNeedleMap(file)
 	return doLoading(file, nm)
@@ -45,7 +41,7 @@ func LoadBtreeNeedleMap(file *os.File) (*NeedleMap, error) {
 }

 func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) {
-	e := WalkIndexFile(file, func(key NeedleId, offset Offset, size uint32) error {
+	e := idx.WalkIndexFile(file, func(key NeedleId, offset Offset, size uint32) error {
 		nm.MaybeSetMaxFileKey(key)
 		if !offset.IsZero() && size != TombstoneFileSize {
 			nm.FileCounter++
@@ -68,38 +64,6 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) {
 	return nm, e
 }

-// walks through the index file, calls fn function with each key, offset, size
-// stops with the error returned by the fn function
-func WalkIndexFile(r *os.File, fn func(key NeedleId, offset Offset, size uint32) error) error {
-	var readerOffset int64
-	bytes := make([]byte, NeedleMapEntrySize*RowsToRead)
-	count, e := r.ReadAt(bytes, readerOffset)
-	glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
-	readerOffset += int64(count)
-	var (
-		key    NeedleId
-		offset Offset
-		size   uint32
-		i      int
-	)
-
-	for count > 0 && e == nil || e == io.EOF {
-		for i = 0; i+NeedleMapEntrySize <= count; i += NeedleMapEntrySize {
-			key, offset, size = IdxFileEntry(bytes[i : i+NeedleMapEntrySize])
-			if e = fn(key, offset, size); e != nil {
-				return e
-			}
-		}
-		if e == io.EOF {
-			return nil
-		}
-		count, e = r.ReadAt(bytes, readerOffset)
-		glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
-		readerOffset += int64(count)
-	}
-	return e
-}
-
 func (nm *NeedleMap) Put(key NeedleId, offset Offset, size uint32) error {
 	_, oldSize := nm.m.Set(NeedleId(key), offset, size)
 	nm.logPut(key, oldSize, size)
weed/storage/needle_map_metric.go

@@ -5,6 +5,7 @@ import (
 	"os"
 	"sync/atomic"

+	"github.com/chrislusf/seaweedfs/weed/storage/idx"
 	. "github.com/chrislusf/seaweedfs/weed/storage/types"
 	"github.com/willf/bloom"
 )
@@ -119,7 +120,7 @@ func reverseWalkIndexFile(r *os.File, initFn func(entryCount int64), fn func(key
 		return e
 	}
 	for i := int(nextBatchSize) - 1; i >= 0; i-- {
-		key, offset, size := IdxFileEntry(bytes[i*NeedleMapEntrySize : i*NeedleMapEntrySize+NeedleMapEntrySize])
+		key, offset, size := idx.IdxFileEntry(bytes[i*NeedleMapEntrySize : i*NeedleMapEntrySize+NeedleMapEntrySize])
 		if e = fn(key, offset, size); e != nil {
 			return e
 		}
weed/storage/store.go

@@ -30,6 +30,8 @@ type Store struct {
 	NeedleMapType       NeedleMapType
 	NewVolumesChan      chan master_pb.VolumeShortInformationMessage
 	DeletedVolumesChan  chan master_pb.VolumeShortInformationMessage
+	NewEcShardsChan     chan master_pb.VolumeEcShardInformationMessage
+	DeletedEcShardsChan chan master_pb.VolumeEcShardInformationMessage
 }

 func (s *Store) String() (str string) {
@@ -47,6 +49,10 @@ func NewStore(port int, ip, publicUrl string, dirnames []string, maxVolumeCounts
 	}
 	s.NewVolumesChan = make(chan master_pb.VolumeShortInformationMessage, 3)
 	s.DeletedVolumesChan = make(chan master_pb.VolumeShortInformationMessage, 3)
+
+	s.NewEcShardsChan = make(chan master_pb.VolumeEcShardInformationMessage, 3)
+	s.DeletedEcShardsChan = make(chan master_pb.VolumeEcShardInformationMessage, 3)
+
 	return
 }
 func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement string, ttlString string, preallocate int64) error {
@@ -186,6 +192,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
 	}

 }

 func (s *Store) Close() {
 	for _, location := range s.Locations {
 		location.Close()
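
The diff only declares the new channels; the heartbeat goroutine that drains them lives elsewhere in the volume server. A toy stand-in showing the intended flow, with a local struct replacing master_pb.VolumeEcShardInformationMessage so the snippet runs on its own (names and wiring are illustrative assumptions, not the commit's code):

	package main

	import "fmt"

	// Stand-in for master_pb.VolumeEcShardInformationMessage.
	type ecShardInfo struct {
		Id      uint32 // volume id
		EcIndex uint32 // shard id within the volume
	}

	func main() {
		newEcShards := make(chan ecShardInfo, 3)     // mirrors s.NewEcShardsChan
		deletedEcShards := make(chan ecShardInfo, 3) // mirrors s.DeletedEcShardsChan

		newEcShards <- ecShardInfo{Id: 3, EcIndex: 7}
		close(newEcShards)
		close(deletedEcShards)

		for m := range newEcShards {
			fmt.Printf("report gained shard %d of volume %d\n", m.EcIndex, m.Id)
		}
		for m := range deletedEcShards {
			fmt.Printf("report lost shard %d of volume %d\n", m.EcIndex, m.Id)
		}
	}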
weed/storage/store_ec.go (new file, 21 lines)

@@ -0,0 +1,21 @@
+package storage
+
+import (
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+)
+
+func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat {
+	var ecShardMessages []*master_pb.VolumeEcShardInformationMessage
+	for _, location := range s.Locations {
+		location.ecShardsLock.RLock()
+		for _, ecShards := range location.ecShards {
+			ecShardMessages = append(ecShardMessages, ecShards.ToVolumeInformationMessage()...)
+		}
+		location.ecShardsLock.RUnlock()
+	}
+
+	return &master_pb.Heartbeat{
+		EcShards: ecShardMessages,
+	}
+
+}
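
CollectErasureCodingHeartbeat flattens every disk location's shard map into one Heartbeat message, which the volume server can send to the master alongside the regular CollectHeartbeat result. A hedged sketch of how a caller might inspect what would be reported (assuming only the exported API shown above; wiring up a real Store needs configured disk locations):

	package main

	import (
		"fmt"

		"github.com/chrislusf/seaweedfs/weed/storage"
	)

	// Illustrative helper, not part of the commit: dump what the next
	// EC heartbeat would report to the master.
	func logEcHeartbeat(s *storage.Store) {
		hb := s.CollectErasureCodingHeartbeat()
		for _, m := range hb.EcShards {
			fmt.Printf("volume %d collection %q shard %d\n", m.Id, m.Collection, m.EcIndex)
		}
	}

	func main() {} // placeholder; constructing a Store is out of scope here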
@@ -8,6 +8,7 @@ import (

 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+	"github.com/chrislusf/seaweedfs/weed/storage/idx"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	. "github.com/chrislusf/seaweedfs/weed/storage/types"
 	"google.golang.org/grpc"
@@ -142,7 +143,7 @@ func (v *Volume) locateLastAppendEntry() (Offset, error) {
 	if n != NeedleMapEntrySize {
 		return Offset{}, fmt.Errorf("file %s read error: %v", indexFile.Name(), e)
 	}
-	_, offset, _ := IdxFileEntry(bytes)
+	_, offset, _ := idx.IdxFileEntry(bytes)

 	return offset, nil
 }
@@ -230,7 +231,7 @@ func (v *Volume) readAppendAtNsForIndexEntry(indexFile *os.File, bytes []byte, m
 	if _, readErr := indexFile.ReadAt(bytes, m*NeedleMapEntrySize); readErr != nil && readErr != io.EOF {
 		return Offset{}, readErr
 	}
-	_, offset, _ := IdxFileEntry(bytes)
+	_, offset, _ := idx.IdxFileEntry(bytes)
 	return offset, nil
 }
weed/storage/volume_checking.go

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"

+	"github.com/chrislusf/seaweedfs/weed/storage/idx"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	. "github.com/chrislusf/seaweedfs/weed/storage/types"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -21,7 +22,7 @@ func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAtNs uin
 	if lastIdxEntry, e = readIndexEntryAtOffset(indexFile, indexSize-NeedleMapEntrySize); e != nil {
 		return 0, fmt.Errorf("readLastIndexEntry %s failed: %v", indexFile.Name(), e)
 	}
-	key, offset, size := IdxFileEntry(lastIdxEntry)
+	key, offset, size := idx.IdxFileEntry(lastIdxEntry)
 	if offset.IsZero() {
 		return 0, nil
 	}
weed/storage/volume_vacuum.go

@@ -6,6 +6,7 @@ import (
 	"time"

 	"github.com/chrislusf/seaweedfs/weed/glog"
+	idx2 "github.com/chrislusf/seaweedfs/weed/storage/idx"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	. "github.com/chrislusf/seaweedfs/weed/storage/types"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -143,7 +144,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
 	if IdxEntry, err = readIndexEntryAtOffset(oldIdxFile, idxOffset); err != nil {
 		return fmt.Errorf("readIndexEntry %s at offset %d failed: %v", oldIdxFileName, idxOffset, err)
 	}
-	key, offset, size := IdxFileEntry(IdxEntry)
+	key, offset, size := idx2.IdxFileEntry(IdxEntry)
 	glog.V(4).Infof("key %d offset %d size %d", key, offset, size)
 	if _, found := incrementedHasUpdatedIndexEntry[key]; !found {
 		incrementedHasUpdatedIndexEntry[key] = keyField{
@@ -329,7 +330,7 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) {
 	dst.Write(v.SuperBlock.Bytes())
 	newOffset := int64(v.SuperBlock.BlockSize())

-	WalkIndexFile(oldIndexFile, func(key NeedleId, offset Offset, size uint32) error {
+	idx2.WalkIndexFile(oldIndexFile, func(key NeedleId, offset Offset, size uint32) error {
 		if offset.IsZero() || size == TombstoneFileSize {
 			return nil
 		}