adding volume type
@@ -2,6 +2,7 @@ package topology
 
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/storage"
 
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
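Note: the newly imported storage package supplies the VolumeType used throughout this change. Its definition is not part of this diff; judging from the string(volumeType) conversion and the storage.HardDriveType comparison in the next hunk, a minimal sketch of its assumed shape is:

	// Assumed shape only -- not code from this commit.
	// A string-backed type lets the layout key below embed it via string(volumeType).
	type VolumeType string

	const (
		HardDriveType VolumeType = ""    // assumed zero value, so hard-drive keys stay unchanged
		SsdType       VolumeType = "ssd" // assumed example of a non-default type
	)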
@@ -29,13 +30,16 @@ func (c *Collection) String() string {
 	return fmt.Sprintf("Name:%s, volumeSizeLimit:%d, storageType2VolumeLayout:%v", c.Name, c.volumeSizeLimit, c.storageType2VolumeLayout)
 }
 
-func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout {
+func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeType storage.VolumeType) *VolumeLayout {
 	keyString := rp.String()
 	if ttl != nil {
 		keyString += ttl.String()
 	}
+	if volumeType != storage.HardDriveType {
+		keyString += string(volumeType)
+	}
 	vl := c.storageType2VolumeLayout.Get(keyString, func() interface{} {
-		return NewVolumeLayout(rp, ttl, c.volumeSizeLimit, c.replicationAsMin)
+		return NewVolumeLayout(rp, ttl, volumeType, c.volumeSizeLimit, c.replicationAsMin)
 	})
 	return vl.(*VolumeLayout)
 }
 
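The keying deserves a note: a hard-drive layout keeps exactly the key it had before this commit (the replica placement string plus an optional TTL string), so existing map entries still resolve; only non-default volume types gain a suffix. An illustrative restatement with assumed example values:

	// Illustrative restatement of the keying above, using plain strings;
	// the real code derives the pieces from rp.String() and ttl.String().
	func layoutKey(rp, ttl string, volumeType storage.VolumeType) string {
		key := rp + ttl
		if volumeType != storage.HardDriveType {
			key += string(volumeType)
		}
		return key
	}

	// layoutKey("000", "3d", storage.HardDriveType) == "0003d"    (same key as before this change)
	// layoutKey("000", "3d", "ssd")                 == "0003dssd" (a new, type-specific layout)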
@@ -121,12 +121,12 @@ func (t *Topology) NextVolumeId() (needle.VolumeId, error) {
 }
 
 func (t *Topology) HasWritableVolume(option *VolumeGrowOption) bool {
-	vl := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl)
+	vl := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.VolumeType)
 	return vl.GetActiveVolumeCount(option) > 0
 }
 
 func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string, uint64, *DataNode, error) {
-	vid, count, datanodes, err := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl).PickForWrite(count, option)
+	vid, count, datanodes, err := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.VolumeType).PickForWrite(count, option)
 	if err != nil {
 		return "", 0, nil, fmt.Errorf("failed to find writable volumes for collection:%s replication:%s ttl:%s error: %v", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String(), err)
 	}
@@ -137,10 +137,10 @@ func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string,
 	return needle.NewFileId(*vid, fileId, rand.Uint32()).String(), count, datanodes.Head(), nil
 }
 
-func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL) *VolumeLayout {
+func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeType storage.VolumeType) *VolumeLayout {
 	return t.collectionMap.Get(collectionName, func() interface{} {
 		return NewCollection(collectionName, t.volumeSizeLimit, t.replicationAsMin)
-	}).(*Collection).GetOrCreateVolumeLayout(rp, ttl)
+	}).(*Collection).GetOrCreateVolumeLayout(rp, ttl, volumeType)
 }
 
 func (t *Topology) ListCollections(includeNormalVolumes, includeEcVolumes bool) (ret []string) {
@@ -177,13 +177,13 @@ func (t *Topology) DeleteCollection(collectionName string) {
 }
 
 func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
-	vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl)
+	vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, v.VolumeType)
 	vl.RegisterVolume(&v, dn)
 	vl.EnsureCorrectWritables(&v)
 }
 func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
-	glog.Infof("removing volume info:%+v", v)
-	volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl)
+	glog.Infof("removing volume info: %+v", v)
+	volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, v.VolumeType)
 	volumeLayout.UnRegisterVolume(&v, dn)
 	if volumeLayout.isEmpty() {
 		t.DeleteCollection(v.Collection)
@@ -222,7 +222,7 @@ func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformati
 		t.UnRegisterVolumeLayout(v, dn)
 	}
 	for _, v := range changedVolumes {
-		vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl)
+		vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, v.VolumeType)
 		vl.EnsureCorrectWritables(&v)
 	}
 	return
@@ -37,7 +37,7 @@ func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, g
 	}()
 }
 func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
-	vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl)
+	vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, volumeInfo.VolumeType)
 	if !vl.SetVolumeCapacityFull(volumeInfo.Id) {
 		return false
 	}
@@ -55,7 +55,7 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
 func (t *Topology) UnRegisterDataNode(dn *DataNode) {
 	for _, v := range dn.GetVolumes() {
 		glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id())
-		vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl)
+		vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, v.VolumeType)
 		vl.SetVolumeUnavailable(dn, v.Id)
 	}
 	dn.UpAdjustVolumeCountDelta(-dn.GetVolumeCount())
@@ -96,7 +96,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
 		nil,
 		dn)
 	rp, _ := super_block.NewReplicaPlacementFromString("000")
-	layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL)
+	layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL, storage.HardDriveType)
 	assert(t, "writables after repeated add", len(layout.writables), volumeCount)
 
 	assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount)
@@ -27,6 +27,7 @@ type VolumeGrowOption struct {
 	Collection       string
 	ReplicaPlacement *super_block.ReplicaPlacement
 	Ttl              *needle.TTL
+	VolumeType       storage.VolumeType
 	Prealloacte      int64
 	DataCenter       string
 	Rack             string
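With the new field, every grow or lookup request carries its volume type down through GetVolumeLayout. A hypothetical caller (the function name, collection name, and values are assumptions, not from this commit):

	// Hypothetical usage sketch; pickOnHardDrive and "pictures" are invented for illustration.
	func pickOnHardDrive(topo *Topology, rp *super_block.ReplicaPlacement) (string, error) {
		option := &VolumeGrowOption{
			Collection:       "pictures", // assumed collection name
			ReplicaPlacement: rp,
			Ttl:              needle.EMPTY_TTL,
			VolumeType:       storage.HardDriveType, // the default; also the zero value if left unset
		}
		if !topo.HasWritableVolume(option) {
			return "", fmt.Errorf("no writable volume for %v", option)
		}
		fid, _, _, err := topo.PickForWrite(1, option)
		return fid, err
	}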
@@ -103,6 +103,7 @@ func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
 type VolumeLayout struct {
 	rp              *super_block.ReplicaPlacement
 	ttl             *needle.TTL
+	volumeType      storage.VolumeType
 	vid2location    map[needle.VolumeId]*VolumeLocationList
 	writables       []needle.VolumeId // transient array of writable volume id
 	readonlyVolumes *volumesBinaryState // readonly volumes
@@ -118,10 +119,11 @@ type VolumeLayoutStats struct {
 	FileCount uint64
 }
 
-func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
+func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, volumeType storage.VolumeType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
 	return &VolumeLayout{
 		rp:              rp,
 		ttl:             ttl,
+		volumeType:      volumeType,
 		vid2location:    make(map[needle.VolumeId]*VolumeLocationList),
 		writables:       *new([]needle.VolumeId),
 		readonlyVolumes: NewVolumesBinaryState(readOnlyState, rp, ExistCopies()),
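Taken together, the same replication and TTL can now map to distinct layouts per volume type, because the composed key differs. A sketch with an assumed "ssd" value:

	// Sketch only: two lookups differing solely in volume type return two
	// distinct VolumeLayout values, keyed e.g. "000" and "000ssd".
	hdd := c.GetOrCreateVolumeLayout(rp, nil, storage.HardDriveType)
	ssd := c.GetOrCreateVolumeLayout(rp, nil, storage.VolumeType("ssd")) // "ssd" is an assumed value
	_ = hdd != ssd // true: writes for each type are tracked and grown independently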