package topology
import (
"reflect"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/sequence"
"github.com/seaweedfs/seaweedfs/weed/storage"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"testing"
)
func TestRemoveDataCenter(t *testing.T) {
topo := setup(topologyLayout)
topo.UnlinkChildNode(NodeId("dc2"))
2021-02-16 10:51:03 -08:00
if topo.diskUsages.usages[types.HardDriveType].activeVolumeCount != 15 {
2012-09-01 02:20:59 -07:00
t.Fail()
}
topo.UnlinkChildNode(NodeId("dc3"))
2021-02-16 10:51:03 -08:00
if topo.diskUsages.usages[types.HardDriveType].activeVolumeCount != 12 {
2012-09-01 02:20:59 -07:00
t.Fail()
}
}
func TestHandlingVolumeServerHeartbeat(t *testing.T) {
topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
dc := topo.GetOrCreateDataCenter("dc1")
rack := dc.GetOrCreateRack("rack1")
2021-02-16 10:51:03 -08:00
maxVolumeCounts := make(map[string]uint32)
maxVolumeCounts[""] = 25
maxVolumeCounts["ssd"] = 12
2021-09-14 10:37:06 -07:00
dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, 0, "127.0.0.1", maxVolumeCounts)
{
volumeCount := 7
var volumeMessages []*master_pb.VolumeInformationMessage
for k := 1; k <= volumeCount; k++ {
volumeMessage := &master_pb.VolumeInformationMessage{
Id: uint32(k),
Size: uint64(25432),
Collection: "",
FileCount: uint64(2343),
DeleteCount: uint64(345),
DeletedByteCount: 34524,
ReadOnly: false,
ReplicaPlacement: uint32(0),
2025-06-16 22:25:22 -07:00
Version: uint32(needle.GetCurrentVersion()),
Ttl: 0,
}
volumeMessages = append(volumeMessages, volumeMessage)
}
for k := 1; k <= volumeCount; k++ {
volumeMessage := &master_pb.VolumeInformationMessage{
Id: uint32(volumeCount + k),
Size: uint64(25432),
Collection: "",
FileCount: uint64(2343),
DeleteCount: uint64(345),
DeletedByteCount: 34524,
ReadOnly: false,
ReplicaPlacement: uint32(0),
2025-06-16 22:25:22 -07:00
Version: uint32(needle.GetCurrentVersion()),
Ttl: 0,
2020-12-16 09:14:05 -08:00
DiskType: "ssd",
}
volumeMessages = append(volumeMessages, volumeMessage)
}
2018-06-25 00:01:53 -07:00
topo.SyncDataNodeRegistration(volumeMessages, dn)
2021-02-16 10:51:03 -08:00
usageCounts := topo.diskUsages.usages[types.HardDriveType]
assert(t, "activeVolumeCount1", int(usageCounts.activeVolumeCount), volumeCount)
assert(t, "volumeCount", int(usageCounts.volumeCount), volumeCount)
assert(t, "ssdVolumeCount", int(topo.diskUsages.usages[types.SsdType].volumeCount), volumeCount)
}
{
volumeCount := 7 - 1
var volumeMessages []*master_pb.VolumeInformationMessage
for k := 1; k <= volumeCount; k++ {
volumeMessage := &master_pb.VolumeInformationMessage{
Id: uint32(k),
Size: uint64(254320),
Collection: "",
FileCount: uint64(2343),
DeleteCount: uint64(345),
DeletedByteCount: 345240,
ReadOnly: false,
ReplicaPlacement: uint32(0),
2025-06-16 22:25:22 -07:00
Version: uint32(needle.GetCurrentVersion()),
Ttl: 0,
}
volumeMessages = append(volumeMessages, volumeMessage)
}
2018-06-25 00:01:53 -07:00
topo.SyncDataNodeRegistration(volumeMessages, dn)
2019-04-20 23:53:37 -07:00
//rp, _ := storage.NewReplicaPlacementFromString("000")
//layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL)
//assert(t, "writables", len(layout.writables), volumeCount)
2021-02-16 10:51:03 -08:00
usageCounts := topo.diskUsages.usages[types.HardDriveType]
assert(t, "activeVolumeCount1", int(usageCounts.activeVolumeCount), volumeCount)
assert(t, "volumeCount", int(usageCounts.volumeCount), volumeCount)
2019-04-20 23:53:37 -07:00
}
{
volumeCount := 6
newVolumeShortMessage := &master_pb.VolumeShortInformationMessage{
Id: uint32(3),
Collection: "",
ReplicaPlacement: uint32(0),
2025-06-16 22:25:22 -07:00
Version: uint32(needle.GetCurrentVersion()),
2019-04-20 23:53:37 -07:00
Ttl: 0,
}
topo.IncrementalSyncDataNodeRegistration(
[]*master_pb.VolumeShortInformationMessage{newVolumeShortMessage},
nil,
dn)
2019-12-23 12:48:20 -08:00
rp, _ := super_block.NewReplicaPlacementFromString("000")
2021-02-16 02:47:02 -08:00
layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL, types.HardDriveType)
2019-04-20 23:53:37 -07:00
assert(t, "writables after repeated add", len(layout.writables), volumeCount)
2021-02-16 10:51:03 -08:00
usageCounts := topo.diskUsages.usages[types.HardDriveType]
assert(t, "activeVolumeCount1", int(usageCounts.activeVolumeCount), volumeCount)
assert(t, "volumeCount", int(usageCounts.volumeCount), volumeCount)
2019-04-20 23:53:37 -07:00
topo.IncrementalSyncDataNodeRegistration(
nil,
[]*master_pb.VolumeShortInformationMessage{newVolumeShortMessage},
dn)
assert(t, "writables after deletion", len(layout.writables), volumeCount-1)
2021-02-16 10:51:03 -08:00
assert(t, "activeVolumeCount1", int(usageCounts.activeVolumeCount), volumeCount-1)
assert(t, "volumeCount", int(usageCounts.volumeCount), volumeCount-1)
2019-04-20 23:53:37 -07:00
topo.IncrementalSyncDataNodeRegistration(
[]*master_pb.VolumeShortInformationMessage{newVolumeShortMessage},
nil,
dn)
for vid := range layout.vid2location {
2019-04-20 23:53:37 -07:00
println("after add volume id", vid)
}
2019-04-30 03:22:19 +00:00
for _, vid := range layout.writables {
2019-04-20 23:53:37 -07:00
println("after add writable volume id", vid)
}
assert(t, "writables after add back", len(layout.writables), volumeCount)
}
topo.UnRegisterDataNode(dn)
2021-02-16 10:51:03 -08:00
usageCounts := topo.diskUsages.usages[types.HardDriveType]
assert(t, "activeVolumeCount2", int(usageCounts.activeVolumeCount), 0)
}
// assert fails the test immediately when actual != expected, identifying the
// failing check by message.
func assert(t *testing.T, message string, actual, expected int) {
	t.Helper() // report the caller's line on failure, not this one
	if actual != expected {
		t.Fatalf("unexpected %s: %d, expected: %d", message, actual, expected)
	}
}
func TestAddRemoveVolume(t *testing.T) {
topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
dc := topo.GetOrCreateDataCenter("dc1")
rack := dc.GetOrCreateRack("rack1")
2021-02-16 10:51:03 -08:00
maxVolumeCounts := make(map[string]uint32)
maxVolumeCounts[""] = 25
maxVolumeCounts["ssd"] = 12
2021-09-14 10:37:06 -07:00
dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, 0, "127.0.0.1", maxVolumeCounts)
v := storage.VolumeInfo{
2019-04-18 21:43:36 -07:00
Id: needle.VolumeId(1),
Size: 100,
Collection: "xcollection",
2020-12-16 09:14:05 -08:00
DiskType: "ssd",
FileCount: 123,
DeleteCount: 23,
DeletedByteCount: 45,
ReadOnly: false,
2025-06-16 22:25:22 -07:00
Version: needle.GetCurrentVersion(),
2019-12-23 12:48:20 -08:00
ReplicaPlacement: &super_block.ReplicaPlacement{},
2019-04-18 21:43:36 -07:00
Ttl: needle.EMPTY_TTL,
}
dn.UpdateVolumes([]storage.VolumeInfo{v})
topo.RegisterVolumeLayout(v, dn)
2019-04-20 23:53:37 -07:00
topo.RegisterVolumeLayout(v, dn)
2018-07-11 12:52:48 -07:00
if _, hasCollection := topo.FindCollection(v.Collection); !hasCollection {
t.Errorf("collection %v should exist", v.Collection)
}
topo.UnRegisterVolumeLayout(v, dn)
2018-07-11 12:52:48 -07:00
if _, hasCollection := topo.FindCollection(v.Collection); hasCollection {
t.Errorf("collection %v should not exist", v.Collection)
}
}
// TestListCollections verifies that Topology.ListCollections returns the
// collection names matching the requested volume kinds: normal volumes,
// erasure-coded volumes, both, or neither.
func TestListCollections(t *testing.T) {
	rp, _ := super_block.NewReplicaPlacementFromString("002")

	topo := NewTopology("weedfs", sequence.NewMemorySequencer(), 32*1024, 5, false)
	dc := topo.GetOrCreateDataCenter("dc1")
	rack := dc.GetOrCreateRack("rack1")
	dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, 0, "127.0.0.1", nil)

	// Normal volumes: one in the default ("") collection, two in named ones.
	normalVolumes := []struct {
		id         uint32
		collection string
	}{
		{1111, ""},
		{2222, "vol_collection_a"},
		{3333, "vol_collection_b"},
	}
	for _, nv := range normalVolumes {
		topo.RegisterVolumeLayout(storage.VolumeInfo{
			Id:               needle.VolumeId(nv.id),
			ReplicaPlacement: rp,
			Collection:       nv.collection,
		}, dn)
	}

	// Erasure-coded volumes, each in its own collection.
	ecVolumes := []struct {
		id         uint32
		collection string
	}{
		{4444, "ec_collection_a"},
		{5555, "ec_collection_b"},
	}
	for _, ev := range ecVolumes {
		topo.RegisterEcShards(&erasure_coding.EcVolumeInfo{
			VolumeId:   needle.VolumeId(ev.id),
			Collection: ev.collection,
		}, dn)
	}

	for _, tc := range []struct {
		name                 string
		includeNormalVolumes bool
		includeEcVolumes     bool
		want                 []string
	}{
		{"no volume types selected", false, false, nil},
		{"normal volumes", true, false, []string{"", "vol_collection_a", "vol_collection_b"}},
		{"EC volumes", false, true, []string{"ec_collection_a", "ec_collection_b"}},
		{"normal + EC volumes", true, true, []string{"", "ec_collection_a", "ec_collection_b", "vol_collection_a", "vol_collection_b"}},
	} {
		t.Run(tc.name, func(t *testing.T) {
			got := topo.ListCollections(tc.includeNormalVolumes, tc.includeEcVolumes)
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("got %v, want %v", got, tc.want)
			}
		})
	}
}