mirror of
https://github.com/seaweedfs/seaweedfs.git
synced 2025-11-24 08:46:54 +08:00
Some checks are pending
go: build dev binaries / cleanup (push) Waiting to run
go: build dev binaries / build_dev_linux_windows (amd64, linux) (push) Blocked by required conditions
go: build dev binaries / build_dev_linux_windows (amd64, windows) (push) Blocked by required conditions
go: build dev binaries / build_dev_darwin (amd64, darwin) (push) Blocked by required conditions
go: build dev binaries / build_dev_darwin (arm64, darwin) (push) Blocked by required conditions
docker: build dev containers / build-dev-containers (push) Waiting to run
End to End / FUSE Mount (push) Waiting to run
FUSE Integration Tests / FUSE Integration Testing (push) Waiting to run
go: build binary / Build (push) Waiting to run
Java Client Integration Tests / Java Integration Tests (11) (push) Waiting to run
Java Client Integration Tests / Java Integration Tests (17) (push) Waiting to run
Java Client Unit Tests / Java Unit Tests (11) (push) Waiting to run
Java Client Unit Tests / Java Unit Tests (17) (push) Waiting to run
Java Client Unit Tests / Java Unit Tests (21) (push) Waiting to run
Java Client Unit Tests / Java Unit Tests (8) (push) Waiting to run
Kafka Quick Test (Load Test with Schema Registry) / Kafka Client Load Test (Quick) (push) Waiting to run
Kafka Gateway Tests / Kafka Unit Tests (unit-tests-1) (push) Waiting to run
Kafka Gateway Tests / Kafka Integration Tests (Critical) (integration-1) (push) Waiting to run
Kafka Gateway Tests / Kafka End-to-End Tests (with SMQ) (e2e-1) (push) Waiting to run
Kafka Gateway Tests / Kafka Consumer Group Tests (Highly Isolated) (consumer-group-1) (push) Waiting to run
Kafka Gateway Tests / Kafka Client Compatibility (with SMQ) (client-compat-1) (push) Waiting to run
Kafka Gateway Tests / Kafka SMQ Integration Tests (Full Stack) (smq-integration-1) (push) Waiting to run
Kafka Gateway Tests / Kafka Protocol Tests (Isolated) (protocol-1) (push) Waiting to run
PostgreSQL Gateway Tests / PostgreSQL Basic Tests (push) Waiting to run
S3 IAM Integration Tests / IAM Unit Tests (push) Waiting to run
S3 IAM Integration Tests / S3 IAM Integration Tests (advanced) (push) Waiting to run
S3 IAM Integration Tests / S3 IAM Integration Tests (basic) (push) Waiting to run
S3 IAM Integration Tests / S3 IAM Integration Tests (policy-enforcement) (push) Waiting to run
S3 IAM Integration Tests / S3 IAM Distributed Tests (push) Waiting to run
S3 IAM Integration Tests / S3 IAM Performance Tests (push) Waiting to run
S3 Keycloak Integration Tests / S3 Keycloak Integration Tests (push) Waiting to run
S3 PyArrow Parquet Tests / PyArrow Parquet Tests (Python 3.11) (push) Waiting to run
S3 PyArrow Parquet Tests / PyArrow Parquet Tests (Python 3.12) (push) Waiting to run
S3 PyArrow Parquet Tests / PyArrow Parquet Tests (Python 3.9) (push) Waiting to run
S3 PyArrow Parquet Tests / Go Unit Tests (Implicit Directory) (push) Waiting to run
S3 SSE Tests / S3 SSE Integration Tests (comprehensive) (push) Waiting to run
S3 SSE Tests / S3 SSE Integration Tests (quick) (push) Waiting to run
S3 SSE Tests / S3 SSE Compatibility Test (push) Waiting to run
S3 SSE Tests / S3 SSE Metadata Persistence Test (push) Waiting to run
S3 SSE Tests / S3 SSE Copy Operations Test (push) Waiting to run
S3 SSE Tests / S3 SSE Multipart Upload Test (push) Waiting to run
S3 SSE Tests / S3 SSE Performance Test (push) Waiting to run
Ceph S3 tests / Basic S3 tests (KV store) (push) Waiting to run
Ceph S3 tests / S3 Versioning & Object Lock tests (push) Waiting to run
Ceph S3 tests / S3 CORS tests (push) Waiting to run
Ceph S3 tests / SeaweedFS Custom S3 Copy tests (push) Waiting to run
Ceph S3 tests / Basic S3 tests (SQL store) (push) Waiting to run
test s3 over https using aws-cli / awscli-tests (push) Waiting to run
* Parallelize `ec.rebuild` operations per affected volume.
* Require `node.freeEcSlot >= slotsNeeded` before scheduling a rebuild.
* Improve variable names and help messages.
* Protect the read operation with the same mutex.
* Produce an accurate error message.
* Fix a broken test.
---------
Co-authored-by: chrislu <chris.lu@gmail.com>
Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>
260 lines
7.9 KiB
Go
260 lines
7.9 KiB
Go
package shell
|
|
|
|
import (
|
|
"bytes"
|
|
"strings"
|
|
"testing"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
|
|
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
|
|
)
|
|
|
|
// TestEcShardMapRegister tests that EC shards are properly registered
|
|
func TestEcShardMapRegister(t *testing.T) {
|
|
ecShardMap := make(EcShardMap)
|
|
|
|
// Create test nodes with EC shards
|
|
node1 := newEcNode("dc1", "rack1", "node1", 100).
|
|
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6})
|
|
node2 := newEcNode("dc1", "rack1", "node2", 100).
|
|
addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13})
|
|
|
|
ecShardMap.registerEcNode(node1, "c1")
|
|
ecShardMap.registerEcNode(node2, "c1")
|
|
|
|
// Verify volume 1 is registered
|
|
locations, found := ecShardMap[needle.VolumeId(1)]
|
|
if !found {
|
|
t.Fatal("Expected volume 1 to be registered")
|
|
}
|
|
|
|
// Check shard count
|
|
count := locations.shardCount()
|
|
if count != erasure_coding.TotalShardsCount {
|
|
t.Errorf("Expected %d shards, got %d", erasure_coding.TotalShardsCount, count)
|
|
}
|
|
|
|
// Verify shard distribution
|
|
for i := 0; i < 7; i++ {
|
|
if len(locations[i]) != 1 || locations[i][0].info.Id != "node1" {
|
|
t.Errorf("Shard %d should be on node1", i)
|
|
}
|
|
}
|
|
for i := 7; i < erasure_coding.TotalShardsCount; i++ {
|
|
if len(locations[i]) != 1 || locations[i][0].info.Id != "node2" {
|
|
t.Errorf("Shard %d should be on node2", i)
|
|
}
|
|
}
|
|
}
|
|
|
|
// TestEcShardMapShardCount tests shard counting
|
|
func TestEcShardMapShardCount(t *testing.T) {
|
|
testCases := []struct {
|
|
name string
|
|
shardIds []uint32
|
|
expectedCount int
|
|
}{
|
|
{"all shards", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 14},
|
|
{"data shards only", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, 10},
|
|
{"parity shards only", []uint32{10, 11, 12, 13}, 4},
|
|
{"missing some shards", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8}, 9},
|
|
{"single shard", []uint32{0}, 1},
|
|
{"no shards", []uint32{}, 0},
|
|
}
|
|
|
|
for _, tc := range testCases {
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
locations := make(EcShardLocations, erasure_coding.MaxShardCount)
|
|
for _, shardId := range tc.shardIds {
|
|
locations[shardId] = []*EcNode{
|
|
newEcNode("dc1", "rack1", "node1", 100),
|
|
}
|
|
}
|
|
|
|
count := locations.shardCount()
|
|
if count != tc.expectedCount {
|
|
t.Errorf("Expected %d shards, got %d", tc.expectedCount, count)
|
|
}
|
|
})
|
|
}
|
|
}
|
|
|
|
// TestRebuildEcVolumesInsufficientShards tests error handling for unrepairable volumes
|
|
func TestRebuildEcVolumesInsufficientShards(t *testing.T) {
|
|
var logBuffer bytes.Buffer
|
|
|
|
// Create a volume with insufficient shards (less than DataShardsCount)
|
|
node1 := newEcNode("dc1", "rack1", "node1", 100).
|
|
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4}) // Only 5 shards
|
|
|
|
erb := &ecRebuilder{
|
|
commandEnv: &CommandEnv{
|
|
env: make(map[string]string),
|
|
noLock: true, // Bypass lock check for unit test
|
|
},
|
|
ewg: NewErrorWaitGroup(DefaultMaxParallelization),
|
|
ecNodes: []*EcNode{node1},
|
|
writer: &logBuffer,
|
|
}
|
|
|
|
erb.rebuildEcVolumes("c1")
|
|
err := erb.ewg.Wait()
|
|
|
|
if err == nil {
|
|
t.Fatal("Expected error for insufficient shards, got nil")
|
|
}
|
|
if !strings.Contains(err.Error(), "unrepairable") {
|
|
t.Errorf("Expected 'unrepairable' in error message, got: %s", err.Error())
|
|
}
|
|
}
|
|
|
|
// TestRebuildEcVolumesCompleteVolume tests that complete volumes are skipped
|
|
func TestRebuildEcVolumesCompleteVolume(t *testing.T) {
|
|
var logBuffer bytes.Buffer
|
|
|
|
// Create a volume with all shards
|
|
node1 := newEcNode("dc1", "rack1", "node1", 100).
|
|
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})
|
|
|
|
erb := &ecRebuilder{
|
|
commandEnv: &CommandEnv{
|
|
env: make(map[string]string),
|
|
noLock: true, // Bypass lock check for unit test
|
|
},
|
|
ewg: NewErrorWaitGroup(DefaultMaxParallelization),
|
|
ecNodes: []*EcNode{node1},
|
|
writer: &logBuffer,
|
|
applyChanges: false,
|
|
}
|
|
|
|
erb.rebuildEcVolumes("c1")
|
|
err := erb.ewg.Wait()
|
|
|
|
if err != nil {
|
|
t.Fatalf("Expected no error for complete volume, got: %v", err)
|
|
}
|
|
|
|
// The function should return quickly without attempting rebuild
|
|
// since the volume is already complete
|
|
}
|
|
|
|
// TestRebuildEcVolumesInsufficientSpace tests error handling for insufficient disk space
|
|
func TestRebuildEcVolumesInsufficientSpace(t *testing.T) {
|
|
var logBuffer bytes.Buffer
|
|
|
|
// Create a volume with missing shards but insufficient free slots
|
|
// Node has 10 local shards, missing 4 shards (10,11,12,13), so needs 4 free slots
|
|
// Set free slots to 3 (insufficient)
|
|
node1 := newEcNode("dc1", "rack1", "node1", 3). // Only 3 free slots, need 4
|
|
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
|
|
|
|
erb := &ecRebuilder{
|
|
commandEnv: &CommandEnv{
|
|
env: make(map[string]string),
|
|
noLock: true, // Bypass lock check for unit test
|
|
},
|
|
ewg: NewErrorWaitGroup(DefaultMaxParallelization),
|
|
ecNodes: []*EcNode{node1},
|
|
writer: &logBuffer,
|
|
applyChanges: false,
|
|
}
|
|
|
|
erb.rebuildEcVolumes("c1")
|
|
err := erb.ewg.Wait()
|
|
|
|
if err == nil {
|
|
t.Fatal("Expected error for insufficient disk space, got nil")
|
|
}
|
|
if !strings.Contains(err.Error(), "no node has sufficient free slots") {
|
|
t.Errorf("Expected 'no node has sufficient free slots' in error message, got: %s", err.Error())
|
|
}
|
|
// Verify the enhanced error message includes diagnostic information
|
|
if !strings.Contains(err.Error(), "need") || !strings.Contains(err.Error(), "max available") {
|
|
t.Errorf("Expected diagnostic information in error message, got: %s", err.Error())
|
|
}
|
|
}
|
|
|
|
// TestMultipleNodesWithShards tests rebuild with shards distributed across multiple nodes
|
|
func TestMultipleNodesWithShards(t *testing.T) {
|
|
ecShardMap := make(EcShardMap)
|
|
|
|
// Create 3 nodes with different shards
|
|
node1 := newEcNode("dc1", "rack1", "node1", 100).
|
|
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3})
|
|
node2 := newEcNode("dc1", "rack1", "node2", 100).
|
|
addEcVolumeAndShardsForTest(1, "c1", []uint32{4, 5, 6, 7})
|
|
node3 := newEcNode("dc1", "rack1", "node3", 100).
|
|
addEcVolumeAndShardsForTest(1, "c1", []uint32{8, 9})
|
|
|
|
ecShardMap.registerEcNode(node1, "c1")
|
|
ecShardMap.registerEcNode(node2, "c1")
|
|
ecShardMap.registerEcNode(node3, "c1")
|
|
|
|
locations := ecShardMap[needle.VolumeId(1)]
|
|
count := locations.shardCount()
|
|
|
|
// We have 10 shards total, which is enough for data shards
|
|
if count != 10 {
|
|
t.Errorf("Expected 10 shards, got %d", count)
|
|
}
|
|
|
|
// Verify each shard is on the correct node
|
|
for i := 0; i < 4; i++ {
|
|
if len(locations[i]) != 1 || locations[i][0].info.Id != "node1" {
|
|
t.Errorf("Shard %d should be on node1", i)
|
|
}
|
|
}
|
|
for i := 4; i < 8; i++ {
|
|
if len(locations[i]) != 1 || locations[i][0].info.Id != "node2" {
|
|
t.Errorf("Shard %d should be on node2", i)
|
|
}
|
|
}
|
|
for i := 8; i < 10; i++ {
|
|
if len(locations[i]) != 1 || locations[i][0].info.Id != "node3" {
|
|
t.Errorf("Shard %d should be on node3", i)
|
|
}
|
|
}
|
|
}
|
|
|
|
// TestDuplicateShards tests handling of duplicate shards on multiple nodes
|
|
func TestDuplicateShards(t *testing.T) {
|
|
ecShardMap := make(EcShardMap)
|
|
|
|
// Create 2 nodes with overlapping shards (both have shard 0)
|
|
node1 := newEcNode("dc1", "rack1", "node1", 100).
|
|
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3})
|
|
node2 := newEcNode("dc1", "rack1", "node2", 100).
|
|
addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 4, 5, 6}) // Duplicate shard 0
|
|
|
|
ecShardMap.registerEcNode(node1, "c1")
|
|
ecShardMap.registerEcNode(node2, "c1")
|
|
|
|
locations := ecShardMap[needle.VolumeId(1)]
|
|
|
|
// Shard 0 should be on both nodes
|
|
if len(locations[0]) != 2 {
|
|
t.Errorf("Expected shard 0 on 2 nodes, got %d", len(locations[0]))
|
|
}
|
|
|
|
// Verify both nodes are registered for shard 0
|
|
foundNode1 := false
|
|
foundNode2 := false
|
|
for _, node := range locations[0] {
|
|
if node.info.Id == "node1" {
|
|
foundNode1 = true
|
|
}
|
|
if node.info.Id == "node2" {
|
|
foundNode2 = true
|
|
}
|
|
}
|
|
if !foundNode1 || !foundNode2 {
|
|
t.Error("Both nodes should have shard 0")
|
|
}
|
|
|
|
// Shard count should be 7 (unique shards: 0, 1, 2, 3, 4, 5, 6)
|
|
count := locations.shardCount()
|
|
if count != 7 {
|
|
t.Errorf("Expected 7 unique shards, got %d", count)
|
|
}
|
|
}
|