Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2025-11-24 16:53:14 +08:00
* refactor: add ECContext structure to encapsulate EC parameters
- Create ec_context.go with ECContext struct
- NewDefaultECContext() creates context with default 10+4 configuration
- Helper methods: CreateEncoder(), ToExt(), String()
- Foundation for cleaner function signatures
- No behavior change, still uses hardcoded 10+4
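A minimal sketch of what ec_context.go might introduce, assuming only the names listed above (ECContext, NewDefaultECContext, CreateEncoder, ToExt, String) plus the reedsolomon library SeaweedFS already depends on; the early version also tracked a stored total-shard count that a later commit in this series replaces with a Total() method:

    package erasure_coding

    import (
        "fmt"

        "github.com/klauspost/reedsolomon"
    )

    // ECContext carries the erasure-coding parameters for one volume,
    // replacing scattered references to the hardcoded 10+4 constants.
    type ECContext struct {
        DataShards   int
        ParityShards int
    }

    // NewDefaultECContext returns the default 10+4 configuration.
    func NewDefaultECContext() *ECContext {
        return &ECContext{DataShards: 10, ParityShards: 4}
    }

    // CreateEncoder builds a Reed-Solomon encoder for this configuration
    // instead of calling reedsolomon.New with hardcoded counts at each call site.
    func (ctx *ECContext) CreateEncoder() (reedsolomon.Encoder, error) {
        return reedsolomon.New(ctx.DataShards, ctx.ParityShards)
    }

    // ToExt returns the shard file extension for a shard index, e.g. ".ec00".
    func (ctx *ECContext) ToExt(shardIndex int) string {
        return fmt.Sprintf(".ec%02d", shardIndex)
    }

    func (ctx *ECContext) String() string {
        return fmt.Sprintf("%d+%d", ctx.DataShards, ctx.ParityShards)
    }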
* refactor: update ec_encoder.go to use ECContext
- Add WriteEcFilesWithContext() and RebuildEcFilesWithContext() functions
- Keep old functions for backward compatibility (call new versions)
- Update all internal functions to accept ECContext parameter
- Use ctx.DataShards, ctx.ParityShards, ctx.TotalShards consistently
- Use ctx.CreateEncoder() instead of hardcoded reedsolomon.New()
- Use ctx.ToExt() for shard file extensions
- No behavior change, still uses default 10+4 configuration
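The backward-compatibility pattern described above might look roughly like this; the new function's signature (base file name plus a context) is an assumption, and the encoding body is elided:

    package erasure_coding

    // WriteEcFiles keeps the old signature and delegates to the context-aware
    // variant with the default 10+4 configuration, so existing callers compile
    // unchanged.
    func WriteEcFiles(baseFileName string) error {
        return WriteEcFilesWithContext(baseFileName, NewDefaultECContext())
    }

    // WriteEcFilesWithContext generates the shard files for baseFileName using
    // the shard counts carried by ctx.
    func WriteEcFilesWithContext(baseFileName string, ctx *ECContext) error {
        enc, err := ctx.CreateEncoder()
        if err != nil {
            return err
        }
        // ... split the volume data into ctx.DataShards data shards, encode
        // ctx.ParityShards parity shards with enc, and write each shard to
        // baseFileName + ctx.ToExt(shardIndex) ...
        _ = enc
        return nil
    }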
* refactor: update ec_volume.go to use ECContext
- Add ECContext field to EcVolume struct
- Initialize ECContext with default configuration in NewEcVolume()
- Update LocateEcShardNeedleInterval() to use ECContext.DataShards
- Phase 1: Always uses default 10+4 configuration
- No behavior change
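A rough, non-authoritative outline of the volume side of the wiring (field name per the bullets, everything else elided):

    package erasure_coding

    type EcVolume struct {
        // ... existing fields (volume id, collection, shard and index state) ...
        ECContext *ECContext // EC parameters for this volume, default 10+4
    }

    func NewEcVolume( /* existing parameters unchanged */ ) (ev *EcVolume, err error) {
        ev = &EcVolume{
            ECContext: NewDefaultECContext(),
        }
        // ... load shard files and .ecx/.ecj indexes; a later commit also reads
        // the EC configuration stored in the .vif file at this point ...
        return ev, nil
    }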
* refactor: add EC shard count fields to VolumeInfo protobuf
- Add data_shards_count field (field 8) to VolumeInfo message
- Add parity_shards_count field (field 9) to VolumeInfo message
- Fields are optional, 0 means use default (10+4)
- Backward compatible: fields added at end
- Phase 1: Foundation for future customization
* refactor: regenerate protobuf Go files with EC shard count fields
- Regenerated volume_server_pb/*.go with new EC fields
- DataShardsCount and ParityShardsCount accessors added to VolumeInfo
- No behavior change, fields not yet used
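Consuming the regenerated accessors defensively might look like the sketch below; the helper name is hypothetical, and a later commit in this series replaces the two separate fields with an EcShardConfig message, so treat the accessors as a snapshot of this intermediate step:

    package erasure_coding

    import "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"

    // ecContextFromVolumeInfo (hypothetical helper): zero values mean the fields
    // were never set, so fall back to the default 10+4 configuration.
    func ecContextFromVolumeInfo(vi *volume_server_pb.VolumeInfo) *ECContext {
        data := int(vi.GetDataShardsCount())
        parity := int(vi.GetParityShardsCount())
        if data <= 0 || parity <= 0 {
            return NewDefaultECContext()
        }
        return &ECContext{DataShards: data, ParityShards: parity}
    }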
* refactor: update VolumeEcShardsGenerate to use ECContext
- Create ECContext with default configuration in VolumeEcShardsGenerate
- Use ecCtx.TotalShards and ecCtx.ToExt() in cleanup
- Call WriteEcFilesWithContext() instead of WriteEcFiles()
- Save EC configuration (DataShardsCount, ParityShardsCount) to VolumeInfo
- Log EC context being used
- Phase 1: Always uses default 10+4 configuration
- No behavior change
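A condensed, illustrative version of the handler flow above, reusing the WriteEcFilesWithContext signature sketched earlier; the real VolumeEcShardsGenerate handler also handles locking, disk selection, .ecx generation, and cleanup on failure, and the field assignments assume the pre-EcShardConfig VolumeInfo shape:

    package weed_server

    import (
        "fmt"

        "github.com/seaweedfs/seaweedfs/weed/glog"
        "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
        "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
    )

    // generateEcShards is a stand-in for the handler body: build the EC context,
    // log it, write the shard files, then record the shard counts in the
    // volume's .vif VolumeInfo so later loads and rebuilds can recover them.
    func generateEcShards(baseFileName string, volumeInfo *volume_server_pb.VolumeInfo) error {
        ecCtx := erasure_coding.NewDefaultECContext()
        glog.V(0).Infof("generating ec shards for %s with %s", baseFileName, ecCtx.String())

        if err := erasure_coding.WriteEcFilesWithContext(baseFileName, ecCtx); err != nil {
            return fmt.Errorf("write ec shards for %s: %w", baseFileName, err)
        }

        // 0 still means "default 10+4" for readers of old .vif files
        volumeInfo.DataShardsCount = uint32(ecCtx.DataShards)
        volumeInfo.ParityShardsCount = uint32(ecCtx.ParityShards)
        return nil
    }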
* fmt
* refactor: update ec_test.go to use ECContext
- Update TestEncodingDecoding to create and use ECContext
- Update validateFiles() to accept ECContext parameter
- Update removeGeneratedFiles() to use ctx.TotalShards and ctx.ToExt()
- Test passes with default 10+4 configuration
* refactor: use EcShardConfig message instead of separate fields
* optimize: pre-calculate row sizes in EC encoding loop
* refactor: replace TotalShards field with Total() method
- Remove TotalShards field from ECContext to avoid field drift
- Add Total() method that computes DataShards + ParityShards
- Update all references to use ctx.Total() instead of ctx.TotalShards
- Read EC config from VolumeInfo when loading EC volumes
- Read data shard count from .vif in VolumeEcShardsToVolume
- Use >= instead of > for exact boundary handling in encoding loops
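Per the first two bullets, the stored field becomes a derived value on the ECContext sketched earlier:

    // Total returns the shard count implied by the configuration; computing it
    // on demand keeps it from drifting out of sync with DataShards/ParityShards.
    func (ctx *ECContext) Total() int {
        return ctx.DataShards + ctx.ParityShards
    }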
* optimize: simplify VolumeEcShardsToVolume to use existing EC context
- Remove redundant CollectEcShards call
- Remove redundant .vif file loading
- Use v.ECContext.DataShards directly (already loaded by NewEcVolume)
- Slice tempShards instead of collecting again
* refactor: rename MaxShardId to MaxShardCount for clarity
- Change from MaxShardId=31 to MaxShardCount=32
- Eliminates confusing +1 arithmetic (MaxShardId+1)
- More intuitive: MaxShardCount directly represents the limit
* fix: support custom EC ratios beyond 14 shards in VolumeEcShardsToVolume
- Add MaxShardId constant (31, since ShardBits is uint32)
- Use MaxShardId+1 (32) instead of TotalShardsCount (14) for tempShards buffer
- Prevents panic when slicing for volumes with >14 total shards
- Critical fix for custom EC configurations like 20+10
* fix: add validation for EC shard counts from VolumeInfo
- Validate DataShards/ParityShards are positive and within MaxShardCount
- Prevent zero or invalid values that could cause divide-by-zero
- Fallback to defaults if validation fails, with warning log
- VolumeEcShardsGenerate now preserves existing EC config when regenerating
- Critical safety fix for corrupted or legacy .vif files
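The guard described here, as a hedged sketch; the helper name is hypothetical and the log wording illustrative, but the checks mirror the bullets (positive counts, total within MaxShardCount, fall back to 10+4 otherwise):

    package erasure_coding

    import "github.com/seaweedfs/seaweedfs/weed/glog"

    // ecContextOrDefault validates shard counts read from a .vif VolumeInfo and
    // falls back to the default configuration, so a corrupted or legacy file
    // cannot produce a zero-shard (divide-by-zero) or oversized configuration.
    func ecContextOrDefault(dataShards, parityShards int) *ECContext {
        if dataShards <= 0 || parityShards <= 0 || dataShards+parityShards > MaxShardCount {
            glog.Warningf("invalid ec config %d+%d from .vif, falling back to default %d+%d",
                dataShards, parityShards, DataShardsCount, ParityShardsCount)
            return NewDefaultECContext()
        }
        return &ECContext{DataShards: dataShards, ParityShards: parityShards}
    }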
* fix: RebuildEcFiles now loads EC config from .vif file
- Critical: RebuildEcFiles was always using default 10+4 config
- Now loads actual EC config from .vif file when rebuilding shards
- Validates config before use (positive shards, within MaxShardCount)
- Falls back to default if .vif missing or invalid
- Prevents data corruption when rebuilding custom EC volumes
* add: defensive validation for dataShards in VolumeEcShardsToVolume
- Validate dataShards > 0 and <= MaxShardCount before use
- Prevents panic from corrupted or uninitialized ECContext
- Returns clear error message instead of panic
- Defense-in-depth: validates even though upstream should catch issues
* fix: replace TotalShardsCount with MaxShardCount for custom EC ratio support
Critical fixes to support custom EC ratios > 14 shards:
disk_location_ec.go:
- validateEcVolume: Check shards 0-31 instead of 0-13 during validation
- removeEcVolumeFiles: Remove shards 0-31 instead of 0-13 during cleanup
ec_volume_info.go ShardBits methods:
- ShardIds(): Iterate up to MaxShardCount (32) instead of TotalShardsCount (14)
- ToUint32Slice(): Iterate up to MaxShardCount (32)
- IndexToShardId(): Iterate up to MaxShardCount (32)
- MinusParityShards(): Remove shards 10-31 instead of 10-13 (added note about Phase 2)
- Minus() shard size copy: Iterate up to MaxShardCount (32)
- resizeShardSizes(): Iterate up to MaxShardCount (32)
Without these changes:
- Custom EC ratios > 14 total shards would fail validation on startup
- Shards 14-31 would never be discovered or cleaned up
- ShardBits operations would miss shards >= 14
These changes are backward compatible - MaxShardCount (32) includes
the default TotalShardsCount (14), so existing 10+4 volumes work as before.
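The common thread in these ShardBits changes is iterating bit positions up to MaxShardCount (32) instead of TotalShardsCount (14). A simplified sketch of the pattern (ShardBits really is a uint32 bitmask in ec_volume_info.go; the constant and types are repeated here only for self-containment, and the method bodies are reduced to the essentials):

    package erasure_coding

    type ShardId uint8
    type ShardBits uint32 // one bit per shard id, so at most 32 shards

    const MaxShardCount = 32

    func (b ShardBits) HasShardId(id ShardId) bool {
        return b&(1<<id) != 0
    }

    // ShardIds lists the shard ids present in the bitmask, scanning all 32
    // possible bit positions rather than only the default 14.
    func (b ShardBits) ShardIds() (ids []ShardId) {
        for id := ShardId(0); id < ShardId(MaxShardCount); id++ {
            if b.HasShardId(id) {
                ids = append(ids, id)
            }
        }
        return
    }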
* fix: replace TotalShardsCount with MaxShardCount in critical data structures
Critical fixes for buffer allocations and loops that must support
custom EC ratios up to 32 shards:
Data Structures:
- store_ec.go:354: Buffer allocation for shard recovery (bufs array)
- topology_ec.go:14: EcShardLocations.Locations fixed array size
- command_ec_rebuild.go:268: EC shard map allocation
- command_ec_common.go:626: Shard-to-locations map allocation
Shard Discovery Loops:
- ec_task.go:378: Loop to find generated shard files
- ec_shard_management.go: All 8 loops that check/count EC shards
These changes are critical because:
1. Buffer allocations sized to 14 would cause index-out-of-bounds panics
when accessing shards 14-31
2. Fixed arrays sized to 14 would truncate shard location data
3. Loops limited to 0-13 would never discover/manage shards 14-31
Note: command_ec_encode.go:208 intentionally NOT changed - it creates
shard IDs to mount after encoding. In Phase 1 we always generate 14
shards, so this remains TotalShardsCount and will be made dynamic in
Phase 2 based on actual EC context.
Without these fixes, custom EC ratios > 14 total shards would cause:
- Runtime panics (array index out of bounds)
- Data loss (shards 14-31 never discovered/tracked)
- Incomplete shard management (missing shards not detected)
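The sizing changes amount to allocating for every addressable shard id rather than for the default ratio. A sketch under the assumption that the names match the bullets (bufs in store_ec.go, the Locations array in topology_ec.go); DataNode is the existing type in the topology package:

    package topology

    import "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"

    // EcShardLocations mirrors the fixed-size array described for topology_ec.go:
    // sizing it at MaxShardCount keeps location data for shards 14-31 from being
    // silently truncated.
    type EcShardLocations struct {
        Collection string
        Locations  [erasure_coding.MaxShardCount][]*DataNode
    }

    // allocateRecoveryBuffers illustrates the store_ec.go side: one buffer slot
    // per possible shard id, so recovering a shard id above 13 cannot index out
    // of bounds.
    func allocateRecoveryBuffers() [][]byte {
        return make([][]byte, erasure_coding.MaxShardCount)
    }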
* refactor: move MaxShardCount constant to ec_encoder.go
Moved MaxShardCount from ec_volume_info.go to ec_encoder.go to group it
with other shard count constants (DataShardsCount, ParityShardsCount,
TotalShardsCount). This improves code organization and makes it easier
to understand the relationship between these constants.
Location: ec_encoder.go line 22, between TotalShardsCount and MinTotalDisks
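Assuming the grouping now reads roughly as below (the first three values are the long-standing defaults; MaxShardCount documents why 32 is the ceiling):

    package erasure_coding

    const (
        DataShardsCount   = 10                                  // default data shards
        ParityShardsCount = 4                                   // default parity shards
        TotalShardsCount  = DataShardsCount + ParityShardsCount // default 10+4 total

        // MaxShardCount is the hard upper bound on shards per volume: ShardBits
        // packs one bit per shard id into a uint32, so ids 0 through 31 are the
        // only addressable ones.
        MaxShardCount = 32
    )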
* improve: add defensive programming and better error messages for EC
Code review improvements from CodeRabbit:
1. ShardBits Guardrails (ec_volume_info.go):
- AddShardId, RemoveShardId: Reject shard IDs >= MaxShardCount
- HasShardId: Return false for out-of-range shard IDs
- Prevents silent no-ops from bit shifts with invalid IDs
2. Future-Proof Regex (disk_location_ec.go):
- Updated regex from \.ec[0-9][0-9] to \.ec\d{2,3}
- Now matches .ec00 through .ec999 (currently .ec00-.ec31 used)
- Supports future increases to MaxShardCount beyond 99
3. Better Error Messages (volume_grpc_erasure_coding.go):
- Include valid range (1..32) in dataShards validation error
- Helps operators quickly identify the problem
4. Validation Before Save (volume_grpc_erasure_coding.go):
- Validate ECContext (DataShards > 0, ParityShards > 0, Total <= MaxShardCount)
- Log EC config being saved to .vif for debugging
- Prevents writing invalid configs to disk
These changes improve robustness and debuggability without changing
core functionality.
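Item 1, revisiting the ShardBits methods from the earlier sketch with bounds checks added; the "reject by returning the value unchanged" behaviour follows the bullets, and the bodies are otherwise simplified:

    // AddShardId ignores out-of-range ids instead of silently shifting past the
    // width of the uint32 bitmask.
    func (b ShardBits) AddShardId(id ShardId) ShardBits {
        if id >= ShardId(MaxShardCount) {
            return b
        }
        return b | (1 << id)
    }

    // RemoveShardId likewise refuses ids >= MaxShardCount.
    func (b ShardBits) RemoveShardId(id ShardId) ShardBits {
        if id >= ShardId(MaxShardCount) {
            return b
        }
        return b &^ (1 << id)
    }

    // HasShardId now treats out-of-range ids as simply absent.
    func (b ShardBits) HasShardId(id ShardId) bool {
        if id >= ShardId(MaxShardCount) {
            return false
        }
        return b&(1<<id) != 0
    }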
* fmt
* fix: critical bugs from code review + clean up comments
Critical bug fixes:
1. command_ec_rebuild.go: Fixed indentation causing compilation error
- Properly nested if/for blocks in registerEcNode
2. ec_shard_management.go: Fixed isComplete logic incorrectly using MaxShardCount
- Changed from MaxShardCount (32) back to TotalShardsCount (14)
- Default 10+4 volumes were being incorrectly reported as incomplete
- Missing shards 14-31 were being incorrectly reported as missing
- Fixed in 4 locations: volume completeness checks and getMissingShards
3. ec_volume_info.go: Fixed MinusParityShards removing too many shards
- Changed from MaxShardCount (32) back to TotalShardsCount (14)
- Was incorrectly removing shard IDs 10-31 instead of just 10-13
Comment cleanup:
- Removed Phase 1/Phase 2 references (development plan context)
- Replaced with clear statements about default 10+4 configuration
- SeaweedFS repo uses fixed 10+4 EC ratio, no phases needed
Root cause: Over-aggressive replacement of TotalShardsCount with MaxShardCount.
MaxShardCount (32) is the limit for buffer allocations and shard ID loops,
but TotalShardsCount (14) must be used for default EC configuration logic.
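In code terms, the distinction the root-cause note draws looks roughly like this (placeholder package and a deliberately simplified check, only to make the point compile):

    package ecadmin // placeholder package for the sketch

    import "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"

    // isEcVolumeComplete is a simplified stand-in for the completeness checks in
    // ec_shard_management.go: a default volume is complete once it has all
    // TotalShardsCount (14) shards. MaxShardCount (32) is only a sizing and
    // iteration bound; judging completeness against it made healthy 10+4 volumes
    // appear to be missing shards 14-31.
    func isEcVolumeComplete(shardCount int) bool {
        return shardCount >= erasure_coding.TotalShardsCount
    }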
* fix: add defensive bounds checks and compute actual shard counts
Critical fixes from code review:
1. topology_ec.go: Add defensive bounds checks to AddShard/DeleteShard
- Prevent panic when shardId >= MaxShardCount (32)
- Return false instead of crashing on out-of-range shard IDs
2. command_ec_common.go: Fix doBalanceEcShardsAcrossRacks
- Was using hardcoded TotalShardsCount (14) for all volumes
- Now computes actual totalShardsForVolume from rackToShardCount
- Fixes incorrect rebalancing for volumes with custom EC ratios
- Example: a 5+2 volume (7 shards) would incorrectly use 14 as the average
These fixes improve robustness and prepare for future custom EC ratios
without changing current behavior for default 10+4 volumes.
Note: MinusParityShards and ec_task.go intentionally NOT changed for
seaweedfs repo - these will be enhanced in seaweed-enterprise repo
where custom EC ratio configuration is added.
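Item 2 in sketch form; the shape of rackToShardCount follows the bullet, and ceilDivide is written out here rather than relying on whatever rounding helper command_ec_common.go actually uses:

    package shell

    // averageShardsPerRack derives the per-rack average from the volume's actual
    // shard total instead of assuming the default 14, so a 5+2 volume (7 shards)
    // is balanced against 7, not 14.
    func averageShardsPerRack(rackToShardCount map[string]int, rackCount int) int {
        totalShardsForVolume := 0
        for _, count := range rackToShardCount {
            totalShardsForVolume += count
        }
        return ceilDivide(totalShardsForVolume, rackCount)
    }

    // ceilDivide: integer division rounded up.
    func ceilDivide(a, b int) int {
        return (a + b - 1) / b
    }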
* fmt
* style: make MaxShardCount type casting explicit in loops
Improved code clarity by explicitly casting MaxShardCount to the
appropriate type when used in loop comparisons:
- ShardId comparisons: Cast to ShardId(MaxShardCount)
- uint32 comparisons: Cast to uint32(MaxShardCount)
Changed in 5 locations:
- Minus() loop (line 90)
- ShardIds() loop (line 143)
- ToUint32Slice() loop (line 152)
- IndexToShardId() loop (line 219)
- resizeShardSizes() loop (line 248)
This makes the intent explicit and improves type safety and readability.
No functional changes - purely a style improvement.
package shell

import (
	"context"
	"flag"
	"fmt"
	"io"

	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"google.golang.org/grpc"
)

func init() {
	Commands = append(Commands, &commandEcRebuild{})
}

type commandEcRebuild struct {
}

func (c *commandEcRebuild) Name() string {
	return "ec.rebuild"
}

func (c *commandEcRebuild) Help() string {
	return `find and rebuild missing ec shards among volume servers

	ec.rebuild [-c EACH_COLLECTION|<collection_name>] [-force]

	Algorithm:

	For each type of volume server (different max volume count limit){
		for each collection {
			rebuildEcVolumes()
		}
	}

	func rebuildEcVolumes(){
		idealWritableVolumes = totalWritableVolumes / numVolumeServers
		for {
			sort all volume servers ordered by the number of local writable volumes
			pick the volume server A with the lowest number of writable volumes x
			pick the volume server B with the highest number of writable volumes y
			if y > idealWritableVolumes and x +1 <= idealWritableVolumes {
				if B has a writable volume id v that A does not have {
					move writable volume v from A to B
				}
			}
		}
	}

`
}

func (c *commandEcRebuild) HasTag(CommandTag) bool {
	return false
}

func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	fixCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	collection := fixCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection")
	applyChanges := fixCommand.Bool("force", false, "apply the changes")
	if err = fixCommand.Parse(args); err != nil {
		return nil
	}
	infoAboutSimulationMode(writer, *applyChanges, "-force")

	if err = commandEnv.confirmIsLocked(args); err != nil {
		return
	}

	// collect all ec nodes
	allEcNodes, _, err := collectEcNodes(commandEnv)
	if err != nil {
		return err
	}

	if *collection == "EACH_COLLECTION" {
		collections, err := ListCollectionNames(commandEnv, false, true)
		if err != nil {
			return err
		}
		fmt.Printf("rebuildEcVolumes collections %+v\n", len(collections))
		for _, c := range collections {
			fmt.Printf("rebuildEcVolumes collection %+v\n", c)
			if err = rebuildEcVolumes(commandEnv, allEcNodes, c, writer, *applyChanges); err != nil {
				return err
			}
		}
	} else {
		if err = rebuildEcVolumes(commandEnv, allEcNodes, *collection, writer, *applyChanges); err != nil {
			return err
		}
	}

	return nil
}

func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, writer io.Writer, applyChanges bool) error {

	fmt.Printf("rebuildEcVolumes %s\n", collection)

	// collect vid => each shard locations, similar to ecShardMap in topology.go
	ecShardMap := make(EcShardMap)
	for _, ecNode := range allEcNodes {
		ecShardMap.registerEcNode(ecNode, collection)
	}

	for vid, locations := range ecShardMap {
		shardCount := locations.shardCount()
		if shardCount == erasure_coding.TotalShardsCount {
			continue
		}
		if shardCount < erasure_coding.DataShardsCount {
			return fmt.Errorf("ec volume %d is unrepairable with %d shards\n", vid, shardCount)
		}

		sortEcNodesByFreeslotsDescending(allEcNodes)

		if allEcNodes[0].freeEcSlot < erasure_coding.TotalShardsCount {
			return fmt.Errorf("disk space is not enough")
		}

		if err := rebuildOneEcVolume(commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil {
			return err
		}
	}

	return nil
}

func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error {

	if !commandEnv.isLocked() {
		return fmt.Errorf("lock is lost")
	}

	fmt.Printf("rebuildOneEcVolume %s %d\n", collection, volumeId)

	// collect shard files to rebuilder local disk
	var generatedShardIds []uint32
	copiedShardIds, _, err := prepareDataToRecover(commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges)
	if err != nil {
		return err
	}
	defer func() {
		// clean up working files

		// ask the rebuilder to delete the copied shards
		err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info), copiedShardIds)
		if err != nil {
			fmt.Fprintf(writer, "%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds)
		}

	}()

	if !applyChanges {
		return nil
	}

	// generate ec shards, and maybe ecx file
	generatedShardIds, err = generateMissingShards(commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info))
	if err != nil {
		return err
	}

	// mount the generated shards
	err = mountEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, pb.NewServerAddressFromDataNode(rebuilder.info), generatedShardIds)
	if err != nil {
		return err
	}

	rebuilder.addEcVolumeShards(volumeId, collection, generatedShardIds)

	return nil
}

func generateMissingShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation pb.ServerAddress) (rebuiltShardIds []uint32, err error) {

	err = operation.WithVolumeServerClient(false, sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		resp, rebuildErr := volumeServerClient.VolumeEcShardsRebuild(context.Background(), &volume_server_pb.VolumeEcShardsRebuildRequest{
			VolumeId:   uint32(volumeId),
			Collection: collection,
		})
		if rebuildErr == nil {
			rebuiltShardIds = resp.RebuiltShardIds
		}
		return rebuildErr
	})
	return
}

func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) {

	needEcxFile := true
	var localShardBits erasure_coding.ShardBits
	for _, diskInfo := range rebuilder.info.DiskInfos {
		for _, ecShardInfo := range diskInfo.EcShardInfos {
			if ecShardInfo.Collection == collection && needle.VolumeId(ecShardInfo.Id) == volumeId {
				needEcxFile = false
				localShardBits = erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
			}
		}
	}

	for shardId, ecNodes := range locations {

		if len(ecNodes) == 0 {
			fmt.Fprintf(writer, "missing shard %d.%d\n", volumeId, shardId)
			continue
		}

		if localShardBits.HasShardId(erasure_coding.ShardId(shardId)) {
			localShardIds = append(localShardIds, uint32(shardId))
			fmt.Fprintf(writer, "use existing shard %d.%d\n", volumeId, shardId)
			continue
		}

		var copyErr error
		if applyBalancing {
			copyErr = operation.WithVolumeServerClient(false, pb.NewServerAddressFromDataNode(rebuilder.info), commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
				_, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
					VolumeId:       uint32(volumeId),
					Collection:     collection,
					ShardIds:       []uint32{uint32(shardId)},
					CopyEcxFile:    needEcxFile,
					CopyEcjFile:    true,
					CopyVifFile:    needEcxFile,
					SourceDataNode: ecNodes[0].info.Id,
				})
				return copyErr
			})
			if copyErr == nil && needEcxFile {
				needEcxFile = false
			}
		}
		if copyErr != nil {
			fmt.Fprintf(writer, "%s failed to copy %d.%d from %s: %v\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id, copyErr)
		} else {
			fmt.Fprintf(writer, "%s copied %d.%d from %s\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id)
			copiedShardIds = append(copiedShardIds, uint32(shardId))
		}

	}

	if len(copiedShardIds)+len(localShardIds) >= erasure_coding.DataShardsCount {
		return copiedShardIds, localShardIds, nil
	}

	return nil, nil, fmt.Errorf("%d shards are not enough to recover volume %d", len(copiedShardIds)+len(localShardIds), volumeId)

}

type EcShardMap map[needle.VolumeId]EcShardLocations
type EcShardLocations [][]*EcNode

func (ecShardMap EcShardMap) registerEcNode(ecNode *EcNode, collection string) {
	for _, diskInfo := range ecNode.info.DiskInfos {
		for _, shardInfo := range diskInfo.EcShardInfos {
			if shardInfo.Collection == collection {
				existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)]
				if !found {
					// Use MaxShardCount (32) to support custom EC ratios
					existing = make([][]*EcNode, erasure_coding.MaxShardCount)
					ecShardMap[needle.VolumeId(shardInfo.Id)] = existing
				}
				for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() {
					existing[shardId] = append(existing[shardId], ecNode)
				}
			}
		}
	}
}

func (ecShardLocations EcShardLocations) shardCount() (count int) {
	for _, locations := range ecShardLocations {
		if len(locations) > 0 {
			count++
		}
	}
	return
}