Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2025-10-21 06:47:24 +08:00
Merge branch 'master' into add_remote_storage
@@ -22,6 +22,14 @@ func (c *commandCollectionList) Help() string {
 	return `list all collections`
 }
 
+type CollectionInfo struct {
+	FileCount        uint64
+	DeleteCount      uint64
+	DeletedByteCount uint64
+	Size             uint64
+	VolumeCount      int
+}
+
 func (c *commandCollectionList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 
 	collections, err := ListCollectionNames(commandEnv, true, true)
@@ -30,8 +38,21 @@ func (c *commandCollectionList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 		return err
 	}
 
+	topologyInfo, _, err := collectTopologyInfo(commandEnv)
+	if err != nil {
+		return err
+	}
+
+	collectionInfos := make(map[string]*CollectionInfo)
+
+	writeCollectionInfo(writer, topologyInfo, collectionInfos)
+
 	for _, c := range collections {
-		fmt.Fprintf(writer, "collection:\"%s\"\n", c)
+		cif, found := collectionInfos[c]
+		if !found {
+			continue
+		}
+		fmt.Fprintf(writer, "collection:\"%s\"\tvolumeCount:%d\tsize:%d\tfileCount:%d\tdeletedBytes:%d\tdeletion:%d\n", c, cif.VolumeCount, cif.Size, cif.FileCount, cif.DeletedByteCount, cif.DeleteCount)
 	}
 
 	fmt.Fprintf(writer, "Total %d collections.\n", len(collections))
@@ -56,3 +77,34 @@ func ListCollectionNames(commandEnv *CommandEnv, includeNormalVolumes, includeEc
 	}
 	return
 }
+
+func addToCollection(collectionInfos map[string]*CollectionInfo, vif *master_pb.VolumeInformationMessage) {
+	c := vif.Collection
+	cif, found := collectionInfos[c]
+	if !found {
+		cif = &CollectionInfo{}
+		collectionInfos[c] = cif
+	}
+	cif.Size += vif.Size
+	cif.DeleteCount += vif.DeleteCount
+	cif.FileCount += vif.FileCount
+	cif.DeletedByteCount += vif.DeletedByteCount
+	cif.VolumeCount++
+}
+
+func writeCollectionInfo(writer io.Writer, t *master_pb.TopologyInfo, collectionInfos map[string]*CollectionInfo) {
+	for _, dc := range t.DataCenterInfos {
+		for _, r := range dc.RackInfos {
+			for _, dn := range r.DataNodeInfos {
+				for _, diskInfo := range dn.DiskInfos {
+					for _, vi := range diskInfo.VolumeInfos {
+						addToCollection(collectionInfos, vi)
+					}
+					//for _, ecShardInfo := range diskInfo.EcShardInfos {
+					//
+					//}
+				}
+			}
+		}
+	}
+}
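For reference, the per-collection aggregation introduced above can be exercised on its own. The following is a minimal standalone sketch, not part of the commit; VolumeInformationMessage here is a hypothetical stand-in for master_pb.VolumeInformationMessage, reduced to just the fields the diff touches.

package main

import "fmt"

// VolumeInformationMessage is a reduced stand-in for master_pb.VolumeInformationMessage.
type VolumeInformationMessage struct {
	Collection       string
	Size             uint64
	FileCount        uint64
	DeleteCount      uint64
	DeletedByteCount uint64
}

// CollectionInfo mirrors the struct added in the diff above.
type CollectionInfo struct {
	FileCount        uint64
	DeleteCount      uint64
	DeletedByteCount uint64
	Size             uint64
	VolumeCount      int
}

// addToCollection accumulates one volume's counters into its collection's totals.
func addToCollection(collectionInfos map[string]*CollectionInfo, vif *VolumeInformationMessage) {
	cif, found := collectionInfos[vif.Collection]
	if !found {
		cif = &CollectionInfo{}
		collectionInfos[vif.Collection] = cif
	}
	cif.Size += vif.Size
	cif.DeleteCount += vif.DeleteCount
	cif.FileCount += vif.FileCount
	cif.DeletedByteCount += vif.DeletedByteCount
	cif.VolumeCount++
}

func main() {
	infos := make(map[string]*CollectionInfo)
	// Two volumes belonging to the same (made-up) collection.
	addToCollection(infos, &VolumeInformationMessage{Collection: "pictures", Size: 1 << 20, FileCount: 10})
	addToCollection(infos, &VolumeInformationMessage{Collection: "pictures", Size: 2 << 20, FileCount: 5, DeleteCount: 1, DeletedByteCount: 512})
	for c, cif := range infos {
		fmt.Printf("collection:%q volumeCount:%d size:%d fileCount:%d deletedBytes:%d deletion:%d\n",
			c, cif.VolumeCount, cif.Size, cif.FileCount, cif.DeletedByteCount, cif.DeleteCount)
	}
}

Running it prints one summary line per collection, mirroring the tab-separated line that collection.list now emits.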
@@ -223,21 +223,6 @@ func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo
 
 }
 
-func collectEcShardInfos(topoInfo *master_pb.TopologyInfo, selectedCollection string, vid needle.VolumeId) (ecShardInfos []*master_pb.VolumeEcShardInformationMessage) {
-
-	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
-		if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
-			for _, v := range diskInfo.EcShardInfos {
-				if v.Collection == selectedCollection && v.Id == uint32(vid) {
-					ecShardInfos = append(ecShardInfos, v)
-				}
-			}
-		}
-	})
-
-	return
-}
-
 func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) {
 
 	vidMap := make(map[uint32]bool)
@@ -90,7 +90,7 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 		}
 
 		if err := c.syncTwoReplicas(aDB, bDB, a, verbose, writer, b, err, applyChanges, nonRepairThreshold); err != nil {
-			fmt.Fprintf(writer, "snyc volume %d on %s and %s: %v\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id, err)
+			fmt.Fprintf(writer, "sync volume %d on %s and %s: %v\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id, err)
 		}
 		replicas = replicas[1:]
 	}
@@ -103,7 +103,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 		}
 		// for each volume, check filer file ids
 		if err = c.findFilerChunksMissingInVolumeServers(volumeIdToVInfo, tempFolder, writer, *verbose, applyPurging); err != nil {
-			return fmt.Errorf("findExtraChunksInVolumeServers: %v", err)
+			return fmt.Errorf("findFilerChunksMissingInVolumeServers: %v", err)
 		}
 	} else {
 		// collect all filer file ids
@@ -7,6 +7,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"io"
+	"path/filepath"
 	"time"
 
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -26,7 +27,7 @@ func (c *commandVolumeTierMove) Name() string {
 func (c *commandVolumeTierMove) Help() string {
 	return `change a volume from one disk type to another
 
-	volume.tier.move -fromDiskType=hdd -toDiskType=ssd [-collection=""] [-fullPercent=95] [-quietFor=1h]
+	volume.tier.move -fromDiskType=hdd -toDiskType=ssd [-collectionPattern=""] [-fullPercent=95] [-quietFor=1h]
 
 	Even if the volume is replicated, only one replica will be changed and the rest replicas will be dropped.
 	So "volume.fix.replication" and "volume.balance" should be followed.
@@ -41,7 +42,7 @@ func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 	}
 
 	tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
-	collection := tierCommand.String("collection", "", "the collection name")
+	collectionPattern := tierCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
 	fullPercentage := tierCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
 	quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes without no writes for this period")
 	source := tierCommand.String("fromDiskType", "", "the source disk type")
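As a side note, the flag handling above follows Go's standard flag.FlagSet pattern. A small sketch, not from the commit and with made-up arguments, showing how flag.ContinueOnError lets a shell command report a bad flag as an error instead of exiting the process:

package main

import (
	"flag"
	"fmt"
	"time"
)

func main() {
	// Same flag definitions as the tier-move command above, on a standalone FlagSet.
	tierCommand := flag.NewFlagSet("volume.tier.move", flag.ContinueOnError)
	collectionPattern := tierCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
	fullPercentage := tierCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
	quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes without writes for this period")

	// Hypothetical arguments as they might arrive from the weed shell.
	if err := tierCommand.Parse([]string{"-collectionPattern=prod*", "-quietFor=1h"}); err != nil {
		fmt.Println("parse error:", err) // ContinueOnError returns here instead of calling os.Exit
		return
	}
	fmt.Println(*collectionPattern, *fullPercentage, *quietPeriod)
}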
@@ -65,7 +66,7 @@ func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 	}
 
 	// collect all volumes that should change
-	volumeIds, err := collectVolumeIdsForTierChange(commandEnv, topologyInfo, volumeSizeLimitMb, fromDiskType, *collection, *fullPercentage, *quietPeriod)
+	volumeIds, err := collectVolumeIdsForTierChange(commandEnv, topologyInfo, volumeSizeLimitMb, fromDiskType, *collectionPattern, *fullPercentage, *quietPeriod)
 	if err != nil {
 		return err
 	}
@@ -73,7 +74,7 @@ func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 
 	_, allLocations := collectVolumeReplicaLocations(topologyInfo)
 	for _, vid := range volumeIds {
-		if err = doVolumeTierMove(commandEnv, writer, *collection, vid, toDiskType, allLocations, *applyChange); err != nil {
+		if err = doVolumeTierMove(commandEnv, writer, vid, toDiskType, allLocations, *applyChange); err != nil {
 			fmt.Printf("tier move volume %d: %v\n", vid, err)
 		}
 	}
@@ -90,7 +91,7 @@ func isOneOf(server string, locations []wdclient.Location) bool {
 	return false
 }
 
-func doVolumeTierMove(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, toDiskType types.DiskType, allLocations []location, applyChanges bool) (err error) {
+func doVolumeTierMove(commandEnv *CommandEnv, writer io.Writer, vid needle.VolumeId, toDiskType types.DiskType, allLocations []location, applyChanges bool) (err error) {
 	// find volume location
 	locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
 	if !found {
@@ -149,7 +150,7 @@ func doVolumeTierMove(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, toDiskType types.DiskType, allLocations []location, applyChanges bool) (err error) {
 	return nil
 }
 
-func collectVolumeIdsForTierChange(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, sourceTier types.DiskType, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
+func collectVolumeIdsForTierChange(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, sourceTier types.DiskType, collectionPattern string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
 
 	quietSeconds := int64(quietPeriod / time.Second)
 	nowUnixSeconds := time.Now().Unix()
@@ -160,7 +161,18 @@ func collectVolumeIdsForTierChange(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, sourceTier types.DiskType, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
 	eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
 		for _, diskInfo := range dn.DiskInfos {
 			for _, v := range diskInfo.VolumeInfos {
-				if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds && types.ToDiskType(v.DiskType) == sourceTier {
+				// check collection name pattern
+				if collectionPattern != "" {
+					matched, err := filepath.Match(collectionPattern, v.Collection)
+					if err != nil {
+						return
+					}
+					if !matched {
+						continue
+					}
+				}
+
+				if v.ModifiedAtSecond+quietSeconds < nowUnixSeconds && types.ToDiskType(v.DiskType) == sourceTier {
 					if float64(v.Size) > fullPercentage/100*float64(volumeSizeLimitMb)*1024*1024 {
 						vidMap[v.Id] = true
 					}
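The new -collectionPattern flag relies on the standard library's filepath.Match, so '*' and '?' behave as shell-style wildcards against the collection name. A small standalone sketch, with made-up collection names, of how that selection works:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Hypothetical collection names, matched the same way the tier-move loop above does.
	collections := []string{"pictures", "pictures_backup", "pix", "logs"}
	for _, pattern := range []string{"pictures*", "pi?"} {
		for _, c := range collections {
			matched, err := filepath.Match(pattern, c)
			if err != nil {
				// filepath.Match only errors on a malformed pattern.
				fmt.Println("bad pattern:", err)
				return
			}
			if matched {
				fmt.Printf("pattern %q matches collection %q\n", pattern, c)
			}
		}
	}
}

Here "pictures*" selects both "pictures" and "pictures_backup", while "pi?" selects only "pix"; a non-matching collection is simply skipped, which is what the continue in the diff does.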