Bump github.com/rclone/rclone from 1.63.1 to 1.64.0 (#4850)

* Bump github.com/rclone/rclone from 1.63.1 to 1.64.0

Bumps [github.com/rclone/rclone](https://github.com/rclone/rclone) from 1.63.1 to 1.64.0.
- [Release notes](https://github.com/rclone/rclone/releases)
- [Changelog](https://github.com/rclone/rclone/blob/master/RELEASE.md)
- [Commits](https://github.com/rclone/rclone/compare/v1.63.1...v1.64.0)

---
updated-dependencies:
- dependency-name: github.com/rclone/rclone
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* API changes

* go mod

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>
Co-authored-by: chrislu <chris.lu@gmail.com>
Author: dependabot[bot]
Date: 2023-09-18 14:43:05 -07:00
Committed by: GitHub
Commit: a04bd4d26f (parent a0b60dd641)
26 changed files with 235 additions and 122 deletions
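
For context on the "API changes" commit above: bumping rclone to 1.64.0 also moves the pinned golang.org/x/exp forward, and that module's slices.SortFunc no longer takes a boolean less(a, b) callback; it now takes a three-way cmp(a, b) int callback (negative when a sorts first, zero when equal, positive when b sorts first), matching the slices package that landed in the Go 1.21 standard library. The hunks below are the mechanical migration of every comparator in the weed/shell package. A minimal sketch of the before/after shape, using a made-up node type as a stand-in for the real EcNode:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// node is a hypothetical stand-in for shell.EcNode, keeping only the
// field the sort looks at.
type node struct{ freeEcSlot int }

func main() {
	nodes := []node{{3}, {10}, {7}}

	// Old x/exp API (before this bump), shown for comparison:
	//   slices.SortFunc(nodes, func(a, b node) bool { return a.freeEcSlot > b.freeEcSlot })

	// New API: the callback returns an int. Subtracting in reverse (b - a)
	// yields descending order by free slots, mirroring the hunks below.
	slices.SortFunc(nodes, func(a, b node) int {
		return b.freeEcSlot - a.freeEcSlot
	})

	fmt.Println(nodes) // [{10} {7} {3}]
}
```

The "go mod" commit is the accompanying go.mod/go.sum update that records the new rclone and golang.org/x/exp versions.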


@@ -411,8 +411,8 @@ func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool
 	hasMove := true
 	for hasMove {
 		hasMove = false
-		slices.SortFunc(rackEcNodes, func(a, b *EcNode) bool {
-			return a.freeEcSlot > b.freeEcSlot
+		slices.SortFunc(rackEcNodes, func(a, b *EcNode) int {
+			return b.freeEcSlot - a.freeEcSlot
 		})
 		emptyNode, fullNode := rackEcNodes[0], rackEcNodes[len(rackEcNodes)-1]
 		emptyNodeShardCount, fullNodeShardCount := ecNodeIdToShardCount[emptyNode.info.Id], ecNodeIdToShardCount[fullNode.info.Id]
@@ -492,8 +492,8 @@ func pickNEcShardsToMoveFrom(ecNodes []*EcNode, vid needle.VolumeId, n int) map[
 			})
 		}
 	}
-	slices.SortFunc(candidateEcNodes, func(a, b *CandidateEcNode) bool {
-		return a.shardCount > b.shardCount
+	slices.SortFunc(candidateEcNodes, func(a, b *CandidateEcNode) int {
+		return b.shardCount - a.shardCount
 	})
 	for i := 0; i < n; i++ {
 		selectedEcNodeIndex := -1


@@ -119,14 +119,14 @@ func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId,
 }
 func sortEcNodesByFreeslotsDescending(ecNodes []*EcNode) {
-	slices.SortFunc(ecNodes, func(a, b *EcNode) bool {
-		return a.freeEcSlot > b.freeEcSlot
+	slices.SortFunc(ecNodes, func(a, b *EcNode) int {
+		return b.freeEcSlot - a.freeEcSlot
 	})
 }
 func sortEcNodesByFreeslotsAscending(ecNodes []*EcNode) {
-	slices.SortFunc(ecNodes, func(a, b *EcNode) bool {
-		return a.freeEcSlot < b.freeEcSlot
+	slices.SortFunc(ecNodes, func(a, b *EcNode) int {
+		return a.freeEcSlot - b.freeEcSlot
 	})
 }


@@ -243,8 +243,8 @@ func (n *Node) selectVolumes(fn func(v *master_pb.VolumeInformationMessage) bool
 }
 func sortWritableVolumes(volumes []*master_pb.VolumeInformationMessage) {
-	slices.SortFunc(volumes, func(a, b *master_pb.VolumeInformationMessage) bool {
-		return a.Size < b.Size
+	slices.SortFunc(volumes, func(a, b *master_pb.VolumeInformationMessage) int {
+		return int(a.Size - b.Size)
 	})
 }
@@ -269,8 +269,8 @@ func balanceSelectedVolume(commandEnv *CommandEnv, diskType types.DiskType, volu
 	for hasMoved {
 		hasMoved = false
-		slices.SortFunc(nodesWithCapacity, func(a, b *Node) bool {
-			return a.localVolumeRatio(capacityFunc) < b.localVolumeRatio(capacityFunc)
+		slices.SortFunc(nodesWithCapacity, func(a, b *Node) int {
+			return int(a.localVolumeRatio(capacityFunc) - b.localVolumeRatio(capacityFunc))
 		})
 		if len(nodesWithCapacity) == 0 {
 			fmt.Printf("no volume server found with capacity for %s", diskType.ReadableString())
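
One detail worth flagging in the hunk above: localVolumeRatio looks like it returns a floating-point ratio, and int(a.localVolumeRatio(...) - b.localVolumeRatio(...)) truncates toward zero, so two nodes whose ratios differ by less than 1.0 now compare as equal (the uint64 Size subtraction in sortWritableVolumes is less of a concern on 64-bit builds, where the signed conversion preserves realistic differences). A small sketch of the truncation and a sign-preserving alternative; it assumes Go 1.21's standard cmp package and uses a made-up type rather than the repo's Node:

```go
package main

import (
	"cmp"
	"fmt"
)

// server is a hypothetical stand-in; only the ratio matters here.
type server struct{ ratio float64 }

func main() {
	a, b := server{ratio: 0.2}, server{ratio: 0.7}

	// int() truncates the -0.5 difference to 0, i.e. "equal",
	// even though a is clearly less loaded than b.
	fmt.Println(int(a.ratio - b.ratio)) // 0

	// cmp.Compare works on any ordered type (floats included) and
	// returns -1, 0, or +1 with the correct sign.
	fmt.Println(cmp.Compare(a.ratio, b.ratio)) // -1
}
```

The same int(a - b) shortcut shows up in several hunks below; it is fine for signed integer fields whose differences fit in an int, but floats and unsigned 32-bit fields deserve a real three-way compare.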


@@ -80,8 +80,8 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
 		if *volumeId > 0 && replicas[0].info.Id != uint32(*volumeId) {
 			continue
 		}
-		slices.SortFunc(replicas, func(a, b *VolumeReplica) bool {
-			return fileCount(a) > fileCount(b)
+		slices.SortFunc(replicas, func(a, b *VolumeReplica) int {
+			return int(fileCount(b) - fileCount(a))
 		})
 		for len(replicas) >= 2 {
 			a, b := replicas[0], replicas[1]


@@ -328,8 +328,8 @@ func (c *commandVolumeFixReplication) fixOneUnderReplicatedVolume(commandEnv *Co
 func keepDataNodesSorted(dataNodes []location, diskType types.DiskType) {
 	fn := capacityByFreeVolumeCount(diskType)
-	slices.SortFunc(dataNodes, func(a, b location) bool {
-		return fn(a.dataNode) > fn(b.dataNode)
+	slices.SortFunc(dataNodes, func(a, b location) int {
+		return int(fn(b.dataNode) - fn(a.dataNode))
 	})
 }
@@ -514,17 +514,17 @@ func countReplicas(replicas []*VolumeReplica) (diffDc, diffRack, diffNode map[st
 }
 func pickOneReplicaToDelete(replicas []*VolumeReplica, replicaPlacement *super_block.ReplicaPlacement) *VolumeReplica {
-	slices.SortFunc(replicas, func(a, b *VolumeReplica) bool {
+	slices.SortFunc(replicas, func(a, b *VolumeReplica) int {
 		if a.info.Size != b.info.Size {
-			return a.info.Size < b.info.Size
+			return int(a.info.Size - b.info.Size)
 		}
 		if a.info.ModifiedAtSecond != b.info.ModifiedAtSecond {
-			return a.info.ModifiedAtSecond < b.info.ModifiedAtSecond
+			return int(a.info.ModifiedAtSecond - b.info.ModifiedAtSecond)
 		}
 		if a.info.CompactRevision != b.info.CompactRevision {
-			return a.info.CompactRevision < b.info.CompactRevision
+			return int(a.info.CompactRevision - b.info.CompactRevision)
 		}
-		return false
+		return 0
 	})
 	return replicas[0]
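
The tie-break chain above carries over naturally: each key now returns a signed difference instead of a bool, and the final return false becomes return 0 ("equal"). One caveat: Size is a uint64 (and CompactRevision apparently a uint32) in the protobuf message, so the unsigned subtractions can wrap before the int conversion. A hedged sketch of the same chain written with explicit three-way compares (Go 1.21's cmp package assumed; the replicaInfo struct is an illustrative stand-in for master_pb.VolumeInformationMessage):

```go
package main

import (
	"cmp"
	"fmt"

	"golang.org/x/exp/slices"
)

// replicaInfo mirrors just the fields the tie-break looks at.
type replicaInfo struct {
	Size             uint64
	ModifiedAtSecond int64
	CompactRevision  uint32
}

func main() {
	replicas := []replicaInfo{
		{Size: 5, ModifiedAtSecond: 100, CompactRevision: 2},
		{Size: 5, ModifiedAtSecond: 100, CompactRevision: 1},
		{Size: 3, ModifiedAtSecond: 900, CompactRevision: 9},
	}

	slices.SortFunc(replicas, func(a, b replicaInfo) int {
		// Compare key by key, falling through on ties; cmp.Compare
		// keeps the sign right for the unsigned fields.
		if c := cmp.Compare(a.Size, b.Size); c != 0 {
			return c
		}
		if c := cmp.Compare(a.ModifiedAtSecond, b.ModifiedAtSecond); c != 0 {
			return c
		}
		return cmp.Compare(a.CompactRevision, b.CompactRevision)
	})

	fmt.Println(replicas) // smallest size first, then by timestamp, then by revision
}
```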


@@ -8,6 +8,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
 	"golang.org/x/exp/slices"
 	"path/filepath"
+	"strings"
 	"io"
 )
@@ -81,8 +82,8 @@ func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
 func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
 	output(verbosityLevel >= 0, writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
-	slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) bool {
-		return a.Id < b.Id
+	slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) int {
+		return strings.Compare(a.Id, b.Id)
 	})
 	var s statistics
 	for _, dc := range t.DataCenterInfos {
@@ -98,8 +99,8 @@ func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.Top
 func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
 	output(verbosityLevel >= 1, writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
 	var s statistics
-	slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) bool {
-		return a.Id < b.Id
+	slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) int {
+		return strings.Compare(a.Id, b.Id)
 	})
 	for _, r := range t.RackInfos {
 		if *c.rack != "" && *c.rack != r.Id {
@@ -114,8 +115,8 @@ func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.D
 func (c *commandVolumeList) writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
 	output(verbosityLevel >= 2, writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
 	var s statistics
-	slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) bool {
-		return a.Id < b.Id
+	slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) int {
+		return strings.Compare(a.Id, b.Id)
 	})
 	for _, dn := range t.DataNodeInfos {
 		if *c.dataNode != "" && *c.dataNode != dn.Id {
@@ -159,8 +160,8 @@ func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInf
 		diskType = "hdd"
 	}
 	output(verbosityLevel >= 4, writer, " Disk %s(%s)\n", diskType, diskInfoToString(t))
-	slices.SortFunc(t.VolumeInfos, func(a, b *master_pb.VolumeInformationMessage) bool {
-		return a.Id < b.Id
+	slices.SortFunc(t.VolumeInfos, func(a, b *master_pb.VolumeInformationMessage) int {
+		return int(a.Id - b.Id)
 	})
 	for _, vi := range t.VolumeInfos {
 		if c.isNotMatchDiskInfo(vi.ReadOnly, vi.Collection, vi.Id) {
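
The string-keyed sorts above translate directly to strings.Compare, but the volume sort in the last hunk is worth a second look: VolumeInformationMessage.Id is a uint32, and subtracting unsigned 32-bit values wraps around before the conversion to int, so int(a.Id - b.Id) comes out as a large positive number whenever a.Id < b.Id. A tiny sketch of the failure mode and a safe alternative (assuming Go 1.21's cmp package):

```go
package main

import (
	"cmp"
	"fmt"
)

func main() {
	var a, b uint32 = 1, 2

	// 1 - 2 wraps to 4294967295 as a uint32; converting that to int keeps
	// it positive, so the comparator would claim a sorts after b.
	fmt.Println(int(a - b)) // 4294967295

	// A three-way compare keeps the sign correct for unsigned types.
	fmt.Println(cmp.Compare(a, b)) // -1
}
```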


@@ -179,8 +179,8 @@ func (c *commandVolumeServerEvacuate) evacuateEcVolumes(commandEnv *CommandEnv,
 func (c *commandVolumeServerEvacuate) moveAwayOneEcVolume(commandEnv *CommandEnv, ecShardInfo *master_pb.VolumeEcShardInformationMessage, thisNode *EcNode, otherNodes []*EcNode, applyChange bool) (hasMoved bool, err error) {
 	for _, shardId := range erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds() {
-		slices.SortFunc(otherNodes, func(a, b *EcNode) bool {
-			return a.localShardIdCount(ecShardInfo.Id) < b.localShardIdCount(ecShardInfo.Id)
+		slices.SortFunc(otherNodes, func(a, b *EcNode) int {
+			return a.localShardIdCount(ecShardInfo.Id) - b.localShardIdCount(ecShardInfo.Id)
 		})
 		for i := 0; i < len(otherNodes); i++ {
 			emptyNode := otherNodes[i]
@@ -214,8 +214,8 @@ func moveAwayOneNormalVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][
 		})
 	}
 	// most empty one is in the front
-	slices.SortFunc(otherNodes, func(a, b *Node) bool {
-		return a.localVolumeRatio(maxVolumeCountFn) < b.localVolumeRatio(maxVolumeCountFn)
+	slices.SortFunc(otherNodes, func(a, b *Node) int {
+		return int(a.localVolumeRatio(maxVolumeCountFn) - b.localVolumeRatio(maxVolumeCountFn))
 	})
 	for i := 0; i < len(otherNodes); i++ {
 		emptyNode := otherNodes[i]


@@ -26,8 +26,8 @@ var (
 )
 func RunShell(options ShellOptions) {
-	slices.SortFunc(Commands, func(a, b command) bool {
-		return strings.Compare(a.Name(), b.Name()) < 0
+	slices.SortFunc(Commands, func(a, b command) int {
+		return strings.Compare(a.Name(), b.Name())
 	})
 	line = liner.NewLiner()
 	defer line.Close()
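
Since every comparator in the shell package was touched, a cheap sanity check is to sort the same data with the old boolean "less" semantics (via the standard sort package) and with the new three-way comparator, and confirm the orders agree. A self-contained sketch; the command type here is a made-up value type, not the shell package's command interface:

```go
package main

import (
	"fmt"
	"sort"
	"strings"

	"golang.org/x/exp/slices"
)

type command struct{ name string }

func (c command) Name() string { return c.name }

func main() {
	names := []string{"volume.list", "ec.balance", "fs.meta.save", "cluster.ps"}

	oldOrder := make([]command, len(names))
	newOrder := make([]command, len(names))
	for i, n := range names {
		oldOrder[i] = command{n}
		newOrder[i] = command{n}
	}

	// Old style: a boolean less function.
	sort.Slice(oldOrder, func(i, j int) bool {
		return strings.Compare(oldOrder[i].Name(), oldOrder[j].Name()) < 0
	})

	// Post-bump style: a three-way comparator, as in RunShell above.
	slices.SortFunc(newOrder, func(a, b command) int {
		return strings.Compare(a.Name(), b.Name())
	})

	fmt.Println(slices.Equal(oldOrder, newOrder)) // true: both orderings agree
}
```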