toughen weedfs clustering by synchronizing the max volume id among peers, so the same volume id cannot be assigned twice:
1. moving raft.Server to topology
2. adding a max volume id command for raft
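Taken together, the changes below make volume-id assignment a leader-only operation whose result is replicated through raft before the id is handed out again after a failover. A rough caller-side sketch of the intended flow (illustrative only; the package name and handleAssign helper are hypothetical, not code from this commit):

package master // hypothetical package, for illustration only

import (
    "errors"

    "code.google.com/p/weed-fs/go/storage"
    "code.google.com/p/weed-fs/go/topology"
)

// handleAssign sketches how a master would hand out a fresh volume id once
// this commit is in place: only the raft leader assigns ids, and NextVolumeId
// replicates a MaxVolumeIdCommand to the peers so a newly elected leader will
// not re-issue an id that was already given out.
func handleAssign(topo *topology.Topology) (storage.VolumeId, error) {
    if !topo.IsLeader() {
        return 0, errors.New("not the raft leader, ask " + topo.Leader())
    }
    return topo.NextVolumeId(), nil
}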
go/topology/cluster_commands.go (new file, 31 lines added)
@@ -0,0 +1,31 @@
+package topology
+
+import (
+    "code.google.com/p/weed-fs/go/glog"
+    "code.google.com/p/weed-fs/go/storage"
+    "github.com/goraft/raft"
+)
+
+type MaxVolumeIdCommand struct {
+    MaxVolumeId storage.VolumeId `json:"maxVolumeId"`
+}
+
+func NewMaxVolumeIdCommand(value storage.VolumeId) *MaxVolumeIdCommand {
+    return &MaxVolumeIdCommand{
+        MaxVolumeId: value,
+    }
+}
+
+func (c *MaxVolumeIdCommand) CommandName() string {
+    return "MaxVolumeId"
+}
+
+func (c *MaxVolumeIdCommand) Apply(server raft.Server) (interface{}, error) {
+    topo := server.Context().(*Topology)
+    before := topo.GetMaxVolumeId()
+    topo.UpAdjustMaxVolumeId(c.MaxVolumeId)
+
+    glog.V(0).Infoln("max volume id", before, "==>", topo.GetMaxVolumeId())
+
+    return nil, nil
+}
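For goraft to re-create MaxVolumeIdCommand values when it replays its log (on peers and after a restart), the command type has to be registered by name before the raft server starts. That registration call is not among the hunks shown here; below is a minimal sketch, assuming it lives somewhere in the master's raft-server setup (the package name is hypothetical):

package raftsetup // hypothetical package, for illustration only

import (
    "code.google.com/p/weed-fs/go/topology"

    "github.com/goraft/raft"
)

func init() {
    // Register the command under its CommandName ("MaxVolumeId") so goraft
    // can decode it from replicated log entries.
    raft.RegisterCommand(&topology.MaxVolumeIdCommand{})
}

The remaining hunks wire the command into the topology code itself.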
@@ -5,6 +5,7 @@ import (
     "code.google.com/p/weed-fs/go/sequence"
     "code.google.com/p/weed-fs/go/storage"
     "errors"
+    "github.com/goraft/raft"
     "io/ioutil"
     "math/rand"
 )
@@ -12,8 +13,6 @@ import (
 type Topology struct {
     NodeImpl
 
-    IsLeader bool
-
     collectionMap map[string]*Collection
 
     pulse int64
@@ -27,6 +26,8 @@ type Topology struct {
     chanFullVolumes chan storage.VolumeInfo
 
     configuration *Configuration
+
+    RaftServer raft.Server
 }
 
 func NewTopology(id string, confFile string, seq sequence.Sequencer, volumeSizeLimit uint64, pulse int) (*Topology, error) {
@@ -50,6 +51,24 @@ func NewTopology(id string, confFile string, seq sequence.Sequencer, volumeSizeL
     return t, err
 }
 
+func (t *Topology) IsLeader() bool {
+    return t.RaftServer == nil || t.Leader() == t.RaftServer.Name()
+}
+
+func (t *Topology) Leader() string {
+    l := ""
+    if t.RaftServer != nil {
+        l = t.RaftServer.Leader()
+    }
+
+    if l == "" {
+        // We are a single node cluster, we are the leader
+        return t.RaftServer.Name()
+    }
+
+    return l
+}
+
 func (t *Topology) loadConfiguration(configurationFile string) error {
     b, e := ioutil.ReadFile(configurationFile)
     if e == nil {
@@ -79,7 +98,9 @@ func (t *Topology) Lookup(collection string, vid storage.VolumeId) []*DataNode {
 
 func (t *Topology) NextVolumeId() storage.VolumeId {
     vid := t.GetMaxVolumeId()
-    return vid.Next()
+    next := vid.Next()
+    go t.RaftServer.Do(NewMaxVolumeIdCommand(next))
+    return next
 }
 
 func (t *Topology) PickForWrite(collectionName string, rp *storage.ReplicaPlacement, count int, dataCenter string) (string, int, *DataNode, error) {
@@ -10,7 +10,7 @@ import (
 func (t *Topology) StartRefreshWritableVolumes(garbageThreshold string) {
     go func() {
         for {
-            if t.IsLeader {
+            if t.IsLeader() {
                 freshThreshHold := time.Now().Unix() - 3*t.pulse //3 times of sleep interval
                 t.CollectDeadNodeAndFullVolumes(freshThreshHold, t.volumeSizeLimit)
             }
@@ -19,7 +19,7 @@ func (t *Topology) StartRefreshWritableVolumes(garbageThreshold string) {
     }()
     go func(garbageThreshold string) {
         c := time.Tick(15 * time.Minute)
-        if t.IsLeader {
+        if t.IsLeader() {
             for _ = range c {
                 t.Vacuum(garbageThreshold)
             }
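Note that NextVolumeId replicates the new max in a detached goroutine (go t.RaftServer.Do(...)), so the caller does not wait for the raft round-trip and any replication error is dropped. A sketch of a synchronous variant, purely for illustration of the trade-off (nextVolumeIdSync and the package name are hypothetical, not part of this commit):

package master // hypothetical package, for illustration only

import (
    "code.google.com/p/weed-fs/go/storage"
    "code.google.com/p/weed-fs/go/topology"
)

// nextVolumeIdSync waits for the MaxVolumeIdCommand to be committed by raft
// before returning the new id, surfacing replication errors to the caller.
func nextVolumeIdSync(t *topology.Topology) (storage.VolumeId, error) {
    next := t.GetMaxVolumeId().Next()
    if _, err := t.RaftServer.Do(topology.NewMaxVolumeIdCommand(next)); err != nil {
        return 0, err
    }
    return next, nil
}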