toughen weedfs clustering: synchronize the max volume id among peers to avoid the same volume id being assigned twice

1. moving raft.Server to topology
2. adding max volume id command for raft
Chris Lu
2014-03-15 23:03:49 -07:00
parent fb75fe852c
commit 41143b3b78
9 changed files with 114 additions and 72 deletions
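
The "max volume id command" from point 2 of the commit message is defined in one of the five changed files not shown in this view. A minimal sketch of what it plausibly looks like, assuming goraft's command interface of the era (CommandName plus Apply) and topology helpers GetMaxVolumeId/UpAdjustMaxVolumeId that this view does not confirm:

package topology

import (
	"code.google.com/p/weed-fs/go/glog"
	"code.google.com/p/weed-fs/go/storage"

	"github.com/goraft/raft"
)

// MaxVolumeIdCommand is replicated through the raft log so that every
// master learns the highest volume id handed out so far.
type MaxVolumeIdCommand struct {
	MaxVolumeId storage.VolumeId `json:"maxVolumeId"`
}

func NewMaxVolumeIdCommand(value storage.VolumeId) *MaxVolumeIdCommand {
	return &MaxVolumeIdCommand{MaxVolumeId: value}
}

func (c *MaxVolumeIdCommand) CommandName() string {
	return "MaxVolumeId"
}

// Apply runs on every peer once the log entry commits. The *Topology is
// recovered from the server context, which NewRaftServer (below) passes
// to raft.NewServer. GetMaxVolumeId/UpAdjustMaxVolumeId are assumed helpers.
func (c *MaxVolumeIdCommand) Apply(server raft.Server) (interface{}, error) {
	topo := server.Context().(*Topology)
	before := topo.GetMaxVolumeId()
	topo.UpAdjustMaxVolumeId(c.MaxVolumeId)
	glog.V(0).Infoln("max volume id", before, "==>", topo.GetMaxVolumeId())
	return nil, nil
}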


@@ -25,11 +25,10 @@ type MasterServer struct {
 	whiteList []string
 	version string
-	topo *topology.Topology
+	Topo *topology.Topology
 	vg *replication.VolumeGrowth
 	vgLock sync.Mutex
-	raftServer *RaftServer
 	bounedLeaderChan chan int
 }
@@ -52,7 +51,7 @@ func NewMasterServer(r *mux.Router, version string, port int, metaFolder string,
 	ms.bounedLeaderChan = make(chan int, 16)
 	seq := sequence.NewFileSequencer(path.Join(metaFolder, "weed.seq"))
 	var e error
-	if ms.topo, e = topology.NewTopology("topo", confFile, seq,
+	if ms.Topo, e = topology.NewTopology("topo", confFile, seq,
 		uint64(volumeSizeLimitMB)*1024*1024, pulseSeconds); e != nil {
 		glog.Fatalf("cannot create topology:%s", e)
 	}
@@ -70,42 +69,36 @@ func NewMasterServer(r *mux.Router, version string, port int, metaFolder string,
r.HandleFunc("/submit", secure(ms.whiteList, ms.submitFromMasterServerHandler))
r.HandleFunc("/{filekey}", ms.redirectHandler)
ms.topo.StartRefreshWritableVolumes(garbageThreshold)
ms.Topo.StartRefreshWritableVolumes(garbageThreshold)
return ms
}
func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) {
ms.raftServer = raftServer
ms.raftServer.raftServer.AddEventListener(raft.LeaderChangeEventType, func(e raft.Event) {
ms.topo.IsLeader = ms.IsLeader()
glog.V(0).Infoln("[", ms.raftServer.Name(), "]", ms.raftServer.Leader(), "becomes leader.")
ms.Topo.RaftServer = raftServer.raftServer
ms.Topo.RaftServer.AddEventListener(raft.LeaderChangeEventType, func(e raft.Event) {
glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "becomes leader.")
})
ms.topo.IsLeader = ms.IsLeader()
if ms.topo.IsLeader {
glog.V(0).Infoln("[", ms.raftServer.Name(), "]", "I am the leader!")
if ms.Topo.IsLeader() {
glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", "I am the leader!")
} else {
glog.V(0).Infoln("[", ms.raftServer.Name(), "]", ms.raftServer.Leader(), "is the leader.")
glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "is the leader.")
}
}
func (ms *MasterServer) IsLeader() bool {
return ms.raftServer == nil || ms.raftServer.IsLeader()
}
func (ms *MasterServer) proxyToLeader(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
if ms.IsLeader() {
if ms.Topo.IsLeader() {
f(w, r)
} else {
ms.bounedLeaderChan <- 1
defer func() { <-ms.bounedLeaderChan }()
targetUrl, err := url.Parse("http://" + ms.raftServer.Leader())
targetUrl, err := url.Parse("http://" + ms.Topo.RaftServer.Leader())
if err != nil {
writeJsonQuiet(w, r, map[string]interface{}{"error": "Leader URL Parse Error " + err.Error()})
return
}
glog.V(4).Infoln("proxying to leader", ms.raftServer.Leader())
glog.V(4).Infoln("proxying to leader", ms.Topo.RaftServer.Leader())
proxy := httputil.NewSingleHostReverseProxy(targetUrl)
proxy.Transport = util.Transport
proxy.ServeHTTP(w, r)


@@ -19,7 +19,7 @@ func (ms *MasterServer) dirLookupHandler(w http.ResponseWriter, r *http.Request)
 	}
 	volumeId, err := storage.NewVolumeId(vid)
 	if err == nil {
-		machines := ms.topo.Lookup(collection, volumeId)
+		machines := ms.Topo.Lookup(collection, volumeId)
 		if machines != nil {
 			ret := []map[string]string{}
 			for _, dn := range machines {
@@ -54,23 +54,23 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}
-	if ms.topo.GetVolumeLayout(collection, replicaPlacement).GetActiveVolumeCount(dataCenter) <= 0 {
-		if ms.topo.FreeSpace() <= 0 {
+	if ms.Topo.GetVolumeLayout(collection, replicaPlacement).GetActiveVolumeCount(dataCenter) <= 0 {
+		if ms.Topo.FreeSpace() <= 0 {
 			w.WriteHeader(http.StatusNotFound)
 			writeJsonQuiet(w, r, map[string]string{"error": "No free volumes left!"})
 			return
 		} else {
 			ms.vgLock.Lock()
 			defer ms.vgLock.Unlock()
-			if ms.topo.GetVolumeLayout(collection, replicaPlacement).GetActiveVolumeCount(dataCenter) <= 0 {
-				if _, err = ms.vg.AutomaticGrowByType(collection, replicaPlacement, dataCenter, ms.topo); err != nil {
+			if ms.Topo.GetVolumeLayout(collection, replicaPlacement).GetActiveVolumeCount(dataCenter) <= 0 {
+				if _, err = ms.vg.AutomaticGrowByType(collection, replicaPlacement, dataCenter, ms.Topo); err != nil {
 					writeJsonQuiet(w, r, map[string]string{"error": "Cannot grow volume group! " + err.Error()})
 					return
 				}
 			}
 		}
 	}
-	fid, count, dn, err := ms.topo.PickForWrite(collection, replicaPlacement, c, dataCenter)
+	fid, count, dn, err := ms.Topo.PickForWrite(collection, replicaPlacement, c, dataCenter)
 	if err == nil {
 		writeJsonQuiet(w, r, map[string]interface{}{"fid": fid, "url": dn.Url(), "publicUrl": dn.PublicUrl, "count": count})
 	} else {
@@ -80,7 +80,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
 }

 func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.Request) {
-	collection, ok := ms.topo.GetCollection(r.FormValue("collection"))
+	collection, ok := ms.Topo.GetCollection(r.FormValue("collection"))
 	if !ok {
 		writeJsonQuiet(w, r, map[string]interface{}{"error": "collection " + r.FormValue("collection") + " does not exist!"})
 		return
@@ -92,7 +92,7 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R
 			return
 		}
 	}
-	ms.topo.DeleteCollection(r.FormValue("collection"))
+	ms.Topo.DeleteCollection(r.FormValue("collection"))
 }

 func (ms *MasterServer) dirJoinHandler(w http.ResponseWriter, r *http.Request) {
@@ -111,7 +111,7 @@ func (ms *MasterServer) dirJoinHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	debug(s, "volumes", r.FormValue("volumes"))
-	ms.topo.RegisterVolumes(init, *volumes, ip, port, publicUrl, maxVolumeCount, r.FormValue("dataCenter"), r.FormValue("rack"))
+	ms.Topo.RegisterVolumes(init, *volumes, ip, port, publicUrl, maxVolumeCount, r.FormValue("dataCenter"), r.FormValue("rack"))
 	m := make(map[string]interface{})
 	m["VolumeSizeLimit"] = uint64(ms.volumeSizeLimitMB) * 1024 * 1024
 	writeJsonQuiet(w, r, m)
@@ -120,7 +120,7 @@ func (ms *MasterServer) dirJoinHandler(w http.ResponseWriter, r *http.Request) {
 func (ms *MasterServer) dirStatusHandler(w http.ResponseWriter, r *http.Request) {
 	m := make(map[string]interface{})
 	m["Version"] = ms.version
-	m["Topology"] = ms.topo.ToMap()
+	m["Topology"] = ms.Topo.ToMap()
 	writeJsonQuiet(w, r, m)
 }
@@ -130,7 +130,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque
 		gcThreshold = ms.garbageThreshold
 	}
 	debug("garbageThreshold =", gcThreshold)
-	ms.topo.Vacuum(gcThreshold)
+	ms.Topo.Vacuum(gcThreshold)
 	ms.dirStatusHandler(w, r)
 }
@@ -139,10 +139,10 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request
 	replicaPlacement, err := storage.NewReplicaPlacementFromString(r.FormValue("replication"))
 	if err == nil {
 		if count, err = strconv.Atoi(r.FormValue("count")); err == nil {
-			if ms.topo.FreeSpace() < count*replicaPlacement.GetCopyCount() {
-				err = errors.New("Only " + strconv.Itoa(ms.topo.FreeSpace()) + " volumes left! Not enough for " + strconv.Itoa(count*replicaPlacement.GetCopyCount()))
+			if ms.Topo.FreeSpace() < count*replicaPlacement.GetCopyCount() {
+				err = errors.New("Only " + strconv.Itoa(ms.Topo.FreeSpace()) + " volumes left! Not enough for " + strconv.Itoa(count*replicaPlacement.GetCopyCount()))
 			} else {
-				count, err = ms.vg.GrowByCountAndType(count, r.FormValue("collection"), replicaPlacement, r.FormValue("dataCenter"), ms.topo)
+				count, err = ms.vg.GrowByCountAndType(count, r.FormValue("collection"), replicaPlacement, r.FormValue("dataCenter"), ms.Topo)
 			}
 		} else {
 			err = errors.New("parameter count is not found")
@@ -160,7 +160,7 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request
 func (ms *MasterServer) volumeStatusHandler(w http.ResponseWriter, r *http.Request) {
 	m := make(map[string]interface{})
 	m["Version"] = ms.version
-	m["Volumes"] = ms.topo.ToVolumeMap()
+	m["Volumes"] = ms.Topo.ToVolumeMap()
 	writeJsonQuiet(w, r, m)
 }
@@ -171,7 +171,7 @@ func (ms *MasterServer) redirectHandler(w http.ResponseWriter, r *http.Request)
debug("parsing error:", err, r.URL.Path)
return
}
machines := ms.topo.Lookup("", volumeId)
machines := ms.Topo.Lookup("", volumeId)
if machines != nil && len(machines) > 0 {
http.Redirect(w, r, "http://"+machines[0].PublicUrl+r.URL.Path, http.StatusMovedPermanently)
} else {
@@ -181,9 +181,9 @@ func (ms *MasterServer) redirectHandler(w http.ResponseWriter, r *http.Request)
 }

 func (ms *MasterServer) submitFromMasterServerHandler(w http.ResponseWriter, r *http.Request) {
-	if ms.IsLeader() {
+	if ms.Topo.IsLeader() {
 		submitForClientHandler(w, r, "localhost:"+strconv.Itoa(ms.port))
 	} else {
-		submitForClientHandler(w, r, ms.raftServer.Leader())
+		submitForClientHandler(w, r, ms.Topo.RaftServer.Leader())
 	}
 }


@@ -3,6 +3,7 @@ package weed_server
 import (
 	"bytes"
 	"code.google.com/p/weed-fs/go/glog"
+	"code.google.com/p/weed-fs/go/topology"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -22,31 +23,35 @@ type RaftServer struct {
 	httpAddr string
 	version string
 	router *mux.Router
+	topo *topology.Topology
 }

-func NewRaftServer(r *mux.Router, version string, peers []string, httpAddr string, dataDir string) *RaftServer {
+func NewRaftServer(r *mux.Router, version string, peers []string, httpAddr string, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer {
 	s := &RaftServer{
 		version:  version,
 		peers:    peers,
 		httpAddr: httpAddr,
 		dataDir:  dataDir,
 		router:   r,
+		topo:     topo,
 	}
 	if glog.V(4) {
 		raft.SetLogLevel(2)
 	}
+	raft.RegisterCommand(&topology.MaxVolumeIdCommand{})
 	var err error
 	transporter := raft.NewHTTPTransporter("/cluster")
-	s.raftServer, err = raft.NewServer(s.httpAddr, s.dataDir, transporter, nil, nil, "")
+	s.raftServer, err = raft.NewServer(s.httpAddr, s.dataDir, transporter, nil, topo, "")
 	if err != nil {
 		glog.V(0).Infoln(err)
 		return nil
 	}
 	transporter.Install(s.raftServer, s)
 	s.raftServer.SetHeartbeatInterval(1 * time.Second)
-	s.raftServer.SetElectionTimeout(1500 * time.Millisecond)
+	s.raftServer.SetElectionTimeout(time.Duration(pulseSeconds) * 1150 * time.Millisecond)
 	s.raftServer.Start()

 	s.router.HandleFunc("/cluster/join", s.joinHandler).Methods("POST")
@@ -86,25 +91,6 @@ func NewRaftServer(r *mux.Router, version string, peers []string, httpAddr strin
 	return s
 }

-func (s *RaftServer) Name() string {
-	return s.raftServer.Name()
-}
-
-func (s *RaftServer) IsLeader() bool {
-	return s.Leader() == s.raftServer.Name()
-}
-
-func (s *RaftServer) Leader() string {
-	l := s.raftServer.Leader()
-	if l == "" {
-		// We are a single node cluster, we are the leader
-		return s.raftServer.Name()
-	}
-	return l
-}
-
 func (s *RaftServer) Peers() (members []string) {
 	peers := s.raftServer.Peers()
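
Registering MaxVolumeIdCommand and handing topo to raft.NewServer as the context only pays off when something actually issues the command. The commit message implies the leader does so whenever it allocates a volume id; a sketch of that allocation path, with NextVolumeId, GetMaxVolumeId, and the Do call assumed rather than shown in this view:

// On the leader, allocating a volume id also replicates the new maximum
// through the raft log, so no peer will ever hand out the same id again.
// The method name and body are inferred from the commit message.
func (t *Topology) NextVolumeId() storage.VolumeId {
	vid := t.GetMaxVolumeId()
	next := vid.Next()
	// raft.Server.Do submits the command and waits for it to commit;
	// fired in a goroutine so allocation does not block on replication.
	go t.RaftServer.Do(NewMaxVolumeIdCommand(next))
	return next
}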


@@ -40,10 +40,10 @@ func (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter
 }

 func (s *RaftServer) redirectToLeader(w http.ResponseWriter, req *http.Request) {
-	if s.Leader() != "" {
+	if s.topo.Leader() != "" {
 		//http.StatusMovedPermanently does not cause http POST following redirection
-		glog.V(0).Infoln("Redirecting to", http.StatusMovedPermanently, "http://"+s.Leader()+req.URL.Path)
-		http.Redirect(w, req, "http://"+s.Leader()+req.URL.Path, http.StatusMovedPermanently)
+		glog.V(0).Infoln("Redirecting to", http.StatusMovedPermanently, "http://"+s.topo.Leader()+req.URL.Path)
+		http.Redirect(w, req, "http://"+s.topo.Leader()+req.URL.Path, http.StatusMovedPermanently)
 	} else {
 		glog.V(0).Infoln("Error: Leader Unknown")
 		http.Error(w, "Leader unknown", http.StatusInternalServerError)
@@ -52,8 +52,8 @@ func (s *RaftServer) redirectToLeader(w http.ResponseWriter, req *http.Request)
 func (s *RaftServer) statusHandler(w http.ResponseWriter, r *http.Request) {
 	m := make(map[string]interface{})
-	m["IsLeader"] = s.IsLeader()
-	m["Leader"] = s.Leader()
+	m["IsLeader"] = s.topo.IsLeader()
+	m["Leader"] = s.topo.Leader()
 	m["Peers"] = s.Peers()
 	writeJsonQuiet(w, r, m)
 }