Add optional admin port to volume server, to separate admin operations from normal file operations.
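In outline: DataNode gains an AdminPort field and an AdminUrl() accessor, and the master now routes admin traffic (volume assignment, vacuum control) to AdminUrl() instead of the file-serving Url(). A minimal standalone sketch of that split, with the type trimmed to just the fields this commit touches (the port numbers are made up):

package main

import (
	"fmt"
	"strconv"
)

type DataNode struct {
	Ip        string
	Port      int // serves normal file reads/writes
	AdminPort int // serves admin operations such as /admin/assign_volume
}

func (dn *DataNode) Url() string {
	return dn.Ip + ":" + strconv.Itoa(dn.Port)
}

func (dn *DataNode) AdminUrl() string {
	return dn.Ip + ":" + strconv.Itoa(dn.AdminPort)
}

func main() {
	dn := &DataNode{Ip: "127.0.0.1", Port: 8080, AdminPort: 9335}
	fmt.Println("file ops  ->", dn.Url())      // 127.0.0.1:8080
	fmt.Println("admin ops ->", dn.AdminUrl()) // 127.0.0.1:9335
}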
@@ -19,7 +19,7 @@ func AllocateVolume(dn *DataNode, vid storage.VolumeId, option *VolumeGrowOption
 	values.Add("collection", option.Collection)
 	values.Add("replication", option.ReplicaPlacement.String())
 	values.Add("ttl", option.Ttl.String())
-	jsonBlob, err := util.Post("http://"+dn.PublicUrl+"/admin/assign_volume", values)
+	jsonBlob, err := util.Post("http://"+dn.AdminUrl()+"/admin/assign_volume", values)
 	if err != nil {
 		return err
 	}
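Aside: util.Post itself is not part of this diff; judging from the call site above, it form-POSTs the values and returns the raw response body. A stand-in with that assumed shape (the signature is inferred, not confirmed by this commit):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

// Post form-POSTs the values and returns the raw response body.
// Assumed contract, inferred from the util.Post call site above.
func Post(targetUrl string, values url.Values) ([]byte, error) {
	resp, err := http.PostForm(targetUrl, values)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

func main() {
	values := url.Values{}
	values.Add("volume", "7") // illustrative parameter, not from this commit
	body, err := Post("http://127.0.0.1:9335/admin/assign_volume", values)
	if err == nil {
		fmt.Println(string(body))
	}
}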
@@ -13,6 +13,7 @@ type DataNode struct {
 	volumes map[storage.VolumeId]storage.VolumeInfo
 	Ip string
 	Port int
+	AdminPort int
 	PublicUrl string
 	LastSeen int64 // unix time in seconds
 	Dead bool
@@ -28,7 +29,7 @@ func NewDataNode(id string) *DataNode {
 }
 
 func (dn *DataNode) String() string {
-	return fmt.Sprintf("NodeImpl:%s ,volumes:%v, Ip:%s, Port:%d, PublicUrl:%s, Dead:%v", dn.NodeImpl.String(), dn.volumes, dn.Ip, dn.Port, dn.PublicUrl, dn.Dead)
+	return fmt.Sprintf("Node:%s, volumes:%v, Ip:%s, Port:%d, PublicUrl:%s, Dead:%v", dn.NodeImpl.String(), dn.volumes, dn.Ip, dn.Port, dn.PublicUrl, dn.Dead)
 }
 
 func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) {
@@ -89,6 +90,10 @@ func (dn *DataNode) Url() string {
 	return dn.Ip + ":" + strconv.Itoa(dn.Port)
 }
 
+func (dn *DataNode) AdminUrl() string {
+	return dn.Ip + ":" + strconv.Itoa(dn.AdminPort)
+}
+
 func (dn *DataNode) ToMap() interface{} {
 	ret := make(map[string]interface{})
 	ret["Url"] = dn.Url()
@@ -27,7 +27,7 @@ func (r *Rack) FindDataNode(ip string, port int) *DataNode {
 	}
 	return nil
 }
-func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int) *DataNode {
+func (r *Rack) GetOrCreateDataNode(ip string, port, adminPort int, publicUrl string, maxVolumeCount int) *DataNode {
 	for _, c := range r.Children() {
 		dn := c.(*DataNode)
 		if dn.MatchLocation(ip, port) {
@@ -43,6 +43,7 @@ func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVol
 	dn := NewDataNode(ip + ":" + strconv.Itoa(port))
 	dn.Ip = ip
 	dn.Port = port
+	dn.AdminPort = adminPort
 	dn.PublicUrl = publicUrl
 	dn.maxVolumeCount = maxVolumeCount
 	dn.LastSeen = time.Now().Unix()
@@ -157,7 +157,13 @@ func (t *Topology) ProcessJoinMessage(joinMessage *operation.JoinMessage) {
 	if *joinMessage.IsInit && dn != nil {
 		t.UnRegisterDataNode(dn)
 	}
-	dn = rack.GetOrCreateDataNode(*joinMessage.Ip, int(*joinMessage.Port), *joinMessage.PublicUrl, int(*joinMessage.MaxVolumeCount))
+	adminPort := *joinMessage.Port
+	if joinMessage.AdminPort != nil {
+		adminPort = *joinMessage.AdminPort
+	}
+	dn = rack.GetOrCreateDataNode(*joinMessage.Ip,
+		int(*joinMessage.Port), int(adminPort), *joinMessage.PublicUrl,
+		int(*joinMessage.MaxVolumeCount))
 	var volumeInfos []storage.VolumeInfo
 	for _, v := range joinMessage.Volumes {
 		if vi, err := storage.NewVolumeInfo(v); err == nil {
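The join handling above is what keeps the admin port optional: JoinMessage.AdminPort may be absent, in which case the master reuses the file-serving port for admin calls, so volume servers that predate this commit keep working unchanged. A hedged sketch of just that fallback (field types assumed to mirror the generated JoinMessage struct, where optional fields are pointers):

package main

import "fmt"

// adminPortOrDefault captures the fallback above: use AdminPort when the
// volume server sent one, otherwise fall back to the regular port.
func adminPortOrDefault(port uint32, adminPort *uint32) uint32 {
	if adminPort != nil {
		return *adminPort
	}
	return port // older volume servers never send AdminPort
}

func main() {
	fmt.Println(adminPortOrDefault(8080, nil)) // legacy server -> 8080
	p := uint32(9335)
	fmt.Println(adminPortOrDefault(8080, &p)) // new server -> 9335
}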
@@ -23,7 +23,7 @@ func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist
 			//glog.V(0).Infoln(index, "Checked vacuuming", vid, "on", url, "needVacuum", ret)
 			ch <- ret
 		}
-		}(index, dn.Url(), vid)
+		}(index, dn.AdminUrl(), vid)
 	}
 	isCheckSuccess := true
 	for _ = range locationlist.list {
@@ -50,7 +50,7 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli
 			glog.V(0).Infoln(index, "Complete vacuuming", vid, "on", url)
 			ch <- true
 		}
-		}(index, dn.Url(), vid)
+		}(index, dn.AdminUrl(), vid)
 	}
 	isVacuumSuccess := true
 	for _ = range locationlist.list {
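Both batch helpers above share the same fan-out pattern: one goroutine per replica location, results collected over a channel sized to the location list, and success only if every location agrees. A stripped-down sketch of that pattern (names are illustrative, not from the codebase):

package main

import "fmt"

// allAgree queries every location concurrently and requires unanimous
// agreement, mirroring how batchVacuumVolumeCheck/Compact tally results.
func allAgree(urls []string, check func(url string) bool) bool {
	ch := make(chan bool, len(urls))
	for _, u := range urls {
		go func(url string) { ch <- check(url) }(u)
	}
	ok := true
	for range urls {
		if !<-ch {
			ok = false
		}
	}
	return ok
}

func main() {
	locations := []string{"127.0.0.1:9334", "127.0.0.1:9335"}
	fmt.Println(allAgree(locations, func(url string) bool { return true })) // true
}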
@@ -66,12 +66,12 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli
 func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool {
 	isCommitSuccess := true
 	for _, dn := range locationlist.list {
-		glog.V(0).Infoln("Start Commiting vacuum", vid, "on", dn.Url())
-		if e := vacuumVolume_Commit(dn.Url(), vid); e != nil {
-			glog.V(0).Infoln("Error when committing vacuum", vid, "on", dn.Url(), e)
+		glog.V(0).Infoln("Start Commiting vacuum", vid, "on", dn.AdminUrl())
+		if e := vacuumVolume_Commit(dn.AdminUrl(), vid); e != nil {
+			glog.V(0).Infoln("Error when committing vacuum", vid, "on", dn.AdminUrl(), e)
 			isCommitSuccess = false
 		} else {
-			glog.V(0).Infoln("Complete Commiting vacuum", vid, "on", dn.Url())
+			glog.V(0).Infoln("Complete Commiting vacuum", vid, "on", dn.AdminUrl())
 		}
 		if isCommitSuccess {
 			vl.SetVolumeAvailable(dn, vid)
@@ -201,7 +201,7 @@ func (vg *VolumeGrowth) grow(topo *Topology, vid storage.VolumeId, option *Volum
 		}
 		server.AddOrUpdateVolume(vi)
 		topo.RegisterVolumeLayout(vi, server)
-		glog.V(0).Infoln("Created Volume", vid, "on", server)
+		glog.V(0).Infoln("Created Volume", vid, "on", server.NodeImpl.String())
 	} else {
 		glog.V(0).Infoln("Failed to assign", vid, "to", servers, "error", err)
 		return fmt.Errorf("Failed to assign %d: %v", vid, err)