Mirror of https://github.com/seaweedfs/seaweedfs.git

Squashed commit of the following:
(all commits authored by chrislu <chris.lu@gmail.com>)

commit 4827425146  Sat Sep 16 15:05:38 2023 -0700  balancer works
commit 3b50139f68  Fri Sep 15 22:22:32 2023 -0700  comments
commit 7f685ce7ba  Fri Sep 15 22:20:05 2023 -0700  adjust APIs
commit 436d99443b  Thu Sep 14 23:49:05 2023 -0700  receive broker stats
commit b771fefa37  Wed Sep 13 00:03:47 2023 -0700  Merge branch 'master' into sub (Merge: 0a851ec00890881037)
commit 0a851ec00b  Sun Sep 10 22:01:25 2023 -0700  Create balancer.go
commit 39941edc0b  Thu Sep 7 23:55:19 2023 -0700  add publisher shutdown
commit 875f562779  Wed Sep 6 23:16:41 2023 -0700  server side send response at least once per second
commit 984b6c54cf  Wed Sep 6 23:15:29 2023 -0700  ack interval 128
commit 2492a45499  Wed Sep 6 22:39:46 2023 -0700  ack interval
commit ba67e6ca29  Mon Sep 4 21:43:50 2023 -0700  api for sub
commit 9e4f985698  Mon Sep 4 21:43:30 2023 -0700  publish, benchmark
commit cb470d44df  Fri Sep 1 00:36:51 2023 -0700  can pub and sub
commit 1eb2da46d5  Mon Aug 28 09:02:12 2023 -0700  connect and publish
commit 504ae8383a  Mon Aug 28 09:01:25 2023 -0700  protoc version
commit dbcba75271  Sun Aug 27 18:59:04 2023 -0700  rename to lookup
commit c9caf33119  Sun Aug 27 18:33:46 2023 -0700  move functions
commit 4d6c18d86f  Sun Aug 27 17:50:59 2023 -0700  pub sub initial tests
commit 4eb8e8624d  Sun Aug 27 13:14:39 2023 -0700  rename
commit 1990456670  Sun Aug 27 13:13:14 2023 -0700  sub
commit 905911853d  Sat Aug 26 13:39:21 2023 -0700  adjust proto

weed/mq/broker/broker_grpc_admin.go  (new file, 252 lines)
@@ -0,0 +1,252 @@
package broker

import (
    "context"
    "github.com/seaweedfs/seaweedfs/weed/cluster"
    "github.com/seaweedfs/seaweedfs/weed/glog"
    "github.com/seaweedfs/seaweedfs/weed/mq/topic"
    "github.com/seaweedfs/seaweedfs/weed/pb"
    "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
    "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
    "sort"
    "sync"
)

const (
    MaxPartitionCount = 1024
)

// FindBrokerLeader asks the master for the brokers registered in the requested
// filer group and returns the address of the first one.
func (broker *MessageQueueBroker) FindBrokerLeader(c context.Context, request *mq_pb.FindBrokerLeaderRequest) (*mq_pb.FindBrokerLeaderResponse, error) {
    ret := &mq_pb.FindBrokerLeaderResponse{}
    err := broker.withMasterClient(false, broker.MasterClient.GetMaster(), func(client master_pb.SeaweedClient) error {
        resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
            ClientType: cluster.BrokerType,
            FilerGroup: request.FilerGroup,
        })
        if err != nil {
            return err
        }
        if len(resp.ClusterNodes) == 0 {
            return nil
        }
        ret.Broker = resp.ClusterNodes[0].Address
        return nil
    })
    return ret, err
}

// AssignSegmentBrokers returns the brokers that should host the given segment:
// it reuses the brokers already recorded on the filer if they are still active,
// otherwise it selects the most lightly loaded brokers and records them.
func (broker *MessageQueueBroker) AssignSegmentBrokers(c context.Context, request *mq_pb.AssignSegmentBrokersRequest) (*mq_pb.AssignSegmentBrokersResponse, error) {
    ret := &mq_pb.AssignSegmentBrokersResponse{}
    segment := topic.FromPbSegment(request.Segment)

    // check existing segment locations on filer
    existingBrokers, err := broker.checkSegmentOnFiler(segment)
    if err != nil {
        return ret, err
    }

    if len(existingBrokers) > 0 {
        // good if the segment is still on the brokers
        isActive, err := broker.checkSegmentsOnBrokers(segment, existingBrokers)
        if err != nil {
            return ret, err
        }
        if isActive {
            for _, b := range existingBrokers {
                ret.Brokers = append(ret.Brokers, string(b))
            }
            return ret, nil
        }
    }

    // randomly pick up to 10 brokers, and find the ones with the lightest load
    selectedBrokers, err := broker.selectBrokers()
    if err != nil {
        return ret, err
    }

    // save the allocated brokers info for this segment on the filer
    if err := broker.saveSegmentBrokersOnFiler(segment, selectedBrokers); err != nil {
        return ret, err
    }

    for _, b := range selectedBrokers {
        ret.Brokers = append(ret.Brokers, string(b))
    }
    return ret, nil
}

// CheckSegmentStatus is meant to report whether this broker is actively serving the given segment.
func (broker *MessageQueueBroker) CheckSegmentStatus(c context.Context, request *mq_pb.CheckSegmentStatusRequest) (*mq_pb.CheckSegmentStatusResponse, error) {
    ret := &mq_pb.CheckSegmentStatusResponse{}
    // TODO add in memory active segment
    return ret, nil
}

// CheckBrokerLoad is meant to report this broker's current message and byte counts.
func (broker *MessageQueueBroker) CheckBrokerLoad(c context.Context, request *mq_pb.CheckBrokerLoadRequest) (*mq_pb.CheckBrokerLoadResponse, error) {
    ret := &mq_pb.CheckBrokerLoadResponse{}
    // TODO read broker's load
    return ret, nil
}

// createOrUpdateTopicPartitions creates the topic partitions on the broker
// 1. check
func (broker *MessageQueueBroker) createOrUpdateTopicPartitions(topic *topic.Topic, prevAssignments []*mq_pb.BrokerPartitionAssignment) (err error) {
    // create or update each partition
    if prevAssignments == nil {
        broker.createOrUpdateTopicPartition(topic, nil)
    } else {
        for _, brokerPartitionAssignment := range prevAssignments {
            broker.createOrUpdateTopicPartition(topic, brokerPartitionAssignment)
        }
    }
    return nil
}

func (broker *MessageQueueBroker) createOrUpdateTopicPartition(topic *topic.Topic, oldAssignment *mq_pb.BrokerPartitionAssignment) (newAssignment *mq_pb.BrokerPartitionAssignment) {
    shouldCreate := broker.confirmBrokerPartitionAssignment(topic, oldAssignment)
    if !shouldCreate {

    }
    return
}
func (broker *MessageQueueBroker) confirmBrokerPartitionAssignment(topic *topic.Topic, oldAssignment *mq_pb.BrokerPartitionAssignment) (shouldCreate bool) {
    if oldAssignment == nil {
        return true
    }
    for _, b := range oldAssignment.FollowerBrokers {
        pb.WithBrokerGrpcClient(false, b, broker.grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error {
            _, err := client.CheckTopicPartitionsStatus(context.Background(), &mq_pb.CheckTopicPartitionsStatusRequest{
                Namespace:                 string(topic.Namespace),
                Topic:                     topic.Name,
                BrokerPartitionAssignment: oldAssignment,
                ShouldCancelIfNotMatch:    true,
            })
            if err != nil {
                shouldCreate = true
            }
            return nil
        })
    }
    return
}

// checkSegmentsOnBrokers asks each broker hosting the segment whether the segment is still active.
func (broker *MessageQueueBroker) checkSegmentsOnBrokers(segment *topic.Segment, brokers []pb.ServerAddress) (active bool, err error) {
    var wg sync.WaitGroup

    // assume the segment is active unless a broker reports otherwise
    active = true

    for _, candidate := range brokers {
        wg.Add(1)
        go func(candidate pb.ServerAddress) {
            defer wg.Done()
            broker.withBrokerClient(false, candidate, func(client mq_pb.SeaweedMessagingClient) error {
                resp, checkErr := client.CheckSegmentStatus(context.Background(), &mq_pb.CheckSegmentStatusRequest{
                    Segment: &mq_pb.Segment{
                        Namespace: string(segment.Topic.Namespace),
                        Topic:     segment.Topic.Name,
                        Id:        segment.Id,
                    },
                })
                if checkErr != nil {
                    err = checkErr
                    glog.V(0).Infof("check segment status on %s: %v", candidate, checkErr)
                    return nil
                }
                if !resp.IsActive {
                    active = false
                }
                return nil
            })
        }(candidate)
    }
    wg.Wait()
    return
}

// selectBrokers picks up to 3 of the most lightly loaded brokers
// out of up to 10 candidates reported by the master.
func (broker *MessageQueueBroker) selectBrokers() (brokers []pb.ServerAddress, err error) {
    candidates, err := broker.selectCandidatesFromMaster(10)
    if err != nil {
        return
    }
    brokers, err = broker.pickLightestCandidates(candidates, 3)
    return
}

// selectCandidatesFromMaster lists up to limit brokers of this filer group from the master.
func (broker *MessageQueueBroker) selectCandidatesFromMaster(limit int32) (candidates []pb.ServerAddress, err error) {
    err = broker.withMasterClient(false, broker.MasterClient.GetMaster(), func(client master_pb.SeaweedClient) error {
        resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
            ClientType: cluster.BrokerType,
            FilerGroup: broker.option.FilerGroup,
            Limit:      limit,
        })
        if err != nil {
            return err
        }
        if len(resp.ClusterNodes) == 0 {
            return nil
        }
        for _, node := range resp.ClusterNodes {
            candidates = append(candidates, pb.ServerAddress(node.Address))
        }
        return nil
    })
    return
}

type CandidateStatus struct {
    address      pb.ServerAddress
    messageCount int64
    bytesCount   int64
    load         int64
}

func (broker *MessageQueueBroker) pickLightestCandidates(candidates []pb.ServerAddress, limit int) (selected []pb.ServerAddress, err error) {

    if len(candidates) <= limit {
        return candidates, nil
    }

    candidateStatuses, err := broker.checkBrokerStatus(candidates)
    if err != nil {
        return nil, err
    }

    sort.Slice(candidateStatuses, func(i, j int) bool {
        return candidateStatuses[i].load < candidateStatuses[j].load
    })

    for i, candidate := range candidateStatuses {
        if i >= limit {
            break
        }
        selected = append(selected, candidate.address)
    }

    return
}

// checkBrokerStatus queries each candidate broker concurrently via CheckBrokerLoad
// and scores each one as messageCount plus one point per 64KiB of bytes.
func (broker *MessageQueueBroker) checkBrokerStatus(candidates []pb.ServerAddress) (candidateStatuses []*CandidateStatus, err error) {

    candidateStatuses = make([]*CandidateStatus, len(candidates))
    var wg sync.WaitGroup
    for i, candidate := range candidates {
        wg.Add(1)
        go func(i int, candidate pb.ServerAddress) {
            defer wg.Done()
            err = broker.withBrokerClient(false, candidate, func(client mq_pb.SeaweedMessagingClient) error {
                resp, checkErr := client.CheckBrokerLoad(context.Background(), &mq_pb.CheckBrokerLoadRequest{})
                if checkErr != nil {
                    err = checkErr
                    return err
                }
                candidateStatuses[i] = &CandidateStatus{
                    address:      candidate,
                    messageCount: resp.MessageCount,
                    bytesCount:   resp.BytesCount,
                    load:         resp.MessageCount + resp.BytesCount/(64*1024),
                }
                return nil
            })
        }(i, candidate)
    }
    wg.Wait()
    return
}
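
As a rough illustration of how the new admin API might be called, here is a minimal client-side sketch. It assumes FindBrokerLeader is exposed on the SeaweedMessaging gRPC service (the handler above suggests it is), that pb.WithBrokerGrpcClient takes a plain address string and a grpc.DialOption as in the diff and returns an error like the other pb.With...Client helpers, and that the broker address is just a local placeholder.

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    "github.com/seaweedfs/seaweedfs/weed/pb"
    "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
)

func main() {
    // placeholder broker address and insecure dial option for local testing
    brokerAddress := "localhost:17777"
    dialOption := grpc.WithTransportCredentials(insecure.NewCredentials())

    // mirrors the pb.WithBrokerGrpcClient(false, addr, dialOption, fn) pattern used in the diff
    err := pb.WithBrokerGrpcClient(false, brokerAddress, dialOption, func(client mq_pb.SeaweedMessagingClient) error {
        resp, err := client.FindBrokerLeader(context.Background(), &mq_pb.FindBrokerLeaderRequest{
            FilerGroup: "", // default filer group
        })
        if err != nil {
            return err
        }
        fmt.Println("broker leader:", resp.Broker)
        return nil
    })
    if err != nil {
        fmt.Println("lookup failed:", err)
    }
}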
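
For readers who want to experiment with the selection policy in isolation, the following standalone sketch reimplements the scoring and pick-the-lightest step with plain Go types. The struct shape and the messageCount + bytesCount/64KiB formula come from the diff above; the sample data, names, and ports are purely illustrative.

package main

import (
    "fmt"
    "sort"
)

// candidateStatus mirrors the fields of the broker's CandidateStatus.
type candidateStatus struct {
    address      string
    messageCount int64
    bytesCount   int64
    load         int64
}

// pickLightest returns the addresses of the limit candidates with the lowest load,
// using the same score as checkBrokerStatus: messages plus one point per 64KiB of bytes.
func pickLightest(candidates []candidateStatus, limit int) []string {
    for i := range candidates {
        candidates[i].load = candidates[i].messageCount + candidates[i].bytesCount/(64*1024)
    }
    sort.Slice(candidates, func(i, j int) bool {
        return candidates[i].load < candidates[j].load
    })
    if limit > len(candidates) {
        limit = len(candidates)
    }
    selected := make([]string, 0, limit)
    for _, c := range candidates[:limit] {
        selected = append(selected, c.address)
    }
    return selected
}

func main() {
    // illustrative load numbers only
    candidates := []candidateStatus{
        {address: "broker-1:17777", messageCount: 1200, bytesCount: 8 << 20},
        {address: "broker-2:17777", messageCount: 300, bytesCount: 64 << 20},
        {address: "broker-3:17777", messageCount: 50, bytesCount: 1 << 20},
    }
    fmt.Println(pickLightest(candidates, 2))
}

With these sample numbers the scores are 1328, 1324, and 66, so broker-3 and broker-2 are picked.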