mirror of https://github.com/seaweedfs/seaweedfs.git

coordinator receives unassignment ack

@@ -38,11 +38,16 @@ func (b *MessageQueueBroker) SubscriberToSubCoordinator(stream mq_pb.SeaweedMess
 	go func() {
 		// process ack messages
 		for {
-			_, err := stream.Recv()
+			req, err := stream.Recv()
 			if err != nil {
 				glog.V(0).Infof("subscriber %s/%s/%s receive: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err)
 			}
 
+			if ackUnAssignment := req.GetAckUnAssignment(); ackUnAssignment != nil {
+				glog.V(0).Infof("subscriber %s/%s/%s ack close of %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, ackUnAssignment)
+				cgi.AckUnAssignment(ackUnAssignment)
+			}
+
 			select {
 			case <-ctx.Done():
 				err := ctx.Err()
@@ -56,6 +56,15 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() {
 			return err
 		}
 
+		go func() {
+			for reply := range sub.brokerPartitionAssignmentAckChan {
+				if err := stream.Send(reply); err != nil {
+					glog.V(0).Infof("subscriber %s reply: %v", sub.ContentConfig.Topic, err)
+					return
+				}
+			}
+		}()
+
 		// keep receiving messages from the sub coordinator
 		for {
 			resp, err := stream.Recv()
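
Note on the forwarder goroutine above: grpc-go permits only one goroutine at a time to call Send on a stream, so routing every ack through brokerPartitionAssignmentAckChan and a single sender goroutine is the usual fan-in pattern when several partition processors may reply concurrently. A minimal, self-contained sketch of that pattern follows; sendFn, ackFanIn, and the string payloads are illustrative stand-ins, not code from this commit.

package main

import (
	"fmt"
	"sync"
)

// ackFanIn returns a channel that many producers may write to; a single
// goroutine drains it and performs all sends, mirroring how the commit
// funnels acks onto one gRPC stream.
func ackFanIn(sendFn func(msg string) error) (chan<- string, *sync.WaitGroup) {
	ch := make(chan string, 1024) // buffered, like the ack channel in the commit
	var done sync.WaitGroup
	done.Add(1)
	go func() {
		defer done.Done()
		for msg := range ch {
			if err := sendFn(msg); err != nil {
				fmt.Println("send failed:", err)
				return
			}
		}
	}()
	return ch, &done
}

func main() {
	acks, done := ackFanIn(func(msg string) error {
		fmt.Println("sent:", msg) // stand-in for stream.Send
		return nil
	})
	var producers sync.WaitGroup
	for p := 0; p < 3; p++ { // e.g. one producer per partition processor
		producers.Add(1)
		go func(p int) {
			defer producers.Done()
			acks <- fmt.Sprintf("ack un-assignment for partition %d", p)
		}(p)
	}
	producers.Wait()
	close(acks)
	done.Wait()
}
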
@@ -65,6 +65,13 @@ func (sub *TopicSubscriber) startProcessors() {
 			} else {
 				glog.V(0).Infof("subscriber %s/%s partition %+v at %v completed", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker)
 			}
+			sub.brokerPartitionAssignmentAckChan <- &mq_pb.SubscriberToSubCoordinatorRequest{
+				Message: &mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignment{
+					AckUnAssignment: &mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage{
+						Partition: assigned.Partition,
+					},
+				},
+			}
 		}(assigned.PartitionAssignment, topicPartition)
 	}
 	if unAssignment := message.GetUnAssignment(); unAssignment != nil {
@@ -27,25 +27,27 @@ type OnEachMessageFunc func(key, value []byte) (err error)
 type OnCompletionFunc func()
 
 type TopicSubscriber struct {
-	SubscriberConfig              *SubscriberConfiguration
-	ContentConfig                 *ContentConfiguration
-	brokerPartitionAssignmentChan chan *mq_pb.SubscriberToSubCoordinatorResponse
-	OnEachMessageFunc             OnEachMessageFunc
-	OnCompletionFunc              OnCompletionFunc
-	bootstrapBrokers              []string
-	waitForMoreMessage            bool
-	activeProcessors              map[topic.Partition]*ProcessorState
-	activeProcessorsLock          sync.Mutex
+	SubscriberConfig                 *SubscriberConfiguration
+	ContentConfig                    *ContentConfiguration
+	brokerPartitionAssignmentChan    chan *mq_pb.SubscriberToSubCoordinatorResponse
+	brokerPartitionAssignmentAckChan chan *mq_pb.SubscriberToSubCoordinatorRequest
+	OnEachMessageFunc                OnEachMessageFunc
+	OnCompletionFunc                 OnCompletionFunc
+	bootstrapBrokers                 []string
+	waitForMoreMessage               bool
+	activeProcessors                 map[topic.Partition]*ProcessorState
+	activeProcessorsLock             sync.Mutex
 }
 
 func NewTopicSubscriber(bootstrapBrokers []string, subscriber *SubscriberConfiguration, content *ContentConfiguration) *TopicSubscriber {
 	return &TopicSubscriber{
-		SubscriberConfig:              subscriber,
-		ContentConfig:                 content,
-		brokerPartitionAssignmentChan: make(chan *mq_pb.SubscriberToSubCoordinatorResponse, 1024),
-		bootstrapBrokers:              bootstrapBrokers,
-		waitForMoreMessage:            true,
-		activeProcessors:              make(map[topic.Partition]*ProcessorState),
+		SubscriberConfig:                 subscriber,
+		ContentConfig:                    content,
+		brokerPartitionAssignmentChan:    make(chan *mq_pb.SubscriberToSubCoordinatorResponse, 1024),
+		brokerPartitionAssignmentAckChan: make(chan *mq_pb.SubscriberToSubCoordinatorRequest, 1024),
+		bootstrapBrokers:                 bootstrapBrokers,
+		waitForMoreMessage:               true,
+		activeProcessors:                 make(map[topic.Partition]*ProcessorState),
 	}
 }
@@ -1,6 +1,7 @@
 package sub_coordinator
 
 import (
+	"fmt"
 	cmap "github.com/orcaman/concurrent-map/v2"
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
@@ -16,6 +17,11 @@ type ConsumerGroupInstance struct {
 	ResponseChan      chan *mq_pb.SubscriberToSubCoordinatorResponse
 	MaxPartitionCount int32
 }
+
+func (i ConsumerGroupInstance) AckUnAssignment(assignment *mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage) {
+	fmt.Printf("ack unassignment %v\n", assignment)
+}
+
 type ConsumerGroup struct {
 	topic topic.Topic
 	// map a consumer group instance id to a consumer group instance
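
In this commit AckUnAssignment is still a stub that only prints the message. A minimal sketch of one way the coordinator side could actually record which partitions have acknowledged un-assignment, using a mutex-guarded set keyed by partition range; every name below (partitionKey, unAssignmentTracker, markAcked, hasAcked) is hypothetical and not part of the commit.

package sketch

import "sync"

// partitionKey is a hypothetical comparable key derived from a partition's range;
// it is not a type in this commit.
type partitionKey struct {
	RangeStart, RangeStop int32
}

// unAssignmentTracker sketches how a ConsumerGroupInstance could remember which
// partitions have confirmed their un-assignment, instead of only printing the ack.
type unAssignmentTracker struct {
	mu    sync.Mutex
	acked map[partitionKey]bool
}

func newUnAssignmentTracker() *unAssignmentTracker {
	return &unAssignmentTracker{acked: make(map[partitionKey]bool)}
}

// markAcked would run where the commit currently has the fmt.Printf stub.
func (t *unAssignmentTracker) markAcked(rangeStart, rangeStop int32) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.acked[partitionKey{rangeStart, rangeStop}] = true
}

// hasAcked lets the coordinator check whether a partition is safe to reassign.
func (t *unAssignmentTracker) hasAcked(rangeStart, rangeStop int32) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.acked[partitionKey{rangeStart, rangeStop}]
}
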
@@ -177,8 +177,12 @@ message SubscriberToSubCoordinatorRequest {
     // Default is 10 seconds.
     int32 rebalance_seconds = 5;
   }
+  message AckUnAssignmentMessage {
+    Partition partition = 1;
+  }
   oneof message {
     InitMessage init = 1;
+    AckUnAssignmentMessage ack_un_assignment = 2;
   }
 }
 message SubscriberToSubCoordinatorResponse {
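
Taken together, the hunks add one round trip: when a partition processor finishes after an un-assignment, the subscriber queues an AckUnAssignmentMessage on brokerPartitionAssignmentAckChan, the forwarder goroutine sends it on the stream, and the broker dispatches it via req.GetAckUnAssignment() to cgi.AckUnAssignment. The following self-contained sketch compresses that flow, with plain channels standing in for the two directions of the gRPC stream and simplified local types instead of the generated mq_pb messages.

package main

import "fmt"

// Simplified stand-ins for the proto messages; the real ones live in mq_pb.
type partition struct{ RangeStart, RangeStop int32 }

type coordinatorMsg struct{ UnAssignment *partition }  // broker -> subscriber
type subscriberMsg struct{ AckUnAssignment *partition } // subscriber -> broker

func main() {
	toSubscriber := make(chan coordinatorMsg, 1)    // stands in for stream.Recv on the client
	toCoordinator := make(chan subscriberMsg, 1024) // stands in for the ack channel + stream.Send

	// Subscriber side: on an un-assignment, finish the partition and queue an ack.
	go func() {
		for msg := range toSubscriber {
			if p := msg.UnAssignment; p != nil {
				// ... stop the partition processor here ...
				toCoordinator <- subscriberMsg{AckUnAssignment: p}
			}
		}
		close(toCoordinator)
	}()

	// Broker side: request the un-assignment, then wait for the ack, mirroring
	// the req.GetAckUnAssignment() nil-check followed by cgi.AckUnAssignment(...).
	toSubscriber <- coordinatorMsg{UnAssignment: &partition{RangeStart: 0, RangeStop: 1024}}
	close(toSubscriber)
	for reply := range toCoordinator {
		if ack := reply.AckUnAssignment; ack != nil {
			fmt.Printf("coordinator received un-assignment ack for range [%d,%d)\n", ack.RangeStart, ack.RangeStop)
		}
	}
}
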
File diff suppressed because it is too large