Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2025-10-15 20:06:19 +08:00
go fmt
@@ -44,7 +44,7 @@ func main() {
 	subscriber := sub_client.NewTopicSubscriber(brokers, subscriberConfig, contentConfig, processorConfig)
 
 	counter := 0
-	subscriber.SetEachMessageFunc(func(key, value []byte) (error) {
+	subscriber.SetEachMessageFunc(func(key, value []byte) error {
 		counter++
 		println(string(key), "=>", string(value), counter)
 		return nil
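gofmt normalizes result lists: parentheses around a single unnamed return type are redundant and are dropped, which is all this hunk changes. Parentheses stay only for multiple or named results, as the subscriber's own callback type (shown later in this diff) illustrates; the lowercase eachMessageFunc below is a hypothetical counterpart added for contrast:

    type OnEachMessageFunc func(key, value []byte) (err error) // named result: parens required
    type eachMessageFunc func(key, value []byte) error         // single unnamed result: no parens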
@@ -63,7 +63,7 @@ func main() {
 	}
 
 	processorConfig := sub_client.ProcessorConfiguration{
-		MaxPartitionCount: 3,
+		MaxPartitionCount:       3,
 		PerPartitionConcurrency: 1,
 	}
 
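For composite literals like this one, gofmt pads the keys in a contiguous run of entries so the values line up; MaxPartitionCount gains padding because PerPartitionConcurrency is the longer key. Per the field comments later in this diff, the two knobs are independent: MaxPartitionCount bounds how many partitions one subscriber processes concurrently, while PerPartitionConcurrency bounds concurrent messages within each partition, so with the values above at most 3 * 1 = 3 messages are in flight at once.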
@@ -71,7 +71,7 @@ func main() {
 	subscriber := sub_client.NewTopicSubscriber(brokers, subscriberConfig, contentConfig, processorConfig)
 
 	counter := 0
-	subscriber.SetEachMessageFunc(func(key, value []byte) (error) {
+	subscriber.SetEachMessageFunc(func(key, value []byte) error {
 		counter++
 		record := &schema_pb.RecordValue{}
 		proto.Unmarshal(value, record)
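One thing the formatting pass leaves untouched is that the example discards proto.Unmarshal's error. A slightly more defensive callback body, as a sketch (failing the message on a decode error is an assumption, not what the example does):

    subscriber.SetEachMessageFunc(func(key, value []byte) error {
        counter++
        record := &schema_pb.RecordValue{}
        if err := proto.Unmarshal(value, record); err != nil {
            return err // a non-nil return leaves the message unacknowledged (see the onEachPartition hunks below)
        }
        return nil
    })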
@@ -142,11 +142,11 @@ func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) erro
 	if err = publishClient.Send(&mq_pb.PublishMessageRequest{
 		Message: &mq_pb.PublishMessageRequest_Init{
 			Init: &mq_pb.PublishMessageRequest_InitMessage{
-				Topic: p.config.Topic.ToPbTopic(),
-				Partition: job.Partition,
-				AckInterval: 128,
-				FollowerBroker: job.FollowerBroker,
-				PublisherName: p.config.PublisherName,
+				Topic:          p.config.Topic.ToPbTopic(),
+				Partition:      job.Partition,
+				AckInterval:    128,
+				FollowerBroker: job.FollowerBroker,
+				PublisherName:  p.config.PublisherName,
 			},
 		},
 	}); err != nil {
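The triple nesting here is the standard protoc-gen-go encoding of a proto oneof (inferred from the generated wrapper-type naming): the Message field holds an interface, and each oneof case gets its own wrapper struct, here PublishMessageRequest_Init wrapping the InitMessage payload. Stripped of the publish-specific fields, the shape is:

    // Selecting a oneof case in Go always goes through the generated wrapper:
    req := &mq_pb.PublishMessageRequest{
        Message: &mq_pb.PublishMessageRequest_Init{ // wrapper selecting the "init" case
            Init: &mq_pb.PublishMessageRequest_InitMessage{
                Topic: p.config.Topic.ToPbTopic(), // payload fields as in the hunk above
            },
        },
    }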
@@ -51,7 +51,7 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() {
 			ConsumerGroup:           sub.SubscriberConfig.ConsumerGroup,
 			ConsumerGroupInstanceId: sub.SubscriberConfig.ConsumerGroupInstanceId,
 			Topic:                   sub.ContentConfig.Topic.ToPbTopic(),
-			MaxPartitionCount: sub.ProcessorConfig.MaxPartitionCount,
+			MaxPartitionCount:       sub.ProcessorConfig.MaxPartitionCount,
 		},
 	},
 }); err != nil {
@@ -105,12 +105,12 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
 				Partition: assigned.Partition,
 				StartType: mq_pb.PartitionOffsetStartType_EARLIEST_IN_MEMORY,
 			},
-			Filter: sub.ContentConfig.Filter,
 			FollowerBroker: assigned.FollowerBroker,
-			Concurrency: sub.ProcessorConfig.PerPartitionConcurrency,
+			Filter:         sub.ContentConfig.Filter,
+			Concurrency:    sub.ProcessorConfig.PerPartitionConcurrency,
 		},
 	},
-});err != nil {
+}); err != nil {
 	glog.V(0).Infof("subscriber %s connected to partition %+v at %v: %v", sub.ContentConfig.Topic, assigned.Partition, assigned.LeaderBroker, err)
 }
 
@@ -120,16 +120,16 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
 		defer sub.OnCompletionFunc()
 	}
 
-	partitionOffsetChan:= make(chan int64, 1024)
+	partitionOffsetChan := make(chan int64, 1024)
 	defer func() {
 		close(partitionOffsetChan)
 	}()
 
-	concurrentPartitionLimit := int(sub.ProcessorConfig.MaxPartitionCount)
-	if concurrentPartitionLimit <= 0 {
-		concurrentPartitionLimit = 1
+	perPartitionConcurrency := int(sub.ProcessorConfig.PerPartitionConcurrency)
+	if perPartitionConcurrency <= 0 {
+		perPartitionConcurrency = 1
 	}
-	executors := util.NewLimitedConcurrentExecutor(concurrentPartitionLimit)
+	executors := util.NewLimitedConcurrentExecutor(perPartitionConcurrency)
 
 	go func() {
 		for ack := range partitionOffsetChan {
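Beyond formatting, this hunk fixes which knob sizes the worker pool: the per-partition executor now takes its limit from PerPartitionConcurrency rather than MaxPartitionCount, with a floor of 1. util.NewLimitedConcurrentExecutor is SeaweedFS's bounded executor; a minimal semaphore-based sketch of the same idea (not the actual implementation):

    // limitedExecutor runs at most `limit` jobs at once; the buffered channel
    // acts as a counting semaphore.
    type limitedExecutor struct{ sem chan struct{} }

    func newLimitedExecutor(limit int) *limitedExecutor {
        return &limitedExecutor{sem: make(chan struct{}, limit)}
    }

    func (e *limitedExecutor) Execute(job func()) {
        e.sem <- struct{}{} // blocks while `limit` jobs are in flight
        go func() {
            defer func() { <-e.sem }() // free the slot when the job returns
            job()
        }()
    }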
@@ -162,7 +162,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
 			processErr := sub.OnEachMessageFunc(m.Data.Key, m.Data.Value)
 			if processErr == nil {
 				partitionOffsetChan <- m.Data.TsNs
-			}else{
+			} else {
 				lastErr = processErr
 			}
 		})
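Together with the executor hunk above, this spells out the acknowledgement contract: a nil return from the message callback pushes m.Data.TsNs into partitionOffsetChan, which the goroutine draining that channel turns into broker acks (the drain loop is only partially visible in this diff), while a non-nil return only records lastErr and leaves the offset where it was. A callback therefore opts a message out of acknowledgement simply by returning its error (handleRecord below is a hypothetical handler):

    subscriber.SetEachMessageFunc(func(key, value []byte) error {
        if err := handleRecord(key, value); err != nil {
            return err // not acked; surfaces as lastErr in onEachPartition
        }
        return nil // acked via partitionOffsetChan
    })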
@@ -9,7 +9,6 @@ import (
 )
 
 type ProcessorState struct {
-
 }
 
 // Subscribe subscribes to a topic's specified partitions.
@@ -22,7 +22,7 @@ type ContentConfiguration struct {
 }
 
 type ProcessorConfiguration struct {
-	MaxPartitionCount int32 // how many partitions to process concurrently
+	MaxPartitionCount       int32 // how many partitions to process concurrently
 	PerPartitionConcurrency int32 // how many messages to process concurrently per partition
 }
 
@@ -30,16 +30,16 @@ type OnEachMessageFunc func(key, value []byte) (err error)
 type OnCompletionFunc func()
 
 type TopicSubscriber struct {
-	SubscriberConfig *SubscriberConfiguration
-	ContentConfig *ContentConfiguration
+	SubscriberConfig              *SubscriberConfiguration
+	ContentConfig                 *ContentConfiguration
 	ProcessorConfig               *ProcessorConfiguration
 	brokerPartitionAssignmentChan chan *mq_pb.BrokerPartitionAssignment
 	OnEachMessageFunc             OnEachMessageFunc
-	OnCompletionFunc OnCompletionFunc
-	bootstrapBrokers []string
-	waitForMoreMessage bool
-	activeProcessors map[topic.Partition]*ProcessorState
-	activeProcessorsLock sync.Mutex
+	OnCompletionFunc              OnCompletionFunc
+	bootstrapBrokers              []string
+	waitForMoreMessage            bool
+	activeProcessors              map[topic.Partition]*ProcessorState
+	activeProcessorsLock          sync.Mutex
 }
 
 func NewTopicSubscriber(bootstrapBrokers []string, subscriber *SubscriberConfiguration, content *ContentConfiguration, processor ProcessorConfiguration) *TopicSubscriber {
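Putting the formatted pieces together, the example subscriber from the top of this diff now reads as follows (brokers, subscriberConfig, and contentConfig as built earlier in the same main):

    processorConfig := sub_client.ProcessorConfiguration{
        MaxPartitionCount:       3, // partitions consumed concurrently
        PerPartitionConcurrency: 1, // concurrent messages within each partition
    }
    subscriber := sub_client.NewTopicSubscriber(brokers, subscriberConfig, contentConfig, processorConfig)

    counter := 0
    subscriber.SetEachMessageFunc(func(key, value []byte) error {
        counter++
        println(string(key), "=>", string(value), counter)
        return nil
    })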