Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2025-09-24 17:23:40 +08:00
go fmt
@@ -67,7 +67,7 @@ func (mc *MemChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64)
 			maxStop = max(maxStop, logicStop)
 
 			if t.TsNs >= tsNs {
-				println("read new data1", t.TsNs - tsNs, "ns")
+				println("read new data1", t.TsNs-tsNs, "ns")
 			}
 		}
 	}
@@ -137,7 +137,7 @@ func (sc *SwapFileChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop in
 			maxStop = max(maxStop, logicStop)
 
 			if t.TsNs >= tsNs {
-				println("read new data2", t.TsNs - tsNs, "ns")
+				println("read new data2", t.TsNs-tsNs, "ns")
 			}
 		}
 	}
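These two hunks show gofmt's depth-based operator spacing: when a binary expression sits inside a call's argument list, the printer drops the spaces around the operator. Below is a minimal, hypothetical sketch (not taken from the SeaweedFS sources) that reproduces the same normalization when run through gofmt.

package main

// Hypothetical example: gofmt rewrites the spaced subtraction shown in the
// comment below to the tight form used in the call, because the expression
// is nested one level deep inside an argument list.
func main() {
	a, b := int64(2), int64(1)
	// before gofmt: println("delta", a - b, "ns")
	println("delta", a-b, "ns")
}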
@@ -35,7 +35,6 @@ func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb.
 		}
 	}
 
-
 	t := topic.FromPbTopic(request.Topic)
 	var readErr, assignErr error
 	resp, readErr = b.readTopicConfFromFiler(t)
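The only change in this hunk is dropping one of two consecutive blank lines: the Go printer keeps at most a single blank line between statements, so gofmt collapses longer runs. A hedged sketch of the same effect:

package main

// Hypothetical example: if two blank lines separated the statements below,
// gofmt would keep only one, as in the hunk above.
func main() {
	x := 1

	println(x)
}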
@@ -85,7 +85,7 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
 		lastAckTime := time.Now()
 		for !isClosed {
 			receivedSequence = atomic.LoadInt64(&localTopicPartition.AckTsNs)
-			if acknowledgedSequence < receivedSequence && (receivedSequence - acknowledgedSequence >= ackInterval || time.Since(lastAckTime) > 1*time.Second){
+			if acknowledgedSequence < receivedSequence && (receivedSequence-acknowledgedSequence >= ackInterval || time.Since(lastAckTime) > 1*time.Second) {
 				acknowledgedSequence = receivedSequence
 				response := &mq_pb.PublishMessageResponse{
 					AckSequence: acknowledgedSequence,
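Here gofmt tightens the higher-precedence subtraction inside the parenthesized condition while keeping spaces around `<`, `>=`, and `||`, and it inserts the required space before the opening brace. A small, hypothetical sketch of the same rewrite:

package main

import "time"

// Hypothetical example mirroring the ack condition above.
func main() {
	var acked, recv, interval int64 = 0, 10, 5
	last := time.Now()
	// before gofmt: if acked < recv && (recv - acked >= interval || time.Since(last) > 1*time.Second){
	if acked < recv && (recv-acked >= interval || time.Since(last) > 1*time.Second) {
		println("send ack")
	}
}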
@@ -101,7 +101,6 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
 		}
 	}()
 
-
 	// process each published messages
 	clientName := fmt.Sprintf("%v-%4d/%s/%v", findClientAddress(stream.Context()), rand.Intn(10000), initMessage.Topic, initMessage.Partition)
 	localTopicPartition.Publishers.AddPublisher(clientName, topic.NewLocalPublisher())
@@ -17,6 +17,7 @@ type memBuffer struct {
 	startTime time.Time
 	stopTime  time.Time
 }
+
 func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_PublishFollowMeServer) (err error) {
 	var req *mq_pb.PublishFollowMeRequest
 	req, err = stream.Recv()
@@ -84,7 +85,6 @@ func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_Publi
 		}
 	}
 
-
 	t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition)
 
 	logBuffer.ShutdownLogBuffer()
@@ -97,7 +97,6 @@ func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_Publi
 	partitionGeneration := time.Unix(0, p.UnixTimeNs).UTC().Format(topic.TIME_FORMAT)
 	partitionDir := fmt.Sprintf("%s/%s/%04d-%04d", topicDir, partitionGeneration, p.RangeStart, p.RangeStop)
 
-
 	// flush the remaining messages
 	inMemoryBuffers.CloseInput()
 	for mem, found := inMemoryBuffers.Dequeue(); found; mem, found = inMemoryBuffers.Dequeue() {
@@ -45,7 +45,7 @@ func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, partition *mq_pb.Par
 		b.accessLock.Lock()
 		defer b.accessLock.Unlock()
 		p := topic.FromPbPartition(partition)
-		if localPartition:=b.localTopicManager.GetLocalPartition(t, p); localPartition!=nil {
+		if localPartition := b.localTopicManager.GetLocalPartition(t, p); localPartition != nil {
 			localPartition.NotifyLogFlushed(logBuffer.LastFlushTsNs)
 		}
 
@@ -10,7 +10,7 @@ type Schema struct {
 }
 
 func NewSchema(recordType *schema_pb.RecordType) (*Schema, error) {
-	fieldMap := make( map[string]*schema_pb.Field)
+	fieldMap := make(map[string]*schema_pb.Field)
 	for _, field := range recordType.Fields {
 		fieldMap[field.Name] = field
 	}
@@ -31,7 +31,6 @@ func toParquetFieldType(fieldType *schema_pb.Type) (dataType parquet.Node, err e
 		return nil, fmt.Errorf("unknown field type: %T", fieldType.Kind)
 	}
 
-
 	return dataType, err
 }
 
@@ -47,7 +47,7 @@ func toRecordValue(recordType *schema_pb.RecordType, levels *ParquetLevels, valu
 func toListValue(listType *schema_pb.ListType, levels *ParquetLevels, values []parquet.Value, valueIndex int) (listValue *schema_pb.Value, endValueIndex int, err error) {
 	listValues := make([]*schema_pb.Value, 0)
 	var value *schema_pb.Value
-	for ;valueIndex < len(values); {
+	for valueIndex < len(values) {
 		if values[valueIndex].Column() != levels.startColumnIndex {
 			break
 		}
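The loop rewrite above is the printer dropping the empty init and post clauses of a three-clause for statement, leaving only the condition. A small hedged example, again hypothetical:

package main

// Hypothetical example: gofmt prints only the condition when the for loop's
// init and post statements are empty, as in the hunk above.
func main() {
	values := []int{1, 2, 3}
	sum, i := 0, 0
	// before gofmt: for ;i < len(values); {
	for i < len(values) {
		sum += values[i]
		i++
	}
	println(sum)
}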
@@ -67,19 +67,19 @@ func toScalarValue(scalarType schema_pb.ScalarType, levels *ParquetLevels, value
 	}
 	switch scalarType {
 	case schema_pb.ScalarType_BOOL:
-		return &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: value.Boolean()}}, valueIndex+1, nil
+		return &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: value.Boolean()}}, valueIndex + 1, nil
 	case schema_pb.ScalarType_INT32:
-		return &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: value.Int32()}}, valueIndex+1, nil
+		return &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: value.Int32()}}, valueIndex + 1, nil
 	case schema_pb.ScalarType_INT64:
-		return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: value.Int64()}}, valueIndex+1, nil
+		return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: value.Int64()}}, valueIndex + 1, nil
 	case schema_pb.ScalarType_FLOAT:
-		return &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: value.Float()}}, valueIndex+1, nil
+		return &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: value.Float()}}, valueIndex + 1, nil
 	case schema_pb.ScalarType_DOUBLE:
-		return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: value.Double()}}, valueIndex+1, nil
+		return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: value.Double()}}, valueIndex + 1, nil
 	case schema_pb.ScalarType_BYTES:
-		return &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: value.ByteArray()}}, valueIndex+1, nil
+		return &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: value.ByteArray()}}, valueIndex + 1, nil
 	case schema_pb.ScalarType_STRING:
-		return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: string(value.ByteArray())}}, valueIndex+1, nil
+		return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: string(value.ByteArray())}}, valueIndex + 1, nil
 	}
 	return nil, valueIndex, fmt.Errorf("unsupported scalar type: %v", scalarType)
 }
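In contrast to the earlier println hunks, the `valueIndex+1` operands here gain spaces: at the top level of a return list, gofmt writes binary operators with surrounding spaces rather than the tightened form it uses for expressions nested inside call arguments. A hedged illustration:

package main

// Hypothetical example: at statement level gofmt keeps spaces around "+",
// while the same expression nested inside a call would be printed tight
// (compare the println hunks above).
func next(valueIndex int) (int, error) {
	// before gofmt: return valueIndex+1, nil
	return valueIndex + 1, nil
}

func main() {
	n, _ := next(1)
	println(n)
}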
@@ -77,7 +77,8 @@ func NewDefaultVolumeGrowth() *VolumeGrowth {
 // given copyCount, how many logical volumes to create
 func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
 	switch copyCount {
-	case 1: count = VolumeGrowStrategy.Copy1Count
+	case 1:
+		count = VolumeGrowStrategy.Copy1Count
 	case 2:
 		count = VolumeGrowStrategy.Copy2Count
 	case 3:
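Here gofmt moves the single statement of `case 1:` onto its own indented line, matching the layout of the other cases. A hedged sketch of the same reflow, using a hypothetical stand-in for findVolumeCount:

package main

// findCount is a hypothetical stand-in for findVolumeCount.
func findCount(copyCount int) (count int) {
	switch copyCount {
	// before gofmt: case 1: count = 7
	case 1:
		count = 7 // gofmt places the case body on the next line
	case 2:
		count = 6
	}
	return
}

func main() { println(findCount(1)) }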
@@ -381,7 +381,7 @@ func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) (total, a
 			}
 			active++
 			info, _ := dn.GetVolumesById(v)
-			if float64(info.Size) > float64(vl.volumeSizeLimit)* VolumeGrowStrategy.Threshold{
+			if float64(info.Size) > float64(vl.volumeSizeLimit)*VolumeGrowStrategy.Threshold {
 				crowded++
 			}
 		}
@@ -19,7 +19,7 @@ func TestNewLogBufferFirstBuffer(t *testing.T) {
 	}, nil, func() {
 	})
 
-	startTime := MessagePosition{Time:time.Now()}
+	startTime := MessagePosition{Time: time.Now()}
 
 	messageSize := 1024
 	messageCount := 5000
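gofmt also normalizes composite-literal keys to `Key: value` with a space after the colon, which is the whole change in the hunk above. A hedged sketch:

package main

import "time"

// position is a hypothetical stand-in for MessagePosition.
type position struct {
	Time time.Time
}

func main() {
	// before gofmt: p := position{Time:time.Now()}
	p := position{Time: time.Now()}
	println(p.Time.Unix())
}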
@@ -38,7 +38,7 @@ func TestNewLogBufferFirstBuffer(t *testing.T) {
 			println("processed all messages")
 			return true, io.EOF
 		}
-		return false,nil
+		return false, nil
 	})
 
 	fmt.Printf("before flush: sent %d received %d\n", messageCount, receivedMessageCount)