Admin UI: Add message queue to admin UI (#6958)

* add a menu item "Message Queue"
  * move the "brokers" link under it
  * add "topics" and "subscribers", with pages for each

* refactor

* show topic details

* admin UI: display publisher and subscriber info

* remove publishers and subscribers from the topic row pull-down

* collect more stats from publishers and subscribers

* fix layout

* fix publisher name

* add local listeners for mq broker and agent

* render consumer group offsets

* remove subscribers from left menu

* topic with retention

* support editing topic retention

* show retention when listing topics

* create bucket

* Update s3_buckets_templ.go

* embed the static assets into the binary

Fixes https://github.com/seaweedfs/seaweedfs/issues/6964
Author: Chris Lu
Date: 2025-07-11 10:19:27 -07:00 (committed by GitHub)
Commit: 51543bbb87 (parent a9e1f00673)
44 changed files with 8296 additions and 1156 deletions

test/mq/Makefile (new file, 228 lines)

@@ -0,0 +1,228 @@
# SeaweedFS Message Queue Test Makefile

# Build configuration
GO_BUILD_CMD=go build -o bin/$(1) $(2)
GO_RUN_CMD=go run $(1) $(2)

# Default values
AGENT_ADDR?=localhost:16777
TOPIC_NAMESPACE?=test
TOPIC_NAME?=test-topic
PARTITION_COUNT?=4
MESSAGE_COUNT?=100
CONSUMER_GROUP?=test-consumer-group
CONSUMER_INSTANCE?=test-consumer-1

# Create bin directory
$(shell mkdir -p bin)

.PHONY: all build clean producer consumer test help

all: build

# Build targets
build: build-producer build-consumer

build-producer:
	@echo "Building producer..."
	$(call GO_BUILD_CMD,producer,./producer)

build-consumer:
	@echo "Building consumer..."
	$(call GO_BUILD_CMD,consumer,./consumer)

# Run targets
producer: build-producer
	@echo "Starting producer..."
	./bin/producer \
		-agent=$(AGENT_ADDR) \
		-namespace=$(TOPIC_NAMESPACE) \
		-topic=$(TOPIC_NAME) \
		-partitions=$(PARTITION_COUNT) \
		-messages=$(MESSAGE_COUNT) \
		-publisher=test-producer \
		-size=1024 \
		-interval=100ms

consumer: build-consumer
	@echo "Starting consumer..."
	./bin/consumer \
		-agent=$(AGENT_ADDR) \
		-namespace=$(TOPIC_NAMESPACE) \
		-topic=$(TOPIC_NAME) \
		-group=$(CONSUMER_GROUP) \
		-instance=$(CONSUMER_INSTANCE) \
		-max-partitions=10 \
		-window-size=100 \
		-offset=latest \
		-show-messages=true \
		-log-progress=true

# Run producer directly with go run
run-producer:
	@echo "Running producer directly..."
	$(call GO_RUN_CMD,./producer, \
		-agent=$(AGENT_ADDR) \
		-namespace=$(TOPIC_NAMESPACE) \
		-topic=$(TOPIC_NAME) \
		-partitions=$(PARTITION_COUNT) \
		-messages=$(MESSAGE_COUNT) \
		-publisher=test-producer \
		-size=1024 \
		-interval=100ms)

# Run consumer directly with go run
run-consumer:
	@echo "Running consumer directly..."
	$(call GO_RUN_CMD,./consumer, \
		-agent=$(AGENT_ADDR) \
		-namespace=$(TOPIC_NAMESPACE) \
		-topic=$(TOPIC_NAME) \
		-group=$(CONSUMER_GROUP) \
		-instance=$(CONSUMER_INSTANCE) \
		-max-partitions=10 \
		-window-size=100 \
		-offset=latest \
		-show-messages=true \
		-log-progress=true)

# Test scenarios
test: test-basic

test-basic: build
	@echo "Running basic producer/consumer test..."
	@echo "1. Starting consumer in background..."
	./bin/consumer \
		-agent=$(AGENT_ADDR) \
		-namespace=$(TOPIC_NAMESPACE) \
		-topic=$(TOPIC_NAME) \
		-group=$(CONSUMER_GROUP) \
		-instance=$(CONSUMER_INSTANCE) \
		-offset=earliest \
		-show-messages=false \
		-log-progress=true & \
	CONSUMER_PID=$$!; \
	echo "Consumer PID: $$CONSUMER_PID"; \
	sleep 2; \
	echo "2. Starting producer..."; \
	./bin/producer \
		-agent=$(AGENT_ADDR) \
		-namespace=$(TOPIC_NAMESPACE) \
		-topic=$(TOPIC_NAME) \
		-partitions=$(PARTITION_COUNT) \
		-messages=$(MESSAGE_COUNT) \
		-publisher=test-producer \
		-size=1024 \
		-interval=50ms; \
	echo "3. Waiting for consumer to process messages..."; \
	sleep 5; \
	echo "4. Stopping consumer..."; \
	kill $$CONSUMER_PID || true; \
	echo "Test completed!"

test-performance: build
	@echo "Running performance test..."
	@echo "1. Starting consumer in background..."
	./bin/consumer \
		-agent=$(AGENT_ADDR) \
		-namespace=$(TOPIC_NAMESPACE) \
		-topic=perf-test \
		-group=perf-consumer-group \
		-instance=perf-consumer-1 \
		-offset=earliest \
		-show-messages=false \
		-log-progress=true & \
	CONSUMER_PID=$$!; \
	echo "Consumer PID: $$CONSUMER_PID"; \
	sleep 2; \
	echo "2. Starting high-throughput producer..."; \
	./bin/producer \
		-agent=$(AGENT_ADDR) \
		-namespace=$(TOPIC_NAMESPACE) \
		-topic=perf-test \
		-partitions=8 \
		-messages=1000 \
		-publisher=perf-producer \
		-size=512 \
		-interval=10ms; \
	echo "3. Waiting for consumer to process messages..."; \
	sleep 10; \
	echo "4. Stopping consumer..."; \
	kill $$CONSUMER_PID || true; \
	echo "Performance test completed!"

test-multiple-consumers: build
	@echo "Running multiple consumers test..."
	@echo "1. Starting multiple consumers in background..."
	./bin/consumer \
		-agent=$(AGENT_ADDR) \
		-namespace=$(TOPIC_NAMESPACE) \
		-topic=multi-test \
		-group=multi-consumer-group \
		-instance=consumer-1 \
		-offset=earliest \
		-show-messages=false \
		-log-progress=true & \
	CONSUMER1_PID=$$!; \
	./bin/consumer \
		-agent=$(AGENT_ADDR) \
		-namespace=$(TOPIC_NAMESPACE) \
		-topic=multi-test \
		-group=multi-consumer-group \
		-instance=consumer-2 \
		-offset=earliest \
		-show-messages=false \
		-log-progress=true & \
	CONSUMER2_PID=$$!; \
	echo "Consumer PIDs: $$CONSUMER1_PID, $$CONSUMER2_PID"; \
	sleep 2; \
	echo "2. Starting producer..."; \
	./bin/producer \
		-agent=$(AGENT_ADDR) \
		-namespace=$(TOPIC_NAMESPACE) \
		-topic=multi-test \
		-partitions=8 \
		-messages=200 \
		-publisher=multi-producer \
		-size=256 \
		-interval=50ms; \
	echo "3. Waiting for consumers to process messages..."; \
	sleep 10; \
	echo "4. Stopping consumers..."; \
	kill $$CONSUMER1_PID $$CONSUMER2_PID || true; \
	echo "Multiple consumers test completed!"

# Clean up
clean:
	@echo "Cleaning up..."
	rm -rf bin/
	go clean -cache

# Help
help:
	@echo "SeaweedFS Message Queue Test Makefile"
	@echo ""
	@echo "Usage:"
	@echo " make build - Build producer and consumer binaries"
	@echo " make producer - Run producer (builds first)"
	@echo " make consumer - Run consumer (builds first)"
	@echo " make run-producer - Run producer directly with go run"
	@echo " make run-consumer - Run consumer directly with go run"
	@echo " make test - Run basic producer/consumer test"
	@echo " make test-performance - Run performance test"
	@echo " make test-multiple-consumers - Run multiple consumers test"
	@echo " make clean - Clean up build artifacts"
	@echo ""
	@echo "Configuration (set via environment variables):"
	@echo " AGENT_ADDR=10.21.152.113:16777 - MQ agent address"
	@echo " TOPIC_NAMESPACE=test - Topic namespace"
	@echo " TOPIC_NAME=test-topic - Topic name"
	@echo " PARTITION_COUNT=4 - Number of partitions"
	@echo " MESSAGE_COUNT=100 - Number of messages to produce"
	@echo " CONSUMER_GROUP=test-consumer-group - Consumer group name"
	@echo " CONSUMER_INSTANCE=test-consumer-1 - Consumer instance ID"
	@echo ""
	@echo "Examples:"
	@echo " make producer MESSAGE_COUNT=1000 PARTITION_COUNT=8"
	@echo " make consumer CONSUMER_GROUP=my-group"
	@echo " make test AGENT_ADDR=10.21.152.113:16777 MESSAGE_COUNT=500"

test/mq/README.md (new file, 244 lines)

@@ -0,0 +1,244 @@
# SeaweedFS Message Queue Test Suite
This directory contains test programs for SeaweedFS Message Queue (MQ) functionality, including message producers and consumers.
## Prerequisites
1. **SeaweedFS with MQ Broker and Agent**: You need a running SeaweedFS instance with MQ broker and agent enabled
2. **Go**: Go 1.19 or later is required to build the test programs
## Quick Start
### 1. Start SeaweedFS with MQ Broker and Agent
```bash
# Start SeaweedFS server with MQ broker and agent
weed server -mq.broker -mq.agent -filer -volume
# Or start components separately
weed master
weed volume -mserver=localhost:9333
weed filer -master=localhost:9333
weed mq.broker -filer=localhost:8888
weed mq.agent -brokers=localhost:17777
```
### 2. Build Test Programs
```bash
# Build both producer and consumer
make build
# Or build individually
make build-producer
make build-consumer
```
### 3. Run Basic Test
```bash
# Run a basic producer/consumer test
make test
# Or run producer and consumer manually
make consumer & # Start consumer in background
make producer # Start producer
```
## Test Programs
### Producer (`producer/main.go`)
Generates structured messages and publishes them to a SeaweedMQ topic via the MQ agent.
**Usage:**
```bash
./bin/producer [options]
```
**Options:**
- `-agent`: MQ agent address (default: localhost:16777)
- `-namespace`: Topic namespace (default: test)
- `-topic`: Topic name (default: test-topic)
- `-partitions`: Number of partitions (default: 4)
- `-messages`: Number of messages to produce (default: 100)
- `-publisher`: Publisher name (default: test-producer)
- `-size`: Message size in bytes (default: 1024)
- `-interval`: Interval between messages (default: 100ms)
**Example:**
```bash
./bin/producer -agent=localhost:16777 -namespace=test -topic=my-topic -messages=1000 -interval=50ms
```
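For reference, the producer's core publish path is sketched below. This is a condensed excerpt of `producer/main.go` in this directory, not a standalone program: it reuses the `TestMessage` struct and `structToRecordValue` helper defined there, and omits flag parsing, the payload loop, and statistics.
```go
// Condensed from producer/main.go: derive a RecordType from a Go struct,
// open a publish session against the MQ agent, and publish one keyed record.
recordType := schema.StructToSchema(TestMessage{})
topicSchema := schema.NewSchema("test", "test-topic", recordType)

session, err := agent_client.NewPublishSession("localhost:16777", topicSchema, 4, "test-producer")
if err != nil {
	log.Fatalf("Failed to create publish session: %v", err)
}
defer session.CloseSession()

// structToRecordValue is the helper defined in producer/main.go.
record := structToRecordValue(TestMessage{ID: 1, Message: "hello", Timestamp: time.Now().UnixNano()})
if err := session.PublishMessageRecord([]byte("key-1"), record); err != nil {
	log.Printf("Failed to publish: %v", err)
}
```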
### Consumer (`consumer/main.go`)
Consumes structured messages from a SeaweedMQ topic via the MQ agent.
**Usage:**
```bash
./bin/consumer [options]
```
**Options:**
- `-agent`: MQ agent address (default: localhost:16777)
- `-namespace`: Topic namespace (default: test)
- `-topic`: Topic name (default: test-topic)
- `-group`: Consumer group name (default: test-consumer-group)
- `-instance`: Consumer group instance ID (default: test-consumer-1)
- `-max-partitions`: Maximum number of partitions to consume (default: 10)
- `-window-size`: Sliding window size for concurrent processing (default: 100)
- `-offset`: Offset type: earliest, latest, timestamp (default: latest)
- `-offset-ts`: Offset timestamp in nanoseconds (for timestamp offset type)
- `-filter`: Message filter (default: empty)
- `-show-messages`: Show consumed messages (default: true)
- `-log-progress`: Log progress every 10 messages (default: true)
**Example:**
```bash
./bin/consumer -agent=localhost:16777 -namespace=test -topic=my-topic -group=my-group -offset=earliest
```
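The consumer's receive path, similarly condensed from `consumer/main.go` (offset selection, signal handling, and statistics omitted), looks roughly like this:
```go
// Condensed from consumer/main.go: open a subscribe session on the MQ agent
// and handle each structured message via a callback.
session, err := agent_client.NewSubscribeSession("localhost:16777", &agent_client.SubscribeOption{
	ConsumerGroup:           "test-consumer-group",
	ConsumerGroupInstanceId: "test-consumer-1",
	Topic:                   topic.NewTopic("test", "test-topic"),
	OffsetType:              schema_pb.OffsetType_RESET_TO_EARLIEST,
	MaxSubscribedPartitions: 10,
	SlidingWindowSize:       100,
})
if err != nil {
	log.Fatalf("Failed to create subscribe session: %v", err)
}
defer session.CloseSession()

err = session.SubscribeMessageRecord(
	func(key []byte, record *schema_pb.RecordValue) { // called for each message
		fmt.Printf("received key=%s with %d fields\n", string(key), len(record.Fields))
	},
	func() { // called when the subscription completes
		fmt.Println("subscription completed")
	},
)
```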
## Makefile Commands
### Building
- `make build`: Build both producer and consumer binaries
- `make build-producer`: Build producer only
- `make build-consumer`: Build consumer only
### Running
- `make producer`: Build and run producer
- `make consumer`: Build and run consumer
- `make run-producer`: Run producer directly with go run
- `make run-consumer`: Run consumer directly with go run
### Testing
- `make test`: Run basic producer/consumer test
- `make test-performance`: Run performance test (1000 messages, 8 partitions)
- `make test-multiple-consumers`: Run test with multiple consumers
### Cleanup
- `make clean`: Remove build artifacts
### Help
- `make help`: Show detailed help
## Configuration
Configure tests using environment variables:
```bash
export AGENT_ADDR=localhost:16777
export TOPIC_NAMESPACE=test
export TOPIC_NAME=test-topic
export PARTITION_COUNT=4
export MESSAGE_COUNT=100
export CONSUMER_GROUP=test-consumer-group
export CONSUMER_INSTANCE=test-consumer-1
```
## Example Usage Scenarios
### 1. Basic Producer/Consumer Test
```bash
# Terminal 1: Start consumer
make consumer
# Terminal 2: Run producer
make producer MESSAGE_COUNT=50
```
### 2. Performance Testing
```bash
# Test with high throughput
make test-performance
```
### 3. Multiple Consumer Groups
```bash
# Terminal 1: Consumer group 1
make consumer CONSUMER_GROUP=group1
# Terminal 2: Consumer group 2
make consumer CONSUMER_GROUP=group2
# Terminal 3: Producer
make producer MESSAGE_COUNT=200
```
### 4. Different Offset Types
The `consumer` Makefile target pins `-offset=latest`, so pass the offset flags to the binary directly:
```bash
# Consume from the earliest available offset
./bin/consumer -offset=earliest

# Consume only new messages (default)
./bin/consumer -offset=latest

# Consume starting from a specific timestamp (nanoseconds)
./bin/consumer -offset=timestamp -offset-ts=1699000000000000000
```
## Troubleshooting
### Common Issues
1. **Connection Refused**: Make sure SeaweedFS MQ agent is running on the specified address
2. **Agent Not Found**: Ensure both MQ broker and agent are running (agent requires broker)
3. **Topic Not Found**: The producer will create the topic automatically on first publish
4. **Consumer Not Receiving Messages**: Check if consumer group offset is correct (try `earliest`)
5. **Build Failures**: Ensure you're running from the SeaweedFS root directory
### Debug Mode
Enable verbose logging:
```bash
# Run with debug logging
GLOG_v=4 make producer
GLOG_v=4 make consumer
```
### Check Broker and Agent Status
```bash
# Check if broker is running
curl http://localhost:9333/cluster/brokers
# Check if agent is running (if running as server)
curl http://localhost:9333/cluster/agents
# Or use weed shell
weed shell -master=localhost:9333
> mq.broker.list
```
## Architecture
The test setup demonstrates:
1. **Agent-Based Architecture**: Uses the MQ agent as an intermediary between clients and brokers
2. **Structured Messages**: Messages use the schema-based `RecordValue` format instead of raw bytes (see the sketch after this list)
3. **Topic Management**: Creating and configuring topics with multiple partitions
4. **Message Production**: Publishing structured messages with keys for partitioning
5. **Message Consumption**: Consuming structured messages with consumer groups and offset management
6. **Load Balancing**: Multiple consumers in the same group share partition assignments
7. **Fault Tolerance**: Graceful handling of agent and broker failures and reconnections
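A minimal, self-contained sketch of that structured format, built the same way `producer/main.go` does it (the field names and values here are illustrative only):
```go
package main

import (
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)

func main() {
	// A RecordValue carries named, typed fields instead of an opaque byte payload.
	record := &schema_pb.RecordValue{
		Fields: map[string]*schema_pb.Value{
			"ID":      {Kind: &schema_pb.Value_Int64Value{Int64Value: 42}},
			"Message": {Kind: &schema_pb.Value_StringValue{StringValue: "hello"}},
		},
	}
	fmt.Printf("record has %d fields\n", len(record.Fields))
}
```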
## Files
- `producer/main.go`: Message producer implementation
- `consumer/main.go`: Message consumer implementation
- `Makefile`: Build and test automation
- `README.md`: This documentation
- `bin/`: Built binaries (created during build)
## Next Steps
1. Modify the producer to send structured data using `RecordType`
2. Implement message filtering in the consumer
3. Add metrics collection and monitoring
4. Test with multiple broker instances
5. Implement schema evolution testing

test/mq/consumer/main.go (new file, 192 lines)

@@ -0,0 +1,192 @@
package main

import (
	"flag"
	"fmt"
	"log"
	"os"
	"os/signal"
	"sync"
	"syscall"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/mq/client/agent_client"
	"github.com/seaweedfs/seaweedfs/weed/mq/topic"
	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)

var (
	agentAddr               = flag.String("agent", "localhost:16777", "MQ agent address")
	topicNamespace          = flag.String("namespace", "test", "topic namespace")
	topicName               = flag.String("topic", "test-topic", "topic name")
	consumerGroup           = flag.String("group", "test-consumer-group", "consumer group name")
	consumerGroupInstanceId = flag.String("instance", "test-consumer-1", "consumer group instance id")
	maxPartitions           = flag.Int("max-partitions", 10, "maximum number of partitions to consume")
	slidingWindowSize       = flag.Int("window-size", 100, "sliding window size for concurrent processing")
	offsetType              = flag.String("offset", "latest", "offset type: earliest, latest, timestamp")
	offsetTsNs              = flag.Int64("offset-ts", 0, "offset timestamp in nanoseconds (for timestamp offset type)")
	showMessages            = flag.Bool("show-messages", true, "show consumed messages")
	logProgress             = flag.Bool("log-progress", true, "log progress every 10 messages")
	filter                  = flag.String("filter", "", "message filter")
)

func main() {
	flag.Parse()

	fmt.Printf("Starting message consumer:\n")
	fmt.Printf(" Agent: %s\n", *agentAddr)
	fmt.Printf(" Topic: %s.%s\n", *topicNamespace, *topicName)
	fmt.Printf(" Consumer Group: %s\n", *consumerGroup)
	fmt.Printf(" Consumer Instance: %s\n", *consumerGroupInstanceId)
	fmt.Printf(" Max Partitions: %d\n", *maxPartitions)
	fmt.Printf(" Sliding Window Size: %d\n", *slidingWindowSize)
	fmt.Printf(" Offset Type: %s\n", *offsetType)
	fmt.Printf(" Filter: %s\n", *filter)

	// Create topic
	topicObj := topic.NewTopic(*topicNamespace, *topicName)

	// Determine offset type
	var pbOffsetType schema_pb.OffsetType
	switch *offsetType {
	case "earliest":
		pbOffsetType = schema_pb.OffsetType_RESET_TO_EARLIEST
	case "latest":
		pbOffsetType = schema_pb.OffsetType_RESET_TO_LATEST
	case "timestamp":
		pbOffsetType = schema_pb.OffsetType_EXACT_TS_NS
	default:
		pbOffsetType = schema_pb.OffsetType_RESET_TO_LATEST
	}

	// Create subscribe option
	option := &agent_client.SubscribeOption{
		ConsumerGroup:           *consumerGroup,
		ConsumerGroupInstanceId: *consumerGroupInstanceId,
		Topic:                   topicObj,
		OffsetType:              pbOffsetType,
		OffsetTsNs:              *offsetTsNs,
		Filter:                  *filter,
		MaxSubscribedPartitions: int32(*maxPartitions),
		SlidingWindowSize:       int32(*slidingWindowSize),
	}

	// Create subscribe session
	session, err := agent_client.NewSubscribeSession(*agentAddr, option)
	if err != nil {
		log.Fatalf("Failed to create subscribe session: %v", err)
	}
	defer session.CloseSession()

	// Statistics
	var messageCount int64
	var mu sync.Mutex
	startTime := time.Now()

	// Handle graceful shutdown
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)

	// Channel to signal completion
	done := make(chan error, 1)

	// Start consuming messages
	fmt.Printf("\nStarting to consume messages...\n")
	go func() {
		err := session.SubscribeMessageRecord(
			// onEachMessageFn
			func(key []byte, record *schema_pb.RecordValue) {
				mu.Lock()
				messageCount++
				currentCount := messageCount
				mu.Unlock()

				if *showMessages {
					fmt.Printf("Received message: key=%s\n", string(key))
					printRecordValue(record)
				}

				if *logProgress && currentCount%10 == 0 {
					elapsed := time.Since(startTime)
					rate := float64(currentCount) / elapsed.Seconds()
					fmt.Printf("Consumed %d messages (%.2f msg/sec)\n", currentCount, rate)
				}
			},
			// onCompletionFn
			func() {
				fmt.Printf("Subscription completed\n")
				done <- nil
			},
		)
		if err != nil {
			done <- err
		}
	}()

	// Wait for signal or completion
	select {
	case <-sigChan:
		fmt.Printf("\nReceived shutdown signal, stopping consumer...\n")
	case err := <-done:
		if err != nil {
			log.Printf("Subscription error: %v", err)
		}
	}

	// Print final statistics
	mu.Lock()
	finalCount := messageCount
	mu.Unlock()

	duration := time.Since(startTime)
	fmt.Printf("Consumed %d messages in %v\n", finalCount, duration)
	if duration.Seconds() > 0 {
		fmt.Printf("Average throughput: %.2f messages/sec\n", float64(finalCount)/duration.Seconds())
	}
}

func printRecordValue(record *schema_pb.RecordValue) {
	if record == nil || record.Fields == nil {
		fmt.Printf(" (empty record)\n")
		return
	}
	for fieldName, value := range record.Fields {
		fmt.Printf(" %s: %s\n", fieldName, formatValue(value))
	}
}

func formatValue(value *schema_pb.Value) string {
	if value == nil {
		return "(nil)"
	}
	switch kind := value.Kind.(type) {
	case *schema_pb.Value_BoolValue:
		return fmt.Sprintf("%t", kind.BoolValue)
	case *schema_pb.Value_Int32Value:
		return fmt.Sprintf("%d", kind.Int32Value)
	case *schema_pb.Value_Int64Value:
		return fmt.Sprintf("%d", kind.Int64Value)
	case *schema_pb.Value_FloatValue:
		return fmt.Sprintf("%f", kind.FloatValue)
	case *schema_pb.Value_DoubleValue:
		return fmt.Sprintf("%f", kind.DoubleValue)
	case *schema_pb.Value_BytesValue:
		if len(kind.BytesValue) > 50 {
			return fmt.Sprintf("bytes[%d] %x...", len(kind.BytesValue), kind.BytesValue[:50])
		}
		return fmt.Sprintf("bytes[%d] %x", len(kind.BytesValue), kind.BytesValue)
	case *schema_pb.Value_StringValue:
		if len(kind.StringValue) > 100 {
			return fmt.Sprintf("\"%s...\"", kind.StringValue[:100])
		}
		return fmt.Sprintf("\"%s\"", kind.StringValue)
	case *schema_pb.Value_ListValue:
		return fmt.Sprintf("list[%d items]", len(kind.ListValue.Values))
	case *schema_pb.Value_RecordValue:
		return fmt.Sprintf("record[%d fields]", len(kind.RecordValue.Fields))
	default:
		return "(unknown)"
	}
}

test/mq/producer/main.go (new file, 172 lines)

@@ -0,0 +1,172 @@
package main

import (
	"flag"
	"fmt"
	"log"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/mq/client/agent_client"
	"github.com/seaweedfs/seaweedfs/weed/mq/schema"
	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
)

var (
	agentAddr      = flag.String("agent", "localhost:16777", "MQ agent address")
	topicNamespace = flag.String("namespace", "test", "topic namespace")
	topicName      = flag.String("topic", "test-topic", "topic name")
	partitionCount = flag.Int("partitions", 4, "number of partitions")
	messageCount   = flag.Int("messages", 100, "number of messages to produce")
	publisherName  = flag.String("publisher", "test-producer", "publisher name")
	messageSize    = flag.Int("size", 1024, "message size in bytes")
	interval       = flag.Duration("interval", 100*time.Millisecond, "interval between messages")
)

// TestMessage represents the structure of messages we'll be sending
type TestMessage struct {
	ID        int64  `json:"id"`
	Message   string `json:"message"`
	Payload   []byte `json:"payload"`
	Timestamp int64  `json:"timestamp"`
}

func main() {
	flag.Parse()

	fmt.Printf("Starting message producer:\n")
	fmt.Printf(" Agent: %s\n", *agentAddr)
	fmt.Printf(" Topic: %s.%s\n", *topicNamespace, *topicName)
	fmt.Printf(" Partitions: %d\n", *partitionCount)
	fmt.Printf(" Messages: %d\n", *messageCount)
	fmt.Printf(" Publisher: %s\n", *publisherName)
	fmt.Printf(" Message Size: %d bytes\n", *messageSize)
	fmt.Printf(" Interval: %v\n", *interval)

	// Create an instance of the message struct to generate schema from
	messageInstance := TestMessage{}

	// Automatically generate RecordType from the struct
	recordType := schema.StructToSchema(messageInstance)
	if recordType == nil {
		log.Fatalf("Failed to generate schema from struct")
	}

	fmt.Printf("\nGenerated schema with %d fields:\n", len(recordType.Fields))
	for _, field := range recordType.Fields {
		fmt.Printf(" - %s: %s\n", field.Name, getTypeString(field.Type))
	}

	topicSchema := schema.NewSchema(*topicNamespace, *topicName, recordType)

	// Create publish session
	session, err := agent_client.NewPublishSession(*agentAddr, topicSchema, *partitionCount, *publisherName)
	if err != nil {
		log.Fatalf("Failed to create publish session: %v", err)
	}
	defer session.CloseSession()

	// Create message payload
	payload := make([]byte, *messageSize)
	for i := range payload {
		payload[i] = byte(i % 256)
	}

	// Start producing messages
	fmt.Printf("\nStarting to produce messages...\n")
	startTime := time.Now()

	for i := 0; i < *messageCount; i++ {
		key := fmt.Sprintf("key-%d", i)

		// Create a message struct
		message := TestMessage{
			ID:        int64(i),
			Message:   fmt.Sprintf("This is message number %d", i),
			Payload:   payload[:min(100, len(payload))], // First 100 bytes
			Timestamp: time.Now().UnixNano(),
		}

		// Convert struct to RecordValue
		record := structToRecordValue(message)

		err := session.PublishMessageRecord([]byte(key), record)
		if err != nil {
			log.Printf("Failed to publish message %d: %v", i, err)
			continue
		}

		if (i+1)%10 == 0 {
			fmt.Printf("Published %d messages\n", i+1)
		}

		if *interval > 0 {
			time.Sleep(*interval)
		}
	}

	duration := time.Since(startTime)
	fmt.Printf("\nCompleted producing %d messages in %v\n", *messageCount, duration)
	fmt.Printf("Throughput: %.2f messages/sec\n", float64(*messageCount)/duration.Seconds())
}

// Helper function to convert struct to RecordValue
func structToRecordValue(msg TestMessage) *schema_pb.RecordValue {
	return &schema_pb.RecordValue{
		Fields: map[string]*schema_pb.Value{
			"ID": {
				Kind: &schema_pb.Value_Int64Value{
					Int64Value: msg.ID,
				},
			},
			"Message": {
				Kind: &schema_pb.Value_StringValue{
					StringValue: msg.Message,
				},
			},
			"Payload": {
				Kind: &schema_pb.Value_BytesValue{
					BytesValue: msg.Payload,
				},
			},
			"Timestamp": {
				Kind: &schema_pb.Value_Int64Value{
					Int64Value: msg.Timestamp,
				},
			},
		},
	}
}

func getTypeString(t *schema_pb.Type) string {
	switch kind := t.Kind.(type) {
	case *schema_pb.Type_ScalarType:
		switch kind.ScalarType {
		case schema_pb.ScalarType_BOOL:
			return "bool"
		case schema_pb.ScalarType_INT32:
			return "int32"
		case schema_pb.ScalarType_INT64:
			return "int64"
		case schema_pb.ScalarType_FLOAT:
			return "float"
		case schema_pb.ScalarType_DOUBLE:
			return "double"
		case schema_pb.ScalarType_BYTES:
			return "bytes"
		case schema_pb.ScalarType_STRING:
			return "string"
		}
	case *schema_pb.Type_ListType:
		return fmt.Sprintf("list<%s>", getTypeString(kind.ListType.ElementType))
	case *schema_pb.Type_RecordType:
		return "record"
	}
	return "unknown"
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}