Here are examples of a Kafka producer and a consumer in Go that connect to a Bitnami Kafka instance in Kubernetes through port forwarding.

Producer:
package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/segmentio/kafka-go"
)

func main() {
    // Configure the writer
    writer := kafka.NewWriter(kafka.WriterConfig{
        Brokers:      []string{"localhost:9092"},
        Topic:        "my-topic",
        BatchSize:    1, // For simplicity, send each message immediately
        BatchTimeout: 10 * time.Millisecond,
        // If the broker requires SASL/PLAIN authentication, set a dialer:
        // Dialer: &kafka.Dialer{
        //     SASLMechanism: plain.Mechanism{ // import "github.com/segmentio/kafka-go/sasl/plain"
        //         Username: "user",
        //         Password: "password",
        //     },
        // },
    })
    // Ensure the writer gets closed
    defer writer.Close()

    // Create a message
    message := kafka.Message{
        Key:   []byte("key1"),
        Value: []byte("Hello from Go to K8s Kafka!"),
        Time:  time.Now(),
    }

    // Send the message
    err := writer.WriteMessages(context.Background(), message)
    if err != nil {
        log.Fatalf("Failed to write message: %v", err)
    }

    fmt.Println("Message sent successfully!")
}
Consumer:

package main

import (
    "context"
    "fmt"
    "log"
    "os"
    "os/signal"
    "syscall"

    "github.com/segmentio/kafka-go"
)

func main() {
    // Set up a context that can be cancelled
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Set up signal handling to exit gracefully
    signals := make(chan os.Signal, 1)
    signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)

    // Configure the reader
    reader := kafka.NewReader(kafka.ReaderConfig{
        Brokers:     []string{"localhost:9092"},
        Topic:       "my-topic",
        GroupID:     "my-go-consumer-group",
        MinBytes:    10e3, // 10KB
        MaxBytes:    10e6, // 10MB
        StartOffset: kafka.FirstOffset, // Start from the oldest message if the group has no committed offset
        // If the broker requires SASL/PLAIN authentication, set a dialer:
        // Dialer: &kafka.Dialer{
        //     SASLMechanism: plain.Mechanism{ // import "github.com/segmentio/kafka-go/sasl/plain"
        //         Username: "user",
        //         Password: "password",
        //     },
        // },
    })
    // Ensure the reader gets closed
    defer reader.Close()

    // Start consuming in a separate goroutine
    go func() {
        for {
            // Stop if the context was cancelled
            select {
            case <-ctx.Done():
                return
            default:
                // Continue processing
            }

            // Read the next message
            message, err := reader.ReadMessage(ctx)
            if err != nil {
                // Don't exit fatally if the context was cancelled
                select {
                case <-ctx.Done():
                    return
                default:
                    log.Fatalf("Failed to read message: %v", err)
                }
            }

            fmt.Printf("Message received: partition=%d offset=%d key=%s value=%s\n",
                message.Partition, message.Offset, string(message.Key), string(message.Value))
        }
    }()

    // Wait for a termination signal
    <-signals
    fmt.Println("\nShutting down consumer...")
}
To run these examples:

- First, install the Kafka Go client library:

  go mod init kafka-example
  go get github.com/segmentio/kafka-go

- Ensure port forwarding is set up:

  kubectl port-forward svc/<release-name>-kafka -n <namespace> 9092:9092

- Make sure the topic exists, or create it:

  kubectl exec -it <kafka-pod-name> -n <namespace> -- kafka-topics.sh --create --topic my-topic --bootstrap-server localhost:9092 --partitions 3 --replication-factor 1
A few additional notes:

- Error Handling: The examples above include only basic error handling; in production you will likely want more robust error handling and retries (see the retry sketch after this list).

- TLS Support: If your Bitnami Kafka uses TLS, configure the dialer with TLS:

  dialer := &kafka.Dialer{ // needs import "crypto/tls"
      Timeout:   10 * time.Second,
      DualStack: true,
      TLS:       &tls.Config{},
  }

  reader := kafka.NewReader(kafka.ReaderConfig{
      Brokers: []string{"localhost:9092"},
      Topic:   "my-topic",
      GroupID: "my-go-consumer-group",
      Dialer:  dialer,
  })

- Batch Processing: For higher throughput in the producer, adjust the BatchSize and BatchTimeout settings (also illustrated in the sketch below).

- Connection Pooling: The segmentio/kafka-go library handles connection pooling for you; retries can be tuned with the MaxAttempts setting on the writer and reader configs, and the reader's ReadBackoffMin/ReadBackoffMax settings control the wait between retry attempts.
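Below is a minimal sketch of the retry and batching ideas from the notes above, reusing the same broker address and topic as the earlier examples. The writeWithRetry helper, the attempt count, and the backoff duration are illustrative assumptions rather than part of the library; MaxAttempts, BatchSize, and BatchTimeout are real kafka-go settings, but the values shown are only examples to tune for your workload.

package main

import (
    "context"
    "log"
    "time"

    "github.com/segmentio/kafka-go"
)

// writeWithRetry (a hypothetical helper for illustration) retries a failed
// WriteMessages call a few times with a fixed backoff, giving up early if the
// context is cancelled.
func writeWithRetry(ctx context.Context, writer *kafka.Writer, attempts int, backoff time.Duration, msgs ...kafka.Message) error {
    var err error
    for i := 0; i < attempts; i++ {
        if err = writer.WriteMessages(ctx, msgs...); err == nil {
            return nil
        }
        log.Printf("write attempt %d/%d failed: %v", i+1, attempts, err)
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(backoff):
        }
    }
    return err
}

func main() {
    writer := kafka.NewWriter(kafka.WriterConfig{
        Brokers:      []string{"localhost:9092"},
        Topic:        "my-topic",
        MaxAttempts:  3,                      // retries handled inside the library per call
        BatchSize:    100,                    // batch more messages per request for throughput
        BatchTimeout: 100 * time.Millisecond, // flush partially filled batches after this delay
    })
    defer writer.Close()

    // Application-level retry on top of the library's own retries (illustrative values).
    err := writeWithRetry(context.Background(), writer, 5, 500*time.Millisecond,
        kafka.Message{Key: []byte("key1"), Value: []byte("Hello again from Go!")})
    if err != nil {
        log.Fatalf("giving up after retries: %v", err)
    }
}

In practice you would also distinguish retriable errors (for example, temporary network failures) from permanent ones; this sketch simply retries everything until the attempts run out.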
These Go examples should work well with a Bitnami Kafka instance accessed through Kubernetes port forwarding.