writer.go
package kafka

import (
	"context"
	"encoding/binary"
	"fmt"

	"github.com/segmentio/kafka-go"
	"github.com/smarty/messaging/v4"
)
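
// defaultWriter adapts a kafka.Writer to the messaging.CommitWriter interface,
// buffering outgoing dispatches as Kafka messages until they are written.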
type defaultWriter struct {
logger Logger
monitor Monitor
messageTypes map[string]uint32
contentTypes map[string]uint8
transactional bool
writer *kafka.Writer
lifecycle context.Context
cancel context.CancelFunc
pending []kafka.Message
}
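
// newWriter constructs a defaultWriter around a kafka.Writer built from the
// supplied configuration; the underlying writer is closed once the parent
// context is cancelled or Close is called.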
func newWriter(config configuration, parent context.Context, transactional bool) messaging.CommitWriter {
this := &defaultWriter{
logger: config.Logger,
monitor: config.Monitor,
messageTypes: config.messageTypeIdentifiers,
contentTypes: config.contentTypeIdentifiers,
transactional: transactional,
writer: &kafka.Writer{
Addr: kafka.TCP(config.Brokers...),
Compression: computeCompressionMethod(config.CompressionMethod),
Balancer: computePartitionSelection(config.PartitionSelection),
RequiredAcks: computeRequiredWrites(config.RequiredWrites),
MaxAttempts: int(config.MaxWriteAttempts),
BatchSize: int(config.MaxWriteBatchSize),
BatchTimeout: config.BatchWriteInterval,
Async: false,
Logger: config.DriverLogger,
ErrorLogger: config.DriverLogger,
},
}
this.lifecycle, this.cancel = context.WithCancel(parent)
go func() {
<-this.lifecycle.Done()
_ = this.writer.Close()
}()
return this
}
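
// Write converts each dispatch into a Kafka message and appends it to the
// pending batch; in transactional mode the batch is held until Commit is
// called, otherwise it is flushed immediately.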
func (this *defaultWriter) Write(_ context.Context, dispatches ...messaging.Dispatch) (int, error) {
for i, dispatch := range dispatches {
if len(dispatch.Topic) == 0 {
return i, messaging.ErrEmptyDispatchTopic
}
this.pending = append(this.pending, this.newMessage(dispatch))
}
	if this.transactional {
		return len(dispatches), nil
	}
if err := this.Commit(); err != nil {
return 0, err
}
return len(dispatches), nil
}
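
// newMessage translates a messaging.Dispatch into a kafka.Message, prefixing
// the payload with a five-byte header identifying the message and content types.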
func (this *defaultWriter) newMessage(dispatch messaging.Dispatch) kafka.Message {
messageType := this.computeMessageType(dispatch.MessageType, dispatch.ContentType)
return kafka.Message{
Time: dispatch.Timestamp,
Topic: dispatch.Topic,
Headers: computeHeadersFromDispatch(dispatch),
Key: computeKafkaMessageKey(dispatch.Partition),
Value: append(messageType, dispatch.Payload...),
}
}
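
// computeMessageType encodes the registered message-type and content-type
// identifiers into the five-byte header that precedes each payload.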
func (this *defaultWriter) computeMessageType(messageType, contentType string) []byte {
// header: 0x0 magic byte and 32-bit unsigned integer containing message type and content type
value := this.messageTypes[messageType] << 8
value += uint32(this.contentTypes[contentType])
target := make([]byte, 4+1)
binary.LittleEndian.PutUint32(target[1:], value)
return target
}
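
// computeKafkaMessageKey renders a non-zero partition value as a little-endian
// message key; a zero value yields a nil key.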
func computeKafkaMessageKey(partition uint64) []byte {
if partition == 0 {
return nil
}
target := make([]byte, 8)
binary.LittleEndian.PutUint64(target, partition)
return target
}
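
// computeHeadersFromDispatch copies the dispatch headers into Kafka headers,
// rendering each value as text.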
func computeHeadersFromDispatch(dispatch messaging.Dispatch) []kafka.Header {
if len(dispatch.Headers) == 0 {
return nil
}
targetHeaders := make([]kafka.Header, 0, len(dispatch.Headers))
for key, value := range dispatch.Headers {
targetHeaders = append(targetHeaders, kafka.Header{
Key: key,
Value: []byte(fmt.Sprint(value)),
})
}
return targetHeaders
}
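
// Commit writes all pending messages to Kafka and clears the buffer, even when
// the write fails.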
func (this *defaultWriter) Commit() error {
err := this.writer.WriteMessages(this.lifecycle, this.pending...)
this.pending = this.pending[0:0]
return err
}
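
// Rollback discards any pending messages without writing them.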
func (this *defaultWriter) Rollback() error {
this.pending = this.pending[0:0]
return nil
}
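
// Close cancels the writer's lifecycle context, which closes the underlying
// kafka.Writer.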
func (this *defaultWriter) Close() error {
this.cancel()
return nil
}