fluent-plugin-kafka 0.0.12 → 0.0.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +25 -0
- data/fluent-plugin-kafka.gemspec +1 -1
- data/lib/fluent/plugin/out_kafka.rb +8 -3
- data/lib/fluent/plugin/out_kafka_buffered.rb +6 -2
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 18708524a30b618692c9df1cf9b303a5f40a7625
|
4
|
+
data.tar.gz: cf0f5cb7e2d1deee2736ea6f25391c33766adbfb
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 73973698fb730b5f2eb18161c557252cc990cefa4d02ac03dc49ff7523242bb9f78052b54b79f8eb514fd39858a4038547aa318a131825e78c1e81b0e834a993
|
7
|
+
data.tar.gz: 86b72a4927cd364f3b66d58a1fb22abdc78c36bf6ab4560ec5474d6781e2b210d12926f81829a61f8e44448970cf8173a27d49dcbe05c7be719b2205da67efa4
|
data/README.md
CHANGED
@@ -41,6 +41,7 @@ Or install it yourself as:
|
|
41
41
|
zookeeper <zookeeper_host>:<zookeeper_port> # Set brokers via Zookeeper
|
42
42
|
|
43
43
|
default_topic <output topic>
|
44
|
+
default_partition_key (string) :default => nil
|
44
45
|
output_data_type (json|ltsv|msgpack|attr:<record name>|<formatter name>)
|
45
46
|
output_include_tag (true|false) :default => false
|
46
47
|
output_include_time (true|false) :default => false
|
@@ -64,6 +65,18 @@ Install snappy module before you use snappy compression.
|
|
64
65
|
|
65
66
|
$ gem install snappy
|
66
67
|
|
68
|
+
#### Load balancing
|
69
|
+
|
70
|
+
Messages will be sent to brokers in a round-robin manner by default by Poseidon, but you can set `default_partition_key` in the config file to route messages to a specific broker.
|
71
|
+
If a key named `partition_key` exists in a message, this plugin uses its value as the partition key.
|
72
|
+
|
73
|
+
|default_partition_key|partition_key| behavior |
|
74
|
+
|-|-|-|
|
75
|
+
|Not set|Not exists| All messages are sent in round-robin |
|
76
|
+
|Set| Not exists| All messages are sent to specific broker |
|
77
|
+
|Not set| Exists | Messages which have partition_key record are sent to specific broker, others are sent in round-robin|
|
78
|
+
|Set| Exists | Messages which have partition_key record are sent to specific broker with partition_key, others are sent to specific broker with default_partition_key|
|
79
|
+
|
67
80
|
|
68
81
|
### Buffered output plugin
|
69
82
|
|
@@ -75,6 +88,7 @@ Install snappy module before you use snappy compression.
|
|
75
88
|
zookeeper <zookeeper_host>:<zookeeper_port> # Set brokers via Zookeeper
|
76
89
|
|
77
90
|
default_topic <output topic>
|
91
|
+
default_partition_key (string) :default => nil
|
78
92
|
flush_interval <flush interval (sec) :default => 60>
|
79
93
|
buffer_type (file|memory)
|
80
94
|
output_data_type (json|ltsv|msgpack|attr:<record name>|<formatter name>)
|
@@ -100,6 +114,17 @@ Install snappy module before you use snappy compression.
|
|
100
114
|
|
101
115
|
$ gem install snappy
|
102
116
|
|
117
|
+
#### Load balancing
|
118
|
+
|
119
|
+
Messages will be sent to brokers in a round-robin manner by default by Poseidon, but you can set `default_partition_key` in the config file to route messages to a specific broker.
|
120
|
+
If a key named `partition_key` exists in a message, this plugin uses its value as the partition key.
|
121
|
+
|
122
|
+
|default_partition_key|partition_key| behavior |
|
123
|
+
|-|-|-|
|
124
|
+
|Not set|Not exists| All messages are sent in round-robin |
|
125
|
+
|Set| Not exists| All messages are sent to specific broker |
|
126
|
+
|Not set| Exists | Messages which have partition_key record are sent to specific broker, others are sent in round-robin|
|
127
|
+
|Set| Exists | Messages which have partition_key record are sent to specific broker with partition_key, others are sent to specific broker with default_partition_key|
|
103
128
|
|
104
129
|
## Contributing
|
105
130
|
|
data/fluent-plugin-kafka.gemspec
CHANGED
@@ -12,7 +12,7 @@ Gem::Specification.new do |gem|
|
|
12
12
|
gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
|
13
13
|
gem.name = "fluent-plugin-kafka"
|
14
14
|
gem.require_paths = ["lib"]
|
15
|
-
gem.version = '0.0.
|
15
|
+
gem.version = '0.0.13'
|
16
16
|
gem.add_dependency 'fluentd'
|
17
17
|
gem.add_dependency 'poseidon'
|
18
18
|
gem.add_dependency 'ltsv'
|
@@ -9,7 +9,7 @@ class Fluent::KafkaOutput < Fluent::Output
|
|
9
9
|
config_param :brokers, :string, :default => 'localhost:9092'
|
10
10
|
config_param :zookeeper, :string, :default => nil
|
11
11
|
config_param :default_topic, :string, :default => nil
|
12
|
-
config_param :
|
12
|
+
config_param :default_partition_key, :string, :default => nil
|
13
13
|
config_param :client_id, :string, :default => 'kafka'
|
14
14
|
config_param :output_data_type, :string, :default => 'json'
|
15
15
|
config_param :output_include_tag, :bool, :default => false
|
@@ -26,6 +26,10 @@ class Fluent::KafkaOutput < Fluent::Output
|
|
26
26
|
|
27
27
|
@seed_brokers = []
|
28
28
|
|
29
|
+
unless method_defined?(:log)
|
30
|
+
define_method("log") { $log }
|
31
|
+
end
|
32
|
+
|
29
33
|
def refresh_producer()
|
30
34
|
if @zookeeper
|
31
35
|
@seed_brokers = []
|
@@ -129,9 +133,10 @@ class Fluent::KafkaOutput < Fluent::Output
|
|
129
133
|
record['time'] = time if @output_include_time
|
130
134
|
record['tag'] = tag if @output_include_tag
|
131
135
|
topic = record['topic'] || self.default_topic || tag
|
132
|
-
|
136
|
+
partition_key = record['partition_key'] || @default_partition_key
|
133
137
|
value = @formatter.nil? ? parse_record(record) : @formatter.format(tag, time, record)
|
134
|
-
message
|
138
|
+
log.trace("message send to #{topic} with key: #{partition_key} and value: #{value}.")
|
139
|
+
message = Poseidon::MessageToSend.new(topic, value, partition_key)
|
135
140
|
@producer.send_messages([message])
|
136
141
|
end
|
137
142
|
rescue Exception => e
|
@@ -10,7 +10,7 @@ class Fluent::KafkaOutputBuffered < Fluent::BufferedOutput
|
|
10
10
|
config_param :brokers, :string, :default => 'localhost:9092'
|
11
11
|
config_param :zookeeper, :string, :default => nil
|
12
12
|
config_param :default_topic, :string, :default => nil
|
13
|
-
config_param :
|
13
|
+
config_param :default_partition_key, :string, :default => nil
|
14
14
|
config_param :client_id, :string, :default => 'kafka'
|
15
15
|
config_param :output_data_type, :string, :default => 'json'
|
16
16
|
config_param :output_include_tag, :bool, :default => false
|
@@ -141,6 +141,7 @@ class Fluent::KafkaOutputBuffered < Fluent::BufferedOutput
|
|
141
141
|
record['time'] = time if @output_include_time
|
142
142
|
record['tag'] = tag if @output_include_tag
|
143
143
|
topic = record['topic'] || @default_topic || tag
|
144
|
+
partition_key = record['partition_key'] || @default_partition_key
|
144
145
|
|
145
146
|
records_by_topic[topic] ||= 0
|
146
147
|
bytes_by_topic[topic] ||= 0
|
@@ -148,17 +149,20 @@ class Fluent::KafkaOutputBuffered < Fluent::BufferedOutput
|
|
148
149
|
record_buf = @formatter.nil? ? parse_record(record) : @formatter.format(tag, time, record)
|
149
150
|
record_buf_bytes = record_buf.bytesize
|
150
151
|
if messages.length > 0 and messages_bytes + record_buf_bytes > @kafka_agg_max_bytes
|
152
|
+
log.trace("#{messages.length} messages send.")
|
151
153
|
@producer.send_messages(messages)
|
152
154
|
messages = []
|
153
155
|
messages_bytes = 0
|
154
156
|
end
|
155
|
-
|
157
|
+
log.trace("message will send to #{topic} with key: #{partition_key} and value: #{record_buf}.")
|
158
|
+
messages << Poseidon::MessageToSend.new(topic, record_buf, partition_key)
|
156
159
|
messages_bytes += record_buf_bytes
|
157
160
|
|
158
161
|
records_by_topic[topic] += 1
|
159
162
|
bytes_by_topic[topic] += record_buf_bytes
|
160
163
|
}
|
161
164
|
if messages.length > 0
|
165
|
+
log.trace("#{messages.length} messages send.")
|
162
166
|
@producer.send_messages(messages)
|
163
167
|
end
|
164
168
|
log.debug "(records|bytes) (#{records_by_topic}|#{bytes_by_topic})"
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: fluent-plugin-kafka
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.0.
|
4
|
+
version: 0.0.13
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Hidemasa Togashi
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2015-
|
11
|
+
date: 2015-06-06 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: fluentd
|