fluent-plugin-kafka 0.6.5 → 0.6.6
This diff shows the changes between publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
- checksums.yaml +4 -4
- data/ChangeLog +4 -0
- data/README.md +47 -2
- data/fluent-plugin-kafka.gemspec +1 -1
- data/lib/fluent/plugin/out_kafka2.rb +11 -5
- data/lib/fluent/plugin/out_kafka_buffered.rb +12 -9
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f03db16ea9d9f3e937b8f2e5ee3840ac92929f98
+  data.tar.gz: 8f5db823ac54b6154c38571585e18915b38d0e71
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5a032b3b1cbabc0ec9678257151cba70d98338999bd028c865419790817259e63eb7172c24a9956db3bfe0ee2b35a6c27615bbb1e6c39e466dfac39cf9282bb0
+  data.tar.gz: fa7da134390f0532f399b5e0d802c377e21d917d4ed606e0ec6d1df1b0a1e81c54e37b7897643fc191cf3d41ba420e1859424170b3fbe07fd7039413300c61c5
data/ChangeLog
CHANGED
data/README.md
CHANGED
@@ -129,7 +129,7 @@ Consuming topic name is used for event tag. So when the target topic name is `ap
 
 This plugin uses ruby-kafka producer for writing data. This plugin works with recent kafka versions.
 
-<match
+<match app.**>
   @type kafka_buffered
 
   # Brokers: you can choose either brokers or zookeeper. If you are not familiar with zookeeper, use brokers parameters.
@@ -137,6 +137,10 @@ This plugin uses ruby-kafka producer for writing data. This plugin works with re
   zookeeper <zookeeper_host>:<zookeeper_port> # Set brokers via Zookeeper
   zookeeper_path <broker path in zookeeper> :default => /brokers/ids # Set path in zookeeper for kafka
 
+  topic_key (string) :default => 'topic'
+  partition_key (string) :default => 'partition'
+  partition_key_key (string) :default => 'partition_key'
+  message_key_key (string) :default => 'message_key'
   default_topic (string) :default => nil
   default_partition_key (string) :default => nil
   default_message_key (string) :default => nil
@@ -207,11 +211,52 @@ If key name `partition_key` exists in a message, this plugin set its value of pa
 
 If key name `message_key` exists in a message, this plugin publishes the value of message_key to kafka and can be read by consumers. Same message key will be assigned to all messages by setting `default_message_key` in config file. If message_key exists and if partition_key is not set explicitly, messsage_key will be used for partitioning.
 
+### Output plugin
+
+This plugin is for v1.0. This will be `out_kafka` plugin in the feature.
+
+<match app.**>
+  @type kafka2
+
+  brokers <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,.. # Set brokers directly
+
+  topic_key (string) :default => 'topic'
+  partition_key (string) :default => 'partition'
+  partition_key_key (string) :default => 'partition_key'
+  message_key_key (string) :default => 'message_key'
+  default_topic (string) :default => nil
+  default_partition_key (string) :default => nil
+  default_message_key (string) :default => nil
+  exclude_topic_key (bool) :default => false
+  exclude_partition_key (bool) :default => false
+  get_kafka_client_log (bool) :default => false
+
+  <format>
+    @type (json|ltsv|msgpack|attr:<record name>|<formatter name>) :default => json
+  </format>
+  <inject>
+    tag_key tag
+    time_key time
+  </inject>
+
+  # See fluentd document for buffer related parameters: http://docs.fluentd.org/articles/buffer-plugin-overview
+  # Buffer chunk key should be same with topic_key. If value is not found in the record, default_topic is used.
+  <buffer topic>
+    flush_interavl 10s
+  </buffer>
+
+  # ruby-kafka producer options
+  max_send_retries (integer) :default => 1
+  required_acks (integer) :default => -1
+  ack_timeout (integer) :default => nil (Use default of ruby-kafka)
+  compression_codec (gzip|snappy) :default => nil (No compression)
+</match>
+
 ### Non-buffered output plugin
 
 This plugin uses ruby-kafka producer for writing data. For performance and reliability concerns, use `kafka_bufferd` output instead. This is mainly for testing.
 
-<match
+<match app.**>
   @type kafka
 
   # Brokers: you can choose either brokers or zookeeper.
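The new `topic_key`, `partition_key`, `partition_key_key`, and `message_key_key` options documented above let each record carry its own Kafka routing information. As a rough illustration only (plain Ruby, not plugin code; the field names are just the README defaults), resolution for a single record looks like this:

```ruby
# Illustrative sketch: how per-record fields map to Kafka routing under the
# default field names from the README. Not the plugin's actual implementation.
record = {
  "topic"         => "app.event",  # read via topic_key
  "partition_key" => "user-42",    # read via partition_key_key
  "message_key"   => "msg-1",      # read via message_key_key
  "message"       => "hello"
}

default_topic         = nil
default_partition_key = nil
tag                   = "app.access"

topic         = record["topic"] || default_topic || tag   # default_topic, then the tag, as fallbacks
partition_key = record["partition_key"] || default_partition_key
message_key   = record["message_key"]                     # used for partitioning when partition_key is absent

puts "produce to topic=#{topic}, partition_key=#{partition_key}, message_key=#{message_key}"
```

With the corresponding `exclude_*` options enabled, those routing fields would additionally be stripped from the payload before it is written to Kafka.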
data/fluent-plugin-kafka.gemspec
CHANGED
@@ -13,7 +13,7 @@ Gem::Specification.new do |gem|
   gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
   gem.name = "fluent-plugin-kafka"
   gem.require_paths = ["lib"]
-  gem.version = '0.6.5'
+  gem.version = '0.6.6'
   gem.required_ruby_version = ">= 2.1.0"
 
   gem.add_dependency "fluentd", [">= 0.10.58", "< 2"]
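To pick up this release in a Fluentd deployment, a Gemfile entry along these lines would work (the constraint shown is only an example, not part of the diff):

```ruby
# Gemfile (example constraint only)
source "https://rubygems.org"

gem "fluent-plugin-kafka", "~> 0.6.6"
```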
data/lib/fluent/plugin/out_kafka2.rb
CHANGED
@@ -15,10 +15,14 @@ module Fluent::Plugin
 Set brokers directly:
 <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,..
 DESC
+  config_param :topic_key, :string, :default => 'topic', :desc => "Field for kafka topic"
   config_param :default_topic, :string, :default => nil,
                :desc => "Default output topic when record doesn't have topic field"
+  config_param :message_key_key, :string, :default => 'message_key', :desc => "Field for kafka message key"
   config_param :default_message_key, :string, :default => nil
+  config_param :partition_key_key, :string, :default => 'partition_key', :desc => "Field for kafka partition key"
   config_param :default_partition_key, :string, :default => nil
+  config_param :partition_key, :string, :default => 'partition', :desc => "Field for kafka partition"
   config_param :default_partition, :integer, :default => nil
   config_param :client_id, :string, :default => 'fluentd'
   config_param :exclude_partition_key, :bool, :default => false,
@@ -123,6 +127,8 @@ DESC
         @router.emit("fluent_kafka_stats.#{event.name}", Time.now.to_i, message)
       end
     end
+
+    @topic_key_sym = @topic_key.to_sym
   end
 
   def multi_workers_ready?
@@ -168,7 +174,7 @@ DESC
   # TODO: optimize write performance
   def write(chunk)
     tag = chunk.metadata.tag
-    topic = chunk.metadata.variables[
+    topic = chunk.metadata.variables[@topic_key_sym] || @default_topic || tag
     producer = @kafka.topic_producer(topic, @producer_opts)
 
     messages = 0
@@ -178,10 +184,10 @@ DESC
     chunk.msgpack_each { |time, record|
       begin
         record = inject_values_to_record(tag, time, record)
-        record.delete(
-        partition_key = (@exclude_partition_key ? record.delete(
-        partition = (@exclude_partition ? record.delete(
-        message_key = (@exclude_message_key ? record.delete(
+        record.delete(@topic_key) if @exclude_topic_key
+        partition_key = (@exclude_partition_key ? record.delete(@partition_key_key) : record[@partition_key_key]) || @default_partition_key
+        partition = (@exclude_partition ? record.delete(@partition_key) : record[@partition_key]) || @default_partition
+        message_key = (@exclude_message_key ? record.delete(@message_key) : record[@message_key]) || @default_message_key
 
         record_buf = @formatter_proc.call(tag, time, record)
       rescue StandardError => e
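The change to `write` above gives each buffer chunk a topic taken from the chunk's metadata variable, falling back to `default_topic` and then the event tag, while the per-record `exclude_*` flags decide whether the routing fields stay in the payload. A minimal standalone Ruby sketch of that fallback logic, with names like `resolve_topic` and `routing_field` invented here for illustration (they are not plugin methods):

```ruby
# Sketch only: mirrors the fallback order added in out_kafka2#write.
def resolve_topic(chunk_variables, default_topic, tag)
  # Buffer chunk variable (from "<buffer topic>") wins, then default_topic, then the event tag.
  chunk_variables[:topic] || default_topic || tag
end

def routing_field(record, field, exclude:, default: nil)
  # When exclude is true the field is removed so it is not duplicated inside the message body.
  value = exclude ? record.delete(field) : record[field]
  value || default
end

record = { "partition_key" => "user-42", "message" => "hello" }
topic = resolve_topic({ topic: "app.event" }, nil, "app.access")
partition_key = routing_field(record, "partition_key", exclude: true)

puts "topic=#{topic} partition_key=#{partition_key} remaining record=#{record}"
# => topic=app.event partition_key=user-42 remaining record={"message"=>"hello"}
```

Keeping the buffer chunk key aligned with `topic_key` (the `<buffer topic>` example in the README) is what makes the chunk-level lookup meaningful.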
data/lib/fluent/plugin/out_kafka_buffered.rb
CHANGED
@@ -16,12 +16,15 @@ DESC
 Set brokers via Zookeeper:
 <zookeeper_host>:<zookeeper_port>
 DESC
-  config_param :zookeeper_path, :string, :default => '/brokers/ids',
-
-  config_param :
-
+  config_param :zookeeper_path, :string, :default => '/brokers/ids', :desc => "Path in path for Broker id. Default to /brokers/ids"
+
+  config_param :topic_key, :string, :default => 'topic', :desc => "Field for kafka topic"
+  config_param :default_topic, :string, :default => nil, :desc => "Default output topic when record doesn't have topic field"
+  config_param :message_key_key, :string, :default => 'message_key', :desc => "Field for kafka message key"
   config_param :default_message_key, :string, :default => nil
+  config_param :partition_key_key, :string, :default => 'partition_key', :desc => "Field for kafka partition key"
   config_param :default_partition_key, :string, :default => nil
+  config_param :partition_key, :string, :default => 'partition', :desc => "Field for kafka partition"
   config_param :default_partition, :integer, :default => nil
   config_param :client_id, :string, :default => 'kafka'
   config_param :output_data_type, :string, :default => 'json',
@@ -40,7 +43,7 @@ Set true to remove partition from data
 DESC
   config_param :exclude_message_key, :bool, :default => false,
                :desc => <<-DESC
-Set true to remove
+Set true to remove message key from data
 DESC
   config_param :exclude_topic_key, :bool, :default => false,
                :desc => <<-DESC
@@ -289,10 +292,10 @@ DESC
       end
 
       record['tag'] = tag if @output_include_tag
-      topic = (@exclude_topic_key ? record.delete(
-      partition_key = (@exclude_partition_key ? record.delete(
-      partition = (@exclude_partition ? record.delete(
-      message_key = (@exclude_message_key ? record.delete(
+      topic = (@exclude_topic_key ? record.delete(@topic_key) : record[@topic_key]) || def_topic
+      partition_key = (@exclude_partition_key ? record.delete(@partition_key_key) : record[@partition_key_key]) || @default_partition_key
+      partition = (@exclude_partition ? record.delete(@partition) : record[@partition]) || @default_partition
+      message_key = (@exclude_message_key ? record.delete(@message_key_key) : record[@message_key_key]) || @default_message_key
 
       records_by_topic[topic] ||= 0
       bytes_by_topic[topic] ||= 0
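Unlike out_kafka2, the buffered output reads the topic from each record itself (via the field named by `topic_key`), falling back to a default topic when the field is missing. A small sketch of that per-record selection and of what the exclude flags do to the payload; `pick_topic` and the sample values are illustrative only, not plugin internals:

```ruby
# Sketch only: per-record topic selection as in out_kafka_buffered.
def pick_topic(record, topic_key, default_topic, exclude_topic_key)
  # With exclude_topic_key the field is removed from the record before serialization.
  topic = exclude_topic_key ? record.delete(topic_key) : record[topic_key]
  topic || default_topic
end

records = [
  { "topic" => "app.event", "message" => "a" },
  { "message" => "b" }  # no topic field -> default_topic is used
]

records.each do |record|
  topic = pick_topic(record, "topic", "fallback_topic", true)
  puts "#{topic}: #{record}"  # the topic field has been stripped from the first record
end
```

Stripping the field on exclusion avoids shipping the routing metadata twice, once as the Kafka topic or key and once inside the message body.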
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-kafka
 version: !ruby/object:Gem::Version
-  version: 0.6.5
+  version: 0.6.6
 platform: ruby
 authors:
 - Hidemasa Togashi
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-12-
+date: 2017-12-25 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: fluentd