fluent-plugin-kafka 0.1.3 → 0.1.4

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 1359c3e9e2c824e3ad36cf21eae0eeae3aea80f4
4
- data.tar.gz: 0593adb6cac966b2461b52cacdc9b4d7be06a112
3
+ metadata.gz: 12a7a6c9b46188b26eddffb85896388788d5b63b
4
+ data.tar.gz: 851a09fe0932c3ec7001d671cac9fded8a707b67
5
5
  SHA512:
6
- metadata.gz: d23dcc2070e66fb1eea47ad58318489db7c3ecc9b0e1dbf7fac434266bdfebcd5ae8933fd91208ca71c3422f00a2b354bc71425301a0ce3b8ccc036c774d2dae
7
- data.tar.gz: b72d588c40e65e30be3f491743898f8cc0585430191c20b5d40f35eb2e26f283092eb96d6d9350ed89f49173791ae757ed7928a6146fbc63d9ad83cbb19e5d90
6
+ metadata.gz: 71fa902e08bafcc1f29719246626c0d646d24478b859592b41cd06b38fc3f56d1a72bfd37f5e95891da1397d08cba39d117160aa531763a400186870f0a022d7
7
+ data.tar.gz: 1bf5eaa93b4cf5899e9c82951abb58e80d104eb074869a9d63d532e623fe51d61e71137b5706ce0ee4eb9e5c5626f29feef250306ad9b364abbe81db17464d5c
data/README.md CHANGED
@@ -70,6 +70,7 @@ See also [Poseidon::PartitionConsumer](http://www.rubydoc.info/github/bpot/posei
70
70
  @type kafka_group
71
71
  brokers <list of broker-host:port, separate with comma, must set>
72
72
  zookeepers <list of broker-host:port, separate with comma, must set>
73
+ zookeeper_path <broker path in zookeeper> :default => /brokers/ids # Set path in zookeeper for brokers
73
74
  consumer_group <consumer group name, must set>
74
75
  topics <listening topics(separate with comma',')>
75
76
  format <input text type (text|json|ltsv|msgpack)>
@@ -99,7 +100,7 @@ See also [Poseidon::PartitionConsumer](http://www.rubydoc.info/github/bpot/posei
99
100
  # Brokers: you can choose either brokers or zookeeper.
100
101
  brokers <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,.. # Set brokers directly
101
102
  zookeeper <zookeeper_host>:<zookeeper_port> # Set brokers via Zookeeper
102
-
103
+ zookeeper_path <broker path in zookeeper> :default => /brokers/ids # Set path in zookeeper for kafka
103
104
  default_topic <output topic>
104
105
  default_partition_key (string) :default => nil
105
106
  output_data_type (json|ltsv|msgpack|attr:<record name>|<formatter name>)
@@ -131,7 +132,7 @@ Messages will be sent broker in a round-robin manner as default by Poseidon, but
131
132
  If key name `partition_key` exists in a message, this plugin set its value of partition_key as key.
132
133
 
133
134
  |default_partition_key|partition_key| behavior |
134
- |-|-|
135
+ | --- | --- | --- |
135
136
  |Not set|Not exists| All messages are sent in round-robin |
136
137
  |Set| Not exists| All messages are sent to specific broker |
137
138
  |Not set| Exists | Messages which have partition_key record are sent to specific broker, others are sent in round-robin|
@@ -146,7 +147,7 @@ If key name `partition_key` exists in a message, this plugin set its value of pa
146
147
  # Brokers: you can choose either brokers or zookeeper.
147
148
  brokers <broker1_host>:<broker1_port>,<broker2_host>:<broker2_port>,.. # Set brokers directly
148
149
  zookeeper <zookeeper_host>:<zookeeper_port> # Set brokers via Zookeeper
149
-
150
+ zookeeper_path <broker path in zookeeper> :default => /brokers/ids # Set path in zookeeper for kafka
150
151
  default_topic <output topic>
151
152
  default_partition_key (string) :default => nil
152
153
  flush_interval <flush interval (sec) :default => 60>
@@ -174,18 +175,6 @@ Install snappy module before you use snappy compression.
174
175
 
175
176
  $ gem install snappy
176
177
 
177
- #### Load balancing
178
-
179
- Messages will be sent broker in a round-robin manner as default by Poseidon, but you can set `default_partition_key` in config file to route messages to a specific broker.
180
- If key name `partition_key` exists in a message, this plugin set its value of partition_key as key.
181
-
182
- |default_partition_key|partition_key| behavior |
183
- |-|-|
184
- |Not set|Not exists| All messages are sent in round-robin |
185
- |Set| Not exists| All messages are sent to specific broker |
186
- |Not set| Exists | Messages which have partition_key record are sent to specific broker, others are sent in round-robin|
187
- |Set| Exists | Messages which have partition_key record are sent to specific broker with parition_key, others are sent to specific broker with default_parition_key|
188
-
189
178
  ## Contributing
190
179
 
191
180
  1. Fork it
@@ -12,7 +12,7 @@ Gem::Specification.new do |gem|
12
12
  gem.test_files = gem.files.grep(%r{^(test|spec|features)/})
13
13
  gem.name = "fluent-plugin-kafka"
14
14
  gem.require_paths = ["lib"]
15
- gem.version = '0.1.3'
15
+ gem.version = '0.1.4'
16
16
  gem.add_dependency 'fluentd'
17
17
  gem.add_dependency 'poseidon_cluster'
18
18
  gem.add_dependency 'ltsv'
@@ -14,6 +14,8 @@ Note that you can choose to use either brokers or zookeeper.
14
14
  DESC
15
15
  config_param :zookeeper, :string, :default => nil,
16
16
  :desc => "Set brokers via Zookeeper: <zookeeper_host>:<zookeeper_port>"
17
+ config_param :zookeeper_path, :string, :default => '/brokers/ids',
18
+ :desc => "Path in zookeeper for Broker id. Default to /brokers/ids"
17
19
  config_param :default_topic, :string, :default => nil,
18
20
  :desc => "Output topic."
19
21
  config_param :default_partition_key, :string, :default => nil
@@ -33,6 +35,8 @@ DESC
33
35
  config_param :compression_codec, :string, :default => 'none',
34
36
  :desc => "The codec the producer uses to compress messages."
35
37
 
38
+ config_param :time_format, :string, :default => nil
39
+
36
40
  attr_accessor :output_data_type
37
41
  attr_accessor :field_separator
38
42
 
@@ -46,8 +50,8 @@ DESC
46
50
  if @zookeeper
47
51
  @seed_brokers = []
48
52
  z = Zookeeper.new(@zookeeper)
49
- z.get_children(:path => '/brokers/ids')[:children].each do |id|
50
- broker = Yajl.load(z.get(:path => "/brokers/ids/#{id}")[:data])
53
+ z.get_children(:path => @zookeeper_path)[:children].each do |id|
54
+ broker = Yajl.load(z.get(:path => @zookeeper_path + "/#{id}")[:data])
51
55
  @seed_brokers.push("#{broker['host']}:#{broker['port']}")
52
56
  end
53
57
  z.close
@@ -143,7 +147,13 @@ DESC
143
147
  begin
144
148
  chain.next
145
149
  es.each do |time,record|
146
- record['time'] = time if @output_include_time
150
+ if @output_include_time
151
+ if @time_format
152
+ record['time'] = Time.at(time).strftime(@time_format)
153
+ else
154
+ record['time'] = time
155
+ end
156
+ end
147
157
  record['tag'] = tag if @output_include_tag
148
158
  topic = record['topic'] || self.default_topic || tag
149
159
  partition_key = record['partition_key'] || @default_partition_key
@@ -18,6 +18,8 @@ DESC
18
18
  Set brokers via Zookeeper:
19
19
  <zookeeper_host>:<zookeeper_port>
20
20
  DESC
21
+ config_param :zookeeper_path, :string, :default => '/brokers/ids',
22
+ :desc => "Path in zookeeper for Broker id. Default to /brokers/ids"
21
23
  config_param :default_topic, :string, :default => nil,
22
24
  :desc => "Output topic"
23
25
  config_param :default_partition_key, :string, :default => nil
@@ -43,6 +45,8 @@ The codec the producer uses to compress messages.
43
45
  Supported codecs: (none|gzip|snappy)
44
46
  DESC
45
47
 
48
+ config_param :time_format, :string, :default => nil
49
+
46
50
  attr_accessor :output_data_type
47
51
  attr_accessor :field_separator
48
52
 
@@ -56,8 +60,8 @@ DESC
56
60
  if @zookeeper
57
61
  @seed_brokers = []
58
62
  z = Zookeeper.new(@zookeeper)
59
- z.get_children(:path => '/brokers/ids')[:children].each do |id|
60
- broker = Yajl.load(z.get(:path => "/brokers/ids/#{id}")[:data])
63
+ z.get_children(:path => @zookeeper_path)[:children].each do |id|
64
+ broker = Yajl.load(z.get(:path => @zookeeper_path + "/#{id}")[:data])
61
65
  @seed_brokers.push("#{broker['host']}:#{broker['port']}")
62
66
  end
63
67
  z.close
@@ -146,7 +150,14 @@ DESC
146
150
  messages_bytes = 0
147
151
  begin
148
152
  chunk.msgpack_each { |tag, time, record|
149
- record['time'] = time if @output_include_time
153
+ if @output_include_time
154
+ if @time_format
155
+ record['time'] = Time.at(time).strftime(@time_format)
156
+ else
157
+ record['time'] = time
158
+ end
159
+ end
160
+
150
161
  record['tag'] = tag if @output_include_tag
151
162
  topic = record['topic'] || @default_topic || tag
152
163
  partition_key = record['partition_key'] || @default_partition_key
metadata CHANGED
@@ -1,97 +1,97 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: fluent-plugin-kafka
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.1.3
4
+ version: 0.1.4
5
5
  platform: ruby
6
6
  authors:
7
7
  - Hidemasa Togashi
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2016-02-13 00:00:00.000000000 Z
11
+ date: 2016-05-05 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: fluentd
15
15
  requirement: !ruby/object:Gem::Requirement
16
16
  requirements:
17
- - - '>='
17
+ - - ">="
18
18
  - !ruby/object:Gem::Version
19
19
  version: '0'
20
20
  type: :runtime
21
21
  prerelease: false
22
22
  version_requirements: !ruby/object:Gem::Requirement
23
23
  requirements:
24
- - - '>='
24
+ - - ">="
25
25
  - !ruby/object:Gem::Version
26
26
  version: '0'
27
27
  - !ruby/object:Gem::Dependency
28
28
  name: poseidon_cluster
29
29
  requirement: !ruby/object:Gem::Requirement
30
30
  requirements:
31
- - - '>='
31
+ - - ">="
32
32
  - !ruby/object:Gem::Version
33
33
  version: '0'
34
34
  type: :runtime
35
35
  prerelease: false
36
36
  version_requirements: !ruby/object:Gem::Requirement
37
37
  requirements:
38
- - - '>='
38
+ - - ">="
39
39
  - !ruby/object:Gem::Version
40
40
  version: '0'
41
41
  - !ruby/object:Gem::Dependency
42
42
  name: ltsv
43
43
  requirement: !ruby/object:Gem::Requirement
44
44
  requirements:
45
- - - '>='
45
+ - - ">="
46
46
  - !ruby/object:Gem::Version
47
47
  version: '0'
48
48
  type: :runtime
49
49
  prerelease: false
50
50
  version_requirements: !ruby/object:Gem::Requirement
51
51
  requirements:
52
- - - '>='
52
+ - - ">="
53
53
  - !ruby/object:Gem::Version
54
54
  version: '0'
55
55
  - !ruby/object:Gem::Dependency
56
56
  name: yajl-ruby
57
57
  requirement: !ruby/object:Gem::Requirement
58
58
  requirements:
59
- - - '>='
59
+ - - ">="
60
60
  - !ruby/object:Gem::Version
61
61
  version: '0'
62
62
  type: :runtime
63
63
  prerelease: false
64
64
  version_requirements: !ruby/object:Gem::Requirement
65
65
  requirements:
66
- - - '>='
66
+ - - ">="
67
67
  - !ruby/object:Gem::Version
68
68
  version: '0'
69
69
  - !ruby/object:Gem::Dependency
70
70
  name: msgpack
71
71
  requirement: !ruby/object:Gem::Requirement
72
72
  requirements:
73
- - - '>='
73
+ - - ">="
74
74
  - !ruby/object:Gem::Version
75
75
  version: '0'
76
76
  type: :runtime
77
77
  prerelease: false
78
78
  version_requirements: !ruby/object:Gem::Requirement
79
79
  requirements:
80
- - - '>='
80
+ - - ">="
81
81
  - !ruby/object:Gem::Version
82
82
  version: '0'
83
83
  - !ruby/object:Gem::Dependency
84
84
  name: zookeeper
85
85
  requirement: !ruby/object:Gem::Requirement
86
86
  requirements:
87
- - - '>='
87
+ - - ">="
88
88
  - !ruby/object:Gem::Version
89
89
  version: '0'
90
90
  type: :runtime
91
91
  prerelease: false
92
92
  version_requirements: !ruby/object:Gem::Requirement
93
93
  requirements:
94
- - - '>='
94
+ - - ">="
95
95
  - !ruby/object:Gem::Version
96
96
  version: '0'
97
97
  description: Fluentd plugin for Apache Kafka > 0.8
@@ -121,17 +121,17 @@ require_paths:
121
121
  - lib
122
122
  required_ruby_version: !ruby/object:Gem::Requirement
123
123
  requirements:
124
- - - '>='
124
+ - - ">="
125
125
  - !ruby/object:Gem::Version
126
126
  version: '0'
127
127
  required_rubygems_version: !ruby/object:Gem::Requirement
128
128
  requirements:
129
- - - '>='
129
+ - - ">="
130
130
  - !ruby/object:Gem::Version
131
131
  version: '0'
132
132
  requirements: []
133
133
  rubyforge_project:
134
- rubygems_version: 2.0.14
134
+ rubygems_version: 2.4.8
135
135
  signing_key:
136
136
  specification_version: 4
137
137
  summary: Fluentd plugin for Apache Kafka > 0.8