logstash-output-dis 1.1.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (58) hide show
  1. checksums.yaml +7 -0
  2. data/Gemfile +11 -0
  3. data/LICENSE +13 -0
  4. data/NOTICE.TXT +5 -0
  5. data/README.md +70 -0
  6. data/lib/com/fasterxml/jackson/core/jackson-annotations/2.8.11/jackson-annotations-2.8.11.jar +0 -0
  7. data/lib/com/fasterxml/jackson/core/jackson-core/2.8.11/jackson-core-2.8.11.jar +0 -0
  8. data/lib/com/fasterxml/jackson/core/jackson-databind/2.8.11.3/jackson-databind-2.8.11.3.jar +0 -0
  9. data/lib/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar +0 -0
  10. data/lib/com/huaweicloud/dis/huaweicloud-dis-kafka-adapter/1.2.1/huaweicloud-dis-kafka-adapter-1.2.1.jar +0 -0
  11. data/lib/com/huaweicloud/dis/huaweicloud-dis-kafka-adapter-common/1.2.1/huaweicloud-dis-kafka-adapter-common-1.2.1.jar +0 -0
  12. data/lib/com/huaweicloud/dis/huaweicloud-sdk-java-dis/1.3.3/huaweicloud-sdk-java-dis-1.3.3.jar +0 -0
  13. data/lib/com/huaweicloud/dis/huaweicloud-sdk-java-dis-iface/1.3.3/huaweicloud-sdk-java-dis-iface-1.3.3.jar +0 -0
  14. data/lib/commons-codec/commons-codec/1.9/commons-codec-1.9.jar +0 -0
  15. data/lib/commons-io/commons-io/2.4/commons-io-2.4.jar +0 -0
  16. data/lib/commons-logging/commons-logging/1.2/commons-logging-1.2.jar +0 -0
  17. data/lib/joda-time/joda-time/2.8.1/joda-time-2.8.1.jar +0 -0
  18. data/lib/log4j/log4j/1.2.17/log4j-1.2.17.jar +0 -0
  19. data/lib/logstash/outputs/dis.rb +318 -0
  20. data/lib/logstash-output-dis_jars.rb +5 -0
  21. data/lib/org/apache/httpcomponents/httpasyncclient/4.1.3/httpasyncclient-4.1.3.jar +0 -0
  22. data/lib/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar +0 -0
  23. data/lib/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar +0 -0
  24. data/lib/org/apache/httpcomponents/httpcore-nio/4.4.6/httpcore-nio-4.4.6.jar +0 -0
  25. data/lib/org/apache/httpcomponents/httpmime/4.5.2/httpmime-4.5.2.jar +0 -0
  26. data/lib/org/apache/logging/log4j/log4j-1.2-api/2.6.2/log4j-1.2-api-2.6.2.jar +0 -0
  27. data/lib/org/apache/logging/log4j/log4j-api/2.6.2/log4j-api-2.6.2.jar +0 -0
  28. data/lib/org/apache/logging/log4j/log4j-core/2.6.2/log4j-core-2.6.2.jar +0 -0
  29. data/lib/org/slf4j/slf4j-api/1.7.21/slf4j-api-1.7.21.jar +0 -0
  30. data/lib/org/slf4j/slf4j-log4j12/1.7.21/slf4j-log4j12-1.7.21.jar +0 -0
  31. data/lib/org/xerial/snappy/snappy-java/1.1.7.2/snappy-java-1.1.7.2.jar +0 -0
  32. data/logstash-output-dis.gemspec +36 -0
  33. data/spec/unit/outputs/dis_spec.rb +128 -0
  34. data/vendor/jar-dependencies/runtime-jars/commons-codec-1.9.jar +0 -0
  35. data/vendor/jar-dependencies/runtime-jars/commons-io-2.4.jar +0 -0
  36. data/vendor/jar-dependencies/runtime-jars/commons-logging-1.2.jar +0 -0
  37. data/vendor/jar-dependencies/runtime-jars/httpasyncclient-4.1.3.jar +0 -0
  38. data/vendor/jar-dependencies/runtime-jars/httpclient-4.5.2.jar +0 -0
  39. data/vendor/jar-dependencies/runtime-jars/httpcore-4.4.4.jar +0 -0
  40. data/vendor/jar-dependencies/runtime-jars/httpcore-nio-4.4.6.jar +0 -0
  41. data/vendor/jar-dependencies/runtime-jars/httpmime-4.5.2.jar +0 -0
  42. data/vendor/jar-dependencies/runtime-jars/huaweicloud-dis-kafka-adapter-1.2.1.jar +0 -0
  43. data/vendor/jar-dependencies/runtime-jars/huaweicloud-dis-kafka-adapter-common-1.2.1.jar +0 -0
  44. data/vendor/jar-dependencies/runtime-jars/huaweicloud-sdk-java-dis-1.3.3.jar +0 -0
  45. data/vendor/jar-dependencies/runtime-jars/huaweicloud-sdk-java-dis-iface-1.3.3.jar +0 -0
  46. data/vendor/jar-dependencies/runtime-jars/jackson-annotations-2.8.11.jar +0 -0
  47. data/vendor/jar-dependencies/runtime-jars/jackson-core-2.8.11.jar +0 -0
  48. data/vendor/jar-dependencies/runtime-jars/jackson-databind-2.8.11.3.jar +0 -0
  49. data/vendor/jar-dependencies/runtime-jars/joda-time-2.8.1.jar +0 -0
  50. data/vendor/jar-dependencies/runtime-jars/log4j-1.2-api-2.6.2.jar +0 -0
  51. data/vendor/jar-dependencies/runtime-jars/log4j-1.2.17.jar +0 -0
  52. data/vendor/jar-dependencies/runtime-jars/log4j-api-2.6.2.jar +0 -0
  53. data/vendor/jar-dependencies/runtime-jars/log4j-core-2.6.2.jar +0 -0
  54. data/vendor/jar-dependencies/runtime-jars/protobuf-java-2.5.0.jar +0 -0
  55. data/vendor/jar-dependencies/runtime-jars/slf4j-api-1.7.21.jar +0 -0
  56. data/vendor/jar-dependencies/runtime-jars/slf4j-log4j12-1.7.21.jar +0 -0
  57. data/vendor/jar-dependencies/runtime-jars/snappy-java-1.1.7.2.jar +0 -0
  58. metadata +212 -0
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA1:
3
+ metadata.gz: c1b97f4e8aa0bb1c00789410e2a4c5ce410a7cb3
4
+ data.tar.gz: a30635f4d15b0d268acb8802475f67d9dcea9dc1
5
+ SHA512:
6
+ metadata.gz: '099ec05c009c6c54e0d8c8de13ffdb9042fd47504db2a230af830492c515ab2025f57e79e5cc1a47912a03267afd757ebb7f60db1e014cd7f3906f0cba8fab99'
7
+ data.tar.gz: 7f2147aa307fca93ec180eb6a5623b6451f50f36901647385f1ceedce85abfed32d94034a84c799ba30c6ed46d4715287b03e683c0280f0270e979f4aac19de9
data/Gemfile ADDED
@@ -0,0 +1,11 @@
1
+ source 'http://gems.ruby-china.com'
2
+
3
+ gemspec
4
+
5
+ logstash_path = ENV["LOGSTASH_PATH"] || "../../logstash"
6
+ use_logstash_source = ENV["LOGSTASH_SOURCE"] && ENV["LOGSTASH_SOURCE"].to_s == "1"
7
+
8
+ if Dir.exist?(logstash_path) && use_logstash_source
9
+ gem 'logstash-core', :path => "#{logstash_path}/logstash-core"
10
+ gem 'logstash-core-plugin-api', :path => "#{logstash_path}/logstash-core-plugin-api"
11
+ end
data/LICENSE ADDED
@@ -0,0 +1,13 @@
1
+ Copyright (c) 2012-2018 Elasticsearch <http://www.elastic.co>
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
data/NOTICE.TXT ADDED
@@ -0,0 +1,5 @@
1
+ Elasticsearch
2
+ Copyright 2012-2015 Elasticsearch
3
+
4
+ This product includes software developed by The Apache Software
5
+ Foundation (http://www.apache.org/).
data/README.md ADDED
@@ -0,0 +1,70 @@
1
+ # Logstash Output DIS
2
+
3
+ This is a plugin for [Logstash](https://github.com/elastic/logstash). It will send log records to a DIS stream, using the DIS-Kafka-Adapter.
4
+
5
+ ## Requirements
6
+
7
+ To get started using this plugin, you will need four things:
8
+
9
+ 1. JDK 1.8 +
10
+ 2. JRuby with the Bundler gem installed, 9.0.0.0 +
11
+ 3. Maven
12
+ 4. Logstash, 6.0.0 to 6.1.0
13
+
14
+ ## Installation
15
+ 当前插件未发布到`RubyGems.org`,无法直接从`RubyGems.org`安装插件,只能从本地安装。
16
+ ### 0. 修改 RubyGems 镜像地址
17
  gem sources --add https://gems.ruby-china.com/ --remove https://rubygems.org/
18
+
19
+ ### 0. 安装 dis-kafka-adapter
20
+
21
+
22
+ ### 1. 安装 JRuby
23
+ ### 2. 安装 Bundler gem
24
+ gem install bundler
25
+
26
+ ### 3. 安装依赖
27
+ bundle install
28
+ rake install_jars
29
+ gem build logstash-output-dis.gemspec
30
+
31
+ ### 4. 编辑 Logstash 的`Gemfile`,并添加本地插件路径
32
+ gem "logstash-output-dis", :path => "/your/local/logstash-output-dis"
33
+
34
+ ### 5. 安装插件到 Logstash
35
+ bin/logstash-plugin install --no-verify
36
+
37
+ ## Usage
38
+
39
+ ```properties
40
+ output
41
+ {
42
+ dis {
43
+ stream => ["YOUR_DIS_STREAM_NAME"]
44
+ endpoint => "https://dis.cn-north-1.myhuaweicloud.com"
45
+ ak => "YOUR_ACCESS_KEY_ID"
46
+ sk => "YOUR_SECRET_KEY_ID"
47
+ region => "cn-north-1"
48
+ project_id => "YOUR_PROJECT_ID"
49
+ group_id => "YOUR_GROUP_ID"
50
+ decorate_events => true
51
+ auto_offset_reset => "earliest"
52
+ }
53
+ }
54
+ ```
55
+
56
+ ## Configuration
57
+
58
+ ### Parameters
59
+
60
+ | Name | Description | Default |
61
+ | :----------------------- | :--------------------------------------- | :--------------------------------------- |
62
+ | stream | 指定在DIS服务上创建的通道名称。 | - |
63
+ | ak | 用户的Access Key,可从华为云控制台“我的凭证”页获取。 | - |
64
+ | sk | 用户的Secret Key,可从华为云控制台“我的凭证”页获取。 | - |
65
+ | region | 将数据上传到指定Region的DIS服务。 | cn-north-1 |
66
+ | project_id | 用户所属区域的项目ID,可从华为云控制台“我的凭证”页获取。 | - |
67
+ | endpoint | DIS对应Region的数据接口地址。 | https://dis.cn-north-1.myhuaweicloud.com |
68
+
69
+ ## License
70
+ [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0.html)
@@ -0,0 +1,318 @@
1
+ require 'logstash/namespace'
2
+ require 'logstash/outputs/base'
3
+ require 'java'
4
+ require 'logstash-output-dis_jars.rb'
5
+
6
+ java_import com.huaweicloud.dis.adapter.kafka.clients.producer.ProducerRecord
7
+
8
+ # Write events to a DIS stream, using DIS Kafka Adapter.
9
+ class LogStash::Outputs::Dis < LogStash::Outputs::Base
10
+ declare_threadsafe!
11
+
12
+ config_name 'dis'
13
+
14
+ default :codec, 'plain'
15
+
16
+ config :default_trusted_jks_enabled, :validate => :boolean, :default => false
17
+ config :security_token, :validate => :string
18
+ config :exception_retries, :validate => :number, :default => 8
19
+ config :records_retries, :validate => :number, :default => 20
20
+ config :proxy_host, :validate => :string
21
+ config :proxy_port, :validate => :number, :default => 80
22
+ config :proxy_protocol, :validate => ["http", "https"], :default => "http"
23
+ config :proxy_username, :validate => :string
24
+ config :proxy_password, :validate => :string
25
+ config :proxy_workstation, :validate => :string
26
+ config :proxy_domain, :validate => :string
27
+ config :proxy_non_proxy_hosts, :validate => :string
28
+
29
+ # The producer will attempt to batch records together into fewer requests whenever multiple
30
+ # records are being sent to the same partition. This helps performance on both the client
31
+ # and the server. This configuration controls the default batch size in bytes.
32
+ config :batch_size, :validate => :number, :default => 16384
33
+ config :batch_count, :validate => :number, :default => 5000
34
+
35
+ # The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
36
+ config :buffer_memory, :validate => :number, :default => 33554432
37
+ config :buffer_count, :validate => :number, :default => 5000
38
+ # The producer groups together any records that arrive in between request
39
+ # transmissions into a single batched request. Normally this occurs only under
40
+ # load when records arrive faster than they can be sent out. However in some circumstances
41
+ # the client may want to reduce the number of requests even under moderate load.
42
+ # This setting accomplishes this by adding a small amount of artificial delay—that is,
43
+ # rather than immediately sending out a record the producer will wait for up to the given delay
44
+ # to allow other records to be sent so that the sends can be batched together.
45
+ config :linger_ms, :validate => :number, :default => 50
46
+ config :block_on_buffer_full, :validate => :boolean, :default => false
47
+ # block time when buffer is full
48
+ config :max_block_ms, :validate => :number, :default => 60000
49
+ # max wait time in single backoff
50
+ config :backoff_max_interval_ms, :validate => :number, :default => 30000
51
+ config :max_in_flight_requests_per_connection, :validate => :number, :default => 50
52
+ config :records_retriable_error_code, :validate => :string, :default => "DIS.4303,DIS.5"
53
+ config :order_by_partition, :validate => :boolean, :default => false
54
+ config :metadata_timeout_ms, :validate => :number, :default => 600000
55
+ # The key for the message
56
+ config :message_key, :validate => :string
57
+ config :partition_id, :validate => :string
58
+ # the timeout setting for initial metadata request to fetch topic metadata.
59
+ config :metadata_fetch_timeout_ms, :validate => :number, :default => 60000
60
+ # the max time in milliseconds before a metadata refresh is forced.
61
+ config :metadata_max_age_ms, :validate => :number, :default => 300000
62
+ # The size of the TCP receive buffer to use when reading data
63
+ config :receive_buffer_bytes, :validate => :number, :default => 32768
64
+ # The configuration controls the maximum amount of time the client will wait
65
+ # for the response of a request. If the response is not received before the timeout
66
+ # elapses the client will resend the request if necessary or fail the request if
67
+ # retries are exhausted.
68
+ config :request_timeout_ms, :validate => :string
69
+ # The default retry behavior is to retry until successful. To prevent data loss,
70
+ # the use of this setting is discouraged.
71
+ #
72
+ # If you choose to set `retries`, a value greater than zero will cause the
73
+ # client to only retry a fixed number of times. This will result in data loss
74
+ # if a transient error outlasts your retry count.
75
+ #
76
+ # A value less than zero is a configuration error.
77
+ config :retries, :validate => :number
78
+ # The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
79
+ config :retry_backoff_ms, :validate => :number, :default => 100
80
+
81
+
82
+ # The DIS stream to produce messages to
83
+ config :stream, :validate => :string, :required => true
84
+ # DIS Gateway endpoint
85
+ config :endpoint, :validate => :string, :default => "https://dis.cn-north-1.myhuaweicloud.com"
86
+ # The ProjectId of the specified region, it can be obtained from My Credential Page
87
+ config :project_id, :validate => :string
88
+ # Specifies use which region of DIS, now DIS only support cn-north-1
89
+ config :region, :validate => :string, :default => "cn-north-1"
90
+ # The Access Key ID for hwclouds, it can be obtained from My Credential Page
91
+ config :ak, :validate => :string, :required => true
92
+ # The Secret key ID is encrypted or not
93
+ config :is_sk_encrypted, :default => false
94
+ # The encrypt key used to encrypt the Secret Key Id
95
+ config :encrypt_key, :validate => :string
96
+ # The Secret Key ID for hwclouds, it can be obtained from My Credential Page
97
+ config :sk, :validate => :string, :required => true
98
+ # Serializer class for the key of the message
99
+ config :key_serializer, :validate => :string, :default => 'com.huaweicloud.dis.adapter.kafka.common.serialization.StringSerializer'
100
+ # Serializer class for the value of the message
101
+ config :value_serializer, :validate => :string, :default => 'com.huaweicloud.dis.adapter.kafka.common.serialization.StringSerializer'
102
+
103
+ public
104
+ def register
105
+ @thread_batch_map = Concurrent::Hash.new
106
+
107
+ if !@retries.nil?
108
+ if @retries < 0
109
+ raise ConfigurationError, "A negative retry count (#{@retries}) is not valid. Must be a value >= 0"
110
+ end
111
+
112
+ @logger.warn("Kafka output is configured with finite retry. This instructs Logstash to LOSE DATA after a set number of send attempts fails. If you do not want to lose data if Kafka is down, then you must remove the retry setting.", :retries => @retries)
113
+ end
114
+
115
+
116
+ @producer = create_producer
117
+ if value_serializer == 'com.huaweicloud.dis.adapter.kafka.common.serialization.StringSerializer'
118
+ @codec.on_event do |event, data|
119
+ write_to_dis(event, data)
120
+ end
121
+ elsif value_serializer == 'com.huaweicloud.dis.adapter.kafka.common.serialization.ByteArraySerializer'
122
+ @codec.on_event do |event, data|
123
+ write_to_dis(event, data.to_java_bytes)
124
+ end
125
+ else
126
+ raise ConfigurationError, "'value_serializer' only supports com.huaweicloud.dis.adapter.kafka.common.serialization.ByteArraySerializer and com.huaweicloud.dis.adapter.kafka.common.serialization.StringSerializer"
127
+ end
128
+ end
129
+
130
+ # def register
131
+
132
+ def prepare(record)
133
+ # This output is threadsafe, so we need to keep a batch per thread.
134
+ @thread_batch_map[Thread.current].add(record)
135
+ end
136
+
137
+ def multi_receive(events)
138
+ t = Thread.current
139
+ if !@thread_batch_map.include?(t)
140
+ @thread_batch_map[t] = java.util.ArrayList.new(events.size)
141
+ end
142
+
143
+ events.each do |event|
144
+ break if event == LogStash::SHUTDOWN
145
+ @codec.encode(event)
146
+ end
147
+
148
+ batch = @thread_batch_map[t]
149
+ if batch.any?
150
+ retrying_send(batch)
151
+ batch.clear
152
+ end
153
+ end
154
+
155
+ def retrying_send(batch)
156
+ remaining = @retries;
157
+
158
+ while batch.any?
159
+ if !remaining.nil?
160
+ if remaining < 0
161
+ # TODO(sissel): Offer to DLQ? Then again, if it's a transient fault,
162
+ # DLQing would make things worse (you dlq data that would be successful
163
+ # after the fault is repaired)
164
+ logger.info("Exhausted user-configured retry count when sending to Kafka. Dropping these events.",
165
+ :max_retries => @retries, :drop_count => batch.count)
166
+ break
167
+ end
168
+
169
+ remaining -= 1
170
+ end
171
+
172
+ failures = []
173
+
174
+ futures = batch.collect do |record|
175
+ begin
176
+ # send() can throw an exception even before the future is created.
177
+ @producer.send(record)
178
+ rescue org.apache.kafka.common.errors.TimeoutException => e
179
+ failures << record
180
+ nil
181
+ rescue org.apache.kafka.common.errors.InterruptException => e
182
+ failures << record
183
+ nil
184
+ rescue com.huaweicloud.dis.adapter.kafka.common.errors.SerializationException => e
185
+ # TODO(sissel): Retrying will fail because the data itself has a problem serializing.
186
+ # TODO(sissel): Let's add DLQ here.
187
+ failures << record
188
+ nil
189
+ end
190
+ end.compact
191
+
192
+ futures.each_with_index do |future, i|
193
+ begin
194
+ result = future.get()
195
+ rescue => e
196
+ # TODO(sissel): Add metric to count failures, possibly by exception type.
197
+ logger.warn("KafkaProducer.send() failed: #{e}", :exception => e)
198
+ failures << batch[i]
199
+ end
200
+ end
201
+
202
+ # No failures? Cool. Let's move on.
203
+ break if failures.empty?
204
+
205
+ # Otherwise, retry with any failed transmissions
206
+ batch = failures
207
+ delay = @retry_backoff_ms / 1000.0
208
+ logger.info("Sending batch to DIS failed. Will retry after a delay.", :batch_size => batch.size,
209
+ :failures => failures.size, :sleep => delay);
210
+ sleep(delay)
211
+ end
212
+
213
+ end
214
+
215
+ def close
216
+ @producer.close
217
+ end
218
+
219
+ private
220
+
221
+ def write_to_dis(event, serialized_data)
222
+ stream = event.get("stream");
223
+ if stream.nil?
224
+ stream = @stream;
225
+ end
226
+
227
+ message_key = event.get("partition_key");
228
+ if message_key.nil?
229
+ message_key = @message_key;
230
+ end
231
+
232
+ partition_id = event.get("partition_id");
233
+
234
+ if message_key.nil? && partition_id.nil?
235
+ # record = ProducerRecord.new(event.sprintf(@stream), serialized_data)
236
+ record = ProducerRecord.new(stream, serialized_data)
237
+ elsif partition_id.nil?
238
+ # record = ProducerRecord.new(event.sprintf(@stream), event.sprintf(@message_key), serialized_data)
239
+ # record = ProducerRecord.new(stream, event.sprintf(@message_key), serialized_data)
240
+ record = ProducerRecord.new(stream, message_key, serialized_data)
241
+ else
242
+ record = ProducerRecord.new(stream, partition_id.to_i, message_key, serialized_data)
243
+ end
244
+ prepare(record)
245
+ rescue LogStash::ShutdownSignal
246
+ @logger.debug('DIS Kafka producer got shutdown signal')
247
+ rescue => e
248
+ @logger.warn('DIS kafka producer threw exception, restarting',
249
+ :exception => e)
250
+ end
251
+
252
+ def create_producer
253
+ begin
254
+ props = java.util.Properties.new
255
+ kafka = com.huaweicloud.dis.adapter.kafka.clients.producer.ProducerConfig
256
+
257
+ props.put("IS_DEFAULT_TRUSTED_JKS_ENABLED", default_trusted_jks_enabled.to_s)
258
+ props.put("security.token", security_token) unless security_token.nil?
259
+ props.put("exception.retries", exception_retries.to_s)
260
+ props.put("records.retries", records_retries.to_s)
261
+ props.put("PROXY_HOST", proxy_host) unless proxy_host.nil?
262
+ props.put("PROXY_PORT", proxy_port.to_s)
263
+ props.put("PROXY_PROTOCOL", proxy_protocol)
264
+ props.put("PROXY_USERNAME", proxy_username) unless proxy_username.nil?
265
+ props.put("PROXY_PASSWORD", proxy_password) unless proxy_password.nil?
266
+ props.put("PROXY_WORKSTATION", proxy_workstation) unless proxy_workstation.nil?
267
+ props.put("PROXY_DOMAIN", proxy_domain) unless proxy_domain.nil?
268
+ props.put("NON_PROXY_HOSTS", proxy_non_proxy_hosts) unless proxy_non_proxy_hosts.nil?
269
+
270
+ props.put("batch.size", batch_size.to_s)
271
+ props.put("batch.count", batch_count.to_s)
272
+ props.put("buffer.memory", buffer_memory.to_s)
273
+ props.put("buffer.count", buffer_count.to_s)
274
+ props.put("linger.ms", linger_ms.to_s)
275
+ props.put("block.on.buffer.full", block_on_buffer_full.to_s)
276
+ props.put("max.block.ms", max_block_ms.to_s)
277
+ props.put("backoff.max.interval.ms", backoff_max_interval_ms.to_s)
278
+ props.put("max.in.flight.requests.per.connection", max_in_flight_requests_per_connection.to_s)
279
+ props.put("records.retriable.error.code", records_retriable_error_code) unless records_retriable_error_code.nil?
280
+ props.put("order.by.partition", order_by_partition.to_s)
281
+ props.put("metadata.timeout.ms", metadata_timeout_ms.to_s)
282
+ # props.put(kafka::RETRIES_CONFIG, retries.to_s) unless retries.nil?
283
+ # props.put(kafka::RETRY_BACKOFF_MS_CONFIG, retry_backoff_ms.to_s)
284
+ props.put("key.deserializer", "com.huaweicloud.dis.adapter.kafka.common.serialization.StringDeserializer")
285
+ props.put("value.deserializer", "com.huaweicloud.dis.adapter.kafka.common.serialization.StringDeserializer")
286
+
287
+ # endpoint, project_id, region, ak, sk
288
+ props.put("endpoint", endpoint)
289
+ props.put("projectId", project_id)
290
+ props.put("region", region)
291
+ props.put("ak", ak)
292
+ if is_sk_encrypted
293
+ decrypted_sk = decrypt(@sk)
294
+ props.put("sk", decrypted_sk)
295
+ else
296
+ props.put("sk", sk)
297
+ end
298
+
299
+
300
+ com.huaweicloud.dis.adapter.kafka.clients.producer.DISKafkaProducer.new(props)
301
+ rescue => e
302
+ logger.error("Unable to create DIS Kafka producer from given configuration",
303
+ :kafka_error_message => e,
304
+ :cause => e.respond_to?(:getCause) ? e.getCause() : nil)
305
+ raise e
306
+ end
307
+ end
308
+
309
+ private
310
+ def decrypt(encrypted_sk)
311
+ com.huaweicloud.dis.util.encrypt.EncryptUtils.dec([@encrypt_key].to_java(java.lang.String), encrypted_sk)
312
+ rescue => e
313
+ logger.error("Unable to decrypt sk from given configuration",
314
+ :decrypt_error_message => e,
315
+ :cause => e.respond_to?(:getCause) ? e.getCause() : nil)
316
+ end
317
+
318
+ end #class LogStash::Outputs::Dis
@@ -0,0 +1,5 @@
1
+ # encoding: utf-8
2
+ require 'logstash/environment'
3
+
4
+ root_dir = File.expand_path(File.join(File.dirname(__FILE__), ".."))
5
+ LogStash::Environment.load_runtime_jars! File.join(root_dir, "vendor")
@@ -0,0 +1,36 @@
1
+ Gem::Specification.new do |s|
2
+
3
+ s.name = 'logstash-output-dis'
4
+ s.version = '1.1.1'
5
+ s.licenses = ['Apache License (2.0)']
6
+ s.summary = "Writes events to a DIS stream"
7
+ s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
8
+ s.authors = ['Data Ingestion Service']
9
+ s.email = 'dis@huaweicloud.com'
10
+ s.homepage = "https://www.huaweicloud.com/product/dis.html"
11
+ s.require_paths = ['lib', 'vendor/jar-dependencies']
12
+
13
+ # Files
14
+ s.files = Dir["lib/**/*","spec/**/*","*.gemspec","*.md","CONTRIBUTORS","Gemfile","LICENSE","NOTICE.TXT", "vendor/jar-dependencies/**/*.jar", "vendor/jar-dependencies/**/*.rb", "VERSION", "docs/**/*"]
15
+
16
+ # Tests
17
+ s.test_files = s.files.grep(%r{^(test|spec|features)/})
18
+
19
+ # Special flag to let us know this is actually a logstash plugin
20
+ s.metadata = { 'logstash_plugin' => 'true', 'group' => 'output'}
21
+
22
+ s.requirements << "jar 'com.huaweicloud.dis:huaweicloud-dis-kafka-adapter', '1.2.1'"
23
+ s.requirements << "jar 'org.slf4j:slf4j-log4j12', '1.7.21'"
24
+ s.requirements << "jar 'org.apache.logging.log4j:log4j-1.2-api', '2.6.2'"
25
+
26
+ s.add_development_dependency 'jar-dependencies', '~> 0.3.2'
27
+
28
+ # Gem dependencies
29
+ s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
30
+ s.add_runtime_dependency 'logstash-codec-plain'
31
+ s.add_runtime_dependency 'logstash-codec-json'
32
+
33
+ s.add_development_dependency 'logstash-devutils'
34
+ s.add_development_dependency 'poseidon'
35
+ s.add_development_dependency 'snappy'
36
+ end
@@ -0,0 +1,128 @@
1
+ # encoding: utf-8
2
+ require "logstash/devutils/rspec/spec_helper"
3
+ require 'logstash/outputs/dis'
4
+ require 'json'
5
+
6
+ describe "outputs/dis" do
7
+ let (:simple_dis_config) {{'stream' => 'test', 'project_id' => 'test_project_id', 'ak' => 'test_ak', 'sk' => 'test_sk'}}
8
+ let (:event) { LogStash::Event.new({'message' => 'hello', 'stream_name' => 'my_stream', 'host' => '127.0.0.1',
9
+ '@timestamp' => LogStash::Timestamp.now}) }
10
+
11
+ context 'when initializing' do
12
+ it "should register" do
13
+ output = LogStash::Plugin.lookup("output", "dis").new(simple_dis_config)
14
+ expect {output.register}.to_not raise_error
15
+ end
16
+
17
+ it 'should populate dis config with default values' do
18
+ dis = LogStash::Outputs::Dis.new(simple_dis_config)
19
+ insist {dis.endpoint} == 'https://dis.cn-north-1.myhuaweicloud.com'
20
+ insist {dis.stream} == 'test'
21
+ insist {dis.key_serializer} == 'com.huaweicloud.dis.adapter.kafka.common.serialization.StringSerializer'
22
+ end
23
+ end
24
+
25
+ context 'when outputting messages' do
26
+ #it 'should send logstash event to DIS' do
27
+ #expect_any_instance_of(com.huaweicloud.dis.adapter.kafka.clients.producer.DISKafkaProducer).to receive(:send)
28
+ #.with(an_instance_of(com.huaweicloud.dis.adapter.kafka.clients.producer.ProducerRecord)).and_call_original
29
+ #dis = LogStash::Outputs::Dis.new(simple_dis_config)
30
+ #dis.register
31
+ #dis.multi_receive([event])
32
+ #end
33
+
34
+ #it 'should support field referenced message_keys' do
35
+ #expect(com.huaweicloud.dis.adapter.kafka.clients.producer.ProducerRecord).to receive(:new)
36
+ #.with("test", "127.0.0.1", event.to_s).and_call_original
37
+ #expect_any_instance_of(com.huaweicloud.dis.adapter.kafka.clients.producer.DISKafkaProducer).to receive(:send).and_call_original
38
+ #dis = LogStash::Outputs::Dis.new(simple_dis_config.merge({"message_key" => "%{host}"}))
39
+ #dis.register
40
+ #dis.multi_receive([event])
41
+ #end
42
+ end
43
+
44
+ context "when DISKafkaProducer#send() raises an exception" do
45
+ let(:failcount) { (rand * 10).to_i }
46
+ let(:sendcount) { failcount + 1 }
47
+
48
+ let(:exception_classes) { [
49
+ com.huaweicloud.dis.adapter.kafka.common.errors.TimeoutException,
50
+ com.huaweicloud.dis.adapter.kafka.common.errors.InterruptException,
51
+ com.huaweicloud.dis.adapter.kafka.common.errors.SerializationException
52
+ ] }
53
+
54
+ before do
55
+ count = 0
56
+ expect_any_instance_of(com.huaweicloud.dis.adapter.kafka.clients.producer.DISKafkaProducer).to receive(:send)
57
+ .exactly(sendcount).times
58
+ .and_wrap_original do |m, *args|
59
+ if count < failcount # fail 'failcount' times in a row.
60
+ count += 1
61
+ # Pick an exception at random
62
+ raise exception_classes.shuffle.first.new("injected exception for testing")
63
+ else
64
+ #m.call(*args) # call original
65
+ end
66
+ end
67
+ end
68
+
69
+ it "should retry until successful" do
70
+ dis = LogStash::Outputs::Dis.new(simple_dis_config)
71
+ dis.register
72
+ dis.multi_receive([event])
73
+ end
74
+ end
75
+
76
+ context "when a send fails" do
77
+ context "and the default retries behavior is used" do
78
+ # Fail this many times and then finally succeed.
79
+ let(:failcount) { (rand * 10).to_i }
80
+
81
+ # Expect DISKafkaProducer.send() to get called again after every failure, plus the successful one.
82
+ let(:sendcount) { failcount + 1 }
83
+
84
+
85
+ it "should retry until successful" do
86
+ count = 0;
87
+
88
+ expect_any_instance_of(com.huaweicloud.dis.adapter.kafka.clients.producer.DISKafkaProducer).to receive(:send)
89
+ .exactly(sendcount).times
90
+ .and_wrap_original do |m, *args|
91
+ if count < failcount
92
+ count += 1
93
+ # inject some failures.
94
+
95
+ # Return a custom Future that will raise an exception to simulate a DIS send() problem.
96
+ future = java.util.concurrent.FutureTask.new { raise "Failed" }
97
+ future.run
98
+ future
99
+ else
100
+ #m.call(*args)
101
+ end
102
+ end
103
+ dis = LogStash::Outputs::Dis.new(simple_dis_config)
104
+ dis.register
105
+ dis.multi_receive([event])
106
+ end
107
+ end
108
+
109
+ context "and when retries is set by the user" do
110
+ let(:retries) { (rand * 10).to_i }
111
+ let(:max_sends) { retries + 1 }
112
+
113
+ it "should give up after retries are exhausted" do
114
+ expect_any_instance_of(com.huaweicloud.dis.adapter.kafka.clients.producer.DISKafkaProducer).to receive(:send)
115
+ .at_most(max_sends).times
116
+ .and_wrap_original do |m, *args|
117
+ # Always fail.
118
+ future = java.util.concurrent.FutureTask.new { raise "Failed" }
119
+ future.run
120
+ future
121
+ end
122
+ dis = LogStash::Outputs::Dis.new(simple_dis_config.merge("retries" => retries))
123
+ dis.register
124
+ dis.multi_receive([event])
125
+ end
126
+ end
127
+ end
128
+ end
metadata ADDED
@@ -0,0 +1,212 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: logstash-output-dis
3
+ version: !ruby/object:Gem::Version
4
+ version: 1.1.1
5
+ platform: ruby
6
+ authors:
7
+ - Data Ingestion Service
8
+ autorequire:
9
+ bindir: bin
10
+ cert_chain: []
11
+ date: 2019-04-23 00:00:00.000000000 Z
12
+ dependencies:
13
+ - !ruby/object:Gem::Dependency
14
+ requirement: !ruby/object:Gem::Requirement
15
+ requirements:
16
+ - - "~>"
17
+ - !ruby/object:Gem::Version
18
+ version: 0.3.2
19
+ name: jar-dependencies
20
+ prerelease: false
21
+ type: :development
22
+ version_requirements: !ruby/object:Gem::Requirement
23
+ requirements:
24
+ - - "~>"
25
+ - !ruby/object:Gem::Version
26
+ version: 0.3.2
27
+ - !ruby/object:Gem::Dependency
28
+ requirement: !ruby/object:Gem::Requirement
29
+ requirements:
30
+ - - ">="
31
+ - !ruby/object:Gem::Version
32
+ version: '1.60'
33
+ - - "<="
34
+ - !ruby/object:Gem::Version
35
+ version: '2.99'
36
+ name: logstash-core-plugin-api
37
+ prerelease: false
38
+ type: :runtime
39
+ version_requirements: !ruby/object:Gem::Requirement
40
+ requirements:
41
+ - - ">="
42
+ - !ruby/object:Gem::Version
43
+ version: '1.60'
44
+ - - "<="
45
+ - !ruby/object:Gem::Version
46
+ version: '2.99'
47
+ - !ruby/object:Gem::Dependency
48
+ requirement: !ruby/object:Gem::Requirement
49
+ requirements:
50
+ - - ">="
51
+ - !ruby/object:Gem::Version
52
+ version: '0'
53
+ name: logstash-codec-plain
54
+ prerelease: false
55
+ type: :runtime
56
+ version_requirements: !ruby/object:Gem::Requirement
57
+ requirements:
58
+ - - ">="
59
+ - !ruby/object:Gem::Version
60
+ version: '0'
61
+ - !ruby/object:Gem::Dependency
62
+ requirement: !ruby/object:Gem::Requirement
63
+ requirements:
64
+ - - ">="
65
+ - !ruby/object:Gem::Version
66
+ version: '0'
67
+ name: logstash-codec-json
68
+ prerelease: false
69
+ type: :runtime
70
+ version_requirements: !ruby/object:Gem::Requirement
71
+ requirements:
72
+ - - ">="
73
+ - !ruby/object:Gem::Version
74
+ version: '0'
75
+ - !ruby/object:Gem::Dependency
76
+ requirement: !ruby/object:Gem::Requirement
77
+ requirements:
78
+ - - ">="
79
+ - !ruby/object:Gem::Version
80
+ version: '0'
81
+ name: logstash-devutils
82
+ prerelease: false
83
+ type: :development
84
+ version_requirements: !ruby/object:Gem::Requirement
85
+ requirements:
86
+ - - ">="
87
+ - !ruby/object:Gem::Version
88
+ version: '0'
89
+ - !ruby/object:Gem::Dependency
90
+ requirement: !ruby/object:Gem::Requirement
91
+ requirements:
92
+ - - ">="
93
+ - !ruby/object:Gem::Version
94
+ version: '0'
95
+ name: poseidon
96
+ prerelease: false
97
+ type: :development
98
+ version_requirements: !ruby/object:Gem::Requirement
99
+ requirements:
100
+ - - ">="
101
+ - !ruby/object:Gem::Version
102
+ version: '0'
103
+ - !ruby/object:Gem::Dependency
104
+ requirement: !ruby/object:Gem::Requirement
105
+ requirements:
106
+ - - ">="
107
+ - !ruby/object:Gem::Version
108
+ version: '0'
109
+ name: snappy
110
+ prerelease: false
111
+ type: :development
112
+ version_requirements: !ruby/object:Gem::Requirement
113
+ requirements:
114
+ - - ">="
115
+ - !ruby/object:Gem::Version
116
+ version: '0'
117
+ description: This gem is a Logstash plugin required to be installed on top of the
118
+ Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This
119
+ gem is not a stand-alone program
120
+ email: dis@huaweicloud.com
121
+ executables: []
122
+ extensions: []
123
+ extra_rdoc_files: []
124
+ files:
125
+ - Gemfile
126
+ - LICENSE
127
+ - NOTICE.TXT
128
+ - README.md
129
+ - lib/com/fasterxml/jackson/core/jackson-annotations/2.8.11/jackson-annotations-2.8.11.jar
130
+ - lib/com/fasterxml/jackson/core/jackson-core/2.8.11/jackson-core-2.8.11.jar
131
+ - lib/com/fasterxml/jackson/core/jackson-databind/2.8.11.3/jackson-databind-2.8.11.3.jar
132
+ - lib/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar
133
+ - lib/com/huaweicloud/dis/huaweicloud-dis-kafka-adapter-common/1.2.1/huaweicloud-dis-kafka-adapter-common-1.2.1.jar
134
+ - lib/com/huaweicloud/dis/huaweicloud-dis-kafka-adapter/1.2.1/huaweicloud-dis-kafka-adapter-1.2.1.jar
135
+ - lib/com/huaweicloud/dis/huaweicloud-sdk-java-dis-iface/1.3.3/huaweicloud-sdk-java-dis-iface-1.3.3.jar
136
+ - lib/com/huaweicloud/dis/huaweicloud-sdk-java-dis/1.3.3/huaweicloud-sdk-java-dis-1.3.3.jar
137
+ - lib/commons-codec/commons-codec/1.9/commons-codec-1.9.jar
138
+ - lib/commons-io/commons-io/2.4/commons-io-2.4.jar
139
+ - lib/commons-logging/commons-logging/1.2/commons-logging-1.2.jar
140
+ - lib/joda-time/joda-time/2.8.1/joda-time-2.8.1.jar
141
+ - lib/log4j/log4j/1.2.17/log4j-1.2.17.jar
142
+ - lib/logstash-output-dis_jars.rb
143
+ - lib/logstash/outputs/dis.rb
144
+ - lib/org/apache/httpcomponents/httpasyncclient/4.1.3/httpasyncclient-4.1.3.jar
145
+ - lib/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar
146
+ - lib/org/apache/httpcomponents/httpcore-nio/4.4.6/httpcore-nio-4.4.6.jar
147
+ - lib/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar
148
+ - lib/org/apache/httpcomponents/httpmime/4.5.2/httpmime-4.5.2.jar
149
+ - lib/org/apache/logging/log4j/log4j-1.2-api/2.6.2/log4j-1.2-api-2.6.2.jar
150
+ - lib/org/apache/logging/log4j/log4j-api/2.6.2/log4j-api-2.6.2.jar
151
+ - lib/org/apache/logging/log4j/log4j-core/2.6.2/log4j-core-2.6.2.jar
152
+ - lib/org/slf4j/slf4j-api/1.7.21/slf4j-api-1.7.21.jar
153
+ - lib/org/slf4j/slf4j-log4j12/1.7.21/slf4j-log4j12-1.7.21.jar
154
+ - lib/org/xerial/snappy/snappy-java/1.1.7.2/snappy-java-1.1.7.2.jar
155
+ - logstash-output-dis.gemspec
156
+ - spec/unit/outputs/dis_spec.rb
157
+ - vendor/jar-dependencies/runtime-jars/commons-codec-1.9.jar
158
+ - vendor/jar-dependencies/runtime-jars/commons-io-2.4.jar
159
+ - vendor/jar-dependencies/runtime-jars/commons-logging-1.2.jar
160
+ - vendor/jar-dependencies/runtime-jars/httpasyncclient-4.1.3.jar
161
+ - vendor/jar-dependencies/runtime-jars/httpclient-4.5.2.jar
162
+ - vendor/jar-dependencies/runtime-jars/httpcore-4.4.4.jar
163
+ - vendor/jar-dependencies/runtime-jars/httpcore-nio-4.4.6.jar
164
+ - vendor/jar-dependencies/runtime-jars/httpmime-4.5.2.jar
165
+ - vendor/jar-dependencies/runtime-jars/huaweicloud-dis-kafka-adapter-1.2.1.jar
166
+ - vendor/jar-dependencies/runtime-jars/huaweicloud-dis-kafka-adapter-common-1.2.1.jar
167
+ - vendor/jar-dependencies/runtime-jars/huaweicloud-sdk-java-dis-1.3.3.jar
168
+ - vendor/jar-dependencies/runtime-jars/huaweicloud-sdk-java-dis-iface-1.3.3.jar
169
+ - vendor/jar-dependencies/runtime-jars/jackson-annotations-2.8.11.jar
170
+ - vendor/jar-dependencies/runtime-jars/jackson-core-2.8.11.jar
171
+ - vendor/jar-dependencies/runtime-jars/jackson-databind-2.8.11.3.jar
172
+ - vendor/jar-dependencies/runtime-jars/joda-time-2.8.1.jar
173
+ - vendor/jar-dependencies/runtime-jars/log4j-1.2-api-2.6.2.jar
174
+ - vendor/jar-dependencies/runtime-jars/log4j-1.2.17.jar
175
+ - vendor/jar-dependencies/runtime-jars/log4j-api-2.6.2.jar
176
+ - vendor/jar-dependencies/runtime-jars/log4j-core-2.6.2.jar
177
+ - vendor/jar-dependencies/runtime-jars/protobuf-java-2.5.0.jar
178
+ - vendor/jar-dependencies/runtime-jars/slf4j-api-1.7.21.jar
179
+ - vendor/jar-dependencies/runtime-jars/slf4j-log4j12-1.7.21.jar
180
+ - vendor/jar-dependencies/runtime-jars/snappy-java-1.1.7.2.jar
181
+ homepage: https://www.huaweicloud.com/product/dis.html
182
+ licenses:
183
+ - Apache License (2.0)
184
+ metadata:
185
+ logstash_plugin: 'true'
186
+ group: output
187
+ post_install_message:
188
+ rdoc_options: []
189
+ require_paths:
190
+ - lib
191
+ - vendor/jar-dependencies
192
+ required_ruby_version: !ruby/object:Gem::Requirement
193
+ requirements:
194
+ - - ">="
195
+ - !ruby/object:Gem::Version
196
+ version: '0'
197
+ required_rubygems_version: !ruby/object:Gem::Requirement
198
+ requirements:
199
+ - - ">="
200
+ - !ruby/object:Gem::Version
201
+ version: '0'
202
+ requirements:
203
+ - jar 'com.huaweicloud.dis:huaweicloud-dis-kafka-adapter', '1.2.1'
204
+ - jar 'org.slf4j:slf4j-log4j12', '1.7.21'
205
+ - jar 'org.apache.logging.log4j:log4j-1.2-api', '2.6.2'
206
+ rubyforge_project:
207
+ rubygems_version: 2.6.14.1
208
+ signing_key:
209
+ specification_version: 4
210
+ summary: Writes events to a DIS stream
211
+ test_files:
212
+ - spec/unit/outputs/dis_spec.rb