logstash-filter-kafka_time_machine 3.0.0 → 3.0.1
- checksums.yaml +4 -4
- data/lib/logstash/filters/kafka_time_machine.rb +24 -16
- data/logstash-filter-kafka_time_machine.gemspec +2 -2
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 61eed3c10584ae85f6fed99b68b4a217e628d37a61471cf6d36fe3a1abcbf74d
+  data.tar.gz: 9e73985ed0bc52b621c9fc68539c8eac0bc9f6fadfa66f429ec8011d86c67891
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b8e9551aa515878242d9528048ba731e6a1c59bc4bbadffd6f916a736685dd6749cdeed9cad1b55498eb1519366e8c07acf7eeb2e74ccf2f71686fa08835c456
+  data.tar.gz: 332d6d9f0756db77f0176337a370430cb622e6968e20f470727cd3d6f9979509b54472f398701b500651f0f6f30dcdeb4e0ff54db65d34c0c687c76fc661b307
data/lib/logstash/filters/kafka_time_machine.rb
CHANGED
@@ -49,7 +49,7 @@ class LogStash::Filters::KafkaTimeMachine < LogStash::Filters::Base
 
 public
 def register
-
+
 end
 
 public
@@ -57,7 +57,7 @@ class LogStash::Filters::KafkaTimeMachine < LogStash::Filters::Base
 
 @logger.debug("Starting filter calculations")
 
-# Note - It was considered to error check for strings that are invalid, i.e. "%{[@metadata][ktm][kafka_datacenter_shipper]}". However, this string being present is a good way to identify
+# Note - It was considered to error check for strings that are invalid, i.e. "%{[@metadata][ktm][kafka_datacenter_shipper]}". However, this string being present is a good way to identify
 # shipper/indexer logstash configs that are wrong so its allowed to pass through unaltered.
 #
 # Extract all string values to local variables.
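
To unpack the note in this hunk: when a %{...} field reference cannot be resolved, Logstash's event.sprintf returns the reference string itself rather than raising, so a literal "%{[@metadata][ktm][kafka_datacenter_shipper]}" surviving into output is a visible symptom of a misconfigured shipper/indexer pipeline. A minimal sketch of that behavior (the field names here are examples only):

    # Sketch: unresolved references pass through event.sprintf verbatim.
    event = LogStash::Event.new("datacenter" => "dc1")

    event.sprintf("%{datacenter}")
    # => "dc1"
    event.sprintf("%{[@metadata][ktm][kafka_datacenter_shipper]}")
    # => "%{[@metadata][ktm][kafka_datacenter_shipper]}"  (unresolved, passed through)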
@@ -78,7 +78,7 @@ class LogStash::Filters::KafkaTimeMachine < LogStash::Filters::Base
 shipper_logstash_kafka_read_time = get_numeric(event.sprintf(@logstash_kafka_read_time_shipper))
 indexer_kafka_append_time = get_numeric(event.sprintf(@kafka_append_time_indexer))
 indexer_logstash_kafka_read_time = get_numeric(event.sprintf(@logstash_kafka_read_time_indexer))
-
+
 # Validate the shipper data
 shipper_kafka_array = Array[shipper_kafka_datacenter, shipper_kafka_topic, shipper_kafka_consumer_group, shipper_kafka_append_time, shipper_logstash_kafka_read_time, event_owner, event_time_ms, elasticsearch_cluster, elasticsearch_cluster_index]
 if (shipper_kafka_array.any? { |text| text.nil? || text.to_s.empty? })
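
The gate above counts a field as missing when it is nil or stringifies to empty, and a single any? over the collected values rejects the whole shipper record. The same check in isolation (sample values are made up):

    # Sketch: one nil or empty entry marks the whole record invalid.
    shipper_kafka_array = ["dc1", "logs_topic", nil, 1708992000000, 1708992000500]

    invalid = shipper_kafka_array.any? { |text| text.nil? || text.to_s.empty? }
    # => true, because of the nil consumer-group slot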
@@ -109,10 +109,18 @@ class LogStash::Filters::KafkaTimeMachine < LogStash::Filters::Base
 indexer_kafka_lag_ms = indexer_logstash_kafka_read_time - indexer_kafka_append_time
 end
 
-# Add in the size of the payload field
-payload_size_bytes = 0
+# Add in the size of the payload field if it exist
 if event.get("[payload]")
-  payload_size_bytes = event.get("[payload]").bytesize
+  # pipeline gets random poison pill that crashes KTM with error: "error=>"(NoMethodError) undefined method `bytesize' for #Hash:0x38fe76d4", :exception=>Java::OrgJrubyExceptions::NoMethodError"
+  # This should prevent the crash and log the error
+  begin
+    payload_size_bytes = event.get("[payload]").bytesize
+  rescue NoMethodError => e
+    payload_size_bytes = 0
+    @logger.fatal("payload bytesize operation failed: #{e.message}")
+  end
+else
+  payload_size_bytes = 0
 end
 
 # Set time (nanoseconds) for event that is generated
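
This hunk is the substance of the 3.0.1 release: [payload] is not guaranteed to be a String, and on a Hash (the "poison pill" in the comment) bytesize raises NoMethodError, which previously crashed the pipeline. A standalone sketch of the guard, using a hypothetical helper name (the plugin itself logs via @logger.fatal rather than warn):

    def safe_payload_size(payload)
      # String#bytesize is fine; a Hash or other non-String raises NoMethodError.
      payload.bytesize
    rescue NoMethodError => e
      warn "payload bytesize operation failed: #{e.message}"
      0
    end

    safe_payload_size("hello")        # => 5
    safe_payload_size({ "k" => "v" }) # => 0, after logging the failure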
@@ -152,7 +160,7 @@ class LogStash::Filters::KafkaTimeMachine < LogStash::Filters::Base
 
 error_string = sprintf("Error kafka_time_machine: Could not build valid response --> %s, %s", error_string_shipper, error_string_indexer)
 @logger.debug(error_string)
-
+
 else
 
 point_ktm = create_point_ktm_error(shipper_kafka_datacenter, event_owner, epoch_time_ns, "unknown", elasticsearch_cluster, elasticsearch_cluster_index)
@@ -165,14 +173,14 @@ class LogStash::Filters::KafkaTimeMachine < LogStash::Filters::Base
 
 # Publish even event in our array
 ktm_metric_event_array.each do |metric_event|
-
+
   # Create new event for KTM metric
   event_ktm = LogStash::Event.new(metric_event)
   event_ktm.set("[@metadata][ktm_tags][ktm_metric]", "true")
 
   filter_matched(event_ktm)
   yield event_ktm
-
+
 end
 
 end # def filter
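
For readers new to Logstash filter internals, the loop above is the standard pattern for emitting extra events from a filter: wrap each metric hash in a fresh LogStash::Event, mark it via @metadata, run filter_matched so any configured add_field/add_tag decorations apply, then yield it into the pipeline alongside the original event. A condensed sketch, as it would sit inside the plugin's filter method (the metric hash is a placeholder):

    # Inside def filter(event) of the plugin:
    metric_event = { "name" => "ktm", "lag_type" => "total", "lag_ms" => 500 }

    event_ktm = LogStash::Event.new(metric_event)               # new event from the metric hash
    event_ktm.set("[@metadata][ktm_tags][ktm_metric]", "true")  # mark as a KTM metric
    filter_matched(event_ktm)                                   # apply this filter's decorations
    yield event_ktm                                             # emit alongside the original event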
@@ -180,7 +188,7 @@ class LogStash::Filters::KafkaTimeMachine < LogStash::Filters::Base
 # Creates hash with ktm data point to return
 public
 def create_point_ktm(datacenter, event_owner, payload_size_bytes, lag_type, lag_ms, epoch_time_ns, elasticsearch_cluster, elasticsearch_cluster_index)
-
+
   point = Hash.new
 
   # Name of point and time created
@@ -225,33 +233,33 @@ class LogStash::Filters::KafkaTimeMachine < LogStash::Filters::Base
 # Name of point and time created
 point["name"] = "ktm_error"
 point["epoch_time_ns"] = epoch_time_ns
-
+
 # tags
 point["datacenter"] = datacenter
 point["owner"] = event_owner
 point["source"] = type
 point["es_cluster"] = elasticsearch_cluster
 point["es_cluster_index"] = elasticsearch_cluster_index
-
+
 # fields
 point["count"] = 1
-
+
 return point
 
 end # def create_point_ktm_error
 
 # Ensures the provided value is numeric; if not returns 'nil'
 public
-def get_numeric(input_str)
+def get_numeric(input_str)
 
   # @logger.debug("Aggregate timeout for '#{@task_id}' pattern: #{@timeout} seconds")
   @logger.debug("get_numeric operating on: #{input_str} ")
-
+
   is_numeric = input_str.to_s.match(/\A[+-]?\d+?(\.\d+)?\Z/) == nil ? false : true
   if (true == is_numeric)
     @logger.debug("get_numeric - valid value provided")
     num_value = Float(sprintf(input_str))
-
+
     if (false == num_value.positive?)
       @logger.debug("get_numeric - negative value provided")
       num_value = nil
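
The regex in get_numeric admits optionally signed integers and decimals; anything else, and any non-positive result, collapses to nil. A simplified standalone version of the same logic (a hypothetical helper outside the plugin class; the plugin also logs each branch):

    def get_numeric(input_str)
      # Accept optionally signed integers or decimals only.
      return nil unless input_str.to_s.match(/\A[+-]?\d+?(\.\d+)?\Z/)

      num_value = Float(input_str)
      # Non-positive timestamps are treated as invalid.
      num_value.positive? ? num_value : nil
    end

    get_numeric("1650000000000") # => 1650000000000.0
    get_numeric("-42")           # => nil (not positive)
    get_numeric("12.5.3")        # => nil (fails the numeric pattern)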
data/logstash-filter-kafka_time_machine.gemspec
CHANGED
@@ -1,6 +1,6 @@
 Gem::Specification.new do |s|
   s.name = 'logstash-filter-kafka_time_machine'
-  s.version = '3.0.0'
+  s.version = '3.0.1'
   s.licenses = ['Apache-2.0']
   s.summary = "Calculate total time of logstash event that traversed 2 Kafka queues from a shipper site to an indexer site"
   s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
@@ -11,7 +11,7 @@ Gem::Specification.new do |s|
 
 # Files
 s.files = Dir['lib/**/*','spec/**/*','vendor/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.TXT']
-
+
 # Tests
 s.test_files = s.files.grep(%r{^(test|spec|features)/})
 
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-filter-kafka_time_machine
 version: !ruby/object:Gem::Version
-  version: 3.0.0
+  version: 3.0.1
 platform: ruby
 authors:
 - Chris Foster
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2024-02-27 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: logstash-core-plugin-api