fluent-plugin-jfrog-metrics 0.1.0 → 0.2.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 880965fb77886a036bc1de886a64b1a28a81be51303981359e801d44bd425d32
-  data.tar.gz: 895972cbf8078c299a777b31e286644200e2716514c3fc22697efa00fa2013d6
+  metadata.gz: ba1a5b8b11e87e7309ca6831f129fc0a5cffb8dbbf0a89bf4fa753f3b21850e9
+  data.tar.gz: 474f79299a90b246ba24e00f4e204a2929643b50afa43df976bfc50664729663
 SHA512:
-  metadata.gz: 40e81bba201bfec407c34e556843501ad9011490b6a2c55dc179fb5e2948948e05df86e2654af3c96f1ea6498f94a80771947340d850a03798b7fdc306a40fa1
-  data.tar.gz: da1ead96440264cf9e12180fd4fca0cbffd9e838a8b8cf98f9e35dfe0876e32fa52dfa659fd7b9975175387595a007080f76ad662395a11cb70a972beb182f6c
+  metadata.gz: 2c10e6dd3dd51fbb7ba9c195df17bf459e1500d53c50b1b607d066c63dfe0703d34525dd1c0f3764913e3d236f0edd3d673476efb21001561eff7f4d4e5b8204
+  data.tar.gz: 435cc105038ce6dc74183a1933aefffbd572da45460589d229326bc4b5b3bba61f7a7dabf2441c825aafbe999a1477580a8aeff4e3b73e15829b4c9c7112723e
data/.gitignore CHANGED
@@ -1 +1,3 @@
 .DS_Store
+fluent-plugin-jfrog-metrics-*.gem
+fluent-plugin-jfrog-metrics.iml
data/Gemfile CHANGED
@@ -1,8 +1,11 @@
-source "https://rubygems.org"
+# frozen_string_literal: true
+
+source 'https://rubygems.org'
 
 gemspec
 
 group :test do
-  gem "minitest"
-  gem "minitest-reporters", '>= 0.5.0'
-end
+  gem 'minitest'
+  gem 'minitest-reporters', '>= 0.5.0'
+  gem 'rest-client', '>= 2.1.0'
+end
data/Gemfile.lock CHANGED
@@ -1,8 +1,9 @@
 PATH
   remote: .
   specs:
-    fluent-plugin-jfrog-metrics (0.1.2)
+    fluent-plugin-jfrog-metrics (0.2.1)
       fluentd (>= 0.14.10, < 2)
+      rest-client (~> 2.0)
 
 GEM
   remote: https://rubygems.org/
@@ -11,30 +12,44 @@ GEM
     builder (3.2.4)
     concurrent-ruby (1.1.9)
     cool.io (1.7.1)
-    fluentd (1.14.2)
+    domain_name (0.5.20190701)
+      unf (>= 0.0.5, < 1.0.0)
+    fluentd (1.14.5)
       bundler
       cool.io (>= 1.4.5, < 2.0.0)
-      http_parser.rb (>= 0.5.1, < 0.8.0)
+      http_parser.rb (>= 0.5.1, < 0.9.0)
       msgpack (>= 1.3.1, < 2.0.0)
-      serverengine (>= 2.2.2, < 3.0.0)
+      serverengine (>= 2.2.5, < 3.0.0)
       sigdump (~> 0.2.2)
       strptime (>= 0.2.4, < 1.0.0)
      tzinfo (>= 1.0, < 3.0)
      tzinfo-data (~> 1.0)
      webrick (>= 1.4.2, < 1.8.0)
      yajl-ruby (~> 1.0)
-    http_parser.rb (0.7.0)
+    http-accept (1.7.0)
+    http-cookie (1.0.4)
+      domain_name (~> 0.5)
+    http_parser.rb (0.8.0)
+    mime-types (3.4.1)
+      mime-types-data (~> 3.2015)
+    mime-types-data (3.2021.1115)
     minitest (5.14.4)
     minitest-reporters (1.4.3)
       ansi
       builder
       minitest (>= 5.0)
       ruby-progressbar
-    msgpack (1.4.2)
+    msgpack (1.4.5)
+    netrc (0.11.0)
     power_assert (2.0.1)
     rake (12.3.3)
+    rest-client (2.1.0)
+      http-accept (>= 1.7.0, < 2.0)
+      http-cookie (>= 1.0.2, < 2.0)
+      mime-types (>= 1.16, < 4.0)
+      netrc (~> 0.8)
     ruby-progressbar (1.11.0)
-    serverengine (2.2.4)
+    serverengine (2.2.5)
       sigdump (~> 0.2.2)
     sigdump (0.2.4)
     strptime (0.2.5)
@@ -44,6 +59,9 @@ GEM
       concurrent-ruby (~> 1.0)
     tzinfo-data (1.2021.5)
       tzinfo (>= 1.0.0)
+    unf (0.1.4)
+      unf_ext
+    unf_ext (0.0.8)
     webrick (1.7.0)
     yajl-ruby (1.4.1)
 
@@ -56,6 +74,7 @@ DEPENDENCIES
   minitest
   minitest-reporters (>= 0.5.0)
   rake (~> 12.0)
+  rest-client (>= 2.1.0)
   test-unit (~> 3.0)
 
 BUNDLED WITH
data/README.md CHANGED
@@ -6,19 +6,18 @@
 
 To build / test locally use rake:
 
-```
+```
 rake
 ```
 
 To build and install locally use bundler:
 
-```
+```
 bundle install
 ```
 
 This will install the gem shown below from source.
 
-
 ## Development
 
 ### Bundler
@@ -45,44 +44,64 @@ $ fluent-plugin-config-format input jfrog-metrics
 
 You can copy and paste generated documents here.
 
-## Installation
+## Installation
 
 ### RubyGems
+
 ```
 $ gem install rest-client
 ```
+
 ```
 $ gem install thread
 ```
+
 ```
 $ gem install fluent-plugin-jfrog-metrics
 ```
 
 ### Setup & configuration
-Fluentd is the supported log collector for this integration.
+
+Fluentd is the supported log collector for this integration.
 For Fluentd setup and information, read the JFrog log analytics repository's [README.](https://github.com/jfrog/log-analytics/blob/master/README.md)
 
 #### Fluentd Output
+
 Download the fluentd conf for different log vendors. For example,
-Splunk:
+Splunk:
 
 Splunk setup can be found at [README.](https://github.com/jfrog/metrics/blob/main/splunk/README.md)
-````text
+
+```text
 wget https://raw.githubusercontent.com/jfrog/metrics/master/splunk/splunk_metrics.conf
-````
+```
 
 #### Configuration parameters
+
 Integration is done by setting up Xray. Obtain the JPD URL and an access token for the API, then configure the source directive parameters specified below.
-* **tag** (string) (required): The value is the tag assigned to the generated events.
-* **jpd_url** (string) (required): JPD url required to pull Xray SIEM violations
-* **apikey** (string) (required): API Key is the [Artifactory API Key](https://www.jfrog.com/confluence/display/JFROG/User+Profile#UserProfile-APIKey) for authentication
-* **username** (string) (required): USER is the Artifactory username for authentication
-* **metric_prefix** (string) (required): This values pulls the specific metrics. Values can be - jfrog.artifactory, jfrog.xray
-* **interval** (integer) (optional): Wait interval between pulling new events
-  * Default value: `60`
-
+
+- **tag** (string) (required): The tag assigned to the generated metrics.
+- **jpd_url** (string) (required): The JPD URL to pull metrics from (note: if deployed on Kubernetes, use the localhost and port number combination of each sidecar)
+- **username** (string) (required): The Artifactory username for authentication
+- **apikey** (string) (required if token is not used; refer to the Note section below for specifics): The [Artifactory API Key](https://www.jfrog.com/confluence/display/JFROG/User+Profile#UserProfile-APIKey) for authentication
+- **token** (string) (required if apikey is not used; refer to the Note section below for specifics): The admin [Artifactory Scoped Token](https://www.jfrog.com/confluence/display/JFROG/Access+Tokens#AccessTokens-GeneratingAdminTokens) for authentication
+- **metric_prefix** (string) (required): Selects which metrics are pulled. Values can be jfrog.artifactory or jfrog.xray
+- **interval** (integer) (optional): Wait interval between pulling new events
+  - Default value: `60`
+- **common_jpd** (true / false) (optional): Set this to true only for non-Kubernetes installations, or installations where the same JPD base URL is used to access both Artifactory and Xray,
+  - e.g. https://sample_base_url/artifactory or https://sample_base_url/xray
  - Default value: false
+- **target_platform** (string) (optional): Output format of the target platform; allowed values are SPLUNK and ELASTIC
+  - Default value: `SPLUNK`
+
+Note:
+
+- For Artifactory v7.4 and below, only the API Key can be used.
+- For Artifactory v7.4 to v7.29, either a token or an API Key can be used.
+- For Artifactory v7.30 and above, only a token can be used.
+
 ## Copyright
 
-* Copyright(c) 2020 - JFrog
-* License
-* Apache License, Version 2.0
 
+- Copyright(c) 2020 - JFrog
+- License
+- Apache License, Version 2.0
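
For context, a source directive built from the parameters documented in the README above would look roughly like the sketch below. This is an editor's illustration, not taken from the gem: the `@type jfrog_metrics` registration name, host, and credential values are assumptions/placeholders, and `target_platform`/`common_jpd` are shown at their defaults.

```
<source>
  @type jfrog_metrics
  tag jfrog.artifactory.metrics
  jpd_url http://localhost:8082
  username admin
  # use either token (scoped admin token) or apikey, per the Note above
  token your-scoped-admin-token
  metric_prefix jfrog.artifactory
  interval 60
  target_platform SPLUNK
  common_jpd false
</source>
```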
data/fluent-plugin-jfrog-metrics.gemspec CHANGED
@@ -3,12 +3,13 @@ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
 
 Gem::Specification.new do |spec|
   spec.name = 'fluent-plugin-jfrog-metrics'
-  spec.version = '0.1.0'
-  spec.authors = ['MahithaB, VasukiN']
-  spec.email = ['60710901+MahithaB@users.noreply.github.com']
+  spec.version = '0.2.1'
+  spec.authors = ['MahithaB, VasukiN, giri-vsr']
+  spec.email = ['cpe-support@jfrog.com']
+
+  spec.summary = %q{Fluentd Plugin for converting JFrog Artifactory, Xray generated metrics (Prometheus Exposition Format) to target observability platform format (Splunk HEC, New Relic, Elastic)}
+  spec.description = %q{Fluentd Plugin for converting JFrog Artifactory, Xray generated metrics (Prometheus Exposition Format) to target observability platform format (Splunk HEC, New Relic, Elastic)}
 
-  spec.summary = %q{Fluentd Plugin for converting one metrics form of data to another, this is from Prometheus format to Splunk HEC required format}
-  spec.description = %q{Fluentd Plugin for converting one metrics form of data to another, this is from Prometheus format to Splunk HEC required format}
   spec.homepage = 'https://github.com/jfrog/jfrog-fluentd-plugins/tree/main/fluent-plugin-jfrog-metrics'
   spec.license = 'Apache-2.0'
 
@@ -23,5 +24,7 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency 'bundler', '~> 1.14'
   spec.add_development_dependency 'rake', '~> 12.0'
   spec.add_development_dependency 'test-unit', '~> 3.0'
+  spec.add_development_dependency "rest-client", "~> 2.0"
   spec.add_runtime_dependency 'fluentd', ['>= 0.14.10', '< 2']
+  spec.add_runtime_dependency "rest-client", "~> 2.0"
 end
data/lib/fluent/plugin/base_metrics_parser.rb ADDED
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+require 'json'
+
+class BaseMetricsParser
+
+  def normalise_data(data_to_normalize = [])
+    normalized_data = []
+    if data_to_normalize.length == 1
+      data_to_normalize.each do |interim_data|
+        normalized_data = interim_data.split(/\\n+|\\r+/).reject(&:empty?)
+      end
+    else
+      data_to_normalize.each_line do |interim_data|
+        normalized_data << interim_data.strip unless interim_data == "\n"
+      end
+    end
+    normalized_data
+  end
+
+  def clean_data(data_to_clean = [])
+    cleaned_data = []
+    data_to_clean.each do |interim_data|
+      cleaned_data << interim_data unless interim_data.include? '#'
+    end
+    cleaned_data
+  end
+
+  def serialize_data(hashed_data_array = [])
+    serialised_data = []
+    hashed_data_array.each do |interim_data|
+      serialised_data << JSON.parse(interim_data.to_json)
+    end
+    serialised_data
+  end
+
+  def emit_parsed_metrics(platform_metrics)
+    normalized_data = normalise_data(platform_metrics)
+    cleaned_data = clean_data(normalized_data)
+    hash_data_array = format_data(cleaned_data, @metric_prefix, '.')
+    serialized_data = serialize_data(hash_data_array)
+    serialized_data.each do |interim_data|
+      @router.emit(@tag, Fluent::Engine.now, interim_data)
+    end
+  end
+end
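
BaseMetricsParser is a template: emit_parsed_metrics drives normalise_data → clean_data → format_data → serialize_data → emit, and only format_data is left to the platform subclasses added below. A minimal sketch of a custom subclass (hypothetical, not part of the gem; it assumes it runs inside Fluentd, where Fluent::Engine and the event router exist):

```ruby
require_relative 'base_metrics_parser'

# Hypothetical subclass: only format_data must be supplied; the inherited
# emit_parsed_metrics handles normalising, cleaning, serializing and emitting.
class PassthroughMetricsParser < BaseMetricsParser
  def initialize(metric_prefix, router, tag)
    @metric_prefix = metric_prefix # e.g. 'jfrog.artifactory'
    @router = router               # Fluentd event router (responds to #emit)
    @tag = tag                     # tag attached to every emitted record
  end

  # Map each cleaned Prometheus line (comments already removed) to a hash.
  def format_data(cleaned_data = [], prefix = '', separator = '')
    cleaned_data.map do |line|
      name, value, timestamp = line.split
      { 'metric_name' => "#{prefix}#{separator}#{name}",
        'value' => value.to_f,
        'time' => timestamp.to_i }
    end
  end
end
```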
data/lib/fluent/plugin/elastic_metrics_parser.rb ADDED
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+require 'json'
+require_relative 'base_metrics_parser'
+
+class ElasticMetricsParser < BaseMetricsParser
+  def initialize(metric_prefix, router, tag)
+    @metric_prefix = metric_prefix
+    @router = router
+    @tag = tag
+  end
+
+  def format_data(cleaned_data = [], prefix = '', separator = '')
+    hash_data_array = []
+    hash_data = {}
+    cleaned_data.each do |interim_data|
+      if interim_data =~ /{/ && interim_data =~ /}/
+        metric_name, additional_dims, metric_val_and_time = interim_data.match(/(.*){(.*)}(.*)/i).captures
+        additional_dim_name = ''
+        if additional_dims =~ /,/
+          additional_dims.split(/,/).map do |interim_dim_data|
+            additional_dim_name += '_' unless additional_dim_name.nil? || additional_dim_name.empty?
+            additional_dim_name += interim_dim_data.gsub(/"/, '').gsub(/=/, '_').gsub(/-/, '_') if interim_data =~ /=/
+          end
+        else
+          additional_dim_name = additional_dims.gsub(/"/, '').gsub(/=/, '_').gsub(/-/, '_') if interim_data =~ /=/
+        end
+        if metric_val_and_time =~ / /
+          hash_data["#{prefix}#{separator}#{metric_name}_#{additional_dim_name}"] =
+            metric_val_and_time.strip.split[0] =~ /^\S*\.\S*$/ ? metric_val_and_time.strip.split[0].to_f : metric_val_and_time.strip.split[0].to_i
+        end
+      else
+        metric_name, value, = interim_data.split
+        hash_data[prefix + separator + metric_name] =
+          value =~ /^\S*\.\S*$/ ? value.to_f : value.to_i
+      end
+    end
+    hash_data_array << hash_data
+  end
+end
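
A worked example of the Elastic formatting (illustrative; the input line is taken from the sample Xray fixture later in this diff). Labels are folded into the metric name with underscores, the timestamp is dropped, and the whole scrape becomes a single flat document:

```ruby
require_relative 'elastic_metrics_parser'

line   = 'jfxr_data_artifacts_total{package_type="generic"} 1 1645738679875'
# router is unused by format_data, so nil is fine for this demonstration
parser = ElasticMetricsParser.new('jfrog.xray', nil, 'jfrog.xray.metrics')

p parser.format_data([line], 'jfrog.xray', '.')
# => [{"jfrog.xray.jfxr_data_artifacts_total_package_type_generic"=>1}]
```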
data/lib/fluent/plugin/in_jfrog_metrics.rb CHANGED
@@ -17,8 +17,11 @@
 require 'concurrent'
 require 'rest-client'
 require 'fluent/plugin/input'
-require_relative 'metrics_parser'
+require_relative 'base_metrics_parser'
 require_relative 'metrics_helper'
+require_relative 'newrelic_metrics_parser'
+require_relative 'splunk_metrics_parser'
+require_relative 'elastic_metrics_parser'
 
 module Fluent
   module Plugin
@@ -31,9 +34,12 @@ module Fluent
       config_param :tag, :string, default: ''
       config_param :jpd_url, :string, default: ''
       config_param :username, :string, default: ''
-      config_param :apikey, :string, default: ''
+      config_param :apikey, :string, default: '', :secret => true
+      config_param :token, :string, default: '', :secret => true
       config_param :interval, :time, default: 10
       config_param :metric_prefix, :string, default: ''
+      config_param :target_platform, :string, default: 'SPLUNK'
+      config_param :common_jpd, :bool, default: false
 
       # `configure` is called before `start`.
       # 'conf' is a `Hash` that includes the configuration parameters.
@@ -42,15 +48,17 @@ module Fluent
         super
         raise Fluent::ConfigError, 'Must define the tag for metrics data.' if @tag == ''
 
-        raise Fluent::ConfigError, 'Must define the jpd_url to scrape metrics' if @jpd_url == ''
+        raise Fluent::ConfigError, 'Must define the jpd_url to scrape metrics.' if @jpd_url == ''
 
-        raise Fluent::ConfigError, 'Must define the username for authentication' if @username == ''
+        raise Fluent::ConfigError, 'Must define the username for authentication.' if @username == ''
 
-        raise Fluent::ConfigError, 'Must define the apikey to use for authentication.' if @apikey == ''
+        raise Fluent::ConfigError, 'Must define the apikey or token for authentication.' if @token == '' && @apikey == ''
 
         raise Fluent::ConfigError, 'Must define the interval to use for gathering the metrics.' if @interval == ''
 
         raise Fluent::ConfigError, 'Must define the metric_prefix to use for getting the metrics.' if @metric_prefix == ''
+
+        raise Fluent::ConfigError, 'Must define the vendor to use for getting the metrics.' if @target_platform == ''
       end
 
       # `start` is called when starting and after `configure` is successfully completed.
@@ -77,9 +85,21 @@ module Fluent
 
       def do_execute
        puts('Executing metrics collection')
-        metrics_helper = MetricsHelper.new(@metric_prefix, @jpd_url, @username, @apikey)
+        metrics_helper = MetricsHelper.new(@metric_prefix, @jpd_url, @username, @apikey, @token, @common_jpd)
         platform_metrics = metrics_helper.get_metrics
-        parser = MetricsParser.new(@metric_prefix, router, @tag)
+        additional_metrics = metrics_helper.get_additional_metrics
+        if !additional_metrics.nil? && additional_metrics != ''
+          platform_metrics += additional_metrics.to_s
+        end
+        if @target_platform == 'SPLUNK'
+          parser = SplunkMetricsParser.new(@metric_prefix, router, @tag)
+        elsif @target_platform == 'ELASTIC'
+          parser = ElasticMetricsParser.new(@metric_prefix, router, @tag)
+        elsif @target_platform == 'NEWRELIC'
+          parser = NewRelicMetricsParser.new(@metric_prefix, router, @tag)
+        else
+          raise 'Parser type is not valid. Should be SPLUNK, ELASTIC or NEWRELIC.'
+        end
         parser.emit_parsed_metrics(platform_metrics)
       end
     end
data/lib/fluent/plugin/metrics_helper.rb CHANGED
@@ -1,12 +1,17 @@
 # frozen_string_literal: true
+
 require 'rest-client'
 
 class MetricsHelper
-  def initialize(metric_prefix, jpd_url, username, apikey)
+  @@obs_endpoint_exists = false
+
+  def initialize(metric_prefix, jpd_url, username, apikey, token, common_jpd)
     @metric_prefix = metric_prefix
     @jpd_url = jpd_url
     @username = username
     @apikey = apikey
+    @token = token
+    @common_jpd = common_jpd
   end
 
   def get_metrics
@@ -19,22 +24,62 @@ class MetricsHelper
     else
       "#{@jpd_url}/artifactory/api/v1/metrics"
     end
-    execute_rest_call(url, @username, @apikey)
+    if !@token.nil? && @token != ''
+      execute_rest_call(url, @username, nil, @token, false, true)
+    elsif !@apikey.nil? && @apikey != ''
+      execute_rest_call(url, @username, @apikey, nil, false, false)
+    end
+
+  end
+
+  def get_additional_metrics
+    if (@metric_prefix == 'jfrog.artifactory' || @common_jpd == false) && !@token.nil? && @token != ''
+      puts 'Executing additional metrics collection'
+      url = "#{@jpd_url}/observability/api/v1/metrics"
+      check_endpoint(url, @token) if @@obs_endpoint_exists.nil? || !@@obs_endpoint_exists
+      execute_rest_call(url, @username, nil, @token, true, true) if @@obs_endpoint_exists
+    end
   end
 
-  def execute_rest_call(url, user, password)
+  def check_endpoint(url, token)
     response = RestClient::Request.new(
       method: :get,
       url: url,
-      user: user,
-      password: password
+      headers: { Authorization: "Bearer #{token}" }
     ).execute do |response, request, result|
+      @@obs_endpoint_exists = true if response.code == 200
+      puts "#{url} exists? -> #{@@obs_endpoint_exists}, storing the result for next executions"
+    end
+  end
+
+  def execute_rest_call(url, user, password, token, ignore_exception, use_token)
+    request = if use_token == true
+                RestClient::Request.new(
+                  method: :get,
+                  url: url,
+                  headers: { Authorization: "Bearer #{token}" }
+                )
+              else
+                RestClient::Request.new(
+                  method: :get,
+                  url: url,
+                  user: user,
+                  password: password
+                )
+              end
+
+    request.execute do |response, request, result|
       case response.code
       when 200
         return response.body
       else
-        raise Fluent::ConfigError, 'Cannot fetch #{@metric_prefix} metrics'
+        if ignore_exception == true
+          return ''
+        else
+          raise Fluent::ConfigError, "Cannot fetch #{@metric_prefix} metrics"
+        end
       end
     end
   end
+
 end
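
The helper now supports two authentication modes, switched by execute_rest_call's use_token flag: a Bearer-token header for scoped tokens, or HTTP basic auth for username plus API key. The @@obs_endpoint_exists class variable caches the observability-endpoint probe so it is re-checked only until it succeeds. A standalone sketch of the two call shapes, assuming the rest-client gem; URL and credentials are placeholders:

```ruby
require 'rest-client'

url = 'https://example.jfrog.io/artifactory/api/v1/metrics' # placeholder JPD URL

# Token mode: scoped admin token sent as a Bearer header.
RestClient::Request.new(
  method: :get,
  url: url,
  headers: { Authorization: "Bearer #{ENV['JFROG_TOKEN']}" }
).execute { |response, _request, _result| puts response.code }

# API-key mode: HTTP basic auth with username + API key.
RestClient::Request.new(
  method: :get,
  url: url,
  user: ENV['JFROG_USER'],
  password: ENV['JFROG_APIKEY']
).execute { |response, _request, _result| puts response.code }
```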
data/lib/fluent/plugin/newrelic_metrics_parser.rb ADDED
@@ -0,0 +1,50 @@
+# frozen_string_literal: true
+require 'json'
+require_relative 'base_metrics_parser'
+
+class NewRelicMetricsParser < BaseMetricsParser
+  def initialize(metric_prefix, router, tag)
+    @metric_prefix = metric_prefix
+    @router = router
+    @tag = tag
+  end
+
+  def format_data(cleaned_data = [], prefix = '', separator = '')
+    hash_data_array = []
+    data_hash = {}
+    data_array = []
+    cleaned_data.each do |interim_data|
+      metrics_hash = {}
+      if interim_data =~ /{/ && interim_data =~ /}/
+        attributes = {}
+        metric_name, additional_dims, metric_val_and_time = interim_data.match(/(.*){(.*)}(.*)/i).captures
+        if additional_dims =~ /,/
+          additional_dims.split(/,/).map do |interim_data|
+            attributes[interim_data.split(/=/)[0]] = interim_data.split(/=/)[1].gsub(/"/, '') if interim_data =~ /=/
+          end
+        else
+          attribute_name, attribute_value = additional_dims.split('=')
+          attributes[attribute_name] = attribute_value.delete_prefix('"').delete_suffix('"')
+        end
+        if metric_val_and_time =~ / /
+          metrics_hash['name'] = prefix + separator + metric_name
+          metrics_hash['value'] =
+            metric_val_and_time.strip.split[0] =~ /^\S*\.\S*$/ ? metric_val_and_time.strip.split[0].to_f : metric_val_and_time.strip.split[0].to_i
+          metrics_hash['timestamp'] = metric_val_and_time.strip.split[1].to_i
+          metrics_hash['attributes'] = attributes
+        end
+      else
+        metrics_hash['name'], metrics_hash['value'], metrics_hash['timestamp'] = interim_data.split
+        metrics_hash['name'] = prefix + separator + metrics_hash['name']
+        metrics_hash['value'] =
+          metrics_hash['value'] =~ /^\S*\.\S*$/ ? metrics_hash['value'].to_f : metrics_hash['value'].to_i
+        metrics_hash['timestamp'] = metrics_hash['timestamp'].to_i
+      end
+      data_array << metrics_hash
+    end
+    data_hash["metrics"] = data_array
+    hash_data_array.push(data_hash)
+    hash_data_array
+  end
+
+end
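
The New Relic parser differs from the other two: every metric is collected into a single envelope keyed by "metrics" (an envelope shaped for New Relic's Metric API), which is why the new specs later in this diff expect format_data to return exactly one element. An illustrative run (values borrowed from the sample fixtures):

```ruby
require_relative 'newrelic_metrics_parser'

# router is unused by format_data, so nil is fine for this demonstration
parser = NewRelicMetricsParser.new('jfrog.artifactory', nil, 'jfrog.artifactory.metrics')

p parser.format_data(['sys_cpu_ratio 0.16 1645738619452'], 'jfrog.artifactory', '.')
# => [{"metrics"=>[{"name"=>"jfrog.artifactory.sys_cpu_ratio",
#                   "value"=>0.16,
#                   "timestamp"=>1645738619452}]}]
```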
data/lib/fluent/plugin/splunk_metrics_parser.rb ADDED
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+require 'json'
+require_relative 'base_metrics_parser'
+
+class SplunkMetricsParser < BaseMetricsParser
+  def initialize(metric_prefix, router, tag)
+    @metric_prefix = metric_prefix
+    @router = router
+    @tag = tag
+  end
+
+  def format_data(cleaned_data = [], prefix = '', separator = '')
+    hash_data_array = []
+    cleaned_data.each do |interim_data|
+      hash_data_array << generate_hash_from_data(interim_data, prefix, separator)
+    end
+    hash_data_array
+  end
+
+  def generate_hash_from_data(data = '', prefix = '', separator = '')
+    metrics_hash = {}
+    if data =~ /{/ && data =~ /}/
+      metric_name, additional_dims, metric_val_and_time = data.match(/(.*){(.*)}(.*)/i).captures
+      if metric_val_and_time =~ / /
+        metrics_hash['metric_name'] = prefix + separator + metric_name
+        metrics_hash['value'] =
+          metric_val_and_time.strip.split[0] =~ /^\S*\.\S*$/ ? metric_val_and_time.strip.split[0].to_f : metric_val_and_time.strip.split[0].to_i
+        metrics_hash['time'] = metric_val_and_time.strip.split[1].to_i
+        if additional_dims =~ /,/
+          additional_dims.split(/,/).map do |interim_data|
+            metrics_hash[interim_data.split(/=/)[0]] = interim_data.split(/=/)[1].gsub(/"/, '') if interim_data =~ /=/
+          end
+        end
+      end
+    else
+      metrics_hash['metric_name'], metrics_hash['value'], metrics_hash['time'] = data.split
+      metrics_hash['metric_name'] = prefix + separator + metrics_hash['metric_name']
+      metrics_hash['value'] =
+        metrics_hash['value'] =~ /^\S*\.\S*$/ ? metrics_hash['value'].to_f : metrics_hash['value'].to_i
+      metrics_hash['time'] = metrics_hash['time'].to_i
+    end
+    metrics_hash
+  end
+end
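
With the Splunk parser, each Prometheus line becomes its own event hash with metric_name/value/time keys. Note that labels are only copied into the event when the label block contains a comma; a single label (as in the second line below) is dropped by this version. An illustrative run (values borrowed from the sample fixtures):

```ruby
require_relative 'splunk_metrics_parser'

parser = SplunkMetricsParser.new('jfrog.artifactory', nil, 'jfrog.artifactory.metrics')
lines = [
  'jfrt_db_connections_active_total 0 1645738619452',
  'queue_messages_total{queue_name="alert"} 0 1645738679875'
]

p parser.format_data(lines, 'jfrog.artifactory', '.')
# => [{"metric_name"=>"jfrog.artifactory.jfrt_db_connections_active_total",
#      "value"=>0, "time"=>1645738619452},
#     {"metric_name"=>"jfrog.artifactory.queue_messages_total",
#      "value"=>0, "time"=>1645738679875}]
```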
data/spec/fixtures/files/creds.rb CHANGED
@@ -2,7 +2,7 @@
 $jpd_url = ''
 $username = ''
 $apikey = ''
-
+$token = ''
 def get_credentials
-  [$jpd_url, $username, $apikey]
+  [$jpd_url, $username, $apikey, $token]
 end
data/spec/fixtures/files/sample_artifactory_metrics.txt CHANGED
@@ -1,64 +1,64 @@
 # HELP sys_memory_used_bytes Host used virtual memory
-# UPDATED sys_memory_used_bytes 1636577501390
+# UPDATED sys_memory_used_bytes 1645738619452
 # TYPE sys_memory_used_bytes gauge
-sys_memory_used_bytes 3836522496 1636577501390
+sys_memory_used_bytes 3836522496 1645738619452
 # HELP sys_memory_free_bytes Host free virtual memory
-# UPDATED sys_memory_free_bytes 1636577501390
+# UPDATED sys_memory_free_bytes 1645738619452
 # TYPE sys_memory_free_bytes gauge
-sys_memory_free_bytes 8396328960 1636577501390
+sys_memory_free_bytes 8396328960 1645738619452
 # HELP jfrt_runtime_heap_freememory_bytes Free Memory
-# UPDATED jfrt_runtime_heap_freememory_bytes 1636577501390
+# UPDATED jfrt_runtime_heap_freememory_bytes 1645738619452
 # TYPE jfrt_runtime_heap_freememory_bytes gauge
-jfrt_runtime_heap_freememory_bytes 464534120 1636577501390
+jfrt_runtime_heap_freememory_bytes 464534120 1645738619452
 # HELP jfrt_runtime_heap_maxmemory_bytes Max Memory
-# UPDATED jfrt_runtime_heap_maxmemory_bytes 1636577501390
+# UPDATED jfrt_runtime_heap_maxmemory_bytes 1645738619452
 # TYPE jfrt_runtime_heap_maxmemory_bytes gauge
-jfrt_runtime_heap_maxmemory_bytes 2147483648 1636577501390
+jfrt_runtime_heap_maxmemory_bytes 2147483648 1645738619452
 # HELP jfrt_runtime_heap_totalmemory_bytes Total Memory
-# UPDATED jfrt_runtime_heap_totalmemory_bytes 1636577501390
+# UPDATED jfrt_runtime_heap_totalmemory_bytes 1645738619452
 # TYPE jfrt_runtime_heap_totalmemory_bytes gauge
-jfrt_runtime_heap_totalmemory_bytes 1358954496 1636577501390
+jfrt_runtime_heap_totalmemory_bytes 1358954496 1645738619452
 # HELP jfrt_runtime_heap_processors_total Available Processors
-# UPDATED jfrt_runtime_heap_processors_total 1636577501390
+# UPDATED jfrt_runtime_heap_processors_total 1645738619452
 # TYPE jfrt_runtime_heap_processors_total counter
-jfrt_runtime_heap_processors_total 1 1636577501390
+jfrt_runtime_heap_processors_total 1 1645738619452
 # HELP jfrt_db_connections_active_total Total Active Connections
 # UPDATED jfrt_db_connections_active_total 1636577471195
 # TYPE jfrt_db_connections_active_total gauge
-jfrt_db_connections_active_total 0 1636577501390
+jfrt_db_connections_active_total 0 1645738619452
 # HELP jfrt_db_connections_idle_total Total Idle Connections
 # UPDATED jfrt_db_connections_idle_total 1636577471195
 # TYPE jfrt_db_connections_idle_total gauge
-jfrt_db_connections_idle_total 1 1636577501390
+jfrt_db_connections_idle_total 1 1645738619452
 # HELP jfrt_db_connections_max_active_total Total Max Active Connections
 # UPDATED jfrt_db_connections_max_active_total 1636577471195
 # TYPE jfrt_db_connections_max_active_total gauge
-jfrt_db_connections_max_active_total 80 1636577501390
+jfrt_db_connections_max_active_total 80 1645738619452
 # HELP jfrt_db_connections_min_idle_total Total Min Idle Connections
 # UPDATED jfrt_db_connections_min_idle_total 1636577471195
 # TYPE jfrt_db_connections_min_idle_total gauge
-jfrt_db_connections_min_idle_total 1 1636577501390
+jfrt_db_connections_min_idle_total 1 1645738619452
 # HELP sys_cpu_ratio Total cpu load ratio
-# UPDATED sys_cpu_ratio 1636577501390
+# UPDATED sys_cpu_ratio 1645738619452
 # TYPE sys_cpu_ratio gauge
-sys_cpu_ratio 0.16 1636577501390
+sys_cpu_ratio 0.16 1645738619452
 # HELP jfrt_projects_active_total Projects Amount
-# UPDATED jfrt_projects_active_total 1636577501390
+# UPDATED jfrt_projects_active_total 1645738619452
 # TYPE jfrt_projects_active_total counter
-jfrt_projects_active_total 0 1636577501390
+jfrt_projects_active_total 0 1645738619452
 # HELP jfrt_storage_current_total_size_bytes Used Storage
-# UPDATED jfrt_storage_current_total_size_bytes 1636577501390
+# UPDATED jfrt_storage_current_total_size_bytes 1645738619452
 # TYPE jfrt_storage_current_total_size_bytes gauge
-jfrt_storage_current_total_size_bytes 0 1636577501390
+jfrt_storage_current_total_size_bytes 0 1645738619452
 # HELP app_disk_used_bytes Used bytes for app home directory disk device
-# UPDATED app_disk_used_bytes 1636577501390
+# UPDATED app_disk_used_bytes 1645738619452
 # TYPE app_disk_used_bytes gauge
-app_disk_used_bytes 730750976 1636577501390
+app_disk_used_bytes 730750976 1645738619452
 # HELP app_disk_free_bytes Free bytes for app home directory disk device
-# UPDATED app_disk_free_bytes 1636577501390
+# UPDATED app_disk_free_bytes 1645738619452
 # TYPE app_disk_free_bytes gauge
-app_disk_free_bytes 209510809600 1636577501390
+app_disk_free_bytes 209510809600 1645738619452
 # HELP jfrt_artifacts_gc_next_run_seconds Next GC Run
 # UPDATED jfrt_artifacts_gc_next_run_seconds 1636574411092
 # TYPE jfrt_artifacts_gc_next_run_seconds gauge
-jfrt_artifacts_gc_next_run_seconds 14388 1636577501390
+jfrt_artifacts_gc_next_run_seconds 14388 1645738619452
data/spec/fixtures/files/sample_xray_metrics.txt CHANGED
@@ -1,138 +1,138 @@
 # HELP app_disk_used_bytes Used bytes for app home directory disk device
 # TYPE app_disk_used_bytes gauge
-app_disk_used_bytes 1.48081664e+10 1636577556068
+app_disk_used_bytes 1.48081664e+10 1645738679875
 # HELP app_disk_free_bytes Free bytes for app home directory disk device
 # TYPE app_disk_free_bytes gauge
-app_disk_free_bytes 3.356854272e+10 1636577556068
+app_disk_free_bytes 3.356854272e+10 1645738679875
 # HELP app_io_counters_write_bytes Process io total write bytes
 # TYPE app_io_counters_write_bytes gauge
-app_io_counters_write_bytes 1.217306624e+09 1636577556068
+app_io_counters_write_bytes 1.217306624e+09 1645738679875
 # HELP app_io_counters_read_bytes Process io total read bytes
 # TYPE app_io_counters_read_bytes gauge
-app_io_counters_read_bytes 2.10030592e+08 1636577556068
+app_io_counters_read_bytes 2.10030592e+08 1645738679875
 # HELP app_self_metrics_calc_seconds Total time to collect all metrics
 # TYPE app_self_metrics_calc_seconds gauge
-app_self_metrics_calc_seconds 0.050925766 1636577556068
+app_self_metrics_calc_seconds 0.050925766 1645738679875
 # HELP app_self_metrics_total Count of collected metrics
 # TYPE app_self_metrics_total gauge
-app_self_metrics_total 31 1636577556068
+app_self_metrics_total 31 1645738679875
 # HELP db_connection_pool_in_use_total The number of connections currently in use
 # TYPE db_connection_pool_in_use_total gauge
-db_connection_pool_in_use_total 0 1636577556068
+db_connection_pool_in_use_total 0 1645738679875
 # HELP db_connection_pool_idle_total The number of idle connections
 # TYPE db_connection_pool_idle_total gauge
-db_connection_pool_idle_total 5 1636577556068
+db_connection_pool_idle_total 5 1645738679875
 # HELP db_connection_pool_max_open_total The maximum number of open connections
 # TYPE db_connection_pool_max_open_total gauge
-db_connection_pool_max_open_total 60 1636577556068
+db_connection_pool_max_open_total 60 1645738679875
 # HELP go_memstats_heap_in_use_bytes Process go heap bytes in use
 # TYPE go_memstats_heap_in_use_bytes gauge
-go_memstats_heap_in_use_bytes 1.98139904e+08 1636577556068
+go_memstats_heap_in_use_bytes 1.98139904e+08 1645738679875
 # HELP go_memstats_heap_allocated_bytes Process go heap allocated bytes
 # TYPE go_memstats_heap_allocated_bytes gauge
-go_memstats_heap_allocated_bytes 1.85429088e+08 1636577556068
+go_memstats_heap_allocated_bytes 1.85429088e+08 1645738679875
 # HELP go_memstats_heap_idle_bytes Process go heap idle bytes
 # TYPE go_memstats_heap_idle_bytes gauge
-go_memstats_heap_idle_bytes 1.35733248e+08 1636577556068
+go_memstats_heap_idle_bytes 1.35733248e+08 1645738679875
 # HELP go_memstats_heap_objects_total Process go heap number of objects
 # TYPE go_memstats_heap_objects_total gauge
-go_memstats_heap_objects_total 1.93186e+06 1636577556068
+go_memstats_heap_objects_total 1.93186e+06 1645738679875
 # HELP go_memstats_heap_reserved_bytes Process go heap reserved bytes
 # TYPE go_memstats_heap_reserved_bytes gauge
-go_memstats_heap_reserved_bytes 3.33873152e+08 1636577556068
+go_memstats_heap_reserved_bytes 3.33873152e+08 1645738679875
 # HELP go_memstats_gc_cpu_fraction_ratio Process go cpu used by gc. value is between 0 and 1
 # TYPE go_memstats_gc_cpu_fraction_ratio gauge
-go_memstats_gc_cpu_fraction_ratio 0.00024063137131169772 1636577556068
+go_memstats_gc_cpu_fraction_ratio 0.00024063137131169772 1645738679875
 # HELP go_routines_total Number of goroutines that currently exist
 # TYPE go_routines_total gauge
-go_routines_total 169 1636577556068
+go_routines_total 169 1645738679875
 # HELP jfxr_data_artifacts_total Artifacts of pkg type generic count in Xray
 # UPDATED jfxr_data_artifacts_total 1636513309792
 # TYPE jfxr_data_artifacts_total counter
-jfxr_data_artifacts_total{package_type="generic"} 1 1636577556068
+jfxr_data_artifacts_total{package_type="generic"} 1 1645738679875
 # HELP jfxr_data_components_total Components of pkg type generic count in Xray
 # UPDATED jfxr_data_components_total 1636513309792
 # TYPE jfxr_data_components_total counter
-jfxr_data_components_total{package_type="generic"} 1 1636577556068
+jfxr_data_components_total{package_type="generic"} 1 1645738679875
 # HELP jfxr_db_sync_running_total Is dbsync running
 # UPDATED jfxr_db_sync_running_total 1636577439791
 # TYPE jfxr_db_sync_running_total gauge
-jfxr_db_sync_running_total 0 1636577556068
+jfxr_db_sync_running_total 0 1645738679875
 # HELP jfxr_jira_last_ticket_creation_time_seconds Last ticket creation time
 # UPDATED jfxr_jira_last_ticket_creation_time_seconds 1636577309791
 # TYPE jfxr_jira_last_ticket_creation_time_seconds gauge
-jfxr_jira_last_ticket_creation_time_seconds 0 1636577556068
+jfxr_jira_last_ticket_creation_time_seconds 0 1645738679875
 # HELP jfxr_jira_no_of_errors_in_last_hour_total Total no of errors in last one hour
 # UPDATED jfxr_jira_no_of_errors_in_last_hour_total 1636577309791
 # TYPE jfxr_jira_no_of_errors_in_last_hour_total counter
-jfxr_jira_no_of_errors_in_last_hour_total 0 1636577556068
+jfxr_jira_no_of_errors_in_last_hour_total 0 1645738679875
 # HELP jfxr_jira_last_error_time_seconds Last error occurred time
 # UPDATED jfxr_jira_last_error_time_seconds 1636577309791
 # TYPE jfxr_jira_last_error_time_seconds gauge
-jfxr_jira_last_error_time_seconds 0 1636577556068
+jfxr_jira_last_error_time_seconds 0 1645738679875
 # HELP jfxr_jira_no_of_integrations_total Total no of jira integrations
 # UPDATED jfxr_jira_no_of_integrations_total 1636577309791
 # TYPE jfxr_jira_no_of_integrations_total counter
-jfxr_jira_no_of_integrations_total 0 1636577556068
+jfxr_jira_no_of_integrations_total 0 1645738679875
 # HELP jfxr_jira_no_of_profiles_total Total no of profiles created
 # UPDATED jfxr_jira_no_of_profiles_total 1636577309791
 # TYPE jfxr_jira_no_of_profiles_total counter
-jfxr_jira_no_of_profiles_total 0 1636577556068
+jfxr_jira_no_of_profiles_total 0 1645738679875
 # HELP jfxr_jira_no_of_tickets_created_in_last_one_hour_total Total no of jira tickets created in past one hour
 # UPDATED jfxr_jira_no_of_tickets_created_in_last_one_hour_total 1636577309791
 # TYPE jfxr_jira_no_of_tickets_created_in_last_one_hour_total counter
-jfxr_jira_no_of_tickets_created_in_last_one_hour_total 0 1636577556068
+jfxr_jira_no_of_tickets_created_in_last_one_hour_total 0 1645738679875
 # HELP jfxr_performance_server_up_time_seconds Xray server up time
 # TYPE jfxr_performance_server_up_time_seconds gauge
-jfxr_performance_server_up_time_seconds 928277.850744045 1636577556068
+jfxr_performance_server_up_time_seconds 928277.850744045 1645738679875
 # HELP queue_messages_total The number of messages currently in queue
 # UPDATED queue_messages_total 1636577509792
 # TYPE queue_messages_total gauge
-queue_messages_total{queue_name="alert"} 0 1636577556068
-queue_messages_total{queue_name="alertImpactAnalysis"} 0 1636577556068
-queue_messages_total{queue_name="alertImpactAnalysisRetry"} 0 1636577556068
-queue_messages_total{queue_name="alertRetry"} 0 1636577556068
-queue_messages_total{queue_name="analysis"} 0 1636577556068
-queue_messages_total{queue_name="analysisExistingContent"} 0 1636577556068
-queue_messages_total{queue_name="analysisExistingContentRetry"} 0 1636577556068
-queue_messages_total{queue_name="analysisRetry"} 0 1636577556068
-queue_messages_total{queue_name="buildReport_xray-0"} 0 1636577556068
-queue_messages_total{queue_name="failure"} 0 1636577556068
-queue_messages_total{queue_name="gcSyncMaster"} 0 1636577556068
-queue_messages_total{queue_name="impactAnalysis"} 0 1636577556068
-queue_messages_total{queue_name="impactAnalysisRetry"} 0 1636577556068
-queue_messages_total{queue_name="impactPathRecovery"} 0 1636577556068
-queue_messages_total{queue_name="impactPathRecoveryRetry"} 0 1636577556068
-queue_messages_total{queue_name="index"} 0 1636577556068
-queue_messages_total{queue_name="indexExistingContentRetry"} 0 1636577556068
-queue_messages_total{queue_name="indexExistsContent"} 0 1636577556068
-queue_messages_total{queue_name="indexRetry"} 0 1636577556068
-queue_messages_total{queue_name="job"} 0 1636577556068
-queue_messages_total{queue_name="mdsUpdate"} 0 1636577556068
-queue_messages_total{queue_name="mdsUpdateExistingContent"} 0 1636577556068
-queue_messages_total{queue_name="mdsUpdateExistingContentRetry"} 0 1636577556068
-queue_messages_total{queue_name="mdsUpdateRetry"} 0 1636577556068
-queue_messages_total{queue_name="notification"} 0 1636577556068
-queue_messages_total{queue_name="notificationRetry"} 0 1636577556068
-queue_messages_total{queue_name="persist"} 0 1636577556068
-queue_messages_total{queue_name="persistExistingContent"} 0 1636577556068
-queue_messages_total{queue_name="persistExistingContentRetry"} 0 1636577556068
-queue_messages_total{queue_name="persistRetry"} 0 1636577556068
-queue_messages_total{queue_name="report"} 0 1636577556068
-queue_messages_total{queue_name="reportRetry"} 0 1636577556068
-queue_messages_total{queue_name="ticketing"} 0 1636577556068
-queue_messages_total{queue_name="ticketingRetry"} 0 1636577556068
+queue_messages_total{queue_name="alert"} 0 1645738679875
+queue_messages_total{queue_name="alertImpactAnalysis"} 0 1645738679875
+queue_messages_total{queue_name="alertImpactAnalysisRetry"} 0 1645738679875
+queue_messages_total{queue_name="alertRetry"} 0 1645738679875
+queue_messages_total{queue_name="analysis"} 0 1645738679875
+queue_messages_total{queue_name="analysisExistingContent"} 0 1645738679875
+queue_messages_total{queue_name="analysisExistingContentRetry"} 0 1645738679875
+queue_messages_total{queue_name="analysisRetry"} 0 1645738679875
+queue_messages_total{queue_name="buildReport_xray-0"} 0 1645738679875
+queue_messages_total{queue_name="failure"} 0 1645738679875
+queue_messages_total{queue_name="gcSyncMaster"} 0 1645738679875
+queue_messages_total{queue_name="impactAnalysis"} 0 1645738679875
+queue_messages_total{queue_name="impactAnalysisRetry"} 0 1645738679875
+queue_messages_total{queue_name="impactPathRecovery"} 0 1645738679875
+queue_messages_total{queue_name="impactPathRecoveryRetry"} 0 1645738679875
+queue_messages_total{queue_name="index"} 0 1645738679875
+queue_messages_total{queue_name="indexExistingContentRetry"} 0 1645738679875
+queue_messages_total{queue_name="indexExistsContent"} 0 1645738679875
+queue_messages_total{queue_name="indexRetry"} 0 1645738679875
+queue_messages_total{queue_name="job"} 0 1645738679875
+queue_messages_total{queue_name="mdsUpdate"} 0 1645738679875
+queue_messages_total{queue_name="mdsUpdateExistingContent"} 0 1645738679875
+queue_messages_total{queue_name="mdsUpdateExistingContentRetry"} 0 1645738679875
+queue_messages_total{queue_name="mdsUpdateRetry"} 0 1645738679875
+queue_messages_total{queue_name="notification"} 0 1645738679875
+queue_messages_total{queue_name="notificationRetry"} 0 1645738679875
+queue_messages_total{queue_name="persist"} 0 1645738679875
+queue_messages_total{queue_name="persistExistingContent"} 0 1645738679875
+queue_messages_total{queue_name="persistExistingContentRetry"} 0 1645738679875
+queue_messages_total{queue_name="persistRetry"} 0 1645738679875
+queue_messages_total{queue_name="report"} 0 1645738679875
+queue_messages_total{queue_name="reportRetry"} 0 1645738679875
+queue_messages_total{queue_name="ticketing"} 0 1645738679875
+queue_messages_total{queue_name="ticketingRetry",new_queue_name="ticketingRetry2"} 0 1645738679875
 # TYPE sys_cpu_ratio gauge
-sys_cpu_ratio 0.3333333337029058 1636577556068
+sys_cpu_ratio 0.3333333337029058 1645738679875
 # HELP sys_load_1 Host load average in the last minute
-sys_load_1 2 1636577556068
+sys_load_1 2 1645738679875
 # HELP sys_load_5 Host load average in the last 5 minutes
-sys_load_5 1.82 1636577556068
+sys_load_5 1.82 1645738679875
 # HELP sys_load_15 Host load average in the last 15 minutes
-sys_load_15 1.73 1636577556068
+sys_load_15 1.73 1645738679875
 # HELP sys_memory_used_bytes Host used virtual memory
 # TYPE sys_memory_used_bytes gauge
-sys_memory_used_bytes 6.9718016e+09 1636577556068
+sys_memory_used_bytes 6.9718016e+09 1645738679875
 # HELP sys_memory_free_bytes Host free virtual memory
 # TYPE sys_memory_free_bytes gauge
-sys_memory_free_bytes 3.17022208e+08 1636577556068
+sys_memory_free_bytes 3.17022208e+08 1645738679875
data/spec/lib/{metrics_parser_spec.rb → elastic_metrics_parser_spec.rb} RENAMED
@@ -7,18 +7,18 @@
   $LOAD_PATH.unshift(dir) unless $LOAD_PATH.include?(dir)
 end
 
-require 'metrics_parser'
+require 'elastic_metrics_parser'
 require 'date'
 require 'rspec'
 
 
-RSpec.describe MetricsParser do
+RSpec.describe ElasticMetricsParser do
   describe "#emit_parsed_metrics" do
     it 'should read sample Artifactory metrics data and verify the size of parsed data > 1' do
       platform_metrics = File.read('./spec/fixtures/files/sample_artifactory_metrics.txt')
       expect(platform_metrics.size).to be > 1
 
-      parser = MetricsParser.new('jfrog.artifactory', '', 'jfrog.artifactory.metrics')
+      parser = ElasticMetricsParser.new('jfrog.artifactory', '', 'jfrog.artifactory.metrics')
 
       normalized_data = parser.normalise_data(platform_metrics)
       expect(normalized_data.size).to be > 1
@@ -37,7 +37,7 @@ RSpec.describe MetricsParser do
       platform_metrics = File.read('./spec/fixtures/files/sample_xray_metrics.txt')
       expect(platform_metrics.size).to be > 1
 
-      parser = MetricsParser.new('jfrog.xray', '', 'jfrog.xray.metrics')
+      parser = ElasticMetricsParser.new('jfrog.xray', '', 'jfrog.xray.metrics')
 
       normalized_data = parser.normalise_data(platform_metrics)
       expect(normalized_data.size).to be > 1
data/spec/lib/metrics_helper_spec.rb CHANGED
@@ -15,7 +15,7 @@ require './spec/fixtures/files/creds'
 
 
 RSpec.describe MetricsHelper do
-  jpd_url, username, apikey = get_credentials
+  jpd_url, username, apikey, token = get_credentials
   describe '#get_metrics' do
     it 'should return response code 200 and response body > 1' do
       response = RestClient::Request.new(
@@ -28,5 +28,12 @@ RSpec.describe MetricsHelper do
       expect(response.size).to be > 1
     end
   end
+
+  it 'should check whether the observability endpoint exists' do
+    helper = MetricsHelper.new('jfrog.artifactory', jpd_url, username, apikey, token, false)
+    url = "#{jpd_url}/observability/api/v1/metrics"
+    helper.check_endpoint(url, token)
+  end
+
 end
 end
data/spec/lib/newrelic_metrics_parser_spec.rb ADDED
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+[
+  File.join(File.dirname(__FILE__), '..'),
+  File.join(File.dirname(__FILE__), '..', 'lib/fluent/plugin'),
+  File.join(File.dirname(__FILE__), '..', 'spec'),
+].each do |dir|
+  $LOAD_PATH.unshift(dir) unless $LOAD_PATH.include?(dir)
+end
+
+require 'newrelic_metrics_parser'
+require 'date'
+require 'rspec'
+
+
+RSpec.describe NewRelicMetricsParser do
+  describe "#emit_parsed_metrics" do
+    it 'should read sample Artifactory metrics data and verify the size of parsed data > 1' do
+      platform_metrics = File.read('./spec/fixtures/files/sample_artifactory_metrics.txt')
+      expect(platform_metrics.size).to be > 1
+
+      parser = NewRelicMetricsParser.new('jfrog.artifactory', '', 'jfrog.artifactory.metrics')
+
+      normalized_data = parser.normalise_data(platform_metrics)
+      expect(normalized_data.size).to be > 1
+
+      cleaned_data = parser.clean_data(normalized_data)
+      expect(cleaned_data.size).to be > 1
+
+      hash_data_array = parser.format_data(cleaned_data, 'jfrog.artifactory', '.')
+      expect(hash_data_array.size).to be 1
+
+      serialized_data = parser.serialize_data(hash_data_array)
+      expect(serialized_data.size).to be 1
+    end
+
+    it 'should read sample Xray metrics data and verify the size of parsed data > 1' do
+      platform_metrics = File.read('./spec/fixtures/files/sample_xray_metrics.txt')
+      expect(platform_metrics.size).to be > 1
+
+      parser = NewRelicMetricsParser.new('jfrog.xray', '', 'jfrog.xray.metrics')
+
+      normalized_data = parser.normalise_data(platform_metrics)
+      expect(normalized_data.size).to be > 1
+
+      cleaned_data = parser.clean_data(normalized_data)
+      expect(cleaned_data.size).to be > 1
+
+      hash_data_array = parser.format_data(cleaned_data, 'jfrog.xray', '.')
+      expect(hash_data_array.size).to be 1
+
+      serialized_data = parser.serialize_data(hash_data_array)
+      expect(serialized_data.size).to be 1
+    end
+  end
+end
data/spec/lib/splunk_metrics_parser_spec.rb ADDED
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+[
+  File.join(File.dirname(__FILE__), '..'),
+  File.join(File.dirname(__FILE__), '..', 'lib/fluent/plugin'),
+  File.join(File.dirname(__FILE__), '..', 'spec'),
+].each do |dir|
+  $LOAD_PATH.unshift(dir) unless $LOAD_PATH.include?(dir)
+end
+
+require 'splunk_metrics_parser'
+require 'date'
+require 'rspec'
+
+
+RSpec.describe SplunkMetricsParser do
+  describe "#emit_parsed_metrics" do
+    it 'should read sample Artifactory metrics data and verify the size of parsed data > 1' do
+      platform_metrics = File.read('./spec/fixtures/files/sample_artifactory_metrics.txt')
+      expect(platform_metrics.size).to be > 1
+
+      parser = SplunkMetricsParser.new('jfrog.artifactory', '', 'jfrog.artifactory.metrics')
+
+      normalized_data = parser.normalise_data(platform_metrics)
+      expect(normalized_data.size).to be > 1
+
+      cleaned_data = parser.clean_data(normalized_data)
+      expect(cleaned_data.size).to be > 1
+
+      hash_data_array = parser.format_data(cleaned_data, 'jfrog.artifactory', '.')
+      expect(hash_data_array.size).to be > 1
+
+      serialized_data = parser.serialize_data(hash_data_array)
+      expect(serialized_data.size).to be > 1
+    end
+
+    it 'should read sample Xray metrics data and verify the size of parsed data > 1' do
+      platform_metrics = File.read('./spec/fixtures/files/sample_xray_metrics.txt')
+      expect(platform_metrics.size).to be > 1
+
+      parser = SplunkMetricsParser.new('jfrog.xray', '', 'jfrog.xray.metrics')
+
+      normalized_data = parser.normalise_data(platform_metrics)
+      expect(normalized_data.size).to be > 1
+
+      cleaned_data = parser.clean_data(normalized_data)
+      expect(cleaned_data.size).to be > 1
+
+      hash_data_array = parser.format_data(cleaned_data, 'jfrog.xray', '.')
+      expect(hash_data_array.size).to be > 1
+
+      serialized_data = parser.serialize_data(hash_data_array)
+      expect(serialized_data.size).to be > 1
+    end
+  end
+end
data/spec/spec_helper.rb CHANGED
@@ -23,7 +23,9 @@
 end
 
 require 'metrics_helper'
-require 'metrics_parser'
+require 'splunk_metrics_parser'
+require 'newrelic_metrics_parser'
+require 'elastic_metrics_parser'
 
 RSpec.configure do |config|
   # rspec-expectations config goes here. You can use an alternate
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: fluent-plugin-jfrog-metrics
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.2.1
 platform: ruby
 authors:
-- MahithaB, VasukiN
-autorequire:
+- MahithaB, VasukiN, giri-vsr
+autorequire:
 bindir: bin
 cert_chain: []
-date: 2021-12-14 00:00:00.000000000 Z
+date: 2022-03-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -52,6 +52,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '3.0'
+- !ruby/object:Gem::Dependency
+  name: rest-client
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '2.0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '2.0'
 - !ruby/object:Gem::Dependency
   name: fluentd
   requirement: !ruby/object:Gem::Requirement
@@ -72,10 +86,25 @@ dependencies:
     - - "<"
       - !ruby/object:Gem::Version
         version: '2'
-description: Fluentd Plugin for converting one metrics form of data to another, this
-  is from Prometheus format to Splunk HEC required format
+- !ruby/object:Gem::Dependency
+  name: rest-client
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '2.0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '2.0'
+description: Fluentd Plugin for converting JFrog Artifactory, Xray generated metrics
+  (Prometheus Exposition Format) to target observability platform format (Splunk HEC,
+  New Relic, Elastic)
 email:
-- 60710901+MahithaB@users.noreply.github.com
+- cpe-support@jfrog.com
 executables: []
 extensions: []
 extra_rdoc_files: []
@@ -88,14 +117,19 @@ files:
 - README.md
 - Rakefile
 - fluent-plugin-jfrog-metrics.gemspec
+- lib/fluent/plugin/base_metrics_parser.rb
+- lib/fluent/plugin/elastic_metrics_parser.rb
 - lib/fluent/plugin/in_jfrog_metrics.rb
 - lib/fluent/plugin/metrics_helper.rb
-- lib/fluent/plugin/metrics_parser.rb
+- lib/fluent/plugin/newrelic_metrics_parser.rb
+- lib/fluent/plugin/splunk_metrics_parser.rb
 - spec/fixtures/files/creds.rb
 - spec/fixtures/files/sample_artifactory_metrics.txt
 - spec/fixtures/files/sample_xray_metrics.txt
+- spec/lib/elastic_metrics_parser_spec.rb
 - spec/lib/metrics_helper_spec.rb
-- spec/lib/metrics_parser_spec.rb
+- spec/lib/newrelic_metrics_parser_spec.rb
+- spec/lib/splunk_metrics_parser_spec.rb
 - spec/spec_helper.rb
 - test/helper.rb
 - test/plugin/test_in_jfrog_metrics.rb
@@ -103,7 +137,7 @@ homepage: https://github.com/jfrog/jfrog-fluentd-plugins/tree/main/fluent-plugin
 licenses:
 - Apache-2.0
 metadata: {}
-post_install_message:
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -118,17 +152,20 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.0.3
-signing_key:
+rubygems_version: 3.1.6
+signing_key:
 specification_version: 4
-summary: Fluentd Plugin for converting one metrics form of data to another, this is
-  from Prometheus format to Splunk HEC required format
+summary: Fluentd Plugin for converting JFrog Artifactory, Xray generated metrics (Prometheus
+  Exposition Format) to target observability platform format (Splunk HEC, New Relic,
+  Elastic)
 test_files:
 - spec/fixtures/files/creds.rb
 - spec/fixtures/files/sample_artifactory_metrics.txt
 - spec/fixtures/files/sample_xray_metrics.txt
+- spec/lib/elastic_metrics_parser_spec.rb
 - spec/lib/metrics_helper_spec.rb
-- spec/lib/metrics_parser_spec.rb
+- spec/lib/newrelic_metrics_parser_spec.rb
+- spec/lib/splunk_metrics_parser_spec.rb
 - spec/spec_helper.rb
 - test/helper.rb
 - test/plugin/test_in_jfrog_metrics.rb
data/lib/fluent/plugin/metrics_parser.rb DELETED
@@ -1,83 +0,0 @@
-# frozen_string_literal: true
-require 'json'
-
-class MetricsParser
-  def initialize(metric_prefix, router, tag)
-    @metric_prefix = metric_prefix
-    @router = router
-    @tag = tag
-  end
-
-  def normalise_data(data_to_normalize = [])
-    normalized_data = []
-    if data_to_normalize.length == 1
-      data_to_normalize.each do |interim_data|
-        normalized_data = interim_data.split(/\\n+|\\r+/).reject(&:empty?)
-      end
-    else
-      data_to_normalize.each_line do |interim_data|
-        normalized_data << interim_data.strip unless interim_data == "\n"
-      end
-    end
-    normalized_data
-  end
-
-  def clean_data(data_to_clean = [])
-    cleaned_data = []
-    data_to_clean.each do |interim_data|
-      cleaned_data << interim_data unless interim_data.include? '#'
-    end
-    cleaned_data
-  end
-
-  def extract_metrics_in_hash(cleaned_data = [], prefix = '', separator = '')
-    hash_data_array = []
-    cleaned_data.each do |interim_data|
-      hash_data_array << generate_hash_from_data(interim_data, prefix, separator)
-    end
-    hash_data_array
-  end
-
-  def generate_hash_from_data(data = '', prefix = '', separator = '')
-    metrics_hash = {}
-    if data =~ /{/ && data =~ /}/
-      metric_name, additional_dims, metric_val_and_time = data.match(/(.*){(.*)}(.*)/i).captures
-      if metric_val_and_time =~ / /
-        metrics_hash['metric_name'] = prefix + separator + metric_name
-        metrics_hash['value'] =
-          metric_val_and_time.strip.split[0] =~ /^\S*\.\S*$/ ? metric_val_and_time.strip.split[0].to_f : metric_val_and_time.strip.split[0].to_i
-        metrics_hash['time'] = metric_val_and_time.strip.split[1].to_i
-        if additional_dims =~ /,/
-          additional_dims.split(/,/).map do |interim_data|
-            metrics_hash[interim_data.split(/=/)[0]] = interim_data.split(/=/)[1].gsub(/"/, '') if interim_data =~ /=/
-          end
-        end
-      end
-    else
-      metrics_hash['metric_name'], metrics_hash['value'], metrics_hash['time'] = data.split
-      metrics_hash['metric_name'] = prefix + separator + metrics_hash['metric_name']
-      metrics_hash['value'] =
-        metrics_hash['value'] =~ /^\S*\.\S*$/ ? metrics_hash['value'].to_f : metrics_hash['value'].to_i
-      metrics_hash['time'] = metrics_hash['time'].to_i
-    end
-    metrics_hash
-  end
-
-  def serialize_data(hashed_data_array = [])
-    serialised_data = []
-    hashed_data_array.each do |interim_data|
-      serialised_data << JSON.parse(interim_data.to_json)
-    end
-    serialised_data
-  end
-
-  def emit_parsed_metrics(platform_metrics)
-    normalized_data = normalise_data(platform_metrics)
-    cleaned_data = clean_data(normalized_data)
-    hash_data_array = extract_metrics_in_hash(cleaned_data, @metric_prefix, '.')
-    serialized_data = serialize_data(hash_data_array)
-    serialized_data.each do |interim_data|
-      @router.emit(@tag, Fluent::Engine.now, interim_data)
-    end
-  end
-end