fluent-plugin-jfrog-metrics-deepakk 0.2.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,158 @@
+ # frozen_string_literal: true
+ #
+ # Copyright 2021- MahithaB
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ require 'concurrent'
+ require 'rest-client'
+ require 'fluent/plugin/input'
+ require_relative 'base_metrics_parser'
+ require_relative 'metrics_helper'
+ require_relative 'newrelic_metrics_parser'
+ require_relative 'splunk_metrics_parser'
+ require_relative 'datadog_metrics_parser'
+
+ module Fluent
+ module Plugin
+ class JfrogMetricsInput < Fluent::Plugin::Input
+ Fluent::Plugin.register_input('jfrog_metrics', self)
+ helpers :timer
+ # `config_param` defines a parameter.
+ # You can refer to a parameter like an instance variable e.g. @port.
+ # `:default` means that the parameter is optional.
+ config_param :tag, :string, default: ''
+ config_param :jpd_url, :string, default: ''
+ config_param :username, :string, default: ''
+ config_param :apikey, :string, default: '', :secret => true
+ config_param :token, :string, default: '', :secret => true
+ config_param :execution_interval, :time, default: 60
+ config_param :request_timeout, :time, default: 20
+ config_param :metric_prefix, :string, default: ''
+ config_param :target_platform, :string, default: 'SPLUNK'
+ config_param :common_jpd, :bool, default: false
+ config_param :verify_ssl, :bool, default: true
+
+ $logger = nil
+
+ # `configure` is called before `start`.
+ # 'conf' is a `Hash` that includes the configuration parameters.
+ # If the configuration is invalid, raise `Fluent::ConfigError`.
+ def configure(conf)
+ super
+ raise Fluent::ConfigError, 'Must define the tag for metrics data.' if @tag == ''
+
+ raise Fluent::ConfigError, 'Must define the jpd_url to scrape metrics.' if @jpd_url == ''
+
+ raise Fluent::ConfigError, 'Must define the username for authentication.' if @username == ''
+
+ raise Fluent::ConfigError, 'Must define the apikey or token for authentication.' if @token == '' && @apikey == ''
+
+ raise Fluent::ConfigError, 'Must define the metric_prefix to use for getting the metrics.' if @metric_prefix == ''
+
+ raise Fluent::ConfigError, 'Must define the target_platform to use for getting the metrics.' if @target_platform == ''
+
+ raise Fluent::ConfigError, 'Must define the target_platform to be one of the following (DATADOG, NEWRELIC, SPLUNK).' if !(['DATADOG', 'NEWRELIC', 'SPLUNK'].include?(@target_platform))
+ end
+
+ def initialize
+ super
+ end
+
+ # `start` is called when starting and after `configure` is successfully completed.
+ def start
+ super
+ @running = true
+ $logger = log
+ @thread = Thread.new do
+ run
+ end
+ end
+
+ def shutdown
+ @running = false
+ @thread.join
+ super
+ end
+
+ def run
+ $logger.info("Preparing metrics collection, creating timer task")
+ timer_task = Concurrent::TimerTask.new(execution_interval: @execution_interval, run_now: true) do
+ begin
+ $logger.info("Timer task execution started")
+ do_execute
+ next "Timer task execution finished successfully"
+ rescue => e
+ $logger.error("Error occurred when running Timer task: #{e.message}")
+ raise e
+ end
+ end
+ timer_task.add_observer(TaskObserver.new)
+ timer_task.execute
+ sleep 100
+ end
+
+ def do_execute
+ begin
+ $logger.info("Metrics collection started")
+ metrics_helper = MetricsHelper.new($logger, @metric_prefix, @jpd_url, @username, @apikey, @token, @common_jpd, @verify_ssl, @request_timeout)
+ platform_metrics = metrics_helper.get_metrics
+
+ if platform_metrics.nil?
+ raise "Error while fetching platform metrics. Metrics response was null"
+ end
+
+ additional_metrics = metrics_helper.get_additional_metrics
+ if !additional_metrics.nil? && additional_metrics != ''
+ platform_metrics += additional_metrics.to_s
+ end
+ $logger.info("Metrics collection finished")
+
+ if @target_platform == 'SPLUNK'
+ parser = SplunkMetricsParser.new(@metric_prefix, router, @tag)
+ elsif @target_platform == 'NEWRELIC'
+ parser = NewRelicMetricsParser.new(@metric_prefix, router, @tag)
+ elsif @target_platform == 'DATADOG'
+ parser = DatadogMetricsParser.new(@metric_prefix, router, @tag)
+ else
+ raise 'Parser type is not valid. target_platform should be SPLUNK, NEWRELIC, or DATADOG'
+ end
+ $logger.debug("Emitting collected metrics started")
+ parser.emit_parsed_metrics(platform_metrics)
+ $logger.debug("Emitting collected metrics finished")
+
+ rescue RestClient::Exceptions::OpenTimeout
+ $logger.error("The request timed out while trying to open a connection. The configured request timeout is: #{@request_timeout}")
+ rescue RestClient::Exceptions::ReadTimeout
+ $logger.error("The request timed out while waiting for a response. The configured request timeout is: #{@request_timeout}")
+ rescue RestClient::ExceptionWithResponse => e
+ $logger.error("HTTP request failed: #{e.response}")
+ rescue StandardError => e
+ $logger.error("An unexpected error occurred during metrics collection: #{e.message}")
+ else
+ $logger.debug("Metrics collection and emission do_execute finished with no errors")
+ end
+ end
+
+ class TaskObserver
+ def update(time, result, e)
+ if result
+ $logger.info("Timer task execution successfully returned: '#{result}' at: #{time}")
+ else
+ $logger.error("Timer task execution failed with error: #{e} at: #{time}")
+ end
+ end
+ end
+ end
+ end
+ end
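
Example (not part of the package): a minimal sketch of exercising the plugin's `configure` validation through fluentd's standard test driver. The require path for the plugin file and every configuration value below are assumptions/placeholders.

```ruby
# Minimal sketch: validate a configuration with fluentd's test driver.
# The require path and all values are placeholders, not part of this gem.
require 'fluent/test'
require 'fluent/test/driver/input'
require 'fluent/plugin/in_jfrog_metrics' # assumed file name for the input plugin above

Fluent::Test.setup

CONF = %(
  tag jfrog.metrics
  jpd_url https://example.jfrog.io
  username admin
  token placeholder-access-token
  metric_prefix jfrog.artifactory
  target_platform SPLUNK
  execution_interval 60
)

# configure raises Fluent::ConfigError when tag, jpd_url, username, a credential,
# metric_prefix, or a valid target_platform is missing.
driver = Fluent::Test::Driver::Input.new(Fluent::Plugin::JfrogMetricsInput)
driver.configure(CONF)
```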
@@ -0,0 +1,120 @@
+ # frozen_string_literal: true
+
+ require 'rest-client'
+ require_relative 'proxy_helper'
+
+ class MetricsHelper
+ @@obs_endpoint_exists = false
+
+ def initialize(logger, metric_prefix, jpd_url, username, apikey, token, common_jpd, verify_ssl, request_timeout)
+ @logger = logger
+ @metric_prefix = metric_prefix
+ @jpd_url = jpd_url
+ @username = username
+ @apikey = apikey
+ @token = token
+ @common_jpd = common_jpd
+ @verify_ssl = verify_ssl
+ @request_timeout = request_timeout
+ end
+
+ def get_metrics
+ @logger.debug("Get metrics started")
+ url = nil
+ url = case @metric_prefix
+ when 'jfrog.artifactory'
+ "#{@jpd_url}/artifactory/api/v1/metrics"
+ when 'jfrog.xray'
+ "#{@jpd_url}/xray/api/v1/metrics"
+ else
+ "#{@jpd_url}/artifactory/api/v1/metrics"
+ end
+
+ @logger.info("Executing #{@metric_prefix} metrics collection from: #{url}")
+ metrics = nil
+ if !@token.nil? && @token != ''
+ metrics = execute_rest_call(url, @username, nil, @token, true, @verify_ssl, @request_timeout)
+ elsif !@apikey.nil? && @apikey != ''
+ metrics = execute_rest_call(url, @username, @apikey, nil, false, @verify_ssl, @request_timeout)
+ end
+ @logger.debug("Get metrics finished")
+ return metrics
+ end
+
+ def get_additional_metrics
+ @logger.info("Additional metrics collection started")
+ if (@metric_prefix == 'jfrog.artifactory' || @common_jpd == false) && !@token.nil? && @token != ''
+ url = "#{@jpd_url}/observability/api/v1/metrics"
+ @logger.info("Collecting additional metrics from: #{url}")
+ check_endpoint(url, @token, @verify_ssl, @request_timeout) if @@obs_endpoint_exists.nil? || !@@obs_endpoint_exists
+ additional_metrics = execute_rest_call(url, @username, nil, @token, true, @verify_ssl, @request_timeout) if @@obs_endpoint_exists
+ end
+ @logger.info("Additional metrics collection finished")
+ return additional_metrics
+ end
+
+ def check_endpoint(url, token, verify_ssl, request_timeout)
+ @logger.debug("Checking connectivity to endpoint: #{url} started")
+ # Configure proxy with NO_PROXY support
+ ProxyHelper.configure_rest_client_proxy(url, nil, @logger)
+
+ request = RestClient::Request.new(
+ method: :get,
+ url: url,
+ headers: { Authorization: "Bearer #{token}"},
+ verify_ssl: verify_ssl,
+ timeout: request_timeout
+ )
+
+ request.execute do |response, request, result|
+ if response.code == 200
+ @@obs_endpoint_exists = true
+ @logger.info("#{url} exists: #{@@obs_endpoint_exists}. Storing the result for next executions")
+ else
+ @@obs_endpoint_exists = false
+ @logger.info("Cannot verify endpoint. Skipping metrics collection from #{url}. Received response code: #{response.code}, Response body:\n#{response.body}")
+ end
+ end
+ @logger.debug("Checking connectivity to endpoint: #{url} finished")
+ end
+
+ def execute_rest_call(url, user, password, token, use_token, verify_ssl, request_timeout)
+ @logger.debug("Rest call to fetch metrics started")
+ # Configure proxy with NO_PROXY support
+ ProxyHelper.configure_rest_client_proxy(url, nil, @logger)
+
+ request = if use_token == true
+ @logger.debug("Using token for authentication")
+ RestClient::Request.new(
+ method: :get,
+ url: url,
+ headers: { Authorization: "Bearer #{token}" },
+ verify_ssl: verify_ssl,
+ timeout: request_timeout
+ )
+ else
+ @logger.debug("Using apiKey for authentication")
+ RestClient::Request.new(
+ method: :get,
+ url: url,
+ user: user,
+ password: password,
+ verify_ssl: verify_ssl,
+ timeout: request_timeout
+ )
+ end
+
+ request.execute do |response, request, result|
+ @logger.debug("Received response body: #{response.body} when fetching metrics")
+ case response.code
+ when 200
+ @logger.info("#{@metric_prefix} metrics were successfully collected from url: #{url}")
+ return response.body
+ else
+ @logger.info("Cannot fetch #{@metric_prefix} metrics from url: #{url}. Received response code: #{response.code}, Response body:\n#{response.body}")
+ raise "Unexpected response code: #{response.code} when calling #{url}"
+ end
+ end
+ end
+
+ end
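
Example (not part of the package): a hypothetical standalone use of MetricsHelper outside fluentd, mirroring the do_execute flow above. The JPD URL and access token are placeholders.

```ruby
# Hypothetical standalone use of MetricsHelper; all values are placeholders.
require 'logger'
require_relative 'metrics_helper'

helper = MetricsHelper.new(Logger.new($stdout), 'jfrog.artifactory',
                           'https://example.jfrog.io', 'admin',
                           '', 'placeholder-access-token',
                           false, true, 20)

metrics = helper.get_metrics            # raw Prometheus-style exposition text
extra   = helper.get_additional_metrics # observability metrics, when the endpoint exists
metrics += extra.to_s unless extra.nil? || extra == ''
```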
@@ -0,0 +1,48 @@
+ # frozen_string_literal: true
+ require 'json'
+ require_relative 'base_metrics_parser'
+
+ class NewRelicMetricsParser < BaseMetricsParser
+ def initialize(metric_prefix, router, tag)
+ @metric_prefix = metric_prefix
+ @router = router
+ @tag = tag
+ end
+
+ def format_data(cleaned_data = [], prefix = '', separator = '')
+ hash_data_array = []
+ data_hash = {}
+ data_array = []
+ cleaned_data.each do |interim_data|
+ metrics_hash = {}
+ if interim_data =~ /{/ && interim_data =~ /}/
+ attributes = {}
+ metric_name, additional_dims, metric_val_and_time = interim_data.match(/(.*){(.*)}(.*)/i).captures
+ additional_dims.split("\",").each do |interim_data|
+ pair_data = interim_data.gsub("\"", "").gsub("{", "").gsub("}", "")
+ interim_data_value = pair_data.split("=", 2)[1]
+ interim_data_key = pair_data.split("=", 2)[0]
+ attributes[interim_data_key] = interim_data_value
+ end
+ if metric_val_and_time =~ / /
+ metrics_hash['name'] = prefix + separator + metric_name
+ metrics_hash['value'] =
+ metric_val_and_time.strip.split[0] =~ /^\S*\.\S*$/ ? metric_val_and_time.strip.split[0].to_f : metric_val_and_time.strip.split[0].to_i
+ metrics_hash['timestamp'] = metric_val_and_time.strip.split[1].to_i
+ metrics_hash['attributes'] = attributes
+ end
+ else
+ metrics_hash['name'], metrics_hash['value'], metrics_hash['timestamp'] = interim_data.split
+ metrics_hash['name'] = prefix + separator + metrics_hash['name']
+ metrics_hash['value'] =
+ metrics_hash['value'] =~ /^\S*\.\S*$/ ? metrics_hash['value'].to_f : metrics_hash['value'].to_i
+ metrics_hash['timestamp'] = metrics_hash['timestamp'].to_i
+ end
+ data_array << metrics_hash
+ end
+ data_hash["metrics"] = data_array
+ hash_data_array.push(data_hash)
+ hash_data_array
+ end
+
+ end
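
Worked example (not part of the package): feeding one labeled exposition line through format_data yields the New Relic payload shape built above. The prefix and separator are passed explicitly here, and the router argument is unused by format_data.

```ruby
# Worked example of the payload shape format_data builds; the separator
# value '.' is an illustrative assumption.
require_relative 'newrelic_metrics_parser'

parser = NewRelicMetricsParser.new('jfrog.xray', nil, 'jfrog.metrics')
line = 'jfxr_data_artifacts_total{package_type="generic"} 1 1645738679875'

parser.format_data([line], 'jfrog.xray', '.')
# => [{ "metrics" => [{ "name"       => "jfrog.xray.jfxr_data_artifacts_total",
#                       "value"      => 1,
#                       "timestamp"  => 1645738679875,
#                       "attributes" => { "package_type" => "generic" } }] }]
```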
@@ -0,0 +1,88 @@
+ # frozen_string_literal: true
+
+ require 'uri'
+ require 'rest-client'
+
+ module ProxyHelper
+ # Check if a host should bypass the proxy based on NO_PROXY environment variable
+ def self.should_bypass_proxy?(url, logger = nil)
+ no_proxy = ENV['NO_PROXY'] || ENV['no_proxy']
+ return false if no_proxy.nil? || no_proxy.empty?
+
+ begin
+ target_host = URI.parse(url).host
+ return false if target_host.nil?
+
+ no_proxy_hosts = no_proxy.split(',').map(&:strip)
+
+ no_proxy_hosts.each do |pattern|
+ next if pattern.empty?
+
+ # Remove leading dot if present (e.g., ".example.com" -> "example.com")
+ # Downcase both for case-insensitive matching (domain names are case-insensitive per RFC 1035)
+ pattern = pattern.sub(/^\./, '').downcase
+ target_host_lower = target_host.downcase
+
+ # Check for exact match or subdomain match
+ if target_host_lower == pattern || target_host_lower.end_with?(".#{pattern}")
+ logger&.debug("Host '#{target_host}' matches NO_PROXY pattern '#{pattern}', bypassing proxy")
+ return true
+ end
+
+ # Check for wildcard
+ if pattern == '*'
+ logger&.debug("NO_PROXY contains '*', bypassing proxy for all hosts")
+ return true
+ end
+ end
+ rescue URI::InvalidURIError => e
+ logger&.warn("Failed to parse URL '#{url}': #{e.message}")
+ return false
+ end
+
+ false
+ end
+
+ # Get the proxy URL to use for a given target URL
+ # Returns nil if proxy should be bypassed, otherwise returns the proxy URL
+ def self.get_proxy_for_url(url, http_proxy_param = nil, logger = nil)
+ # Check if this URL should bypass proxy
+ if should_bypass_proxy?(url, logger)
+ return nil
+ end
+
+ # Return proxy URL in order of precedence
+ if http_proxy_param && !http_proxy_param.empty?
+ logger&.debug("Using http_proxy param for request. Proxy url: #{http_proxy_param}")
+ return http_proxy_param
+ elsif ENV['HTTP_PROXY'] && !ENV['HTTP_PROXY'].empty?
+ logger&.debug("Using 'HTTP_PROXY' environment variable for request. Proxy url: #{ENV['HTTP_PROXY']}")
+ return ENV['HTTP_PROXY']
+ elsif ENV['http_proxy'] && !ENV['http_proxy'].empty?
+ logger&.debug("Using 'http_proxy' environment variable for request. Proxy url: #{ENV['http_proxy']}")
+ return ENV['http_proxy']
+ elsif ENV['HTTPS_PROXY'] && !ENV['HTTPS_PROXY'].empty?
+ logger&.debug("Using 'HTTPS_PROXY' environment variable for request. Proxy url: #{ENV['HTTPS_PROXY']}")
+ return ENV['HTTPS_PROXY']
+ elsif ENV['https_proxy'] && !ENV['https_proxy'].empty?
+ logger&.debug("Using 'https_proxy' environment variable for request. Proxy url: #{ENV['https_proxy']}")
+ return ENV['https_proxy']
+ end
+
+ nil
+ end
+
+ # Configure RestClient proxy for a given URL
+ def self.configure_rest_client_proxy(url, http_proxy_param = nil, logger = nil)
+ proxy_url = get_proxy_for_url(url, http_proxy_param, logger)
+
+ if proxy_url.nil?
+ RestClient.proxy = nil
+ logger&.debug("Proxy disabled for URL: #{url}")
+ else
+ RestClient.proxy = proxy_url
+ logger&.debug("Proxy enabled for URL: #{url}, using: #{proxy_url}")
+ end
+ end
+ end
+
+
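
Example (not part of the package): how the NO_PROXY matching above behaves. The hostnames and the NO_PROXY value are placeholders.

```ruby
# Illustration of the NO_PROXY handling; hostnames are placeholders.
require_relative 'proxy_helper'

ENV['NO_PROXY'] = 'localhost,.internal.example.com'

ProxyHelper.should_bypass_proxy?('https://artifactory.internal.example.com/artifactory/api/v1/metrics')
# => true   (subdomain match on ".internal.example.com")

ProxyHelper.should_bypass_proxy?('https://example.jfrog.io/artifactory/api/v1/metrics')
# => false  (no entry matches; get_proxy_for_url then falls back to the HTTP(S)_PROXY variables)
```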
@@ -0,0 +1,44 @@
+ # frozen_string_literal: true
+ require 'json'
+ require_relative 'base_metrics_parser'
+
+ class SplunkMetricsParser < BaseMetricsParser
+ def initialize(metric_prefix, router, tag)
+ @metric_prefix = metric_prefix
+ @router = router
+ @tag = tag
+ end
+
+ def format_data(cleaned_data = [], prefix = '', separator = '')
+ hash_data_array = []
+ cleaned_data.each do |interim_data|
+ hash_data_array << generate_hash_from_data(interim_data, prefix, separator)
+ end
+ hash_data_array
+ end
+
+ def generate_hash_from_data(data = '', prefix = '', separator = '')
+ metrics_hash = {}
+ if data =~ /{/ && data =~ /}/
+ metric_name, additional_dims, metric_val_and_time = data.match(/(.*){(.*)}(.*)/i).captures
+ if metric_val_and_time =~ / /
+ metrics_hash['metric_name'] = prefix + separator + metric_name
+ metrics_hash['value'] =
+ metric_val_and_time.strip.split[0] =~ /^\S*\.\S*$/ ? metric_val_and_time.strip.split[0].to_f : metric_val_and_time.strip.split[0].to_i
+ metrics_hash['time'] = metric_val_and_time.strip.split[1].to_i
+ if additional_dims =~ /,/
+ additional_dims.split(/,/).map do |interim_data|
+ metrics_hash[interim_data.split(/=/)[0]] = interim_data.split(/=/)[1].gsub(/"/, '') if interim_data =~ /=/
+ end
+ end
+ end
+ else
+ metrics_hash['metric_name'], metrics_hash['value'], metrics_hash['time'] = data.split
+ metrics_hash['metric_name'] = prefix + separator + metrics_hash['metric_name']
+ metrics_hash['value'] =
+ metrics_hash['value'] =~ /^\S*\.\S*$/ ? metrics_hash['value'].to_f : metrics_hash['value'].to_i
+ metrics_hash['time'] = metrics_hash['time'].to_i
+ end
+ metrics_hash
+ end
+ end
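
Worked example (not part of the package): the hash generate_hash_from_data builds for a labeled metric line. The prefix and separator are passed explicitly, and the separator '.' is an illustrative assumption.

```ruby
# Worked example of generate_hash_from_data on a multi-label metric line.
require_relative 'splunk_metrics_parser'

parser = SplunkMetricsParser.new('jfrog.xray', nil, 'jfrog.metrics')
line = 'queue_messages_total{queue_name="ticketingRetry",new_queue_name="ticketingRetry2"} 0 1645738679875'

parser.generate_hash_from_data(line, 'jfrog.xray', '.')
# => { "metric_name"    => "jfrog.xray.queue_messages_total",
#      "value"          => 0,
#      "time"           => 1645738679875,
#      "queue_name"     => "ticketingRetry",
#      "new_queue_name" => "ticketingRetry2" }
```

Note that the `if additional_dims =~ /,/` guard means labels are only copied into the hash when a line carries more than one label.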
@@ -0,0 +1,8 @@
+ # frozen_string_literal: true
+ $jpd_url = ''
+ $username = ''
+ $apikey = ''
+ $token = ''
+ def get_credentials
+ [$jpd_url, $username, $apikey, $token]
+ end
@@ -0,0 +1,64 @@
+ # HELP sys_memory_used_bytes Host used virtual memory
+ # UPDATED sys_memory_used_bytes 1645738619452
+ # TYPE sys_memory_used_bytes gauge
+ sys_memory_used_bytes 3836522496 1645738619452
+ # HELP sys_memory_free_bytes Host free virtual memory
+ # UPDATED sys_memory_free_bytes 1645738619452
+ # TYPE sys_memory_free_bytes gauge
+ sys_memory_free_bytes 8396328960 1645738619452
+ # HELP jfrt_runtime_heap_freememory_bytes Free Memory
+ # UPDATED jfrt_runtime_heap_freememory_bytes 1645738619452
+ # TYPE jfrt_runtime_heap_freememory_bytes gauge
+ jfrt_runtime_heap_freememory_bytes 464534120 1645738619452
+ # HELP jfrt_runtime_heap_maxmemory_bytes Max Memory
+ # UPDATED jfrt_runtime_heap_maxmemory_bytes 1645738619452
+ # TYPE jfrt_runtime_heap_maxmemory_bytes gauge
+ jfrt_runtime_heap_maxmemory_bytes 2147483648 1645738619452
+ # HELP jfrt_runtime_heap_totalmemory_bytes Total Memory
+ # UPDATED jfrt_runtime_heap_totalmemory_bytes 1645738619452
+ # TYPE jfrt_runtime_heap_totalmemory_bytes gauge
+ jfrt_runtime_heap_totalmemory_bytes 1358954496 1645738619452
+ # HELP jfrt_runtime_heap_processors_total Available Processors
+ # UPDATED jfrt_runtime_heap_processors_total 1645738619452
+ # TYPE jfrt_runtime_heap_processors_total counter
+ jfrt_runtime_heap_processors_total 1 1645738619452
+ # HELP jfrt_db_connections_active_total Total Active Connections
+ # UPDATED jfrt_db_connections_active_total 1636577471195
+ # TYPE jfrt_db_connections_active_total gauge
+ jfrt_db_connections_active_total 0 1645738619452
+ # HELP jfrt_db_connections_idle_total Total Idle Connections
+ # UPDATED jfrt_db_connections_idle_total 1636577471195
+ # TYPE jfrt_db_connections_idle_total gauge
+ jfrt_db_connections_idle_total 1 1645738619452
+ # HELP jfrt_db_connections_max_active_total Total Max Active Connections
+ # UPDATED jfrt_db_connections_max_active_total 1636577471195
+ # TYPE jfrt_db_connections_max_active_total gauge
+ jfrt_db_connections_max_active_total 80 1645738619452
+ # HELP jfrt_db_connections_min_idle_total Total Min Idle Connections
+ # UPDATED jfrt_db_connections_min_idle_total 1636577471195
+ # TYPE jfrt_db_connections_min_idle_total gauge
+ jfrt_db_connections_min_idle_total 1 1645738619452
+ # HELP sys_cpu_ratio Total cpu load ratio
+ # UPDATED sys_cpu_ratio 1645738619452
+ # TYPE sys_cpu_ratio gauge
+ sys_cpu_ratio 0.16 1645738619452
+ # HELP jfrt_projects_active_total Projects Amount
+ # UPDATED jfrt_projects_active_total 1645738619452
+ # TYPE jfrt_projects_active_total counter
+ jfrt_projects_active_total 0 1645738619452
+ # HELP jfrt_storage_current_total_size_bytes Used Storage
+ # UPDATED jfrt_storage_current_total_size_bytes 1645738619452
+ # TYPE jfrt_storage_current_total_size_bytes gauge
+ jfrt_storage_current_total_size_bytes 0 1645738619452
+ # HELP app_disk_used_bytes Used bytes for app home directory disk device
+ # UPDATED app_disk_used_bytes 1645738619452
+ # TYPE app_disk_used_bytes gauge
+ app_disk_used_bytes 730750976 1645738619452
+ # HELP app_disk_free_bytes Free bytes for app home directory disk device
+ # UPDATED app_disk_free_bytes 1645738619452
+ # TYPE app_disk_free_bytes gauge
+ app_disk_free_bytes 209510809600 1645738619452
+ # HELP jfrt_artifacts_gc_next_run_seconds Next GC Run
+ # UPDATED jfrt_artifacts_gc_next_run_seconds 1636574411092
+ # TYPE jfrt_artifacts_gc_next_run_seconds gauge
+ jfrt_artifacts_gc_next_run_seconds 14388 1645738619452
@@ -0,0 +1,138 @@
+ # HELP app_disk_used_bytes Used bytes for app home directory disk device
+ # TYPE app_disk_used_bytes gauge
+ app_disk_used_bytes 1.48081664e+10 1645738679875
+ # HELP app_disk_free_bytes Free bytes for app home directory disk device
+ # TYPE app_disk_free_bytes gauge
+ app_disk_free_bytes 3.356854272e+10 1645738679875
+ # HELP app_io_counters_write_bytes Process io total write bytes
+ # TYPE app_io_counters_write_bytes gauge
+ app_io_counters_write_bytes 1.217306624e+09 1645738679875
+ # HELP app_io_counters_read_bytes Process io total read bytes
+ # TYPE app_io_counters_read_bytes gauge
+ app_io_counters_read_bytes 2.10030592e+08 1645738679875
+ # HELP app_self_metrics_calc_seconds Total time to collect all metrics
+ # TYPE app_self_metrics_calc_seconds gauge
+ app_self_metrics_calc_seconds 0.050925766 1645738679875
+ # HELP app_self_metrics_total Count of collected metrics
+ # TYPE app_self_metrics_total gauge
+ app_self_metrics_total 31 1645738679875
+ # HELP db_connection_pool_in_use_total The number of connections currently in use
+ # TYPE db_connection_pool_in_use_total gauge
+ db_connection_pool_in_use_total 0 1645738679875
+ # HELP db_connection_pool_idle_total The number of idle connections
+ # TYPE db_connection_pool_idle_total gauge
+ db_connection_pool_idle_total 5 1645738679875
+ # HELP db_connection_pool_max_open_total The maximum number of open connections
+ # TYPE db_connection_pool_max_open_total gauge
+ db_connection_pool_max_open_total 60 1645738679875
+ # HELP go_memstats_heap_in_use_bytes Process go heap bytes in use
+ # TYPE go_memstats_heap_in_use_bytes gauge
+ go_memstats_heap_in_use_bytes 1.98139904e+08 1645738679875
+ # HELP go_memstats_heap_allocated_bytes Process go heap allocated bytes
+ # TYPE go_memstats_heap_allocated_bytes gauge
+ go_memstats_heap_allocated_bytes 1.85429088e+08 1645738679875
+ # HELP go_memstats_heap_idle_bytes Process go heap idle bytes
+ # TYPE go_memstats_heap_idle_bytes gauge
+ go_memstats_heap_idle_bytes 1.35733248e+08 1645738679875
+ # HELP go_memstats_heap_objects_total Process go heap number of objects
+ # TYPE go_memstats_heap_objects_total gauge
+ go_memstats_heap_objects_total 1.93186e+06 1645738679875
+ # HELP go_memstats_heap_reserved_bytes Process go heap reserved bytes
+ # TYPE go_memstats_heap_reserved_bytes gauge
+ go_memstats_heap_reserved_bytes 3.33873152e+08 1645738679875
+ # HELP go_memstats_gc_cpu_fraction_ratio Process go cpu used by gc. value is between 0 and 1
+ # TYPE go_memstats_gc_cpu_fraction_ratio gauge
+ go_memstats_gc_cpu_fraction_ratio 0.00024063137131169772 1645738679875
+ # HELP go_routines_total Number of goroutines that currently exist
+ # TYPE go_routines_total gauge
+ go_routines_total 169 1645738679875
+ # HELP jfxr_data_artifacts_total Artifacts of pkg type generic count in Xray
+ # UPDATED jfxr_data_artifacts_total 1636513309792
+ # TYPE jfxr_data_artifacts_total counter
+ jfxr_data_artifacts_total{package_type="generic"} 1 1645738679875
+ # HELP jfxr_data_components_total Components of pkg type generic count in Xray
+ # UPDATED jfxr_data_components_total 1636513309792
+ # TYPE jfxr_data_components_total counter
+ jfxr_data_components_total{package_type="generic"} 1 1645738679875
+ # HELP jfxr_db_sync_running_total Is dbsync running
+ # UPDATED jfxr_db_sync_running_total 1636577439791
+ # TYPE jfxr_db_sync_running_total gauge
+ jfxr_db_sync_running_total 0 1645738679875
+ # HELP jfxr_jira_last_ticket_creation_time_seconds Last ticket creation time
+ # UPDATED jfxr_jira_last_ticket_creation_time_seconds 1636577309791
+ # TYPE jfxr_jira_last_ticket_creation_time_seconds gauge
+ jfxr_jira_last_ticket_creation_time_seconds 0 1645738679875
+ # HELP jfxr_jira_no_of_errors_in_last_hour_total Total no of errors in last one hour
+ # UPDATED jfxr_jira_no_of_errors_in_last_hour_total 1636577309791
+ # TYPE jfxr_jira_no_of_errors_in_last_hour_total counter
+ jfxr_jira_no_of_errors_in_last_hour_total 0 1645738679875
+ # HELP jfxr_jira_last_error_time_seconds Last error occurred time
+ # UPDATED jfxr_jira_last_error_time_seconds 1636577309791
+ # TYPE jfxr_jira_last_error_time_seconds gauge
+ jfxr_jira_last_error_time_seconds 0 1645738679875
+ # HELP jfxr_jira_no_of_integrations_total Total no of jira integrations
+ # UPDATED jfxr_jira_no_of_integrations_total 1636577309791
+ # TYPE jfxr_jira_no_of_integrations_total counter
+ jfxr_jira_no_of_integrations_total 0 1645738679875
+ # HELP jfxr_jira_no_of_profiles_total Total no of profiles created
+ # UPDATED jfxr_jira_no_of_profiles_total 1636577309791
+ # TYPE jfxr_jira_no_of_profiles_total counter
+ jfxr_jira_no_of_profiles_total 0 1645738679875
+ # HELP jfxr_jira_no_of_tickets_created_in_last_one_hour_total Total no of jira tickets created in past one hour
+ # UPDATED jfxr_jira_no_of_tickets_created_in_last_one_hour_total 1636577309791
+ # TYPE jfxr_jira_no_of_tickets_created_in_last_one_hour_total counter
+ jfxr_jira_no_of_tickets_created_in_last_one_hour_total 0 1645738679875
+ # HELP jfxr_performance_server_up_time_seconds Xray server up time
+ # TYPE jfxr_performance_server_up_time_seconds gauge
+ jfxr_performance_server_up_time_seconds 928277.850744045 1645738679875
+ # HELP queue_messages_total The number of messages currently in queue
+ # UPDATED queue_messages_total 1636577509792
+ # TYPE queue_messages_total gauge
+ queue_messages_total{queue_name="alert"} 0 1645738679875
+ queue_messages_total{queue_name="alertImpactAnalysis"} 0 1645738679875
+ queue_messages_total{queue_name="alertImpactAnalysisRetry"} 0 1645738679875
+ queue_messages_total{queue_name="alertRetry"} 0 1645738679875
+ queue_messages_total{queue_name="analysis"} 0 1645738679875
+ queue_messages_total{queue_name="analysisExistingContent"} 0 1645738679875
+ queue_messages_total{queue_name="analysisExistingContentRetry"} 0 1645738679875
+ queue_messages_total{queue_name="analysisRetry"} 0 1645738679875
+ queue_messages_total{queue_name="buildReport_xray-0"} 0 1645738679875
+ queue_messages_total{queue_name="failure"} 0 1645738679875
+ queue_messages_total{queue_name="gcSyncMaster"} 0 1645738679875
+ queue_messages_total{queue_name="impactAnalysis"} 0 1645738679875
+ queue_messages_total{queue_name="impactAnalysisRetry"} 0 1645738679875
+ queue_messages_total{queue_name="impactPathRecovery"} 0 1645738679875
+ queue_messages_total{queue_name="impactPathRecoveryRetry"} 0 1645738679875
+ queue_messages_total{queue_name="index"} 0 1645738679875
+ queue_messages_total{queue_name="indexExistingContentRetry"} 0 1645738679875
+ queue_messages_total{queue_name="indexExistsContent"} 0 1645738679875
+ queue_messages_total{queue_name="indexRetry"} 0 1645738679875
+ queue_messages_total{queue_name="job"} 0 1645738679875
+ queue_messages_total{queue_name="mdsUpdate"} 0 1645738679875
+ queue_messages_total{queue_name="mdsUpdateExistingContent"} 0 1645738679875
+ queue_messages_total{queue_name="mdsUpdateExistingContentRetry"} 0 1645738679875
+ queue_messages_total{queue_name="mdsUpdateRetry"} 0 1645738679875
+ queue_messages_total{queue_name="notification"} 0 1645738679875
+ queue_messages_total{queue_name="notificationRetry"} 0 1645738679875
+ queue_messages_total{queue_name="persist"} 0 1645738679875
+ queue_messages_total{queue_name="persistExistingContent"} 0 1645738679875
+ queue_messages_total{queue_name="persistExistingContentRetry"} 0 1645738679875
+ queue_messages_total{queue_name="persistRetry"} 0 1645738679875
+ queue_messages_total{queue_name="report"} 0 1645738679875
+ queue_messages_total{queue_name="reportRetry"} 0 1645738679875
+ queue_messages_total{queue_name="ticketing"} 0 1645738679875
+ queue_messages_total{queue_name="ticketingRetry",new_queue_name="ticketingRetry2"} 0 1645738679875
+ # TYPE sys_cpu_ratio gauge
+ sys_cpu_ratio 0.3333333337029058 1645738679875
+ # HELP sys_load_1 Host load average in the last minute
+ sys_load_1 2 1645738679875
+ # HELP sys_load_5 Host load average in the last 5 minutes
+ sys_load_5 1.82 1645738679875
+ # HELP sys_load_15 Host load average in the last 15 minutes
+ sys_load_15 1.73 1645738679875
+ # HELP sys_memory_used_bytes Host used virtual memory
+ # TYPE sys_memory_used_bytes gauge
+ sys_memory_used_bytes 6.9718016e+09 1645738679875
+ # HELP sys_memory_free_bytes Host free virtual memory
+ # TYPE sys_memory_free_bytes gauge
+ sys_memory_free_bytes 3.17022208e+08 1645738679875