stackify-api-ruby 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rspec +2 -0
- data/Gemfile +4 -0
- data/Gemfile.lock +30 -0
- data/LICENSE.txt +22 -0
- data/README.md +157 -0
- data/Rakefile +2 -0
- data/lib/generators/stackify/stackify_generator.rb +13 -0
- data/lib/generators/stackify/templates/stackify.rb +17 -0
- data/lib/stackify-api-ruby.rb +166 -0
- data/lib/stackify/authorization/authorizable.rb +61 -0
- data/lib/stackify/authorization/authorization_client.rb +31 -0
- data/lib/stackify/engine.rb +21 -0
- data/lib/stackify/env_details.rb +108 -0
- data/lib/stackify/error.rb +56 -0
- data/lib/stackify/errors_governor.rb +65 -0
- data/lib/stackify/http_client.rb +50 -0
- data/lib/stackify/logger_client.rb +71 -0
- data/lib/stackify/logger_proxy.rb +35 -0
- data/lib/stackify/logs_sender.rb +78 -0
- data/lib/stackify/metrics/metric.rb +68 -0
- data/lib/stackify/metrics/metric_aggregate.rb +52 -0
- data/lib/stackify/metrics/metrics.rb +88 -0
- data/lib/stackify/metrics/metrics_client.rb +238 -0
- data/lib/stackify/metrics/metrics_queue.rb +26 -0
- data/lib/stackify/metrics/metrics_sender.rb +32 -0
- data/lib/stackify/metrics/monitor.rb +34 -0
- data/lib/stackify/msgs_queue.rb +78 -0
- data/lib/stackify/rack/errors_catcher.rb +17 -0
- data/lib/stackify/schedule_task.rb +23 -0
- data/lib/stackify/scheduler.rb +79 -0
- data/lib/stackify/utils/backtrace.rb +36 -0
- data/lib/stackify/utils/configuration.rb +78 -0
- data/lib/stackify/utils/methods.rb +27 -0
- data/lib/stackify/utils/msg_object.rb +22 -0
- data/lib/stackify/version.rb +3 -0
- data/lib/stackify/workers/add_msg_worker.rb +9 -0
- data/lib/stackify/workers/auth_worker.rb +18 -0
- data/lib/stackify/workers/logs_sender_worker.rb +17 -0
- data/lib/stackify/workers/worker.rb +65 -0
- data/spec/spec_helper.rb +17 -0
- data/stackify-api-ruby.gemspec +25 -0
- metadata +137 -0
@@ -0,0 +1,78 @@
|
|
1
|
+
module Stackify
  # Sends buffered log messages to the Stackify Log/Save HTTP endpoint.
  # A scheduled worker periodically drains the shared message queue; a
  # dedicated path flushes whatever is left while Stackify shuts down.
  class LogsSender < HttpClient

    LOGS_URI = URI('https://dev.stackify.com/api/Log/Save')

    # Starts the main sending thread which drains the message queue every
    # `Stackify.configuration.send_interval` seconds.
    def start
      worker = Stackify::Worker.new 'Main sending thread'
      task = Stackify::ScheduleTask.new do
        send_logs
      end
      worker.async_perform Stackify.configuration.send_interval, task
      Stackify.internal_log :debug, 'LogsSender: main sending thread is started'
    end

    # Flushes all remaining queued messages. Only permitted while Stackify
    # is terminating; during normal operation it just logs a warning.
    def send_remained_msgs
      if Stackify.working?
        Stackify.internal_log :warn, 'Sending of remained msgs is possible when Stackify is terminating work.'
      else
        worker = Stackify::Worker.new 'RemainedJob worker'
        task = send_all_remained_msgs_task Stackify.msgs_queue.pop_all
        worker.perform 2, task
      end
    end

    private

    # Pops the next chunk of messages (blocking while the queue is empty)
    # and hands it to an async worker for delivery.
    def send_logs attempts = 3
      msgs = Stackify.msgs_queue.pop #it should wait until queue will get a new chunk if queque is empty
      worker = Stackify::LogsSenderWorker.new
      task = send_logs_task attempts, msgs
      worker.async_perform 5, task
    end

    # Base options for a sending task. Returns a NEW hash on every call, so
    # callers must capture the result before customizing it. (The original
    # `.dup` on the hash literal was a no-op and has been dropped.)
    def properties
      {
        success_condition: lambda { |result| result.try(:code) == '200' },
        limit: 1
      }
    end

    # Builds the scheduled task that posts `msgs` to LOGS_URI.
    def send_logs_task attempts = nil, msgs
      # BUG FIX: the original mutated the throw-away hash returned by
      # `properties` and then passed a second, fresh copy to the task, so
      # the :attempts option was silently lost. Build the options once and
      # pass that same hash through.
      props = properties
      props[:attempts] = attempts if attempts
      Stackify::ScheduleTask.new props do
        failure_msg = 'LogsSender: tried to send logs'
        Stackify.if_not_authorized failure_msg do
          Stackify.internal_log :info, 'LogsSender: trying to send logs to Stackify...'
          send_request LOGS_URI, gather_and_pack_data(msgs).to_json
        end
      end
    end

    # Builds the task that delivers the final batch during shutdown.
    def send_all_remained_msgs_task msgs
      Stackify::ScheduleTask.new properties do
        failure_msg = 'LogsSender: tried to remained send logs'
        Stackify.if_not_authorized failure_msg do
          Stackify.internal_log :info, 'LogsSender: trying to send remained logs to Stackify...'
          send_request LOGS_URI, gather_and_pack_data(msgs).to_json
        end
      end
    end

    # Wraps `msgs` in the envelope expected by the Log/Save API, using the
    # device/app details obtained during authorization.
    def gather_and_pack_data msgs
      details = Stackify::EnvDetails.instance.auth_info
      {
        'CDID' => details['DeviceID'],
        'CDAppID' => details['DeviceAppID'],
        'Logger' => 'Rails logger',
        'AppName' => details['AppName'],
        'AppNameID' => details['AppNameID'],
        'EnvID' => details['EnvID'],
        'ServerName' => details['DeviceName'],
        'Msgs' => msgs,
        'AppLoc' => details['AppLocation'],
        'Platform' => 'Ruby'
      }
    end
  end
end
|
@@ -0,0 +1,68 @@
|
|
1
|
+
require 'date'
module Stackify::Metrics
  # One raw measurement produced by the public Metrics API helpers. Holds
  # identification (category/name/type), the measured value and the
  # minute-rounded UTC timestamp used to build aggregation keys.
  class Metric
    attr_accessor :category, :name, :metric_type, :is_increment,
                  :settings, :value, :aggregate_key, :occurred

    # The timestamp is captured at creation time and immediately rounded
    # down to the start of its minute.
    def initialize category, name, metric_type, metric_settings = nil
      @category     = category
      @name         = name
      @metric_type  = metric_type
      @occurred     = Time.now.utc
      @occurred     = get_rounded_time
      @is_increment = false
      @settings     = metric_settings || MetricSettings.new
    end

    # Computes and memoizes the key that groups identical metrics observed
    # within the same minute.
    def calc_and_set_aggregate_key
      @aggregate_key = "#{calc_name_key}-#{get_rounded_time}"
    end

    # Key identifying this metric irrespective of time.
    def calc_name_key
      "#{@category.downcase}-#{(@name || 'Missing Name').downcase}-#{@metric_type}"
    end

    # @occurred truncated to the start of its minute.
    def get_rounded_time
      @occurred - @occurred.sec
    end
  end

  # Auto-report behavior flags. The two flags are mutually exclusive:
  # enabling one clears the other.
  class MetricSettings
    attr_reader :autoreport_zero_if_nothing_reported,
                :autoreport_last_value_if_nothing_reported

    def autoreport_last_value_if_nothing_reported= flag
      @autoreport_last_value_if_nothing_reported = flag
      @autoreport_zero_if_nothing_reported = false if flag
    end

    def autoreport_zero_if_nothing_reported= flag
      @autoreport_zero_if_nothing_reported = flag
      @autoreport_last_value_if_nothing_reported = false if flag
    end
  end

  # Wire-format wrapper around an aggregated metric, shaped for the
  # metrics upload payload.
  class MetricForSubmit
    attr_accessor :monitor_id, :value, :count, :occurred_utc, :monitor_type_id

    # `aggregated` is expected to respond to value/monitor_id/occurred_utc/
    # count/metric_type (i.e. a MetricAggregate).
    def initialize aggregated
      @value           = aggregated.value.round 2
      @monitor_id      = aggregated.monitor_id || 0
      @occurred_utc    = aggregated.occurred_utc
      @count           = aggregated.count
      @monitor_type_id = aggregated.metric_type
    end

    # Hash with the exact key names the Stackify API expects.
    def to_h
      {
        'Value' => @value,
        'MonitorID' => @monitor_id,
        'OccurredUtc' => DateTime.parse(@occurred_utc.to_s).iso8601,
        'Count' => @count,
        'MonitorTypeID' => @monitor_type_id
      }
    end
  end
end
|
@@ -0,0 +1,52 @@
|
|
1
|
+
module Stackify
  module Metrics
    # Per-minute aggregation bucket for Metric objects sharing a name key.
    # Starts at value/count 0; MetricsClient accumulates into it.
    class MetricAggregate

      attr_accessor :name, :category, :value, :count, :occurred_utc,
                    :monitor_id, :metric_type, :name_key

      # `metric` must respond to name/category/metric_type/get_rounded_time/
      # calc_name_key (i.e. a Stackify::Metrics::Metric).
      def initialize metric
        @name = metric.name
        @category = metric.category
        @metric_type = metric.metric_type
        @value = 0
        @count = 0
        @occurred_utc = metric.get_rounded_time
        @name_key = metric.calc_name_key
      end

      # Unique key: category-name-type-minute.
      def aggregate_key
        (@category || 'Missing Category').downcase + '-' + (@name || 'Missing Name').downcase +
          '-' + @metric_type.to_s + '-' + @occurred_utc.to_s
      end

    end

    # Read-only snapshot of the most recent aggregate for a metric, as
    # returned by the `get_latest*` APIs.
    class LatestAggregate
      attr_accessor :category, :name, :metric_id, :occurred_utc,
                    :value, :count, :metric_type

      def initialize aggr_metric
        # FIX: @count was assigned twice in the original; once is enough.
        @count = aggr_metric.count
        @metric_type = aggr_metric.metric_type
        @metric_id = aggr_metric.monitor_id
        @name = aggr_metric.name
        @occurred_utc = aggr_metric.occurred_utc
        @value = aggr_metric.value
        @category = aggr_metric.category
      end

      # FIX: the original literal listed the 'Count' key twice (duplicate
      # hash-key warning); the resulting hash is unchanged.
      def to_h
        {
          'Count' => @count,
          'MetricType' => @metric_type,
          'MetricID' => @metric_id,
          'Name' => @name,
          'OccurredUtc' => @occurred_utc,
          'Value' => @value,
          'Category' => @category
        }
      end
    end
  end
end
|
@@ -0,0 +1,88 @@
|
|
1
|
+
module Stackify
  module Metrics
    require_relative 'metrics_queue'
    require_relative 'metric'
    require_relative 'metric_aggregate'
    require_relative 'monitor'
    require_relative 'metrics_client'
    require_relative 'metrics_sender'

    # Stackify monitor-type ids used by the metrics API. Frozen so the
    # shared constant cannot be mutated at runtime.
    METRIC_TYPES = {
      metric_last: 134,
      counter: 129,
      metric_average: 132,
      counter_time: 131
    }.freeze

    class << self
      # Lazily created singleton MetricsClient shared by the helpers below.
      # FIX: uses a module-level instance variable instead of the original
      # `@@metrics_client` class variable (class variables are shared across
      # the inheritance tree and discouraged by the style guide).
      def metrics_client
        @metrics_client ||= Stackify::Metrics::MetricsClient.new
      end

      # Queues an averaged metric sample.
      def average category, metric_name, value, advanced_settings = nil
        m = Metric.new category, metric_name, Stackify::Metrics::METRIC_TYPES[:metric_average]
        m.value = value
        m.settings = advanced_settings
        Stackify::Metrics.metrics_client.queue_metric m
      end

      # Queues a counter increment (default step 1).
      def count category, metric_name, increment_by = 1, advanced_settings = nil
        m = Metric.new category, metric_name, Stackify::Metrics::METRIC_TYPES[:counter]
        m.value = increment_by
        m.settings = advanced_settings
        Stackify::Metrics.metrics_client.queue_metric m
      end

      # Latest aggregate for one category/name pair.
      def get_latest category, metric_name
        metrics_client.get_latest category, metric_name
      end

      # Latest aggregates for every tracked metric.
      def get_latest_all_metrics
        metrics_client.get_latest_all_metrics
      end

      # Queues a gauge change relative to its last reported value.
      def increment_gauge category, metric_name, increment_by = 1, advanced_settings = nil
        m = Metric.new category, metric_name, Stackify::Metrics::METRIC_TYPES[:metric_last]
        m.value = increment_by
        m.is_increment = true
        m.settings = advanced_settings
        Stackify::Metrics.metrics_client.queue_metric m
      end

      # Queues an absolute gauge value.
      def set_gauge category, metric_name, value, advanced_settings = nil
        m = Metric.new category, metric_name, Stackify::Metrics::METRIC_TYPES[:metric_last]
        m.value = value
        m.settings = advanced_settings
        Stackify::Metrics.metrics_client.queue_metric m
      end

      # Queues a value to be summed into a counter.
      def sum category, metric_name, value, advanced_settings = nil
        m = Metric.new category, metric_name, Stackify::Metrics::METRIC_TYPES[:counter]
        m.value = value
        m.settings = advanced_settings
        Stackify::Metrics.metrics_client.queue_metric m
      end

      # Records the elapsed time (seconds) since `start_time`.
      def time category, metric_name, start_time
        time_taken = Time.now.utc - start_time.utc
        avarage_time category, metric_name, time_taken
      end

      # Queues an averaged timing sample, rounded to whole seconds.
      def avarage_time category, metric_name, elapsed_time
        m = Metric.new category, metric_name, Stackify::Metrics::METRIC_TYPES[:counter_time]
        m.value = elapsed_time.round #seconds
        Stackify::Metrics.metrics_client.queue_metric m
      end
      # Correctly spelled alias; the historical misspelling `avarage_time`
      # is kept for backward compatibility.
      alias_method :average_time, :avarage_time

      # Queues both a counter increment and an elapsed-time metric named
      # "<metric_name> Time" in a single call.
      def count_and_time category, metric_name, start_time, advanced_settings = nil
        counter_m = Metric.new category, metric_name, Stackify::Metrics::METRIC_TYPES[:counter]
        counter_m.value = 1
        counter_m.settings = advanced_settings
        time_m = Metric.new category, metric_name + ' Time', Stackify::Metrics::METRIC_TYPES[:counter_time]
        time_m.value = (Time.now.utc - start_time.utc).round
        time_m.settings = advanced_settings
        Stackify::Metrics.metrics_client.queue_metric counter_m
        Stackify::Metrics.metrics_client.queue_metric time_m
      end
    end
  end
end
|
@@ -0,0 +1,238 @@
|
|
1
|
+
module Stackify
  module Metrics
    # Collects queued Metric objects, merges them into per-minute
    # MetricAggregate buckets and periodically uploads the finished buckets
    # to Stackify through MetricsSender.
    class MetricsClient

      attr_reader :metrics_queue

      def initialize
        @metrics_queue = MetricsQueue.new
        @last_aggregates = {}   # name_key => most recently seen aggregate
        @metric_settings = {}   # name_key => MetricSettings
        @aggregate_metrics = {} # aggregate_key => MetricAggregate
        @monitor_ids = {}       # name_key => monitor-info hash from the API
        @metrics_sender = MetricsSender.new
      end

      # Starts the background worker that runs an upload cycle every 5
      # seconds, provided metrics are enabled in the configuration.
      def start
        if Stackify::Utils.is_mode_on? Stackify::MODES[:metrics]
          worker = Stackify::Worker.new 'Metrics client - processing of metrics'
          Stackify.internal_log :debug, 'Metrics client: processing of metrics is started'
          task = submit_metrics_task
          worker.async_perform 5, task
        else
          Stackify.internal_log :warn, '[MetricClient]: Processing of metrics is disabled at configuration!'
        end
      end

      # Returns a LatestAggregate snapshot for the given category/name pair.
      def get_latest category, metric_name
        Stackify::Utils.do_only_if_authorized_and_mode_is_on Stackify::MODES[:metrics] do
          l = @last_aggregates.select { |_key, aggr| aggr.category.eql?(category) && aggr.name.eql?(metric_name) }
          LatestAggregate.new l.values.first
        end
      end

      # Returns LatestAggregate snapshots for every tracked metric.
      def get_latest_all_metrics
        Stackify::Utils.do_only_if_authorized_and_mode_is_on Stackify::MODES[:metrics] do
          all_latest = []
          @last_aggregates.each_pair do |_key, aggr|
            all_latest << Stackify::Metrics::LatestAggregate.new(aggr)
          end
          all_latest
        end
      end

      # Enqueues a metric for aggregation unless metrics are disabled or
      # Stackify is shutting down.
      def queue_metric metric
        if Stackify.working?
          if Stackify::Utils.is_mode_on? Stackify::MODES[:metrics]
            @metrics_queue.add_metric metric
          else
            Stackify.internal_log :warn, '[MetricClient]: Adding of metrics is impossible because they are disabled by configuration'
          end
        else
          Stackify.internal_log :warn, '[MetricClient]: Adding of metrics is impossible - Stackify is terminating or terminated work.'
        end
      end

      private

      # One upload cycle: drain the queue, synthesize zero-report defaults,
      # refresh @last_aggregates, upload up to 50 finished buckets and purge
      # aggregates older than 10 minutes.
      def start_upload_metrics
        current_time = Stackify::Utils.rounded_current_time
        purge_older_than = current_time - 10.minutes
        #read everything up to the start of the current minute
        read_queued_metrics_batch current_time
        handle_zero_reports current_time
        get_for_recent = @aggregate_metrics.select do |_k, v|
          v.occurred_utc < current_time && v.occurred_utc > current_time - 5.minutes
        end
        set_latest_aggregates get_for_recent
        selected_aggr_metrics = @aggregate_metrics.select { |_key, aggr| aggr.occurred_utc < current_time }
        first_50_metrics = Hash[selected_aggr_metrics.to_a.take 50]
        if first_50_metrics.length > 0
          #only getting metrics less than 10 minutes old to drop old data in case we get backed up
          #they are removed from the @aggregated_metrics in the upload function upon success
          upload_aggregates(first_50_metrics.select { |_key, aggr| aggr.occurred_utc > current_time - 10.minutes })
        end
        @aggregate_metrics.delete_if { |_key, aggr| aggr.occurred_utc < purge_older_than }
      end

      # Drains the queue (stopping once a metric newer than `chosen_time`
      # is seen), grouping metrics into per-minute batches which are then
      # merged into @aggregate_metrics.
      def read_queued_metrics_batch chosen_time
        batches = {}

        while @metrics_queue.size > 0 do
          metric = @metrics_queue.pop
          metric.calc_and_set_aggregate_key
          unless batches.has_key? metric.aggregate_key
            name_key = metric.calc_name_key
            if metric.is_increment && @last_aggregates.has_key?(name_key)
              #if wanting to do increments we need to grab the last value so we know what to increment
              metric.value = @last_aggregates[name_key].value
            end
            batches[metric.aggregate_key] = MetricAggregate.new metric
            #if it is nil don't do anything
            #we are doing it where the aggregates are created so we don't do it one very single metric,
            #just once per batch to optimize performance

            @metric_settings[name_key] = metric.settings if metric.settings != nil
          end
          batches[metric.aggregate_key].count += 1
          if metric.is_increment
            #add or subtract
            batches[metric.aggregate_key].value += metric.value
          elsif metric.metric_type == Stackify::Metrics::METRIC_TYPES[:metric_last]
            #should end up the last value
            batches[metric.aggregate_key].value = metric.value
          else
            batches[metric.aggregate_key].value += metric.value
          end
          #we don't need anything more this recent so bail
          break if metric.occurred > chosen_time
        end
        batches.each_pair do |_key, aggregated_metric|
          aggregate aggregated_metric
        end
      end

      # Merges one batch aggregate `am` into @aggregate_metrics, creating
      # the bucket when missing (bucket count capped at 1000).
      def aggregate am
        agg_key = am.aggregate_key
        if @aggregate_metrics.has_key? agg_key
          agg = @aggregate_metrics[agg_key]
        else
          if @aggregate_metrics.length > 1000
            str = 'No longer aggregating new metrics because more than 1000 are queued'
            Stackify.internal_log :warn, str
            return
          end
          Stackify.internal_log :debug, 'Creating aggregate for ' + agg_key
          @aggregate_metrics[agg_key] = am
          agg = Stackify::Metrics::Metric.new am.category, am.name, am.metric_type
          agg = MetricAggregate.new agg
          agg.occurred_utc = am.occurred_utc
        end

        if am.metric_type == Stackify::Metrics::METRIC_TYPES[:metric_last]
          agg.count = 1
          agg.value = am.value
        else
          agg.count += am.count
          agg.value += am.value
        end
        @aggregate_metrics[agg_key] = agg
      end

      # Wraps start_upload_metrics in a schedulable task.
      def submit_metrics_task
        Stackify::ScheduleTask.new do
          start_upload_metrics
        end
      end

      # For metrics configured to auto-report when nothing new arrived,
      # synthesizes a zero (or last-value) aggregate at `current_time`.
      def handle_zero_reports current_time
        @last_aggregates.each_pair do |_key, aggregate|
          if @metric_settings.has_key? aggregate.name_key
            settings = @metric_settings[aggregate.name_key]
            if settings.nil?
              # BUG FIX: was `@metric_settings.delete[...]`, which calls
              # Hash#delete with no argument and raises ArgumentError.
              @metric_settings.delete aggregate.name_key
              next
            end
            #agg = MetricAggregate.new aggregate.category, aggregate.name, aggregate.metric_type
            agg = aggregate
            agg.occurred_utc = current_time
            case aggregate.metric_type
            when Stackify::Metrics::METRIC_TYPES[:metric_last]
              # BUG FIX: `setting` was an undefined local variable
              # (NameError at runtime); the intended receiver is `settings`.
              settings.autoreport_last_value_if_nothing_reported = false #do not allow this
            when Stackify::Metrics::METRIC_TYPES[:counter]
              settings.autoreport_last_value_if_nothing_reported = false #do not allow this
            end
            if settings.autoreport_zero_if_nothing_reported
              agg.count = 1
              agg.value = 0
            elsif settings.autoreport_last_value_if_nothing_reported
              # NOTE(review): copying value into count looks suspicious but
              # matches the original behavior — confirm before changing.
              agg.count = aggregate.value.to_i
              agg.value = aggregate.value
            else
              next
            end
            agg_key = agg.aggregate_key
            unless @aggregate_metrics.has_key? agg_key
              agg.name_key = aggregate.name_key
              Stackify.internal_log :debug, 'Creating 0 default value for ' + agg_key
              @aggregate_metrics[agg_key] = agg
            end
          end
        end
      end

      # Records each aggregate as the most recent one for its name_key,
      # keeping whichever has the newer occurred_utc.
      def set_latest_aggregates aggregates
        aggregates.each_pair do |_key, aggr|
          if @last_aggregates.has_key? aggr.name_key
            curr_aggr = @last_aggregates[aggr.name_key]
            @last_aggregates[aggr.name_key] = aggr if aggr.occurred_utc > curr_aggr.occurred_utc
          else
            @last_aggregates[aggr.name_key] = aggr
          end
        end
      end

      # Resolves (and caches) monitor ids for each aggregate, then uploads
      # every identified aggregate in a single request.
      # Returns true when every metric could be identified.
      def upload_aggregates aggr_metrics
        s = ''
        aggr_metrics.each_pair do |_k, m|
          s = s + m.inspect.to_s + "\n --------------------------- \n"
        end
        Stackify.internal_log :debug, "Uploading Aggregate Metrics at #{ Time.now }: \n" + s
        all_success = true
        aggr_metrics.each_pair do |_key, metric|
          if @monitor_ids.has_key? metric.name_key
            mon_info = @monitor_ids[metric.name_key]
          else
            req = @metrics_sender.monitor_info metric
            if req.try(:code) == '200'
              mon_info = JSON.parse req.body
              if !mon_info.nil? && !mon_info['MonitorID'].nil? && mon_info['MonitorID'] > 0
                @monitor_ids[metric.name_key] = mon_info
              elsif !mon_info.nil? && mon_info['MonitorID'].nil?
                Stackify.internal_log :warn, 'Unable to get metric info for ' + metric.name_key + ' MonitorID is nil'
                @monitor_ids[metric.name_key] = mon_info
              end
            else
              Stackify.internal_log :error, 'Unable to get metric info for ' + metric.name_key
              mon_info = nil
              all_success = false
            end
          end

          if mon_info.nil? || mon_info['MonitorID'].nil?
            Stackify.internal_log :warn, 'Metric info missing for ' + metric.name_key
            metric.monitor_id = nil
            all_success = false
          else
            metric.monitor_id = mon_info['MonitorID']
          end
        end

        # BUG FIX: the upload below sat inside the per-metric loop above,
        # re-sending the whole batch once per metric and making the method
        # return each_pair's result (the hash) instead of the success flag
        # its caller expects. It now runs exactly once, after all ids are
        # resolved.
        #get identified once
        aggr_metrics_for_upload = aggr_metrics.select { |_key, aggr_metric| !aggr_metric.monitor_id.nil? }
        response = @metrics_sender.upload_metrics aggr_metrics_for_upload
        Stackify.internal_log :info, 'Metrics are uploaded successfully' if response.try(:code) == '200'
        all_success
      end
    end
  end
end
|