qa_server 2.2.0 → 2.2.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 6b1aa8683845f3afeadc0dd3b26a77b56f881fed
- data.tar.gz: 4a38cff642e17ba1be32215b8d89f840963bf1f7
+ metadata.gz: a9a28076a2932c11c2dcfa1d833665d623ab3969
+ data.tar.gz: 0e5eeb1c0c5535029f60c8a5f1a4bead059e4bbc
  SHA512:
- metadata.gz: 1fd0e79db6c61c2f696eb04eee64f18ef0e82393eba5cc57b13b9641158f235a8d28008ee85f6cc86a1c6641d4d7663e0ffbd8c5a0b1f0c6137b4e5543820af8
- data.tar.gz: 7fbb5e3f7c86a6e26b7dc891a6306cb618b038ddecaa29b0c405a5bf31c3c29049560c0591902e20164ac9715ede061ed3252281e16f6256afcc3bce5620f32a
+ metadata.gz: f891059ddd3ef9453bf857dbd7e5cc0ce5cab4beead2897b02efac28803f82a06fb00fb94ce3de487e30bb00f085049e14c80052a8c80565ae4931401c962f7b
+ data.tar.gz: e4c733a195568694b0bd9ed8fe7704aa62a3ab07c90c6ed2d3876869be33e439c68f28dc081c631182cb9b2dcba42a392b45420a5392a9079eb73c0e25c73040
data/CHANGELOG.md CHANGED
@@ -1,3 +1,8 @@
+ ### 2.2.1 (2019-09-04)
+
+ * config to optionally display the performance graph/datatable
+ * add data table view of performance data
+
  ### 2.2.0 (2019-08-15)

  * add performance graphs to monitor status page
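The two 2.2.1 entries above add a datatable view of the performance data and configuration toggles for whether the graph and datatable are shown. A minimal sketch of how a host application might set those toggles in an initializer, assuming writer methods that mirror the display_historical_graph?/display_historical_datatable? readers shown later in this diff (the performance attribute names are assumptions, not confirmed by this diff):

  # config/initializers/qa_server.rb -- hypothetical example
  QaServer.config.display_performance_graph = true      # assumed toggle for the performance graph
  QaServer.config.display_performance_datatable = true  # assumed toggle for the performance datatable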
@@ -1,11 +1,15 @@
  # frozen_string_literal: true
  # Provide access to the scenario_results_history database table which tracks specific scenario runs over time.
  module QaServer
- class PerformanceHistory < ActiveRecord::Base
+ class PerformanceHistory < ActiveRecord::Base # rubocop:disable Metrics/ClassLength
  self.table_name = 'performance_history'

  enum action: [:fetch, :search]

+ PERFORMANCE_ALL_KEY = :all_authorities
+ PERFORMANCE_STATS_KEY = :stats
+ PERFORMANCE_FOR_LIFETIME_KEY = :lifetime_stats
+
  PERFORMANCE_FOR_DAY_KEY = :day
  PERFORMANCE_BY_HOUR_KEY = :hour

@@ -15,16 +19,24 @@ module QaServer
  PERFORMANCE_FOR_YEAR_KEY = :year
  PERFORMANCE_BY_MONTH_KEY = :month

- LOAD_TIME_KEY = :load_avg_ms
- NORMALIZATION_TIME_KEY = :normalization_avg_ms
- COMBINED_TIME_KEY = :combined_avg_ms
+ SUM_LOAD_TIME_KEY = :load_sum_ms
+ SUM_NORMALIZATION_TIME_KEY = :normalization_sum_ms
+ SUM_FULL_REQUEST_TIME_KEY = :full_request_sum_ms
+ MIN_LOAD_TIME_KEY = :load_min_ms
+ MIN_NORMALIZATION_TIME_KEY = :normalization_min_ms
+ MIN_FULL_REQUEST_TIME_KEY = :full_request_min_ms
+ MAX_LOAD_TIME_KEY = :load_max_ms
+ MAX_NORMALIZATION_TIME_KEY = :normalization_max_ms
+ MAX_FULL_REQUEST_TIME_KEY = :full_request_max_ms
+ AVG_LOAD_TIME_KEY = :load_avg_ms
+ AVG_NORMALIZATION_TIME_KEY = :normalization_avg_ms
+ AVG_FULL_REQUEST_TIME_KEY = :full_request_avg_ms

  class << self
-
  # Save a scenario result
  # @param run_id [Integer] the run on which to gather statistics
  # @param result [Hash] the scenario result to be saved
- def save_result(dt_stamp:, authority:, action:, size_bytes:, load_time_ms:, normalization_time_ms: )
+ def save_result(dt_stamp:, authority:, action:, size_bytes:, load_time_ms:, normalization_time_ms:) # rubocop:disable Metrics/ParameterLists
  QaServer::PerformanceHistory.create(dt_stamp: dt_stamp,
  authority: authority,
  action: action,
@@ -36,115 +48,207 @@ module QaServer
  # Performance data for a day, a month, and a year.
  # @returns [Hash] performance statistics for the past 24 hours
  # @example
- # { 0: { hour: 1400, load_avg_ms: 12.3, normalization_avg_ms: 4.2 },
- # 1: { hour: 1500, load_avg_ms: 12.3, normalization_avg_ms: 4.2 },
- # 2: { hour: 1600, load_avg_ms: 12.3, normalization_avg_ms: 4.2 },
- # ...,
- # 23: { hour: 1300, load_avg_ms: 12.3, normalization_avg_ms: 4.2 }
+ # { all_authorities:
+ # { lifetime_stats:
+ # { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }
+ # }
+ # { day:
+ # { 0: { hour: '1400', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # 1: { hour: '1500', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # 2: { hour: '1600', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # ...,
+ # 23: { hour: 'NOW', load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }}
+ # }
+ # }
+ # { month:
+ # { 0: { day: '07-15-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # 1: { day: '07-16-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # 2: { day: '07-17-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # ...,
+ # 29: { day: 'TODAY', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }}
+ # }
+ # }
+ # { year:
+ # { 0: { month: '09-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # 1: { month: '10-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # 2: { month: '11-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # ...,
+ # 11: { month: '08-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }}
+ # }
+ # }
  # }
  def performance_data
  data = {}
- data[PERFORMANCE_FOR_DAY_KEY] = average_last_24_hours
- data[PERFORMANCE_FOR_MONTH_KEY] = average_last_30_days
- data[PERFORMANCE_FOR_YEAR_KEY] = average_last_12_months
+ data[PERFORMANCE_ALL_KEY] = {
+ PERFORMANCE_FOR_LIFETIME_KEY => lifetime,
+ PERFORMANCE_FOR_DAY_KEY => average_last_24_hours,
+ PERFORMANCE_FOR_MONTH_KEY => average_last_30_days,
+ PERFORMANCE_FOR_YEAR_KEY => average_last_12_months
+ }
  data
  end

  private

+ # Get hourly average for the past 24 hours.
+ # @returns [Hash] performance statistics across all records
+ # @example
+ # { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }
+ def lifetime
+ records = PerformanceHistory.all
+ calculate_stats(records)
+ end
+
  # Get hourly average for the past 24 hours.
  # @returns [Hash] performance statistics for the past 24 hours
  # @example
- # { 0: { hour: 1400, load_avg_ms: 12.3, normalization_avg_ms: 4.2, combined_avg_ms: 16.5 },
- # 1: { hour: 1500, load_avg_ms: 12.3, normalization_avg_ms: 4.2, combined_avg_ms: 16.5 },
- # 2: { hour: 1600, load_avg_ms: 12.3, normalization_avg_ms: 4.2, combined_avg_ms: 16.5 },
+ # { 0: { hour: '1400', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # 1: { hour: '1500', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # 2: { hour: '1600', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
  # ...,
- # 23: { hour: 1300, load_avg_ms: 12.3, normalization_avg_ms: 4.2, combined_avg_ms: 16.5 }
+ # 23: { hour: 'NOW', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }}
  # }
  def average_last_24_hours
- start_hour = Time.now.beginning_of_hour - 23.hour
+ start_hour = Time.now.beginning_of_hour - 23.hours
  avgs = {}
  0.upto(23).each do |idx|
  records = PerformanceHistory.where(dt_stamp: start_hour..start_hour.end_of_hour)
- averages = calculate_averages(records)
+ stats = calculate_stats(records)
  data = {}
- data[PERFORMANCE_BY_HOUR_KEY] = idx == 23 ? I18n.t('qa_server.monitor_status.performance.now') : ((idx + 1) % 2 == 0 ? (start_hour.hour * 100).to_s : "")
- data[LOAD_TIME_KEY] = averages[:avg_load_time_ms]
- data[NORMALIZATION_TIME_KEY] = averages[:avg_normalization_time_ms]
- data[COMBINED_TIME_KEY] = averages[:avg_combined_time_ms]
+ data[PERFORMANCE_BY_HOUR_KEY] = performance_by_hour_label(idx, start_hour)
+ data[PERFORMANCE_STATS_KEY] = stats
  avgs[idx] = data
- start_hour = start_hour + 1.hour
+ start_hour += 1.hour
  end
  avgs
  end

+ def performance_by_hour_label(idx, start_hour)
+ if idx == 23
+ I18n.t('qa_server.monitor_status.performance.now')
+ elsif ((idx + 1) % 2).zero?
+ (start_hour.hour * 100).to_s
+ else
+ ""
+ end
+ end
+
  # Get daily average for the past 30 days.
  # @returns [Hash] performance statistics for the past 30 days
  # @example
- # { 0: { day: '07-15-2019', load_avg_ms: 12.3, normalization_avg_ms: 4.2, combined_avg_ms: 16.5 },
- # 1: { day: '07-16-2019', load_avg_ms: 12.3, normalization_avg_ms: 4.2, combined_avg_ms: 16.5 },
- # 2: { day: '07-17-2019', load_avg_ms: 12.3, normalization_avg_ms: 4.2, combined_avg_ms: 16.5 },
+ # { 0: { day: '07-15-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # 1: { day: '07-16-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # 2: { day: '07-17-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
  # ...,
- # 29: { day: '08-13-2019', load_avg_ms: 12.3, normalization_avg_ms: 4.2, combined_avg_ms: 16.5 }
+ # 29: { day: 'TODAY', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }}
  # }
  def average_last_30_days
- start_day = Time.now.beginning_of_day - 29.day
+ start_day = Time.now.beginning_of_day - 29.days
  avgs = {}
  0.upto(29).each do |idx|
  records = PerformanceHistory.where(dt_stamp: start_day..start_day.end_of_day)
- averages = calculate_averages(records)
+ stats = calculate_stats(records)
  data = {}
- data[PERFORMANCE_BY_DAY_KEY] = idx == 29 ? I18n.t('qa_server.monitor_status.performance.today') : ((idx + 1) % 5 == 0 ? (start_day).strftime("%m-%d") : "")
- data[LOAD_TIME_KEY] = averages[:avg_load_time_ms]
- data[NORMALIZATION_TIME_KEY] = averages[:avg_normalization_time_ms]
- data[COMBINED_TIME_KEY] = averages[:avg_combined_time_ms]
+ data[PERFORMANCE_BY_DAY_KEY] = performance_by_day_label(idx, start_day)
+ data[PERFORMANCE_STATS_KEY] = stats
  avgs[idx] = data
- start_day = start_day + 1.day
+ start_day += 1.day
  end
  avgs
  end

+ def performance_by_day_label(idx, start_day)
+ if idx == 29
+ I18n.t('qa_server.monitor_status.performance.today')
+ elsif ((idx + 1) % 5).zero?
+ start_day.strftime("%m-%d")
+ else
+ ""
+ end
+ end
+
  # Get daily average for the past 12 months.
  # @returns [Hash] performance statistics for the past 12 months
  # @example
- # { 0: { month: '09-2019', load_avg_ms: 12.3, normalization_avg_ms: 4.2, combined_avg_ms: 16.5 },
- # 1: { month: '10-2019', load_avg_ms: 12.3, normalization_avg_ms: 4.2, combined_avg_ms: 16.5 },
- # 2: { month: '11-2019', load_avg_ms: 12.3, normalization_avg_ms: 4.2, combined_avg_ms: 16.5 },
+ # { 0: { month: '09-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # 1: { month: '10-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
+ # 2: { month: '11-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }},
  # ...,
- # 11: { month: '08-2019', load_avg_ms: 12.3, normalization_avg_ms: 4.2, combined_avg_ms: 16.5 }
+ # 11: { month: '08-2019', stats: { load_avg_ms: 12.3, normalization_avg_ms: 4.2, full_request_avg_ms: 16.5, etc. }}
  # }
  def average_last_12_months
- start_month = Time.now.beginning_of_month - 11.month
+ start_month = Time.now.beginning_of_month - 11.months
  avgs = {}
  0.upto(11).each do |idx|
  records = PerformanceHistory.where(dt_stamp: start_month..start_month.end_of_month)
- averages = calculate_averages(records)
+ stats = calculate_stats(records)
  data = {}
- data[PERFORMANCE_BY_MONTH_KEY] = (start_month).strftime("%m-%Y")
- data[LOAD_TIME_KEY] = averages[:avg_load_time_ms]
- data[NORMALIZATION_TIME_KEY] = averages[:avg_normalization_time_ms]
- data[COMBINED_TIME_KEY] = averages[:avg_combined_time_ms]
+ data[PERFORMANCE_BY_MONTH_KEY] = start_month.strftime("%m-%Y")
+ data[PERFORMANCE_STATS_KEY] = stats
  avgs[idx] = data
- start_month = start_month + 1.month
+ start_month += 1.month
  end
  avgs
  end

- def calculate_averages(records)
- return { avg_load_time_ms: 0, avg_normalization_time_ms: 0, avg_combined_time_ms: 0 } if records.count.zero?
- sum_load_times = 0
- sum_normalization_times = 0
- sum_combined_times = 0
+ def calculate_stats(records)
+ stats = init_stats
+ return stats if records.count.zero?
+ first = true
  records.each do |record|
- sum_load_times += record.load_time_ms
- sum_normalization_times += record.normalization_time_ms
- sum_combined_times += (record.load_time_ms + record.normalization_time_ms)
+ update_sum_stats(stats, record)
+ update_min_stats(stats, record)
+ update_max_stats(stats, record)
+ first = false
  end
- {
- avg_load_time_ms: sum_load_times / records.count,
- avg_normalization_time_ms: sum_normalization_times / records.count,
- avg_combined_time_ms: sum_combined_times / records.count
- }
+ calculate_avg_stats(stats, records)
+ stats
+ end
+
+ MIN_STARTING_TIME = 999_999_999
+ def init_stats
+ stats = {}
+ stats[SUM_LOAD_TIME_KEY] = 0
+ stats[SUM_NORMALIZATION_TIME_KEY] = 0
+ stats[SUM_FULL_REQUEST_TIME_KEY] = 0
+ stats[AVG_LOAD_TIME_KEY] = 0
+ stats[AVG_NORMALIZATION_TIME_KEY] = 0
+ stats[AVG_FULL_REQUEST_TIME_KEY] = 0
+ stats[MIN_LOAD_TIME_KEY] = MIN_STARTING_TIME
+ stats[MIN_NORMALIZATION_TIME_KEY] = MIN_STARTING_TIME
+ stats[MIN_FULL_REQUEST_TIME_KEY] = MIN_STARTING_TIME
+ stats[MAX_LOAD_TIME_KEY] = 0
+ stats[MAX_NORMALIZATION_TIME_KEY] = 0
+ stats[MAX_FULL_REQUEST_TIME_KEY] = 0
+ stats
+ end
+
+ def update_sum_stats(stats, record)
+ stats[SUM_LOAD_TIME_KEY] += record.load_time_ms
+ stats[SUM_NORMALIZATION_TIME_KEY] += record.normalization_time_ms
+ stats[SUM_FULL_REQUEST_TIME_KEY] += full_request_time_ms(record)
+ end
+
+ def update_min_stats(stats, record)
+ stats[MIN_LOAD_TIME_KEY] = [stats[MIN_LOAD_TIME_KEY], record.load_time_ms].min
+ stats[MIN_NORMALIZATION_TIME_KEY] = [stats[MIN_NORMALIZATION_TIME_KEY], record.normalization_time_ms].min
+ stats[MIN_FULL_REQUEST_TIME_KEY] = [stats[MIN_FULL_REQUEST_TIME_KEY], full_request_time_ms(record)].min
+ end
+
+ def update_max_stats(stats, record)
+ stats[MAX_LOAD_TIME_KEY] = [stats[MAX_LOAD_TIME_KEY], record.load_time_ms].max
+ stats[MAX_NORMALIZATION_TIME_KEY] = [stats[MAX_NORMALIZATION_TIME_KEY], record.normalization_time_ms].max
+ stats[MAX_FULL_REQUEST_TIME_KEY] = [stats[MAX_FULL_REQUEST_TIME_KEY], full_request_time_ms(record)].max
+ end
+
+ def calculate_avg_stats(stats, records)
+ stats[AVG_LOAD_TIME_KEY] = stats[SUM_LOAD_TIME_KEY] / records.count
+ stats[AVG_NORMALIZATION_TIME_KEY] = stats[SUM_NORMALIZATION_TIME_KEY] / records.count
+ stats[AVG_FULL_REQUEST_TIME_KEY] = stats[SUM_FULL_REQUEST_TIME_KEY] / records.count
+ end
+
+ def full_request_time_ms(record)
+ record.load_time_ms + record.normalization_time_ms
  end
  end
  end
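Taken together, performance_data now returns a single nested hash keyed by the constants added above. A minimal sketch of how a caller might read it, using only the key names documented in the @example comments (illustrative only, not code taken from the gem's views):

  data = QaServer::PerformanceHistory.performance_data
  lifetime = data[:all_authorities][:lifetime_stats]
  lifetime[:full_request_avg_ms]                    # average full request time across all recorded runs

  latest_hour = data[:all_authorities][:day][23]    # hourly rows pair a label with a stats hash
  latest_hour[:hour]                                # label for the current hour (the 'NOW' slot)
  latest_hour[:stats][:full_request_avg_ms]         # average full request time for that hour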
@@ -42,7 +42,7 @@ module QaServer

  def prefix_for_url(action)
  subauth = "/#{subauthority_name}" if subauthority_name.present?
- prefix = "#{QaServer::Engine.qa_engine_mount}/#{action}/linked_data/#{authority_name.downcase}#{subauth}"
+ "#{QaServer::Engine.qa_engine_mount}/#{action}/linked_data/#{authority_name.downcase}#{subauth}"
  end

  # Convert identifier into URL safe version with encoding if needed.
@@ -1,7 +1,8 @@
+ # frozen_string_literal: true
  module PrependedLinkedData::FindTerm
  # Override Qa::Authorities::LinkedData::FindTerm#find method
  # @return [Hash] single term results in requested format
- def find(id, language: nil, replacements: {}, subauth: nil, format: nil, jsonld: false, performance_data: false) # rubocop:disable Metrics/ParameterLists, Metrics/MethodLength
+ def find(id, language: nil, replacements: {}, subauth: nil, format: nil, jsonld: false, performance_data: false) # rubocop:disable Metrics/ParameterLists
  saved_performance_data = performance_data
  performance_data = true
  full_results = super
@@ -1,3 +1,4 @@
+ # frozen_string_literal: true
  module PrependedLinkedData::SearchQuery
  # Override Qa::Authorities::LinkedData::SearchQuery#search method
  # @return [String] json results for search query
@@ -0,0 +1,21 @@
+ # frozen_string_literal: true
+ require 'fileutils'
+ require 'gruff'
+
+ # This module include provides graph methods used by all monitor status presenters working with graphs
+ module QaServer::MonitorStatus
+ module GruffGraph
+ private
+
+ def graph_relative_path
+ File.join('qa_server', 'charts')
+ end
+
+ def graph_full_path(graph_filename)
+ path = Rails.root.join('app', 'assets', 'images', graph_relative_path)
+ FileUtils.mkdir_p path
+ File.join(path, graph_filename)
+ end
+ end
+ end
+ # frozen_string_literal: true
@@ -0,0 +1,76 @@
+ # frozen_string_literal: true
+ # This presenter class provides data related to last test run as needed by the view that monitors status of authorities.
+ module QaServer::MonitorStatus
+ class CurrentStatusPresenter
+ # @param current_summary [ScenarioRunSummary] summary status of the latest run of test scenarios
+ # @param current_data [Array<Hash>] current set of failures for the latest test run, if any
+ def initialize(current_summary:, current_failure_data:)
+ @current_summary = current_summary
+ @current_failure_data = current_failure_data
+ end
+
+ # @return [String] date of last test run
+ def last_updated
+ @current_summary.run_dt_stamp.in_time_zone("Eastern Time (US & Canada)").strftime("%m/%d/%y - %I:%M %p")
+ end
+
+ # @return [String] date of first recorded test run
+ def first_updated
+ QaServer::ScenarioRunRegistry.first.dt_stamp.in_time_zone("Eastern Time (US & Canada)").strftime("%m/%d/%y - %I:%M %p")
+ end
+
+ # @return [Integer] number of loaded authorities
+ def authorities_count
+ @current_summary.authority_count
+ end
+
+ # @return [Integer] number of authorities with failing tests in the latest test run
+ def failing_authorities_count
+ @current_failure_data.map { |f| f[:authority_name] }.uniq.count
+ end
+
+ # @return [String] css style class representing whether all tests passed or any failed
+ def authorities_count_style
+ failures? ? 'status-bad' : 'status-good'
+ end
+
+ # @return [Integer] number of tests in the latest test run
+ def tests_count
+ @current_summary.total_scenario_count
+ end
+
+ # @return [Integer] number of passing tests in the latest test run
+ def passing_tests_count
+ @current_summary.passing_scenario_count
+ end
+
+ # @return [Integer] number of failing tests in the latest test run
+ def failing_tests_count
+ @current_summary.failing_scenario_count
+ end
+
+ # @return [String] css style class representing whether all tests passed or any failed
+ def failing_tests_style
+ failures? ? 'summary-status-bad' : 'status-good'
+ end
+
+ # @return [Array<Hash>] A list of failures data in the latest test run, if any
+ # @example
+ # [ { status: :FAIL,
+ # status_label: 'X',
+ # authority_name: 'LOCNAMES_LD4L_CACHE',
+ # subauthority_name: 'person',
+ # service: 'ld4l_cache',
+ # action: 'search',
+ # url: '/qa/search/linked_data/locnames_ld4l_cache/person?q=mark twain&maxRecords=4',
+ # err_message: 'Exception: Something went wrong.' }, ... ]
+ def failures
+ @current_failure_data
+ end
+
+ # @return [Boolean] true if failure data exists for the latest test run; otherwise false
+ def failures?
+ failing_tests_count.positive?
+ end
+ end
+ end
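CurrentStatusPresenter is a plain Ruby presenter, so a hedged sketch of how a monitor-status view or controller might use it (where the summary and failure rows come from is outside this diff; the variable names below are placeholders):

  presenter = QaServer::MonitorStatus::CurrentStatusPresenter.new(
    current_summary: latest_summary,        # a ScenarioRunSummary for the most recent run
    current_failure_data: latest_failures)  # Array<Hash> of failure rows, possibly empty
  presenter.last_updated          # formatted timestamp of the latest run
  presenter.failing_tests_count   # count from the summary record
  presenter.failing_tests_style   # 'summary-status-bad' when any test failed, else 'status-good'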
@@ -0,0 +1,138 @@
+ # frozen_string_literal: true
+ # This presenter class provides historical testing data needed by the view that monitors status of authorities.
+ module QaServer::MonitorStatus
+ class HistoryPresenter
+ HISTORICAL_AUTHORITY_NAME_IDX = 0
+ HISTORICAL_FAILURE_COUNT_IDX = 1
+ HISTORICAL_PASSING_COUNT_IDX = 2
+
+ include QaServer::MonitorStatus::GruffGraph
+
+ # @param historical_summary_data [Array<Hash>] summary of past failuring runs per authority to drive chart
+ def initialize(historical_summary_data:)
+ @historical_summary_data = historical_summary_data
+ end
+
+ # @return [Array<Hash>] historical test data to be displayed (authname, failing, passing)
+ # @example
+ # [ [ 'agrovoc', 0, 24 ],
+ # [ 'geonames_ld4l_cache', 2, 22 ] ... ]
+ def historical_summary
+ @historical_summary_data
+ end
+
+ # @return [Boolean] true if historical test data exists; otherwise false
+ def history?
+ return true if @historical_summary_data.present?
+ false
+ end
+
+ def historical_graph
+ # g = Gruff::SideStackedBar.new('800x400')
+ g = Gruff::SideStackedBar.new
+ historical_graph_theme(g)
+ g.title = ''
+ historical_data = rework_historical_data_for_gruff
+ g.labels = historical_data[0]
+ g.data('Fail', historical_data[1])
+ g.data('Pass', historical_data[2])
+ g.write historical_graph_full_path
+ File.join(graph_relative_path, historical_graph_filename)
+ end
+
+ # @return [String] the name of the css style class to use for the status cell based on the status of the scenario test.
+ def status_style_class(status)
+ "status-#{status[:status]}"
+ end
+
+ # @return [String] the name of the css style class to use for the status cell based on the status of the scenario test.
+ def status_label(status)
+ case status[:status]
+ when :good
+ QaServer::ScenarioRunHistory::GOOD_MARKER
+ when :bad
+ QaServer::ScenarioRunHistory::BAD_MARKER
+ when :unknown
+ QaServer::ScenarioRunHistory::UNKNOWN_MARKER
+ end
+ end
+
+ def historical_data_authority_name(historical_entry)
+ historical_entry[HISTORICAL_AUTHORITY_NAME_IDX]
+ end
+
+ def days_authority_passing(historical_entry)
+ historical_entry[HISTORICAL_PASSING_COUNT_IDX]
+ end
+
+ def days_authority_failing(historical_entry)
+ historical_entry[HISTORICAL_FAILURE_COUNT_IDX]
+ end
+
+ def days_authority_tested(historical_entry)
+ days_authority_passing(historical_entry) + days_authority_failing(historical_entry)
+ end
+
+ def percent_authority_failing(historical_entry)
+ days_authority_failing(historical_entry).to_f / days_authority_tested(historical_entry)
+ end
+
+ def percent_authority_failing_str(historical_entry)
+ "#{percent_authority_failing(historical_entry) * 100}%"
+ end
+
+ def failure_style_class(historical_entry)
+ return "status-neutral" if days_authority_failing(historical_entry) <= 0
+ return "status-unknown" if percent_authority_failing(historical_entry) < 0.1
+ "status-bad"
+ end
+
+ def passing_style_class(historical_entry)
+ return "status-bad" if days_authority_passing(historical_entry) <= 0
+ "status-good"
+ end
+
+ def display_history_details?
+ display_historical_graph? || display_historical_datatable?
+ end
+
+ def display_historical_graph?
+ QaServer.config.display_historical_graph?
+ end
+
+ def display_historical_datatable?
+ QaServer.config.display_historical_datatable?
+ end
+
+ private
+
+ def historical_graph_theme(g)
+ g.theme_pastel
+ g.colors = ['#ffcccc', '#ccffcc']
+ g.marker_font_size = 12
+ g.x_axis_increment = 10
+ end
+
+ def historical_graph_full_path
+ graph_full_path(historical_graph_filename)
+ end
+
+ def historical_graph_filename
+ 'historical_side_stacked_bar.png'
+ end
+
+ def rework_historical_data_for_gruff
+ labels = {}
+ pass_data = []
+ fail_data = []
+ i = 0
+ historical_summary.each do |data|
+ labels[i] = data[0]
+ i += 1
+ fail_data << data[1]
+ pass_data << data[2]
+ end
+ [labels, fail_data, pass_data]
+ end
+ end
+ end
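For reference, rework_historical_data_for_gruff simply reshapes the rows documented in the historical_summary @example into the structures Gruff expects. Working through the sample rows from that comment (this is the worked result, not code from the gem):

  # [['agrovoc', 0, 24], ['geonames_ld4l_cache', 2, 22]] becomes
  #   labels    => { 0 => 'agrovoc', 1 => 'geonames_ld4l_cache' }
  #   fail_data => [0, 2]
  #   pass_data => [24, 22]
  # historical_graph then hands these to Gruff::SideStackedBar via g.labels and g.data('Fail', ...) / g.data('Pass', ...).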