ezmetrics 1.2.2 → 2.0.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 408332cfc86d05b75d8b6004098d736ad60a624189f46022af822fdf6c24960d
4
- data.tar.gz: 6d4ef89fa6a80e28cb443e16f1528b351a93bf7bf50e2a8fc756dc943a25f11f
3
+ metadata.gz: 16333a0b9644e92d3f3648530e2217d6cc76eb2c427a63cf315d254f2e081cb6
4
+ data.tar.gz: 5dc280c1ec8594856addced752296731f9dfdc9ad7bae84625541d959f50f2d7
5
5
  SHA512:
6
- metadata.gz: 99a4ac48560305d2967cdcc8b248978ee3f0748ab1fb7da2830a3804d732d29664f25bfdc4bd2bfa50c449994913ecd455362f57af1f39f0f45e904e36876c12
7
- data.tar.gz: c53617fbc324765b9b388b1280b6d363a3707b002a2718cd0467a16ff24708a8adbf81a812aca111a643924de4834a155efdef252911195570837fdb8f6a9dce
6
+ metadata.gz: 48c142669ed79439784f8ed4c12d658169a51b125c2dbfd9e1324271d388fc69df3fe6c960b73970898ab088ca000335a405877c44722f717c38b2169a7c0bc4
7
+ data.tar.gz: c2503cd1900b0723db05e72dae3ee3c7633e46dabde7083f218fae8e0c3354c78edf1ab549b7e308d356017b171dc24da540d93fa4fe1b067f948de8df02e521
data/README.md CHANGED
@@ -10,6 +10,16 @@ Simple, lightweight and fast metrics aggregation for Rails.
10
10
  gem 'ezmetrics'
11
11
  ```
12
12
 
13
+ ## Available metrics
14
+
15
+ | Type | Aggregate functions |
16
+ |:----------:|:---------------------------------:|
17
+ | `duration` | `avg`, `max`, `percentile` |
18
+ | `views` | `avg`, `max`, `percentile` |
19
+ | `db` | `avg`, `max`, `percentile` |
20
+ | `queries` | `avg`, `max`, `percentile` |
21
+ | `requests` | `all`, `2xx`, `3xx`, `4xx`, `5xx` |
22
+
13
23
  ## Usage
14
24
 
15
25
  ### Getting started
@@ -258,6 +268,48 @@ EZmetrics.new.show(views: :avg, db: [:avg, :max], requests: true)
258
268
  }
259
269
  ```
260
270
 
271
+ ---
272
+
273
+ **5. Percentile**
274
+
275
+ This feature is available since version `2.0.0`.
276
+
277
+ By default, percentile aggregation is turned off because it requires storing each value of every metric.
278
+
279
+ To enable this feature, you need to set `store_each_value: true` when saving the metrics:
280
+
281
+ ```ruby
282
+ EZmetrics.new.log(
283
+ duration: 100.5,
284
+ views: 40.7,
285
+ db: 59.8,
286
+ queries: 4,
287
+ status: 200,
288
+ store_each_value: true
289
+ )
290
+ ```
291
+
292
+ The aggregation syntax has the following format `metrics_type: :percentile_{number}`, where `number` is any integer in the 1..99 range.
293
+
294
+
295
+ ```ruby
296
+ EZmetrics.new.show(db: [:avg, :percentile_90, :percentile_95], duration: :percentile_99)
297
+ ```
298
+
299
+ ```ruby
300
+ {
301
+ db: {
302
+ avg: 155,
303
+ percentile_90: 205,
304
+ percentile_95: 215
305
+ },
306
+ duration: {
307
+ percentile_99: 236
308
+ }
309
+ }
310
+ ```
311
+
312
+
261
313
  ### Partitioning
262
314
 
263
315
  If you want to visualize your metrics by using a **line chart**, you will need to use partitioning.
@@ -317,6 +369,8 @@ Available time units for partitioning: `second`, `minute`, `hour`, `day`. Defaul
317
369
 
318
370
  The aggregation speed relies on the performance of **Redis** (data storage) and **Oj** (json serialization/parsing).
319
371
 
372
+ #### Simple aggregation
373
+
320
374
  You can check the **aggregation** time by running:
321
375
 
322
376
  ```ruby
@@ -326,10 +380,10 @@ EZmetrics::Benchmark.new.measure_aggregation
326
380
  | Interval | Duration (seconds) |
327
381
  | :------: | :----------------: |
328
382
  | 1 minute | 0.0 |
329
- | 1 hour | 0.04 |
330
- | 12 hours | 0.49 |
331
- | 24 hours | 1.51 |
332
- | 48 hours | 3.48 |
383
+ | 1 hour | 0.02 |
384
+ | 12 hours | 0.22 |
385
+ | 24 hours | 0.61 |
386
+ | 48 hours | 1.42 |
333
387
 
334
388
  ---
335
389
 
@@ -342,9 +396,44 @@ EZmetrics::Benchmark.new.measure_aggregation(:minute)
342
396
  | Interval | Duration (seconds) |
343
397
  | :------: | :----------------: |
344
398
  | 1 minute | 0.0 |
345
- | 1 hour | 0.04 |
346
- | 12 hours | 0.53 |
347
- | 24 hours | 1.59 |
348
- | 48 hours | 3.51 |
399
+ | 1 hour | 0.02 |
400
+ | 12 hours | 0.25 |
401
+ | 24 hours | 0.78 |
402
+ | 48 hours | 1.75 |
403
+
404
+ ---
405
+
406
+ #### Percentile aggregation
407
+
408
+ You can check the **percentile aggregation** time by running:
409
+
410
+ ```ruby
411
+ EZmetrics::Benchmark.new(true).measure_aggregation
412
+ ```
413
+
414
+ | Interval | Duration (seconds) |
415
+ | :------: | :----------------: |
416
+ | 1 minute | 0.0 |
417
+ | 1 hour | 0.14 |
418
+ | 12 hours | 2.11 |
419
+ | 24 hours | 5.85 |
420
+ | 48 hours | 14.1 |
421
+
422
+ ---
423
+
424
+ To check the **partitioned aggregation** time for percentile, run:
425
+
426
+ ```ruby
427
+ EZmetrics::Benchmark.new(true).measure_aggregation(:minute)
428
+ ```
429
+
430
+ | Interval | Duration (seconds) |
431
+ | :------: | :----------------: |
432
+ | 1 minute | 0.0 |
433
+ | 1 hour | 0.16 |
434
+ | 12 hours | 1.97 |
435
+ | 24 hours | 5.85 |
436
+ | 48 hours | 13.9 |
437
+
349
438
 
350
439
  The benchmarks above were run on a _2017 Macbook Pro 2.9 GHz Intel Core i7 with 16 GB of RAM_
data/lib/ezmetrics.rb CHANGED
@@ -9,16 +9,18 @@ class EZmetrics
9
9
 
10
10
  def initialize(interval_seconds=60)
11
11
  @interval_seconds = interval_seconds.to_i
12
- @redis = Redis.new
12
+ @redis = Redis.new(driver: :hiredis)
13
+ @schema = redis_schema
13
14
  end
14
15
 
15
- def log(payload={duration: 0.0, views: 0.0, db: 0.0, queries: 0, status: 200})
16
+ def log(payload={duration: 0.0, views: 0.0, db: 0.0, queries: 0, status: 200, store_each_value: false})
16
17
  @safe_payload = {
17
- duration: payload[:duration].to_f,
18
- views: payload[:views].to_f,
19
- db: payload[:db].to_f,
20
- queries: payload[:queries].to_i,
21
- status: payload[:status].to_i
18
+ duration: payload[:duration].to_f,
19
+ views: payload[:views].to_f,
20
+ db: payload[:db].to_f,
21
+ queries: payload[:queries].to_i,
22
+ status: payload[:status].to_i,
23
+ store_each_value: payload[:store_each_value].to_s == "true"
22
24
  }
23
25
 
24
26
  this_second = Time.now.to_i
@@ -31,10 +33,11 @@ class EZmetrics
31
33
  METRICS.each do |metrics_type|
32
34
  update_sum(metrics_type)
33
35
  update_max(metrics_type)
36
+ store_value(metrics_type) if safe_payload[:store_each_value]
34
37
  end
35
38
 
36
- this_second_metrics["statuses"]["all"] += 1
37
- this_second_metrics["statuses"][status_group] += 1
39
+ this_second_metrics[schema["all"]] += 1
40
+ this_second_metrics[schema[status_group]] += 1
38
41
  else
39
42
  @this_second_metrics = {
40
43
  "second" => this_second,
@@ -46,10 +49,25 @@ class EZmetrics
46
49
  "db_max" => safe_payload[:db],
47
50
  "queries_sum" => safe_payload[:queries],
48
51
  "queries_max" => safe_payload[:queries],
49
- "statuses" => { "2xx" => 0, "3xx" => 0, "4xx" => 0, "5xx" => 0, "all" => 1 }
52
+ "2xx" => 0,
53
+ "3xx" => 0,
54
+ "4xx" => 0,
55
+ "5xx" => 0,
56
+ "all" => 1
50
57
  }
51
58
 
52
- this_second_metrics["statuses"][status_group] = 1
59
+ if safe_payload[:store_each_value]
60
+ this_second_metrics.merge!(
61
+ "duration_values" => [safe_payload[:duration]],
62
+ "views_values" => [safe_payload[:views]],
63
+ "db_values" => [safe_payload[:db]],
64
+ "queries_values" => [safe_payload[:queries]]
65
+ )
66
+ end
67
+
68
+ this_second_metrics[status_group] = 1
69
+
70
+ @this_second_metrics = this_second_metrics.values
53
71
  end
54
72
 
55
73
  redis.setex(this_second, interval_seconds, Oj.dump(this_second_metrics))
@@ -70,18 +88,18 @@ class EZmetrics
70
88
 
71
89
  def partition_by(time_unit=:minute)
72
90
  time_unit = PARTITION_UNITS.include?(time_unit) ? time_unit : :minute
73
- @partitioned_metrics = interval_metrics.group_by { |h| second_to_partition_unit(time_unit, h["second"]) }
91
+ @partitioned_metrics = interval_metrics.group_by { |array| second_to_partition_unit(time_unit, array[schema["second"]]) }
74
92
  self
75
93
  end
76
94
 
77
95
  private
78
96
 
79
- attr_reader :redis, :interval_seconds, :interval_metrics, :requests, :flat,
97
+ attr_reader :redis, :interval_seconds, :interval_metrics, :requests, :flat, :schema,
80
98
  :storage_key, :safe_payload, :this_second_metrics, :partitioned_metrics, :options
81
99
 
82
100
  def aggregate_data
83
101
  return {} unless interval_metrics.any?
84
- @requests = interval_metrics.sum { |hash| hash["statuses"]["all"] }
102
+ @requests = interval_metrics.sum { |array| array[schema["all"]] }
85
103
  build_result
86
104
  rescue
87
105
  {}
@@ -90,11 +108,12 @@ class EZmetrics
90
108
  def aggregate_partitioned_data
91
109
  partitioned_metrics.map do |partition, metrics|
92
110
  @interval_metrics = metrics
93
- @requests = interval_metrics.sum { |hash| hash["statuses"]["all"] }
111
+ @requests = interval_metrics.sum { |array| array[schema["all"]] }
112
+ METRICS.each { |metrics_type| instance_variable_set("@sorted_#{metrics_type}_values", nil) }
94
113
  flat ? { timestamp: partition, **build_result } : { timestamp: partition, data: build_result }
95
114
  end
96
115
  rescue
97
- new(options)
116
+ self
98
117
  end
99
118
 
100
119
  def build_result
@@ -147,39 +166,101 @@ class EZmetrics
147
166
  @interval_metrics ||= begin
148
167
  interval_start = Time.now.to_i - interval_seconds
149
168
  interval_keys = (interval_start..Time.now.to_i).to_a
150
- redis.mget(interval_keys).compact.map { |hash| Oj.load(hash) }
169
+ redis.mget(interval_keys).compact.map { |array| Oj.load(array) }
151
170
  end
152
171
  end
153
172
 
154
173
  def aggregate(metrics, aggregation_function)
155
- return unless AGGREGATION_FUNCTIONS.include?(aggregation_function)
156
174
  return avg("#{metrics}_sum") if aggregation_function == :avg
157
175
  return max("#{metrics}_max") if aggregation_function == :max
176
+
177
+ percentile = aggregation_function.match(/percentile_(?<value>\d+)/)
178
+
179
+ if percentile && percentile["value"]
180
+ sorted_values = send("sorted_#{metrics}_values")
181
+ percentile(sorted_values, percentile["value"].to_i)&.round
182
+ end
183
+ end
184
+
185
+ METRICS.each do |metrics|
186
+ define_method "sorted_#{metrics}_values" do
187
+ instance_variable_get("@sorted_#{metrics}_values") || instance_variable_set(
188
+ "@sorted_#{metrics}_values", interval_metrics.map { |array| array[schema["#{metrics}_values"]] }.flatten.compact
189
+ )
190
+ end
191
+ end
192
+
193
+ def redis_schema
194
+ [
195
+ "second",
196
+ "duration_sum",
197
+ "duration_max",
198
+ "views_sum",
199
+ "views_max",
200
+ "db_sum",
201
+ "db_max",
202
+ "queries_sum",
203
+ "queries_max",
204
+ "2xx",
205
+ "3xx",
206
+ "4xx",
207
+ "5xx",
208
+ "all",
209
+ "duration_values",
210
+ "views_values",
211
+ "db_values",
212
+ "queries_values"
213
+ ].each_with_index.inject({}){ |result, pair| result[pair[0]] = pair[1] ; result }
158
214
  end
159
215
 
160
216
  def update_sum(metrics)
161
- this_second_metrics["#{metrics}_sum"] += safe_payload[metrics]
217
+ this_second_metrics[schema["#{metrics}_sum"]] += safe_payload[metrics]
218
+ end
219
+
220
+ def store_value(metrics)
221
+ this_second_metrics[schema["#{metrics}_values"]] << safe_payload[metrics]
162
222
  end
163
223
 
164
224
  def update_max(metrics)
165
- max_value = [safe_payload[metrics], this_second_metrics["#{metrics}_max"]].max
166
- this_second_metrics["#{metrics}_max"] = max_value
225
+ max_value = [safe_payload[metrics], this_second_metrics[schema["#{metrics}_max"]]].max
226
+ this_second_metrics[schema["#{metrics}_max"]] = max_value
167
227
  end
168
228
 
169
229
  def avg(metrics)
170
- (interval_metrics.sum { |h| h[metrics] }.to_f / requests).round
230
+ (interval_metrics.sum { |array| array[schema[metrics]] }.to_f / requests).round
171
231
  end
172
232
 
173
233
  def max(metrics)
174
- interval_metrics.max { |h| h[metrics] }[metrics].round
234
+ interval_metrics.max { |array| array[schema[metrics]] }[schema[metrics]].round
235
+ end
236
+
237
+ def percentile(array, pcnt)
238
+ sorted_array = array.sort
239
+
240
+ return nil if array.length == 0
241
+
242
+ rank = (pcnt.to_f / 100) * (array.length + 1)
243
+ whole = rank.truncate
244
+
245
+ # if rank has a fractional part, interpolate between the two neighboring values
246
+ if whole != rank
247
+ s0 = sorted_array[whole - 1]
248
+ s1 = sorted_array[whole]
249
+
250
+ f = (rank - rank.truncate).abs
251
+
252
+ return (f * (s1 - s0)) + s0
253
+ else
254
+ return sorted_array[whole - 1]
255
+ end
175
256
  end
176
257
 
177
258
  def count_all_status_groups
178
- interval_metrics.inject({ "2xx" => 0, "3xx" => 0, "4xx" => 0, "5xx" => 0 }) do |result, h|
179
- result["2xx"] += h["statuses"]["2xx"]
180
- result["3xx"] += h["statuses"]["3xx"]
181
- result["4xx"] += h["statuses"]["4xx"]
182
- result["5xx"] += h["statuses"]["5xx"]
259
+ interval_metrics.inject({ "2xx" => 0, "3xx" => 0, "4xx" => 0, "5xx" => 0 }) do |result, array|
260
+ result["2xx"] += array[schema["2xx"]]
261
+ result["3xx"] += array[schema["3xx"]]
262
+ result["4xx"] += array[schema["4xx"]]
263
+ result["5xx"] += array[schema["5xx"]]
183
264
  result
184
265
  end
185
266
  end
@@ -2,12 +2,13 @@ require "benchmark"
2
2
 
3
3
  class EZmetrics::Benchmark
4
4
 
5
- def initialize
6
- @start = Time.now.to_i
7
- @redis = Redis.new
8
- @durations = []
9
- @iterations = 3
10
- @intervals = {
5
+ def initialize(store_each_value=false)
6
+ @store_each_value = store_each_value
7
+ @start = Time.now.to_i
8
+ @redis = Redis.new(driver: :hiredis)
9
+ @durations = []
10
+ @iterations = 1
11
+ @intervals = {
11
12
  "1.minute" => 60,
12
13
  "1.hour " => 3600,
13
14
  "12.hours" => 43200,
@@ -29,31 +30,38 @@ class EZmetrics::Benchmark
29
30
 
30
31
  private
31
32
 
32
- attr_reader :start, :redis, :durations, :intervals, :iterations
33
+ attr_reader :start, :redis, :durations, :intervals, :iterations, :store_each_value
33
34
 
34
35
  def write_metrics
35
36
  seconds = intervals.values.max
36
37
  seconds.times do |i|
37
38
  second = start - i
38
39
  payload = {
39
- "second" => second,
40
- "duration_sum" => rand(10000),
41
- "duration_max" => rand(10000),
42
- "views_sum" => rand(1000),
43
- "views_max" => rand(1000),
44
- "db_sum" => rand(8000),
45
- "db_max" => rand(8000),
46
- "queries_sum" => rand(100),
47
- "queries_max" => rand(100),
48
- "statuses" => {
49
- "2xx" => rand(1..10),
50
- "3xx" => rand(1..10),
51
- "4xx" => rand(1..10),
52
- "5xx" => rand(1..10),
53
- "all" => rand(1..40)
54
- }
40
+ "second" => second,
41
+ "duration_sum" => rand(10000),
42
+ "duration_max" => rand(10000),
43
+ "views_sum" => rand(1000),
44
+ "views_max" => rand(1000),
45
+ "db_sum" => rand(8000),
46
+ "db_max" => rand(8000),
47
+ "queries_sum" => rand(100),
48
+ "queries_max" => rand(100),
49
+ "2xx" => rand(1..10),
50
+ "3xx" => rand(1..10),
51
+ "4xx" => rand(1..10),
52
+ "5xx" => rand(1..10),
53
+ "all" => rand(1..40)
55
54
  }
56
- redis.setex(second, seconds, Oj.dump(payload))
55
+
56
+ if store_each_value
57
+ payload.merge!(
58
+ "duration_values" => Array.new(100) { rand(10..60000) },
59
+ "views_values" => Array.new(100) { rand(10..60000) },
60
+ "db_values" => Array.new(100) { rand(10..60000) },
61
+ "queries_values" => Array.new(10) { rand(1..60) }
62
+ )
63
+ end
64
+ redis.setex(second, seconds, Oj.dump(payload.values))
57
65
  end
58
66
  nil
59
67
  end
@@ -67,10 +75,11 @@ class EZmetrics::Benchmark
67
75
  def measure_aggregation_time(interval, seconds, partition_by)
68
76
  iterations.times do
69
77
  durations << ::Benchmark.measure do
70
- if partition_by
71
- EZmetrics.new(seconds).partition_by(partition_by).show
78
+ ezmetrics = EZmetrics.new(seconds)
79
+ if store_each_value
80
+ partition_by ? ezmetrics.partition_by(partition_by).show(db: :percentile_90) : ezmetrics.show(db: :percentile_90)
72
81
  else
73
- EZmetrics.new(seconds).show
82
+ partition_by ? ezmetrics.partition_by(partition_by).show : ezmetrics.show
74
83
  end
75
84
  end.real
76
85
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: ezmetrics
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.2.2
4
+ version: 2.0.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Nicolae Rotaru