opentelemetry-metrics-sdk 0.11.1 → 0.11.2
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 0f62ae36ad71651af824a64c15bd3ffff2e32c08cd161655a013955736f97a95
+  data.tar.gz: 7c1a32329e65f2a96e5ee4c6715067cb488e2b5c88e2642d3668d2eddfbe9ce3
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8286d7d68a62f7629e05fbfc7718743b583268a24882f390e60fdcdf584a27dc7aef7d487d209caafd179966c713969334c06fb2ad230f3865686627b62a88ef
+  data.tar.gz: e43fca566da12585f81251e896175a4b627af4403aa9d146d733ebce117cadf08836d1687d01acfe024c7f9ef06a395f3122ba080201249677eb9b154b9bedd9
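Note: the digests above cover the two archives packed inside the published .gem file (metadata.gz and data.tar.gz). As a rough, hypothetical sketch (not part of this diff; the gem filename and the use of Gem::Package::TarReader are assumptions), the SHA256 values could be recomputed locally like this:

    # Hypothetical verification sketch: a .gem is a tar archive whose entries
    # include metadata.gz and data.tar.gz; checksums.yaml records their digests.
    require 'rubygems/package'
    require 'digest'

    File.open('opentelemetry-metrics-sdk-0.11.2.gem', 'rb') do |gem_io|
      Gem::Package::TarReader.new(gem_io) do |tar|
        tar.each do |entry|
          next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)

          # Compare against the SHA256 entries listed above.
          puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
        end
      end
    end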
data/CHANGELOG.md
CHANGED

@@ -9,6 +9,7 @@ require_relative 'exponential_histogram/log2e_scale_factor'
 require_relative 'exponential_histogram/ieee_754'
 require_relative 'exponential_histogram/logarithm_mapping'
 require_relative 'exponential_histogram/exponent_mapping'
+require_relative 'exponential_histogram_data_point'

 module OpenTelemetry
   module SDK
@@ -44,8 +45,20 @@ module OpenTelemetry
             @scale = validate_scale(max_scale)

             @mapping = new_mapping(@scale)
+
+            # Previous state for cumulative aggregation
+            @previous_positive = {} # nil
+            @previous_negative = {} # nil
+            @previous_min = {} # Float::INFINITY
+            @previous_max = {} # -Float::INFINITY
+            @previous_sum = {} # 0
+            @previous_count = {} # 0
+            @previous_zero_count = {} # 0
+            @previous_scale = {} # nil
           end

+          # when aggregation temporality is cumulative, merge and downscale will happen.
+          # rubocop:disable Metrics/MethodLength
           def collect(start_time, end_time, data_points)
             if @aggregation_temporality.delta?
               # Set timestamps and 'move' data point values to result.
@@ -57,18 +70,133 @@ module OpenTelemetry
               data_points.clear
               hdps
             else
-              #
-              data_points
-
-
-
-
-
-
+              # CUMULATIVE temporality - merge current data_points to previous data_points
+              # and only keep the merged data_points in @previous_*
+
+              merged_data_points = {}
+
+              # this will slow down the operation especially if large amount of data_points present
+              # but it should be fine since with cumulative, the data_points are merged into previous_* and not kept in data_points
+              # rubocop:disable Metrics/BlockLength
+              data_points.each do |attributes, hdp|
+                # Store current values
+                current_positive = hdp.positive
+                current_negative = hdp.negative
+                current_sum = hdp.sum
+                current_min = hdp.min
+                current_max = hdp.max
+                current_count = hdp.count
+                current_zero_count = hdp.zero_count
+                current_scale = hdp.scale
+
+                # Setup previous positive, negative bucket and scale based on three different cases
+                @previous_positive[attributes] = current_positive.copy_empty if @previous_positive[attributes].nil?
+                @previous_negative[attributes] = current_negative.copy_empty if @previous_negative[attributes].nil?
+                @previous_scale[attributes] = current_scale if @previous_scale[attributes].nil?
+
+                # Determine minimum scale for merging
+                min_scale = [@previous_scale[attributes], current_scale].min
+
+                # Calculate ranges for positive and negative buckets
+                low_positive, high_positive = get_low_high_previous_current(
+                  @previous_positive[attributes],
+                  current_positive,
+                  @previous_scale[attributes],
+                  current_scale,
+                  min_scale
+                )
+                low_negative, high_negative = get_low_high_previous_current(
+                  @previous_negative[attributes],
+                  current_negative,
+                  @previous_scale[attributes],
+                  current_scale,
+                  min_scale
+                )
+
+                # Adjust min_scale based on bucket size constraints
+                min_scale = [
+                  min_scale - get_scale_change(low_positive, high_positive),
+                  min_scale - get_scale_change(low_negative, high_negative)
+                ].min
+
+                # Downscale previous buckets if necessary
+                downscale_change = @previous_scale[attributes] - min_scale
+                downscale(downscale_change, @previous_positive[attributes], @previous_negative[attributes])
+
+                # Merge current buckets into previous buckets (kind like update); it's always :cumulative
+                merge_buckets(@previous_positive[attributes], current_positive, current_scale, min_scale, @aggregation_temporality)
+                merge_buckets(@previous_negative[attributes], current_negative, current_scale, min_scale, @aggregation_temporality)
+
+                # initialize min, max, sum, count, zero_count for first time
+                @previous_min[attributes] = Float::INFINITY if @previous_min[attributes].nil?
+                @previous_max[attributes] = -Float::INFINITY if @previous_max[attributes].nil?
+                @previous_sum[attributes] = 0 if @previous_sum[attributes].nil?
+                @previous_count[attributes] = 0 if @previous_count[attributes].nil?
+                @previous_zero_count[attributes] = 0 if @previous_zero_count[attributes].nil?
+
+                # Update aggregated values
+                @previous_min[attributes] = [@previous_min[attributes], current_min].min
+                @previous_max[attributes] = [@previous_max[attributes], current_max].max
+                @previous_sum[attributes] += current_sum
+                @previous_count[attributes] += current_count
+                @previous_zero_count[attributes] += current_zero_count
+                @previous_scale[attributes] = min_scale
+
+                # Create merged data point
+                merged_hdp = ExponentialHistogramDataPoint.new(
+                  attributes,
+                  start_time,
+                  end_time,
+                  @previous_count[attributes],
+                  @previous_sum[attributes],
+                  @previous_scale[attributes],
+                  @previous_zero_count[attributes],
+                  @previous_positive[attributes].dup,
+                  @previous_negative[attributes].dup,
+                  0, # flags
+                  nil, # exemplars
+                  @previous_min[attributes],
+                  @previous_max[attributes],
+                  @zero_threshold
+                )
+
+                merged_data_points[attributes] = merged_hdp
+              end
+              # rubocop:enable Metrics/BlockLength
+
+              # when you have no local_data_points, the loop from cumulative aggregation will not run
+              # so return last merged data points if exists
+              if data_points.empty? && !@previous_positive.empty?
+                @previous_positive.each_key do |attributes|
+                  merged_hdp = ExponentialHistogramDataPoint.new(
+                    attributes,
+                    start_time,
+                    end_time,
+                    @previous_count[attributes],
+                    @previous_sum[attributes],
+                    @previous_scale[attributes],
+                    @previous_zero_count[attributes],
+                    @previous_positive[attributes].dup,
+                    @previous_negative[attributes].dup,
+                    0, # flags
+                    nil, # exemplars
+                    @previous_min[attributes],
+                    @previous_max[attributes],
+                    @zero_threshold
+                  )
+                  merged_data_points[attributes] = merged_hdp
+                end
               end
+
+              # clear data_points since the data is merged into previous_* already;
+              # otherwise we will have duplicated data_points in the next collect
+              data_points.clear
+              merged_data_points.values # return array
             end
           end
+          # rubocop:enable Metrics/MethodLength

+          # this is aggregate in python; there is no merge in aggregate; but rescale happened
           # rubocop:disable Metrics/MethodLength
           def update(amount, attributes, data_points)
             # fetch or initialize the ExponentialHistogramDataPoint
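Note: the new else branch gives collect a cumulative path. Each collect cycle folds the freshly recorded data points into the per-attributes @previous_* state and reports the running totals, while the delta path above it reports only what arrived since the last collect and then clears it. A toy illustration of that difference in plain Ruby (not SDK code):

    # Per-cycle measurements vs. what each temporality reports at collect time.
    recorded_per_cycle = [[3, 5], [2], []] # three collect cycles, last one records nothing

    delta_reports = recorded_per_cycle.map(&:sum)
    # => [8, 2, 0]   each collect returns only the new measurements

    running = 0
    cumulative_reports = recorded_per_cycle.map { |cycle| running += cycle.sum }
    # => [8, 10, 10] each collect returns everything merged so far, which is why
    #                the empty-cycle branch above still re-emits the previous state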
@@ -78,6 +206,7 @@ module OpenTelemetry
                 max = -Float::INFINITY
               end

+              # this code block will only be executed if no data_points was found with the attributes
               data_points[attributes] = ExponentialHistogramDataPoint.new(
                 attributes,
                 nil, # :start_time_unix_nano
@@ -203,7 +332,8 @@ module OpenTelemetry
           end

           def downscale(change, positive, negative)
-            return if change
+            return if change == 0
+            raise ArgumentError, 'Invalid change of scale' if change < 0

             positive.downscale(change)
             negative.downscale(change)
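Note: downscale (defined earlier in this class and now guarded against negative changes) lowers the histogram's scale, and lowering the scale by n merges every 2**n adjacent buckets into one coarser bucket. A rough sketch of that effect on a dense counts array, assuming the first bucket index is aligned to an even boundary (illustrative only, not the SDK's Buckets implementation):

    counts = [1, 2, 3, 4, 5, 6]

    change = 1
    coarser = counts.each_slice(2**change).map(&:sum)
    # => [3, 7, 11]  six fine-grained buckets collapse into three coarser ones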
@@ -217,11 +347,76 @@ module OpenTelemetry
           end

           def validate_size(size)
-            raise ArgumentError, "
-            raise ArgumentError, "
+            raise ArgumentError, "Buckets min size #{size} is smaller than minimum min size #{MIN_MAX_SIZE}" if size < MIN_MAX_SIZE
+            raise ArgumentError, "Buckets max size #{size} is larger than maximum max size #{MAX_MAX_SIZE}" if size > MAX_MAX_SIZE

             size
           end
+
+          # checked, only issue is if @previous_scale is nil, then get_low_high may throw error
+          def get_low_high_previous_current(previous_buckets, current_buckets, previous_scale, current_scale, min_scale)
+            previous_low, previous_high = get_low_high(previous_buckets, previous_scale, min_scale)
+            current_low, current_high = get_low_high(current_buckets, current_scale, min_scale)
+
+            if current_low > current_high
+              [previous_low, previous_high]
+            elsif previous_low > previous_high
+              [current_low, current_high]
+            else
+              [[previous_low, current_low].min, [previous_high, current_high].max]
+            end
+          end
+
+          def get_low_high(buckets, scale, min_scale)
+            return [0, -1] if buckets.nil? || buckets.counts == [0] || buckets.counts.empty?
+
+            shift = scale - min_scale
+            [buckets.index_start >> shift, buckets.index_end >> shift]
+          end
+
+          def merge_buckets(previous_buckets, current_buckets, current_scale, min_scale, aggregation_temporality)
+            return unless current_buckets && !current_buckets.counts.empty?
+
+            current_change = current_scale - min_scale
+
+            # when we iterate counts, we don't use offset counts
+            current_buckets.instance_variable_get(:@counts).each_with_index do |current_bucket, current_bucket_index|
+              next if current_bucket == 0
+
+              current_index = current_buckets.index_base + current_bucket_index
+              current_index -= current_buckets.counts.size if current_index > current_buckets.index_end
+
+              inds = current_index >> current_change
+
+              # Grow previous buckets if needed to accommodate the new index
+              if inds < previous_buckets.index_start
+                span = previous_buckets.index_end - inds
+
+                raise StandardError, 'Incorrect merge scale' if span >= @size
+
+                previous_buckets.grow(span + 1, @size) if span >= previous_buckets.counts.size
+
+                previous_buckets.index_start = inds
+              end
+
+              if inds > previous_buckets.index_end
+                span = inds - previous_buckets.index_start
+
+                raise StandardError, 'Incorrect merge scale' if span >= @size
+
+                previous_buckets.grow(span + 1, @size) if span >= previous_buckets.counts.size
+
+                previous_buckets.index_end = inds
+              end
+
+              bucket_index = inds - previous_buckets.index_base
+              bucket_index += previous_buckets.counts.size if bucket_index < 0
+
+              # For delta temporality in merge, we subtract (this shouldn't normally happen in our use case)
+              increment = aggregation_temporality == :delta ? -current_bucket : current_bucket
+              previous_buckets.increment_bucket(bucket_index, increment)
+            end
+          end
         end
       end
     end
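Note: the helpers above all rely on the same rescaling identity: when the scale drops by shift, a bucket index at the finer scale maps to index >> shift at the coarser scale (get_low_high shifts index_start/index_end, merge_buckets computes inds = current_index >> current_change). Ruby's right shift on negative integers floors toward negative infinity, which keeps negative indices aligned. A small illustrative sketch:

    # How bucket indices collapse when the scale drops by 2.
    shift = 2
    (-5..5).each { |index| puts format('%3d -> %2d', index, index >> shift) }
    # -5 -> -2, -4..-1 -> -1, 0..3 -> 0, 4..5 -> 1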
@@ -27,7 +27,6 @@ module OpenTelemetry
           old_positive_limit = size - bias

           new_size = [2**Math.log2(needed).ceil, max_size].min
-
           new_positive_limit = new_size - bias

           tmp = Array.new(new_size, 0)
@@ -105,6 +104,15 @@ module OpenTelemetry
         def increment_bucket(bucket_index, increment = 1)
           @counts[bucket_index] += increment
         end
+
+        def copy_empty
+          new_buckets = self.class.new
+          new_buckets.instance_variable_set(:@counts, Array.new(@counts.size, 0))
+          new_buckets.instance_variable_set(:@index_base, @index_base)
+          new_buckets.instance_variable_set(:@index_start, @index_start)
+          new_buckets.instance_variable_set(:@index_end, @index_end)
+          new_buckets
+        end
       end
     end
   end
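Note: copy_empty is what the cumulative collect path uses to seed @previous_positive / @previous_negative the first time an attributes set is seen: it keeps the bucket index geometry of the current buckets but starts every count at zero. A minimal stand-in to show the idea (illustrative only, not the SDK's Buckets class):

    BucketsLike = Struct.new(:counts, :index_base, :index_start, :index_end) do
      def copy_empty
        self.class.new(Array.new(counts.size, 0), index_base, index_start, index_end)
      end
    end

    current = BucketsLike.new([4, 0, 7], 2, 2, 4)
    empty   = current.copy_empty
    empty.counts      # => [0, 0, 0]  same size, all zero
    empty.index_start # => 2 (index geometry preserved)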
metadata
CHANGED

@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: opentelemetry-metrics-sdk
 version: !ruby/object:Gem::Version
-  version: 0.11.
+  version: 0.11.2
 platform: ruby
 authors:
 - OpenTelemetry Authors
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-
+date: 2025-12-03 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: opentelemetry-api
@@ -251,10 +251,10 @@ homepage: https://github.com/open-telemetry/opentelemetry-ruby
 licenses:
 - Apache-2.0
 metadata:
-  changelog_uri: https://open-telemetry.github.io/opentelemetry-ruby/opentelemetry-metrics-sdk/v0.11.
+  changelog_uri: https://open-telemetry.github.io/opentelemetry-ruby/opentelemetry-metrics-sdk/v0.11.2/file.CHANGELOG.html
   source_code_uri: https://github.com/open-telemetry/opentelemetry-ruby/tree/main/metrics_sdk
   bug_tracker_uri: https://github.com/open-telemetry/opentelemetry-ruby/issues
-  documentation_uri: https://open-telemetry.github.io/opentelemetry-ruby/opentelemetry-metrics-sdk/v0.11.
+  documentation_uri: https://open-telemetry.github.io/opentelemetry-ruby/opentelemetry-metrics-sdk/v0.11.2
 post_install_message:
 rdoc_options: []
 require_paths: