opentelemetry-metrics-sdk 0.11.0 → 0.11.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: ac17345b1f4d2a296aa95591e64264cd7bc03e94b5b6cac35ff0a0067b8a457f
4
- data.tar.gz: d22cdd5f1c3f5f0d9745e330874d47c232bb451a986a4304f003e796a598abf7
3
+ metadata.gz: 0f62ae36ad71651af824a64c15bd3ffff2e32c08cd161655a013955736f97a95
4
+ data.tar.gz: 7c1a32329e65f2a96e5ee4c6715067cb488e2b5c88e2642d3668d2eddfbe9ce3
5
5
  SHA512:
6
- metadata.gz: ec0286d37a8a6143b239931c7f68853b5b9d5053ca00da5edfefb665713aef4522995f3ed082674e63c026e99883ef7a59ac7a4dc2b0803d8c17d4a11e4ee58e
7
- data.tar.gz: 1ac16f88eec4c62c7efeb494333e2c63d1f55f8d90e027ea9750d76dc7fe72c503fa03c9fa24fc91543e041df2957daf977fe45a7b072e9bdfbe4408312cabf0
6
+ metadata.gz: 8286d7d68a62f7629e05fbfc7718743b583268a24882f390e60fdcdf584a27dc7aef7d487d209caafd179966c713969334c06fb2ad230f3865686627b62a88ef
7
+ data.tar.gz: e43fca566da12585f81251e896175a4b627af4403aa9d146d733ebce117cadf08836d1687d01acfe024c7f9ef06a395f3122ba080201249677eb9b154b9bedd9
data/CHANGELOG.md CHANGED
@@ -1,5 +1,13 @@
1
1
  # Release History: opentelemetry-metrics-sdk
2
2
 
3
+ ### v0.11.2 / 2025-12-02
4
+
5
+ * FIXED: Add merge logic for exponential histogram when the temporality is cumulative
6
+
7
+ ### v0.11.1 / 2025-11-04
8
+
9
+ * FIXED: Do not log error when there are no metrics to export
10
+
3
11
  ### v0.11.0 / 2025-10-28
4
12
 
5
13
  * ADDED: Add logging about export status to Metrics SDK
@@ -9,6 +9,7 @@ require_relative 'exponential_histogram/log2e_scale_factor'
9
9
  require_relative 'exponential_histogram/ieee_754'
10
10
  require_relative 'exponential_histogram/logarithm_mapping'
11
11
  require_relative 'exponential_histogram/exponent_mapping'
12
+ require_relative 'exponential_histogram_data_point'
12
13
 
13
14
  module OpenTelemetry
14
15
  module SDK
@@ -44,8 +45,20 @@ module OpenTelemetry
44
45
  @scale = validate_scale(max_scale)
45
46
 
46
47
  @mapping = new_mapping(@scale)
48
+
49
+ # Previous state for cumulative aggregation
50
+ @previous_positive = {} # nil
51
+ @previous_negative = {} # nil
52
+ @previous_min = {} # Float::INFINITY
53
+ @previous_max = {} # -Float::INFINITY
54
+ @previous_sum = {} # 0
55
+ @previous_count = {} # 0
56
+ @previous_zero_count = {} # 0
57
+ @previous_scale = {} # nil
47
58
  end
48
59
 
60
+ # when aggregation temporality is cumulative, merge and downscale will happen.
61
+ # rubocop:disable Metrics/MethodLength
49
62
  def collect(start_time, end_time, data_points)
50
63
  if @aggregation_temporality.delta?
51
64
  # Set timestamps and 'move' data point values to result.
@@ -57,18 +70,133 @@ module OpenTelemetry
57
70
  data_points.clear
58
71
  hdps
59
72
  else
60
- # Update timestamps and take a snapshot.
61
- data_points.values.map! do |hdp|
62
- hdp.start_time_unix_nano ||= start_time # Start time of a data point is from the first observation.
63
- hdp.time_unix_nano = end_time
64
- hdp = hdp.dup
65
- hdp.positive = hdp.positive.dup
66
- hdp.negative = hdp.negative.dup
67
- hdp
73
+ # CUMULATIVE temporality - merge current data_points to previous data_points
74
+ # and only keep the merged data_points in @previous_*
75
+
76
+ merged_data_points = {}
77
+
78
+ # this will slow down the operation especially if large amount of data_points present
79
+ # but it should be fine since with cumulative, the data_points are merged into previous_* and not kept in data_points
80
+ # rubocop:disable Metrics/BlockLength
81
+ data_points.each do |attributes, hdp|
82
+ # Store current values
83
+ current_positive = hdp.positive
84
+ current_negative = hdp.negative
85
+ current_sum = hdp.sum
86
+ current_min = hdp.min
87
+ current_max = hdp.max
88
+ current_count = hdp.count
89
+ current_zero_count = hdp.zero_count
90
+ current_scale = hdp.scale
91
+
92
+ # Setup previous positive, negative bucket and scale based on three different cases
93
+ @previous_positive[attributes] = current_positive.copy_empty if @previous_positive[attributes].nil?
94
+ @previous_negative[attributes] = current_negative.copy_empty if @previous_negative[attributes].nil?
95
+ @previous_scale[attributes] = current_scale if @previous_scale[attributes].nil?
96
+
97
+ # Determine minimum scale for merging
98
+ min_scale = [@previous_scale[attributes], current_scale].min
99
+
100
+ # Calculate ranges for positive and negative buckets
101
+ low_positive, high_positive = get_low_high_previous_current(
102
+ @previous_positive[attributes],
103
+ current_positive,
104
+ @previous_scale[attributes],
105
+ current_scale,
106
+ min_scale
107
+ )
108
+ low_negative, high_negative = get_low_high_previous_current(
109
+ @previous_negative[attributes],
110
+ current_negative,
111
+ @previous_scale[attributes],
112
+ current_scale,
113
+ min_scale
114
+ )
115
+
116
+ # Adjust min_scale based on bucket size constraints
117
+ min_scale = [
118
+ min_scale - get_scale_change(low_positive, high_positive),
119
+ min_scale - get_scale_change(low_negative, high_negative)
120
+ ].min
121
+
122
+ # Downscale previous buckets if necessary
123
+ downscale_change = @previous_scale[attributes] - min_scale
124
+ downscale(downscale_change, @previous_positive[attributes], @previous_negative[attributes])
125
+
126
+ # Merge current buckets into previous buckets (kind of like update); it's always :cumulative
127
+ merge_buckets(@previous_positive[attributes], current_positive, current_scale, min_scale, @aggregation_temporality)
128
+ merge_buckets(@previous_negative[attributes], current_negative, current_scale, min_scale, @aggregation_temporality)
129
+
130
+ # initialize min, max, sum, count, zero_count for first time
131
+ @previous_min[attributes] = Float::INFINITY if @previous_min[attributes].nil?
132
+ @previous_max[attributes] = -Float::INFINITY if @previous_max[attributes].nil?
133
+ @previous_sum[attributes] = 0 if @previous_sum[attributes].nil?
134
+ @previous_count[attributes] = 0 if @previous_count[attributes].nil?
135
+ @previous_zero_count[attributes] = 0 if @previous_zero_count[attributes].nil?
136
+
137
+ # Update aggregated values
138
+ @previous_min[attributes] = [@previous_min[attributes], current_min].min
139
+ @previous_max[attributes] = [@previous_max[attributes], current_max].max
140
+ @previous_sum[attributes] += current_sum
141
+ @previous_count[attributes] += current_count
142
+ @previous_zero_count[attributes] += current_zero_count
143
+ @previous_scale[attributes] = min_scale
144
+
145
+ # Create merged data point
146
+ merged_hdp = ExponentialHistogramDataPoint.new(
147
+ attributes,
148
+ start_time,
149
+ end_time,
150
+ @previous_count[attributes],
151
+ @previous_sum[attributes],
152
+ @previous_scale[attributes],
153
+ @previous_zero_count[attributes],
154
+ @previous_positive[attributes].dup,
155
+ @previous_negative[attributes].dup,
156
+ 0, # flags
157
+ nil, # exemplars
158
+ @previous_min[attributes],
159
+ @previous_max[attributes],
160
+ @zero_threshold
161
+ )
162
+
163
+ merged_data_points[attributes] = merged_hdp
164
+ end
165
+ # rubocop:enable Metrics/BlockLength
166
+
167
+ # when you have no local_data_points, the loop from cumulative aggregation will not run
168
+ # so return last merged data points if exists
169
+ if data_points.empty? && !@previous_positive.empty?
170
+ @previous_positive.each_key do |attributes|
171
+ merged_hdp = ExponentialHistogramDataPoint.new(
172
+ attributes,
173
+ start_time,
174
+ end_time,
175
+ @previous_count[attributes],
176
+ @previous_sum[attributes],
177
+ @previous_scale[attributes],
178
+ @previous_zero_count[attributes],
179
+ @previous_positive[attributes].dup,
180
+ @previous_negative[attributes].dup,
181
+ 0, # flags
182
+ nil, # exemplars
183
+ @previous_min[attributes],
184
+ @previous_max[attributes],
185
+ @zero_threshold
186
+ )
187
+ merged_data_points[attributes] = merged_hdp
188
+ end
68
189
  end
190
+
191
+ # clear data_points since the data is merged into previous_* already;
192
+ # otherwise we will have duplicated data_points in the next collect
193
+ data_points.clear
194
+ merged_data_points.values # return array
69
195
  end
70
196
  end
197
+ # rubocop:enable Metrics/MethodLength
71
198
 
199
+ # this corresponds to `aggregate` in the Python SDK; there is no merge in aggregate, but rescaling happens
72
200
  # rubocop:disable Metrics/MethodLength
73
201
  def update(amount, attributes, data_points)
74
202
  # fetch or initialize the ExponentialHistogramDataPoint
@@ -78,6 +206,7 @@ module OpenTelemetry
78
206
  max = -Float::INFINITY
79
207
  end
80
208
 
209
+ # this code block will only be executed if no data_points were found with the attributes
81
210
  data_points[attributes] = ExponentialHistogramDataPoint.new(
82
211
  attributes,
83
212
  nil, # :start_time_unix_nano
@@ -203,7 +332,8 @@ module OpenTelemetry
203
332
  end
204
333
 
205
334
  def downscale(change, positive, negative)
206
- return if change <= 0
335
+ return if change == 0
336
+ raise ArgumentError, 'Invalid change of scale' if change < 0
207
337
 
208
338
  positive.downscale(change)
209
339
  negative.downscale(change)
@@ -217,11 +347,76 @@ module OpenTelemetry
217
347
  end
218
348
 
219
349
  def validate_size(size)
220
- raise ArgumentError, "Max size #{size} is smaller than minimum size #{MIN_MAX_SIZE}" if size < MIN_MAX_SIZE
221
- raise ArgumentError, "Max size #{size} is larger than maximum size #{MAX_MAX_SIZE}" if size > MAX_MAX_SIZE
350
+ raise ArgumentError, "Buckets min size #{size} is smaller than minimum min size #{MIN_MAX_SIZE}" if size < MIN_MAX_SIZE
351
+ raise ArgumentError, "Buckets max size #{size} is larger than maximum max size #{MAX_MAX_SIZE}" if size > MAX_MAX_SIZE
222
352
 
223
353
  size
224
354
  end
355
+
356
+ # checked; the only issue is that if @previous_scale is nil, get_low_high may raise an error
357
+ def get_low_high_previous_current(previous_buckets, current_buckets, previous_scale, current_scale, min_scale)
358
+ previous_low, previous_high = get_low_high(previous_buckets, previous_scale, min_scale)
359
+ current_low, current_high = get_low_high(current_buckets, current_scale, min_scale)
360
+
361
+ if current_low > current_high
362
+ [previous_low, previous_high]
363
+ elsif previous_low > previous_high
364
+ [current_low, current_high]
365
+ else
366
+ [[previous_low, current_low].min, [previous_high, current_high].max]
367
+ end
368
+ end
369
+
370
+ def get_low_high(buckets, scale, min_scale)
371
+ return [0, -1] if buckets.nil? || buckets.counts == [0] || buckets.counts.empty?
372
+
373
+ shift = scale - min_scale
374
+ [buckets.index_start >> shift, buckets.index_end >> shift]
375
+ end
376
+
377
+ def merge_buckets(previous_buckets, current_buckets, current_scale, min_scale, aggregation_temporality)
378
+ return unless current_buckets && !current_buckets.counts.empty?
379
+
380
+ current_change = current_scale - min_scale
381
+
382
+ # when we iterate counts, we don't use offset counts
383
+ current_buckets.instance_variable_get(:@counts).each_with_index do |current_bucket, current_bucket_index|
384
+ next if current_bucket == 0
385
+
386
+ current_index = current_buckets.index_base + current_bucket_index
387
+ current_index -= current_buckets.counts.size if current_index > current_buckets.index_end
388
+
389
+ inds = current_index >> current_change
390
+
391
+ # Grow previous buckets if needed to accommodate the new index
392
+ if inds < previous_buckets.index_start
393
+ span = previous_buckets.index_end - inds
394
+
395
+ raise StandardError, 'Incorrect merge scale' if span >= @size
396
+
397
+ previous_buckets.grow(span + 1, @size) if span >= previous_buckets.counts.size
398
+
399
+ previous_buckets.index_start = inds
400
+ end
401
+
402
+ if inds > previous_buckets.index_end
403
+ span = inds - previous_buckets.index_start
404
+
405
+ raise StandardError, 'Incorrect merge scale' if span >= @size
406
+
407
+ previous_buckets.grow(span + 1, @size) if span >= previous_buckets.counts.size
408
+
409
+ previous_buckets.index_end = inds
410
+ end
411
+
412
+ bucket_index = inds - previous_buckets.index_base
413
+ bucket_index += previous_buckets.counts.size if bucket_index < 0
414
+
415
+ # For delta temporality in merge, we subtract (this shouldn't normally happen in our use case)
416
+ increment = aggregation_temporality == :delta ? -current_bucket : current_bucket
417
+ previous_buckets.increment_bucket(bucket_index, increment)
418
+ end
419
+ end
225
420
  end
226
421
  end
227
422
  end
@@ -27,7 +27,6 @@ module OpenTelemetry
27
27
  old_positive_limit = size - bias
28
28
 
29
29
  new_size = [2**Math.log2(needed).ceil, max_size].min
30
-
31
30
  new_positive_limit = new_size - bias
32
31
 
33
32
  tmp = Array.new(new_size, 0)
@@ -105,6 +104,15 @@ module OpenTelemetry
105
104
  def increment_bucket(bucket_index, increment = 1)
106
105
  @counts[bucket_index] += increment
107
106
  end
107
+
108
+ def copy_empty
109
+ new_buckets = self.class.new
110
+ new_buckets.instance_variable_set(:@counts, Array.new(@counts.size, 0))
111
+ new_buckets.instance_variable_set(:@index_base, @index_base)
112
+ new_buckets.instance_variable_set(:@index_start, @index_start)
113
+ new_buckets.instance_variable_set(:@index_end, @index_end)
114
+ new_buckets
115
+ end
108
116
  end
109
117
  end
110
118
  end
@@ -130,12 +130,7 @@ module OpenTelemetry
130
130
  end
131
131
 
132
132
  def report_result(result_code)
133
- if result_code == Export::SUCCESS
134
- OpenTelemetry.logger.debug 'Successfully exported metrics'
135
- else
136
- OpenTelemetry.handle_error(exception: ExportError.new('Unable to export metrics'))
137
- OpenTelemetry.logger.error("Result code: #{result_code}")
138
- end
133
+ OpenTelemetry.logger.debug 'Successfully exported metrics' if result_code == Export::SUCCESS
139
134
  end
140
135
 
141
136
  def lock(&block)
@@ -56,7 +56,7 @@ module OpenTelemetry
56
56
  end
57
57
  end
58
58
  else
59
- @registered_views.each do |view|
59
+ @registered_views.each do |view, data_points|
60
60
  @mutex.synchronize do
61
61
  @callback.each do |cb|
62
62
  value = safe_guard_callback(cb, timeout: timeout)
@@ -64,7 +64,7 @@ module OpenTelemetry
64
64
 
65
65
  merged_attributes = attributes || {}
66
66
  merged_attributes.merge!(view.attribute_keys)
67
- view.aggregation.update(value, merged_attributes, @data_points) if view.valid_aggregation?
67
+ view.aggregation.update(value, merged_attributes, data_points) if view.valid_aggregation?
68
68
  end
69
69
  end
70
70
  end
@@ -78,19 +78,19 @@ module OpenTelemetry
78
78
  thread = Thread.new do
79
79
  result = callback.call
80
80
  rescue StandardError => e
81
- OpenTelemetry.logger.error("Error invoking callback: #{e.message}")
81
+ OpenTelemetry.handle_error(exception: e, message: 'Error invoking callback.')
82
82
  result = :error
83
83
  end
84
84
 
85
85
  unless thread.join(timeout)
86
86
  thread.kill
87
- OpenTelemetry.logger.error("Timeout while invoking callback after #{timeout} seconds")
87
+ OpenTelemetry.handle_error(message: "Timeout while invoking callback after #{timeout} seconds")
88
88
  return nil
89
89
  end
90
90
 
91
91
  result == :error ? nil : result
92
92
  rescue StandardError => e
93
- OpenTelemetry.logger.error("Unexpected error in callback execution: #{e.message}")
93
+ OpenTelemetry.handle_error(exception: e, message: 'Unexpected error in callback execution.')
94
94
  nil
95
95
  end
96
96
  end
@@ -32,7 +32,7 @@ module OpenTelemetry
32
32
  @instrumentation_scope = instrumentation_scope
33
33
  @default_aggregation = aggregation
34
34
  @data_points = {}
35
- @registered_views = []
35
+ @registered_views = {}
36
36
 
37
37
  find_registered_view
38
38
  @mutex = Mutex.new
@@ -43,12 +43,14 @@ module OpenTelemetry
43
43
  metric_data = []
44
44
 
45
45
  # data points are required to export over OTLP
46
- return metric_data if @data_points.empty?
46
+ return metric_data if empty_data_point?
47
47
 
48
48
  if @registered_views.empty?
49
49
  metric_data << aggregate_metric_data(start_time, end_time)
50
50
  else
51
- @registered_views.each { |view| metric_data << aggregate_metric_data(start_time, end_time, aggregation: view.aggregation) }
51
+ @registered_views.each do |view, data_points|
52
+ metric_data << aggregate_metric_data(start_time, end_time, aggregation: view.aggregation, data_points: data_points)
53
+ end
52
54
  end
53
55
 
54
56
  metric_data
@@ -60,20 +62,21 @@ module OpenTelemetry
60
62
  if @registered_views.empty?
61
63
  @mutex.synchronize { @default_aggregation.update(value, attributes, @data_points) }
62
64
  else
63
- @registered_views.each do |view|
65
+ @registered_views.each do |view, data_points|
64
66
  @mutex.synchronize do
65
67
  attributes ||= {}
66
68
  attributes.merge!(view.attribute_keys)
67
- view.aggregation.update(value, attributes, @data_points) if view.valid_aggregation?
69
+ view.aggregation.update(value, attributes, data_points) if view.valid_aggregation?
68
70
  end
69
71
  end
70
72
  end
71
73
  end
72
74
 
73
- def aggregate_metric_data(start_time, end_time, aggregation: nil)
75
+ def aggregate_metric_data(start_time, end_time, aggregation: nil, data_points: nil)
74
76
  aggregator = aggregation || @default_aggregation
75
77
  is_monotonic = aggregator.respond_to?(:monotonic?) ? aggregator.monotonic? : nil
76
78
  aggregation_temporality = aggregator.respond_to?(:aggregation_temporality) ? aggregator.aggregation_temporality : nil
79
+ data_point = data_points || @data_points
77
80
 
78
81
  MetricData.new(
79
82
  @name,
@@ -82,7 +85,7 @@ module OpenTelemetry
82
85
  @instrument_kind,
83
86
  @meter_provider.resource,
84
87
  @instrumentation_scope,
85
- aggregator.collect(start_time, end_time, @data_points),
88
+ aggregator.collect(start_time, end_time, data_point),
86
89
  aggregation_temporality,
87
90
  start_time,
88
91
  end_time,
@@ -93,7 +96,17 @@ module OpenTelemetry
93
96
  def find_registered_view
94
97
  return if @meter_provider.nil?
95
98
 
96
- @meter_provider.registered_views.each { |view| @registered_views << view if view.match_instrument?(self) }
99
+ @meter_provider.registered_views.each { |view| @registered_views[view] = {} if view.match_instrument?(self) }
100
+ end
101
+
102
+ def empty_data_point?
103
+ if @registered_views.empty?
104
+ @data_points.empty?
105
+ else
106
+ @registered_views.each_value do |data_points|
107
+ return false unless data_points.empty?
108
+ end
109
+ end
97
110
  end
98
111
 
99
112
  def to_s
@@ -8,7 +8,7 @@ module OpenTelemetry
8
8
  module SDK
9
9
  module Metrics
10
10
  # Current OpenTelemetry metrics sdk version
11
- VERSION = '0.11.0'
11
+ VERSION = '0.11.2'
12
12
  end
13
13
  end
14
14
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: opentelemetry-metrics-sdk
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.11.0
4
+ version: 0.11.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - OpenTelemetry Authors
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2025-10-28 00:00:00.000000000 Z
11
+ date: 2025-12-03 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: opentelemetry-api
@@ -251,10 +251,10 @@ homepage: https://github.com/open-telemetry/opentelemetry-ruby
251
251
  licenses:
252
252
  - Apache-2.0
253
253
  metadata:
254
- changelog_uri: https://open-telemetry.github.io/opentelemetry-ruby/opentelemetry-metrics-sdk/v0.11.0/file.CHANGELOG.html
254
+ changelog_uri: https://open-telemetry.github.io/opentelemetry-ruby/opentelemetry-metrics-sdk/v0.11.2/file.CHANGELOG.html
255
255
  source_code_uri: https://github.com/open-telemetry/opentelemetry-ruby/tree/main/metrics_sdk
256
256
  bug_tracker_uri: https://github.com/open-telemetry/opentelemetry-ruby/issues
257
- documentation_uri: https://open-telemetry.github.io/opentelemetry-ruby/opentelemetry-metrics-sdk/v0.11.0
257
+ documentation_uri: https://open-telemetry.github.io/opentelemetry-ruby/opentelemetry-metrics-sdk/v0.11.2
258
258
  post_install_message:
259
259
  rdoc_options: []
260
260
  require_paths: