opentelemetry-instrumentation-botocore 0.51b0__py3-none-any.whl → 0.52b1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
@@ -15,15 +15,35 @@
  from __future__ import annotations

  import json
- from typing import Callable, Dict, Union
+ from os import environ
+ from typing import Any, Callable, Dict, Iterator, Sequence, Union

  from botocore.eventstream import EventStream, EventStreamError
  from wrapt import ObjectProxy

+ from opentelemetry._events import Event
+ from opentelemetry.instrumentation.botocore.environment_variables import (
+     OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT,
+ )
+ from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
+     GEN_AI_SYSTEM,
+     GenAiSystemValues,
+ )
+
  _StreamDoneCallableT = Callable[[Dict[str, Union[int, str]]], None]
  _StreamErrorCallableT = Callable[[Exception], None]


+ def _decode_tool_use(tool_use):
+     # input get sent encoded in json
+     if "input" in tool_use:
+         try:
+             tool_use["input"] = json.loads(tool_use["input"])
+         except json.JSONDecodeError:
+             pass
+     return tool_use
+
+
  # pylint: disable=abstract-method
  class ConverseStreamWrapper(ObjectProxy):
      """Wrapper for botocore.eventstream.EventStream"""
@@ -39,8 +59,11 @@ class ConverseStreamWrapper(ObjectProxy):
          self._stream_done_callback = stream_done_callback
          self._stream_error_callback = stream_error_callback
          # accumulating things in the same shape of non-streaming version
-         # {"usage": {"inputTokens": 0, "outputTokens": 0}, "stopReason": "finish"}
+         # {"usage": {"inputTokens": 0, "outputTokens": 0}, "stopReason": "finish", "output": {"message": {"role": "", "content": [{"text": ""}]}
          self._response = {}
+         self._message = None
+         self._content_block = {}
+         self._record_message = False

      def __iter__(self):
          try:
@@ -52,22 +75,52 @@ class ConverseStreamWrapper(ObjectProxy):
              raise

      def _process_event(self, event):
+         # pylint: disable=too-many-branches
          if "messageStart" in event:
              # {'messageStart': {'role': 'assistant'}}
+             if event["messageStart"].get("role") == "assistant":
+                 self._record_message = True
+                 self._message = {"role": "assistant", "content": []}
+             return
+
+         if "contentBlockStart" in event:
+             # {'contentBlockStart': {'start': {'toolUse': {'toolUseId': 'id', 'name': 'func_name'}}, 'contentBlockIndex': 1}}
+             start = event["contentBlockStart"].get("start", {})
+             if "toolUse" in start:
+                 tool_use = _decode_tool_use(start["toolUse"])
+                 self._content_block = {"toolUse": tool_use}
              return

          if "contentBlockDelta" in event:
              # {'contentBlockDelta': {'delta': {'text': "Hello"}, 'contentBlockIndex': 0}}
+             # {'contentBlockDelta': {'delta': {'toolUse': {'input': '{"location":"Seattle"}'}}, 'contentBlockIndex': 1}}
+             if self._record_message:
+                 delta = event["contentBlockDelta"].get("delta", {})
+                 if "text" in delta:
+                     self._content_block.setdefault("text", "")
+                     self._content_block["text"] += delta["text"]
+                 elif "toolUse" in delta:
+                     tool_use = _decode_tool_use(delta["toolUse"])
+                     self._content_block["toolUse"].update(tool_use)
              return

          if "contentBlockStop" in event:
              # {'contentBlockStop': {'contentBlockIndex': 0}}
+             if self._record_message:
+                 self._message["content"].append(self._content_block)
+                 self._content_block = {}
              return

          if "messageStop" in event:
              # {'messageStop': {'stopReason': 'end_turn'}}
              if stop_reason := event["messageStop"].get("stopReason"):
                  self._response["stopReason"] = stop_reason
+
+             if self._record_message:
+                 self._response["output"] = {"message": self._message}
+                 self._record_message = False
+                 self._message = None
+
              return

          if "metadata" in event:
@@ -81,6 +134,7 @@ class ConverseStreamWrapper(ObjectProxy):
                      self._response["usage"]["outputTokens"] = output_tokens

              self._stream_done_callback(self._response)
+
              return


@@ -102,8 +156,12 @@ class InvokeModelWithResponseStreamWrapper(ObjectProxy):
          self._model_id = model_id

          # accumulating things in the same shape of the Converse API
-         # {"usage": {"inputTokens": 0, "outputTokens": 0}, "stopReason": "finish"}
+         # {"usage": {"inputTokens": 0, "outputTokens": 0}, "stopReason": "finish", "output": {"message": {"role": "", "content": [{"text": ""}]}
          self._response = {}
+         self._message = None
+         self._content_block = {}
+         self._tool_json_input_buf = ""
+         self._record_message = False

      def __iter__(self):
          try:
@@ -149,15 +207,30 @@ class InvokeModelWithResponseStreamWrapper(ObjectProxy):
              # "inputTokenCount":9,"outputTokenCount":128,"invocationLatency":3569,"firstByteLatency":2180
              # }
              self._process_invocation_metrics(invocation_metrics)
+
+             # transform the shape of the message to match other models
+             self._response["output"] = {
+                 "message": {"content": [{"text": chunk["outputText"]}]}
+             }
              self._stream_done_callback(self._response)

      def _process_amazon_nova_chunk(self, chunk):
+         # pylint: disable=too-many-branches
+         # TODO: handle tool calls!
          if "messageStart" in chunk:
              # {'messageStart': {'role': 'assistant'}}
+             if chunk["messageStart"].get("role") == "assistant":
+                 self._record_message = True
+                 self._message = {"role": "assistant", "content": []}
              return

          if "contentBlockDelta" in chunk:
              # {'contentBlockDelta': {'delta': {'text': "Hello"}, 'contentBlockIndex': 0}}
+             if self._record_message:
+                 delta = chunk["contentBlockDelta"].get("delta", {})
+                 if "text" in delta:
+                     self._content_block.setdefault("text", "")
+                     self._content_block["text"] += delta["text"]
              return

          if "contentBlockStop" in chunk:
@@ -168,6 +241,13 @@ class InvokeModelWithResponseStreamWrapper(ObjectProxy):
              # {'messageStop': {'stopReason': 'end_turn'}}
              if stop_reason := chunk["messageStop"].get("stopReason"):
                  self._response["stopReason"] = stop_reason
+
+             if self._record_message:
+                 self._message["content"].append(self._content_block)
+                 self._content_block = {}
+                 self._response["output"] = {"message": self._message}
+                 self._record_message = False
+                 self._message = None
              return

          if "metadata" in chunk:
@@ -184,24 +264,52 @@ class InvokeModelWithResponseStreamWrapper(ObjectProxy):
              return

      def _process_anthropic_claude_chunk(self, chunk):
-         # pylint: disable=too-many-return-statements
+         # pylint: disable=too-many-return-statements,too-many-branches
          if not (message_type := chunk.get("type")):
              return

          if message_type == "message_start":
              # {'type': 'message_start', 'message': {'id': 'id', 'type': 'message', 'role': 'assistant', 'model': 'claude-2.0', 'content': [], 'stop_reason': None, 'stop_sequence': None, 'usage': {'input_tokens': 18, 'output_tokens': 1}}}
+             if chunk.get("message", {}).get("role") == "assistant":
+                 self._record_message = True
+                 message = chunk["message"]
+                 self._message = {
+                     "role": message["role"],
+                     "content": message.get("content", []),
+                 }
              return

          if message_type == "content_block_start":
              # {'type': 'content_block_start', 'index': 0, 'content_block': {'type': 'text', 'text': ''}}
+             # {'type': 'content_block_start', 'index': 1, 'content_block': {'type': 'tool_use', 'id': 'id', 'name': 'func_name', 'input': {}}}
+             if self._record_message:
+                 block = chunk.get("content_block", {})
+                 if block.get("type") == "text":
+                     self._content_block = block
+                 elif block.get("type") == "tool_use":
+                     self._content_block = block
              return

          if message_type == "content_block_delta":
              # {'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': 'Here'}}
+             # {'type': 'content_block_delta', 'index': 1, 'delta': {'type': 'input_json_delta', 'partial_json': ''}}
+             if self._record_message:
+                 delta = chunk.get("delta", {})
+                 if delta.get("type") == "text_delta":
+                     self._content_block["text"] += delta.get("text", "")
+                 elif delta.get("type") == "input_json_delta":
+                     self._tool_json_input_buf += delta.get("partial_json", "")
              return

          if message_type == "content_block_stop":
              # {'type': 'content_block_stop', 'index': 0}
+             if self._tool_json_input_buf:
+                 self._content_block["input"] = self._tool_json_input_buf
+             self._message["content"].append(
+                 _decode_tool_use(self._content_block)
+             )
+             self._content_block = {}
+             self._tool_json_input_buf = ""
              return

          if message_type == "message_delta":
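For Anthropic Claude the tool-call arguments arrive as input_json_delta fragments, so the wrapper buffers the partial_json strings in _tool_json_input_buf and only stores (and later JSON-decodes) them once the block is complete. A small sketch of why the decode has to wait for content_block_stop (the fragments are hypothetical):

import json

fragments = ['{"loca', 'tion": "Sea', 'ttle"}']  # individual deltas are not valid JSON
buffered = "".join(fragments)
print(json.loads(buffered))  # {'location': 'Seattle'}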
@@ -218,5 +326,189 @@ class InvokeModelWithResponseStreamWrapper(ObjectProxy):
                  "amazon-bedrock-invocationMetrics"
              ):
                  self._process_invocation_metrics(invocation_metrics)
+
+             if self._record_message:
+                 self._response["output"] = {"message": self._message}
+                 self._record_message = False
+                 self._message = None
+
              self._stream_done_callback(self._response)
              return
+
+
+ def genai_capture_message_content() -> bool:
+     capture_content = environ.get(
+         OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, "false"
+     )
+     return capture_content.lower() == "true"
+
+
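genai_capture_message_content() is the opt-in switch for recording prompt and completion content on events: it returns True only when the environment variable is set to "true" (case-insensitive). A quick sketch, assuming the imported constant resolves to the environment variable of the same name:

import os

# Opt in before instrumenting; unset or any other value keeps content capture off.
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = "true"

flag = os.environ.get("OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT", "false")
print(flag.lower() == "true")  # True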
+ def extract_tool_calls(
+     message: dict[str, Any], capture_content: bool
+ ) -> Sequence[Dict[str, Any]] | None:
+     content = message.get("content")
+     if not content:
+         return None
+
+     tool_uses = [item["toolUse"] for item in content if "toolUse" in item]
+     if not tool_uses:
+         tool_uses = [
+             item for item in content if item.get("type") == "tool_use"
+         ]
+         tool_id_key = "id"
+     else:
+         tool_id_key = "toolUseId"
+
+     if not tool_uses:
+         return None
+
+     tool_calls = []
+     for tool_use in tool_uses:
+         tool_call = {"type": "function"}
+         if call_id := tool_use.get(tool_id_key):
+             tool_call["id"] = call_id
+
+         if function_name := tool_use.get("name"):
+             tool_call["function"] = {"name": function_name}
+
+         if (function_input := tool_use.get("input")) and capture_content:
+             tool_call.setdefault("function", {})
+             tool_call["function"]["arguments"] = function_input
+
+         tool_calls.append(tool_call)
+     return tool_calls
+
+
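extract_tool_calls normalizes both tool-use shapes (Converse "toolUse" items keyed by toolUseId, and InvokeModel/Anthropic "tool_use" items keyed by id) into function-style tool_calls entries, and only copies the arguments when content capture is enabled. A hypothetical Converse-style assistant message and the value the function above would return for it with capture_content=True:

message = {
    "role": "assistant",
    "content": [
        {
            "toolUse": {
                "toolUseId": "call-1",
                "name": "get_weather",
                "input": {"location": "Seattle"},
            }
        }
    ],
}
expected_tool_calls = [
    {
        "type": "function",
        "id": "call-1",
        "function": {"name": "get_weather", "arguments": {"location": "Seattle"}},
    }
]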
+ def extract_tool_results(
+     message: dict[str, Any], capture_content: bool
+ ) -> Iterator[Dict[str, Any]]:
+     content = message.get("content")
+     if not content:
+         return
+
+     # langchain sends content as string with InvokeModel and Anthropic Claude
+     if isinstance(content, str):
+         return
+
+     # Converse format
+     tool_results = [
+         item["toolResult"] for item in content if "toolResult" in item
+     ]
+     # InvokeModel anthropic.claude format
+     if not tool_results:
+         tool_results = [
+             item for item in content if item.get("type") == "tool_result"
+         ]
+         tool_id_key = "tool_use_id"
+     else:
+         tool_id_key = "toolUseId"
+
+     if not tool_results:
+         return
+
+     # if we have a user message with toolResult keys we need to send
+     # one tool event for each part of the content
+     for tool_result in tool_results:
+         body = {}
+         if tool_id := tool_result.get(tool_id_key):
+             body["id"] = tool_id
+         tool_content = tool_result.get("content")
+         if capture_content and tool_content:
+             body["content"] = tool_content
+
+         yield body
+
+
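extract_tool_results is the counterpart for user messages: it yields one body per "toolResult" (Converse) or "tool_result" (InvokeModel Anthropic Claude) item, keeping just the tool-use id unless content capture is on. A hypothetical Converse-style user message it would iterate over:

message = {
    "role": "user",
    "content": [
        {"toolResult": {"toolUseId": "call-1", "content": [{"text": "22 degrees"}]}}
    ],
}
# capture_content=False -> yields {"id": "call-1"}
# capture_content=True  -> yields {"id": "call-1", "content": [{"text": "22 degrees"}]}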
+ def message_to_event(
+     message: dict[str, Any], capture_content: bool
+ ) -> Iterator[Event]:
+     attributes = {GEN_AI_SYSTEM: GenAiSystemValues.AWS_BEDROCK.value}
+     role = message.get("role")
+     content = message.get("content")
+
+     body = {}
+     if capture_content and content:
+         body["content"] = content
+     if role == "assistant":
+         # the assistant message contains both tool calls and model thinking content
+         if tool_calls := extract_tool_calls(message, capture_content):
+             body["tool_calls"] = tool_calls
+     elif role == "user":
+         # in case of tool calls we send one tool event for tool call and one for the user event
+         for tool_body in extract_tool_results(message, capture_content):
+             yield Event(
+                 name="gen_ai.tool.message",
+                 attributes=attributes,
+                 body=tool_body,
+             )
+
+     yield Event(
+         name=f"gen_ai.{role}.message",
+         attributes=attributes,
+         body=body if body else None,
+     )
+
+
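message_to_event turns each request message into one or more events: a user message that carries tool results produces a gen_ai.tool.message event per result followed by the gen_ai.user.message event, while an assistant message carries its tool_calls in the body; all of them are attributed to the aws.bedrock GenAI system. A sketch of the shapes it would yield for the hypothetical messages above, with content capture off:

user_message_events = [
    ("gen_ai.tool.message", {"id": "call-1"}),
    ("gen_ai.user.message", None),  # body is None when nothing is captured
]
assistant_message_events = [
    (
        "gen_ai.assistant.message",
        {"tool_calls": [{"type": "function", "id": "call-1", "function": {"name": "get_weather"}}]},
    ),
]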
+ class _Choice:
+     def __init__(
+         self, message: dict[str, Any], finish_reason: str, index: int
+     ):
+         self.message = message
+         self.finish_reason = finish_reason
+         self.index = index
+
+     @classmethod
+     def from_converse(
+         cls, response: dict[str, Any], capture_content: bool
+     ) -> _Choice:
+         orig_message = response["output"]["message"]
+         if role := orig_message.get("role"):
+             message = {"role": role}
+         else:
+             # amazon.titan does not serialize the role
+             message = {}
+
+         if tool_calls := extract_tool_calls(orig_message, capture_content):
+             message["tool_calls"] = tool_calls
+         elif capture_content:
+             message["content"] = orig_message["content"]
+
+         return cls(message, response["stopReason"], index=0)
+
+     @classmethod
+     def from_invoke_amazon_titan(
+         cls, response: dict[str, Any], capture_content: bool
+     ) -> _Choice:
+         result = response["results"][0]
+         if capture_content:
+             message = {"content": result["outputText"]}
+         else:
+             message = {}
+         return cls(message, result["completionReason"], index=0)
+
+     @classmethod
+     def from_invoke_anthropic_claude(
+         cls, response: dict[str, Any], capture_content: bool
+     ) -> _Choice:
+         message = {"role": response["role"]}
+         if tool_calls := extract_tool_calls(response, capture_content):
+             message["tool_calls"] = tool_calls
+         elif capture_content:
+             message["content"] = response["content"]
+         return cls(message, response["stop_reason"], index=0)
+
+     def _to_body_dict(self) -> dict[str, Any]:
+         return {
+             "finish_reason": self.finish_reason,
+             "index": self.index,
+             "message": self.message,
+         }
+
+     def to_choice_event(self, **event_kwargs) -> Event:
+         attributes = {GEN_AI_SYSTEM: GenAiSystemValues.AWS_BEDROCK.value}
+         return Event(
+             name="gen_ai.choice",
+             attributes=attributes,
+             body=self._to_body_dict(),
+             **event_kwargs,
+         )
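_Choice gives the three response flavours (Converse, InvokeModel amazon.titan, InvokeModel anthropic.claude) one common gen_ai.choice event body. A hypothetical non-streaming Converse response, reduced to the fields the class reads, and the body that from_converse(...).to_choice_event() would emit for it with content capture enabled:

converse_response = {
    "output": {"message": {"role": "assistant", "content": [{"text": "Hi!"}]}},
    "stopReason": "end_turn",
}
expected_choice_body = {
    "finish_reason": "end_turn",
    "index": 0,
    "message": {"role": "assistant", "content": [{"text": "Hi!"}]},
}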
@@ -22,6 +22,7 @@ from opentelemetry.instrumentation.botocore.extensions.types import (
      _AttributeMapT,
      _AwsSdkCallContext,
      _AwsSdkExtension,
+     _BotocoreInstrumentorContext,
      _BotoResultT,
  )
  from opentelemetry.semconv.trace import DbSystemValues, SpanAttributes
@@ -370,7 +371,9 @@ class _DynamoDbExtension(_AwsSdkExtension):
      def _get_peer_name(self) -> str:
          return urlparse(self._call_context.endpoint_url).netloc

-     def before_service_call(self, span: Span):
+     def before_service_call(
+         self, span: Span, instrumentor_context: _BotocoreInstrumentorContext
+     ):
          if not span.is_recording() or self._op is None:
              return

@@ -380,7 +383,12 @@ class _DynamoDbExtension(_AwsSdkExtension):
              span.set_attribute,
          )

-     def on_success(self, span: Span, result: _BotoResultT):
+     def on_success(
+         self,
+         span: Span,
+         result: _BotoResultT,
+         instrumentor_context: _BotocoreInstrumentorContext,
+     ):
          if not span.is_recording():
              return

@@ -22,6 +22,7 @@ from opentelemetry.instrumentation.botocore.extensions.types import (
      _AttributeMapT,
      _AwsSdkCallContext,
      _AwsSdkExtension,
+     _BotocoreInstrumentorContext,
  )
  from opentelemetry.propagate import inject
  from opentelemetry.semconv.trace import SpanAttributes
@@ -119,7 +120,9 @@ class _LambdaExtension(_AwsSdkExtension):

          self._op.extract_attributes(self._call_context, attributes)

-     def before_service_call(self, span: Span):
+     def before_service_call(
+         self, span: Span, instrumentor_context: _BotocoreInstrumentorContext
+     ):
          if self._op is None:
              return

@@ -23,6 +23,7 @@ from opentelemetry.instrumentation.botocore.extensions.types import (
      _AttributeMapT,
      _AwsSdkCallContext,
      _AwsSdkExtension,
+     _BotocoreInstrumentorContext,
  )
  from opentelemetry.semconv.trace import (
      MessagingDestinationKindValues,
@@ -73,36 +74,35 @@ class _OpPublish(_SnsOperation):
      def extract_attributes(
          cls, call_context: _AwsSdkCallContext, attributes: _AttributeMapT
      ):
-         destination_name, is_phone_number = cls._extract_destination_name(
+         span_name, destination_name = cls._extract_destination_name(
              call_context
          )
+
+         call_context.span_name = f"{span_name} send"
+
          attributes[SpanAttributes.MESSAGING_DESTINATION_KIND] = (
              MessagingDestinationKindValues.TOPIC.value
          )
          attributes[SpanAttributes.MESSAGING_DESTINATION] = destination_name
-
-         # TODO: Use SpanAttributes.MESSAGING_DESTINATION_NAME when opentelemetry-semantic-conventions 0.42b0 is released
-         attributes["messaging.destination.name"] = cls._extract_input_arn(
-             call_context
-         )
-         call_context.span_name = (
-             f"{'phone_number' if is_phone_number else destination_name} send"
+         attributes[SpanAttributes.MESSAGING_DESTINATION_NAME] = (
+             destination_name
          )

      @classmethod
      def _extract_destination_name(
          cls, call_context: _AwsSdkCallContext
-     ) -> Tuple[str, bool]:
+     ) -> Tuple[str, str]:
          arn = cls._extract_input_arn(call_context)
          if arn:
-             return arn.rsplit(":", 1)[-1], False
+             return arn.rsplit(":", 1)[-1], arn

          if cls._phone_arg_name:
              phone_number = call_context.params.get(cls._phone_arg_name)
              if phone_number:
-                 return phone_number, True
+                 # phone number redacted because it's a PII
+                 return "phone_number", "phone_number:**"

-         return "unknown", False
+         return "unknown", "unknown"

      @classmethod
      def _extract_input_arn(
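The SNS publish span is now named after the topic or target (the last ARN segment), both messaging destination attributes carry the full ARN, and phone-number targets are redacted to a fixed placeholder instead of leaking PII into the span name. A sketch of the new naming rule with a hypothetical ARN:

arn = "arn:aws:sns:us-east-1:123456789012:my-topic"
span_name, destination_name = arn.rsplit(":", 1)[-1], arn
print(f"{span_name} send")  # "my-topic send"
print(destination_name)     # full ARN, set on messaging.destination and messaging.destination.name

# Publishing to a phone number instead produces the redacted pair
# ("phone_number", "phone_number:**").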
@@ -165,6 +165,8 @@ class _SnsExtension(_AwsSdkExtension):
          if self._op:
              self._op.extract_attributes(self._call_context, attributes)

-     def before_service_call(self, span: Span):
+     def before_service_call(
+         self, span: Span, instrumentor_context: _BotocoreInstrumentorContext
+     ):
          if self._op:
              self._op.before_service_call(self._call_context, span)
@@ -16,6 +16,7 @@ import logging
  from opentelemetry.instrumentation.botocore.extensions.types import (
      _AttributeMapT,
      _AwsSdkExtension,
+     _BotocoreInstrumentorContext,
      _BotoResultT,
  )
  from opentelemetry.semconv.trace import SpanAttributes
@@ -44,7 +45,12 @@ class _SqsExtension(_AwsSdkExtension):
              queue_url,
          )

-     def on_success(self, span: Span, result: _BotoResultT):
+     def on_success(
+         self,
+         span: Span,
+         result: _BotoResultT,
+         instrumentor_context: _BotocoreInstrumentorContext,
+     ):
          operation = self._call_context.operation
          if operation in _SUPPORTED_OPERATIONS:
              try:
@@ -12,9 +12,13 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

+ from __future__ import annotations
+
  import logging
  from typing import Any, Dict, Optional, Tuple

+ from opentelemetry._events import EventLogger
+ from opentelemetry.metrics import Instrument, Meter
  from opentelemetry.trace import SpanKind
  from opentelemetry.trace.span import Span
  from opentelemetry.util.types import AttributeValue
@@ -89,10 +93,35 @@ class _AwsSdkCallContext:
          return default


+ class _BotocoreInstrumentorContext:
+     def __init__(
+         self,
+         event_logger: EventLogger,
+         metrics: Dict[str, Instrument] | None = None,
+     ):
+         self.event_logger = event_logger
+         self.metrics = metrics or {}
+
+
  class _AwsSdkExtension:
      def __init__(self, call_context: _AwsSdkCallContext):
          self._call_context = call_context

+     @staticmethod
+     def tracer_schema_version() -> str:
+         """Returns the tracer OTel schema version the extension is following"""
+         return "1.11.0"
+
+     @staticmethod
+     def event_logger_schema_version() -> str:
+         """Returns the event logger OTel schema version the extension is following"""
+         return "1.30.0"
+
+     @staticmethod
+     def meter_schema_version() -> str:
+         """Returns the meter OTel schema version the extension is following"""
+         return "1.30.0"
+
      def should_trace_service_call(self) -> bool:  # pylint:disable=no-self-use
          """Returns if the AWS SDK service call should be traced or not

@@ -109,13 +138,21 @@ class _AwsSdkExtension:
          """
          return True

+     def setup_metrics(self, meter: Meter, metrics: Dict[str, Instrument]):
+         """Callback which gets invoked to setup metrics.
+
+         Extensions might override this function to add to the metrics dictionary all the metrics
+         they want to receive later in _BotocoreInstrumentorContext."""
+
      def extract_attributes(self, attributes: _AttributeMapT):
          """Callback which gets invoked before the span is created.

          Extensions might override this function to extract additional attributes.
          """

-     def before_service_call(self, span: Span):
+     def before_service_call(
+         self, span: Span, instrumentor_context: _BotocoreInstrumentorContext
+     ):
          """Callback which gets invoked after the span is created but before the
          AWS SDK service is called.
@@ -123,7 +160,12 @@ class _AwsSdkExtension:
          a carrier.
          """

-     def on_success(self, span: Span, result: _BotoResultT):
+     def on_success(
+         self,
+         span: Span,
+         result: _BotoResultT,
+         instrumentor_context: _BotocoreInstrumentorContext,
+     ):
          """Callback that gets invoked when the AWS SDK call returns
          successfully.

@@ -131,12 +173,19 @@ class _AwsSdkExtension:
          attributes on the span.
          """

-     def on_error(self, span: Span, exception: _BotoClientErrorT):
+     def on_error(
+         self,
+         span: Span,
+         exception: _BotoClientErrorT,
+         instrumentor_context: _BotocoreInstrumentorContext,
+     ):
          """Callback that gets invoked when the AWS SDK service call raises a
          ClientError.
          """

-     def after_service_call(self):
+     def after_service_call(
+         self, instrumentor_context: _BotocoreInstrumentorContext
+     ):
          """Callback that gets invoked after the AWS SDK service was called.

          Extensions might override this function to do some cleanup tasks.
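Every extension callback now also receives a _BotocoreInstrumentorContext, which carries the shared EventLogger and whatever instruments the extension registered in setup_metrics. A hedged sketch of what a custom extension could look like under the new signatures; the class name and metric name are hypothetical, and the sketch assumes the _AwsSdkExtension and _BotocoreInstrumentorContext types from the module diffed above are importable (the diff does not show its path):

from opentelemetry.metrics import Meter
from opentelemetry.trace.span import Span

class _MyServiceExtension(_AwsSdkExtension):  # hypothetical extension
    _CALL_COUNTER = "my.aws.sdk.calls"  # hypothetical instrument name

    def setup_metrics(self, meter: Meter, metrics):
        # Register instruments up front; they come back via instrumentor_context.metrics.
        metrics[self._CALL_COUNTER] = meter.create_counter(self._CALL_COUNTER)

    def on_success(self, span: Span, result, instrumentor_context):
        counter = instrumentor_context.metrics.get(self._CALL_COUNTER)
        if counter is not None:
            counter.add(1)
        # Log-based events would go through instrumentor_context.event_logger.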
@@ -12,4 +12,4 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- __version__ = "0.51b0"
+ __version__ = "0.52b1"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: opentelemetry-instrumentation-botocore
- Version: 0.51b0
+ Version: 0.52b1
  Summary: OpenTelemetry Botocore instrumentation
  Project-URL: Homepage, https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-botocore
  Project-URL: Repository, https://github.com/open-telemetry/opentelemetry-python-contrib
@@ -19,10 +19,10 @@ Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
  Requires-Python: >=3.8
- Requires-Dist: opentelemetry-api~=1.12
- Requires-Dist: opentelemetry-instrumentation==0.51b0
+ Requires-Dist: opentelemetry-api~=1.30
+ Requires-Dist: opentelemetry-instrumentation==0.52b1
  Requires-Dist: opentelemetry-propagator-aws-xray~=1.0
- Requires-Dist: opentelemetry-semantic-conventions==0.51b0
+ Requires-Dist: opentelemetry-semantic-conventions==0.52b1
  Provides-Extra: instruments
  Requires-Dist: botocore~=1.0; extra == 'instruments'
  Description-Content-Type: text/x-rst