opentelemetry-instrumentation-botocore 0.50b0__tar.gz → 0.51b0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/PKG-INFO +8 -5
  2. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/pyproject.toml +4 -2
  3. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/src/opentelemetry/instrumentation/botocore/__init__.py +4 -0
  4. opentelemetry_instrumentation_botocore-0.51b0/src/opentelemetry/instrumentation/botocore/environment_variables.py +3 -0
  5. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/src/opentelemetry/instrumentation/botocore/extensions/__init__.py +1 -0
  6. opentelemetry_instrumentation_botocore-0.51b0/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py +396 -0
  7. opentelemetry_instrumentation_botocore-0.51b0/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py +222 -0
  8. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/src/opentelemetry/instrumentation/botocore/extensions/types.py +8 -0
  9. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/src/opentelemetry/instrumentation/botocore/version.py +1 -1
  10. opentelemetry_instrumentation_botocore-0.51b0/tests/bedrock_utils.py +213 -0
  11. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_converse_stream_handles_event_stream_error.yaml +71 -0
  12. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_converse_stream_with_content.yaml +69 -0
  13. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_converse_stream_with_invalid_model.yaml +54 -0
  14. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_converse_with_content.yaml +55 -0
  15. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_converse_with_invalid_model.yaml +54 -0
  16. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_invoke_model_with_content[amazon.nova].yaml +58 -0
  17. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_invoke_model_with_content[amazon.titan].yaml +57 -0
  18. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_invoke_model_with_content[anthropic.claude].yaml +58 -0
  19. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_invoke_model_with_invalid_model.yaml +51 -0
  20. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_invoke_model_with_response_stream_handles_stream_error.yaml +62 -0
  21. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_invoke_model_with_response_stream_invalid_model.yaml +51 -0
  22. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_invoke_model_with_response_stream_with_content[amazon.nova].yaml +144 -0
  23. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_invoke_model_with_response_stream_with_content[amazon.titan].yaml +61 -0
  24. opentelemetry_instrumentation_botocore-0.51b0/tests/cassettes/test_invoke_model_with_response_stream_with_content[anthropic.claude].yaml +124 -0
  25. opentelemetry_instrumentation_botocore-0.51b0/tests/conftest.py +121 -0
  26. opentelemetry_instrumentation_botocore-0.51b0/tests/test_botocore_bedrock.py +553 -0
  27. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/.gitignore +0 -0
  28. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/LICENSE +0 -0
  29. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/README.rst +0 -0
  30. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/src/opentelemetry/instrumentation/botocore/extensions/_messaging.py +0 -0
  31. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/src/opentelemetry/instrumentation/botocore/extensions/dynamodb.py +0 -0
  32. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/src/opentelemetry/instrumentation/botocore/extensions/lmbd.py +0 -0
  33. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/src/opentelemetry/instrumentation/botocore/extensions/sns.py +0 -0
  34. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/src/opentelemetry/instrumentation/botocore/extensions/sqs.py +0 -0
  35. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/src/opentelemetry/instrumentation/botocore/package.py +0 -0
  36. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/tests/__init__.py +0 -0
  37. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/tests/test_botocore_dynamodb.py +0 -0
  38. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/tests/test_botocore_instrumentation.py +0 -0
  39. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/tests/test_botocore_lambda.py +0 -0
  40. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/tests/test_botocore_messaging.py +0 -0
  41. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/tests/test_botocore_sns.py +0 -0
  42. {opentelemetry_instrumentation_botocore-0.50b0 → opentelemetry_instrumentation_botocore-0.51b0}/tests/test_botocore_sqs.py +0 -0
@@ -1,10 +1,12 @@
1
- Metadata-Version: 2.3
1
+ Metadata-Version: 2.4
2
2
  Name: opentelemetry-instrumentation-botocore
3
- Version: 0.50b0
3
+ Version: 0.51b0
4
4
  Summary: OpenTelemetry Botocore instrumentation
5
5
  Project-URL: Homepage, https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-botocore
6
+ Project-URL: Repository, https://github.com/open-telemetry/opentelemetry-python-contrib
6
7
  Author-email: OpenTelemetry Authors <cncf-opentelemetry-contributors@lists.cncf.io>
7
- License: Apache-2.0
8
+ License-Expression: Apache-2.0
9
+ License-File: LICENSE
8
10
  Classifier: Development Status :: 4 - Beta
9
11
  Classifier: Intended Audience :: Developers
10
12
  Classifier: License :: OSI Approved :: Apache Software License
@@ -15,11 +17,12 @@ Classifier: Programming Language :: Python :: 3.9
15
17
  Classifier: Programming Language :: Python :: 3.10
16
18
  Classifier: Programming Language :: Python :: 3.11
17
19
  Classifier: Programming Language :: Python :: 3.12
20
+ Classifier: Programming Language :: Python :: 3.13
18
21
  Requires-Python: >=3.8
19
22
  Requires-Dist: opentelemetry-api~=1.12
20
- Requires-Dist: opentelemetry-instrumentation==0.50b0
23
+ Requires-Dist: opentelemetry-instrumentation==0.51b0
21
24
  Requires-Dist: opentelemetry-propagator-aws-xray~=1.0
22
- Requires-Dist: opentelemetry-semantic-conventions==0.50b0
25
+ Requires-Dist: opentelemetry-semantic-conventions==0.51b0
23
26
  Provides-Extra: instruments
24
27
  Requires-Dist: botocore~=1.0; extra == 'instruments'
25
28
  Description-Content-Type: text/x-rst
@@ -23,11 +23,12 @@ classifiers = [
23
23
  "Programming Language :: Python :: 3.10",
24
24
  "Programming Language :: Python :: 3.11",
25
25
  "Programming Language :: Python :: 3.12",
26
+ "Programming Language :: Python :: 3.13",
26
27
  ]
27
28
  dependencies = [
28
29
  "opentelemetry-api ~= 1.12",
29
- "opentelemetry-instrumentation == 0.50b0",
30
- "opentelemetry-semantic-conventions == 0.50b0",
30
+ "opentelemetry-instrumentation == 0.51b0",
31
+ "opentelemetry-semantic-conventions == 0.51b0",
31
32
  "opentelemetry-propagator-aws-xray ~= 1.0",
32
33
  ]
33
34
 
@@ -41,6 +42,7 @@ botocore = "opentelemetry.instrumentation.botocore:BotocoreInstrumentor"
41
42
 
42
43
  [project.urls]
43
44
  Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-botocore"
45
+ Repository = "https://github.com/open-telemetry/opentelemetry-python-contrib"
44
46
 
45
47
  [tool.hatch.version]
46
48
  path = "src/opentelemetry/instrumentation/botocore/version.py"
@@ -188,11 +188,15 @@ class BotocoreInstrumentor(BaseInstrumentor):
188
188
  }
189
189
 
190
190
  _safe_invoke(extension.extract_attributes, attributes)
191
+ end_span_on_exit = extension.should_end_span_on_exit()
191
192
 
192
193
  with self._tracer.start_as_current_span(
193
194
  call_context.span_name,
194
195
  kind=call_context.span_kind,
195
196
  attributes=attributes,
197
+ # tracing streaming services requires closing the span manually
198
+ # at a later time after the stream has been consumed
199
+ end_on_exit=end_span_on_exit,
196
200
  ) as span:
197
201
  _safe_invoke(extension.before_service_call, span)
198
202
  self._call_request_hook(span, call_context)
@@ -0,0 +1,3 @@
1
+ OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT = (
2
+ "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"
3
+ )
@@ -32,6 +32,7 @@ def _lazy_load(module, cls):
32
32
 
33
33
 
34
34
  _KNOWN_EXTENSIONS = {
35
+ "bedrock-runtime": _lazy_load(".bedrock", "_BedrockRuntimeExtension"),
35
36
  "dynamodb": _lazy_load(".dynamodb", "_DynamoDbExtension"),
36
37
  "lambda": _lazy_load(".lmbd", "_LambdaExtension"),
37
38
  "sns": _lazy_load(".sns", "_SnsExtension"),
@@ -0,0 +1,396 @@
1
+ # Copyright The OpenTelemetry Authors
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Includes work from:
16
+ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
17
+ # SPDX-License-Identifier: Apache-2.0
18
+
19
+ from __future__ import annotations
20
+
21
+ import io
22
+ import json
23
+ import logging
24
+ from typing import Any
25
+
26
+ from botocore.eventstream import EventStream
27
+ from botocore.response import StreamingBody
28
+
29
+ from opentelemetry.instrumentation.botocore.extensions.bedrock_utils import (
30
+ ConverseStreamWrapper,
31
+ InvokeModelWithResponseStreamWrapper,
32
+ )
33
+ from opentelemetry.instrumentation.botocore.extensions.types import (
34
+ _AttributeMapT,
35
+ _AwsSdkExtension,
36
+ _BotoClientErrorT,
37
+ )
38
+ from opentelemetry.semconv._incubating.attributes.error_attributes import (
39
+ ERROR_TYPE,
40
+ )
41
+ from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
42
+ GEN_AI_OPERATION_NAME,
43
+ GEN_AI_REQUEST_MAX_TOKENS,
44
+ GEN_AI_REQUEST_MODEL,
45
+ GEN_AI_REQUEST_STOP_SEQUENCES,
46
+ GEN_AI_REQUEST_TEMPERATURE,
47
+ GEN_AI_REQUEST_TOP_P,
48
+ GEN_AI_RESPONSE_FINISH_REASONS,
49
+ GEN_AI_SYSTEM,
50
+ GEN_AI_USAGE_INPUT_TOKENS,
51
+ GEN_AI_USAGE_OUTPUT_TOKENS,
52
+ GenAiOperationNameValues,
53
+ GenAiSystemValues,
54
+ )
55
+ from opentelemetry.trace.span import Span
56
+ from opentelemetry.trace.status import Status, StatusCode
57
+
58
+ _logger = logging.getLogger(__name__)
59
+
60
+ _MODEL_ID_KEY: str = "modelId"
61
+
62
+
63
+ class _BedrockRuntimeExtension(_AwsSdkExtension):
64
+ """
65
+ This class is an extension for <a
66
+ href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_Operations_Amazon_Bedrock_Runtime.html">
67
+ Amazon Bedrock Runtime</a>.
68
+ """
69
+
70
+ _HANDLED_OPERATIONS = {
71
+ "Converse",
72
+ "ConverseStream",
73
+ "InvokeModel",
74
+ "InvokeModelWithResponseStream",
75
+ }
76
+ _DONT_CLOSE_SPAN_ON_END_OPERATIONS = {
77
+ "ConverseStream",
78
+ "InvokeModelWithResponseStream",
79
+ }
80
+
81
+ def should_end_span_on_exit(self):
82
+ return (
83
+ self._call_context.operation
84
+ not in self._DONT_CLOSE_SPAN_ON_END_OPERATIONS
85
+ )
86
+
87
+ def extract_attributes(self, attributes: _AttributeMapT):
88
+ if self._call_context.operation not in self._HANDLED_OPERATIONS:
89
+ return
90
+
91
+ attributes[GEN_AI_SYSTEM] = GenAiSystemValues.AWS_BEDROCK.value
92
+
93
+ model_id = self._call_context.params.get(_MODEL_ID_KEY)
94
+ if model_id:
95
+ attributes[GEN_AI_REQUEST_MODEL] = model_id
96
+ attributes[GEN_AI_OPERATION_NAME] = (
97
+ GenAiOperationNameValues.CHAT.value
98
+ )
99
+
100
+ # Converse / ConverseStream
101
+ if inference_config := self._call_context.params.get(
102
+ "inferenceConfig"
103
+ ):
104
+ self._set_if_not_none(
105
+ attributes,
106
+ GEN_AI_REQUEST_TEMPERATURE,
107
+ inference_config.get("temperature"),
108
+ )
109
+ self._set_if_not_none(
110
+ attributes,
111
+ GEN_AI_REQUEST_TOP_P,
112
+ inference_config.get("topP"),
113
+ )
114
+ self._set_if_not_none(
115
+ attributes,
116
+ GEN_AI_REQUEST_MAX_TOKENS,
117
+ inference_config.get("maxTokens"),
118
+ )
119
+ self._set_if_not_none(
120
+ attributes,
121
+ GEN_AI_REQUEST_STOP_SEQUENCES,
122
+ inference_config.get("stopSequences"),
123
+ )
124
+
125
+ # InvokeModel
126
+ # Get the request body if it exists
127
+ body = self._call_context.params.get("body")
128
+ if body:
129
+ try:
130
+ request_body = json.loads(body)
131
+
132
+ if "amazon.titan" in model_id:
133
+ # titan interface is a text completion one
134
+ attributes[GEN_AI_OPERATION_NAME] = (
135
+ GenAiOperationNameValues.TEXT_COMPLETION.value
136
+ )
137
+ self._extract_titan_attributes(
138
+ attributes, request_body
139
+ )
140
+ elif "amazon.nova" in model_id:
141
+ self._extract_nova_attributes(attributes, request_body)
142
+ elif "anthropic.claude" in model_id:
143
+ self._extract_claude_attributes(
144
+ attributes, request_body
145
+ )
146
+ except json.JSONDecodeError:
147
+ _logger.debug("Error: Unable to parse the body as JSON")
148
+
149
+ def _extract_titan_attributes(self, attributes, request_body):
150
+ config = request_body.get("textGenerationConfig", {})
151
+ self._set_if_not_none(
152
+ attributes, GEN_AI_REQUEST_TEMPERATURE, config.get("temperature")
153
+ )
154
+ self._set_if_not_none(
155
+ attributes, GEN_AI_REQUEST_TOP_P, config.get("topP")
156
+ )
157
+ self._set_if_not_none(
158
+ attributes, GEN_AI_REQUEST_MAX_TOKENS, config.get("maxTokenCount")
159
+ )
160
+ self._set_if_not_none(
161
+ attributes,
162
+ GEN_AI_REQUEST_STOP_SEQUENCES,
163
+ config.get("stopSequences"),
164
+ )
165
+
166
+ def _extract_nova_attributes(self, attributes, request_body):
167
+ config = request_body.get("inferenceConfig", {})
168
+ self._set_if_not_none(
169
+ attributes, GEN_AI_REQUEST_TEMPERATURE, config.get("temperature")
170
+ )
171
+ self._set_if_not_none(
172
+ attributes, GEN_AI_REQUEST_TOP_P, config.get("topP")
173
+ )
174
+ self._set_if_not_none(
175
+ attributes, GEN_AI_REQUEST_MAX_TOKENS, config.get("max_new_tokens")
176
+ )
177
+ self._set_if_not_none(
178
+ attributes,
179
+ GEN_AI_REQUEST_STOP_SEQUENCES,
180
+ config.get("stopSequences"),
181
+ )
182
+
183
+ def _extract_claude_attributes(self, attributes, request_body):
184
+ self._set_if_not_none(
185
+ attributes,
186
+ GEN_AI_REQUEST_MAX_TOKENS,
187
+ request_body.get("max_tokens"),
188
+ )
189
+ self._set_if_not_none(
190
+ attributes,
191
+ GEN_AI_REQUEST_TEMPERATURE,
192
+ request_body.get("temperature"),
193
+ )
194
+ self._set_if_not_none(
195
+ attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p")
196
+ )
197
+ self._set_if_not_none(
198
+ attributes,
199
+ GEN_AI_REQUEST_STOP_SEQUENCES,
200
+ request_body.get("stop_sequences"),
201
+ )
202
+
203
+ @staticmethod
204
+ def _set_if_not_none(attributes, key, value):
205
+ if value is not None:
206
+ attributes[key] = value
207
+
208
+ def before_service_call(self, span: Span):
209
+ if self._call_context.operation not in self._HANDLED_OPERATIONS:
210
+ return
211
+
212
+ if not span.is_recording():
213
+ return
214
+
215
+ operation_name = span.attributes.get(GEN_AI_OPERATION_NAME, "")
216
+ request_model = span.attributes.get(GEN_AI_REQUEST_MODEL, "")
217
+ # avoid setting to an empty string if they are not available
218
+ if operation_name and request_model:
219
+ span.update_name(f"{operation_name} {request_model}")
220
+
221
+ # pylint: disable=no-self-use
222
+ def _converse_on_success(self, span: Span, result: dict[str, Any]):
223
+ if usage := result.get("usage"):
224
+ if input_tokens := usage.get("inputTokens"):
225
+ span.set_attribute(
226
+ GEN_AI_USAGE_INPUT_TOKENS,
227
+ input_tokens,
228
+ )
229
+ if output_tokens := usage.get("outputTokens"):
230
+ span.set_attribute(
231
+ GEN_AI_USAGE_OUTPUT_TOKENS,
232
+ output_tokens,
233
+ )
234
+
235
+ if stop_reason := result.get("stopReason"):
236
+ span.set_attribute(
237
+ GEN_AI_RESPONSE_FINISH_REASONS,
238
+ [stop_reason],
239
+ )
240
+
241
+ def _invoke_model_on_success(
242
+ self, span: Span, result: dict[str, Any], model_id: str
243
+ ):
244
+ original_body = None
245
+ try:
246
+ original_body = result["body"]
247
+ body_content = original_body.read()
248
+
249
+ # Replenish stream for downstream application use
250
+ new_stream = io.BytesIO(body_content)
251
+ result["body"] = StreamingBody(new_stream, len(body_content))
252
+
253
+ response_body = json.loads(body_content.decode("utf-8"))
254
+ if "amazon.titan" in model_id:
255
+ self._handle_amazon_titan_response(span, response_body)
256
+ elif "amazon.nova" in model_id:
257
+ self._handle_amazon_nova_response(span, response_body)
258
+ elif "anthropic.claude" in model_id:
259
+ self._handle_anthropic_claude_response(span, response_body)
260
+
261
+ except json.JSONDecodeError:
262
+ _logger.debug("Error: Unable to parse the response body as JSON")
263
+ except Exception as exc: # pylint: disable=broad-exception-caught
264
+ _logger.debug("Error processing response: %s", exc)
265
+ finally:
266
+ if original_body is not None:
267
+ original_body.close()
268
+
269
+ def _on_stream_error_callback(self, span: Span, exception):
270
+ span.set_status(Status(StatusCode.ERROR, str(exception)))
271
+ if span.is_recording():
272
+ span.set_attribute(ERROR_TYPE, type(exception).__qualname__)
273
+ span.end()
274
+
275
+ def on_success(self, span: Span, result: dict[str, Any]):
276
+ if self._call_context.operation not in self._HANDLED_OPERATIONS:
277
+ return
278
+
279
+ if not span.is_recording():
280
+ if not self.should_end_span_on_exit():
281
+ span.end()
282
+ return
283
+
284
+ # ConverseStream
285
+ if "stream" in result and isinstance(result["stream"], EventStream):
286
+
287
+ def stream_done_callback(response):
288
+ self._converse_on_success(span, response)
289
+ span.end()
290
+
291
+ def stream_error_callback(exception):
292
+ self._on_stream_error_callback(span, exception)
293
+
294
+ result["stream"] = ConverseStreamWrapper(
295
+ result["stream"], stream_done_callback, stream_error_callback
296
+ )
297
+ return
298
+
299
+ # Converse
300
+ self._converse_on_success(span, result)
301
+
302
+ model_id = self._call_context.params.get(_MODEL_ID_KEY)
303
+ if not model_id:
304
+ return
305
+
306
+ # InvokeModel
307
+ if "body" in result and isinstance(result["body"], StreamingBody):
308
+ self._invoke_model_on_success(span, result, model_id)
309
+ return
310
+
311
+ # InvokeModelWithResponseStream
312
+ if "body" in result and isinstance(result["body"], EventStream):
313
+
314
+ def invoke_model_stream_done_callback(response):
315
+ # the callback gets data formatted as the simpler converse API
316
+ self._converse_on_success(span, response)
317
+ span.end()
318
+
319
+ def invoke_model_stream_error_callback(exception):
320
+ self._on_stream_error_callback(span, exception)
321
+
322
+ result["body"] = InvokeModelWithResponseStreamWrapper(
323
+ result["body"],
324
+ invoke_model_stream_done_callback,
325
+ invoke_model_stream_error_callback,
326
+ model_id,
327
+ )
328
+ return
329
+
330
+ # pylint: disable=no-self-use
331
+ def _handle_amazon_titan_response(
332
+ self, span: Span, response_body: dict[str, Any]
333
+ ):
334
+ if "inputTextTokenCount" in response_body:
335
+ span.set_attribute(
336
+ GEN_AI_USAGE_INPUT_TOKENS, response_body["inputTextTokenCount"]
337
+ )
338
+ if "results" in response_body and response_body["results"]:
339
+ result = response_body["results"][0]
340
+ if "tokenCount" in result:
341
+ span.set_attribute(
342
+ GEN_AI_USAGE_OUTPUT_TOKENS, result["tokenCount"]
343
+ )
344
+ if "completionReason" in result:
345
+ span.set_attribute(
346
+ GEN_AI_RESPONSE_FINISH_REASONS,
347
+ [result["completionReason"]],
348
+ )
349
+
350
+ # pylint: disable=no-self-use
351
+ def _handle_amazon_nova_response(
352
+ self, span: Span, response_body: dict[str, Any]
353
+ ):
354
+ if "usage" in response_body:
355
+ usage = response_body["usage"]
356
+ if "inputTokens" in usage:
357
+ span.set_attribute(
358
+ GEN_AI_USAGE_INPUT_TOKENS, usage["inputTokens"]
359
+ )
360
+ if "outputTokens" in usage:
361
+ span.set_attribute(
362
+ GEN_AI_USAGE_OUTPUT_TOKENS, usage["outputTokens"]
363
+ )
364
+ if "stopReason" in response_body:
365
+ span.set_attribute(
366
+ GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stopReason"]]
367
+ )
368
+
369
+ # pylint: disable=no-self-use
370
+ def _handle_anthropic_claude_response(
371
+ self, span: Span, response_body: dict[str, Any]
372
+ ):
373
+ if usage := response_body.get("usage"):
374
+ if "input_tokens" in usage:
375
+ span.set_attribute(
376
+ GEN_AI_USAGE_INPUT_TOKENS, usage["input_tokens"]
377
+ )
378
+ if "output_tokens" in usage:
379
+ span.set_attribute(
380
+ GEN_AI_USAGE_OUTPUT_TOKENS, usage["output_tokens"]
381
+ )
382
+ if "stop_reason" in response_body:
383
+ span.set_attribute(
384
+ GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stop_reason"]]
385
+ )
386
+
387
+ def on_error(self, span: Span, exception: _BotoClientErrorT):
388
+ if self._call_context.operation not in self._HANDLED_OPERATIONS:
389
+ return
390
+
391
+ span.set_status(Status(StatusCode.ERROR, str(exception)))
392
+ if span.is_recording():
393
+ span.set_attribute(ERROR_TYPE, type(exception).__qualname__)
394
+
395
+ if not self.should_end_span_on_exit():
396
+ span.end()