opentelemetry-instrumentation-botocore 0.53b1__tar.gz → 0.54b0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/PKG-INFO +3 -3
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/pyproject.toml +2 -2
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/__init__.py +2 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py +257 -7
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py +49 -0
- opentelemetry_instrumentation_botocore-0.54b0/src/opentelemetry/instrumentation/botocore/utils.py +31 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/version.py +1 -1
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/README.md +2 -1
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/bedrock_utils.py +52 -2
- opentelemetry_instrumentation_botocore-0.54b0/tests/cassettes/test_invoke_model_no_content[cohere.command-r].yaml +71 -0
- opentelemetry_instrumentation_botocore-0.54b0/tests/cassettes/test_invoke_model_no_content[cohere.command].yaml +66 -0
- opentelemetry_instrumentation_botocore-0.54b0/tests/cassettes/test_invoke_model_no_content[meta.llama].yaml +58 -0
- opentelemetry_instrumentation_botocore-0.54b0/tests/cassettes/test_invoke_model_no_content[mistral.mistral].yaml +63 -0
- opentelemetry_instrumentation_botocore-0.54b0/tests/cassettes/test_invoke_model_with_content[cohere.command-r].yaml +71 -0
- opentelemetry_instrumentation_botocore-0.54b0/tests/cassettes/test_invoke_model_with_content[cohere.command].yaml +66 -0
- opentelemetry_instrumentation_botocore-0.54b0/tests/cassettes/test_invoke_model_with_content[meta.llama].yaml +58 -0
- opentelemetry_instrumentation_botocore-0.54b0/tests/cassettes/test_invoke_model_with_content[mistral.mistral].yaml +63 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/test_botocore_bedrock.py +99 -6
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/test_botocore_instrumentation.py +56 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/.gitignore +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/LICENSE +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/README.rst +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/environment_variables.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/extensions/__init__.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/extensions/_messaging.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/extensions/dynamodb.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/extensions/lmbd.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/extensions/sns.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/extensions/sqs.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/extensions/types.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/src/opentelemetry/instrumentation/botocore/package.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/__init__.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_no_content.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_no_content_different_events.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_stream_handles_event_stream_error.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_stream_no_content.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_stream_no_content_different_events.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_stream_no_content_tool_call.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_stream_with_content.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_stream_with_content_different_events.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_stream_with_content_tool_call.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_stream_with_invalid_model.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_tool_call_no_content.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_tool_call_with_content.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_with_content.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_with_content_different_events.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_converse_with_invalid_model.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_no_content[amazon.nova].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_no_content[amazon.titan].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_no_content[anthropic.claude].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_no_content_different_events[amazon.nova].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_no_content_different_events[anthropic.claude].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_no_content_tool_call[amazon.nova].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_no_content_tool_call[anthropic.claude].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_content[amazon.nova].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_content[amazon.titan].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_content[anthropic.claude].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_content_different_events[amazon.nova].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_content_different_events[anthropic.claude].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_content_tool_call[amazon.nova].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_content_tool_call[anthropic.claude].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_content_user_content_as_string.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_invalid_model.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_handles_stream_error.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_invalid_model.yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_no_content[amazon.nova].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_no_content[amazon.titan].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_no_content[anthropic.claude].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_no_content_different_events[amazon.nova].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_no_content_different_events[anthropic.claude].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_no_content_tool_call[amazon.nova].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_no_content_tool_call[anthropic.claude].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_with_content[amazon.nova].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_with_content[amazon.titan].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_with_content[anthropic.claude].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_with_content_different_events[amazon.nova].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_with_content_different_events[anthropic.claude].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_with_content_tool_call[amazon.nova].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/cassettes/test_invoke_model_with_response_stream_with_content_tool_call[anthropic.claude].yaml +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/conftest.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/test_botocore_dynamodb.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/test_botocore_lambda.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/test_botocore_messaging.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/test_botocore_sns.py +0 -0
- {opentelemetry_instrumentation_botocore-0.53b1 → opentelemetry_instrumentation_botocore-0.54b0}/tests/test_botocore_sqs.py +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: opentelemetry-instrumentation-botocore
|
3
|
-
Version: 0.
|
3
|
+
Version: 0.54b0
|
4
4
|
Summary: OpenTelemetry Botocore instrumentation
|
5
5
|
Project-URL: Homepage, https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-botocore
|
6
6
|
Project-URL: Repository, https://github.com/open-telemetry/opentelemetry-python-contrib
|
@@ -20,9 +20,9 @@ Classifier: Programming Language :: Python :: 3.12
|
|
20
20
|
Classifier: Programming Language :: Python :: 3.13
|
21
21
|
Requires-Python: >=3.8
|
22
22
|
Requires-Dist: opentelemetry-api~=1.30
|
23
|
-
Requires-Dist: opentelemetry-instrumentation==0.
|
23
|
+
Requires-Dist: opentelemetry-instrumentation==0.54b0
|
24
24
|
Requires-Dist: opentelemetry-propagator-aws-xray~=1.0
|
25
|
-
Requires-Dist: opentelemetry-semantic-conventions==0.
|
25
|
+
Requires-Dist: opentelemetry-semantic-conventions==0.54b0
|
26
26
|
Provides-Extra: instruments
|
27
27
|
Requires-Dist: botocore~=1.0; extra == 'instruments'
|
28
28
|
Description-Content-Type: text/x-rst
|
@@ -27,8 +27,8 @@ classifiers = [
|
|
27
27
|
]
|
28
28
|
dependencies = [
|
29
29
|
"opentelemetry-api ~= 1.30",
|
30
|
-
"opentelemetry-instrumentation == 0.
|
31
|
-
"opentelemetry-semantic-conventions == 0.
|
30
|
+
"opentelemetry-instrumentation == 0.54b0",
|
31
|
+
"opentelemetry-semantic-conventions == 0.54b0",
|
32
32
|
"opentelemetry-propagator-aws-xray ~= 1.0",
|
33
33
|
]
|
34
34
|
|
@@ -99,6 +99,7 @@ from opentelemetry.instrumentation.botocore.extensions.types import (
|
|
99
99
|
_BotocoreInstrumentorContext,
|
100
100
|
)
|
101
101
|
from opentelemetry.instrumentation.botocore.package import _instruments
|
102
|
+
from opentelemetry.instrumentation.botocore.utils import get_server_attributes
|
102
103
|
from opentelemetry.instrumentation.botocore.version import __version__
|
103
104
|
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
104
105
|
from opentelemetry.instrumentation.utils import (
|
@@ -277,6 +278,7 @@ class BotocoreInstrumentor(BaseInstrumentor):
|
|
277
278
|
SpanAttributes.RPC_METHOD: call_context.operation,
|
278
279
|
# TODO: update when semantic conventions exist
|
279
280
|
"aws.region": call_context.region,
|
281
|
+
**get_server_attributes(call_context.endpoint_url),
|
280
282
|
}
|
281
283
|
|
282
284
|
_safe_invoke(extension.extract_attributes, attributes)
|
@@ -15,6 +15,7 @@
|
|
15
15
|
# Includes work from:
|
16
16
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
17
17
|
# SPDX-License-Identifier: Apache-2.0
|
18
|
+
# pylint: disable=too-many-lines
|
18
19
|
|
19
20
|
from __future__ import annotations
|
20
21
|
|
@@ -31,6 +32,7 @@ from opentelemetry.instrumentation.botocore.extensions.bedrock_utils import (
|
|
31
32
|
ConverseStreamWrapper,
|
32
33
|
InvokeModelWithResponseStreamWrapper,
|
33
34
|
_Choice,
|
35
|
+
estimate_token_count,
|
34
36
|
genai_capture_message_content,
|
35
37
|
message_to_event,
|
36
38
|
)
|
@@ -40,6 +42,7 @@ from opentelemetry.instrumentation.botocore.extensions.types import (
|
|
40
42
|
_BotoClientErrorT,
|
41
43
|
_BotocoreInstrumentorContext,
|
42
44
|
)
|
45
|
+
from opentelemetry.instrumentation.botocore.utils import get_server_attributes
|
43
46
|
from opentelemetry.metrics import Instrument, Meter
|
44
47
|
from opentelemetry.semconv._incubating.attributes.error_attributes import (
|
45
48
|
ERROR_TYPE,
|
@@ -145,7 +148,10 @@ class _BedrockRuntimeExtension(_AwsSdkExtension):
|
|
145
148
|
)
|
146
149
|
|
147
150
|
def _extract_metrics_attributes(self) -> _AttributeMapT:
|
148
|
-
attributes = {
|
151
|
+
attributes = {
|
152
|
+
GEN_AI_SYSTEM: GenAiSystemValues.AWS_BEDROCK.value,
|
153
|
+
**get_server_attributes(self._call_context.endpoint_url),
|
154
|
+
}
|
149
155
|
|
150
156
|
model_id = self._call_context.params.get(_MODEL_ID_KEY)
|
151
157
|
if not model_id:
|
@@ -162,6 +168,7 @@ class _BedrockRuntimeExtension(_AwsSdkExtension):
|
|
162
168
|
attributes[GEN_AI_OPERATION_NAME] = (
|
163
169
|
GenAiOperationNameValues.CHAT.value
|
164
170
|
)
|
171
|
+
|
165
172
|
return attributes
|
166
173
|
|
167
174
|
def extract_attributes(self, attributes: _AttributeMapT):
|
@@ -223,6 +230,23 @@ class _BedrockRuntimeExtension(_AwsSdkExtension):
|
|
223
230
|
self._extract_claude_attributes(
|
224
231
|
attributes, request_body
|
225
232
|
)
|
233
|
+
elif "cohere.command-r" in model_id:
|
234
|
+
self._extract_command_r_attributes(
|
235
|
+
attributes, request_body
|
236
|
+
)
|
237
|
+
elif "cohere.command" in model_id:
|
238
|
+
self._extract_command_attributes(
|
239
|
+
attributes, request_body
|
240
|
+
)
|
241
|
+
elif "meta.llama" in model_id:
|
242
|
+
self._extract_llama_attributes(
|
243
|
+
attributes, request_body
|
244
|
+
)
|
245
|
+
elif "mistral" in model_id:
|
246
|
+
self._extract_mistral_attributes(
|
247
|
+
attributes, request_body
|
248
|
+
)
|
249
|
+
|
226
250
|
except json.JSONDecodeError:
|
227
251
|
_logger.debug("Error: Unable to parse the body as JSON")
|
228
252
|
|
@@ -280,6 +304,95 @@ class _BedrockRuntimeExtension(_AwsSdkExtension):
|
|
280
304
|
request_body.get("stop_sequences"),
|
281
305
|
)
|
282
306
|
|
307
|
+
def _extract_command_r_attributes(self, attributes, request_body):
|
308
|
+
prompt = request_body.get("message")
|
309
|
+
self._set_if_not_none(
|
310
|
+
attributes, GEN_AI_USAGE_INPUT_TOKENS, estimate_token_count(prompt)
|
311
|
+
)
|
312
|
+
self._set_if_not_none(
|
313
|
+
attributes,
|
314
|
+
GEN_AI_REQUEST_MAX_TOKENS,
|
315
|
+
request_body.get("max_tokens"),
|
316
|
+
)
|
317
|
+
self._set_if_not_none(
|
318
|
+
attributes,
|
319
|
+
GEN_AI_REQUEST_TEMPERATURE,
|
320
|
+
request_body.get("temperature"),
|
321
|
+
)
|
322
|
+
self._set_if_not_none(
|
323
|
+
attributes, GEN_AI_REQUEST_TOP_P, request_body.get("p")
|
324
|
+
)
|
325
|
+
self._set_if_not_none(
|
326
|
+
attributes,
|
327
|
+
GEN_AI_REQUEST_STOP_SEQUENCES,
|
328
|
+
request_body.get("stop_sequences"),
|
329
|
+
)
|
330
|
+
|
331
|
+
def _extract_command_attributes(self, attributes, request_body):
|
332
|
+
prompt = request_body.get("prompt")
|
333
|
+
self._set_if_not_none(
|
334
|
+
attributes, GEN_AI_USAGE_INPUT_TOKENS, estimate_token_count(prompt)
|
335
|
+
)
|
336
|
+
self._set_if_not_none(
|
337
|
+
attributes,
|
338
|
+
GEN_AI_REQUEST_MAX_TOKENS,
|
339
|
+
request_body.get("max_tokens"),
|
340
|
+
)
|
341
|
+
self._set_if_not_none(
|
342
|
+
attributes,
|
343
|
+
GEN_AI_REQUEST_TEMPERATURE,
|
344
|
+
request_body.get("temperature"),
|
345
|
+
)
|
346
|
+
self._set_if_not_none(
|
347
|
+
attributes, GEN_AI_REQUEST_TOP_P, request_body.get("p")
|
348
|
+
)
|
349
|
+
self._set_if_not_none(
|
350
|
+
attributes,
|
351
|
+
GEN_AI_REQUEST_STOP_SEQUENCES,
|
352
|
+
request_body.get("stop_sequences"),
|
353
|
+
)
|
354
|
+
|
355
|
+
def _extract_llama_attributes(self, attributes, request_body):
|
356
|
+
self._set_if_not_none(
|
357
|
+
attributes,
|
358
|
+
GEN_AI_REQUEST_MAX_TOKENS,
|
359
|
+
request_body.get("max_gen_len"),
|
360
|
+
)
|
361
|
+
self._set_if_not_none(
|
362
|
+
attributes,
|
363
|
+
GEN_AI_REQUEST_TEMPERATURE,
|
364
|
+
request_body.get("temperature"),
|
365
|
+
)
|
366
|
+
self._set_if_not_none(
|
367
|
+
attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p")
|
368
|
+
)
|
369
|
+
# request for meta llama models does not contain stop_sequences field
|
370
|
+
|
371
|
+
def _extract_mistral_attributes(self, attributes, request_body):
|
372
|
+
prompt = request_body.get("prompt")
|
373
|
+
if prompt:
|
374
|
+
self._set_if_not_none(
|
375
|
+
attributes,
|
376
|
+
GEN_AI_USAGE_INPUT_TOKENS,
|
377
|
+
estimate_token_count(prompt),
|
378
|
+
)
|
379
|
+
self._set_if_not_none(
|
380
|
+
attributes,
|
381
|
+
GEN_AI_REQUEST_MAX_TOKENS,
|
382
|
+
request_body.get("max_tokens"),
|
383
|
+
)
|
384
|
+
self._set_if_not_none(
|
385
|
+
attributes,
|
386
|
+
GEN_AI_REQUEST_TEMPERATURE,
|
387
|
+
request_body.get("temperature"),
|
388
|
+
)
|
389
|
+
self._set_if_not_none(
|
390
|
+
attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p")
|
391
|
+
)
|
392
|
+
self._set_if_not_none(
|
393
|
+
attributes, GEN_AI_REQUEST_STOP_SEQUENCES, request_body.get("stop")
|
394
|
+
)
|
395
|
+
|
283
396
|
@staticmethod
|
284
397
|
def _set_if_not_none(attributes, key, value):
|
285
398
|
if value is not None:
|
@@ -287,7 +400,6 @@ class _BedrockRuntimeExtension(_AwsSdkExtension):
|
|
287
400
|
|
288
401
|
def _get_request_messages(self):
|
289
402
|
"""Extracts and normalize system and user / assistant messages"""
|
290
|
-
input_text = None
|
291
403
|
if system := self._call_context.params.get("system", []):
|
292
404
|
system_messages = [{"role": "system", "content": system}]
|
293
405
|
else:
|
@@ -304,15 +416,37 @@ class _BedrockRuntimeExtension(_AwsSdkExtension):
|
|
304
416
|
system_messages = [{"role": "system", "content": content}]
|
305
417
|
|
306
418
|
messages = decoded_body.get("messages", [])
|
419
|
+
# if no messages interface, convert to messages format from generic API
|
307
420
|
if not messages:
|
308
|
-
|
309
|
-
if
|
310
|
-
messages =
|
311
|
-
|
312
|
-
|
421
|
+
model_id = self._call_context.params.get(_MODEL_ID_KEY)
|
422
|
+
if "amazon.titan" in model_id:
|
423
|
+
messages = self._get_messages_from_input_text(
|
424
|
+
decoded_body, "inputText"
|
425
|
+
)
|
426
|
+
elif "cohere.command-r" in model_id:
|
427
|
+
# chat_history can be converted to messages; for now, just use message
|
428
|
+
messages = self._get_messages_from_input_text(
|
429
|
+
decoded_body, "message"
|
430
|
+
)
|
431
|
+
elif (
|
432
|
+
"cohere.command" in model_id
|
433
|
+
or "meta.llama" in model_id
|
434
|
+
or "mistral.mistral" in model_id
|
435
|
+
):
|
436
|
+
messages = self._get_messages_from_input_text(
|
437
|
+
decoded_body, "prompt"
|
438
|
+
)
|
313
439
|
|
314
440
|
return system_messages + messages
|
315
441
|
|
442
|
+
# pylint: disable=no-self-use
|
443
|
+
def _get_messages_from_input_text(
|
444
|
+
self, decoded_body: dict[str, Any], input_name: str
|
445
|
+
):
|
446
|
+
if input_text := decoded_body.get(input_name):
|
447
|
+
return [{"role": "user", "content": [{"text": input_text}]}]
|
448
|
+
return []
|
449
|
+
|
316
450
|
def before_service_call(
|
317
451
|
self, span: Span, instrumentor_context: _BotocoreInstrumentorContext
|
318
452
|
):
|
@@ -439,6 +573,22 @@ class _BedrockRuntimeExtension(_AwsSdkExtension):
|
|
439
573
|
self._handle_anthropic_claude_response(
|
440
574
|
span, response_body, instrumentor_context, capture_content
|
441
575
|
)
|
576
|
+
elif "cohere.command-r" in model_id:
|
577
|
+
self._handle_cohere_command_r_response(
|
578
|
+
span, response_body, instrumentor_context, capture_content
|
579
|
+
)
|
580
|
+
elif "cohere.command" in model_id:
|
581
|
+
self._handle_cohere_command_response(
|
582
|
+
span, response_body, instrumentor_context, capture_content
|
583
|
+
)
|
584
|
+
elif "meta.llama" in model_id:
|
585
|
+
self._handle_meta_llama_response(
|
586
|
+
span, response_body, instrumentor_context, capture_content
|
587
|
+
)
|
588
|
+
elif "mistral" in model_id:
|
589
|
+
self._handle_mistral_ai_response(
|
590
|
+
span, response_body, instrumentor_context, capture_content
|
591
|
+
)
|
442
592
|
except json.JSONDecodeError:
|
443
593
|
_logger.debug("Error: Unable to parse the response body as JSON")
|
444
594
|
except Exception as exc: # pylint: disable=broad-exception-caught
|
@@ -725,6 +875,106 @@ class _BedrockRuntimeExtension(_AwsSdkExtension):
|
|
725
875
|
output_tokens, output_attributes
|
726
876
|
)
|
727
877
|
|
878
|
+
def _handle_cohere_command_r_response(
|
879
|
+
self,
|
880
|
+
span: Span,
|
881
|
+
response_body: dict[str, Any],
|
882
|
+
instrumentor_context: _BotocoreInstrumentorContext,
|
883
|
+
capture_content: bool,
|
884
|
+
):
|
885
|
+
if "text" in response_body:
|
886
|
+
span.set_attribute(
|
887
|
+
GEN_AI_USAGE_OUTPUT_TOKENS,
|
888
|
+
estimate_token_count(response_body["text"]),
|
889
|
+
)
|
890
|
+
if "finish_reason" in response_body:
|
891
|
+
span.set_attribute(
|
892
|
+
GEN_AI_RESPONSE_FINISH_REASONS,
|
893
|
+
[response_body["finish_reason"]],
|
894
|
+
)
|
895
|
+
|
896
|
+
event_logger = instrumentor_context.event_logger
|
897
|
+
choice = _Choice.from_invoke_cohere_command_r(
|
898
|
+
response_body, capture_content
|
899
|
+
)
|
900
|
+
event_logger.emit(choice.to_choice_event())
|
901
|
+
|
902
|
+
def _handle_cohere_command_response(
|
903
|
+
self,
|
904
|
+
span: Span,
|
905
|
+
response_body: dict[str, Any],
|
906
|
+
instrumentor_context: _BotocoreInstrumentorContext,
|
907
|
+
capture_content: bool,
|
908
|
+
):
|
909
|
+
if "generations" in response_body and response_body["generations"]:
|
910
|
+
generations = response_body["generations"][0]
|
911
|
+
if "text" in generations:
|
912
|
+
span.set_attribute(
|
913
|
+
GEN_AI_USAGE_OUTPUT_TOKENS,
|
914
|
+
estimate_token_count(generations["text"]),
|
915
|
+
)
|
916
|
+
if "finish_reason" in generations:
|
917
|
+
span.set_attribute(
|
918
|
+
GEN_AI_RESPONSE_FINISH_REASONS,
|
919
|
+
[generations["finish_reason"]],
|
920
|
+
)
|
921
|
+
|
922
|
+
event_logger = instrumentor_context.event_logger
|
923
|
+
choice = _Choice.from_invoke_cohere_command(
|
924
|
+
response_body, capture_content
|
925
|
+
)
|
926
|
+
event_logger.emit(choice.to_choice_event())
|
927
|
+
|
928
|
+
def _handle_meta_llama_response(
|
929
|
+
self,
|
930
|
+
span: Span,
|
931
|
+
response_body: dict[str, Any],
|
932
|
+
instrumentor_context: _BotocoreInstrumentorContext,
|
933
|
+
capture_content: bool,
|
934
|
+
):
|
935
|
+
if "prompt_token_count" in response_body:
|
936
|
+
span.set_attribute(
|
937
|
+
GEN_AI_USAGE_INPUT_TOKENS, response_body["prompt_token_count"]
|
938
|
+
)
|
939
|
+
if "generation_token_count" in response_body:
|
940
|
+
span.set_attribute(
|
941
|
+
GEN_AI_USAGE_OUTPUT_TOKENS,
|
942
|
+
response_body["generation_token_count"],
|
943
|
+
)
|
944
|
+
if "stop_reason" in response_body:
|
945
|
+
span.set_attribute(
|
946
|
+
GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stop_reason"]]
|
947
|
+
)
|
948
|
+
|
949
|
+
event_logger = instrumentor_context.event_logger
|
950
|
+
choice = _Choice.from_invoke_meta_llama(response_body, capture_content)
|
951
|
+
event_logger.emit(choice.to_choice_event())
|
952
|
+
|
953
|
+
def _handle_mistral_ai_response(
|
954
|
+
self,
|
955
|
+
span: Span,
|
956
|
+
response_body: dict[str, Any],
|
957
|
+
instrumentor_context: _BotocoreInstrumentorContext,
|
958
|
+
capture_content: bool,
|
959
|
+
):
|
960
|
+
if "outputs" in response_body:
|
961
|
+
outputs = response_body["outputs"][0]
|
962
|
+
if "text" in outputs:
|
963
|
+
span.set_attribute(
|
964
|
+
GEN_AI_USAGE_OUTPUT_TOKENS,
|
965
|
+
estimate_token_count(outputs["text"]),
|
966
|
+
)
|
967
|
+
if "stop_reason" in outputs:
|
968
|
+
span.set_attribute(
|
969
|
+
GEN_AI_RESPONSE_FINISH_REASONS, [outputs["stop_reason"]]
|
970
|
+
)
|
971
|
+
|
972
|
+
event_logger = instrumentor_context.event_logger
|
973
|
+
choice = _Choice.from_invoke_mistral_mistral(
|
974
|
+
response_body, capture_content
|
975
|
+
)
|
976
|
+
event_logger.emit(choice.to_choice_event())
|
977
|
+
|
728
978
|
def on_error(
|
729
979
|
self,
|
730
980
|
span: Span,
|
@@ -15,6 +15,7 @@
|
|
15
15
|
from __future__ import annotations
|
16
16
|
|
17
17
|
import json
|
18
|
+
import math
|
18
19
|
from os import environ
|
19
20
|
from typing import Any, Callable, Dict, Iterator, Sequence, Union
|
20
21
|
|
@@ -358,6 +359,12 @@ class InvokeModelWithResponseStreamWrapper(ObjectProxy):
|
|
358
359
|
return
|
359
360
|
|
360
361
|
|
362
|
+
def estimate_token_count(message: str) -> int:
|
363
|
+
# https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-prepare.html
|
364
|
+
# use 6 chars per token to approximate token count when not provided in response body
|
365
|
+
return math.ceil(len(message) / 6)
|
366
|
+
|
367
|
+
|
361
368
|
def genai_capture_message_content() -> bool:
|
362
369
|
capture_content = environ.get(
|
363
370
|
OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, "false"
|
@@ -519,6 +526,48 @@ class _Choice:
|
|
519
526
|
message["content"] = response["content"]
|
520
527
|
return cls(message, response["stop_reason"], index=0)
|
521
528
|
|
529
|
+
@classmethod
|
530
|
+
def from_invoke_cohere_command_r(
|
531
|
+
cls, response: dict[str, Any], capture_content: bool
|
532
|
+
) -> _Choice:
|
533
|
+
if capture_content:
|
534
|
+
message = {"content": response["text"]}
|
535
|
+
else:
|
536
|
+
message = {}
|
537
|
+
return cls(message, response["finish_reason"], index=0)
|
538
|
+
|
539
|
+
@classmethod
|
540
|
+
def from_invoke_cohere_command(
|
541
|
+
cls, response: dict[str, Any], capture_content: bool
|
542
|
+
) -> _Choice:
|
543
|
+
result = response["generations"][0]
|
544
|
+
if capture_content:
|
545
|
+
message = {"content": result["text"]}
|
546
|
+
else:
|
547
|
+
message = {}
|
548
|
+
return cls(message, result["finish_reason"], index=0)
|
549
|
+
|
550
|
+
@classmethod
|
551
|
+
def from_invoke_meta_llama(
|
552
|
+
cls, response: dict[str, Any], capture_content: bool
|
553
|
+
) -> _Choice:
|
554
|
+
if capture_content:
|
555
|
+
message = {"content": response["generation"]}
|
556
|
+
else:
|
557
|
+
message = {}
|
558
|
+
return cls(message, response["stop_reason"], index=0)
|
559
|
+
|
560
|
+
@classmethod
|
561
|
+
def from_invoke_mistral_mistral(
|
562
|
+
cls, response: dict[str, Any], capture_content: bool
|
563
|
+
) -> _Choice:
|
564
|
+
result = response["outputs"][0]
|
565
|
+
if capture_content:
|
566
|
+
message = {"content": result["text"]}
|
567
|
+
else:
|
568
|
+
message = {}
|
569
|
+
return cls(message, result["stop_reason"], index=0)
|
570
|
+
|
522
571
|
def _to_body_dict(self) -> dict[str, Any]:
|
523
572
|
return {
|
524
573
|
"finish_reason": self.finish_reason,
|
opentelemetry_instrumentation_botocore-0.54b0/src/opentelemetry/instrumentation/botocore/utils.py
ADDED
@@ -0,0 +1,31 @@
|
|
1
|
+
# Copyright The OpenTelemetry Authors
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
from __future__ import annotations
|
15
|
+
|
16
|
+
from urllib.parse import urlparse
|
17
|
+
|
18
|
+
from opentelemetry.semconv._incubating.attributes import (
|
19
|
+
server_attributes as ServerAttributes,
|
20
|
+
)
|
21
|
+
from opentelemetry.util.types import AttributeValue
|
22
|
+
|
23
|
+
|
24
|
+
def get_server_attributes(endpoint_url: str) -> dict[str, AttributeValue]:
    """Extract ``server.*`` attributes from an AWS endpoint URL.

    Args:
        endpoint_url: The botocore client endpoint, e.g.
            ``https://bedrock-runtime.us-east-1.amazonaws.com``.

    Returns:
        A dict containing ``server.address`` and ``server.port`` when the
        URL has a hostname; an empty dict otherwise.
    """
    parsed = urlparse(endpoint_url)
    attributes: dict[str, AttributeValue] = {}
    if parsed.hostname:
        attributes[ServerAttributes.SERVER_ADDRESS] = parsed.hostname
        # When no port is explicit, fall back to the scheme's default:
        # 80 for plain http (e.g. local emulators), 443 for https.
        default_port = 80 if parsed.scheme == "http" else 443
        attributes[ServerAttributes.SERVER_PORT] = parsed.port or default_port
    return attributes
|
@@ -1,7 +1,8 @@
|
|
1
1
|
## Recording calls
|
2
2
|
|
3
3
|
If you need to record calls you may need to export authentication variables and the default region as environment
|
4
|
-
variables in order to have the code work properly.
|
4
|
+
variables for the code to work properly. The recorded tests assume the us-east-1 region, so ensure that
|
5
|
+
AWS_DEFAULT_REGION is set accordingly when recording new calls.
|
5
6
|
Since tox blocks environment variables by default you need to override its configuration to let them pass:
|
6
7
|
|
7
8
|
```
|
@@ -15,6 +15,7 @@
|
|
15
15
|
from __future__ import annotations
|
16
16
|
|
17
17
|
import json
|
18
|
+
import math
|
18
19
|
from typing import Any
|
19
20
|
|
20
21
|
from botocore.response import StreamingBody
|
@@ -31,6 +32,9 @@ from opentelemetry.semconv._incubating.attributes import (
|
|
31
32
|
from opentelemetry.semconv._incubating.attributes import (
|
32
33
|
gen_ai_attributes as GenAIAttributes,
|
33
34
|
)
|
35
|
+
from opentelemetry.semconv._incubating.attributes import (
|
36
|
+
server_attributes as ServerAttributes,
|
37
|
+
)
|
34
38
|
from opentelemetry.semconv._incubating.attributes.error_attributes import (
|
35
39
|
ERROR_TYPE,
|
36
40
|
)
|
@@ -40,7 +44,7 @@ from opentelemetry.semconv._incubating.metrics.gen_ai_metrics import (
|
|
40
44
|
)
|
41
45
|
|
42
46
|
|
43
|
-
# pylint: disable=too-many-branches, too-many-locals
|
47
|
+
# pylint: disable=too-many-branches, too-many-locals, too-many-statements
|
44
48
|
def assert_completion_attributes_from_streaming_body(
|
45
49
|
span: ReadableSpan,
|
46
50
|
request_model: str,
|
@@ -54,6 +58,7 @@ def assert_completion_attributes_from_streaming_body(
|
|
54
58
|
input_tokens = None
|
55
59
|
output_tokens = None
|
56
60
|
finish_reason = None
|
61
|
+
request_prompt = "Say this is a test"
|
57
62
|
if response is not None:
|
58
63
|
original_body = response["body"]
|
59
64
|
body_content = original_body.read()
|
@@ -89,6 +94,33 @@ def assert_completion_attributes_from_streaming_body(
|
|
89
94
|
finish_reason = (response["stop_reason"],)
|
90
95
|
else:
|
91
96
|
finish_reason = None
|
97
|
+
elif "cohere.command-r" in request_model:
|
98
|
+
input_tokens = math.ceil(len(request_prompt) / 6)
|
99
|
+
text = response.get("text")
|
100
|
+
if text:
|
101
|
+
output_tokens = math.ceil(len(text) / 6)
|
102
|
+
finish_reason = (response["finish_reason"],)
|
103
|
+
elif "cohere.command" in request_model:
|
104
|
+
input_tokens = math.ceil(len(request_prompt) / 6)
|
105
|
+
generations = response.get("generations")
|
106
|
+
if generations:
|
107
|
+
first_generation = generations[0]
|
108
|
+
output_tokens = math.ceil(len(first_generation["text"]) / 6)
|
109
|
+
finish_reason = (first_generation["finish_reason"],)
|
110
|
+
elif "meta.llama" in request_model:
|
111
|
+
if "prompt_token_count" in response:
|
112
|
+
input_tokens = response.get("prompt_token_count")
|
113
|
+
if "generation_token_count" in response:
|
114
|
+
output_tokens = response.get("generation_token_count")
|
115
|
+
if "stop_reason" in response:
|
116
|
+
finish_reason = (response["stop_reason"],)
|
117
|
+
elif "mistral.mistral" in request_model:
|
118
|
+
input_tokens = math.ceil(len(request_prompt) / 6)
|
119
|
+
outputs = response.get("outputs")
|
120
|
+
if outputs:
|
121
|
+
first_output = outputs[0]
|
122
|
+
output_tokens = math.ceil(len(first_output["text"]) / 6)
|
123
|
+
finish_reason = (first_output["stop_reason"],)
|
92
124
|
|
93
125
|
return assert_all_attributes(
|
94
126
|
span,
|
@@ -192,6 +224,8 @@ def assert_all_attributes(
|
|
192
224
|
request_temperature: int | None = None,
|
193
225
|
request_max_tokens: int | None = None,
|
194
226
|
request_stop_sequences: tuple[str] | None = None,
|
227
|
+
server_address: str = "bedrock-runtime.us-east-1.amazonaws.com",
|
228
|
+
server_port: int = 443,
|
195
229
|
):
|
196
230
|
assert span.name == f"{operation_name} {request_model}"
|
197
231
|
assert (
|
@@ -206,6 +240,9 @@ def assert_all_attributes(
|
|
206
240
|
request_model == span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]
|
207
241
|
)
|
208
242
|
|
243
|
+
assert server_address == span.attributes[ServerAttributes.SERVER_ADDRESS]
|
244
|
+
assert server_port == span.attributes[ServerAttributes.SERVER_PORT]
|
245
|
+
|
209
246
|
assert_equal_or_not_present(
|
210
247
|
input_tokens, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, span
|
211
248
|
)
|
@@ -274,7 +311,12 @@ def assert_message_in_logs(log, event_name, expected_content, parent_span):
|
|
274
311
|
|
275
312
|
|
276
313
|
def assert_all_metric_attributes(
|
277
|
-
data_point,
|
314
|
+
data_point,
|
315
|
+
operation_name: str,
|
316
|
+
model: str,
|
317
|
+
error_type: str | None = None,
|
318
|
+
server_address: str = "bedrock-runtime.us-east-1.amazonaws.com",
|
319
|
+
server_port: int = 443,
|
278
320
|
):
|
279
321
|
assert GenAIAttributes.GEN_AI_OPERATION_NAME in data_point.attributes
|
280
322
|
assert (
|
@@ -289,6 +331,14 @@ def assert_all_metric_attributes(
|
|
289
331
|
assert GenAIAttributes.GEN_AI_REQUEST_MODEL in data_point.attributes
|
290
332
|
assert data_point.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == model
|
291
333
|
|
334
|
+
assert ServerAttributes.SERVER_ADDRESS in data_point.attributes
|
335
|
+
assert (
|
336
|
+
data_point.attributes[ServerAttributes.SERVER_ADDRESS]
|
337
|
+
== server_address
|
338
|
+
)
|
339
|
+
assert ServerAttributes.SERVER_PORT in data_point.attributes
|
340
|
+
assert data_point.attributes[ServerAttributes.SERVER_PORT] == server_port
|
341
|
+
|
292
342
|
if error_type is not None:
|
293
343
|
assert ERROR_TYPE in data_point.attributes
|
294
344
|
assert data_point.attributes[ERROR_TYPE] == error_type
|