opentelemetry-instrumentation-botocore 0.50b0__py3-none-any.whl → 0.52b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- opentelemetry/instrumentation/botocore/__init__.py +151 -13
- opentelemetry/instrumentation/botocore/environment_variables.py +3 -0
- opentelemetry/instrumentation/botocore/extensions/__init__.py +5 -0
- opentelemetry/instrumentation/botocore/extensions/bedrock.py +756 -0
- opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py +514 -0
- opentelemetry/instrumentation/botocore/extensions/dynamodb.py +10 -2
- opentelemetry/instrumentation/botocore/extensions/lmbd.py +4 -1
- opentelemetry/instrumentation/botocore/extensions/sns.py +15 -13
- opentelemetry/instrumentation/botocore/extensions/sqs.py +7 -1
- opentelemetry/instrumentation/botocore/extensions/types.py +61 -4
- opentelemetry/instrumentation/botocore/version.py +1 -1
- {opentelemetry_instrumentation_botocore-0.50b0.dist-info → opentelemetry_instrumentation_botocore-0.52b0.dist-info}/METADATA +9 -6
- opentelemetry_instrumentation_botocore-0.52b0.dist-info/RECORD +18 -0
- {opentelemetry_instrumentation_botocore-0.50b0.dist-info → opentelemetry_instrumentation_botocore-0.52b0.dist-info}/WHEEL +1 -1
- opentelemetry_instrumentation_botocore-0.50b0.dist-info/RECORD +0 -15
- {opentelemetry_instrumentation_botocore-0.50b0.dist-info → opentelemetry_instrumentation_botocore-0.52b0.dist-info}/entry_points.txt +0 -0
- {opentelemetry_instrumentation_botocore-0.50b0.dist-info → opentelemetry_instrumentation_botocore-0.52b0.dist-info}/licenses/LICENSE +0 -0
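The Bedrock support added in this release captures prompt/completion text and tool-call arguments on the emitted gen_ai.* events only when the new OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT environment variable (see environment_variables.py) is set to "true". A minimal usage sketch, not part of this diff, assuming exporters and the event logger are configured elsewhere; the model ID and prompt are illustrative:

# Sketch (not from the diff): enable botocore instrumentation with GenAI
# message-content capture turned on.
import os

import boto3

from opentelemetry.instrumentation.botocore import BotocoreInstrumentor

# Off by default: when unset, events are emitted without prompt/completion content.
os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = "true"

BotocoreInstrumentor().instrument()

client = boto3.client("bedrock-runtime")
client.converse(
    modelId="amazon.titan-text-lite-v1",  # illustrative model ID
    messages=[{"role": "user", "content": [{"text": "Hello"}]}],
)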
opentelemetry/instrumentation/botocore/extensions/bedrock_utils.py (new file)
@@ -0,0 +1,514 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import json
+from os import environ
+from typing import Any, Callable, Dict, Iterator, Sequence, Union
+
+from botocore.eventstream import EventStream, EventStreamError
+from wrapt import ObjectProxy
+
+from opentelemetry._events import Event
+from opentelemetry.instrumentation.botocore.environment_variables import (
+    OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT,
+)
+from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
+    GEN_AI_SYSTEM,
+    GenAiSystemValues,
+)
+
+_StreamDoneCallableT = Callable[[Dict[str, Union[int, str]]], None]
+_StreamErrorCallableT = Callable[[Exception], None]
+
+
+def _decode_tool_use(tool_use):
+    # input get sent encoded in json
+    if "input" in tool_use:
+        try:
+            tool_use["input"] = json.loads(tool_use["input"])
+        except json.JSONDecodeError:
+            pass
+    return tool_use
+
+
+# pylint: disable=abstract-method
+class ConverseStreamWrapper(ObjectProxy):
+    """Wrapper for botocore.eventstream.EventStream"""
+
+    def __init__(
+        self,
+        stream: EventStream,
+        stream_done_callback: _StreamDoneCallableT,
+        stream_error_callback: _StreamErrorCallableT,
+    ):
+        super().__init__(stream)
+
+        self._stream_done_callback = stream_done_callback
+        self._stream_error_callback = stream_error_callback
+        # accumulating things in the same shape of non-streaming version
+        # {"usage": {"inputTokens": 0, "outputTokens": 0}, "stopReason": "finish", "output": {"message": {"role": "", "content": [{"text": ""}]}
+        self._response = {}
+        self._message = None
+        self._content_block = {}
+        self._record_message = False
+
+    def __iter__(self):
+        try:
+            for event in self.__wrapped__:
+                self._process_event(event)
+                yield event
+        except EventStreamError as exc:
+            self._stream_error_callback(exc)
+            raise
+
+    def _process_event(self, event):
+        # pylint: disable=too-many-branches
+        if "messageStart" in event:
+            # {'messageStart': {'role': 'assistant'}}
+            if event["messageStart"].get("role") == "assistant":
+                self._record_message = True
+                self._message = {"role": "assistant", "content": []}
+            return
+
+        if "contentBlockStart" in event:
+            # {'contentBlockStart': {'start': {'toolUse': {'toolUseId': 'id', 'name': 'func_name'}}, 'contentBlockIndex': 1}}
+            start = event["contentBlockStart"].get("start", {})
+            if "toolUse" in start:
+                tool_use = _decode_tool_use(start["toolUse"])
+                self._content_block = {"toolUse": tool_use}
+            return
+
+        if "contentBlockDelta" in event:
+            # {'contentBlockDelta': {'delta': {'text': "Hello"}, 'contentBlockIndex': 0}}
+            # {'contentBlockDelta': {'delta': {'toolUse': {'input': '{"location":"Seattle"}'}}, 'contentBlockIndex': 1}}
+            if self._record_message:
+                delta = event["contentBlockDelta"].get("delta", {})
+                if "text" in delta:
+                    self._content_block.setdefault("text", "")
+                    self._content_block["text"] += delta["text"]
+                elif "toolUse" in delta:
+                    tool_use = _decode_tool_use(delta["toolUse"])
+                    self._content_block["toolUse"].update(tool_use)
+            return
+
+        if "contentBlockStop" in event:
+            # {'contentBlockStop': {'contentBlockIndex': 0}}
+            if self._record_message:
+                self._message["content"].append(self._content_block)
+                self._content_block = {}
+            return
+
+        if "messageStop" in event:
+            # {'messageStop': {'stopReason': 'end_turn'}}
+            if stop_reason := event["messageStop"].get("stopReason"):
+                self._response["stopReason"] = stop_reason
+
+            if self._record_message:
+                self._response["output"] = {"message": self._message}
+                self._record_message = False
+                self._message = None
+
+            return
+
+        if "metadata" in event:
+            # {'metadata': {'usage': {'inputTokens': 12, 'outputTokens': 15, 'totalTokens': 27}, 'metrics': {'latencyMs': 2980}}}
+            if usage := event["metadata"].get("usage"):
+                self._response["usage"] = {}
+                if input_tokens := usage.get("inputTokens"):
+                    self._response["usage"]["inputTokens"] = input_tokens
+
+                if output_tokens := usage.get("outputTokens"):
+                    self._response["usage"]["outputTokens"] = output_tokens
+
+            self._stream_done_callback(self._response)
+
+            return
+
+
+# pylint: disable=abstract-method
+class InvokeModelWithResponseStreamWrapper(ObjectProxy):
+    """Wrapper for botocore.eventstream.EventStream"""
+
+    def __init__(
+        self,
+        stream: EventStream,
+        stream_done_callback: _StreamDoneCallableT,
+        stream_error_callback: _StreamErrorCallableT,
+        model_id: str,
+    ):
+        super().__init__(stream)
+
+        self._stream_done_callback = stream_done_callback
+        self._stream_error_callback = stream_error_callback
+        self._model_id = model_id
+
+        # accumulating things in the same shape of the Converse API
+        # {"usage": {"inputTokens": 0, "outputTokens": 0}, "stopReason": "finish", "output": {"message": {"role": "", "content": [{"text": ""}]}
+        self._response = {}
+        self._message = None
+        self._content_block = {}
+        self._tool_json_input_buf = ""
+        self._record_message = False
+
+    def __iter__(self):
+        try:
+            for event in self.__wrapped__:
+                self._process_event(event)
+                yield event
+        except EventStreamError as exc:
+            self._stream_error_callback(exc)
+            raise
+
+    def _process_event(self, event):
+        if "chunk" not in event:
+            return
+
+        json_bytes = event["chunk"].get("bytes", b"")
+        decoded = json_bytes.decode("utf-8")
+        try:
+            chunk = json.loads(decoded)
+        except json.JSONDecodeError:
+            return
+
+        if "amazon.titan" in self._model_id:
+            self._process_amazon_titan_chunk(chunk)
+        elif "amazon.nova" in self._model_id:
+            self._process_amazon_nova_chunk(chunk)
+        elif "anthropic.claude" in self._model_id:
+            self._process_anthropic_claude_chunk(chunk)
+
+    def _process_invocation_metrics(self, invocation_metrics):
+        self._response["usage"] = {}
+        if input_tokens := invocation_metrics.get("inputTokenCount"):
+            self._response["usage"]["inputTokens"] = input_tokens
+
+        if output_tokens := invocation_metrics.get("outputTokenCount"):
+            self._response["usage"]["outputTokens"] = output_tokens
+
+    def _process_amazon_titan_chunk(self, chunk):
+        if (stop_reason := chunk.get("completionReason")) is not None:
+            self._response["stopReason"] = stop_reason
+
+        if invocation_metrics := chunk.get("amazon-bedrock-invocationMetrics"):
+            # "amazon-bedrock-invocationMetrics":{
+            #     "inputTokenCount":9,"outputTokenCount":128,"invocationLatency":3569,"firstByteLatency":2180
+            # }
+            self._process_invocation_metrics(invocation_metrics)
+
+            # transform the shape of the message to match other models
+            self._response["output"] = {
+                "message": {"content": [{"text": chunk["outputText"]}]}
+            }
+            self._stream_done_callback(self._response)
+
+    def _process_amazon_nova_chunk(self, chunk):
+        # pylint: disable=too-many-branches
+        # TODO: handle tool calls!
+        if "messageStart" in chunk:
+            # {'messageStart': {'role': 'assistant'}}
+            if chunk["messageStart"].get("role") == "assistant":
+                self._record_message = True
+                self._message = {"role": "assistant", "content": []}
+            return
+
+        if "contentBlockDelta" in chunk:
+            # {'contentBlockDelta': {'delta': {'text': "Hello"}, 'contentBlockIndex': 0}}
+            if self._record_message:
+                delta = chunk["contentBlockDelta"].get("delta", {})
+                if "text" in delta:
+                    self._content_block.setdefault("text", "")
+                    self._content_block["text"] += delta["text"]
+            return
+
+        if "contentBlockStop" in chunk:
+            # {'contentBlockStop': {'contentBlockIndex': 0}}
+            return
+
+        if "messageStop" in chunk:
+            # {'messageStop': {'stopReason': 'end_turn'}}
+            if stop_reason := chunk["messageStop"].get("stopReason"):
+                self._response["stopReason"] = stop_reason
+
+            if self._record_message:
+                self._message["content"].append(self._content_block)
+                self._content_block = {}
+                self._response["output"] = {"message": self._message}
+                self._record_message = False
+                self._message = None
+            return
+
+        if "metadata" in chunk:
+            # {'metadata': {'usage': {'inputTokens': 8, 'outputTokens': 117}, 'metrics': {}, 'trace': {}}}
+            if usage := chunk["metadata"].get("usage"):
+                self._response["usage"] = {}
+                if input_tokens := usage.get("inputTokens"):
+                    self._response["usage"]["inputTokens"] = input_tokens
+
+                if output_tokens := usage.get("outputTokens"):
+                    self._response["usage"]["outputTokens"] = output_tokens
+
+            self._stream_done_callback(self._response)
+            return
+
+    def _process_anthropic_claude_chunk(self, chunk):
+        # pylint: disable=too-many-return-statements,too-many-branches
+        if not (message_type := chunk.get("type")):
+            return
+
+        if message_type == "message_start":
+            # {'type': 'message_start', 'message': {'id': 'id', 'type': 'message', 'role': 'assistant', 'model': 'claude-2.0', 'content': [], 'stop_reason': None, 'stop_sequence': None, 'usage': {'input_tokens': 18, 'output_tokens': 1}}}
+            if chunk.get("message", {}).get("role") == "assistant":
+                self._record_message = True
+                message = chunk["message"]
+                self._message = {
+                    "role": message["role"],
+                    "content": message.get("content", []),
+                }
+            return
+
+        if message_type == "content_block_start":
+            # {'type': 'content_block_start', 'index': 0, 'content_block': {'type': 'text', 'text': ''}}
+            # {'type': 'content_block_start', 'index': 1, 'content_block': {'type': 'tool_use', 'id': 'id', 'name': 'func_name', 'input': {}}}
+            if self._record_message:
+                block = chunk.get("content_block", {})
+                if block.get("type") == "text":
+                    self._content_block = block
+                elif block.get("type") == "tool_use":
+                    self._content_block = block
+            return
+
+        if message_type == "content_block_delta":
+            # {'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': 'Here'}}
+            # {'type': 'content_block_delta', 'index': 1, 'delta': {'type': 'input_json_delta', 'partial_json': ''}}
+            if self._record_message:
+                delta = chunk.get("delta", {})
+                if delta.get("type") == "text_delta":
+                    self._content_block["text"] += delta.get("text", "")
+                elif delta.get("type") == "input_json_delta":
+                    self._tool_json_input_buf += delta.get("partial_json", "")
+            return
+
+        if message_type == "content_block_stop":
+            # {'type': 'content_block_stop', 'index': 0}
+            if self._tool_json_input_buf:
+                self._content_block["input"] = self._tool_json_input_buf
+            self._message["content"].append(
+                _decode_tool_use(self._content_block)
+            )
+            self._content_block = {}
+            self._tool_json_input_buf = ""
+            return
+
+        if message_type == "message_delta":
+            # {'type': 'message_delta', 'delta': {'stop_reason': 'end_turn', 'stop_sequence': None}, 'usage': {'output_tokens': 123}}
+            if (
+                stop_reason := chunk.get("delta", {}).get("stop_reason")
+            ) is not None:
+                self._response["stopReason"] = stop_reason
+            return
+
+        if message_type == "message_stop":
+            # {'type': 'message_stop', 'amazon-bedrock-invocationMetrics': {'inputTokenCount': 18, 'outputTokenCount': 123, 'invocationLatency': 5250, 'firstByteLatency': 290}}
+            if invocation_metrics := chunk.get(
+                "amazon-bedrock-invocationMetrics"
+            ):
+                self._process_invocation_metrics(invocation_metrics)
+
+            if self._record_message:
+                self._response["output"] = {"message": self._message}
+                self._record_message = False
+                self._message = None
+
+            self._stream_done_callback(self._response)
+            return
+
+
+def genai_capture_message_content() -> bool:
+    capture_content = environ.get(
+        OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, "false"
+    )
+    return capture_content.lower() == "true"
+
+
+def extract_tool_calls(
+    message: dict[str, Any], capture_content: bool
+) -> Sequence[Dict[str, Any]] | None:
+    content = message.get("content")
+    if not content:
+        return None
+
+    tool_uses = [item["toolUse"] for item in content if "toolUse" in item]
+    if not tool_uses:
+        tool_uses = [
+            item for item in content if item.get("type") == "tool_use"
+        ]
+        tool_id_key = "id"
+    else:
+        tool_id_key = "toolUseId"
+
+    if not tool_uses:
+        return None
+
+    tool_calls = []
+    for tool_use in tool_uses:
+        tool_call = {"type": "function"}
+        if call_id := tool_use.get(tool_id_key):
+            tool_call["id"] = call_id
+
+        if function_name := tool_use.get("name"):
+            tool_call["function"] = {"name": function_name}
+
+        if (function_input := tool_use.get("input")) and capture_content:
+            tool_call.setdefault("function", {})
+            tool_call["function"]["arguments"] = function_input
+
+        tool_calls.append(tool_call)
+    return tool_calls
+
+
+def extract_tool_results(
+    message: dict[str, Any], capture_content: bool
+) -> Iterator[Dict[str, Any]]:
+    content = message.get("content")
+    if not content:
+        return
+
+    # langchain sends content as string with InvokeModel and Anthropic Claude
+    if isinstance(content, str):
+        return
+
+    # Converse format
+    tool_results = [
+        item["toolResult"] for item in content if "toolResult" in item
+    ]
+    # InvokeModel anthropic.claude format
+    if not tool_results:
+        tool_results = [
+            item for item in content if item.get("type") == "tool_result"
+        ]
+        tool_id_key = "tool_use_id"
+    else:
+        tool_id_key = "toolUseId"
+
+    if not tool_results:
+        return
+
+    # if we have a user message with toolResult keys we need to send
+    # one tool event for each part of the content
+    for tool_result in tool_results:
+        body = {}
+        if tool_id := tool_result.get(tool_id_key):
+            body["id"] = tool_id
+        tool_content = tool_result.get("content")
+        if capture_content and tool_content:
+            body["content"] = tool_content
+
+        yield body
+
+
+def message_to_event(
+    message: dict[str, Any], capture_content: bool
+) -> Iterator[Event]:
+    attributes = {GEN_AI_SYSTEM: GenAiSystemValues.AWS_BEDROCK.value}
+    role = message.get("role")
+    content = message.get("content")
+
+    body = {}
+    if capture_content and content:
+        body["content"] = content
+    if role == "assistant":
+        # the assistant message contains both tool calls and model thinking content
+        if tool_calls := extract_tool_calls(message, capture_content):
+            body["tool_calls"] = tool_calls
+    elif role == "user":
+        # in case of tool calls we send one tool event for tool call and one for the user event
+        for tool_body in extract_tool_results(message, capture_content):
+            yield Event(
+                name="gen_ai.tool.message",
+                attributes=attributes,
+                body=tool_body,
+            )
+
+    yield Event(
+        name=f"gen_ai.{role}.message",
+        attributes=attributes,
+        body=body if body else None,
+    )
+
+
+class _Choice:
+    def __init__(
+        self, message: dict[str, Any], finish_reason: str, index: int
+    ):
+        self.message = message
+        self.finish_reason = finish_reason
+        self.index = index
+
+    @classmethod
+    def from_converse(
+        cls, response: dict[str, Any], capture_content: bool
+    ) -> _Choice:
+        orig_message = response["output"]["message"]
+        if role := orig_message.get("role"):
+            message = {"role": role}
+        else:
+            # amazon.titan does not serialize the role
+            message = {}
+
+        if tool_calls := extract_tool_calls(orig_message, capture_content):
+            message["tool_calls"] = tool_calls
+        elif capture_content:
+            message["content"] = orig_message["content"]
+
+        return cls(message, response["stopReason"], index=0)
+
+    @classmethod
+    def from_invoke_amazon_titan(
+        cls, response: dict[str, Any], capture_content: bool
+    ) -> _Choice:
+        result = response["results"][0]
+        if capture_content:
+            message = {"content": result["outputText"]}
+        else:
+            message = {}
+        return cls(message, result["completionReason"], index=0)

+    @classmethod
+    def from_invoke_anthropic_claude(
+        cls, response: dict[str, Any], capture_content: bool
+    ) -> _Choice:
+        message = {"role": response["role"]}
+        if tool_calls := extract_tool_calls(response, capture_content):
+            message["tool_calls"] = tool_calls
+        elif capture_content:
+            message["content"] = response["content"]
+        return cls(message, response["stop_reason"], index=0)
+
+    def _to_body_dict(self) -> dict[str, Any]:
+        return {
+            "finish_reason": self.finish_reason,
+            "index": self.index,
+            "message": self.message,
+        }
+
+    def to_choice_event(self, **event_kwargs) -> Event:
+        attributes = {GEN_AI_SYSTEM: GenAiSystemValues.AWS_BEDROCK.value}
+        return Event(
+            name="gen_ai.choice",
+            attributes=attributes,
+            body=self._to_body_dict(),
+            **event_kwargs,
+        )
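For streaming calls the token usage and stop reason are not available on the response dict, so the Bedrock extension (bedrock.py, also added in this release but not shown here) wraps the returned EventStream and gets called back once the terminal event is consumed. A rough sketch of that mechanism driven by hand, with print callbacks standing in for the span/event handling; `client` and the model ID are assumed:

# Sketch (not from the diff): exercising ConverseStreamWrapper directly.
# In the instrumentation, bedrock.py installs callbacks that record token
# usage, finish the span and emit the gen_ai.choice event; here they print.
from opentelemetry.instrumentation.botocore.extensions.bedrock_utils import (
    ConverseStreamWrapper,
)

def on_stream_done(response):
    # Accumulated in the same shape as a non-streaming Converse response:
    # {"usage": {...}, "stopReason": ..., "output": {"message": {...}}}
    print(response)

def on_stream_error(exc):
    print(f"stream failed: {exc}")

result = client.converse_stream(  # `client`: a bedrock-runtime client (assumed)
    modelId="amazon.titan-text-lite-v1",
    messages=[{"role": "user", "content": [{"text": "Hello"}]}],
)
result["stream"] = ConverseStreamWrapper(
    result["stream"], on_stream_done, on_stream_error
)
for _event in result["stream"]:  # callbacks fire while the caller iterates
    pass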
opentelemetry/instrumentation/botocore/extensions/dynamodb.py
@@ -22,6 +22,7 @@ from opentelemetry.instrumentation.botocore.extensions.types import (
     _AttributeMapT,
     _AwsSdkCallContext,
     _AwsSdkExtension,
+    _BotocoreInstrumentorContext,
     _BotoResultT,
 )
 from opentelemetry.semconv.trace import DbSystemValues, SpanAttributes
@@ -370,7 +371,9 @@ class _DynamoDbExtension(_AwsSdkExtension):
     def _get_peer_name(self) -> str:
         return urlparse(self._call_context.endpoint_url).netloc

-    def before_service_call(self, span: Span):
+    def before_service_call(
+        self, span: Span, instrumentor_context: _BotocoreInstrumentorContext
+    ):
         if not span.is_recording() or self._op is None:
             return

@@ -380,7 +383,12 @@ class _DynamoDbExtension(_AwsSdkExtension):
             span.set_attribute,
         )

-    def on_success(self, span: Span, result: _BotoResultT):
+    def on_success(
+        self,
+        span: Span,
+        result: _BotoResultT,
+        instrumentor_context: _BotocoreInstrumentorContext,
+    ):
         if not span.is_recording():
             return

opentelemetry/instrumentation/botocore/extensions/lmbd.py
@@ -22,6 +22,7 @@ from opentelemetry.instrumentation.botocore.extensions.types import (
     _AttributeMapT,
     _AwsSdkCallContext,
     _AwsSdkExtension,
+    _BotocoreInstrumentorContext,
 )
 from opentelemetry.propagate import inject
 from opentelemetry.semconv.trace import SpanAttributes
@@ -119,7 +120,9 @@ class _LambdaExtension(_AwsSdkExtension):

         self._op.extract_attributes(self._call_context, attributes)

-    def before_service_call(self, span: Span):
+    def before_service_call(
+        self, span: Span, instrumentor_context: _BotocoreInstrumentorContext
+    ):
         if self._op is None:
             return

opentelemetry/instrumentation/botocore/extensions/sns.py
@@ -23,6 +23,7 @@ from opentelemetry.instrumentation.botocore.extensions.types import (
     _AttributeMapT,
     _AwsSdkCallContext,
     _AwsSdkExtension,
+    _BotocoreInstrumentorContext,
 )
 from opentelemetry.semconv.trace import (
     MessagingDestinationKindValues,
@@ -73,36 +74,35 @@ class _OpPublish(_SnsOperation):
     def extract_attributes(
         cls, call_context: _AwsSdkCallContext, attributes: _AttributeMapT
     ):
-        destination_name, is_phone_number = cls._extract_destination_name(
+        span_name, destination_name = cls._extract_destination_name(
             call_context
         )
+
+        call_context.span_name = f"{span_name} send"
+
         attributes[SpanAttributes.MESSAGING_DESTINATION_KIND] = (
             MessagingDestinationKindValues.TOPIC.value
         )
         attributes[SpanAttributes.MESSAGING_DESTINATION] = destination_name
-
-
-        attributes["messaging.destination.name"] = cls._extract_input_arn(
-            call_context
-        )
-        call_context.span_name = (
-            f"{'phone_number' if is_phone_number else destination_name} send"
+        attributes[SpanAttributes.MESSAGING_DESTINATION_NAME] = (
+            destination_name
         )

     @classmethod
     def _extract_destination_name(
         cls, call_context: _AwsSdkCallContext
-    ) -> Tuple[str, bool]:
+    ) -> Tuple[str, str]:
         arn = cls._extract_input_arn(call_context)
         if arn:
-            return arn.rsplit(":", 1)[-1], False
+            return arn.rsplit(":", 1)[-1], arn

         if cls._phone_arg_name:
             phone_number = call_context.params.get(cls._phone_arg_name)
             if phone_number:
-                return phone_number, True
+                # phone number redacted because it's a PII
+                return "phone_number", "phone_number:**"

-        return "unknown", False
+        return "unknown", "unknown"

     @classmethod
     def _extract_input_arn(
@@ -165,6 +165,8 @@ class _SnsExtension(_AwsSdkExtension):
         if self._op:
             self._op.extract_attributes(self._call_context, attributes)

-    def before_service_call(self, span: Span):
+    def before_service_call(
+        self, span: Span, instrumentor_context: _BotocoreInstrumentorContext
+    ):
         if self._op:
             self._op.before_service_call(self._call_context, span)
opentelemetry/instrumentation/botocore/extensions/sqs.py
@@ -16,6 +16,7 @@ import logging
 from opentelemetry.instrumentation.botocore.extensions.types import (
     _AttributeMapT,
     _AwsSdkExtension,
+    _BotocoreInstrumentorContext,
     _BotoResultT,
 )
 from opentelemetry.semconv.trace import SpanAttributes
@@ -44,7 +45,12 @@ class _SqsExtension(_AwsSdkExtension):
             queue_url,
         )

-    def on_success(self, span: Span, result: _BotoResultT):
+    def on_success(
+        self,
+        span: Span,
+        result: _BotoResultT,
+        instrumentor_context: _BotocoreInstrumentorContext,
+    ):
         operation = self._call_context.operation
         if operation in _SUPPORTED_OPERATIONS:
             try:
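All of the extension hooks above now take a _BotocoreInstrumentorContext argument, introduced by the types.py change listed in this release. A hedged sketch of an extension written against the updated signatures; the class name and span attributes are illustrative, and the context is accepted but left unused since its fields are defined in extensions/types.py rather than in the hunks shown here:

# Sketch (not from the diff): conforming to the new hook signatures.
from opentelemetry.instrumentation.botocore.extensions.types import (
    _AwsSdkExtension,
    _BotocoreInstrumentorContext,
    _BotoResultT,
)
from opentelemetry.trace import Span


class _MyServiceExtension(_AwsSdkExtension):
    def before_service_call(
        self, span: Span, instrumentor_context: _BotocoreInstrumentorContext
    ):
        if span.is_recording():
            # _call_context is provided by _AwsSdkExtension
            span.set_attribute("rpc.method", self._call_context.operation)

    def on_success(
        self,
        span: Span,
        result: _BotoResultT,
        instrumentor_context: _BotocoreInstrumentorContext,
    ):
        metadata = result.get("ResponseMetadata", {})
        if span.is_recording() and "HTTPStatusCode" in metadata:
            span.set_attribute(
                "http.response.status_code", metadata["HTTPStatusCode"]
            )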