dv-pipecat-ai 0.0.82.dev815__py3-none-any.whl → 0.0.82.dev857__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dv-pipecat-ai might be problematic.

Files changed (106)
  1. {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/METADATA +8 -3
  2. {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/RECORD +106 -79
  3. pipecat/adapters/base_llm_adapter.py +44 -6
  4. pipecat/adapters/services/anthropic_adapter.py +302 -2
  5. pipecat/adapters/services/aws_nova_sonic_adapter.py +40 -2
  6. pipecat/adapters/services/bedrock_adapter.py +40 -2
  7. pipecat/adapters/services/gemini_adapter.py +276 -6
  8. pipecat/adapters/services/open_ai_adapter.py +88 -7
  9. pipecat/adapters/services/open_ai_realtime_adapter.py +39 -1
  10. pipecat/audio/dtmf/__init__.py +0 -0
  11. pipecat/audio/dtmf/types.py +47 -0
  12. pipecat/audio/dtmf/utils.py +70 -0
  13. pipecat/audio/filters/aic_filter.py +199 -0
  14. pipecat/audio/utils.py +9 -7
  15. pipecat/extensions/ivr/__init__.py +0 -0
  16. pipecat/extensions/ivr/ivr_navigator.py +452 -0
  17. pipecat/frames/frames.py +156 -43
  18. pipecat/pipeline/llm_switcher.py +76 -0
  19. pipecat/pipeline/parallel_pipeline.py +3 -3
  20. pipecat/pipeline/service_switcher.py +144 -0
  21. pipecat/pipeline/task.py +68 -28
  22. pipecat/pipeline/task_observer.py +10 -0
  23. pipecat/processors/aggregators/dtmf_aggregator.py +2 -2
  24. pipecat/processors/aggregators/llm_context.py +277 -0
  25. pipecat/processors/aggregators/llm_response.py +48 -15
  26. pipecat/processors/aggregators/llm_response_universal.py +840 -0
  27. pipecat/processors/aggregators/openai_llm_context.py +3 -3
  28. pipecat/processors/dtmf_aggregator.py +0 -2
  29. pipecat/processors/filters/stt_mute_filter.py +0 -2
  30. pipecat/processors/frame_processor.py +18 -11
  31. pipecat/processors/frameworks/rtvi.py +17 -10
  32. pipecat/processors/metrics/sentry.py +2 -0
  33. pipecat/runner/daily.py +137 -36
  34. pipecat/runner/run.py +1 -1
  35. pipecat/runner/utils.py +7 -7
  36. pipecat/serializers/asterisk.py +20 -4
  37. pipecat/serializers/exotel.py +1 -1
  38. pipecat/serializers/plivo.py +1 -1
  39. pipecat/serializers/telnyx.py +1 -1
  40. pipecat/serializers/twilio.py +1 -1
  41. pipecat/services/__init__.py +2 -2
  42. pipecat/services/anthropic/llm.py +113 -28
  43. pipecat/services/asyncai/tts.py +4 -0
  44. pipecat/services/aws/llm.py +82 -8
  45. pipecat/services/aws/tts.py +0 -10
  46. pipecat/services/aws_nova_sonic/aws.py +5 -0
  47. pipecat/services/cartesia/tts.py +28 -16
  48. pipecat/services/cerebras/llm.py +15 -10
  49. pipecat/services/deepgram/stt.py +8 -0
  50. pipecat/services/deepseek/llm.py +13 -8
  51. pipecat/services/fireworks/llm.py +13 -8
  52. pipecat/services/fish/tts.py +8 -6
  53. pipecat/services/gemini_multimodal_live/gemini.py +5 -0
  54. pipecat/services/gladia/config.py +7 -1
  55. pipecat/services/gladia/stt.py +23 -15
  56. pipecat/services/google/llm.py +159 -59
  57. pipecat/services/google/llm_openai.py +18 -3
  58. pipecat/services/grok/llm.py +2 -1
  59. pipecat/services/llm_service.py +38 -3
  60. pipecat/services/mem0/memory.py +2 -1
  61. pipecat/services/mistral/llm.py +5 -6
  62. pipecat/services/nim/llm.py +2 -1
  63. pipecat/services/openai/base_llm.py +88 -26
  64. pipecat/services/openai/image.py +6 -1
  65. pipecat/services/openai_realtime_beta/openai.py +5 -2
  66. pipecat/services/openpipe/llm.py +6 -8
  67. pipecat/services/perplexity/llm.py +13 -8
  68. pipecat/services/playht/tts.py +9 -6
  69. pipecat/services/rime/tts.py +1 -1
  70. pipecat/services/sambanova/llm.py +18 -13
  71. pipecat/services/sarvam/tts.py +415 -10
  72. pipecat/services/speechmatics/stt.py +2 -2
  73. pipecat/services/tavus/video.py +1 -1
  74. pipecat/services/tts_service.py +15 -5
  75. pipecat/services/vistaar/llm.py +2 -5
  76. pipecat/transports/base_input.py +32 -19
  77. pipecat/transports/base_output.py +39 -5
  78. pipecat/transports/daily/__init__.py +0 -0
  79. pipecat/transports/daily/transport.py +2371 -0
  80. pipecat/transports/daily/utils.py +410 -0
  81. pipecat/transports/livekit/__init__.py +0 -0
  82. pipecat/transports/livekit/transport.py +1042 -0
  83. pipecat/transports/network/fastapi_websocket.py +12 -546
  84. pipecat/transports/network/small_webrtc.py +12 -922
  85. pipecat/transports/network/webrtc_connection.py +9 -595
  86. pipecat/transports/network/websocket_client.py +12 -481
  87. pipecat/transports/network/websocket_server.py +12 -487
  88. pipecat/transports/services/daily.py +9 -2334
  89. pipecat/transports/services/helpers/daily_rest.py +12 -396
  90. pipecat/transports/services/livekit.py +12 -975
  91. pipecat/transports/services/tavus.py +12 -757
  92. pipecat/transports/smallwebrtc/__init__.py +0 -0
  93. pipecat/transports/smallwebrtc/connection.py +612 -0
  94. pipecat/transports/smallwebrtc/transport.py +936 -0
  95. pipecat/transports/tavus/__init__.py +0 -0
  96. pipecat/transports/tavus/transport.py +770 -0
  97. pipecat/transports/websocket/__init__.py +0 -0
  98. pipecat/transports/websocket/client.py +494 -0
  99. pipecat/transports/websocket/fastapi.py +559 -0
  100. pipecat/transports/websocket/server.py +500 -0
  101. pipecat/transports/whatsapp/__init__.py +0 -0
  102. pipecat/transports/whatsapp/api.py +345 -0
  103. pipecat/transports/whatsapp/client.py +364 -0
  104. {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/WHEEL +0 -0
  105. {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/licenses/LICENSE +0 -0
  106. {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/top_level.txt +0 -0
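
Two structural changes stand out in the file list above: the transports are reorganized into per-backend packages (pipecat/transports/daily/, livekit/, smallwebrtc/, tavus/, websocket/, whatsapp/), with the old modules under pipecat/transports/network/ and pipecat/transports/services/ shrinking to what look like thin compatibility shims, and a universal LLM context layer is introduced (llm_context.py, llm_response_universal.py) with matching adapter changes shown in the diffs below. A hedged sketch of what the Daily import-path change likely looks like, assuming the class name carries over unchanged (the diff stats show only the file moves, not the exported names):

    # Old location (reduced to ~9 lines in this release, per the stats above):
    # from pipecat.transports.services.daily import DailyTransport

    # Assumed new location:
    from pipecat.transports.daily.transport import DailyTransport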
pipecat/adapters/services/anthropic_adapter.py
@@ -6,20 +6,320 @@
 
 """Anthropic LLM adapter for Pipecat."""
 
-from typing import Any, Dict, List
+import copy
+import json
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, TypedDict
+
+from anthropic import NOT_GIVEN, NotGiven
+from anthropic.types.message_param import MessageParam
+from anthropic.types.tool_union_param import ToolUnionParam
+from loguru import logger
 
 from pipecat.adapters.base_llm_adapter import BaseLLMAdapter
 from pipecat.adapters.schemas.function_schema import FunctionSchema
 from pipecat.adapters.schemas.tools_schema import ToolsSchema
+from pipecat.processors.aggregators.llm_context import (
+    LLMContext,
+    LLMContextMessage,
+    LLMSpecificMessage,
+    LLMStandardMessage,
+)
+
+
+class AnthropicLLMInvocationParams(TypedDict):
+    """Context-based parameters for invoking Anthropic's LLM API.
+
+    This is a placeholder until support for universal LLMContext machinery is added for Anthropic.
+    """
+
+    system: str | NotGiven
+    messages: List[MessageParam]
+    tools: List[ToolUnionParam]
 
 
-class AnthropicLLMAdapter(BaseLLMAdapter):
+class AnthropicLLMAdapter(BaseLLMAdapter[AnthropicLLMInvocationParams]):
     """Adapter for converting tool schemas to Anthropic's function-calling format.
 
     This adapter handles the conversion of Pipecat's standard function schemas
     to the specific format required by Anthropic's Claude models for function calling.
     """
 
+    def get_llm_invocation_params(
+        self, context: LLMContext, enable_prompt_caching: bool
+    ) -> AnthropicLLMInvocationParams:
+        """Get Anthropic-specific LLM invocation parameters from a universal LLM context.
+
+        This is a placeholder until support for universal LLMContext machinery is added for Anthropic.
+
+        Args:
+            context: The LLM context containing messages, tools, etc.
+            enable_prompt_caching: Whether prompt caching should be enabled.
+
+        Returns:
+            Dictionary of parameters for invoking Anthropic's LLM API.
+        """
+        messages = self._from_universal_context_messages(self._get_messages(context))
+        return {
+            "system": messages.system,
+            "messages": (
+                self._with_cache_control_markers(messages.messages)
+                if enable_prompt_caching
+                else messages.messages
+            ),
+            # NOTE: LLMContext's tools are guaranteed to be a ToolsSchema (or NOT_GIVEN)
+            "tools": self.from_standard_tools(context.tools) or [],
+        }
+
+    def get_messages_for_logging(self, context: LLMContext) -> List[Dict[str, Any]]:
+        """Get messages from a universal LLM context in a format ready for logging about Anthropic.
+
+        Removes or truncates sensitive data like image content for safe logging.
+
+        This is a placeholder until support for universal LLMContext machinery is added for Anthropic.
+
+        Args:
+            context: The LLM context containing messages.
+
+        Returns:
+            List of messages in a format ready for logging about Anthropic.
+        """
+        # Get messages in Anthropic's format
+        messages = self._from_universal_context_messages(self._get_messages(context)).messages
+
+        # Sanitize messages for logging
+        messages_for_logging = []
+        for message in messages:
+            msg = copy.deepcopy(message)
+            if "content" in msg:
+                if isinstance(msg["content"], list):
+                    for item in msg["content"]:
+                        if item["type"] == "image":
+                            item["source"]["data"] = "..."
+            messages_for_logging.append(msg)
+        return messages_for_logging
+
+    def _get_messages(self, context: LLMContext) -> List[LLMContextMessage]:
+        return context.get_messages("anthropic")
+
+    @dataclass
+    class ConvertedMessages:
+        """Container for Anthropic-formatted messages converted from universal context."""
+
+        messages: List[MessageParam]
+        system: str | NotGiven
+
+    def _from_universal_context_messages(
+        self, universal_context_messages: List[LLMContextMessage]
+    ) -> ConvertedMessages:
+        system = NOT_GIVEN
+        messages = []
+
+        # first, map messages using self._from_universal_context_message(m)
+        try:
+            messages = [self._from_universal_context_message(m) for m in universal_context_messages]
+        except Exception as e:
+            logger.error(f"Error mapping messages: {e}")
+
+        # See if we should pull the system message out of our messages list.
+        if messages and messages[0]["role"] == "system":
+            if len(messages) == 1:
+                # If we only have a system message in the list, all we can really do
+                # without introducing too much magic is change the role to "user".
+                messages[0]["role"] = "user"
+            else:
+                # If we have more than one message, we'll pull the system message out of the
+                # list.
+                system = messages[0]["content"]
+                messages.pop(0)
+
+        # Convert any subsequent "system"-role messages to "user"-role
+        # messages, as Anthropic doesn't support system input messages.
+        for message in messages:
+            if message["role"] == "system":
+                message["role"] = "user"
+
+        # Merge consecutive messages with the same role.
+        i = 0
+        while i < len(messages) - 1:
+            current_message = messages[i]
+            next_message = messages[i + 1]
+            if current_message["role"] == next_message["role"]:
+                # Convert content to list of dictionaries if it's a string
+                if isinstance(current_message["content"], str):
+                    current_message["content"] = [
+                        {"type": "text", "text": current_message["content"]}
+                    ]
+                if isinstance(next_message["content"], str):
+                    next_message["content"] = [{"type": "text", "text": next_message["content"]}]
+                # Concatenate the content
+                current_message["content"].extend(next_message["content"])
+                # Remove the next message from the list
+                messages.pop(i + 1)
+            else:
+                i += 1
+
+        # Avoid empty content in messages
+        for message in messages:
+            if isinstance(message["content"], str) and message["content"] == "":
+                message["content"] = "(empty)"
+            elif isinstance(message["content"], list) and len(message["content"]) == 0:
+                message["content"] = [{"type": "text", "text": "(empty)"}]
+
+        return self.ConvertedMessages(messages=messages, system=system)
+
+    def _from_universal_context_message(self, message: LLMContextMessage) -> MessageParam:
+        if isinstance(message, LLMSpecificMessage):
+            return copy.deepcopy(message.message)
+        return self._from_standard_message(message)
+
+    def _from_standard_message(self, message: LLMStandardMessage) -> MessageParam:
+        """Convert standard universal context message to Anthropic format.
+
+        Handles conversion of text content, tool calls, and tool results.
+        Empty text content is converted to "(empty)".
+
+        Args:
+            message: Message in standard universal context format.
+
+        Returns:
+            Message in Anthropic format.
+
+        Examples:
+            Input standard format::
+
+                {
+                    "role": "assistant",
+                    "tool_calls": [
+                        {
+                            "id": "123",
+                            "function": {"name": "search", "arguments": '{"q": "test"}'}
+                        }
+                    ]
+                }
+
+            Output Anthropic format::
+
+                {
+                    "role": "assistant",
+                    "content": [
+                        {
+                            "type": "tool_use",
+                            "id": "123",
+                            "name": "search",
+                            "input": {"q": "test"}
+                        }
+                    ]
+                }
+        """
+        message = copy.deepcopy(message)
+        if message["role"] == "tool":
+            return {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "tool_result",
+                        "tool_use_id": message["tool_call_id"],
+                        "content": message["content"],
+                    },
+                ],
+            }
+        if message.get("tool_calls"):
+            tc = message["tool_calls"]
+            ret = {"role": "assistant", "content": []}
+            for tool_call in tc:
+                function = tool_call["function"]
+                arguments = json.loads(function["arguments"])
+                new_tool_use = {
+                    "type": "tool_use",
+                    "id": tool_call["id"],
+                    "name": function["name"],
+                    "input": arguments,
+                }
+                ret["content"].append(new_tool_use)
+            return ret
+        content = message.get("content")
+        if isinstance(content, str):
+            # fix empty text
+            if content == "":
+                content = "(empty)"
+        elif isinstance(content, list):
+            for item in content:
+                # fix empty text
+                if item["type"] == "text" and item["text"] == "":
+                    item["text"] = "(empty)"
+                # handle image_url -> image conversion
+                if item["type"] == "image_url":
+                    item["type"] = "image"
+                    item["source"] = {
+                        "type": "base64",
+                        "media_type": "image/jpeg",
+                        "data": item["image_url"]["url"].split(",")[1],
+                    }
+                    del item["image_url"]
+            # In the case where there's a single image in the list (like what
+            # would result from a UserImageRawFrame), ensure that the image
+            # comes before text, as recommended by Anthropic docs
+            # (https://docs.anthropic.com/en/docs/build-with-claude/vision#example-one-image)
+            image_indices = [i for i, item in enumerate(content) if item["type"] == "image"]
+            text_indices = [i for i, item in enumerate(content) if item["type"] == "text"]
+            if len(image_indices) == 1 and text_indices:
+                img_idx = image_indices[0]
+                first_txt_idx = text_indices[0]
+                if img_idx > first_txt_idx:
+                    # Move image before the first text
+                    image_item = content.pop(img_idx)
+                    content.insert(first_txt_idx, image_item)
+
+        return message
+
+    def _with_cache_control_markers(self, messages: List[MessageParam]) -> List[MessageParam]:
+        """Add cache control markers to messages for prompt caching.
+
+        Args:
+            messages: List of messages in Anthropic format.
+
+        Returns:
+            List of messages with cache control markers added.
+        """
+
+        def add_cache_control_marker(message: MessageParam):
+            if isinstance(message["content"], str):
+                message["content"] = [{"type": "text", "text": message["content"]}]
+            message["content"][-1]["cache_control"] = {"type": "ephemeral"}
+
+        try:
+            # Add cache control markers to the most recent two user messages.
+            # - The marker at the most recent user message tells Anthropic to
+            #   cache the prompt up to that point.
+            # - The marker at the second-most-recent user message tells Anthropic
+            #   to look up the cached prompt that goes up to that point (the
+            #   point that *was* the last user message the previous turn).
+            # If we only added the marker to the last user message, we'd only
+            # ever be adding to the cache, never looking up from it.
+            # Why user messages? We're assuming that we're primarily running
+            # inference as soon as user turns come in. In Anthropic, turns
+            # strictly alternate between user and assistant.
+
+            messages_with_markers = copy.deepcopy(messages)
+
+            # Find the most recent two user messages
+            user_message_indices = []
+            for i in range(len(messages_with_markers) - 1, -1, -1):
+                if messages_with_markers[i]["role"] == "user":
+                    user_message_indices.append(i)
+                    if len(user_message_indices) == 2:
+                        break
+
+            # Add cache control markers to the identified user messages
+            for index in user_message_indices:
+                add_cache_control_marker(messages_with_markers[index])
+
+            return messages_with_markers
+        except Exception as e:
+            logger.error(f"Error adding cache control marker: {e}")
+            return messages_with_markers
+
     @staticmethod
     def _to_anthropic_function_format(function: FunctionSchema) -> Dict[str, Any]:
         """Convert a single function schema to Anthropic's format.
pipecat/adapters/services/aws_nova_sonic_adapter.py
@@ -7,20 +7,58 @@
 """AWS Nova Sonic LLM adapter for Pipecat."""
 
 import json
-from typing import Any, Dict, List
+from typing import Any, Dict, List, TypedDict
 
 from pipecat.adapters.base_llm_adapter import BaseLLMAdapter
 from pipecat.adapters.schemas.function_schema import FunctionSchema
 from pipecat.adapters.schemas.tools_schema import ToolsSchema
+from pipecat.processors.aggregators.llm_context import LLMContext
 
 
-class AWSNovaSonicLLMAdapter(BaseLLMAdapter):
+class AWSNovaSonicLLMInvocationParams(TypedDict):
+    """Context-based parameters for invoking AWS Nova Sonic's LLM API.
+
+    This is a placeholder until support for universal LLMContext machinery is added for AWS Nova Sonic.
+    """
+
+    pass
+
+
+class AWSNovaSonicLLMAdapter(BaseLLMAdapter[AWSNovaSonicLLMInvocationParams]):
     """Adapter for AWS Nova Sonic language models.
 
     Converts Pipecat's standard function schemas into AWS Nova Sonic's
     specific function-calling format, enabling tool use with Nova Sonic models.
     """
 
+    def get_llm_invocation_params(self, context: LLMContext) -> AWSNovaSonicLLMInvocationParams:
+        """Get AWS Nova Sonic-specific LLM invocation parameters from a universal LLM context.
+
+        This is a placeholder until support for universal LLMContext machinery is added for AWS Nova Sonic.
+
+        Args:
+            context: The LLM context containing messages, tools, etc.
+
+        Returns:
+            Dictionary of parameters for invoking AWS Nova Sonic's LLM API.
+        """
+        raise NotImplementedError("Universal LLMContext is not yet supported for AWS Nova Sonic.")
+
+    def get_messages_for_logging(self, context) -> List[Dict[str, Any]]:
+        """Get messages from a universal LLM context in a format ready for logging about AWS Nova Sonic.
+
+        Removes or truncates sensitive data like image content for safe logging.
+
+        This is a placeholder until support for universal LLMContext machinery is added for AWS Nova Sonic.
+
+        Args:
+            context: The LLM context containing messages.
+
+        Returns:
+            List of messages in a format ready for logging about AWS Nova Sonic.
+        """
+        raise NotImplementedError("Universal LLMContext is not yet supported for AWS Nova Sonic.")
+
     @staticmethod
     def _to_aws_nova_sonic_function_format(function: FunctionSchema) -> Dict[str, Any]:
         """Convert a function schema to AWS Nova Sonic format.
pipecat/adapters/services/bedrock_adapter.py
@@ -6,20 +6,58 @@
 
 """AWS Bedrock LLM adapter for Pipecat."""
 
-from typing import Any, Dict, List
+from typing import Any, Dict, List, TypedDict
 
 from pipecat.adapters.base_llm_adapter import BaseLLMAdapter
 from pipecat.adapters.schemas.function_schema import FunctionSchema
 from pipecat.adapters.schemas.tools_schema import ToolsSchema
+from pipecat.processors.aggregators.llm_context import LLMContext
 
 
-class AWSBedrockLLMAdapter(BaseLLMAdapter):
+class AWSBedrockLLMInvocationParams(TypedDict):
+    """Context-based parameters for invoking AWS Bedrock's LLM API.
+
+    This is a placeholder until support for universal LLMContext machinery is added for Bedrock.
+    """
+
+    pass
+
+
+class AWSBedrockLLMAdapter(BaseLLMAdapter[AWSBedrockLLMInvocationParams]):
     """Adapter for AWS Bedrock LLM integration with Pipecat.
 
     Provides conversion utilities for transforming Pipecat function schemas
     into AWS Bedrock's expected tool format for function calling capabilities.
     """
 
+    def get_llm_invocation_params(self, context: LLMContext) -> AWSBedrockLLMInvocationParams:
+        """Get AWS Bedrock-specific LLM invocation parameters from a universal LLM context.
+
+        This is a placeholder until support for universal LLMContext machinery is added for Bedrock.
+
+        Args:
+            context: The LLM context containing messages, tools, etc.
+
+        Returns:
+            Dictionary of parameters for invoking AWS Bedrock's LLM API.
+        """
+        raise NotImplementedError("Universal LLMContext is not yet supported for AWS Bedrock.")
+
+    def get_messages_for_logging(self, context) -> List[Dict[str, Any]]:
+        """Get messages from a universal LLM context in a format ready for logging about AWS Bedrock.
+
+        Removes or truncates sensitive data like image content for safe logging.
+
+        This is a placeholder until support for universal LLMContext machinery is added for Bedrock.
+
+        Args:
+            context: The LLM context containing messages.
+
+        Returns:
+            List of messages in a format ready for logging about AWS Bedrock.
+        """
+        raise NotImplementedError("Universal LLMContext is not yet supported for AWS Bedrock.")
+
     @staticmethod
     def _to_bedrock_function_format(function: FunctionSchema) -> Dict[str, Any]:
         """Convert a function schema to Bedrock's tool format.