dv-pipecat-ai 0.0.82.dev857__py3-none-any.whl → 0.0.85.dev837__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of dv-pipecat-ai might be problematic.

Files changed (195)
  1. {dv_pipecat_ai-0.0.82.dev857.dist-info → dv_pipecat_ai-0.0.85.dev837.dist-info}/METADATA +98 -130
  2. {dv_pipecat_ai-0.0.82.dev857.dist-info → dv_pipecat_ai-0.0.85.dev837.dist-info}/RECORD +192 -140
  3. pipecat/adapters/base_llm_adapter.py +38 -1
  4. pipecat/adapters/services/anthropic_adapter.py +9 -14
  5. pipecat/adapters/services/aws_nova_sonic_adapter.py +120 -5
  6. pipecat/adapters/services/bedrock_adapter.py +236 -13
  7. pipecat/adapters/services/gemini_adapter.py +12 -8
  8. pipecat/adapters/services/open_ai_adapter.py +19 -7
  9. pipecat/adapters/services/open_ai_realtime_adapter.py +5 -0
  10. pipecat/audio/dtmf/dtmf-0.wav +0 -0
  11. pipecat/audio/dtmf/dtmf-1.wav +0 -0
  12. pipecat/audio/dtmf/dtmf-2.wav +0 -0
  13. pipecat/audio/dtmf/dtmf-3.wav +0 -0
  14. pipecat/audio/dtmf/dtmf-4.wav +0 -0
  15. pipecat/audio/dtmf/dtmf-5.wav +0 -0
  16. pipecat/audio/dtmf/dtmf-6.wav +0 -0
  17. pipecat/audio/dtmf/dtmf-7.wav +0 -0
  18. pipecat/audio/dtmf/dtmf-8.wav +0 -0
  19. pipecat/audio/dtmf/dtmf-9.wav +0 -0
  20. pipecat/audio/dtmf/dtmf-pound.wav +0 -0
  21. pipecat/audio/dtmf/dtmf-star.wav +0 -0
  22. pipecat/audio/filters/krisp_viva_filter.py +193 -0
  23. pipecat/audio/filters/noisereduce_filter.py +15 -0
  24. pipecat/audio/turn/base_turn_analyzer.py +9 -1
  25. pipecat/audio/turn/smart_turn/base_smart_turn.py +14 -8
  26. pipecat/audio/turn/smart_turn/data/__init__.py +0 -0
  27. pipecat/audio/turn/smart_turn/data/smart-turn-v3.0.onnx +0 -0
  28. pipecat/audio/turn/smart_turn/http_smart_turn.py +6 -2
  29. pipecat/audio/turn/smart_turn/local_smart_turn.py +1 -1
  30. pipecat/audio/turn/smart_turn/local_smart_turn_v2.py +1 -1
  31. pipecat/audio/turn/smart_turn/local_smart_turn_v3.py +124 -0
  32. pipecat/audio/vad/data/README.md +10 -0
  33. pipecat/audio/vad/data/silero_vad_v2.onnx +0 -0
  34. pipecat/audio/vad/silero.py +9 -3
  35. pipecat/audio/vad/vad_analyzer.py +13 -1
  36. pipecat/extensions/voicemail/voicemail_detector.py +5 -5
  37. pipecat/frames/frames.py +277 -86
  38. pipecat/observers/loggers/debug_log_observer.py +3 -3
  39. pipecat/observers/loggers/llm_log_observer.py +7 -3
  40. pipecat/observers/loggers/user_bot_latency_log_observer.py +22 -10
  41. pipecat/pipeline/runner.py +18 -6
  42. pipecat/pipeline/service_switcher.py +64 -36
  43. pipecat/pipeline/task.py +125 -79
  44. pipecat/pipeline/tts_switcher.py +30 -0
  45. pipecat/processors/aggregators/dtmf_aggregator.py +2 -3
  46. pipecat/processors/aggregators/{gated_openai_llm_context.py → gated_llm_context.py} +9 -9
  47. pipecat/processors/aggregators/gated_open_ai_llm_context.py +12 -0
  48. pipecat/processors/aggregators/llm_context.py +40 -2
  49. pipecat/processors/aggregators/llm_response.py +32 -15
  50. pipecat/processors/aggregators/llm_response_universal.py +19 -15
  51. pipecat/processors/aggregators/user_response.py +6 -6
  52. pipecat/processors/aggregators/vision_image_frame.py +24 -2
  53. pipecat/processors/audio/audio_buffer_processor.py +43 -8
  54. pipecat/processors/dtmf_aggregator.py +174 -77
  55. pipecat/processors/filters/stt_mute_filter.py +17 -0
  56. pipecat/processors/frame_processor.py +110 -24
  57. pipecat/processors/frameworks/langchain.py +8 -2
  58. pipecat/processors/frameworks/rtvi.py +210 -68
  59. pipecat/processors/frameworks/strands_agents.py +170 -0
  60. pipecat/processors/logger.py +2 -2
  61. pipecat/processors/transcript_processor.py +26 -5
  62. pipecat/processors/user_idle_processor.py +35 -11
  63. pipecat/runner/daily.py +59 -20
  64. pipecat/runner/run.py +395 -93
  65. pipecat/runner/types.py +6 -4
  66. pipecat/runner/utils.py +51 -10
  67. pipecat/serializers/__init__.py +5 -1
  68. pipecat/serializers/asterisk.py +16 -2
  69. pipecat/serializers/convox.py +41 -4
  70. pipecat/serializers/custom.py +257 -0
  71. pipecat/serializers/exotel.py +5 -5
  72. pipecat/serializers/livekit.py +20 -0
  73. pipecat/serializers/plivo.py +5 -5
  74. pipecat/serializers/protobuf.py +6 -5
  75. pipecat/serializers/telnyx.py +2 -2
  76. pipecat/serializers/twilio.py +43 -23
  77. pipecat/serializers/vi.py +324 -0
  78. pipecat/services/ai_service.py +2 -6
  79. pipecat/services/anthropic/llm.py +2 -25
  80. pipecat/services/assemblyai/models.py +6 -0
  81. pipecat/services/assemblyai/stt.py +13 -5
  82. pipecat/services/asyncai/tts.py +5 -3
  83. pipecat/services/aws/__init__.py +1 -0
  84. pipecat/services/aws/llm.py +147 -105
  85. pipecat/services/aws/nova_sonic/__init__.py +0 -0
  86. pipecat/services/aws/nova_sonic/context.py +436 -0
  87. pipecat/services/aws/nova_sonic/frames.py +25 -0
  88. pipecat/services/aws/nova_sonic/llm.py +1265 -0
  89. pipecat/services/aws/stt.py +3 -3
  90. pipecat/services/aws_nova_sonic/__init__.py +19 -1
  91. pipecat/services/aws_nova_sonic/aws.py +11 -1151
  92. pipecat/services/aws_nova_sonic/context.py +8 -354
  93. pipecat/services/aws_nova_sonic/frames.py +13 -17
  94. pipecat/services/azure/llm.py +51 -1
  95. pipecat/services/azure/realtime/__init__.py +0 -0
  96. pipecat/services/azure/realtime/llm.py +65 -0
  97. pipecat/services/azure/stt.py +15 -0
  98. pipecat/services/cartesia/stt.py +77 -70
  99. pipecat/services/cartesia/tts.py +80 -13
  100. pipecat/services/deepgram/__init__.py +1 -0
  101. pipecat/services/deepgram/flux/__init__.py +0 -0
  102. pipecat/services/deepgram/flux/stt.py +640 -0
  103. pipecat/services/elevenlabs/__init__.py +4 -1
  104. pipecat/services/elevenlabs/stt.py +339 -0
  105. pipecat/services/elevenlabs/tts.py +87 -46
  106. pipecat/services/fish/tts.py +5 -2
  107. pipecat/services/gemini_multimodal_live/events.py +38 -524
  108. pipecat/services/gemini_multimodal_live/file_api.py +23 -173
  109. pipecat/services/gemini_multimodal_live/gemini.py +41 -1403
  110. pipecat/services/gladia/stt.py +56 -72
  111. pipecat/services/google/__init__.py +1 -0
  112. pipecat/services/google/gemini_live/__init__.py +3 -0
  113. pipecat/services/google/gemini_live/file_api.py +189 -0
  114. pipecat/services/google/gemini_live/llm.py +1582 -0
  115. pipecat/services/google/gemini_live/llm_vertex.py +184 -0
  116. pipecat/services/google/llm.py +15 -11
  117. pipecat/services/google/llm_openai.py +3 -3
  118. pipecat/services/google/llm_vertex.py +86 -16
  119. pipecat/services/google/stt.py +4 -0
  120. pipecat/services/google/tts.py +7 -3
  121. pipecat/services/heygen/api.py +2 -0
  122. pipecat/services/heygen/client.py +8 -4
  123. pipecat/services/heygen/video.py +2 -0
  124. pipecat/services/hume/__init__.py +5 -0
  125. pipecat/services/hume/tts.py +220 -0
  126. pipecat/services/inworld/tts.py +6 -6
  127. pipecat/services/llm_service.py +15 -5
  128. pipecat/services/lmnt/tts.py +4 -2
  129. pipecat/services/mcp_service.py +4 -2
  130. pipecat/services/mem0/memory.py +6 -5
  131. pipecat/services/mistral/llm.py +29 -8
  132. pipecat/services/moondream/vision.py +42 -16
  133. pipecat/services/neuphonic/tts.py +5 -2
  134. pipecat/services/openai/__init__.py +1 -0
  135. pipecat/services/openai/base_llm.py +27 -20
  136. pipecat/services/openai/realtime/__init__.py +0 -0
  137. pipecat/services/openai/realtime/context.py +272 -0
  138. pipecat/services/openai/realtime/events.py +1106 -0
  139. pipecat/services/openai/realtime/frames.py +37 -0
  140. pipecat/services/openai/realtime/llm.py +829 -0
  141. pipecat/services/openai/tts.py +49 -10
  142. pipecat/services/openai_realtime/__init__.py +27 -0
  143. pipecat/services/openai_realtime/azure.py +21 -0
  144. pipecat/services/openai_realtime/context.py +21 -0
  145. pipecat/services/openai_realtime/events.py +21 -0
  146. pipecat/services/openai_realtime/frames.py +21 -0
  147. pipecat/services/openai_realtime_beta/azure.py +16 -0
  148. pipecat/services/openai_realtime_beta/openai.py +17 -5
  149. pipecat/services/piper/tts.py +7 -9
  150. pipecat/services/playht/tts.py +34 -4
  151. pipecat/services/rime/tts.py +12 -12
  152. pipecat/services/riva/stt.py +3 -1
  153. pipecat/services/salesforce/__init__.py +9 -0
  154. pipecat/services/salesforce/llm.py +700 -0
  155. pipecat/services/sarvam/__init__.py +7 -0
  156. pipecat/services/sarvam/stt.py +540 -0
  157. pipecat/services/sarvam/tts.py +97 -13
  158. pipecat/services/simli/video.py +2 -2
  159. pipecat/services/speechmatics/stt.py +22 -10
  160. pipecat/services/stt_service.py +47 -0
  161. pipecat/services/tavus/video.py +2 -2
  162. pipecat/services/tts_service.py +75 -22
  163. pipecat/services/vision_service.py +7 -6
  164. pipecat/services/vistaar/llm.py +51 -9
  165. pipecat/tests/utils.py +4 -4
  166. pipecat/transcriptions/language.py +41 -1
  167. pipecat/transports/base_input.py +13 -34
  168. pipecat/transports/base_output.py +140 -104
  169. pipecat/transports/daily/transport.py +199 -26
  170. pipecat/transports/heygen/__init__.py +0 -0
  171. pipecat/transports/heygen/transport.py +381 -0
  172. pipecat/transports/livekit/transport.py +228 -63
  173. pipecat/transports/local/audio.py +6 -1
  174. pipecat/transports/local/tk.py +11 -2
  175. pipecat/transports/network/fastapi_websocket.py +1 -1
  176. pipecat/transports/smallwebrtc/connection.py +103 -19
  177. pipecat/transports/smallwebrtc/request_handler.py +246 -0
  178. pipecat/transports/smallwebrtc/transport.py +65 -23
  179. pipecat/transports/tavus/transport.py +23 -12
  180. pipecat/transports/websocket/client.py +41 -5
  181. pipecat/transports/websocket/fastapi.py +21 -11
  182. pipecat/transports/websocket/server.py +14 -7
  183. pipecat/transports/whatsapp/api.py +8 -0
  184. pipecat/transports/whatsapp/client.py +47 -0
  185. pipecat/utils/base_object.py +54 -22
  186. pipecat/utils/redis.py +58 -0
  187. pipecat/utils/string.py +13 -1
  188. pipecat/utils/tracing/service_decorators.py +21 -21
  189. pipecat/serializers/genesys.py +0 -95
  190. pipecat/services/google/test-google-chirp.py +0 -45
  191. pipecat/services/openai.py +0 -698
  192. {dv_pipecat_ai-0.0.82.dev857.dist-info → dv_pipecat_ai-0.0.85.dev837.dist-info}/WHEEL +0 -0
  193. {dv_pipecat_ai-0.0.82.dev857.dist-info → dv_pipecat_ai-0.0.85.dev837.dist-info}/licenses/LICENSE +0 -0
  194. {dv_pipecat_ai-0.0.82.dev857.dist-info → dv_pipecat_ai-0.0.85.dev837.dist-info}/top_level.txt +0 -0
  195. /pipecat/services/{aws_nova_sonic → aws/nova_sonic}/ready.wav +0 -0
--- a/pipecat/services/aws/__init__.py
+++ b/pipecat/services/aws/__init__.py
@@ -9,6 +9,7 @@ import sys
 from pipecat.services import DeprecatedModuleProxy
 
 from .llm import *
+from .nova_sonic import *
 from .stt import *
 from .tts import *
 
--- a/pipecat/services/aws/llm.py
+++ b/pipecat/services/aws/llm.py
@@ -25,7 +25,10 @@ from loguru import logger
 from PIL import Image
 from pydantic import BaseModel, Field
 
-from pipecat.adapters.services.bedrock_adapter import AWSBedrockLLMAdapter
+from pipecat.adapters.services.bedrock_adapter import (
+    AWSBedrockLLMAdapter,
+    AWSBedrockLLMInvocationParams,
+)
 from pipecat.frames.frames import (
     Frame,
     FunctionCallCancelFrame,
@@ -39,7 +42,6 @@ from pipecat.frames.frames import (
     LLMTextFrame,
     LLMUpdateSettingsFrame,
     UserImageRawFrame,
-    VisionImageRawFrame,
 )
 from pipecat.metrics.metrics import LLMTokenUsage
 from pipecat.processors.aggregators.llm_context import LLMContext
@@ -59,7 +61,6 @@ from pipecat.utils.tracing.service_decorators import traced_llm
 
 try:
     import aioboto3
-    import httpx
     from botocore.config import Config
     from botocore.exceptions import ReadTimeoutError
 except ModuleNotFoundError as e:
@@ -180,22 +181,6 @@ class AWSBedrockLLMContext(OpenAILLMContext):
         self._restructure_from_openai_messages()
         return self
 
-    @classmethod
-    def from_image_frame(cls, frame: VisionImageRawFrame) -> "AWSBedrockLLMContext":
-        """Create AWS Bedrock context from vision image frame.
-
-        Args:
-            frame: The vision image frame to convert.
-
-        Returns:
-            New AWS Bedrock LLM context instance.
-        """
-        context = cls()
-        context.add_image_frame_message(
-            format=frame.format, size=frame.size, image=frame.image, text=frame.text
-        )
-        return context
-
     def set_messages(self, messages: List):
         """Set the messages list and restructure for Bedrock format.
 
@@ -399,9 +384,33 @@ class AWSBedrockLLMContext(OpenAILLMContext):
         elif isinstance(content, list):
             new_content = []
             for item in content:
+                # fix empty text
                 if item.get("type", "") == "text":
                     text_content = item["text"] if item["text"] != "" else "(empty)"
                     new_content.append({"text": text_content})
+                # handle image_url -> image conversion
+                if item["type"] == "image_url":
+                    new_item = {
+                        "image": {
+                            "format": "jpeg",
+                            "source": {
+                                "bytes": base64.b64decode(item["image_url"]["url"].split(",")[1])
+                            }
+                        }
+                    }
+                    new_content.append(new_item)
+            # In the case where there's a single image in the list (like what
+            # would result from a UserImageRawFrame), ensure that the image
+            # comes before text
+            image_indices = [i for i, item in enumerate(new_content) if "image" in item]
+            text_indices = [i for i, item in enumerate(new_content) if "text" in item]
+            if len(image_indices) == 1 and text_indices:
+                img_idx = image_indices[0]
+                first_txt_idx = text_indices[0]
+                if img_idx > first_txt_idx:
+                    # Move image before the first text
+                    image_item = new_content.pop(img_idx)
+                    new_content.insert(first_txt_idx, image_item)
             return {"role": message["role"], "content": new_content}
 
         return message
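Note: the reordering pass added at the end of this hunk only fires when the rebuilt content list holds exactly one image; multi-image messages keep their original order. A minimal standalone sketch of its effect (content values are hypothetical):

    new_content = [
        {"text": "What is in this image?"},
        {"image": {"format": "jpeg", "source": {"bytes": b"<jpeg bytes>"}}},
    ]

    image_indices = [i for i, item in enumerate(new_content) if "image" in item]
    text_indices = [i for i, item in enumerate(new_content) if "text" in item]
    if len(image_indices) == 1 and text_indices and image_indices[0] > text_indices[0]:
        # Move the single image ahead of the first text block
        new_content.insert(text_indices[0], new_content.pop(image_indices[0]))

    assert [next(iter(item)) for item in new_content] == ["image", "text"]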
@@ -569,7 +578,7 @@ class AWSBedrockLLMContext(OpenAILLMContext):
             if isinstance(msg["content"], list):
                 for item in msg["content"]:
                     if item.get("image"):
-                        item["source"]["bytes"] = "..."
+                        item["image"]["source"]["bytes"] = "..."
             msgs.append(msg)
         return msgs
 
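Note: this one-line change fixes a redaction bug. Bedrock image content blocks nest the payload under the "image" key, so the old item["source"] lookup raised a KeyError. The block shape, for reference (the bytes value is a placeholder):

    item = {
        "image": {
            "format": "jpeg",
            "source": {"bytes": b"<raw image bytes>"},
        }
    }
    # The bytes live one level down, under "image":
    item["image"]["source"]["bytes"] = "..."  # redacted before logging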
@@ -711,11 +720,11 @@ class AWSBedrockLLMService(LLMService):
            additional_model_request_fields: Additional model-specific parameters.
        """

-        max_tokens: Optional[int] = Field(default_factory=lambda: 4096, ge=1)
-        temperature: Optional[float] = Field(default_factory=lambda: 0.7, ge=0.0, le=1.0)
-        top_p: Optional[float] = Field(default_factory=lambda: 0.999, ge=0.0, le=1.0)
+        max_tokens: Optional[int] = Field(default=None, ge=1)
+        temperature: Optional[float] = Field(default=None, ge=0.0, le=1.0)
+        top_p: Optional[float] = Field(default=None, ge=0.0, le=1.0)
         stop_sequences: Optional[List[str]] = Field(default_factory=lambda: [])
-        latency: Optional[str] = Field(default_factory=lambda: "standard")
+        latency: Optional[str] = Field(default=None)
         additional_model_request_fields: Optional[Dict[str, Any]] = Field(default_factory=dict)

     def __init__(
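Note: switching these defaults from concrete values to None is what enables the new _build_inference_config helper in the next hunk: only parameters the caller explicitly sets are forwarded to the Converse API. A minimal sketch of the resulting behavior, assuming a service configured with temperature only:

    settings = {"max_tokens": None, "temperature": 0.7, "top_p": None}

    inference_config = {}
    if settings["max_tokens"] is not None:
        inference_config["maxTokens"] = settings["max_tokens"]
    if settings["temperature"] is not None:
        inference_config["temperature"] = settings["temperature"]
    if settings["top_p"] is not None:
        inference_config["topP"] = settings["top_p"]

    # top_p is omitted entirely, sidestepping temperature/top_p conflicts on
    # models such as Claude Sonnet 4.5
    assert inference_config == {"temperature": 0.7}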
@@ -792,6 +801,24 @@ class AWSBedrockLLMService(LLMService):
         """
         return True
 
+    def _build_inference_config(self) -> Dict[str, Any]:
+        """Build inference config with only the parameters that are set.
+
+        This prevents conflicts with models (e.g., Claude Sonnet 4.5) that don't
+        allow certain parameter combinations like temperature and top_p together.
+
+        Returns:
+            Dictionary containing only the inference parameters that are not None.
+        """
+        inference_config = {}
+        if self._settings["max_tokens"] is not None:
+            inference_config["maxTokens"] = self._settings["max_tokens"]
+        if self._settings["temperature"] is not None:
+            inference_config["temperature"] = self._settings["temperature"]
+        if self._settings["top_p"] is not None:
+            inference_config["topP"] = self._settings["top_p"]
+        return inference_config
+
     async def run_inference(self, context: LLMContext | OpenAILLMContext) -> Optional[str]:
         """Run a one-shot, out-of-band (i.e. out-of-pipeline) inference with the given LLM context.
 
@@ -801,64 +828,55 @@ class AWSBedrockLLMService(LLMService):
         Returns:
             The LLM's response as a string, or None if no response is generated.
         """
-        try:
-            messages = []
-            system = []
-            if isinstance(context, LLMContext):
-                # Future code will be something like this:
-                # adapter = self.get_llm_adapter()
-                # params: AWSBedrockLLMInvocationParams = adapter.get_llm_invocation_params(context)
-                # messages = params["messages"]
-                # system = params["system_instruction"]  # [{"text": "system message"}]
-                raise NotImplementedError(
-                    "Universal LLMContext is not yet supported for AWS Bedrock."
-                )
-            else:
-                context = AWSBedrockLLMContext.upgrade_to_bedrock(context)
-                messages = context.messages
-                system = getattr(context, "system", None)  # [{"text": "system message"}]
+        messages = []
+        system = []
+        if isinstance(context, LLMContext):
+            adapter: AWSBedrockLLMAdapter = self.get_llm_adapter()
+            params: AWSBedrockLLMInvocationParams = adapter.get_llm_invocation_params(context)
+            messages = params["messages"]
+            system = params["system"]  # [{"text": "system message"}]
+        else:
+            context = AWSBedrockLLMContext.upgrade_to_bedrock(context)
+            messages = context.messages
+            system = getattr(context, "system", None)  # [{"text": "system message"}]
 
-            # Determine if we're using Claude or Nova based on model ID
-            model_id = self.model_name
+        # Determine if we're using Claude or Nova based on model ID
+        model_id = self.model_name
 
-            # Prepare request parameters
-            request_params = {
-                "modelId": model_id,
-                "messages": messages,
-                "inferenceConfig": {
-                    "maxTokens": 8192,
-                    "temperature": 0.7,
-                    "topP": 0.9,
-                },
-            }
+        # Prepare request parameters
+        inference_config = self._build_inference_config()
 
-            if system:
-                request_params["system"] = system
+        request_params = {
+            "modelId": model_id,
+            "messages": messages,
+        }
 
-            async with self._aws_session.client(
-                service_name="bedrock-runtime", **self._aws_params
-            ) as client:
-                # Call Bedrock without streaming
-                response = await client.converse(**request_params)
-
-                # Extract the response text
-                if (
-                    "output" in response
-                    and "message" in response["output"]
-                    and "content" in response["output"]["message"]
-                ):
-                    content = response["output"]["message"]["content"]
-                    if isinstance(content, list):
-                        for item in content:
-                            if item.get("text"):
-                                return item["text"]
-                    elif isinstance(content, str):
-                        return content
-
-            return None
+        if inference_config:
+            request_params["inferenceConfig"] = inference_config
+
+        if system:
+            request_params["system"] = system
+
+        async with self._aws_session.client(
+            service_name="bedrock-runtime", **self._aws_params
+        ) as client:
+            # Call Bedrock without streaming
+            response = await client.converse(**request_params)
+
+            # Extract the response text
+            if (
+                "output" in response
+                and "message" in response["output"]
+                and "content" in response["output"]["message"]
+            ):
+                content = response["output"]["message"]["content"]
+                if isinstance(content, list):
+                    for item in content:
+                        if item.get("text"):
+                            return item["text"]
+                elif isinstance(content, str):
+                    return content
 
-        except Exception as e:
-            logger.error(f"Bedrock summary generation failed: {e}", exc_info=True)
         return None
 
     async def _create_converse_stream(self, client, request_params):
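Note: the rewrite drops the blanket try/except (failures now propagate to the caller instead of being logged and swallowed) and replaces the hardcoded inferenceConfig with the filtered one. A self-contained sketch of the non-streaming Converse call, assuming default AWS credentials; the model ID is illustrative:

    import aioboto3


    async def one_shot_converse(messages, system=None, inference_config=None):
        request_params = {
            "modelId": "anthropic.claude-3-5-sonnet-20240620-v1:0",  # illustrative
            "messages": messages,
        }
        if inference_config:  # omitted entirely when no parameters are set
            request_params["inferenceConfig"] = inference_config
        if system:
            request_params["system"] = system

        async with aioboto3.Session().client("bedrock-runtime") as client:
            response = await client.converse(**request_params)

        # Return the first text block, mirroring the extraction loop above
        for item in response["output"]["message"]["content"]:
            if item.get("text"):
                return item["text"]
        return None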
@@ -933,8 +951,25 @@ class AWSBedrockLLMService(LLMService):
             }
         }
 
+    def _get_llm_invocation_params(
+        self, context: OpenAILLMContext | LLMContext
+    ) -> AWSBedrockLLMInvocationParams:
+        # Universal LLMContext
+        if isinstance(context, LLMContext):
+            adapter: AWSBedrockLLMAdapter = self.get_llm_adapter()
+            params = adapter.get_llm_invocation_params(context)
+            return params
+
+        # AWS Bedrock-specific context
+        return AWSBedrockLLMInvocationParams(
+            system=getattr(context, "system", None),
+            messages=context.messages,
+            tools=context.tools or [],
+            tool_choice=context.tool_choice,
+        )
+
     @traced_llm
-    async def _process_context(self, context: AWSBedrockLLMContext):
+    async def _process_context(self, context: AWSBedrockLLMContext | LLMContext):
         # Usage tracking
         prompt_tokens = 0
         completion_tokens = 0
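Note: AWSBedrockLLMInvocationParams comes from pipecat/adapters/services/bedrock_adapter.py (also changed in this release) and its definition isn't shown in this diff. Judging from the keyword construction here and the subscript access in _process_context below, a plausible shape is a TypedDict along these lines:

    from typing import Any, Dict, List, Optional, TypedDict


    class AWSBedrockLLMInvocationParams(TypedDict):
        system: Optional[List[Dict[str, Any]]]  # e.g. [{"text": "system message"}]
        messages: List[Dict[str, Any]]
        tools: List[Dict[str, Any]]
        tool_choice: Optional[Any]  # "auto" | "none" | {"function": {"name": ...}}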
@@ -951,27 +986,33 @@ class AWSBedrockLLMService(LLMService):
 
         await self.start_ttfb_metrics()
 
-        # Set up inference config
-        inference_config = {
-            "maxTokens": self._settings["max_tokens"],
-            "temperature": self._settings["temperature"],
-            "topP": self._settings["top_p"],
-        }
+        params_from_context = self._get_llm_invocation_params(context)
+        messages = params_from_context["messages"]
+        system = params_from_context["system"]
+        tools = params_from_context["tools"]
+        tool_choice = params_from_context["tool_choice"]
+
+        # Set up inference config - only include parameters that are set
+        inference_config = self._build_inference_config()
 
         # Prepare request parameters
         request_params = {
             "modelId": self.model_name,
-            "messages": context.messages,
-            "inferenceConfig": inference_config,
+            "messages": messages,
             "additionalModelRequestFields": self._settings["additional_model_request_fields"],
         }
 
+        # Only add inference config if it has parameters
+        if inference_config:
+            request_params["inferenceConfig"] = inference_config
+
         # Add system message
-        request_params["system"] = context.system
+        if system:
+            request_params["system"] = system
 
         # Check if messages contain tool use or tool result content blocks
         has_tool_content = False
-        for message in context.messages:
+        for message in messages:
             if isinstance(message.get("content"), list):
                 for content_item in message["content"]:
                     if "toolUse" in content_item or "toolResult" in content_item:
@@ -981,7 +1022,6 @@ class AWSBedrockLLMService(LLMService):
                         break
                 break
         # Handle tools: use current tools, or no-op if tool content exists but no current tools
-        tools = context.tools or []
         if has_tool_content and not tools:
            tools = [self._create_no_op_tool()]
            using_noop_tool = True
@@ -990,17 +1030,15 @@ class AWSBedrockLLMService(LLMService):
             tool_config = {"tools": tools}
 
             # Only add tool_choice if we have real tools (not just no-op)
-            if not using_noop_tool and context.tool_choice:
-                if context.tool_choice == "auto":
+            if not using_noop_tool and tool_choice:
+                if tool_choice == "auto":
                     tool_config["toolChoice"] = {"auto": {}}
-                elif context.tool_choice == "none":
+                elif tool_choice == "none":
                     # Skip adding toolChoice for "none"
                     pass
-                elif (
-                    isinstance(context.tool_choice, dict) and "function" in context.tool_choice
-                ):
+                elif isinstance(tool_choice, dict) and "function" in tool_choice:
                     tool_config["toolChoice"] = {
-                        "tool": {"name": context.tool_choice["function"]["name"]}
+                        "tool": {"name": tool_choice["function"]["name"]}
                     }
 
             request_params["toolConfig"] = tool_config
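Note: these branches translate an OpenAI-style tool_choice into the Converse API's toolChoice. Distilled into a standalone helper (hypothetical name and tool, same mapping):

    def to_bedrock_tool_choice(tool_choice):
        """Map an OpenAI-style tool_choice to a Bedrock Converse toolChoice."""
        if tool_choice == "auto":
            return {"auto": {}}
        if isinstance(tool_choice, dict) and "function" in tool_choice:
            return {"tool": {"name": tool_choice["function"]["name"]}}
        return None  # "none" or unset: toolChoice is simply not sent


    assert to_bedrock_tool_choice("auto") == {"auto": {}}
    assert to_bedrock_tool_choice({"function": {"name": "get_weather"}}) == {
        "tool": {"name": "get_weather"}
    }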
@@ -1009,7 +1047,17 @@ class AWSBedrockLLMService(LLMService):
         if self._settings["latency"] in ["standard", "optimized"]:
             request_params["performanceConfig"] = {"latency": self._settings["latency"]}
 
-        logger.debug(f"Calling AWS Bedrock model with: {request_params}")
+        # Log request params with messages redacted for logging
+        if isinstance(context, LLMContext):
+            adapter = self.get_llm_adapter()
+            context_type_for_logging = "universal"
+            messages_for_logging = adapter.get_messages_for_logging(context)
+        else:
+            context_type_for_logging = "LLM-specific"
+            messages_for_logging = context.get_messages_for_logging()
+        logger.debug(
+            f"{self}: Generating chat from {context_type_for_logging} context [{system}] | {messages_for_logging}"
+        )
 
         async with self._aws_session.client(
             service_name="bedrock-runtime", **self._aws_params
@@ -1085,7 +1133,7 @@ class AWSBedrockLLMService(LLMService):
             # also get cancelled.
             use_completion_tokens_estimate = True
             raise
-        except httpx.TimeoutException:
+        except (ReadTimeoutError, asyncio.TimeoutError):
             await self._call_event_handler("on_completion_timeout")
         except Exception as e:
             logger.exception(f"{self} exception: {e}")
@@ -1117,15 +1165,9 @@ class AWSBedrockLLMService(LLMService):
         if isinstance(frame, OpenAILLMContextFrame):
             context = AWSBedrockLLMContext.upgrade_to_bedrock(frame.context)
         if isinstance(frame, LLMContextFrame):
-            raise NotImplementedError("Universal LLMContext is not yet supported for AWS Bedrock.")
+            context = frame.context
         elif isinstance(frame, LLMMessagesFrame):
             context = AWSBedrockLLMContext.from_messages(frame.messages)
-        elif isinstance(frame, VisionImageRawFrame):
-            # This is only useful in very simple pipelines because it creates
-            # a new context. Generally we want a context manager to catch
-            # UserImageRawFrames coming through the pipeline and add them
-            # to the context.
-            context = AWSBedrockLLMContext.from_image_frame(frame)
         elif isinstance(frame, LLMUpdateSettingsFrame):
             await self._update_settings(frame.settings)
         else: