dv-pipecat-ai 0.0.85.dev7__py3-none-any.whl → 0.0.85.dev698__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.

This version of dv-pipecat-ai might be problematic.

Files changed (156)
  1. {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/METADATA +78 -117
  2. {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/RECORD +156 -122
  3. pipecat/adapters/base_llm_adapter.py +38 -1
  4. pipecat/adapters/services/anthropic_adapter.py +9 -14
  5. pipecat/adapters/services/aws_nova_sonic_adapter.py +5 -0
  6. pipecat/adapters/services/bedrock_adapter.py +236 -13
  7. pipecat/adapters/services/gemini_adapter.py +12 -8
  8. pipecat/adapters/services/open_ai_adapter.py +19 -7
  9. pipecat/adapters/services/open_ai_realtime_adapter.py +5 -0
  10. pipecat/audio/filters/krisp_viva_filter.py +193 -0
  11. pipecat/audio/filters/noisereduce_filter.py +15 -0
  12. pipecat/audio/turn/base_turn_analyzer.py +9 -1
  13. pipecat/audio/turn/smart_turn/base_smart_turn.py +14 -8
  14. pipecat/audio/turn/smart_turn/data/__init__.py +0 -0
  15. pipecat/audio/turn/smart_turn/data/smart-turn-v3.0.onnx +0 -0
  16. pipecat/audio/turn/smart_turn/http_smart_turn.py +6 -2
  17. pipecat/audio/turn/smart_turn/local_smart_turn.py +1 -1
  18. pipecat/audio/turn/smart_turn/local_smart_turn_v2.py +1 -1
  19. pipecat/audio/turn/smart_turn/local_smart_turn_v3.py +124 -0
  20. pipecat/audio/vad/data/README.md +10 -0
  21. pipecat/audio/vad/vad_analyzer.py +13 -1
  22. pipecat/extensions/voicemail/voicemail_detector.py +5 -5
  23. pipecat/frames/frames.py +120 -87
  24. pipecat/observers/loggers/debug_log_observer.py +3 -3
  25. pipecat/observers/loggers/llm_log_observer.py +7 -3
  26. pipecat/observers/loggers/user_bot_latency_log_observer.py +22 -10
  27. pipecat/pipeline/runner.py +12 -4
  28. pipecat/pipeline/service_switcher.py +64 -36
  29. pipecat/pipeline/task.py +85 -24
  30. pipecat/processors/aggregators/dtmf_aggregator.py +28 -22
  31. pipecat/processors/aggregators/{gated_openai_llm_context.py → gated_llm_context.py} +9 -9
  32. pipecat/processors/aggregators/gated_open_ai_llm_context.py +12 -0
  33. pipecat/processors/aggregators/llm_response.py +6 -7
  34. pipecat/processors/aggregators/llm_response_universal.py +19 -15
  35. pipecat/processors/aggregators/user_response.py +6 -6
  36. pipecat/processors/aggregators/vision_image_frame.py +24 -2
  37. pipecat/processors/audio/audio_buffer_processor.py +43 -8
  38. pipecat/processors/filters/stt_mute_filter.py +2 -0
  39. pipecat/processors/frame_processor.py +103 -17
  40. pipecat/processors/frameworks/langchain.py +8 -2
  41. pipecat/processors/frameworks/rtvi.py +209 -68
  42. pipecat/processors/frameworks/strands_agents.py +170 -0
  43. pipecat/processors/logger.py +2 -2
  44. pipecat/processors/transcript_processor.py +4 -4
  45. pipecat/processors/user_idle_processor.py +3 -6
  46. pipecat/runner/run.py +270 -50
  47. pipecat/runner/types.py +2 -0
  48. pipecat/runner/utils.py +51 -10
  49. pipecat/serializers/exotel.py +5 -5
  50. pipecat/serializers/livekit.py +20 -0
  51. pipecat/serializers/plivo.py +6 -9
  52. pipecat/serializers/protobuf.py +6 -5
  53. pipecat/serializers/telnyx.py +2 -2
  54. pipecat/serializers/twilio.py +43 -23
  55. pipecat/services/ai_service.py +2 -6
  56. pipecat/services/anthropic/llm.py +2 -25
  57. pipecat/services/asyncai/tts.py +2 -3
  58. pipecat/services/aws/__init__.py +1 -0
  59. pipecat/services/aws/llm.py +122 -97
  60. pipecat/services/aws/nova_sonic/__init__.py +0 -0
  61. pipecat/services/aws/nova_sonic/context.py +367 -0
  62. pipecat/services/aws/nova_sonic/frames.py +25 -0
  63. pipecat/services/aws/nova_sonic/llm.py +1155 -0
  64. pipecat/services/aws/stt.py +1 -3
  65. pipecat/services/aws_nova_sonic/__init__.py +19 -1
  66. pipecat/services/aws_nova_sonic/aws.py +11 -1151
  67. pipecat/services/aws_nova_sonic/context.py +13 -355
  68. pipecat/services/aws_nova_sonic/frames.py +13 -17
  69. pipecat/services/azure/realtime/__init__.py +0 -0
  70. pipecat/services/azure/realtime/llm.py +65 -0
  71. pipecat/services/azure/stt.py +15 -0
  72. pipecat/services/cartesia/tts.py +2 -2
  73. pipecat/services/deepgram/__init__.py +1 -0
  74. pipecat/services/deepgram/flux/__init__.py +0 -0
  75. pipecat/services/deepgram/flux/stt.py +636 -0
  76. pipecat/services/elevenlabs/__init__.py +2 -1
  77. pipecat/services/elevenlabs/stt.py +254 -276
  78. pipecat/services/elevenlabs/tts.py +5 -5
  79. pipecat/services/fish/tts.py +2 -2
  80. pipecat/services/gemini_multimodal_live/events.py +38 -524
  81. pipecat/services/gemini_multimodal_live/file_api.py +23 -173
  82. pipecat/services/gemini_multimodal_live/gemini.py +41 -1403
  83. pipecat/services/gladia/stt.py +56 -72
  84. pipecat/services/google/__init__.py +1 -0
  85. pipecat/services/google/gemini_live/__init__.py +3 -0
  86. pipecat/services/google/gemini_live/file_api.py +189 -0
  87. pipecat/services/google/gemini_live/llm.py +1582 -0
  88. pipecat/services/google/gemini_live/llm_vertex.py +184 -0
  89. pipecat/services/google/llm.py +15 -11
  90. pipecat/services/google/llm_openai.py +3 -3
  91. pipecat/services/google/llm_vertex.py +86 -16
  92. pipecat/services/google/tts.py +7 -3
  93. pipecat/services/heygen/api.py +2 -0
  94. pipecat/services/heygen/client.py +8 -4
  95. pipecat/services/heygen/video.py +2 -0
  96. pipecat/services/hume/__init__.py +5 -0
  97. pipecat/services/hume/tts.py +220 -0
  98. pipecat/services/inworld/tts.py +6 -6
  99. pipecat/services/llm_service.py +15 -5
  100. pipecat/services/lmnt/tts.py +2 -2
  101. pipecat/services/mcp_service.py +4 -2
  102. pipecat/services/mem0/memory.py +6 -5
  103. pipecat/services/mistral/llm.py +29 -8
  104. pipecat/services/moondream/vision.py +42 -16
  105. pipecat/services/neuphonic/tts.py +2 -2
  106. pipecat/services/openai/__init__.py +1 -0
  107. pipecat/services/openai/base_llm.py +27 -20
  108. pipecat/services/openai/realtime/__init__.py +0 -0
  109. pipecat/services/openai/realtime/context.py +272 -0
  110. pipecat/services/openai/realtime/events.py +1106 -0
  111. pipecat/services/openai/realtime/frames.py +37 -0
  112. pipecat/services/openai/realtime/llm.py +829 -0
  113. pipecat/services/openai/tts.py +16 -8
  114. pipecat/services/openai_realtime/__init__.py +27 -0
  115. pipecat/services/openai_realtime/azure.py +21 -0
  116. pipecat/services/openai_realtime/context.py +21 -0
  117. pipecat/services/openai_realtime/events.py +21 -0
  118. pipecat/services/openai_realtime/frames.py +21 -0
  119. pipecat/services/openai_realtime_beta/azure.py +16 -0
  120. pipecat/services/openai_realtime_beta/openai.py +17 -5
  121. pipecat/services/playht/tts.py +31 -4
  122. pipecat/services/rime/tts.py +3 -4
  123. pipecat/services/sarvam/tts.py +2 -6
  124. pipecat/services/simli/video.py +2 -2
  125. pipecat/services/speechmatics/stt.py +1 -7
  126. pipecat/services/stt_service.py +34 -0
  127. pipecat/services/tavus/video.py +2 -2
  128. pipecat/services/tts_service.py +9 -9
  129. pipecat/services/vision_service.py +7 -6
  130. pipecat/tests/utils.py +4 -4
  131. pipecat/transcriptions/language.py +41 -1
  132. pipecat/transports/base_input.py +17 -42
  133. pipecat/transports/base_output.py +42 -26
  134. pipecat/transports/daily/transport.py +199 -26
  135. pipecat/transports/heygen/__init__.py +0 -0
  136. pipecat/transports/heygen/transport.py +381 -0
  137. pipecat/transports/livekit/transport.py +228 -63
  138. pipecat/transports/local/audio.py +6 -1
  139. pipecat/transports/local/tk.py +11 -2
  140. pipecat/transports/network/fastapi_websocket.py +1 -1
  141. pipecat/transports/smallwebrtc/connection.py +98 -19
  142. pipecat/transports/smallwebrtc/request_handler.py +204 -0
  143. pipecat/transports/smallwebrtc/transport.py +65 -23
  144. pipecat/transports/tavus/transport.py +23 -12
  145. pipecat/transports/websocket/client.py +41 -5
  146. pipecat/transports/websocket/fastapi.py +21 -11
  147. pipecat/transports/websocket/server.py +14 -7
  148. pipecat/transports/whatsapp/api.py +8 -0
  149. pipecat/transports/whatsapp/client.py +47 -0
  150. pipecat/utils/base_object.py +54 -22
  151. pipecat/utils/string.py +12 -1
  152. pipecat/utils/tracing/service_decorators.py +21 -21
  153. {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/WHEEL +0 -0
  154. {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/licenses/LICENSE +0 -0
  155. {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/top_level.txt +0 -0
  156. /pipecat/services/{aws_nova_sonic → aws/nova_sonic}/ready.wav +0 -0
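
Note on the file list: several service packages were reorganized in this release (aws_nova_sonic → aws/nova_sonic, gemini_multimodal_live → google/gemini_live, openai_realtime → openai/realtime), with the new modules carrying the implementation and the old paths shrinking to a handful of lines (e.g. aws_nova_sonic/aws.py shows +11 −1151). A minimal sketch of what such a compatibility shim typically looks like, assuming the old modules now just re-export and warn (the module contents and deprecation wording below are illustrative, not copied from the package):

    # Hypothetical shim for pipecat/services/aws_nova_sonic/__init__.py,
    # assuming the old import path re-exports the moved implementation.
    import warnings

    from pipecat.services.aws.nova_sonic.llm import *  # noqa: F401,F403

    warnings.warn(
        "pipecat.services.aws_nova_sonic is deprecated; "
        "import from pipecat.services.aws.nova_sonic instead.",
        DeprecationWarning,
        stacklevel=2,
    )

The diff below is from pipecat/services/aws/llm.py (entry 59 above).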
pipecat/services/aws/llm.py

@@ -25,7 +25,10 @@ from loguru import logger
 from PIL import Image
 from pydantic import BaseModel, Field

-from pipecat.adapters.services.bedrock_adapter import AWSBedrockLLMAdapter
+from pipecat.adapters.services.bedrock_adapter import (
+    AWSBedrockLLMAdapter,
+    AWSBedrockLLMInvocationParams,
+)
 from pipecat.frames.frames import (
     Frame,
     FunctionCallCancelFrame,
@@ -39,7 +42,6 @@ from pipecat.frames.frames import (
     LLMTextFrame,
     LLMUpdateSettingsFrame,
     UserImageRawFrame,
-    VisionImageRawFrame,
 )
 from pipecat.metrics.metrics import LLMTokenUsage
 from pipecat.processors.aggregators.llm_context import LLMContext
@@ -59,7 +61,6 @@ from pipecat.utils.tracing.service_decorators import traced_llm

 try:
     import aioboto3
-    import httpx
     from botocore.config import Config
     from botocore.exceptions import ReadTimeoutError
 except ModuleNotFoundError as e:
@@ -180,22 +181,6 @@ class AWSBedrockLLMContext(OpenAILLMContext):
         self._restructure_from_openai_messages()
         return self

-    @classmethod
-    def from_image_frame(cls, frame: VisionImageRawFrame) -> "AWSBedrockLLMContext":
-        """Create AWS Bedrock context from vision image frame.
-
-        Args:
-            frame: The vision image frame to convert.
-
-        Returns:
-            New AWS Bedrock LLM context instance.
-        """
-        context = cls()
-        context.add_image_frame_message(
-            format=frame.format, size=frame.size, image=frame.image, text=frame.text
-        )
-        return context
-
     def set_messages(self, messages: List):
         """Set the messages list and restructure for Bedrock format.

@@ -399,9 +384,33 @@ class AWSBedrockLLMContext(OpenAILLMContext):
         elif isinstance(content, list):
             new_content = []
             for item in content:
+                # fix empty text
                 if item.get("type", "") == "text":
                     text_content = item["text"] if item["text"] != "" else "(empty)"
                     new_content.append({"text": text_content})
+                # handle image_url -> image conversion
+                if item["type"] == "image_url":
+                    new_item = {
+                        "image": {
+                            "format": "jpeg",
+                            "source": {
+                                "bytes": base64.b64decode(item["image_url"]["url"].split(",")[1])
+                            },
+                        }
+                    }
+                    new_content.append(new_item)
+            # In the case where there's a single image in the list (like what
+            # would result from a UserImageRawFrame), ensure that the image
+            # comes before text
+            image_indices = [i for i, item in enumerate(new_content) if "image" in item]
+            text_indices = [i for i, item in enumerate(new_content) if "text" in item]
+            if len(image_indices) == 1 and text_indices:
+                img_idx = image_indices[0]
+                first_txt_idx = text_indices[0]
+                if img_idx > first_txt_idx:
+                    # Move image before the first text
+                    image_item = new_content.pop(img_idx)
+                    new_content.insert(first_txt_idx, image_item)
             return {"role": message["role"], "content": new_content}

         return message
@@ -569,7 +578,7 @@ class AWSBedrockLLMContext(OpenAILLMContext):
             if isinstance(msg["content"], list):
                 for item in msg["content"]:
                     if item.get("image"):
-                        item["source"]["bytes"] = "..."
+                        item["image"]["source"]["bytes"] = "..."
             msgs.append(msg)
         return msgs

@@ -801,64 +810,55 @@ class AWSBedrockLLMService(LLMService):
         Returns:
             The LLM's response as a string, or None if no response is generated.
         """
-        try:
-            messages = []
-            system = []
-            if isinstance(context, LLMContext):
-                # Future code will be something like this:
-                # adapter = self.get_llm_adapter()
-                # params: AWSBedrockLLMInvocationParams = adapter.get_llm_invocation_params(context)
-                # messages = params["messages"]
-                # system = params["system_instruction"]  # [{"text": "system message"}]
-                raise NotImplementedError(
-                    "Universal LLMContext is not yet supported for AWS Bedrock."
-                )
-            else:
-                context = AWSBedrockLLMContext.upgrade_to_bedrock(context)
-                messages = context.messages
-                system = getattr(context, "system", None)  # [{"text": "system message"}]
-
-            # Determine if we're using Claude or Nova based on model ID
-            model_id = self.model_name
-
-            # Prepare request parameters
-            request_params = {
-                "modelId": model_id,
-                "messages": messages,
-                "inferenceConfig": {
-                    "maxTokens": 8192,
-                    "temperature": 0.7,
-                    "topP": 0.9,
-                },
-            }
-
-            if system:
-                request_params["system"] = system
+        messages = []
+        system = []
+        if isinstance(context, LLMContext):
+            adapter: AWSBedrockLLMAdapter = self.get_llm_adapter()
+            params: AWSBedrockLLMInvocationParams = adapter.get_llm_invocation_params(context)
+            messages = params["messages"]
+            system = params["system"]  # [{"text": "system message"}]
+        else:
+            context = AWSBedrockLLMContext.upgrade_to_bedrock(context)
+            messages = context.messages
+            system = getattr(context, "system", None)  # [{"text": "system message"}]
+
+        # Determine if we're using Claude or Nova based on model ID
+        model_id = self.model_name
+
+        # Prepare request parameters
+        request_params = {
+            "modelId": model_id,
+            "messages": messages,
+            "inferenceConfig": {
+                "maxTokens": 8192,
+                "temperature": 0.7,
+                "topP": 0.9,
+            },
+        }

-            async with self._aws_session.client(
-                service_name="bedrock-runtime", **self._aws_params
-            ) as client:
-                # Call Bedrock without streaming
-                response = await client.converse(**request_params)
-
-                # Extract the response text
-                if (
-                    "output" in response
-                    and "message" in response["output"]
-                    and "content" in response["output"]["message"]
-                ):
-                    content = response["output"]["message"]["content"]
-                    if isinstance(content, list):
-                        for item in content:
-                            if item.get("text"):
-                                return item["text"]
-                    elif isinstance(content, str):
-                        return content
-
-            return None
+        if system:
+            request_params["system"] = system
+
+        async with self._aws_session.client(
+            service_name="bedrock-runtime", **self._aws_params
+        ) as client:
+            # Call Bedrock without streaming
+            response = await client.converse(**request_params)
+
+            # Extract the response text
+            if (
+                "output" in response
+                and "message" in response["output"]
+                and "content" in response["output"]["message"]
+            ):
+                content = response["output"]["message"]["content"]
+                if isinstance(content, list):
+                    for item in content:
+                        if item.get("text"):
+                            return item["text"]
+                elif isinstance(content, str):
+                    return content

-        except Exception as e:
-            logger.error(f"Bedrock summary generation failed: {e}", exc_info=True)
         return None

     async def _create_converse_stream(self, client, request_params):
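
Worth noting in the hunk above: the enclosing try/except that logged "Bedrock summary generation failed" was removed, so botocore and asyncio errors from this one-shot converse call now propagate to the caller. A hedged sketch of caller-side handling that restores the old log-and-return-None behavior (llm.run_inference as the entry point is an assumption, not confirmed by this diff):

    from loguru import logger


    async def summarize_or_none(llm, context):
        # Hypothetical wrapper: errors are no longer swallowed inside the
        # service, so callers wanting the old behavior handle them here.
        try:
            return await llm.run_inference(context)
        except Exception as e:
            logger.error(f"Bedrock summary generation failed: {e}")
            return None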
@@ -933,8 +933,25 @@ class AWSBedrockLLMService(LLMService):
             }
         }

+    def _get_llm_invocation_params(
+        self, context: OpenAILLMContext | LLMContext
+    ) -> AWSBedrockLLMInvocationParams:
+        # Universal LLMContext
+        if isinstance(context, LLMContext):
+            adapter: AWSBedrockLLMAdapter = self.get_llm_adapter()
+            params = adapter.get_llm_invocation_params(context)
+            return params
+
+        # AWS Bedrock-specific context
+        return AWSBedrockLLMInvocationParams(
+            system=getattr(context, "system", None),
+            messages=context.messages,
+            tools=context.tools or [],
+            tool_choice=context.tool_choice,
+        )
+
     @traced_llm
-    async def _process_context(self, context: AWSBedrockLLMContext):
+    async def _process_context(self, context: AWSBedrockLLMContext | LLMContext):
         # Usage tracking
         prompt_tokens = 0
         completion_tokens = 0
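
AWSBedrockLLMInvocationParams is defined in the expanded bedrock_adapter.py (+236 −13 in the file list), which is not shown here. The dict-style reads (params["messages"]) combined with the keyword construction above suggest a TypedDict along these lines (an inferred sketch; the field types are assumptions):

    from typing import Any, Dict, List, Optional, TypedDict


    class AWSBedrockLLMInvocationParams(TypedDict):
        system: Optional[List[Dict[str, str]]]  # e.g. [{"text": "system message"}]
        messages: List[Dict[str, Any]]
        tools: List[Dict[str, Any]]
        tool_choice: Any  # "auto", "none", or {"function": {"name": ...}}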
@@ -951,6 +968,12 @@ class AWSBedrockLLMService(LLMService):

         await self.start_ttfb_metrics()

+        params_from_context = self._get_llm_invocation_params(context)
+        messages = params_from_context["messages"]
+        system = params_from_context["system"]
+        tools = params_from_context["tools"]
+        tool_choice = params_from_context["tool_choice"]
+
         # Set up inference config
         inference_config = {
             "maxTokens": self._settings["max_tokens"],
@@ -961,17 +984,18 @@ class AWSBedrockLLMService(LLMService):
         # Prepare request parameters
         request_params = {
             "modelId": self.model_name,
-            "messages": context.messages,
+            "messages": messages,
             "inferenceConfig": inference_config,
             "additionalModelRequestFields": self._settings["additional_model_request_fields"],
         }

         # Add system message
-        request_params["system"] = context.system
+        if system:
+            request_params["system"] = system

         # Check if messages contain tool use or tool result content blocks
         has_tool_content = False
-        for message in context.messages:
+        for message in messages:
             if isinstance(message.get("content"), list):
                 for content_item in message["content"]:
                     if "toolUse" in content_item or "toolResult" in content_item:
@@ -981,7 +1005,6 @@ class AWSBedrockLLMService(LLMService):
                         break

         # Handle tools: use current tools, or no-op if tool content exists but no current tools
-        tools = context.tools or []
         if has_tool_content and not tools:
             tools = [self._create_no_op_tool()]
             using_noop_tool = True
@@ -990,17 +1013,15 @@ class AWSBedrockLLMService(LLMService):
             tool_config = {"tools": tools}

             # Only add tool_choice if we have real tools (not just no-op)
-            if not using_noop_tool and context.tool_choice:
-                if context.tool_choice == "auto":
+            if not using_noop_tool and tool_choice:
+                if tool_choice == "auto":
                     tool_config["toolChoice"] = {"auto": {}}
-                elif context.tool_choice == "none":
+                elif tool_choice == "none":
                     # Skip adding toolChoice for "none"
                     pass
-                elif (
-                    isinstance(context.tool_choice, dict) and "function" in context.tool_choice
-                ):
+                elif isinstance(tool_choice, dict) and "function" in tool_choice:
                     tool_config["toolChoice"] = {
-                        "tool": {"name": context.tool_choice["function"]["name"]}
+                        "tool": {"name": tool_choice["function"]["name"]}
                     }

             request_params["toolConfig"] = tool_config
1009
1030
  if self._settings["latency"] in ["standard", "optimized"]:
1010
1031
  request_params["performanceConfig"] = {"latency": self._settings["latency"]}
1011
1032
 
1012
- logger.debug(f"Calling AWS Bedrock model with: {request_params}")
1033
+ # Log request params with messages redacted for logging
1034
+ if isinstance(context, LLMContext):
1035
+ adapter = self.get_llm_adapter()
1036
+ context_type_for_logging = "universal"
1037
+ messages_for_logging = adapter.get_messages_for_logging(context)
1038
+ else:
1039
+ context_type_for_logging = "LLM-specific"
1040
+ messages_for_logging = context.get_messages_for_logging()
1041
+ logger.debug(
1042
+ f"{self}: Generating chat from {context_type_for_logging} context [{system}] | {messages_for_logging}"
1043
+ )
1013
1044
 
1014
1045
  async with self._aws_session.client(
1015
1046
  service_name="bedrock-runtime", **self._aws_params
@@ -1085,7 +1116,7 @@ class AWSBedrockLLMService(LLMService):
             # also get cancelled.
             use_completion_tokens_estimate = True
             raise
-        except httpx.TimeoutException:
+        except (ReadTimeoutError, asyncio.TimeoutError):
            await self._call_event_handler("on_completion_timeout")
         except Exception as e:
             logger.exception(f"{self} exception: {e}")
@@ -1117,15 +1148,9 @@ class AWSBedrockLLMService(LLMService):
         if isinstance(frame, OpenAILLMContextFrame):
             context = AWSBedrockLLMContext.upgrade_to_bedrock(frame.context)
         if isinstance(frame, LLMContextFrame):
-            raise NotImplementedError("Universal LLMContext is not yet supported for AWS Bedrock.")
+            context = frame.context
         elif isinstance(frame, LLMMessagesFrame):
             context = AWSBedrockLLMContext.from_messages(frame.messages)
-        elif isinstance(frame, VisionImageRawFrame):
-            # This is only useful in very simple pipelines because it creates
-            # a new context. Generally we want a context manager to catch
-            # UserImageRawFrames coming through the pipeline and add them
-            # to the context.
-            context = AWSBedrockLLMContext.from_image_frame(frame)
         elif isinstance(frame, LLMUpdateSettingsFrame):
             await self._update_settings(frame.settings)
         else: