dv-pipecat-ai 0.0.85.dev7__py3-none-any.whl → 0.0.85.dev699__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of dv-pipecat-ai might be problematic.
- {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev699.dist-info}/METADATA +78 -117
- {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev699.dist-info}/RECORD +158 -122
- pipecat/adapters/base_llm_adapter.py +38 -1
- pipecat/adapters/services/anthropic_adapter.py +9 -14
- pipecat/adapters/services/aws_nova_sonic_adapter.py +5 -0
- pipecat/adapters/services/bedrock_adapter.py +236 -13
- pipecat/adapters/services/gemini_adapter.py +12 -8
- pipecat/adapters/services/open_ai_adapter.py +19 -7
- pipecat/adapters/services/open_ai_realtime_adapter.py +5 -0
- pipecat/audio/filters/krisp_viva_filter.py +193 -0
- pipecat/audio/filters/noisereduce_filter.py +15 -0
- pipecat/audio/turn/base_turn_analyzer.py +9 -1
- pipecat/audio/turn/smart_turn/base_smart_turn.py +14 -8
- pipecat/audio/turn/smart_turn/data/__init__.py +0 -0
- pipecat/audio/turn/smart_turn/data/smart-turn-v3.0.onnx +0 -0
- pipecat/audio/turn/smart_turn/http_smart_turn.py +6 -2
- pipecat/audio/turn/smart_turn/local_smart_turn.py +1 -1
- pipecat/audio/turn/smart_turn/local_smart_turn_v2.py +1 -1
- pipecat/audio/turn/smart_turn/local_smart_turn_v3.py +124 -0
- pipecat/audio/vad/data/README.md +10 -0
- pipecat/audio/vad/vad_analyzer.py +13 -1
- pipecat/extensions/voicemail/voicemail_detector.py +5 -5
- pipecat/frames/frames.py +120 -87
- pipecat/observers/loggers/debug_log_observer.py +3 -3
- pipecat/observers/loggers/llm_log_observer.py +7 -3
- pipecat/observers/loggers/user_bot_latency_log_observer.py +22 -10
- pipecat/pipeline/runner.py +12 -4
- pipecat/pipeline/service_switcher.py +64 -36
- pipecat/pipeline/task.py +85 -24
- pipecat/processors/aggregators/dtmf_aggregator.py +28 -22
- pipecat/processors/aggregators/{gated_openai_llm_context.py → gated_llm_context.py} +9 -9
- pipecat/processors/aggregators/gated_open_ai_llm_context.py +12 -0
- pipecat/processors/aggregators/llm_response.py +6 -7
- pipecat/processors/aggregators/llm_response_universal.py +19 -15
- pipecat/processors/aggregators/user_response.py +6 -6
- pipecat/processors/aggregators/vision_image_frame.py +24 -2
- pipecat/processors/audio/audio_buffer_processor.py +43 -8
- pipecat/processors/filters/stt_mute_filter.py +2 -0
- pipecat/processors/frame_processor.py +103 -17
- pipecat/processors/frameworks/langchain.py +8 -2
- pipecat/processors/frameworks/rtvi.py +209 -68
- pipecat/processors/frameworks/strands_agents.py +170 -0
- pipecat/processors/logger.py +2 -2
- pipecat/processors/transcript_processor.py +4 -4
- pipecat/processors/user_idle_processor.py +3 -6
- pipecat/runner/run.py +270 -50
- pipecat/runner/types.py +2 -0
- pipecat/runner/utils.py +51 -10
- pipecat/serializers/exotel.py +5 -5
- pipecat/serializers/livekit.py +20 -0
- pipecat/serializers/plivo.py +6 -9
- pipecat/serializers/protobuf.py +6 -5
- pipecat/serializers/telnyx.py +2 -2
- pipecat/serializers/twilio.py +43 -23
- pipecat/services/ai_service.py +2 -6
- pipecat/services/anthropic/llm.py +2 -25
- pipecat/services/asyncai/tts.py +2 -3
- pipecat/services/aws/__init__.py +1 -0
- pipecat/services/aws/llm.py +122 -97
- pipecat/services/aws/nova_sonic/__init__.py +0 -0
- pipecat/services/aws/nova_sonic/context.py +367 -0
- pipecat/services/aws/nova_sonic/frames.py +25 -0
- pipecat/services/aws/nova_sonic/llm.py +1155 -0
- pipecat/services/aws/stt.py +1 -3
- pipecat/services/aws_nova_sonic/__init__.py +19 -1
- pipecat/services/aws_nova_sonic/aws.py +11 -1151
- pipecat/services/aws_nova_sonic/context.py +13 -355
- pipecat/services/aws_nova_sonic/frames.py +13 -17
- pipecat/services/azure/realtime/__init__.py +0 -0
- pipecat/services/azure/realtime/llm.py +65 -0
- pipecat/services/azure/stt.py +15 -0
- pipecat/services/cartesia/tts.py +2 -2
- pipecat/services/deepgram/__init__.py +1 -0
- pipecat/services/deepgram/flux/__init__.py +0 -0
- pipecat/services/deepgram/flux/stt.py +636 -0
- pipecat/services/elevenlabs/__init__.py +2 -1
- pipecat/services/elevenlabs/stt.py +254 -276
- pipecat/services/elevenlabs/tts.py +5 -5
- pipecat/services/fish/tts.py +2 -2
- pipecat/services/gemini_multimodal_live/events.py +38 -524
- pipecat/services/gemini_multimodal_live/file_api.py +23 -173
- pipecat/services/gemini_multimodal_live/gemini.py +41 -1403
- pipecat/services/gladia/stt.py +56 -72
- pipecat/services/google/__init__.py +1 -0
- pipecat/services/google/gemini_live/__init__.py +3 -0
- pipecat/services/google/gemini_live/file_api.py +189 -0
- pipecat/services/google/gemini_live/llm.py +1582 -0
- pipecat/services/google/gemini_live/llm_vertex.py +184 -0
- pipecat/services/google/llm.py +15 -11
- pipecat/services/google/llm_openai.py +3 -3
- pipecat/services/google/llm_vertex.py +86 -16
- pipecat/services/google/tts.py +7 -3
- pipecat/services/heygen/api.py +2 -0
- pipecat/services/heygen/client.py +8 -4
- pipecat/services/heygen/video.py +2 -0
- pipecat/services/hume/__init__.py +5 -0
- pipecat/services/hume/tts.py +220 -0
- pipecat/services/inworld/tts.py +6 -6
- pipecat/services/llm_service.py +15 -5
- pipecat/services/lmnt/tts.py +2 -2
- pipecat/services/mcp_service.py +4 -2
- pipecat/services/mem0/memory.py +6 -5
- pipecat/services/mistral/llm.py +29 -8
- pipecat/services/moondream/vision.py +42 -16
- pipecat/services/neuphonic/tts.py +2 -2
- pipecat/services/openai/__init__.py +1 -0
- pipecat/services/openai/base_llm.py +27 -20
- pipecat/services/openai/realtime/__init__.py +0 -0
- pipecat/services/openai/realtime/context.py +272 -0
- pipecat/services/openai/realtime/events.py +1106 -0
- pipecat/services/openai/realtime/frames.py +37 -0
- pipecat/services/openai/realtime/llm.py +829 -0
- pipecat/services/openai/tts.py +16 -8
- pipecat/services/openai_realtime/__init__.py +27 -0
- pipecat/services/openai_realtime/azure.py +21 -0
- pipecat/services/openai_realtime/context.py +21 -0
- pipecat/services/openai_realtime/events.py +21 -0
- pipecat/services/openai_realtime/frames.py +21 -0
- pipecat/services/openai_realtime_beta/azure.py +16 -0
- pipecat/services/openai_realtime_beta/openai.py +17 -5
- pipecat/services/playht/tts.py +31 -4
- pipecat/services/rime/tts.py +3 -4
- pipecat/services/salesforce/__init__.py +9 -0
- pipecat/services/salesforce/llm.py +465 -0
- pipecat/services/sarvam/tts.py +2 -6
- pipecat/services/simli/video.py +2 -2
- pipecat/services/speechmatics/stt.py +1 -7
- pipecat/services/stt_service.py +34 -0
- pipecat/services/tavus/video.py +2 -2
- pipecat/services/tts_service.py +9 -9
- pipecat/services/vision_service.py +7 -6
- pipecat/tests/utils.py +4 -4
- pipecat/transcriptions/language.py +41 -1
- pipecat/transports/base_input.py +17 -42
- pipecat/transports/base_output.py +42 -26
- pipecat/transports/daily/transport.py +199 -26
- pipecat/transports/heygen/__init__.py +0 -0
- pipecat/transports/heygen/transport.py +381 -0
- pipecat/transports/livekit/transport.py +228 -63
- pipecat/transports/local/audio.py +6 -1
- pipecat/transports/local/tk.py +11 -2
- pipecat/transports/network/fastapi_websocket.py +1 -1
- pipecat/transports/smallwebrtc/connection.py +98 -19
- pipecat/transports/smallwebrtc/request_handler.py +204 -0
- pipecat/transports/smallwebrtc/transport.py +65 -23
- pipecat/transports/tavus/transport.py +23 -12
- pipecat/transports/websocket/client.py +41 -5
- pipecat/transports/websocket/fastapi.py +21 -11
- pipecat/transports/websocket/server.py +14 -7
- pipecat/transports/whatsapp/api.py +8 -0
- pipecat/transports/whatsapp/client.py +47 -0
- pipecat/utils/base_object.py +54 -22
- pipecat/utils/string.py +12 -1
- pipecat/utils/tracing/service_decorators.py +21 -21
- {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev699.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev699.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev699.dist-info}/top_level.txt +0 -0
- /pipecat/services/{aws_nova_sonic → aws/nova_sonic}/ready.wav +0 -0
pipecat/services/aws/llm.py
CHANGED
```diff
@@ -25,7 +25,10 @@ from loguru import logger
 from PIL import Image
 from pydantic import BaseModel, Field
 
-from pipecat.adapters.services.bedrock_adapter import
+from pipecat.adapters.services.bedrock_adapter import (
+    AWSBedrockLLMAdapter,
+    AWSBedrockLLMInvocationParams,
+)
 from pipecat.frames.frames import (
     Frame,
     FunctionCallCancelFrame,
@@ -39,7 +42,6 @@ from pipecat.frames.frames import (
     LLMTextFrame,
     LLMUpdateSettingsFrame,
     UserImageRawFrame,
-    VisionImageRawFrame,
 )
 from pipecat.metrics.metrics import LLMTokenUsage
 from pipecat.processors.aggregators.llm_context import LLMContext
@@ -59,7 +61,6 @@ from pipecat.utils.tracing.service_decorators import traced_llm
 
 try:
     import aioboto3
-    import httpx
     from botocore.config import Config
     from botocore.exceptions import ReadTimeoutError
 except ModuleNotFoundError as e:
@@ -180,22 +181,6 @@ class AWSBedrockLLMContext(OpenAILLMContext):
         self._restructure_from_openai_messages()
         return self
 
-    @classmethod
-    def from_image_frame(cls, frame: VisionImageRawFrame) -> "AWSBedrockLLMContext":
-        """Create AWS Bedrock context from vision image frame.
-
-        Args:
-            frame: The vision image frame to convert.
-
-        Returns:
-            New AWS Bedrock LLM context instance.
-        """
-        context = cls()
-        context.add_image_frame_message(
-            format=frame.format, size=frame.size, image=frame.image, text=frame.text
-        )
-        return context
-
     def set_messages(self, messages: List):
         """Set the messages list and restructure for Bedrock format.
 
```
```diff
@@ -399,9 +384,33 @@ class AWSBedrockLLMContext(OpenAILLMContext):
         elif isinstance(content, list):
             new_content = []
             for item in content:
+                # fix empty text
                 if item.get("type", "") == "text":
                     text_content = item["text"] if item["text"] != "" else "(empty)"
                     new_content.append({"text": text_content})
+                # handle image_url -> image conversion
+                if item["type"] == "image_url":
+                    new_item = {
+                        "image": {
+                            "format": "jpeg",
+                            "source": {
+                                "bytes": base64.b64decode(item["image_url"]["url"].split(",")[1])
+                            },
+                        }
+                    }
+                    new_content.append(new_item)
+            # In the case where there's a single image in the list (like what
+            # would result from a UserImageRawFrame), ensure that the image
+            # comes before text
+            image_indices = [i for i, item in enumerate(new_content) if "image" in item]
+            text_indices = [i for i, item in enumerate(new_content) if "text" in item]
+            if len(image_indices) == 1 and text_indices:
+                img_idx = image_indices[0]
+                first_txt_idx = text_indices[0]
+                if img_idx > first_txt_idx:
+                    # Move image before the first text
+                    image_item = new_content.pop(img_idx)
+                    new_content.insert(first_txt_idx, image_item)
             return {"role": message["role"], "content": new_content}
 
         return message
```
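The new branch above converts OpenAI-style `image_url` parts (base64 data URLs) into Bedrock Converse `image` blocks carrying raw bytes. A minimal standalone sketch of that transformation, with an illustrative payload:

```python
import base64

# An OpenAI-style content part carrying a base64 data URL (illustrative payload).
openai_item = {
    "type": "image_url",
    "image_url": {
        "url": "data:image/jpeg;base64," + base64.b64encode(b"<jpeg bytes>").decode()
    },
}

# Bedrock's Converse API expects raw bytes plus an explicit format, not a data URL.
bedrock_item = {
    "image": {
        "format": "jpeg",
        "source": {"bytes": base64.b64decode(openai_item["image_url"]["url"].split(",")[1])},
    }
}

assert bedrock_item["image"]["source"]["bytes"] == b"<jpeg bytes>"
```

Note that the converted block hardcodes `"format": "jpeg"`, so a PNG data URL would be mislabeled; presumably the upstream callers only produce JPEG.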
```diff
@@ -569,7 +578,7 @@ class AWSBedrockLLMContext(OpenAILLMContext):
             if isinstance(msg["content"], list):
                 for item in msg["content"]:
                     if item.get("image"):
-                        item["source"]["bytes"] = "..."
+                        item["image"]["source"]["bytes"] = "..."
             msgs.append(msg)
         return msgs
 
```
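The fix matters because Converse-format items nest `source` under the `image` key; the old code indexed `item["source"]` at the top level and raised `KeyError` for any image message being redacted for logs. A minimal reproduction (item shape from the conversion above):

```python
# Shape produced by the image_url -> image conversion shown earlier:
item = {"image": {"format": "jpeg", "source": {"bytes": b"\xff\xd8..."}}}

# Old:   item["source"]["bytes"] = "..."   -> KeyError: 'source'
# Fixed: follow the actual nesting before redacting the raw bytes.
item["image"]["source"]["bytes"] = "..."
assert item == {"image": {"format": "jpeg", "source": {"bytes": "..."}}}
```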
```diff
@@ -801,64 +810,55 @@ class AWSBedrockLLMService(LLMService):
         Returns:
             The LLM's response as a string, or None if no response is generated.
         """
-[lines 804-828 removed; content not preserved in this diff view]
-                "maxTokens": 8192,
-                "temperature": 0.7,
-                "topP": 0.9,
-            },
-        }
-
-        if system:
-            request_params["system"] = system
+        messages = []
+        system = []
+        if isinstance(context, LLMContext):
+            adapter: AWSBedrockLLMAdapter = self.get_llm_adapter()
+            params: AWSBedrockLLMInvocationParams = adapter.get_llm_invocation_params(context)
+            messages = params["messages"]
+            system = params["system"]  # [{"text": "system message"}]
+        else:
+            context = AWSBedrockLLMContext.upgrade_to_bedrock(context)
+            messages = context.messages
+            system = getattr(context, "system", None)  # [{"text": "system message"}]
+
+        # Determine if we're using Claude or Nova based on model ID
+        model_id = self.model_name
+
+        # Prepare request parameters
+        request_params = {
+            "modelId": model_id,
+            "messages": messages,
+            "inferenceConfig": {
+                "maxTokens": 8192,
+                "temperature": 0.7,
+                "topP": 0.9,
+            },
+        }
 
-[lines 838-858 removed; content not preserved in this diff view]
+        if system:
+            request_params["system"] = system
+
+        async with self._aws_session.client(
+            service_name="bedrock-runtime", **self._aws_params
+        ) as client:
+            # Call Bedrock without streaming
+            response = await client.converse(**request_params)
+
+            # Extract the response text
+            if (
+                "output" in response
+                and "message" in response["output"]
+                and "content" in response["output"]["message"]
+            ):
+                content = response["output"]["message"]["content"]
+                if isinstance(content, list):
+                    for item in content:
+                        if item.get("text"):
+                            return item["text"]
+                elif isinstance(content, str):
+                    return content
 
-        except Exception as e:
-            logger.error(f"Bedrock summary generation failed: {e}", exc_info=True)
         return None
 
     async def _create_converse_stream(self, client, request_params):
```
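The replacement body boils down to a single non-streaming Converse call. A standalone sketch of that call path via aioboto3, assuming AWS credentials and region come from the environment and using an illustrative model ID:

```python
import asyncio

import aioboto3


async def converse_once(messages, system=None):
    """One-shot (non-streaming) Bedrock Converse call mirroring the diff's shape."""
    request_params = {
        "modelId": "us.anthropic.claude-3-5-haiku-20241022-v1:0",  # illustrative model ID
        "messages": messages,  # e.g. [{"role": "user", "content": [{"text": "..."}]}]
        "inferenceConfig": {"maxTokens": 8192, "temperature": 0.7, "topP": 0.9},
    }
    if system:
        request_params["system"] = system  # e.g. [{"text": "You are terse."}]

    session = aioboto3.Session()
    async with session.client(service_name="bedrock-runtime") as client:
        response = await client.converse(**request_params)

    # The reply text arrives as a list of content blocks under output.message.
    for item in response["output"]["message"]["content"]:
        if item.get("text"):
            return item["text"]
    return None


if __name__ == "__main__":
    print(asyncio.run(converse_once([{"role": "user", "content": [{"text": "Say hi."}]}])))
```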
```diff
@@ -933,8 +933,25 @@ class AWSBedrockLLMService(LLMService):
             }
         }
 
+    def _get_llm_invocation_params(
+        self, context: OpenAILLMContext | LLMContext
+    ) -> AWSBedrockLLMInvocationParams:
+        # Universal LLMContext
+        if isinstance(context, LLMContext):
+            adapter: AWSBedrockLLMAdapter = self.get_llm_adapter()
+            params = adapter.get_llm_invocation_params(context)
+            return params
+
+        # AWS Bedrock-specific context
+        return AWSBedrockLLMInvocationParams(
+            system=getattr(context, "system", None),
+            messages=context.messages,
+            tools=context.tools or [],
+            tool_choice=context.tool_choice,
+        )
+
     @traced_llm
-    async def _process_context(self, context: AWSBedrockLLMContext):
+    async def _process_context(self, context: AWSBedrockLLMContext | LLMContext):
         # Usage tracking
         prompt_tokens = 0
         completion_tokens = 0
```
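The new helper builds `AWSBedrockLLMInvocationParams` with keyword arguments, and other hunks in this diff subscript it like a dict, which is consistent with a `TypedDict`. A plausible reconstruction of its shape, inferred only from how the fields are used here (the real definition lives in `pipecat/adapters/services/bedrock_adapter.py`):

```python
from typing import Any, Dict, List, Optional, TypedDict


class AWSBedrockLLMInvocationParams(TypedDict):
    """Hypothetical reconstruction; not the actual upstream definition."""

    system: Optional[List[Dict[str, str]]]  # e.g. [{"text": "system message"}]
    messages: List[Dict[str, Any]]  # Converse-formatted message dicts
    tools: List[Dict[str, Any]]  # Converse toolSpec entries
    tool_choice: Any  # "auto" | "none" | {"function": {"name": ...}} | None
```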
```diff
@@ -951,6 +968,12 @@ class AWSBedrockLLMService(LLMService):
 
         await self.start_ttfb_metrics()
 
+        params_from_context = self._get_llm_invocation_params(context)
+        messages = params_from_context["messages"]
+        system = params_from_context["system"]
+        tools = params_from_context["tools"]
+        tool_choice = params_from_context["tool_choice"]
+
         # Set up inference config
         inference_config = {
             "maxTokens": self._settings["max_tokens"],
@@ -961,17 +984,18 @@ class AWSBedrockLLMService(LLMService):
         # Prepare request parameters
         request_params = {
             "modelId": self.model_name,
-            "messages":
+            "messages": messages,
             "inferenceConfig": inference_config,
             "additionalModelRequestFields": self._settings["additional_model_request_fields"],
         }
 
         # Add system message
-[line 970 removed; content not preserved in this diff view]
+        if system:
+            request_params["system"] = system
 
         # Check if messages contain tool use or tool result content blocks
         has_tool_content = False
-        for message in
+        for message in messages:
             if isinstance(message.get("content"), list):
                 for content_item in message["content"]:
                     if "toolUse" in content_item or "toolResult" in content_item:
@@ -981,7 +1005,6 @@ class AWSBedrockLLMService(LLMService):
                     break
 
         # Handle tools: use current tools, or no-op if tool content exists but no current tools
-        tools = context.tools or []
         if has_tool_content and not tools:
             tools = [self._create_no_op_tool()]
             using_noop_tool = True
@@ -990,17 +1013,15 @@ class AWSBedrockLLMService(LLMService):
             tool_config = {"tools": tools}
 
             # Only add tool_choice if we have real tools (not just no-op)
-            if not using_noop_tool and
-                if
+            if not using_noop_tool and tool_choice:
+                if tool_choice == "auto":
                     tool_config["toolChoice"] = {"auto": {}}
-                elif
+                elif tool_choice == "none":
                     # Skip adding toolChoice for "none"
                     pass
-                elif (
-                    isinstance(context.tool_choice, dict) and "function" in context.tool_choice
-                ):
+                elif isinstance(tool_choice, dict) and "function" in tool_choice:
                     tool_config["toolChoice"] = {
-                        "tool": {"name":
+                        "tool": {"name": tool_choice["function"]["name"]}
                     }
 
             request_params["toolConfig"] = tool_config
```
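The tool-choice branching above reduces to a small mapping from OpenAI-style `tool_choice` values onto Converse's `toolChoice` field. The same logic extracted as a standalone helper (hypothetical name, for illustration):

```python
def to_bedrock_tool_choice(tool_choice):
    """Map an OpenAI-style tool_choice onto a Converse toolChoice entry (or None)."""
    if tool_choice == "auto":
        return {"auto": {}}
    if tool_choice == "none":
        return None  # the diff simply omits toolChoice in this case
    if isinstance(tool_choice, dict) and "function" in tool_choice:
        # Force a specific tool by name.
        return {"tool": {"name": tool_choice["function"]["name"]}}
    return None


assert to_bedrock_tool_choice("auto") == {"auto": {}}
assert to_bedrock_tool_choice({"function": {"name": "get_weather"}}) == {
    "tool": {"name": "get_weather"}
}
```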
```diff
@@ -1009,7 +1030,17 @@ class AWSBedrockLLMService(LLMService):
         if self._settings["latency"] in ["standard", "optimized"]:
             request_params["performanceConfig"] = {"latency": self._settings["latency"]}
 
-[line 1012 removed; content not preserved in this diff view]
+        # Log request params with messages redacted for logging
+        if isinstance(context, LLMContext):
+            adapter = self.get_llm_adapter()
+            context_type_for_logging = "universal"
+            messages_for_logging = adapter.get_messages_for_logging(context)
+        else:
+            context_type_for_logging = "LLM-specific"
+            messages_for_logging = context.get_messages_for_logging()
+        logger.debug(
+            f"{self}: Generating chat from {context_type_for_logging} context [{system}] | {messages_for_logging}"
+        )
 
         async with self._aws_session.client(
             service_name="bedrock-runtime", **self._aws_params
@@ -1085,7 +1116,7 @@ class AWSBedrockLLMService(LLMService):
             # also get cancelled.
             use_completion_tokens_estimate = True
             raise
-        except
+        except (ReadTimeoutError, asyncio.TimeoutError):
             await self._call_event_handler("on_completion_timeout")
         except Exception as e:
             logger.exception(f"{self} exception: {e}")
@@ -1117,15 +1148,9 @@ class AWSBedrockLLMService(LLMService):
         if isinstance(frame, OpenAILLMContextFrame):
             context = AWSBedrockLLMContext.upgrade_to_bedrock(frame.context)
         if isinstance(frame, LLMContextFrame):
-[line 1120 removed; content not preserved in this diff view]
+            context = frame.context
         elif isinstance(frame, LLMMessagesFrame):
             context = AWSBedrockLLMContext.from_messages(frame.messages)
-        elif isinstance(frame, VisionImageRawFrame):
-            # This is only useful in very simple pipelines because it creates
-            # a new context. Generally we want a context manager to catch
-            # UserImageRawFrames coming through the pipeline and add them
-            # to the context.
-            context = AWSBedrockLLMContext.from_image_frame(frame)
         elif isinstance(frame, LLMUpdateSettingsFrame):
             await self._update_settings(frame.settings)
         else:
```