dv-pipecat-ai 0.0.85.dev5__py3-none-any.whl → 0.0.85.dev698__py3-none-any.whl

This diff reflects the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of dv-pipecat-ai might be problematic.

Files changed (157)
  1. {dv_pipecat_ai-0.0.85.dev5.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/METADATA +78 -117
  2. {dv_pipecat_ai-0.0.85.dev5.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/RECORD +157 -123
  3. pipecat/adapters/base_llm_adapter.py +38 -1
  4. pipecat/adapters/services/anthropic_adapter.py +9 -14
  5. pipecat/adapters/services/aws_nova_sonic_adapter.py +5 -0
  6. pipecat/adapters/services/bedrock_adapter.py +236 -13
  7. pipecat/adapters/services/gemini_adapter.py +12 -8
  8. pipecat/adapters/services/open_ai_adapter.py +19 -7
  9. pipecat/adapters/services/open_ai_realtime_adapter.py +5 -0
  10. pipecat/audio/filters/krisp_viva_filter.py +193 -0
  11. pipecat/audio/filters/noisereduce_filter.py +15 -0
  12. pipecat/audio/turn/base_turn_analyzer.py +9 -1
  13. pipecat/audio/turn/smart_turn/base_smart_turn.py +14 -8
  14. pipecat/audio/turn/smart_turn/data/__init__.py +0 -0
  15. pipecat/audio/turn/smart_turn/data/smart-turn-v3.0.onnx +0 -0
  16. pipecat/audio/turn/smart_turn/http_smart_turn.py +6 -2
  17. pipecat/audio/turn/smart_turn/local_smart_turn.py +1 -1
  18. pipecat/audio/turn/smart_turn/local_smart_turn_v2.py +1 -1
  19. pipecat/audio/turn/smart_turn/local_smart_turn_v3.py +124 -0
  20. pipecat/audio/vad/data/README.md +10 -0
  21. pipecat/audio/vad/vad_analyzer.py +13 -1
  22. pipecat/extensions/voicemail/voicemail_detector.py +5 -5
  23. pipecat/frames/frames.py +120 -87
  24. pipecat/observers/loggers/debug_log_observer.py +3 -3
  25. pipecat/observers/loggers/llm_log_observer.py +7 -3
  26. pipecat/observers/loggers/user_bot_latency_log_observer.py +22 -10
  27. pipecat/pipeline/runner.py +12 -4
  28. pipecat/pipeline/service_switcher.py +64 -36
  29. pipecat/pipeline/task.py +85 -24
  30. pipecat/processors/aggregators/dtmf_aggregator.py +28 -22
  31. pipecat/processors/aggregators/{gated_openai_llm_context.py → gated_llm_context.py} +9 -9
  32. pipecat/processors/aggregators/gated_open_ai_llm_context.py +12 -0
  33. pipecat/processors/aggregators/llm_response.py +6 -7
  34. pipecat/processors/aggregators/llm_response_universal.py +19 -15
  35. pipecat/processors/aggregators/user_response.py +6 -6
  36. pipecat/processors/aggregators/vision_image_frame.py +24 -2
  37. pipecat/processors/audio/audio_buffer_processor.py +43 -8
  38. pipecat/processors/filters/stt_mute_filter.py +2 -0
  39. pipecat/processors/frame_processor.py +103 -17
  40. pipecat/processors/frameworks/langchain.py +8 -2
  41. pipecat/processors/frameworks/rtvi.py +209 -68
  42. pipecat/processors/frameworks/strands_agents.py +170 -0
  43. pipecat/processors/logger.py +2 -2
  44. pipecat/processors/transcript_processor.py +4 -4
  45. pipecat/processors/user_idle_processor.py +3 -6
  46. pipecat/runner/run.py +270 -50
  47. pipecat/runner/types.py +2 -0
  48. pipecat/runner/utils.py +51 -10
  49. pipecat/serializers/exotel.py +5 -5
  50. pipecat/serializers/livekit.py +20 -0
  51. pipecat/serializers/plivo.py +6 -9
  52. pipecat/serializers/protobuf.py +6 -5
  53. pipecat/serializers/telnyx.py +2 -2
  54. pipecat/serializers/twilio.py +43 -23
  55. pipecat/services/ai_service.py +2 -6
  56. pipecat/services/anthropic/llm.py +2 -25
  57. pipecat/services/asyncai/tts.py +2 -3
  58. pipecat/services/aws/__init__.py +1 -0
  59. pipecat/services/aws/llm.py +122 -97
  60. pipecat/services/aws/nova_sonic/__init__.py +0 -0
  61. pipecat/services/aws/nova_sonic/context.py +367 -0
  62. pipecat/services/aws/nova_sonic/frames.py +25 -0
  63. pipecat/services/aws/nova_sonic/llm.py +1155 -0
  64. pipecat/services/aws/stt.py +1 -3
  65. pipecat/services/aws_nova_sonic/__init__.py +19 -1
  66. pipecat/services/aws_nova_sonic/aws.py +11 -1151
  67. pipecat/services/aws_nova_sonic/context.py +13 -355
  68. pipecat/services/aws_nova_sonic/frames.py +13 -17
  69. pipecat/services/azure/realtime/__init__.py +0 -0
  70. pipecat/services/azure/realtime/llm.py +65 -0
  71. pipecat/services/azure/stt.py +15 -0
  72. pipecat/services/cartesia/tts.py +2 -2
  73. pipecat/services/deepgram/__init__.py +1 -0
  74. pipecat/services/deepgram/flux/__init__.py +0 -0
  75. pipecat/services/deepgram/flux/stt.py +636 -0
  76. pipecat/services/elevenlabs/__init__.py +2 -1
  77. pipecat/services/elevenlabs/stt.py +254 -276
  78. pipecat/services/elevenlabs/tts.py +5 -5
  79. pipecat/services/fish/tts.py +2 -2
  80. pipecat/services/gemini_multimodal_live/events.py +38 -524
  81. pipecat/services/gemini_multimodal_live/file_api.py +23 -173
  82. pipecat/services/gemini_multimodal_live/gemini.py +41 -1403
  83. pipecat/services/gladia/stt.py +56 -72
  84. pipecat/services/google/__init__.py +1 -0
  85. pipecat/services/google/gemini_live/__init__.py +3 -0
  86. pipecat/services/google/gemini_live/file_api.py +189 -0
  87. pipecat/services/google/gemini_live/llm.py +1582 -0
  88. pipecat/services/google/gemini_live/llm_vertex.py +184 -0
  89. pipecat/services/google/llm.py +15 -11
  90. pipecat/services/google/llm_openai.py +3 -3
  91. pipecat/services/google/llm_vertex.py +86 -16
  92. pipecat/services/google/tts.py +7 -3
  93. pipecat/services/heygen/api.py +2 -0
  94. pipecat/services/heygen/client.py +8 -4
  95. pipecat/services/heygen/video.py +2 -0
  96. pipecat/services/hume/__init__.py +5 -0
  97. pipecat/services/hume/tts.py +220 -0
  98. pipecat/services/inworld/tts.py +6 -6
  99. pipecat/services/llm_service.py +15 -5
  100. pipecat/services/lmnt/tts.py +2 -2
  101. pipecat/services/mcp_service.py +4 -2
  102. pipecat/services/mem0/memory.py +6 -5
  103. pipecat/services/mistral/llm.py +29 -8
  104. pipecat/services/moondream/vision.py +42 -16
  105. pipecat/services/neuphonic/tts.py +2 -2
  106. pipecat/services/openai/__init__.py +1 -0
  107. pipecat/services/openai/base_llm.py +27 -20
  108. pipecat/services/openai/realtime/__init__.py +0 -0
  109. pipecat/services/openai/realtime/context.py +272 -0
  110. pipecat/services/openai/realtime/events.py +1106 -0
  111. pipecat/services/openai/realtime/frames.py +37 -0
  112. pipecat/services/openai/realtime/llm.py +829 -0
  113. pipecat/services/openai/tts.py +16 -8
  114. pipecat/services/openai_realtime/__init__.py +27 -0
  115. pipecat/services/openai_realtime/azure.py +21 -0
  116. pipecat/services/openai_realtime/context.py +21 -0
  117. pipecat/services/openai_realtime/events.py +21 -0
  118. pipecat/services/openai_realtime/frames.py +21 -0
  119. pipecat/services/openai_realtime_beta/azure.py +16 -0
  120. pipecat/services/openai_realtime_beta/openai.py +17 -5
  121. pipecat/services/playht/tts.py +31 -4
  122. pipecat/services/rime/tts.py +3 -4
  123. pipecat/services/sarvam/tts.py +2 -6
  124. pipecat/services/simli/video.py +2 -2
  125. pipecat/services/speechmatics/stt.py +1 -7
  126. pipecat/services/stt_service.py +34 -0
  127. pipecat/services/tavus/video.py +2 -2
  128. pipecat/services/tts_service.py +9 -9
  129. pipecat/services/vision_service.py +7 -6
  130. pipecat/services/vistaar/llm.py +4 -0
  131. pipecat/tests/utils.py +4 -4
  132. pipecat/transcriptions/language.py +41 -1
  133. pipecat/transports/base_input.py +17 -42
  134. pipecat/transports/base_output.py +42 -26
  135. pipecat/transports/daily/transport.py +199 -26
  136. pipecat/transports/heygen/__init__.py +0 -0
  137. pipecat/transports/heygen/transport.py +381 -0
  138. pipecat/transports/livekit/transport.py +228 -63
  139. pipecat/transports/local/audio.py +6 -1
  140. pipecat/transports/local/tk.py +11 -2
  141. pipecat/transports/network/fastapi_websocket.py +1 -1
  142. pipecat/transports/smallwebrtc/connection.py +98 -19
  143. pipecat/transports/smallwebrtc/request_handler.py +204 -0
  144. pipecat/transports/smallwebrtc/transport.py +65 -23
  145. pipecat/transports/tavus/transport.py +23 -12
  146. pipecat/transports/websocket/client.py +41 -5
  147. pipecat/transports/websocket/fastapi.py +21 -11
  148. pipecat/transports/websocket/server.py +14 -7
  149. pipecat/transports/whatsapp/api.py +8 -0
  150. pipecat/transports/whatsapp/client.py +47 -0
  151. pipecat/utils/base_object.py +54 -22
  152. pipecat/utils/string.py +12 -1
  153. pipecat/utils/tracing/service_decorators.py +21 -21
  154. {dv_pipecat_ai-0.0.85.dev5.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/WHEEL +0 -0
  155. {dv_pipecat_ai-0.0.85.dev5.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/licenses/LICENSE +0 -0
  156. {dv_pipecat_ai-0.0.85.dev5.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/top_level.txt +0 -0
  157. /pipecat/services/{aws_nova_sonic → aws/nova_sonic}/ready.wav +0 -0

pipecat/serializers/plivo.py

@@ -22,10 +22,10 @@ from pipecat.frames.frames import (
     Frame,
     InputAudioRawFrame,
     InputDTMFFrame,
+    InterruptionFrame,
+    OutputTransportMessageFrame,
+    OutputTransportMessageUrgentFrame,
     StartFrame,
-    StartInterruptionFrame,
-    TransportMessageFrame,
-    TransportMessageUrgentFrame,
 )
 from pipecat.serializers.base_serializer import FrameSerializer, FrameSerializerType
 
@@ -122,7 +122,7 @@ class PlivoFrameSerializer(FrameSerializer):
             self._hangup_attempted = True
             await self._hang_up_call()
             return None
-        elif isinstance(frame, StartInterruptionFrame):
+        elif isinstance(frame, InterruptionFrame):
             answer = {"event": "clearAudio", "streamId": self._stream_id}
             return json.dumps(answer)
         elif isinstance(frame, AudioRawFrame):
@@ -148,7 +148,7 @@ class PlivoFrameSerializer(FrameSerializer):
             }
 
             return json.dumps(answer)
-        elif isinstance(frame, (TransportMessageFrame, TransportMessageUrgentFrame)):
+        elif isinstance(frame, (OutputTransportMessageFrame, OutputTransportMessageUrgentFrame)):
            return json.dumps(frame.message)
 
         # Return None for unhandled frames
@@ -178,10 +178,7 @@ class PlivoFrameSerializer(FrameSerializer):
             return
 
         # Plivo API endpoint for hanging up calls
-        if self._stream_id:
-            endpoint = f"https://api.plivo.com/v1/Account/{auth_id}/Call/{call_id}/Stream/{self._stream_id}/"
-        else:
-            endpoint = f"https://api.plivo.com/v1/Account/{auth_id}/Call/{call_id}/"
+        endpoint = f"https://api.plivo.com/v1/Account/{auth_id}/Call/{call_id}/"
 
         # Create basic auth from auth_id and auth_token
         auth = aiohttp.BasicAuth(auth_id, auth_token)
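
The frame class renames in this file recur in the protobuf, Telnyx, and Twilio serializer diffs below; they all track the same renames in pipecat/frames/frames.py (file 23). A minimal sketch of the mapping for downstream imports (whether the old names survive as deprecated aliases is not visible in this diff):

    # StartInterruptionFrame      -> InterruptionFrame
    # TransportMessageFrame       -> OutputTransportMessageFrame
    # TransportMessageUrgentFrame -> OutputTransportMessageUrgentFrame
    from pipecat.frames.frames import (
        InterruptionFrame,
        OutputTransportMessageFrame,
        OutputTransportMessageUrgentFrame,
    )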

pipecat/serializers/protobuf.py

@@ -15,11 +15,12 @@ import pipecat.frames.protobufs.frames_pb2 as frame_protos
 from pipecat.frames.frames import (
     Frame,
     InputAudioRawFrame,
+    InputTransportMessageFrame,
     OutputAudioRawFrame,
+    OutputTransportMessageFrame,
+    OutputTransportMessageUrgentFrame,
     TextFrame,
     TranscriptionFrame,
-    TransportMessageFrame,
-    TransportMessageUrgentFrame,
 )
 from pipecat.serializers.base_serializer import FrameSerializer, FrameSerializerType
 
@@ -82,7 +83,7 @@ class ProtobufFrameSerializer(FrameSerializer):
             Serialized frame as bytes, or None if frame type is not serializable.
         """
         # Wrapping this messages as a JSONFrame to send
-        if isinstance(frame, (TransportMessageFrame, TransportMessageUrgentFrame)):
+        if isinstance(frame, (OutputTransportMessageFrame, OutputTransportMessageUrgentFrame)):
             frame = MessageFrame(
                 data=json.dumps(frame.message),
             )
@@ -134,11 +135,11 @@ class ProtobufFrameSerializer(FrameSerializer):
         if "pts" in args_dict:
             del args_dict["pts"]
 
-        # Special handling for MessageFrame -> TransportMessageUrgentFrame
+        # Special handling for MessageFrame -> OutputTransportMessageUrgentFrame
         if class_name == MessageFrame:
             try:
                 msg = json.loads(args_dict["data"])
-                instance = TransportMessageUrgentFrame(message=msg)
+                instance = InputTransportMessageFrame(message=msg)
                 logger.debug(f"ProtobufFrameSerializer: Transport message {instance}")
             except Exception as e:
                 logger.error(f"Error parsing MessageFrame data: {e}")
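
The net effect is a direction-aware round trip: outbound transport messages are wrapped in a protobuf MessageFrame when serialized, and deserialization now rebuilds them as InputTransportMessageFrame instead of TransportMessageUrgentFrame. A rough sketch of that round trip, assuming no serializer setup is needed for message frames:

    import asyncio

    from pipecat.frames.frames import OutputTransportMessageFrame
    from pipecat.serializers.protobuf import ProtobufFrameSerializer


    async def round_trip():
        serializer = ProtobufFrameSerializer()
        # Outbound message is wrapped as a protobuf MessageFrame carrying JSON-encoded data.
        wire_bytes = await serializer.serialize(OutputTransportMessageFrame(message={"type": "ping"}))
        frame = await serializer.deserialize(wire_bytes)
        print(type(frame).__name__)  # expected after this change: InputTransportMessageFrame


    asyncio.run(round_trip())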

pipecat/serializers/telnyx.py

@@ -29,8 +29,8 @@ from pipecat.frames.frames import (
     Frame,
     InputAudioRawFrame,
     InputDTMFFrame,
+    InterruptionFrame,
     StartFrame,
-    StartInterruptionFrame,
 )
 from pipecat.serializers.base_serializer import FrameSerializer, FrameSerializerType
 
@@ -137,7 +137,7 @@ class TelnyxFrameSerializer(FrameSerializer):
             self._hangup_attempted = True
             await self._hang_up_call()
             return None
-        elif isinstance(frame, StartInterruptionFrame):
+        elif isinstance(frame, InterruptionFrame):
             answer = {"event": "clear"}
             return json.dumps(answer)
         elif isinstance(frame, AudioRawFrame):

pipecat/serializers/twilio.py

@@ -22,10 +22,10 @@ from pipecat.frames.frames import (
     Frame,
     InputAudioRawFrame,
     InputDTMFFrame,
+    InterruptionFrame,
+    OutputTransportMessageFrame,
+    OutputTransportMessageUrgentFrame,
     StartFrame,
-    StartInterruptionFrame,
-    TransportMessageFrame,
-    TransportMessageUrgentFrame,
 )
 from pipecat.serializers.base_serializer import FrameSerializer, FrameSerializerType
 
@@ -61,6 +61,8 @@ class TwilioFrameSerializer(FrameSerializer):
         call_sid: Optional[str] = None,
         account_sid: Optional[str] = None,
         auth_token: Optional[str] = None,
+        region: Optional[str] = None,
+        edge: Optional[str] = None,
         params: Optional[InputParams] = None,
     ):
         """Initialize the TwilioFrameSerializer.
@@ -70,13 +72,42 @@ class TwilioFrameSerializer(FrameSerializer):
             call_sid: The associated Twilio Call SID (optional, but required for auto hang-up).
             account_sid: Twilio account SID (required for auto hang-up).
             auth_token: Twilio auth token (required for auto hang-up).
+            region: Twilio region (e.g., "au1", "ie1"). Must be specified with edge.
+            edge: Twilio edge location (e.g., "sydney", "dublin"). Must be specified with region.
             params: Configuration parameters.
         """
+        self._params = params or TwilioFrameSerializer.InputParams()
+
+        # Validate hangup-related parameters if auto_hang_up is enabled
+        if self._params.auto_hang_up:
+            # Validate required credentials
+            missing_credentials = []
+            if not call_sid:
+                missing_credentials.append("call_sid")
+            if not account_sid:
+                missing_credentials.append("account_sid")
+            if not auth_token:
+                missing_credentials.append("auth_token")
+
+            if missing_credentials:
+                raise ValueError(
+                    f"auto_hang_up is enabled but missing required parameters: {', '.join(missing_credentials)}"
+                )
+
+            # Validate region and edge are both provided if either is specified
+            if (region and not edge) or (edge and not region):
+                raise ValueError(
+                    "Both edge and region parameters are required if one is set. "
+                    f"Twilio's FQDN format requires both: api.{{edge}}.{{region}}.twilio.com. "
+                    f"Got: region='{region}', edge='{edge}'"
+                )
+
         self._stream_sid = stream_sid
         self._call_sid = call_sid
         self._account_sid = account_sid
         self._auth_token = auth_token
-        self._params = params or TwilioFrameSerializer.InputParams()
+        self._region = region
+        self._edge = edge
 
         self._twilio_sample_rate = self._params.twilio_sample_rate
         self._sample_rate = 0  # Pipeline input rate
@@ -122,7 +153,7 @@ class TwilioFrameSerializer(FrameSerializer):
             self._hangup_attempted = True
             await self._hang_up_call()
             return None
-        elif isinstance(frame, StartInterruptionFrame):
+        elif isinstance(frame, InterruptionFrame):
             answer = {"event": "clear", "streamSid": self._stream_sid}
             return json.dumps(answer)
         elif isinstance(frame, AudioRawFrame):
@@ -135,7 +166,7 @@ class TwilioFrameSerializer(FrameSerializer):
             if serialized_data is None or len(serialized_data) == 0:
                 # Ignoring in case we don't have audio
                 return None
-
+
             payload = base64.b64encode(serialized_data).decode("utf-8")
             answer = {
                 "event": "media",
@@ -144,7 +175,7 @@ class TwilioFrameSerializer(FrameSerializer):
             }
 
             return json.dumps(answer)
-        elif isinstance(frame, (TransportMessageFrame, TransportMessageUrgentFrame)):
+        elif isinstance(frame, (OutputTransportMessageFrame, OutputTransportMessageUrgentFrame)):
             return json.dumps(frame.message)
 
         # Return None for unhandled frames
@@ -158,25 +189,14 @@ class TwilioFrameSerializer(FrameSerializer):
         account_sid = self._account_sid
         auth_token = self._auth_token
         call_sid = self._call_sid
+        region = self._region
+        edge = self._edge
 
-        if not call_sid or not account_sid or not auth_token:
-            missing = []
-            if not call_sid:
-                missing.append("call_sid")
-            if not account_sid:
-                missing.append("account_sid")
-            if not auth_token:
-                missing.append("auth_token")
-
-            logger.warning(
-                f"Cannot hang up Twilio call: missing required parameters: {', '.join(missing)}"
-            )
-            return
+        region_prefix = f"{region}." if region else ""
+        edge_prefix = f"{edge}." if edge else ""
 
         # Twilio API endpoint for updating calls
-        endpoint = (
-            f"https://api.twilio.com/2010-04-01/Accounts/{account_sid}/Calls/{call_sid}.json"
-        )
+        endpoint = f"https://api.{edge_prefix}{region_prefix}twilio.com/2010-04-01/Accounts/{account_sid}/Calls/{call_sid}.json"
 
         # Create basic auth from account_sid and auth_token
         auth = aiohttp.BasicAuth(account_sid, auth_token)
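
Taken together, the Twilio changes move credential validation from hang-up time into the constructor and add regional routing for the hang-up request. A minimal usage sketch with placeholder SIDs and credentials; region and edge must be supplied together, and the endpoint then follows Twilio's api.{edge}.{region}.twilio.com form:

    from pipecat.serializers.twilio import TwilioFrameSerializer

    serializer = TwilioFrameSerializer(
        stream_sid="MZ...",       # placeholder stream SID
        call_sid="CA...",         # placeholder call SID
        account_sid="AC...",      # placeholder account SID
        auth_token="auth_token",  # placeholder auth token
        region="au1",
        edge="sydney",
    )
    # Auto hang-up now targets:
    #   https://api.sydney.au1.twilio.com/2010-04-01/Accounts/{account_sid}/Calls/{call_sid}.json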

pipecat/services/ai_service.py

@@ -97,9 +97,7 @@ class AIService(FrameProcessor):
         pass
 
     async def _update_settings(self, settings: Mapping[str, Any]):
-        from pipecat.services.openai_realtime_beta.events import (
-            SessionProperties,
-        )
+        from pipecat.services.openai.realtime.events import SessionProperties
 
         for key, value in settings.items():
             logger.debug("Update request for:", key, value)
@@ -111,9 +109,7 @@ class AIService(FrameProcessor):
                 logger.debug("Attempting to update", key, value)
 
                 try:
-                    from pipecat.services.openai_realtime_beta.events import (
-                        TurnDetection,
-                    )
+                    from pipecat.services.openai.realtime.events import TurnDetection
 
                     if isinstance(self._session_properties, SessionProperties):
                         current_properties = self._session_properties
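
Both hunks simply retarget the lazy imports at the new openai.realtime package added in this release (files 108-112); the old openai_realtime_beta modules are still present (files 119-120). For reference:

    # New location used by AIService after this change:
    from pipecat.services.openai.realtime.events import SessionProperties, TurnDetection

    # Former location, no longer imported here:
    # from pipecat.services.openai_realtime_beta.events import SessionProperties, TurnDetection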

pipecat/services/anthropic/llm.py

@@ -42,7 +42,6 @@ from pipecat.frames.frames import (
     LLMTextFrame,
     LLMUpdateSettingsFrame,
     UserImageRawFrame,
-    VisionImageRawFrame,
 )
 from pipecat.metrics.metrics import LLMTokenUsage
 from pipecat.processors.aggregators.llm_context import LLMContext
@@ -152,7 +151,7 @@ class AnthropicLLMService(LLMService):
         self,
         *,
         api_key: str,
-        model: str = "claude-sonnet-4-20250514",
+        model: str = "claude-sonnet-4-5-20250929",
         params: Optional[InputParams] = None,
         client=None,
         retry_timeout_secs: Optional[float] = 5.0,
@@ -163,7 +162,7 @@ class AnthropicLLMService(LLMService):
 
         Args:
             api_key: Anthropic API key for authentication.
-            model: Model name to use. Defaults to "claude-sonnet-4-20250514".
+            model: Model name to use. Defaults to "claude-sonnet-4-5-20250929".
             params: Optional model parameters for inference.
             client: Optional custom Anthropic client instance.
             retry_timeout_secs: Request timeout in seconds for retry logic.
@@ -495,12 +494,6 @@ class AnthropicLLMService(LLMService):
             context = frame.context
         elif isinstance(frame, LLMMessagesFrame):
             context = AnthropicLLMContext.from_messages(frame.messages)
-        elif isinstance(frame, VisionImageRawFrame):
-            # This is only useful in very simple pipelines because it creates
-            # a new context. Generally we want a context manager to catch
-            # UserImageRawFrames coming through the pipeline and add them
-            # to the context.
-            context = AnthropicLLMContext.from_image_frame(frame)
         elif isinstance(frame, LLMUpdateSettingsFrame):
             await self._update_settings(frame.settings)
         elif isinstance(frame, LLMEnablePromptCachingFrame):
@@ -626,22 +619,6 @@ class AnthropicLLMContext(OpenAILLMContext):
         self._restructure_from_openai_messages()
         return self
 
-    @classmethod
-    def from_image_frame(cls, frame: VisionImageRawFrame) -> "AnthropicLLMContext":
-        """Create context from a vision image frame.
-
-        Args:
-            frame: The vision image frame to process.
-
-        Returns:
-            New Anthropic context with the image message.
-        """
-        context = cls()
-        context.add_image_frame_message(
-            format=frame.format, size=frame.size, image=frame.image, text=frame.text
-        )
-        return context
-
     def set_messages(self, messages: List):
         """Set the messages list and reset cache tracking.
 
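
Besides dropping the VisionImageRawFrame path (per the removed comment, images are expected to reach the context as UserImageRawFrames handled by a context aggregator), the default model moves to claude-sonnet-4-5-20250929. A minimal construction sketch with a placeholder API key:

    from pipecat.services.anthropic.llm import AnthropicLLMService

    # model now defaults to "claude-sonnet-4-5-20250929"; pass model=... to override.
    llm = AnthropicLLMService(api_key="sk-ant-...")  # placeholder key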

pipecat/services/asyncai/tts.py

@@ -20,8 +20,8 @@ from pipecat.frames.frames import (
     EndFrame,
     ErrorFrame,
     Frame,
+    InterruptionFrame,
     StartFrame,
-    StartInterruptionFrame,
     TTSAudioRawFrame,
     TTSStartedFrame,
     TTSStoppedFrame,
@@ -119,7 +119,6 @@ class AsyncAITTSService(InterruptibleTTSService):
         """
         super().__init__(
             aggregate_sentences=aggregate_sentences,
-            push_text_frames=False,
             pause_frame_processing=True,
             push_stop_frames=True,
             sample_rate=sample_rate,
@@ -275,7 +274,7 @@ class AsyncAITTSService(InterruptibleTTSService):
             direction: The direction to push the frame.
         """
         await super().push_frame(frame, direction)
-        if isinstance(frame, (TTSStoppedFrame, StartInterruptionFrame)):
+        if isinstance(frame, (TTSStoppedFrame, InterruptionFrame)):
             self._started = False
 
     async def _receive_messages(self):

pipecat/services/aws/__init__.py

@@ -9,6 +9,7 @@ import sys
 from pipecat.services import DeprecatedModuleProxy
 
 from .llm import *
+from .nova_sonic import *
 from .stt import *
 from .tts import *
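
This star re-export surfaces the new pipecat/services/aws/nova_sonic package (files 60-63) through pipecat.services.aws, while the old aws_nova_sonic package is reduced to a thin compatibility layer (files 65-68). A sketch of the new module layout; the submodule names are taken from the file list above:

    # New locations after this diff:
    from pipecat.services.aws.nova_sonic import context, frames, llm

    # Old package, kept for backwards compatibility:
    # from pipecat.services.aws_nova_sonic import aws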