dv-pipecat-ai 0.0.85.dev833__py3-none-any.whl → 0.0.85.dev834__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry, and is provided for informational purposes only.

Potentially problematic release: this version of dv-pipecat-ai has been flagged as potentially problematic.

METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dv-pipecat-ai
-Version: 0.0.85.dev833
+Version: 0.0.85.dev834
 Summary: An open source framework for voice (and multimodal) assistants
 License-Expression: BSD-2-Clause
 Project-URL: Source, https://github.com/pipecat-ai/pipecat

RECORD
@@ -1,4 +1,4 @@
-dv_pipecat_ai-0.0.85.dev833.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
+dv_pipecat_ai-0.0.85.dev834.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
 pipecat/__init__.py,sha256=j0Xm6adxHhd7D06dIyyPV_GlBYLlBnTAERVvD_jAARQ,861
 pipecat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -107,7 +107,7 @@ pipecat/pipeline/to_be_updated/merge_pipeline.py,sha256=jLEWdufIW3z1xZhdoLowdJ_S
 pipecat/processors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/processors/async_generator.py,sha256=qPOZxk5eOad_NrF_Z06vWZ6deXIxb9AKZKYO2e5pkJs,2385
 pipecat/processors/consumer_processor.py,sha256=DrWCKnfblknZJ0bLmR_unIeJ1axQw4IPUn2IB3KLGGA,3228
-pipecat/processors/dtmf_aggregator.py,sha256=mo_IXUlsnVl-_Xn8sbTGnRF4Lkts0h6E3uauGbeFyWs,10204
+pipecat/processors/dtmf_aggregator.py,sha256=k3xYncUr_8y5lrYfeX8PxqlF7jqFLshg_HB6HiFg7TA,10193
 pipecat/processors/frame_processor.py,sha256=uBu6Waa0_diMXdQXMZ5V5a_KwaaPzcieyuv5gO9u-ME,33841
 pipecat/processors/idle_frame_processor.py,sha256=z8AuhGap61lA5K35P6XCaOpn4kkmK_9NZNppbpQxheU,3124
 pipecat/processors/logger.py,sha256=8xa4KKekXQIETlQR7zoGnwUpLNo8CeDVm7YjyXePN-w,2385
@@ -122,7 +122,7 @@ pipecat/processors/aggregators/gated.py,sha256=tii0sRrBkRW6y9Xq5iTWPnqlOEejU4VqP
 pipecat/processors/aggregators/gated_llm_context.py,sha256=CPv6sMA8irD1zZ3fU1gSv6D7qcPvCA0MdpFhBtJ_ekI,3007
 pipecat/processors/aggregators/gated_open_ai_llm_context.py,sha256=DgqmdPj1u3fP_SVmxtfP7NjHqnyhN_RVVTDfmjbkxAs,361
 pipecat/processors/aggregators/llm_context.py,sha256=wNbZA0Vt0FzNc5cu06xiv1z7DIClIlfqR1ZD8EusbVw,11085
-pipecat/processors/aggregators/llm_response.py,sha256=V6wBTzfUGLJfMuI34fkf5VTR0I66AWIW8btxKI8_3IM,48795
+pipecat/processors/aggregators/llm_response.py,sha256=--6D736k5mNnIhmauRbA7ZG7H9tBR16okniz3Mpypns,48573
 pipecat/processors/aggregators/llm_response_universal.py,sha256=5PqmpATpekD8BVWyBExZgatKHsNbZem8M-A7_VwTbiQ,34334
 pipecat/processors/aggregators/openai_llm_context.py,sha256=cC8DXdVPERRN04i0i-1Ys6kusvnbMALeH-Z8Pu5K684,12999
 pipecat/processors/aggregators/sentence.py,sha256=E7e3knfQl6HEGpYMKPklF1aO_gOn-rr7SnynErwfkQk,2235
@@ -154,10 +154,10 @@ pipecat/runner/run.py,sha256=McalzMoFYEJJRXyoD5PBAyUhHCdsEeeZJk8lBvplRck,30054
 pipecat/runner/types.py,sha256=zHjbAiU17fG0ypLXCEzPu7bpDOutAg-4gE7TESvK8n0,1761
 pipecat/runner/utils.py,sha256=Ve9rjRvbt1o8e9by0nIrCJzUDGcuJUeYYhkqycmgHXc,18682
 pipecat/serializers/__init__.py,sha256=xcmbbR7YYU5C4HPbo2WVgPij-Bl_qlrLcnunCdpcZkg,804
-pipecat/serializers/asterisk.py,sha256=bPuGuLiCf04_H0d9Gc-5BpEtqD9BRNWnpZZq5MZ1fDY,6091
+pipecat/serializers/asterisk.py,sha256=QLJMXkU3DZ0sgFw3Vq2Zf8PHKkQQguL_v-l2Io4lZ_M,6729
 pipecat/serializers/base_serializer.py,sha256=OyBUZccs2ZT9mfkBbq2tGsUJMvci6o-j90Cl1sicPaI,2030
-pipecat/serializers/convox.py,sha256=Irby_iZywgBtevlxiC8nE2GY3eh4yNNRi2YC-0vnNTY,11155
-pipecat/serializers/custom.py,sha256=O0gHTyoSb1AZ_tEmE9VgRViYckmsNzjwCAqt-Xc2CaM,9081
+pipecat/serializers/convox.py,sha256=fj9NkFTB74B9k8qWEuICQNGUQtEV0DusaHohkOqNLa8,11145
+pipecat/serializers/custom.py,sha256=clUEqOazGe3B2XoUFRN9zkFpMd6aIZeVRTqBRHAzavM,9071
 pipecat/serializers/exotel.py,sha256=B04LtNnRMzKmaS61gPZbUjc2nbki3FmpCfUMww6cOe4,5953
 pipecat/serializers/livekit.py,sha256=OMaM7yUiHfeTPbpNxE2TrmIzjmbNQIjNvlujt81dsRI,3285
 pipecat/serializers/plivo.py,sha256=ie6VUhZDTJ7KlAuJyHNeIeMtJ3ScDq_2js1SZtz7jLI,9256
@@ -324,7 +324,7 @@ pipecat/services/sambanova/llm.py,sha256=5XVfPLEk__W8ykFqLdV95ZUhlGGkAaJwmbciLdZ
 pipecat/services/sambanova/stt.py,sha256=ZZgEZ7WQjLFHbCko-3LNTtVajjtfUvbtVLtFcaNadVQ,2536
 pipecat/services/sarvam/__init__.py,sha256=B4TN_tTHV9fWg0aSoPvfQlXISA0nJaQ9-u08I9UWvH4,280
 pipecat/services/sarvam/stt.py,sha256=p9Iq4loMwnftNZ_S0WoFSoX7iBbRKyja6RsVWbpj508,19314
-pipecat/services/sarvam/tts.py,sha256=wzfa0vvmd0wtuzqFSjRbTmHHS8H0L8nP9jkXwqFUJ3A,27638
+pipecat/services/sarvam/tts.py,sha256=lrwfdC53kZ7f2QPgNRxzryISNkrJCvNtlZ-19-iXg94,27610
 pipecat/services/simli/__init__.py,sha256=cbDcqOaGsEgKbGYKpJ1Vv7LN4ZjOWA04sE84WW5vgQI,257
 pipecat/services/simli/video.py,sha256=Zu2XLvl2Y6VHaWzT9wEdzW9d0EYoZyzYLxjQFyV8vho,8320
 pipecat/services/soniox/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -338,7 +338,7 @@ pipecat/services/together/llm.py,sha256=VSayO-U6g9Ld0xK9CXRQPUsd5gWJKtiA8qDAyXgs
 pipecat/services/ultravox/__init__.py,sha256=EoHCSXI2o0DFQslELgkhAGZtxDj63gZi-9ZEhXljaKE,259
 pipecat/services/ultravox/stt.py,sha256=uCQm_-LbycXdXRV6IE1a6Mymis6tyww7V8PnPzAQtx8,16586
 pipecat/services/vistaar/__init__.py,sha256=UFfSWFN5rbzl6NN-E_OH_MFaSYodZWNlenAU0wk-rAI,110
-pipecat/services/vistaar/llm.py,sha256=8jp9BxGYOysmD6CFyof7m2AJRbTDx4KT4kFuUc95wcc,19335
+pipecat/services/vistaar/llm.py,sha256=GNVKaelbpNH7NW7iOpBj2rJjmhMVUsPqfnBI-YgIjjw,19326
 pipecat/services/whisper/__init__.py,sha256=smADmw0Fv98k7cGRuHTEcljKTO2WdZqLpJd0qsTCwH8,281
 pipecat/services/whisper/base_stt.py,sha256=VhslESPnYIeVbmnQTzmlZPV35TH49duxYTvJe0epNnE,7850
 pipecat/services/whisper/stt.py,sha256=9Qd56vWMzg3LtHikQnfgyMtl4odE6BCHDbpAn3HSWjw,17480
@@ -415,7 +415,7 @@ pipecat/utils/tracing/service_decorators.py,sha256=fwzxFpi8DJl6BJbK74G0UEB4ccMJg
 pipecat/utils/tracing/setup.py,sha256=7TEgPNpq6M8lww8OQvf0P9FzYc5A30xICGklVA-fua0,2892
 pipecat/utils/tracing/turn_context_provider.py,sha256=ikon3plFOx0XbMrH6DdeHttNpb-U0gzMZIm3bWLc9eI,2485
 pipecat/utils/tracing/turn_trace_observer.py,sha256=dma16SBJpYSOE58YDWy89QzHyQFc_9gQZszKeWixuwc,9725
-dv_pipecat_ai-0.0.85.dev833.dist-info/METADATA,sha256=8G_xwlvsUOMNtLW1-haMrK98GNcgh0tMxIuPx3s7aQk,32924
-dv_pipecat_ai-0.0.85.dev833.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-dv_pipecat_ai-0.0.85.dev833.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
-dv_pipecat_ai-0.0.85.dev833.dist-info/RECORD,,
+dv_pipecat_ai-0.0.85.dev834.dist-info/METADATA,sha256=XxgFHDSl86nlTft4vFFQJ1hPi8bNDPJznW83XbbOhKk,32924
+dv_pipecat_ai-0.0.85.dev834.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dv_pipecat_ai-0.0.85.dev834.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
+dv_pipecat_ai-0.0.85.dev834.dist-info/RECORD,,

pipecat/processors/aggregators/llm_response.py
@@ -49,7 +49,6 @@ from pipecat.frames.frames import (
     OpenAILLMContextAssistantTimestampFrame,
     SpeechControlParamsFrame,
     StartFrame,
-    StartInterruptionFrame,
     TextFrame,
     TranscriptDropFrame,
     TranscriptionFrame,
@@ -473,8 +472,8 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
             frame: The frame to process.
             direction: The direction of frame flow in the pipeline.
         """
-        if isinstance(frame, StartInterruptionFrame):
-            self.logger.debug("Received StartInterruptionFrame")
+        if isinstance(frame, InterruptionFrame):
+            self.logger.debug("Received InterruptionFrame")
         await super().process_frame(frame, direction)
 
         if isinstance(frame, StartFrame):
@@ -560,7 +559,7 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
                     "Triggering interruption - pushing BotInterruptionFrame and aggregation"
                 )
                 # await self.push_frame(BotInterruptionFrame(), FrameDirection.UPSTREAM)
-                await self.push_frame(StartInterruptionFrame(), FrameDirection.DOWNSTREAM)
+                await self.push_frame(InterruptionFrame(), FrameDirection.DOWNSTREAM)
                 self.logger.debug("Pushed BotInterruptionFrame")
             # No interruption config - normal behavior (always push aggregation)
             await self._process_aggregation()
@@ -596,12 +595,8 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
         """Notify upstream processors that pending transcripts should be dropped."""
         if self._pending_transcription_ids:
             drop_frame = TranscriptDropFrame(transcript_ids=list(self._pending_transcription_ids))
-            self.logger.debug(
-                f"Dropping {len(self._pending_transcription_ids)} transcript chunk(s) due to {reason}"
-            )
             await self.push_frame(drop_frame, FrameDirection.UPSTREAM)
             self._pending_transcription_ids.clear()
-            self._aggregation = ""
 
     async def _start(self, frame: StartFrame):
         self._create_aggregation_task()
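
The hunks above swap StartInterruptionFrame for InterruptionFrame in the user context aggregator. For readers unfamiliar with the frame-processor API, here is a minimal sketch of a downstream processor reacting to the renamed frame; the InterruptionLogger class is hypothetical, while the process_frame/push_frame calls mirror the ones visible in the diff.

    from pipecat.frames.frames import Frame, InterruptionFrame
    from pipecat.processors.frame_processor import FrameDirection, FrameProcessor


    class InterruptionLogger(FrameProcessor):
        """Hypothetical pass-through processor that reacts to interruptions."""

        async def process_frame(self, frame: Frame, direction: FrameDirection) -> None:
            # Always let the base class see the frame first, as in the hunks above.
            await super().process_frame(frame, direction)
            if isinstance(frame, InterruptionFrame):
                # Place interruption handling here (clear buffers, cancel work, ...).
                print("interruption received")
            # Forward every frame unchanged.
            await self.push_frame(frame, direction)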

pipecat/processors/dtmf_aggregator.py
@@ -1,3 +1,5 @@
+"""DTMF aggregator processor for collecting and flushing DTMF input digits."""
+
 import asyncio
 
 from pipecat.frames.frames import (
@@ -8,8 +10,8 @@ from pipecat.frames.frames import (
     EndFrame,
     Frame,
     InputDTMFFrame,
+    InterruptionFrame,
     StartDTMFCaptureFrame,
-    StartInterruptionFrame,
     TranscriptionFrame,
     WaitForDTMFFrame,
 )
@@ -19,10 +21,11 @@ from pipecat.utils.time import time_now_iso8601
 
 class DTMFAggregator(FrameProcessor):
     """Aggregates DTMF frames using idle wait logic.
+
     The aggregator accumulates digits from incoming InputDTMFFrame instances.
     It flushes the aggregated digits by emitting a TranscriptionFrame when:
     - No new digit arrives within the specified timeout period,
-    - The termination digit (“#”) is received, or
+    - The termination digit ("#") is received, or
     - The number of digits aggregated equals the configured 'digits' value.
     """
 
@@ -34,7 +37,9 @@
         digits: int = None,
         **kwargs,
     ):
-        """:param timeout: Idle timeout in seconds before flushing the aggregated digits.
+        """Initialize the DTMF aggregator.
+
+        :param timeout: Idle timeout in seconds before flushing the aggregated digits.
         :param digits: Number of digits to aggregate before flushing.
         """
         super().__init__(**kwargs)
@@ -48,6 +53,7 @@
         self._dtmf_capture_active = False
 
     async def process_frame(self, frame: Frame, direction: FrameDirection) -> None:
+        """Process incoming frames and handle DTMF input aggregation."""
         # Handle DTMF frames.
         await super().process_frame(frame, direction)
 
@@ -69,8 +75,8 @@
             self._digit_event.set()  # Trigger the timeout handler
             await self._start_dtmf_capture()
             await self.push_frame(frame, direction)
-        elif isinstance(frame, StartInterruptionFrame):
-            self.logger.debug("Received StartInterruptionFrame")
+        elif isinstance(frame, InterruptionFrame):
+            self.logger.debug("Received InterruptionFrame")
             if self._aggregation:
                 await self.flush_aggregation()
             await self._end_dtmf_capture()
@@ -108,9 +114,7 @@
         if "digits" in settings:
             new_digits = settings["digits"]
             if new_digits != self._digits:
-                self.logger.debug(
-                    f"Updating DTMF digits from {self._digits} to {new_digits}"
-                )
+                self.logger.debug(f"Updating DTMF digits from {self._digits} to {new_digits}")
                 self._digits = new_digits
                 settings_changed = True
 
@@ -125,9 +129,7 @@
             new_end_on = set(end_value)
 
             if new_end_on != self._end_on:
-                self.logger.debug(
-                    f"Updating DTMF end_on from {self._end_on} to {new_end_on}"
-                )
+                self.logger.debug(f"Updating DTMF end_on from {self._end_on} to {new_end_on}")
                 self._end_on = new_end_on
                 settings_changed = True
 
@@ -142,9 +144,7 @@
             new_reset_on = set(reset_value)
 
             if new_reset_on != self._reset_on:
-                self.logger.debug(
-                    f"Updating DTMF reset_on from {self._reset_on} to {new_reset_on}"
-                )
+                self.logger.debug(f"Updating DTMF reset_on from {self._reset_on} to {new_reset_on}")
                 self._reset_on = new_reset_on
                 settings_changed = True
 
@@ -183,9 +183,7 @@
     def _create_aggregation_task(self, raise_timeout: bool = False) -> None:
         """Creates the aggregation task if it hasn't been created yet."""
         if not self._aggregation_task:
-            self._aggregation_task = self.create_task(
-                self._aggregation_task_handler(raise_timeout)
-            )
+            self._aggregation_task = self.create_task(self._aggregation_task_handler(raise_timeout))
 
     async def _stop_aggregation_task(self) -> None:
         """Stops the aggregation task."""
@@ -198,9 +196,7 @@
         while True:
             try:
                 # Wait for a new digit signal with a timeout.
-                await asyncio.wait_for(
-                    self._digit_event.wait(), timeout=self._idle_timeout
-                )
+                await asyncio.wait_for(self._digit_event.wait(), timeout=self._idle_timeout)
                 self._digit_event.clear()
             except asyncio.TimeoutError:
                 # No new digit arrived within the timeout period; flush if needed
@@ -216,7 +212,7 @@
         aggregated_frame.metadata["push_aggregation"] = True
 
         # Send interruption frame (as per original design)
-        await self.push_frame(StartInterruptionFrame(), FrameDirection.DOWNSTREAM)
+        await self.push_frame(InterruptionFrame(), FrameDirection.DOWNSTREAM)
 
         # Push the transcription frame
        await self.push_frame(aggregated_frame, FrameDirection.DOWNSTREAM)
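
Based on the class docstring above, a minimal construction sketch; the keyword names follow the docstring and the signature shown in the diff, while the concrete values and pipeline placement are assumptions.

    from pipecat.processors.dtmf_aggregator import DTMFAggregator

    # Flush collected digits after 2 seconds of idle time, when "#" is pressed,
    # or once 4 digits have been aggregated. On flush the aggregator pushes an
    # InterruptionFrame followed by a TranscriptionFrame downstream, as shown in
    # the @@ -216 hunk above.
    dtmf_aggregator = DTMFAggregator(timeout=2.0, digits=4)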

pipecat/serializers/asterisk.py
@@ -1,4 +1,6 @@
 # asterisk_ws_serializer.py
+"""Frame serializer for Asterisk WebSocket communication."""
+
 import base64
 import json
 from typing import Literal, Optional
@@ -12,8 +14,8 @@ from pipecat.frames.frames import (
     EndFrame,
     Frame,
     InputAudioRawFrame,
+    InterruptionFrame,
     StartFrame,
-    StartInterruptionFrame,
     TransportMessageFrame,
     TransportMessageUrgentFrame,
 )
@@ -21,6 +23,8 @@ from pipecat.serializers.base_serializer import FrameSerializer, FrameSerializer
 
 
 class AsteriskFrameSerializer(FrameSerializer):
+    """Serializes Pipecat frames to/from Asterisk WebSocket JSON messages."""
+
     class InputParams(BaseModel):
         """Configuration parameters for AsteriskFrameSerializer.
 
@@ -39,6 +43,12 @@
         auto_hang_up: bool = False  # no-op here; adapter handles hangup
 
     def __init__(self, stream_id: str, params: Optional[InputParams] = None):
+        """Initialize the Asterisk frame serializer.
+
+        Args:
+            stream_id: Unique identifier for the media stream.
+            params: Configuration parameters for the serializer.
+        """
         self._stream_id = stream_id
         self._params = params or AsteriskFrameSerializer.InputParams()
         self._tel_rate = self._params.telephony_sample_rate
@@ -49,13 +59,16 @@
 
     @property
     def type(self) -> FrameSerializerType:
+        """Return the serializer type (TEXT for JSON messages)."""
         return FrameSerializerType.TEXT  # we send/recv JSON strings
 
     async def setup(self, frame: StartFrame):
+        """Setup the serializer with audio parameters from the StartFrame."""
         self._sample_rate = self._params.sample_rate or frame.audio_in_sample_rate
 
     # Pipecat -> Adapter (play to caller)
     async def serialize(self, frame: Frame) -> str | bytes | None:
+        """Serialize Pipecat frames to Asterisk WebSocket JSON messages."""
         # On pipeline end, ask bridge to hang up
         if (
             self._params.auto_hang_up
@@ -64,7 +77,7 @@
         ):
             self._hangup_sent = True
             return json.dumps({"event": "hangup"})
-        if isinstance(frame, StartInterruptionFrame):
+        if isinstance(frame, InterruptionFrame):
             return json.dumps({"event": "clear", "streamId": self._stream_id})
         if isinstance(frame, AudioRawFrame):
             pcm = frame.audio
@@ -114,6 +127,7 @@
 
     # Adapter -> Pipecat (audio from caller)
     async def deserialize(self, data: str | bytes) -> Frame | None:
+        """Deserialize Asterisk WebSocket JSON messages to Pipecat frames."""
         try:
             msg = json.loads(data)
         except Exception:
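
A small usage sketch of the serializer above; the stream id is made up, and the expected output is inferred from the serialize() hunk (calling setup() first only matters for the audio paths, not for the "clear" event).

    import asyncio

    from pipecat.frames.frames import InterruptionFrame
    from pipecat.serializers.asterisk import AsteriskFrameSerializer


    async def main() -> None:
        serializer = AsteriskFrameSerializer(stream_id="demo-stream")
        # An InterruptionFrame is serialized to a "clear" event for the bridge:
        # {"event": "clear", "streamId": "demo-stream"}
        print(await serializer.serialize(InterruptionFrame()))


    asyncio.run(main())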

pipecat/serializers/convox.py
@@ -22,9 +22,9 @@ from pipecat.frames.frames import (
     Frame,
     InputAudioRawFrame,
     InputDTMFFrame,
+    InterruptionFrame,
     KeypadEntry,
     StartFrame,
-    StartInterruptionFrame,
     TransportMessageFrame,
     TransportMessageUrgentFrame,
 )
@@ -117,7 +117,7 @@ class ConVoxFrameSerializer(FrameSerializer):
             self._call_ended = True
             # Return the callEnd event to be sent via the WebSocket
             return await self._send_call_end_event()
-        elif isinstance(frame, StartInterruptionFrame):
+        elif isinstance(frame, InterruptionFrame):
             # Clear/interrupt command for ConVox
             message = {
                 "event": "clear",

pipecat/serializers/custom.py
@@ -28,8 +28,8 @@ from pipecat.frames.frames import (
     EndFrame,
     Frame,
     InputAudioRawFrame,
+    InterruptionFrame,
     StartFrame,
-    StartInterruptionFrame,
     TransportMessageFrame,
     TransportMessageUrgentFrame,
 )
@@ -121,7 +121,7 @@ class CustomFrameSerializer(FrameSerializer):
         Returns:
             Serialized data as JSON string, or None if the frame isn't handled.
         """
-        if isinstance(frame, StartInterruptionFrame):
+        if isinstance(frame, InterruptionFrame):
             # Send clear event to instruct client to discard buffered audio
             answer = {"event": "clear", "stream_sid": self._stream_sid}
             return json.dumps(answer)

pipecat/services/sarvam/tts.py
@@ -23,7 +23,6 @@ from pipecat.frames.frames import (
     InterruptionFrame,
     LLMFullResponseEndFrame,
     StartFrame,
-    StartInterruptionFrame,
     TTSAudioRawFrame,
     TTSStartedFrame,
     TTSStoppedFrame,

pipecat/services/vistaar/llm.py
@@ -14,15 +14,15 @@ from loguru import logger
 from pydantic import BaseModel, Field
 
 from pipecat.frames.frames import (
-    EndFrame,
     CancelFrame,
+    EndFrame,
     Frame,
+    InterruptionFrame,
     LLMFullResponseEndFrame,
     LLMFullResponseStartFrame,
     LLMMessagesFrame,
     LLMTextFrame,
     LLMUpdateSettingsFrame,
-    StartInterruptionFrame,
 )
 from pipecat.processors.aggregators.llm_response import (
     LLMAssistantAggregatorParams,
@@ -391,7 +391,7 @@ class VistaarLLMService(LLMService):
             )
             await self.push_frame(frame, direction)
             return
-        elif isinstance(frame, StartInterruptionFrame):
+        elif isinstance(frame, InterruptionFrame):
             await self._handle_interruption()
             await self.push_frame(frame, direction)
             return
@@ -467,4 +467,4 @@ class VistaarLLMService(LLMService):
 
     def can_generate_metrics(self) -> bool:
         """Check if this service can generate processing metrics."""
-        return True
+        return True