dv-pipecat-ai 0.0.82.dev63__py3-none-any.whl → 0.0.82.dev65__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dv-pipecat-ai might be problematic; see the advisory details on the package registry listing for more information.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dv-pipecat-ai
3
- Version: 0.0.82.dev63
3
+ Version: 0.0.82.dev65
4
4
  Summary: An open source framework for voice (and multimodal) assistants
5
5
  License-Expression: BSD-2-Clause
6
6
  Project-URL: Source, https://github.com/pipecat-ai/pipecat
@@ -1,4 +1,4 @@
1
- dv_pipecat_ai-0.0.82.dev63.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
1
+ dv_pipecat_ai-0.0.82.dev65.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
2
2
  pipecat/__init__.py,sha256=j0Xm6adxHhd7D06dIyyPV_GlBYLlBnTAERVvD_jAARQ,861
3
3
  pipecat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  pipecat/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -74,7 +74,7 @@ pipecat/extensions/voicemail/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
74
74
  pipecat/extensions/voicemail/voicemail_detector.py,sha256=g3L1m3cPJzsadeB5a8WRC9klH0D8m7xfPgB2YEaL6Do,29983
75
75
  pipecat/frames/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
76
76
  pipecat/frames/frames.proto,sha256=JXZm3VXLR8zMOUcOuhVoe2mhM3MQIQGMJXLopdJO_5Y,839
77
- pipecat/frames/frames.py,sha256=mMXjaMd2dC6ILva3DWL2cyNeLTq8q_kpUs5RiHMCEVw,44812
77
+ pipecat/frames/frames.py,sha256=ASeOObRvTRwbFBCXOHVEiKyLZZjZLhfouXIBhccEsa0,45163
78
78
  pipecat/frames/protobufs/frames_pb2.py,sha256=VHgGV_W7qQ4sfQK6RHb5_DggLm3PiSYMr6aBZ8_p1cQ,2590
79
79
  pipecat/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
80
80
  pipecat/metrics/metrics.py,sha256=bdZNciEtLTtA-xgoKDz2RJAy6fKrXkTwz3pryVHzc2M,2713
@@ -304,7 +304,7 @@ pipecat/services/together/llm.py,sha256=VSayO-U6g9Ld0xK9CXRQPUsd5gWJKtiA8qDAyXgs
304
304
  pipecat/services/ultravox/__init__.py,sha256=EoHCSXI2o0DFQslELgkhAGZtxDj63gZi-9ZEhXljaKE,259
305
305
  pipecat/services/ultravox/stt.py,sha256=uCQm_-LbycXdXRV6IE1a6Mymis6tyww7V8PnPzAQtx8,16586
306
306
  pipecat/services/vistaar/__init__.py,sha256=UFfSWFN5rbzl6NN-E_OH_MFaSYodZWNlenAU0wk-rAI,110
307
- pipecat/services/vistaar/llm.py,sha256=3ZYwQ-bzYpAXTgDXm_JvrjnnGF5zDEu2fpHfwMH2g2g,17538
307
+ pipecat/services/vistaar/llm.py,sha256=pzuAM7NTQ9buo5cX4hqKT-FbPI0cBP0pR2C2XM8zEdw,19032
308
308
  pipecat/services/whisper/__init__.py,sha256=smADmw0Fv98k7cGRuHTEcljKTO2WdZqLpJd0qsTCwH8,281
309
309
  pipecat/services/whisper/base_stt.py,sha256=VhslESPnYIeVbmnQTzmlZPV35TH49duxYTvJe0epNnE,7850
310
310
  pipecat/services/whisper/stt.py,sha256=9Qd56vWMzg3LtHikQnfgyMtl4odE6BCHDbpAn3HSWjw,17480
@@ -377,7 +377,7 @@ pipecat/utils/tracing/service_decorators.py,sha256=HwDCqLGijhYD3F8nxDuQmEw-YkRw0
377
377
  pipecat/utils/tracing/setup.py,sha256=7TEgPNpq6M8lww8OQvf0P9FzYc5A30xICGklVA-fua0,2892
378
378
  pipecat/utils/tracing/turn_context_provider.py,sha256=ikon3plFOx0XbMrH6DdeHttNpb-U0gzMZIm3bWLc9eI,2485
379
379
  pipecat/utils/tracing/turn_trace_observer.py,sha256=dma16SBJpYSOE58YDWy89QzHyQFc_9gQZszKeWixuwc,9725
380
- dv_pipecat_ai-0.0.82.dev63.dist-info/METADATA,sha256=zUsxP-l3aDOBffjbRf--VKdb5p0XzkEefIjrqMo_nic,32638
381
- dv_pipecat_ai-0.0.82.dev63.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
382
- dv_pipecat_ai-0.0.82.dev63.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
383
- dv_pipecat_ai-0.0.82.dev63.dist-info/RECORD,,
380
+ dv_pipecat_ai-0.0.82.dev65.dist-info/METADATA,sha256=FnUfphMoh0BZj4x2f0FFvnopkeS_PvmqkZ1GLUpzebg,32638
381
+ dv_pipecat_ai-0.0.82.dev65.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
382
+ dv_pipecat_ai-0.0.82.dev65.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
383
+ dv_pipecat_ai-0.0.82.dev65.dist-info/RECORD,,
pipecat/frames/frames.py CHANGED
@@ -1575,6 +1575,19 @@ class StopUserIdleProcessorFrame(SystemFrame):
1575
1575
  pass
1576
1576
 
1577
1577
 
1578
+ @dataclass
1579
+ class ResetUserIdleProcessorFrame(SystemFrame):
1580
+ """Frame signalling the UserIdleProcessor to treat the user as active again.
1581
+
1582
+ Parameters:
1583
+ text: Optional transcription text that triggered the reset.
1584
+ is_final: Whether the transcription was marked final by STT.
1585
+ """
1586
+
1587
+ text: str = ""
1588
+ is_final: bool = False
1589
+
1590
+
1578
1591
  @dataclass
1579
1592
  class WaitForDTMFFrame(ControlFrame):
1580
1593
  """Frame to stop the UserIdleProcessor monitoring."""
@@ -105,6 +105,8 @@ class VistaarLLMService(LLMService):
105
105
  self._partial_response = [] # Track what was actually sent before interruption
106
106
  self._interim_sent = False # Track if interim message was sent
107
107
  self._interim_task = None # Track interim message task
108
+ self._interim_completion_event = asyncio.Event() # Track interim message completion
109
+ self._interim_in_progress = False # Track if interim message is being spoken
108
110
 
109
111
  logger.info(
110
112
  f"Vistaar LLM initialized - Base URL: {self._base_url}, Session ID: {self._session_id}, Source Lang: {self._source_lang}, Target Lang: {self._target_lang}, Timeout: {self._timeout}s"
@@ -163,6 +165,10 @@ class VistaarLLMService(LLMService):
163
165
  # Set interruption flag
164
166
  self._is_interrupted = True
165
167
 
168
+ # Reset interim state on interruption
169
+ self._interim_in_progress = False
170
+ self._interim_completion_event.set() # Unblock any waiting LLM responses
171
+
166
172
  # Cancel interim message task if active
167
173
  await self._cancel_interim_message_task(
168
174
  "Cancelled interim message task - handling interruption"
@@ -195,6 +201,7 @@ class VistaarLLMService(LLMService):
195
201
  if not self._is_interrupted and not self._interim_sent:
196
202
  logger.info(f"Sending interim message after {self._interim_timeout}s timeout")
197
203
  self._interim_sent = True
204
+ self._interim_in_progress = True
198
205
 
199
206
  # Use random selection from pre_query_response_phrases if available, otherwise fallback to default
200
207
  if self._pre_query_response_phrases:
@@ -203,10 +210,19 @@ class VistaarLLMService(LLMService):
203
210
  message = "एक क्षण थांबा, मी बघतो. "
204
211
 
205
212
  await self.push_frame(LLMTextFrame(text=message))
213
+
214
+ # Wait for estimated TTS duration before marking as complete
215
+ estimated_tts_duration = max(2.0, len(message) * 0.08) # ~80ms per character
216
+ logger.info(f"Waiting {estimated_tts_duration:.2f}s for interim TTS completion")
217
+ await asyncio.sleep(estimated_tts_duration)
218
+ self._interim_in_progress = False
206
219
  except asyncio.CancelledError:
207
220
  logger.debug("Interim message task cancelled")
208
221
  except Exception as e:
209
222
  logger.error(f"Error sending interim message: {e}")
223
+ finally:
224
+ # Signal that interim message handling is complete
225
+ self._interim_completion_event.set()
210
226
 
211
227
  async def _stream_response(self, query: str) -> AsyncGenerator[str, None]:
212
228
  """Stream response from Vistaar API using Server-Sent Events.
@@ -240,6 +256,8 @@ class VistaarLLMService(LLMService):
240
256
  self._is_interrupted = False
241
257
  self._partial_response = []
242
258
  self._interim_sent = False
259
+ self._interim_in_progress = False
260
+ self._interim_completion_event.clear() # Reset the event for new request
243
261
 
244
262
  try:
245
263
  # Use httpx to handle SSE streaming
@@ -316,6 +334,15 @@ class VistaarLLMService(LLMService):
316
334
  if first_chunk:
317
335
  await self.stop_ttfb_metrics()
318
336
  first_chunk = False
337
+
338
+ # Wait for interim message to complete if it was sent and is in progress
339
+ if self._interim_sent:
340
+ logger.debug(
341
+ "Waiting for interim message completion before sending LLM response"
342
+ )
343
+ await self._interim_completion_event.wait()
344
+ logger.debug("Interim message completed, proceeding with LLM response")
345
+
319
346
  # Cancel interim message task since we got first response
320
347
  await self._cancel_interim_message_task(
321
348
  "Cancelled interim message task - got first response"