dv-pipecat-ai 0.0.85.dev842__py3-none-any.whl → 0.0.85.dev844__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dv-pipecat-ai might be problematic; see the package registry's advisory page for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dv-pipecat-ai
3
- Version: 0.0.85.dev842
3
+ Version: 0.0.85.dev844
4
4
  Summary: An open source framework for voice (and multimodal) assistants
5
5
  License-Expression: BSD-2-Clause
6
6
  Project-URL: Source, https://github.com/pipecat-ai/pipecat
@@ -1,4 +1,4 @@
1
- dv_pipecat_ai-0.0.85.dev842.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
1
+ dv_pipecat_ai-0.0.85.dev844.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
2
2
  pipecat/__init__.py,sha256=j0Xm6adxHhd7D06dIyyPV_GlBYLlBnTAERVvD_jAARQ,861
3
3
  pipecat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  pipecat/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -353,8 +353,8 @@ pipecat/tests/utils.py,sha256=DEHDQV8uhCuKIqoHUPGVdUoCiKqTCG9zv5GqLXWWwvY,7870
353
353
  pipecat/transcriptions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
354
354
  pipecat/transcriptions/language.py,sha256=-mWI1MiZbasuoqZTOBH69dAmoM7-UJzWq9rSCcrnmh4,8228
355
355
  pipecat/transports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
356
- pipecat/transports/base_input.py,sha256=WGtoXXlF3GIjYgjtYnAgi8nZozd5abNlGNjwRnz8FRs,20138
357
- pipecat/transports/base_output.py,sha256=7WoXtAQAi-3OC9PC_zk61lCWlBTk5-NuTLUbsQUAI_U,36723
356
+ pipecat/transports/base_input.py,sha256=AkdE-j9UksjIrUGJc7laMOaknXgOS7L22D5sehZ-6ew,20176
357
+ pipecat/transports/base_output.py,sha256=T_NfU38sT6wKxXF1jA7hW5eLhTK11pundQBxAojswW8,36723
358
358
  pipecat/transports/base_transport.py,sha256=JlNiH0DysTfr6azwHauJqY_Z9HJC702O29Q0qrsLrg4,7530
359
359
  pipecat/transports/daily/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
360
360
  pipecat/transports/daily/transport.py,sha256=VanO33ff9g6px-vwGgT6M7cMVg786pOGfMU7Okm7a78,91917
@@ -416,7 +416,7 @@ pipecat/utils/tracing/service_decorators.py,sha256=fwzxFpi8DJl6BJbK74G0UEB4ccMJg
416
416
  pipecat/utils/tracing/setup.py,sha256=7TEgPNpq6M8lww8OQvf0P9FzYc5A30xICGklVA-fua0,2892
417
417
  pipecat/utils/tracing/turn_context_provider.py,sha256=ikon3plFOx0XbMrH6DdeHttNpb-U0gzMZIm3bWLc9eI,2485
418
418
  pipecat/utils/tracing/turn_trace_observer.py,sha256=dma16SBJpYSOE58YDWy89QzHyQFc_9gQZszKeWixuwc,9725
419
- dv_pipecat_ai-0.0.85.dev842.dist-info/METADATA,sha256=8uxfODboEJQwB04vWViiyMIo4KyT-pGag_ChfLt8STo,32955
420
- dv_pipecat_ai-0.0.85.dev842.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
421
- dv_pipecat_ai-0.0.85.dev842.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
422
- dv_pipecat_ai-0.0.85.dev842.dist-info/RECORD,,
419
+ dv_pipecat_ai-0.0.85.dev844.dist-info/METADATA,sha256=JgW9PLS_gplsOlHfyohgocRxrsiivvsAEySMY214f4U,32955
420
+ dv_pipecat_ai-0.0.85.dev844.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
421
+ dv_pipecat_ai-0.0.85.dev844.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
422
+ dv_pipecat_ai-0.0.85.dev844.dist-info/RECORD,,
@@ -299,7 +299,7 @@ class BaseInputTransport(FrameProcessor):
299
299
  await self._handle_user_interruption(VADState.QUIET, emulated=True)
300
300
  elif isinstance(frame, VADParamsUpdateFrame):
301
301
  if self.vad_analyzer:
302
- self.vad_analyzer.set_params(frame.params)
302
+ self.vad_analyzer.set_params(frame.params, self.logger)
303
303
  speech_frame = SpeechControlParamsFrame(
304
304
  vad_params=frame.params,
305
305
  turn_params=self._params.turn_analyzer.params
@@ -445,7 +445,10 @@ class BaseInputTransport(FrameProcessor):
445
445
  await self._handle_user_interruption(VADState.QUIET)
446
446
 
447
447
  async def _run_turn_analyzer(
448
- self, frame: InputAudioRawFrame, vad_state: VADState, previous_vad_state: VADState
448
+ self,
449
+ frame: InputAudioRawFrame,
450
+ vad_state: VADState,
451
+ previous_vad_state: VADState,
449
452
  ):
450
453
  """Run turn analysis on audio frame and handle results."""
451
454
  is_speech = vad_state == VADState.SPEAKING or vad_state == VADState.STARTING
@@ -54,7 +54,7 @@ BOT_VAD_STOP_SECS = 0.30
54
54
  # detect end-of-speech sooner to improve responsiveness for the
55
55
  # user’s first short reply. Keep conservative to avoid mid-utterance
56
56
  # false stops when TTS streams quickly.
57
- FIRST_BOT_VAD_STOP_SECS = 0.08
57
+ FIRST_BOT_VAD_STOP_SECS = 0.12
58
58
 
59
59
 
60
60
  class BaseOutputTransport(FrameProcessor):