dv-pipecat-ai 0.0.85.dev1__py3-none-any.whl → 0.0.85.dev5__py3-none-any.whl

This diff compares the contents of two package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in those registries.
dv_pipecat_ai-0.0.85.dev1.dist-info/METADATA → dv_pipecat_ai-0.0.85.dev5.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dv-pipecat-ai
- Version: 0.0.85.dev1
+ Version: 0.0.85.dev5
  Summary: An open source framework for voice (and multimodal) assistants
  License-Expression: BSD-2-Clause
  Project-URL: Source, https://github.com/pipecat-ai/pipecat
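
The only metadata change is the version bump. Under PEP 440, developmental releases of the same base version are ordered by their dev number, so 0.0.85.dev5 supersedes 0.0.85.dev1 and both sort before a final 0.0.85 release; a quick check with the packaging library (not part of this diff):

    # Confirm PEP 440 ordering of the two dev releases shown above.
    from packaging.version import Version

    assert Version("0.0.85.dev5") > Version("0.0.85.dev1")
    assert Version("0.0.85.dev5") < Version("0.0.85")  # dev releases precede the final release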
dv_pipecat_ai-0.0.85.dev1.dist-info/RECORD → dv_pipecat_ai-0.0.85.dev5.dist-info/RECORD

@@ -1,4 +1,4 @@
- dv_pipecat_ai-0.0.85.dev1.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
+ dv_pipecat_ai-0.0.85.dev5.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
  pipecat/__init__.py,sha256=j0Xm6adxHhd7D06dIyyPV_GlBYLlBnTAERVvD_jAARQ,861
  pipecat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pipecat/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -116,7 +116,7 @@ pipecat/processors/aggregators/dtmf_aggregator.py,sha256=nngjLiaOtcZtuCNpYPyfUVL
  pipecat/processors/aggregators/gated.py,sha256=tii0sRrBkRW6y9Xq5iTWPnqlOEejU4VqPIPtdOa61pc,3073
  pipecat/processors/aggregators/gated_openai_llm_context.py,sha256=cr6MT8J6SpPzZbppKPOKe3_pt_5qXC9g6a4wvZDyrec,3005
  pipecat/processors/aggregators/llm_context.py,sha256=eDf1cQElcISLx3onaA9LCWuepzb2G_JGszLzpNXggXo,9723
- pipecat/processors/aggregators/llm_response.py,sha256=P7DwrbzMQx6HUFmvc-9h65YPoGEU9JOpEYLwaUzkB_o,47671
+ pipecat/processors/aggregators/llm_response.py,sha256=0StzYtq7EzlAFSWp10I0yY0pV1jysw1ySEWv5R50h_s,47360
  pipecat/processors/aggregators/llm_response_universal.py,sha256=fBnB3rZVdxj4iEKIWcnR7yTpqyKupbcg7IUv6XVxrDQ,34287
  pipecat/processors/aggregators/openai_llm_context.py,sha256=cC8DXdVPERRN04i0i-1Ys6kusvnbMALeH-Z8Pu5K684,12999
  pipecat/processors/aggregators/sentence.py,sha256=E7e3knfQl6HEGpYMKPklF1aO_gOn-rr7SnynErwfkQk,2235
@@ -378,7 +378,7 @@ pipecat/utils/tracing/service_decorators.py,sha256=HwDCqLGijhYD3F8nxDuQmEw-YkRw0
  pipecat/utils/tracing/setup.py,sha256=7TEgPNpq6M8lww8OQvf0P9FzYc5A30xICGklVA-fua0,2892
  pipecat/utils/tracing/turn_context_provider.py,sha256=ikon3plFOx0XbMrH6DdeHttNpb-U0gzMZIm3bWLc9eI,2485
  pipecat/utils/tracing/turn_trace_observer.py,sha256=dma16SBJpYSOE58YDWy89QzHyQFc_9gQZszKeWixuwc,9725
- dv_pipecat_ai-0.0.85.dev1.dist-info/METADATA,sha256=pgoQtdaXFQgc4EfFkWTTNBcDURv8diEDGXKD-gC0t04,32691
- dv_pipecat_ai-0.0.85.dev1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dv_pipecat_ai-0.0.85.dev1.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
- dv_pipecat_ai-0.0.85.dev1.dist-info/RECORD,,
+ dv_pipecat_ai-0.0.85.dev5.dist-info/METADATA,sha256=VBXLTj2A7NQGsI_6599DPGcLteo-qXuLPB6VyRRfyws,32691
+ dv_pipecat_ai-0.0.85.dev5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dv_pipecat_ai-0.0.85.dev5.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
+ dv_pipecat_ai-0.0.85.dev5.dist-info/RECORD,,
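
In the RECORD hunks shown here, the dist-info paths are simply renamed for the new version; apart from METADATA itself, the only file whose hash changes is pipecat/processors/aggregators/llm_response.py (47671 → 47360 bytes), which matches the code hunks below. Each RECORD line has the form path,sha256=<digest>,<size>, where the digest is the URL-safe base64-encoded SHA-256 of the file with its trailing "=" padding stripped. A minimal verification sketch (not part of the wheel; the site-packages root is an assumption):

    # Recompute a RECORD-style digest and size for one installed file and
    # compare against the entry shown above.
    import base64
    import hashlib
    from pathlib import Path

    def record_entry(path: Path) -> tuple[str, int]:
        data = path.read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
        return digest.decode("ascii"), len(data)

    # "site-packages" stands in for the actual install root.
    root = Path("site-packages")
    digest, size = record_entry(root / "pipecat/processors/aggregators/llm_response.py")
    print(digest == "0StzYtq7EzlAFSWp10I0yY0pV1jysw1ySEWv5R50h_s", size == 47360)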
pipecat/processors/aggregators/llm_response.py

@@ -516,10 +516,6 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
              self.set_tools(frame.tools)
          elif isinstance(frame, LLMSetToolChoiceFrame):
              self.set_tool_choice(frame.tool_choice)
-         elif isinstance(frame, LLMFullResponseStartFrame):
-             self._last_llm_response_start_time = time.time()
-             self.logger.debug(f"Received LLMFullResponseStartFrame")
-             self._latest_final_transcript = ""
          elif isinstance(frame, SpeechControlParamsFrame):
              self._vad_params = frame.vad_params
              self._turn_params = frame.turn_params
@@ -688,7 +684,6 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
          elif (
              not self._bot_speaking
              and time_since_stopped < 3.0
-             and time.time() - self._last_llm_response_start_time > 3.0
              and self._latest_final_transcript != text
          ):
              self.logger.debug(
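
Taken together, the two hunks in llm_response.py drop the aggregator's tracking of when the last LLM response started: dev1 recorded _last_llm_response_start_time on LLMFullResponseStartFrame (and cleared _latest_final_transcript), then required more than 3 seconds to have elapsed since that point before taking this branch; dev5 removes both the bookkeeping and the time-based guard. A minimal sketch of the remaining condition, with the surrounding method flattened into plain parameters purely for illustration (the helper name and signature are assumptions, not pipecat API):

    # Sketch of the guard after this change: only the bot-speaking, recency,
    # and changed-transcript checks remain; the 3-second gap since the last
    # LLM response start is no longer required.
    def should_take_branch(
        bot_speaking: bool,
        time_since_stopped: float,
        latest_final_transcript: str,
        text: str,
    ) -> bool:
        return (
            not bot_speaking
            and time_since_stopped < 3.0
            and latest_final_transcript != text
        )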