dv-pipecat-ai 0.0.85.dev856__py3-none-any.whl → 0.0.85.dev857__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of dv-pipecat-ai might be problematic.
- {dv_pipecat_ai-0.0.85.dev856.dist-info → dv_pipecat_ai-0.0.85.dev857.dist-info}/METADATA +1 -1
- {dv_pipecat_ai-0.0.85.dev856.dist-info → dv_pipecat_ai-0.0.85.dev857.dist-info}/RECORD +13 -12
- pipecat/metrics/connection_metrics.py +45 -0
- pipecat/processors/frame_processor.py +44 -1
- pipecat/processors/metrics/frame_processor_metrics.py +108 -0
- pipecat/services/deepgram/stt.py +10 -0
- pipecat/services/elevenlabs/tts.py +6 -0
- pipecat/services/google/llm.py +26 -11
- pipecat/services/openai/base_llm.py +21 -12
- pipecat/services/vistaar/llm.py +6 -0
- {dv_pipecat_ai-0.0.85.dev856.dist-info → dv_pipecat_ai-0.0.85.dev857.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.85.dev856.dist-info → dv_pipecat_ai-0.0.85.dev857.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.85.dev856.dist-info → dv_pipecat_ai-0.0.85.dev857.dist-info}/top_level.txt +0 -0
{dv_pipecat_ai-0.0.85.dev856.dist-info → dv_pipecat_ai-0.0.85.dev857.dist-info}/RECORD
RENAMED
@@ -1,4 +1,4 @@
-dv_pipecat_ai-0.0.85.
+dv_pipecat_ai-0.0.85.dev857.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
 pipecat/__init__.py,sha256=j0Xm6adxHhd7D06dIyyPV_GlBYLlBnTAERVvD_jAARQ,861
 pipecat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -82,6 +82,7 @@ pipecat/frames/frames.proto,sha256=JXZm3VXLR8zMOUcOuhVoe2mhM3MQIQGMJXLopdJO_5Y,8
 pipecat/frames/frames.py,sha256=248d54lNOyO04dq9ni51yUTWUItmGw8b9QKarrDGNeo,50354
 pipecat/frames/protobufs/frames_pb2.py,sha256=VHgGV_W7qQ4sfQK6RHb5_DggLm3PiSYMr6aBZ8_p1cQ,2590
 pipecat/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pipecat/metrics/connection_metrics.py,sha256=8rBsEeufL3meWyJHcUbe35TS963W9B0wSnz0dQkv12A,1734
 pipecat/metrics/metrics.py,sha256=bdZNciEtLTtA-xgoKDz2RJAy6fKrXkTwz3pryVHzc2M,2713
 pipecat/observers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/observers/base_observer.py,sha256=z812gu9lrxtZlr_6oZhcH0NHqlV2cJ7k_B8UJRrm8TY,3459
@@ -108,7 +109,7 @@ pipecat/processors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 pipecat/processors/async_generator.py,sha256=qPOZxk5eOad_NrF_Z06vWZ6deXIxb9AKZKYO2e5pkJs,2385
 pipecat/processors/consumer_processor.py,sha256=DrWCKnfblknZJ0bLmR_unIeJ1axQw4IPUn2IB3KLGGA,3228
 pipecat/processors/dtmf_aggregator.py,sha256=k3xYncUr_8y5lrYfeX8PxqlF7jqFLshg_HB6HiFg7TA,10193
-pipecat/processors/frame_processor.py,sha256=
+pipecat/processors/frame_processor.py,sha256=rf35H2qnREj0aeOAjXSy5YWTOoLwyzhRfw74J9LTCGg,35588
 pipecat/processors/idle_frame_processor.py,sha256=z8AuhGap61lA5K35P6XCaOpn4kkmK_9NZNppbpQxheU,3124
 pipecat/processors/logger.py,sha256=8xa4KKekXQIETlQR7zoGnwUpLNo8CeDVm7YjyXePN-w,2385
 pipecat/processors/producer_processor.py,sha256=iIIOHZd77APvUGP7JqFbznAHUnCULcq_qYiSEjwXHcc,3265
@@ -145,7 +146,7 @@ pipecat/processors/frameworks/strands_agents.py,sha256=vaYcNtM084OWoXDQaT6eoGoP1
 pipecat/processors/gstreamer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/processors/gstreamer/pipeline_source.py,sha256=eXckOY1rQeSBjSfLs8EFEkdlTZEq94osOTFWeNh6C4Y,9765
 pipecat/processors/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pipecat/processors/metrics/frame_processor_metrics.py,sha256
+pipecat/processors/metrics/frame_processor_metrics.py,sha256=-p1mv6U3w36JWScz4hTsKsRQtUqTZut-kDLqZr6h2x4,10244
 pipecat/processors/metrics/sentry.py,sha256=Gts-b-H3EDFUvv-qn44e9pSDAWUKk72tr7tEfutxxK0,4911
 pipecat/runner/__init__.py,sha256=iJh4vFMGNQYi_ATVGXJDU4rOJwI-1Y6fmkyV18-ddAE,64
 pipecat/runner/daily.py,sha256=t-D-sgVC2SnT_YCTDaQJgcxVnzL8-pQhdmxp7gV2snI,9646
@@ -210,7 +211,7 @@ pipecat/services/cartesia/tts.py,sha256=I_OZCINywkDXmYzFL35MjSN8cAuNEaJs7nj0YB_o
 pipecat/services/cerebras/__init__.py,sha256=5zBmqq9Zfcl-HC7ylekVS5qrRedbl1mAeEwUT-T-c_o,259
 pipecat/services/cerebras/llm.py,sha256=-yzSe_6YDGigwzES-LZS4vNXMPugmvsIYEpTySyr5nA,3047
 pipecat/services/deepgram/__init__.py,sha256=IjRtMI7WytRDdmYVpk2qDWClXUiNgdl7ZkvEAWg1eYE,304
-pipecat/services/deepgram/stt.py,sha256=
+pipecat/services/deepgram/stt.py,sha256=jej9sFI5xwuC_NwRPjql48sjaOMwOV9B836T67gG70A,25343
 pipecat/services/deepgram/tts.py,sha256=H_2WCJEx3_L4ytrHHRNkA-6GKTd1coou_vvTfiEodpQ,3745
 pipecat/services/deepgram/flux/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/services/deepgram/flux/stt.py,sha256=yCZodrHAOShgYy_GbdviX8iAuh36dBgDL41gHMXVxEM,25887
@@ -218,7 +219,7 @@ pipecat/services/deepseek/__init__.py,sha256=bU5z_oNGzgrF_YpsD9pYIMtEibeZFaUobbR
 pipecat/services/deepseek/llm.py,sha256=5KjpU2blmhUTM3LcRE1ymdsk6OmoFkIzeQgyNOGwQh8,3112
 pipecat/services/elevenlabs/__init__.py,sha256=cMx5v0HEMh4WetMm5byR9tIjG6_wNVs9UxqWyB3tjlM,313
 pipecat/services/elevenlabs/stt.py,sha256=c-6GDeyZCMcXu4FmcG0vugBRsUnq8Iz_L9XX_Y_8TlM,29453
-pipecat/services/elevenlabs/tts.py,sha256=
+pipecat/services/elevenlabs/tts.py,sha256=uKN7fu10MsBR9iRhadQoF3OeVZb0efyDDB4Ru8Z3E-Q,45800
 pipecat/services/fal/__init__.py,sha256=z_kfZETvUcKy68Lyvni4B-RtdkOvz3J3eh6sFDVKq6M,278
 pipecat/services/fal/image.py,sha256=vArKLKrIGoZfw_xeZY_E7zbUzfzVsScj-R7mOmVqjRQ,4585
 pipecat/services/fal/stt.py,sha256=-5tw7N8srBJTS0Q65SN4csmLkIB6cLHR9pXKimxg55o,9678
@@ -237,7 +238,7 @@ pipecat/services/google/__init__.py,sha256=MDd6-gaugR1cUaa5cRxBJEEoo6bCnn0TBMWh8
 pipecat/services/google/frames.py,sha256=_HHljqYg7x0wh6nTRLqKaavThuMxkKFsDeLAFgVutmU,2277
 pipecat/services/google/google.py,sha256=D_GWyJQxnJmJ0sM8SLwcxom5e8snF3W3IhsEjTM7Uqg,507
 pipecat/services/google/image.py,sha256=LQYIctDIB31udYvk3meC9EXTY7VDdWb_veCTFEltTRU,4674
-pipecat/services/google/llm.py,sha256=
+pipecat/services/google/llm.py,sha256=lwb9tjqOMUjPdAZB7py-femsNt_Q6Ekw-9ZP_4OSykg,40805
 pipecat/services/google/llm_openai.py,sha256=cJDSKFOFFbBxWdsRC2f2kuPa_lpi-DgnfaPJLNsz82E,7520
 pipecat/services/google/llm_vertex.py,sha256=0UL2U0aDATWTAWYh-ypTNihF4RS1tsl_E4KwPhSQ76c,8137
 pipecat/services/google/rtvi.py,sha256=PZb1yVny5YG7_XmJRXPzs3iYapeQ4XHreFN1v6KwTGM,3014
@@ -280,7 +281,7 @@ pipecat/services/nim/llm.py,sha256=o4WPGI6kOmSiMV7WwOZ0cNEAoq9hW4Aqs2R8X7c9i94,4
 pipecat/services/ollama/__init__.py,sha256=aw-25zYsR8LR74OFFlMKMTnJjaKwOzdPWVsClueNRkI,255
 pipecat/services/ollama/llm.py,sha256=rfpG92LRHGJlpENKhF6ld8CLVS9DxlKW-WRVNldOIGs,1605
 pipecat/services/openai/__init__.py,sha256=V0ZVa8PzEm3hmcStYICbAsYwfgk4ytZ6kiQoq9UZPmI,354
-pipecat/services/openai/base_llm.py,sha256=
+pipecat/services/openai/base_llm.py,sha256=OYzxsbSw49FH6CoY6au95PEs7W3JClkt-IM8gFRP7jI,22066
 pipecat/services/openai/image.py,sha256=3e3h-dVQ6DQuQE7fp8akXwRMd-oYOdGuZg7RCOjHu9A,2994
 pipecat/services/openai/llm.py,sha256=_aKtz1VebSFUUenT3tH6mBW9pSCm65_u45cDu_dkTzs,7396
 pipecat/services/openai/stt.py,sha256=Idf0k73kxFyDgNRBt62MFpoKKNsBV9bwvJteJ6MGWzQ,2419
@@ -339,7 +340,7 @@ pipecat/services/together/llm.py,sha256=VSayO-U6g9Ld0xK9CXRQPUsd5gWJKtiA8qDAyXgs
 pipecat/services/ultravox/__init__.py,sha256=EoHCSXI2o0DFQslELgkhAGZtxDj63gZi-9ZEhXljaKE,259
 pipecat/services/ultravox/stt.py,sha256=uCQm_-LbycXdXRV6IE1a6Mymis6tyww7V8PnPzAQtx8,16586
 pipecat/services/vistaar/__init__.py,sha256=UFfSWFN5rbzl6NN-E_OH_MFaSYodZWNlenAU0wk-rAI,110
-pipecat/services/vistaar/llm.py,sha256=
+pipecat/services/vistaar/llm.py,sha256=aJGGf5Sn08x8XjHt9gNZ4dE5xzBPVN7Sde3P5EqeTWk,23587
 pipecat/services/whisper/__init__.py,sha256=smADmw0Fv98k7cGRuHTEcljKTO2WdZqLpJd0qsTCwH8,281
 pipecat/services/whisper/base_stt.py,sha256=VhslESPnYIeVbmnQTzmlZPV35TH49duxYTvJe0epNnE,7850
 pipecat/services/whisper/stt.py,sha256=9Qd56vWMzg3LtHikQnfgyMtl4odE6BCHDbpAn3HSWjw,17480
@@ -416,7 +417,7 @@ pipecat/utils/tracing/service_decorators.py,sha256=fwzxFpi8DJl6BJbK74G0UEB4ccMJg
 pipecat/utils/tracing/setup.py,sha256=7TEgPNpq6M8lww8OQvf0P9FzYc5A30xICGklVA-fua0,2892
 pipecat/utils/tracing/turn_context_provider.py,sha256=ikon3plFOx0XbMrH6DdeHttNpb-U0gzMZIm3bWLc9eI,2485
 pipecat/utils/tracing/turn_trace_observer.py,sha256=dma16SBJpYSOE58YDWy89QzHyQFc_9gQZszKeWixuwc,9725
-dv_pipecat_ai-0.0.85.
-dv_pipecat_ai-0.0.85.
-dv_pipecat_ai-0.0.85.
-dv_pipecat_ai-0.0.85.
+dv_pipecat_ai-0.0.85.dev857.dist-info/METADATA,sha256=1YE1UyNWwkyJEw2VRFSI8Bz80M609nvfhbNMCR1LBs0,32955
+dv_pipecat_ai-0.0.85.dev857.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dv_pipecat_ai-0.0.85.dev857.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
+dv_pipecat_ai-0.0.85.dev857.dist-info/RECORD,,
pipecat/metrics/connection_metrics.py
ADDED
@@ -0,0 +1,45 @@
+"""Connection metrics data models for Pipecat framework.
+
+This module extends the core metrics system with connection-specific metrics
+including connection establishment times, retry attempts, and network latencies.
+"""
+
+from typing import Optional
+
+from pydantic import BaseModel
+
+from pipecat.metrics.metrics import MetricsData
+
+
+class ConnectionMetricsData(MetricsData):
+    """Unified connection and reconnection metrics data.
+
+    Handles both initial connection establishment and reconnection scenarios.
+    For initial connections, use connect_time, success, connection_attempts.
+    For reconnections, use reconnect_count, downtime, reconnect_success, reason.
+
+    Parameters:
+        connect_time: Time taken to establish connection in seconds.
+        success: Whether the connection attempt was successful.
+        connection_attempts: Number of connection attempts made.
+        error_message: Error message if connection failed.
+        connection_type: Type of connection (websocket, http, etc.).
+        reconnect_count: Number of reconnection attempts (for reconnection scenarios).
+        downtime: Time connection was down in seconds (for reconnection scenarios).
+        reconnect_success: Whether reconnection was successful (for reconnection scenarios).
+        reason: Reason for reconnection (for reconnection scenarios).
+    """
+
+    connect_time: Optional[float] = None
+    success: bool = True
+    connection_attempts: int = 1
+    error_message: Optional[str] = None
+    connection_type: Optional[str] = None
+
+    # Reconnection-specific fields
+    reconnect_count: Optional[int] = None
+    downtime: Optional[float] = None
+    reconnect_success: Optional[bool] = None
+    reason: Optional[str] = None
+
+
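Taken on its own, ConnectionMetricsData is an ordinary Pydantic model, so the new payload can be inspected outside of a running pipeline. A minimal sketch, assuming the package is installed, that the inherited MetricsData base only requires a processor name (plus an optional model), and using an illustrative processor name that is not part of this diff:

    # Illustrative only: build the new metrics model directly and look at how it
    # would serialize when attached to a MetricsFrame.
    from pipecat.metrics.connection_metrics import ConnectionMetricsData

    sample = ConnectionMetricsData(
        processor="DeepgramSTTService#0",  # hypothetical processor name
        connect_time=0.182,
        success=True,
        connection_attempts=1,
        connection_type="websocket",
    )
    # Reconnection-specific fields stay None and can be dropped on export.
    print(sample.model_dump(exclude_none=True))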
pipecat/processors/frame_processor.py
CHANGED
@@ -436,10 +436,53 @@ class FrameProcessor(BaseObject):
             if frame:
                 await self.push_frame(frame)
 
+    async def start_connection_metrics(self):
+        """Start connection establishment metrics collection."""
+        if self.can_generate_metrics() and self.metrics_enabled:
+            await self._metrics.start_connection_metrics()
+
+    async def stop_connection_metrics(
+        self,
+        success: bool = True,
+        error: str = None,
+        connection_type: str = None
+    ):
+        """Stop connection metrics collection and emit metrics frame.
+
+        Args:
+            success: Whether the connection was successful.
+            error: Error message if connection failed.
+            connection_type: Type of connection (websocket, http, etc.).
+        """
+        if self.can_generate_metrics() and self.metrics_enabled:
+            frame = await self._metrics.stop_connection_metrics(success, error, connection_type)
+            if frame:
+                await self.push_frame(frame)
+
+
+    async def start_reconnection_metrics(self):
+        """Start reconnection metrics collection."""
+        if self.can_generate_metrics() and self.metrics_enabled:
+            await self._metrics.start_reconnection_metrics()
+
+    async def stop_reconnection_metrics(self, success: bool = True, reason: str = None):
+        """Stop reconnection metrics collection and emit metrics frame.
+
+        Args:
+            success: Whether the reconnection was successful.
+            reason: Reason for reconnection.
+        """
+        if self.can_generate_metrics() and self.metrics_enabled:
+            frame = await self._metrics.stop_reconnection_metrics(success, reason)
+            if frame:
+                await self.push_frame(frame)
+
+
     async def stop_all_metrics(self):
         """Stop all active metrics collection."""
         await self.stop_ttfb_metrics()
         await self.stop_processing_metrics()
+        await self.stop_connection_metrics()
 
     def create_task(self, coroutine: Coroutine, name: Optional[str] = None) -> asyncio.Task:
         """Create a new task managed by this processor.
@@ -811,7 +854,7 @@ class FrameProcessor(BaseObject):
         Returns:
             True if the processor has been started.
         """
-        if not self.__started:
+        if not self.__started and not isinstance(frame, SystemFrame):
            self.logger.error(f"{self} Trying to process {frame} but StartFrame not received yet")
         return self.__started
 
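The service diffs further down (Deepgram, ElevenLabs, Google, OpenAI, Vistaar) all drive these hooks the same way: bracket the connect or request step, report success or failure, and let the base class push the resulting MetricsFrame. A hedged sketch of that pattern for a hypothetical subclass; the class name and _open_socket helper are illustrative and not part of this release:

    from pipecat.processors.frame_processor import FrameProcessor


    class MyWebsocketService(FrameProcessor):
        """Illustrative service showing the intended call pattern."""

        def can_generate_metrics(self) -> bool:
            return True

        async def _open_socket(self):
            ...  # placeholder for the real websocket handshake

        async def _connect(self):
            await self.start_connection_metrics()
            try:
                self._websocket = await self._open_socket()
                await self.stop_connection_metrics(success=True, connection_type="websocket")
            except Exception as e:
                await self.stop_connection_metrics(
                    success=False, error=str(e), connection_type="websocket"
                )
                raise

When metrics are disabled on the task, the hooks are no-ops, so services can call them unconditionally.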
pipecat/processors/metrics/frame_processor_metrics.py
CHANGED
@@ -20,6 +20,9 @@ from pipecat.metrics.metrics import (
     TTFBMetricsData,
     TTSUsageMetricsData,
 )
+from pipecat.metrics.connection_metrics import (
+    ConnectionMetricsData,
+)
 from pipecat.utils.asyncio.task_manager import BaseTaskManager
 from pipecat.utils.base_object import BaseObject
 
@@ -46,6 +49,13 @@ class FrameProcessorMetrics(BaseObject):
         self._last_ttfb_time = 0
         self._should_report_ttfb = True
         self._logger = logger
+
+        # Connection metrics state
+        self._start_connection_time = 0
+        self._connection_attempts = 0
+        self._last_connection_error = None
+        self._reconnection_start_time = 0
+        self._reconnect_count = 0
 
     async def setup(self, task_manager: BaseTaskManager):
         """Set up the metrics collector with a task manager.
@@ -195,3 +205,101 @@
         )
         self._logger.debug(f"{self._processor_name()} usage characters: {characters.value}")
         return MetricsFrame(data=[characters])
+
+    async def start_connection_metrics(self):
+        """Start measuring connection establishment time."""
+        self._start_connection_time = time.time()
+        self._connection_attempts += 1
+        self._last_connection_error = None
+
+    async def stop_connection_metrics(
+        self,
+        success: bool = True,
+        error: str = None,
+        connection_type: str = None
+    ):
+        """Stop connection measurement and generate metrics frame.
+
+        Args:
+            success: Whether the connection was successful.
+            error: Error message if connection failed.
+            connection_type: Type of connection (websocket, http, etc.).
+
+        Returns:
+            MetricsFrame containing connection data, or None if not measuring.
+        """
+        if self._start_connection_time == 0:
+            return None
+
+        connect_time = time.time() - self._start_connection_time
+
+        if not success:
+            self._last_connection_error = error
+
+        logstr = f"{self._processor_name()} connection "
+        logstr += "successful" if success else f"failed: {error}"
+        logstr += f" (attempt #{self._connection_attempts}, {connect_time:.3f}s)"
+
+        if success:
+            self._logger.debug(logstr)
+        else:
+            self._logger.warning(logstr)
+
+        connection_data = ConnectionMetricsData(
+            processor=self._processor_name(),
+            model=self._model_name(),
+            connect_time=round(connect_time, 3),
+            success=success,
+            connection_attempts=self._connection_attempts,
+            error_message=error,
+            connection_type=connection_type
+        )
+
+        self._start_connection_time = 0
+        return MetricsFrame(data=[connection_data])
+
+
+    async def start_reconnection_metrics(self):
+        """Start measuring reconnection downtime."""
+        self._reconnection_start_time = time.time()
+        self._reconnect_count += 1
+
+    async def stop_reconnection_metrics(
+        self,
+        success: bool = True,
+        reason: str = None
+    ):
+        """Stop reconnection measurement and generate metrics frame.
+
+        Args:
+            success: Whether the reconnection was successful.
+            reason: Reason for reconnection.
+
+        Returns:
+            MetricsFrame containing reconnection data, or None if not measuring.
+        """
+        if self._reconnection_start_time == 0:
+            return None
+
+        downtime = time.time() - self._reconnection_start_time
+
+        logstr = f"{self._processor_name()} reconnection #{self._reconnect_count} "
+        logstr += "successful" if success else "failed"
+        logstr += f" (downtime: {downtime:.3f}s)"
+        if reason:
+            logstr += f" - {reason}"
+
+        self._logger.debug(logstr)
+
+        reconnection_data = ConnectionMetricsData(
+            processor=self._processor_name(),
+            model=self._model_name(),
+            reconnect_count=self._reconnect_count,
+            downtime=round(downtime, 3),
+            reconnect_success=success,
+            reason=reason
+        )
+
+        self._reconnection_start_time = 0
+        return MetricsFrame(data=[reconnection_data])
+
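Both stop methods return a MetricsFrame carrying a ConnectionMetricsData entry, which then travels down the pipeline like any other metrics frame. A hedged sketch of a hypothetical downstream consumer; the ConnectionMetricsLogger class is illustrative and not part of this release:

    from pipecat.frames.frames import Frame, MetricsFrame
    from pipecat.metrics.connection_metrics import ConnectionMetricsData
    from pipecat.processors.frame_processor import FrameDirection, FrameProcessor


    class ConnectionMetricsLogger(FrameProcessor):
        """Illustrative processor that logs connection metrics passing through."""

        async def process_frame(self, frame: Frame, direction: FrameDirection):
            await super().process_frame(frame, direction)
            if isinstance(frame, MetricsFrame):
                for item in frame.data:
                    if isinstance(item, ConnectionMetricsData):
                        print(
                            f"{item.processor}: connect={item.connect_time}s "
                            f"downtime={item.downtime}s success={item.success}"
                        )
            # Forward every frame unchanged.
            await self.push_frame(frame, direction)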
pipecat/services/deepgram/stt.py
CHANGED
@@ -388,6 +388,7 @@ class DeepgramSTTService(STTService):
 
     async def _connect(self):
         self.logger.debug("Attempting to connect to Deepgram...")
+        await self.start_connection_metrics()
 
         loop = asyncio.get_running_loop()
         for attempt in range(self._max_connect_retries):
@@ -489,6 +490,8 @@
                     elapsed_ms,
                     diagnostics,
                 )
+                await self.stop_connection_metrics(success=True, connection_type="websocket")
+                await self.stop_reconnection_metrics(success=True, reason="successful_reconnection")
                 return  # Exit the method on success
 
             self.logger.warning(
@@ -524,6 +527,12 @@
         error_msg = (
             f"{self}: unable to connect to Deepgram after {self._max_connect_retries} attempts."
         )
+        await self.stop_connection_metrics(
+            success=False,
+            error=f"Failed after {self._max_connect_retries} attempts",
+            connection_type="websocket"
+        )
+        await self.stop_reconnection_metrics(success=False, reason="max_retries_exceeded")
         self.logger.error(error_msg)
         await self.push_error(ErrorFrame(error_msg, fatal=True))
 
@@ -589,6 +598,7 @@
         # NOTE(aleix): we don't disconnect (i.e. call finish on the connection)
         # because this triggers more errors internally in the Deepgram SDK. So,
         # we just forget about the previous connection and create a new one.
+        await self.start_reconnection_metrics()
         await self._connect()
 
     async def _on_speech_started(self, *args, **kwargs):
pipecat/services/elevenlabs/tts.py
CHANGED
@@ -519,6 +519,7 @@ class ElevenLabsTTSService(AudioContextWordTTSService):
             return
 
         self.logger.debug("Connecting to ElevenLabs")
+        await self.start_connection_metrics()
 
         voice_id = self._voice_id
         model = self.model_name
@@ -551,14 +552,19 @@
                 additional_headers={"xi-api-key": self._api_key},
             )
 
+            await self.stop_connection_metrics(success=True, connection_type="websocket")
+            await self.stop_reconnection_metrics(success=True, reason="successful_reconnection")
             await self._call_event_handler("on_connected")
         except Exception as e:
             self.logger.error(f"{self} initialization error: {e}")
+            await self.stop_connection_metrics(success=False, error=str(e), connection_type="websocket")
+            await self.stop_reconnection_metrics(success=False, reason="connection_failed")
             self._websocket = None
             await self._call_event_handler("on_connection_error", f"{e}")
 
     async def _disconnect_websocket(self):
         try:
+            await self.start_reconnection_metrics()
             await self.stop_all_metrics()
 
             if self._websocket:
pipecat/services/google/llm.py
CHANGED
@@ -760,12 +760,19 @@ class GoogleLLMService(LLMService):
 
         generation_config = GenerateContentConfig(system_instruction=system)
 
-
-
-
-
-
-
+        await self.start_connection_metrics()
+
+        try:
+            # Use the new google-genai client's async method
+            response = await self._client.aio.models.generate_content(
+                model=self._model_name,
+                contents=messages,
+                config=generation_config,
+            )
+            await self.stop_connection_metrics(success=True, connection_type="grpc")
+        except Exception as e:
+            await self.stop_connection_metrics(success=False, error=str(e), connection_type="grpc")
+            raise
 
         # Extract text from response
         if response.candidates and response.candidates[0].content:
@@ -849,11 +856,19 @@
         )
 
         await self.start_ttfb_metrics()
-
-
-
-
-
+        await self.start_connection_metrics()
+
+        try:
+            result = await self._client.aio.models.generate_content_stream(
+                model=self._model_name,
+                contents=messages,
+                config=generation_config,
+            )
+            await self.stop_connection_metrics(success=True, connection_type="grpc")
+            return result
+        except Exception as e:
+            await self.stop_connection_metrics(success=False, error=str(e), connection_type="grpc")
+            raise
 
     async def _stream_content_specific_context(
         self, context: OpenAILLMContext
pipecat/services/openai/base_llm.py
CHANGED
@@ -205,20 +205,29 @@ class BaseOpenAILLMService(LLMService):
         """
         params = self.build_chat_completion_params(params_from_context)
 
-
-
-
-
-
-
-
-
-
+        await self.start_connection_metrics()
+
+        try:
+            if self._retry_on_timeout:
+                try:
+                    chunks = await asyncio.wait_for(
+                        self._client.chat.completions.create(**params), timeout=self._retry_timeout_secs
+                    )
+                    await self.stop_connection_metrics(success=True, connection_type="http")
+                    return chunks
+                except (APITimeoutError, asyncio.TimeoutError):
+                    # Retry, this time without a timeout so we get a response
+                    logger.debug(f"{self}: Retrying chat completion due to timeout")
+                    chunks = await self._client.chat.completions.create(**params)
+                    await self.stop_connection_metrics(success=True, connection_type="http")
+                    return chunks
+            else:
                 chunks = await self._client.chat.completions.create(**params)
+                await self.stop_connection_metrics(success=True, connection_type="http")
                 return chunks
-
-
-
+        except Exception as e:
+            await self.stop_connection_metrics(success=False, error=str(e), connection_type="http")
+            raise
 
     def build_chat_completion_params(self, params_from_context: OpenAILLMInvocationParams) -> dict:
         """Build parameters for chat completion request.
pipecat/services/vistaar/llm.py
CHANGED
@@ -344,9 +344,12 @@ class VistaarLLMService(LLMService):
             logger.error(f"Failed to generate JWT token: {e}")
             raise
 
+        await self.start_connection_metrics()
+
        try:
             # Use httpx to handle SSE streaming
             async with self._client.stream("GET", url, headers=headers) as response:
+                await self.stop_connection_metrics(success=True, connection_type="http")
                 self._current_response = response  # Store for potential cancellation
                 response.raise_for_status()
 
@@ -364,14 +367,17 @@
                         yield line
 
         except httpx.HTTPStatusError as e:
+            await self.stop_connection_metrics(success=False, error=f"HTTP {e.response.status_code}", connection_type="http")
             logger.error(
                 f"Vistaar HTTP error - Status: {e.response.status_code}, URL: {url}, Response: {e.response.text if hasattr(e.response, 'text') else 'N/A'}"
             )
             raise
         except httpx.TimeoutException as e:
+            await self.stop_connection_metrics(success=False, error="Timeout", connection_type="http")
             logger.error(f"Vistaar timeout error - URL: {url}, Timeout: {self._timeout}s")
             raise
         except Exception as e:
+            await self.stop_connection_metrics(success=False, error=str(e), connection_type="http")
             logger.error(
                 f"Vistaar unexpected error - Type: {type(e).__name__}, Message: {str(e)}, URL: {url}"
             )
{dv_pipecat_ai-0.0.85.dev856.dist-info → dv_pipecat_ai-0.0.85.dev857.dist-info}/WHEEL
RENAMED
File without changes
{dv_pipecat_ai-0.0.85.dev856.dist-info → dv_pipecat_ai-0.0.85.dev857.dist-info}/licenses/LICENSE
RENAMED
File without changes
{dv_pipecat_ai-0.0.85.dev856.dist-info → dv_pipecat_ai-0.0.85.dev857.dist-info}/top_level.txt
RENAMED
File without changes