dv-pipecat-ai 0.0.85.dev818__py3-none-any.whl → 0.0.85.dev858__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dv-pipecat-ai might be problematic.
- {dv_pipecat_ai-0.0.85.dev818.dist-info → dv_pipecat_ai-0.0.85.dev858.dist-info}/METADATA +2 -1
- {dv_pipecat_ai-0.0.85.dev818.dist-info → dv_pipecat_ai-0.0.85.dev858.dist-info}/RECORD +32 -29
- pipecat/audio/turn/smart_turn/local_smart_turn_v3.py +5 -1
- pipecat/frames/frames.py +34 -0
- pipecat/metrics/connection_metrics.py +45 -0
- pipecat/processors/aggregators/llm_response.py +25 -4
- pipecat/processors/dtmf_aggregator.py +17 -21
- pipecat/processors/frame_processor.py +51 -8
- pipecat/processors/metrics/frame_processor_metrics.py +108 -0
- pipecat/processors/transcript_processor.py +22 -1
- pipecat/serializers/__init__.py +2 -0
- pipecat/serializers/asterisk.py +16 -2
- pipecat/serializers/convox.py +2 -2
- pipecat/serializers/custom.py +2 -2
- pipecat/serializers/vi.py +326 -0
- pipecat/services/cartesia/tts.py +75 -10
- pipecat/services/deepgram/stt.py +317 -17
- pipecat/services/elevenlabs/stt.py +487 -19
- pipecat/services/elevenlabs/tts.py +28 -4
- pipecat/services/google/llm.py +26 -11
- pipecat/services/openai/base_llm.py +79 -14
- pipecat/services/salesforce/llm.py +321 -86
- pipecat/services/sarvam/tts.py +0 -1
- pipecat/services/soniox/stt.py +45 -10
- pipecat/services/vistaar/llm.py +97 -6
- pipecat/transcriptions/language.py +50 -0
- pipecat/transports/base_input.py +15 -11
- pipecat/transports/base_output.py +29 -3
- pipecat/utils/redis.py +58 -0
- {dv_pipecat_ai-0.0.85.dev818.dist-info → dv_pipecat_ai-0.0.85.dev858.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.85.dev818.dist-info → dv_pipecat_ai-0.0.85.dev858.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.85.dev818.dist-info → dv_pipecat_ai-0.0.85.dev858.dist-info}/top_level.txt +0 -0
{dv_pipecat_ai-0.0.85.dev818.dist-info → dv_pipecat_ai-0.0.85.dev858.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dv-pipecat-ai
-Version: 0.0.85.dev818
+Version: 0.0.85.dev858
 Summary: An open source framework for voice (and multimodal) assistants
 License-Expression: BSD-2-Clause
 Project-URL: Source, https://github.com/pipecat-ai/pipecat
@@ -26,6 +26,7 @@ Requires-Dist: numpy<3,>=1.26.4
 Requires-Dist: Pillow<12,>=11.1.0
 Requires-Dist: protobuf~=5.29.3
 Requires-Dist: pydantic<3,>=2.10.6
+Requires-Dist: PyJWT<3,>=2.8.0
 Requires-Dist: pyloudnorm~=0.1.1
 Requires-Dist: resampy~=0.4.3
 Requires-Dist: soxr~=0.5.0
{dv_pipecat_ai-0.0.85.dev818.dist-info → dv_pipecat_ai-0.0.85.dev858.dist-info}/RECORD

@@ -1,4 +1,4 @@
-dv_pipecat_ai-0.0.85.
+dv_pipecat_ai-0.0.85.dev858.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
 pipecat/__init__.py,sha256=j0Xm6adxHhd7D06dIyyPV_GlBYLlBnTAERVvD_jAARQ,861
 pipecat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -59,7 +59,7 @@ pipecat/audio/turn/smart_turn/http_smart_turn.py,sha256=HlHpdVbk-1g_AU3qAAy7Xob8
 pipecat/audio/turn/smart_turn/local_coreml_smart_turn.py,sha256=50kiBeZhnq7FZWZnzdSX8KUmhhQtkme0KH2rbiAJbCU,3140
 pipecat/audio/turn/smart_turn/local_smart_turn.py,sha256=0z2M_MC9xIcelm4d9XqZwzJMe2FM-zOjgnHDAeoMw0g,3564
 pipecat/audio/turn/smart_turn/local_smart_turn_v2.py,sha256=hd_nhEdaxwJ2_G6F2RJru9mC8vyzkmku2YqmtULl7NM,7154
-pipecat/audio/turn/smart_turn/local_smart_turn_v3.py,sha256=
+pipecat/audio/turn/smart_turn/local_smart_turn_v3.py,sha256=WlgeFem2r2M00aayal9BpdkO38Q2gfaQJPnpTpNiaJQ,4806
 pipecat/audio/turn/smart_turn/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/audio/turn/smart_turn/data/smart-turn-v3.0.onnx,sha256=B6Ezq6MeLQtSPxf4wuTmXv5tj2he_RLKT-Iev055iZE,8757193
 pipecat/audio/vad/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -79,9 +79,10 @@ pipecat/extensions/voicemail/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
 pipecat/extensions/voicemail/voicemail_detector.py,sha256=JxmU2752iWP_1_GmzZReNESUTFAeyEa4XBPL20_C208,30004
 pipecat/frames/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/frames/frames.proto,sha256=JXZm3VXLR8zMOUcOuhVoe2mhM3MQIQGMJXLopdJO_5Y,839
-pipecat/frames/frames.py,sha256=
+pipecat/frames/frames.py,sha256=248d54lNOyO04dq9ni51yUTWUItmGw8b9QKarrDGNeo,50354
 pipecat/frames/protobufs/frames_pb2.py,sha256=VHgGV_W7qQ4sfQK6RHb5_DggLm3PiSYMr6aBZ8_p1cQ,2590
 pipecat/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pipecat/metrics/connection_metrics.py,sha256=8rBsEeufL3meWyJHcUbe35TS963W9B0wSnz0dQkv12A,1734
 pipecat/metrics/metrics.py,sha256=bdZNciEtLTtA-xgoKDz2RJAy6fKrXkTwz3pryVHzc2M,2713
 pipecat/observers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/observers/base_observer.py,sha256=z812gu9lrxtZlr_6oZhcH0NHqlV2cJ7k_B8UJRrm8TY,3459
@@ -107,13 +108,13 @@ pipecat/pipeline/to_be_updated/merge_pipeline.py,sha256=jLEWdufIW3z1xZhdoLowdJ_S
 pipecat/processors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/processors/async_generator.py,sha256=qPOZxk5eOad_NrF_Z06vWZ6deXIxb9AKZKYO2e5pkJs,2385
 pipecat/processors/consumer_processor.py,sha256=DrWCKnfblknZJ0bLmR_unIeJ1axQw4IPUn2IB3KLGGA,3228
-pipecat/processors/dtmf_aggregator.py,sha256=
-pipecat/processors/frame_processor.py,sha256=
+pipecat/processors/dtmf_aggregator.py,sha256=k3xYncUr_8y5lrYfeX8PxqlF7jqFLshg_HB6HiFg7TA,10193
+pipecat/processors/frame_processor.py,sha256=rf35H2qnREj0aeOAjXSy5YWTOoLwyzhRfw74J9LTCGg,35588
 pipecat/processors/idle_frame_processor.py,sha256=z8AuhGap61lA5K35P6XCaOpn4kkmK_9NZNppbpQxheU,3124
 pipecat/processors/logger.py,sha256=8xa4KKekXQIETlQR7zoGnwUpLNo8CeDVm7YjyXePN-w,2385
 pipecat/processors/producer_processor.py,sha256=iIIOHZd77APvUGP7JqFbznAHUnCULcq_qYiSEjwXHcc,3265
 pipecat/processors/text_transformer.py,sha256=LnfWJYzntJhZhrQ1lgSSY4D4VbHtrQJgrC227M69ZYU,1718
-pipecat/processors/transcript_processor.py,sha256=
+pipecat/processors/transcript_processor.py,sha256=TbMSeZCxXgQIdYodXuMSwLp6BvXTy7vQB9YiQZfPxc0,12488
 pipecat/processors/two_stage_user_idle_processor.py,sha256=uf2aZh_lfW-eMxmFogP3R4taAJ1yXOSqjKsR7oXtD0Y,2938
 pipecat/processors/user_idle_processor.py,sha256=Dl-Kcg0B4JZqWXXiyGuvYszGimbu2oKOyOJC92R9_hE,9140
 pipecat/processors/aggregators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -122,7 +123,7 @@ pipecat/processors/aggregators/gated.py,sha256=tii0sRrBkRW6y9Xq5iTWPnqlOEejU4VqP
 pipecat/processors/aggregators/gated_llm_context.py,sha256=CPv6sMA8irD1zZ3fU1gSv6D7qcPvCA0MdpFhBtJ_ekI,3007
 pipecat/processors/aggregators/gated_open_ai_llm_context.py,sha256=DgqmdPj1u3fP_SVmxtfP7NjHqnyhN_RVVTDfmjbkxAs,361
 pipecat/processors/aggregators/llm_context.py,sha256=wNbZA0Vt0FzNc5cu06xiv1z7DIClIlfqR1ZD8EusbVw,11085
-pipecat/processors/aggregators/llm_response.py,sha256
+pipecat/processors/aggregators/llm_response.py,sha256=--6D736k5mNnIhmauRbA7ZG7H9tBR16okniz3Mpypns,48573
 pipecat/processors/aggregators/llm_response_universal.py,sha256=5PqmpATpekD8BVWyBExZgatKHsNbZem8M-A7_VwTbiQ,34334
 pipecat/processors/aggregators/openai_llm_context.py,sha256=cC8DXdVPERRN04i0i-1Ys6kusvnbMALeH-Z8Pu5K684,12999
 pipecat/processors/aggregators/sentence.py,sha256=E7e3knfQl6HEGpYMKPklF1aO_gOn-rr7SnynErwfkQk,2235
@@ -145,7 +146,7 @@ pipecat/processors/frameworks/strands_agents.py,sha256=vaYcNtM084OWoXDQaT6eoGoP1
 pipecat/processors/gstreamer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/processors/gstreamer/pipeline_source.py,sha256=eXckOY1rQeSBjSfLs8EFEkdlTZEq94osOTFWeNh6C4Y,9765
 pipecat/processors/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pipecat/processors/metrics/frame_processor_metrics.py,sha256
+pipecat/processors/metrics/frame_processor_metrics.py,sha256=-p1mv6U3w36JWScz4hTsKsRQtUqTZut-kDLqZr6h2x4,10244
 pipecat/processors/metrics/sentry.py,sha256=Gts-b-H3EDFUvv-qn44e9pSDAWUKk72tr7tEfutxxK0,4911
 pipecat/runner/__init__.py,sha256=iJh4vFMGNQYi_ATVGXJDU4rOJwI-1Y6fmkyV18-ddAE,64
 pipecat/runner/daily.py,sha256=t-D-sgVC2SnT_YCTDaQJgcxVnzL8-pQhdmxp7gV2snI,9646
@@ -153,17 +154,18 @@ pipecat/runner/livekit.py,sha256=in-2Io3FUZV-VcZZ-gQCx9L1WnKp5sHqmm7tDYlFNl4,458
 pipecat/runner/run.py,sha256=McalzMoFYEJJRXyoD5PBAyUhHCdsEeeZJk8lBvplRck,30054
 pipecat/runner/types.py,sha256=zHjbAiU17fG0ypLXCEzPu7bpDOutAg-4gE7TESvK8n0,1761
 pipecat/runner/utils.py,sha256=Ve9rjRvbt1o8e9by0nIrCJzUDGcuJUeYYhkqycmgHXc,18682
-pipecat/serializers/__init__.py,sha256=
-pipecat/serializers/asterisk.py,sha256=
+pipecat/serializers/__init__.py,sha256=z0V5GflCoPt4k2Yqm4ivuzKDh9VsYYAgK2UXZTw10aU,863
+pipecat/serializers/asterisk.py,sha256=QLJMXkU3DZ0sgFw3Vq2Zf8PHKkQQguL_v-l2Io4lZ_M,6729
 pipecat/serializers/base_serializer.py,sha256=OyBUZccs2ZT9mfkBbq2tGsUJMvci6o-j90Cl1sicPaI,2030
-pipecat/serializers/convox.py,sha256=
-pipecat/serializers/custom.py,sha256=
+pipecat/serializers/convox.py,sha256=fj9NkFTB74B9k8qWEuICQNGUQtEV0DusaHohkOqNLa8,11145
+pipecat/serializers/custom.py,sha256=clUEqOazGe3B2XoUFRN9zkFpMd6aIZeVRTqBRHAzavM,9071
 pipecat/serializers/exotel.py,sha256=B04LtNnRMzKmaS61gPZbUjc2nbki3FmpCfUMww6cOe4,5953
 pipecat/serializers/livekit.py,sha256=OMaM7yUiHfeTPbpNxE2TrmIzjmbNQIjNvlujt81dsRI,3285
 pipecat/serializers/plivo.py,sha256=ie6VUhZDTJ7KlAuJyHNeIeMtJ3ScDq_2js1SZtz7jLI,9256
 pipecat/serializers/protobuf.py,sha256=L0jSqvgTdkfxsu6JWjYK8QSTVji9nhzmgRsEEbGU7xY,5223
 pipecat/serializers/telnyx.py,sha256=eFkC7dExDFildYLR8DPvgfHbgXlCwdSPd1vc11yxyok,10847
 pipecat/serializers/twilio.py,sha256=0emSzXVw8DU_N5RPruMekbBKku9Q429-0z1PMuYejSk,10823
+pipecat/serializers/vi.py,sha256=XdnyNwhpxo8CvgS4v1_9DmetF0BkRmx8KoSVdjCdPPQ,11247
 pipecat/services/__init__.py,sha256=8e3Ta-8_BOPozhDB3l0GJkNXs5PWhib6yqZQUof2Kvw,1209
 pipecat/services/ai_service.py,sha256=yE386fm2Id-yD4fCNfkmEMtg0lTA7PB17n2x_A_jwTg,5896
 pipecat/services/ai_services.py,sha256=_RrDWfM8adV17atzY9RxK0nXRVM5kbUkKrvN90GAWYM,795
@@ -205,19 +207,19 @@ pipecat/services/azure/realtime/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm
 pipecat/services/azure/realtime/llm.py,sha256=MnDiw-YJP3kll1gbkta4z4vsWfWZ5oBprZCinMP9O0M,2385
 pipecat/services/cartesia/__init__.py,sha256=vzh0jBnfPwWdxFfV-tu0x1HFoOTgr9s91GYmD-CJUtY,284
 pipecat/services/cartesia/stt.py,sha256=00k9gQYo_xPKb-RRJ-RNV4LPFw-7xXiFU7ACFLYttWY,12388
-pipecat/services/cartesia/tts.py,sha256=
+pipecat/services/cartesia/tts.py,sha256=I_OZCINywkDXmYzFL35MjSN8cAuNEaJs7nj0YB_obtc,27008
 pipecat/services/cerebras/__init__.py,sha256=5zBmqq9Zfcl-HC7ylekVS5qrRedbl1mAeEwUT-T-c_o,259
 pipecat/services/cerebras/llm.py,sha256=-yzSe_6YDGigwzES-LZS4vNXMPugmvsIYEpTySyr5nA,3047
 pipecat/services/deepgram/__init__.py,sha256=IjRtMI7WytRDdmYVpk2qDWClXUiNgdl7ZkvEAWg1eYE,304
-pipecat/services/deepgram/stt.py,sha256=
+pipecat/services/deepgram/stt.py,sha256=jej9sFI5xwuC_NwRPjql48sjaOMwOV9B836T67gG70A,25343
 pipecat/services/deepgram/tts.py,sha256=H_2WCJEx3_L4ytrHHRNkA-6GKTd1coou_vvTfiEodpQ,3745
 pipecat/services/deepgram/flux/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/services/deepgram/flux/stt.py,sha256=yCZodrHAOShgYy_GbdviX8iAuh36dBgDL41gHMXVxEM,25887
 pipecat/services/deepseek/__init__.py,sha256=bU5z_oNGzgrF_YpsD9pYIMtEibeZFaUobbRjJ9WcYyE,259
 pipecat/services/deepseek/llm.py,sha256=5KjpU2blmhUTM3LcRE1ymdsk6OmoFkIzeQgyNOGwQh8,3112
 pipecat/services/elevenlabs/__init__.py,sha256=cMx5v0HEMh4WetMm5byR9tIjG6_wNVs9UxqWyB3tjlM,313
-pipecat/services/elevenlabs/stt.py,sha256=
-pipecat/services/elevenlabs/tts.py,sha256=
+pipecat/services/elevenlabs/stt.py,sha256=c-6GDeyZCMcXu4FmcG0vugBRsUnq8Iz_L9XX_Y_8TlM,29453
+pipecat/services/elevenlabs/tts.py,sha256=uKN7fu10MsBR9iRhadQoF3OeVZb0efyDDB4Ru8Z3E-Q,45800
 pipecat/services/fal/__init__.py,sha256=z_kfZETvUcKy68Lyvni4B-RtdkOvz3J3eh6sFDVKq6M,278
 pipecat/services/fal/image.py,sha256=vArKLKrIGoZfw_xeZY_E7zbUzfzVsScj-R7mOmVqjRQ,4585
 pipecat/services/fal/stt.py,sha256=-5tw7N8srBJTS0Q65SN4csmLkIB6cLHR9pXKimxg55o,9678
@@ -236,7 +238,7 @@ pipecat/services/google/__init__.py,sha256=MDd6-gaugR1cUaa5cRxBJEEoo6bCnn0TBMWh8
 pipecat/services/google/frames.py,sha256=_HHljqYg7x0wh6nTRLqKaavThuMxkKFsDeLAFgVutmU,2277
 pipecat/services/google/google.py,sha256=D_GWyJQxnJmJ0sM8SLwcxom5e8snF3W3IhsEjTM7Uqg,507
 pipecat/services/google/image.py,sha256=LQYIctDIB31udYvk3meC9EXTY7VDdWb_veCTFEltTRU,4674
-pipecat/services/google/llm.py,sha256=
+pipecat/services/google/llm.py,sha256=lwb9tjqOMUjPdAZB7py-femsNt_Q6Ekw-9ZP_4OSykg,40805
 pipecat/services/google/llm_openai.py,sha256=cJDSKFOFFbBxWdsRC2f2kuPa_lpi-DgnfaPJLNsz82E,7520
 pipecat/services/google/llm_vertex.py,sha256=0UL2U0aDATWTAWYh-ypTNihF4RS1tsl_E4KwPhSQ76c,8137
 pipecat/services/google/rtvi.py,sha256=PZb1yVny5YG7_XmJRXPzs3iYapeQ4XHreFN1v6KwTGM,3014
@@ -279,7 +281,7 @@ pipecat/services/nim/llm.py,sha256=o4WPGI6kOmSiMV7WwOZ0cNEAoq9hW4Aqs2R8X7c9i94,4
 pipecat/services/ollama/__init__.py,sha256=aw-25zYsR8LR74OFFlMKMTnJjaKwOzdPWVsClueNRkI,255
 pipecat/services/ollama/llm.py,sha256=rfpG92LRHGJlpENKhF6ld8CLVS9DxlKW-WRVNldOIGs,1605
 pipecat/services/openai/__init__.py,sha256=V0ZVa8PzEm3hmcStYICbAsYwfgk4ytZ6kiQoq9UZPmI,354
-pipecat/services/openai/base_llm.py,sha256=
+pipecat/services/openai/base_llm.py,sha256=OYzxsbSw49FH6CoY6au95PEs7W3JClkt-IM8gFRP7jI,22066
 pipecat/services/openai/image.py,sha256=3e3h-dVQ6DQuQE7fp8akXwRMd-oYOdGuZg7RCOjHu9A,2994
 pipecat/services/openai/llm.py,sha256=_aKtz1VebSFUUenT3tH6mBW9pSCm65_u45cDu_dkTzs,7396
 pipecat/services/openai/stt.py,sha256=Idf0k73kxFyDgNRBt62MFpoKKNsBV9bwvJteJ6MGWzQ,2419
@@ -318,17 +320,17 @@ pipecat/services/riva/__init__.py,sha256=rObSsj504O_TMXhPBg_ymqKslZBhovlR-A0aaRZ
 pipecat/services/riva/stt.py,sha256=bAss4dimx8eideaSPmPHM15_rSV3tfXNf13o5n1mfv4,25146
 pipecat/services/riva/tts.py,sha256=idbqx3I2NlWCXtrIFsjEaYapxA3BLIA14ai3aMBh-2w,8158
 pipecat/services/salesforce/__init__.py,sha256=OFvYbcvCadYhcKdBAVLj3ZUXVXQ1HyVyhgxIFf6_Thg,173
-pipecat/services/salesforce/llm.py,sha256=
+pipecat/services/salesforce/llm.py,sha256=2wh4U5mBElj5Olze2L0jC6V-UjFDC8ZEXlAKu0ORTwI,27825
 pipecat/services/sambanova/__init__.py,sha256=oTXExLic-qTcsfsiWmssf3Elclf3IIWoN41_2IpoF18,128
 pipecat/services/sambanova/llm.py,sha256=5XVfPLEk__W8ykFqLdV95ZUhlGGkAaJwmbciLdZYtTc,8976
 pipecat/services/sambanova/stt.py,sha256=ZZgEZ7WQjLFHbCko-3LNTtVajjtfUvbtVLtFcaNadVQ,2536
 pipecat/services/sarvam/__init__.py,sha256=B4TN_tTHV9fWg0aSoPvfQlXISA0nJaQ9-u08I9UWvH4,280
 pipecat/services/sarvam/stt.py,sha256=p9Iq4loMwnftNZ_S0WoFSoX7iBbRKyja6RsVWbpj508,19314
-pipecat/services/sarvam/tts.py,sha256=
+pipecat/services/sarvam/tts.py,sha256=lrwfdC53kZ7f2QPgNRxzryISNkrJCvNtlZ-19-iXg94,27610
 pipecat/services/simli/__init__.py,sha256=cbDcqOaGsEgKbGYKpJ1Vv7LN4ZjOWA04sE84WW5vgQI,257
 pipecat/services/simli/video.py,sha256=Zu2XLvl2Y6VHaWzT9wEdzW9d0EYoZyzYLxjQFyV8vho,8320
 pipecat/services/soniox/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pipecat/services/soniox/stt.py,sha256=
+pipecat/services/soniox/stt.py,sha256=zRp5qWU051hEAikt0vB0rbHrkQkH5sT-IOe-o5vCurQ,16650
 pipecat/services/speechmatics/__init__.py,sha256=Jgq1fqrZVkpWC21D79L1cn5Ub8PnYgnnCaqC5pOlbIc,89
 pipecat/services/speechmatics/stt.py,sha256=GLGJzlMSeZ1WzTOMjhKXDl5JYkqGhnFTbP3o0ez0hSw,44696
 pipecat/services/tavus/__init__.py,sha256=SNyyi2Xq6tXIihDG2Bwvmg6Srbd-uWd1RwG-NKWcPuI,257
@@ -338,7 +340,7 @@ pipecat/services/together/llm.py,sha256=VSayO-U6g9Ld0xK9CXRQPUsd5gWJKtiA8qDAyXgs
 pipecat/services/ultravox/__init__.py,sha256=EoHCSXI2o0DFQslELgkhAGZtxDj63gZi-9ZEhXljaKE,259
 pipecat/services/ultravox/stt.py,sha256=uCQm_-LbycXdXRV6IE1a6Mymis6tyww7V8PnPzAQtx8,16586
 pipecat/services/vistaar/__init__.py,sha256=UFfSWFN5rbzl6NN-E_OH_MFaSYodZWNlenAU0wk-rAI,110
-pipecat/services/vistaar/llm.py,sha256=
+pipecat/services/vistaar/llm.py,sha256=aJGGf5Sn08x8XjHt9gNZ4dE5xzBPVN7Sde3P5EqeTWk,23587
 pipecat/services/whisper/__init__.py,sha256=smADmw0Fv98k7cGRuHTEcljKTO2WdZqLpJd0qsTCwH8,281
 pipecat/services/whisper/base_stt.py,sha256=VhslESPnYIeVbmnQTzmlZPV35TH49duxYTvJe0epNnE,7850
 pipecat/services/whisper/stt.py,sha256=9Qd56vWMzg3LtHikQnfgyMtl4odE6BCHDbpAn3HSWjw,17480
@@ -350,10 +352,10 @@ pipecat/sync/event_notifier.py,sha256=h50fC-RBGaGldWZx_wpgOmMIwJiq0PeNwQq5GPmfRR
 pipecat/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/tests/utils.py,sha256=DEHDQV8uhCuKIqoHUPGVdUoCiKqTCG9zv5GqLXWWwvY,7870
 pipecat/transcriptions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pipecat/transcriptions/language.py,sha256
+pipecat/transcriptions/language.py,sha256=9kqmqCJF2NUTksWn0TH7-huRwtDqQzzALKzF1CnK_cY,10106
 pipecat/transports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pipecat/transports/base_input.py,sha256=
-pipecat/transports/base_output.py,sha256=
+pipecat/transports/base_input.py,sha256=AkdE-j9UksjIrUGJc7laMOaknXgOS7L22D5sehZ-6ew,20176
+pipecat/transports/base_output.py,sha256=T_NfU38sT6wKxXF1jA7hW5eLhTK11pundQBxAojswW8,36723
 pipecat/transports/base_transport.py,sha256=JlNiH0DysTfr6azwHauJqY_Z9HJC702O29Q0qrsLrg4,7530
 pipecat/transports/daily/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/transports/daily/transport.py,sha256=VanO33ff9g6px-vwGgT6M7cMVg786pOGfMU7Okm7a78,91917
@@ -394,6 +396,7 @@ pipecat/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/utils/base_object.py,sha256=xnG4FX-ZrmswM2GItIMDtSVus_lHjaeTigi8yhw5n7o,7185
 pipecat/utils/logger_config.py,sha256=5-RmvReZIINeqSXz3ALhEIiMZ_azmpOxnlIkdyCjWWk,5606
 pipecat/utils/network.py,sha256=RRQ7MmTcbeXBJ2aY5UbMCQ6elm5B8Rxkn8XqkJ9S0Nc,825
+pipecat/utils/redis.py,sha256=JmBaC1yY6e8qygUQkAER3DNFCYSCH18hd7NN9qqjDMU,1677
 pipecat/utils/string.py,sha256=NIkp2Zesx8hvtVYGTWV1gaUkSerePLzUxl-mOKJ7XXQ,7321
 pipecat/utils/time.py,sha256=lirjh24suz9EI1pf2kYwvAYb3I-13U_rJ_ZRg3nRiGs,1741
 pipecat/utils/utils.py,sha256=T2y1Mcd9yWiZiIToUiRkhW-n7EFf8juk3kWX3TF8XOQ,2451
@@ -414,7 +417,7 @@ pipecat/utils/tracing/service_decorators.py,sha256=fwzxFpi8DJl6BJbK74G0UEB4ccMJg
 pipecat/utils/tracing/setup.py,sha256=7TEgPNpq6M8lww8OQvf0P9FzYc5A30xICGklVA-fua0,2892
 pipecat/utils/tracing/turn_context_provider.py,sha256=ikon3plFOx0XbMrH6DdeHttNpb-U0gzMZIm3bWLc9eI,2485
 pipecat/utils/tracing/turn_trace_observer.py,sha256=dma16SBJpYSOE58YDWy89QzHyQFc_9gQZszKeWixuwc,9725
-dv_pipecat_ai-0.0.85.
-dv_pipecat_ai-0.0.85.
-dv_pipecat_ai-0.0.85.
-dv_pipecat_ai-0.0.85.
+dv_pipecat_ai-0.0.85.dev858.dist-info/METADATA,sha256=skQyLk2xRWMvuIMOi7At9niD00oe-fbvkONlfxf2Css,32955
+dv_pipecat_ai-0.0.85.dev858.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dv_pipecat_ai-0.0.85.dev858.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
+dv_pipecat_ai-0.0.85.dev858.dist-info/RECORD,,
pipecat/audio/turn/smart_turn/local_smart_turn_v3.py
CHANGED

@@ -35,12 +35,15 @@ class LocalSmartTurnAnalyzerV3(BaseSmartTurn):
     enabling offline operation without network dependencies.
     """
 
-    def __init__(
+    def __init__(
+        self, *, smart_turn_model_path: Optional[str] = None, cpu_count: int = 1, **kwargs
+    ):
         """Initialize the local ONNX smart-turn-v3 analyzer.
 
         Args:
             smart_turn_model_path: Path to the ONNX model file. If this is not
                 set, the bundled smart-turn-v3.0 model will be used.
+            cpu_count: The number of CPUs to use for inference. Defaults to 1.
             **kwargs: Additional arguments passed to BaseSmartTurn.
         """
         super().__init__(**kwargs)
@@ -70,6 +73,7 @@ class LocalSmartTurnAnalyzerV3(BaseSmartTurn):
         so = ort.SessionOptions()
         so.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
         so.inter_op_num_threads = 1
+        so.intra_op_num_threads = cpu_count
         so.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
 
         self._feature_extractor = WhisperFeatureExtractor(chunk_length=8)
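Note: a minimal usage sketch (not part of the diff) of the new cpu_count option, which the patch forwards to ONNX Runtime's intra_op_num_threads; the constructor call itself is illustrative only.

    from pipecat.audio.turn.smart_turn.local_smart_turn_v3 import LocalSmartTurnAnalyzerV3

    # Use the bundled smart-turn-v3.0 model and allow two CPU threads for inference.
    analyzer = LocalSmartTurnAnalyzerV3(cpu_count=2)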
pipecat/frames/frames.py
CHANGED
@@ -451,12 +451,14 @@ class TranscriptionMessage:
         content: The message content/text.
         user_id: Optional identifier for the user.
         timestamp: Optional timestamp when the message was created.
+        message_id: Optional unique identifier for tracking and dropping messages.
     """
 
     role: Literal["user", "assistant"]
     content: str
     user_id: Optional[str] = None
     timestamp: Optional[str] = None
+    message_id: Optional[int] = None
 
 
 @dataclass
@@ -510,6 +512,17 @@ class TranscriptionUpdateFrame(DataFrame):
         return f"{self.name}(pts: {pts}, messages: {len(self.messages)})"
 
 
+@dataclass
+class TranscriptDropFrame(DataFrame):
+    """Frame indicating previously emitted transcript chunks should be discarded.
+
+    Parameters:
+        transcript_ids: List of frame/message identifiers to drop.
+    """
+
+    transcript_ids: List[int]
+
+
 @dataclass
 class LLMContextFrame(Frame):
     """Frame containing a universal LLM context.
@@ -573,6 +586,27 @@ class LLMRunFrame(DataFrame):
     pass
 
 
+@dataclass
+class WarmupLLMFrame(DataFrame):
+    """Frame to trigger prompt caching/warmup in supported LLM providers.
+
+    This frame instructs the LLM service to cache the provided messages
+    without generating a visible response. Primarily used for warming up provider
+    caches (e.g., Claude's prompt caching, OpenAI's prompt caching) to improve
+    latency for subsequent requests.
+
+    The LLM service should:
+    1. Send the messages to the provider to trigger caching
+    2. Generate a minimal response (e.g., single word)
+    3. Discard the response without emitting LLM output frames
+
+    Parameters:
+        messages: List of messages to send for cache warming (should match conversation structure).
+    """
+
+    messages: List[dict]
+
+
 @dataclass
 class LLMMessagesAppendFrame(DataFrame):
     """Frame containing LLM messages to append to current context.
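Note: a minimal sketch (not from the diff) of how the two new frame types above might be constructed; the message content and ids are illustrative only.

    from pipecat.frames.frames import TranscriptDropFrame, WarmupLLMFrame

    # Warm the provider's prompt cache with a conversation prefix; the LLM service
    # is expected to discard the minimal response it generates.
    warmup = WarmupLLMFrame(
        messages=[{"role": "system", "content": "You are a voice assistant."}]
    )

    # Ask upstream processors to drop transcript chunks identified by frame/message ids.
    drop = TranscriptDropFrame(transcript_ids=[101, 102])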
pipecat/metrics/connection_metrics.py
ADDED

@@ -0,0 +1,45 @@
+"""Connection metrics data models for Pipecat framework.
+
+This module extends the core metrics system with connection-specific metrics
+including connection establishment times, retry attempts, and network latencies.
+"""
+
+from typing import Optional
+
+from pydantic import BaseModel
+
+from pipecat.metrics.metrics import MetricsData
+
+
+class ConnectionMetricsData(MetricsData):
+    """Unified connection and reconnection metrics data.
+
+    Handles both initial connection establishment and reconnection scenarios.
+    For initial connections, use connect_time, success, connection_attempts.
+    For reconnections, use reconnect_count, downtime, reconnect_success, reason.
+
+    Parameters:
+        connect_time: Time taken to establish connection in seconds.
+        success: Whether the connection attempt was successful.
+        connection_attempts: Number of connection attempts made.
+        error_message: Error message if connection failed.
+        connection_type: Type of connection (websocket, http, etc.).
+        reconnect_count: Number of reconnection attempts (for reconnection scenarios).
+        downtime: Time connection was down in seconds (for reconnection scenarios).
+        reconnect_success: Whether reconnection was successful (for reconnection scenarios).
+        reason: Reason for reconnection (for reconnection scenarios).
+    """
+
+    connect_time: Optional[float] = None
+    success: bool = True
+    connection_attempts: int = 1
+    error_message: Optional[str] = None
+    connection_type: Optional[str] = None
+
+    # Reconnection-specific fields
+    reconnect_count: Optional[int] = None
+    downtime: Optional[float] = None
+    reconnect_success: Optional[bool] = None
+    reason: Optional[str] = None
+
+
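Note: a minimal sketch (not from the diff) of how ConnectionMetricsData might be populated for the two scenarios its docstring describes; the processor name and values are illustrative.

    from pipecat.metrics.connection_metrics import ConnectionMetricsData

    # Initial connection: first websocket attempt succeeded in ~230 ms.
    connect_metrics = ConnectionMetricsData(
        processor="ExampleSTTService#0",  # 'processor' is inherited from MetricsData
        connect_time=0.23,
        success=True,
        connection_attempts=1,
        connection_type="websocket",
    )

    # Reconnection: second reconnect after ~1.4 s of downtime.
    reconnect_metrics = ConnectionMetricsData(
        processor="ExampleSTTService#0",
        reconnect_count=2,
        downtime=1.4,
        reconnect_success=True,
        reason="websocket_closed",
    )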
pipecat/processors/aggregators/llm_response.py
CHANGED

@@ -50,6 +50,7 @@ from pipecat.frames.frames import (
     SpeechControlParamsFrame,
     StartFrame,
     TextFrame,
+    TranscriptDropFrame,
     TranscriptionFrame,
     UserImageRawFrame,
     UserStartedSpeakingFrame,
@@ -445,6 +446,7 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
         self._latest_final_transcript = ""
         self._last_user_speaking_time = 0
         self._last_aggregation_push_time = 0
+        self._pending_transcription_ids: List[int] = []
 
     async def reset(self):
         """Reset the aggregation state and interruption strategies."""
@@ -452,6 +454,7 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
         self._was_bot_speaking = False
         self._seen_interim_results = False
         self._waiting_for_aggregation = False
+        self._pending_transcription_ids.clear()
         [await s.reset() for s in self._interruption_strategies]
 
     async def handle_aggregation(self, aggregation: str):
@@ -469,8 +472,8 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
             frame: The frame to process.
             direction: The direction of frame flow in the pipeline.
         """
-        if isinstance(frame,
-            self.logger.debug("Received
+        if isinstance(frame, InterruptionFrame):
+            self.logger.debug("Received InterruptionFrame")
         await super().process_frame(frame, direction)
 
         if isinstance(frame, StartFrame):
@@ -547,7 +550,8 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
                    await self._process_aggregation()
                else:
                    self.logger.debug("Interruption conditions not met - not pushing aggregation")
-                   # Don't process aggregation,
+                   # Don't process aggregation, discard pending transcriptions and reset
+                   await self._discard_pending_transcriptions("interruption_conditions_not_met")
                    await self.reset()
            else:
                if trigger_interruption:
@@ -555,7 +559,7 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
                        "Triggering interruption - pushing BotInterruptionFrame and aggregation"
                    )
                    # await self.push_frame(BotInterruptionFrame(), FrameDirection.UPSTREAM)
-                   await self.push_frame(
+                   await self.push_frame(InterruptionFrame(), FrameDirection.DOWNSTREAM)
                    self.logger.debug("Pushed BotInterruptionFrame")
                # No interruption config - normal behavior (always push aggregation)
                await self._process_aggregation()
@@ -587,6 +591,13 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
 
         return any([await should_interrupt(s) for s in self._interruption_strategies])
 
+    async def _discard_pending_transcriptions(self, reason: str):
+        """Notify upstream processors that pending transcripts should be dropped."""
+        if self._pending_transcription_ids:
+            drop_frame = TranscriptDropFrame(transcript_ids=list(self._pending_transcription_ids))
+            await self.push_frame(drop_frame, FrameDirection.UPSTREAM)
+            self._pending_transcription_ids.clear()
+
     async def _start(self, frame: StartFrame):
         self._create_aggregation_task()
 
@@ -613,10 +624,18 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
         for s in self.interruption_strategies:
             await s.append_audio(frame.audio, frame.sample_rate)
 
+    async def _discard_pending_transcriptions(self, reason: str):
+        """Notify upstream processors that pending transcripts should be dropped."""
+        if self._pending_transcription_ids:
+            drop_frame = TranscriptDropFrame(transcript_ids=list(self._pending_transcription_ids))
+            await self.push_frame(drop_frame, FrameDirection.UPSTREAM)
+            self._pending_transcription_ids.clear()
+
     async def _handle_user_started_speaking(self, frame: UserStartedSpeakingFrame):
         if len(self._aggregation) > 0:
             self.logger.debug(f"Dropping {self._aggregation}")
             self._aggregation = ""
+            await self._discard_pending_transcriptions("user_started_speaking")
         self._latest_final_transcript = ""
         self._last_user_speaking_time = time.time()
         self._user_speaking = True
@@ -661,6 +680,7 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
             return
 
         self._aggregation += f" {text}" if self._aggregation else text
+        self._pending_transcription_ids.append(frame.id)
         # We just got a final result, so let's reset interim results.
         self._seen_interim_results = False
 
@@ -790,6 +810,7 @@ class LLMUserContextAggregator(LLMContextResponseAggregator):
         if self._bot_speaking and not self._params.enable_emulated_vad_interruptions:
             # If emulated VAD interruptions are disabled and bot is speaking, ignore
             logger.debug("Ignoring user speaking emulation, bot is speaking.")
+            await self._discard_pending_transcriptions("emulated_vad_ignored")
             await self.reset()
         else:
             # Either bot is not speaking, or emulated VAD interruptions are enabled
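Note: the aggregator above pushes TranscriptDropFrame upstream with the pending ids. A hypothetical upstream processor (not shown in this diff, assumed purely for illustration) could honor it like this:

    from pipecat.frames.frames import Frame, TranscriptDropFrame
    from pipecat.processors.frame_processor import FrameDirection, FrameProcessor


    class DroppableTranscriptBuffer(FrameProcessor):
        """Hypothetical buffer that discards chunks named in TranscriptDropFrame."""

        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self._chunks = {}  # message_id -> transcript text

        async def process_frame(self, frame: Frame, direction: FrameDirection):
            await super().process_frame(frame, direction)
            if isinstance(frame, TranscriptDropFrame):
                # Forget any buffered chunks the aggregator decided not to use.
                for message_id in frame.transcript_ids:
                    self._chunks.pop(message_id, None)
            else:
                await self.push_frame(frame, direction)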
pipecat/processors/dtmf_aggregator.py
CHANGED

@@ -1,3 +1,5 @@
+"""DTMF aggregator processor for collecting and flushing DTMF input digits."""
+
 import asyncio
 
 from pipecat.frames.frames import (
@@ -8,8 +10,8 @@ from pipecat.frames.frames import (
     EndFrame,
     Frame,
     InputDTMFFrame,
+    InterruptionFrame,
     StartDTMFCaptureFrame,
-    StartInterruptionFrame,
     TranscriptionFrame,
     WaitForDTMFFrame,
 )
@@ -19,10 +21,11 @@ from pipecat.utils.time import time_now_iso8601
 
 class DTMFAggregator(FrameProcessor):
     """Aggregates DTMF frames using idle wait logic.
+
     The aggregator accumulates digits from incoming InputDTMFFrame instances.
     It flushes the aggregated digits by emitting a TranscriptionFrame when:
     - No new digit arrives within the specified timeout period,
-    - The termination digit (
+    - The termination digit ("#") is received, or
     - The number of digits aggregated equals the configured 'digits' value.
     """
 
@@ -34,7 +37,9 @@ class DTMFAggregator(FrameProcessor):
         digits: int = None,
         **kwargs,
     ):
-        """
+        """Initialize the DTMF aggregator.
+
+        :param timeout: Idle timeout in seconds before flushing the aggregated digits.
         :param digits: Number of digits to aggregate before flushing.
         """
         super().__init__(**kwargs)
@@ -48,6 +53,7 @@ class DTMFAggregator(FrameProcessor):
         self._dtmf_capture_active = False
 
     async def process_frame(self, frame: Frame, direction: FrameDirection) -> None:
+        """Process incoming frames and handle DTMF input aggregation."""
        # Handle DTMF frames.
        await super().process_frame(frame, direction)
 
@@ -69,8 +75,8 @@ class DTMFAggregator(FrameProcessor):
            self._digit_event.set()  # Trigger the timeout handler
            await self._start_dtmf_capture()
            await self.push_frame(frame, direction)
-        elif isinstance(frame,
-            self.logger.debug("Received
+        elif isinstance(frame, InterruptionFrame):
+            self.logger.debug("Received InterruptionFrame")
            if self._aggregation:
                await self.flush_aggregation()
            await self._end_dtmf_capture()
@@ -108,9 +114,7 @@ class DTMFAggregator(FrameProcessor):
        if "digits" in settings:
            new_digits = settings["digits"]
            if new_digits != self._digits:
-                self.logger.debug(
-                    f"Updating DTMF digits from {self._digits} to {new_digits}"
-                )
+                self.logger.debug(f"Updating DTMF digits from {self._digits} to {new_digits}")
                self._digits = new_digits
                settings_changed = True
 
@@ -125,9 +129,7 @@ class DTMFAggregator(FrameProcessor):
            new_end_on = set(end_value)
 
            if new_end_on != self._end_on:
-                self.logger.debug(
-                    f"Updating DTMF end_on from {self._end_on} to {new_end_on}"
-                )
+                self.logger.debug(f"Updating DTMF end_on from {self._end_on} to {new_end_on}")
                self._end_on = new_end_on
                settings_changed = True
 
@@ -142,9 +144,7 @@ class DTMFAggregator(FrameProcessor):
            new_reset_on = set(reset_value)
 
            if new_reset_on != self._reset_on:
-                self.logger.debug(
-                    f"Updating DTMF reset_on from {self._reset_on} to {new_reset_on}"
-                )
+                self.logger.debug(f"Updating DTMF reset_on from {self._reset_on} to {new_reset_on}")
                self._reset_on = new_reset_on
                settings_changed = True
 
@@ -183,9 +183,7 @@ class DTMFAggregator(FrameProcessor):
    def _create_aggregation_task(self, raise_timeout: bool = False) -> None:
        """Creates the aggregation task if it hasn't been created yet."""
        if not self._aggregation_task:
-            self._aggregation_task = self.create_task(
-                self._aggregation_task_handler(raise_timeout)
-            )
+            self._aggregation_task = self.create_task(self._aggregation_task_handler(raise_timeout))
 
    async def _stop_aggregation_task(self) -> None:
        """Stops the aggregation task."""
@@ -198,9 +196,7 @@ class DTMFAggregator(FrameProcessor):
        while True:
            try:
                # Wait for a new digit signal with a timeout.
-                await asyncio.wait_for(
-                    self._digit_event.wait(), timeout=self._idle_timeout
-                )
+                await asyncio.wait_for(self._digit_event.wait(), timeout=self._idle_timeout)
                self._digit_event.clear()
            except asyncio.TimeoutError:
                # No new digit arrived within the timeout period; flush if needed
@@ -216,7 +212,7 @@ class DTMFAggregator(FrameProcessor):
        aggregated_frame.metadata["push_aggregation"] = True
 
        # Send interruption frame (as per original design)
-        await self.push_frame(
+        await self.push_frame(InterruptionFrame(), FrameDirection.DOWNSTREAM)
 
        # Push the transcription frame
        await self.push_frame(aggregated_frame, FrameDirection.DOWNSTREAM)
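Note: a minimal usage sketch of the aggregator (not part of the diff), assuming the constructor keywords match the documented parameters; the values are illustrative.

    from pipecat.processors.dtmf_aggregator import DTMFAggregator

    # Flush collected digits as a TranscriptionFrame after 3 s of idle time,
    # after 4 digits have been entered, or when the "#" termination digit arrives.
    dtmf = DTMFAggregator(timeout=3.0, digits=4)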