dv-pipecat-ai 0.0.82.dev807__py3-none-any.whl → 0.0.82.dev815__py3-none-any.whl
This diff shows the changes between publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of dv-pipecat-ai might be problematic.
- {dv_pipecat_ai-0.0.82.dev807.dist-info → dv_pipecat_ai-0.0.82.dev815.dist-info}/METADATA +1 -1
- {dv_pipecat_ai-0.0.82.dev807.dist-info → dv_pipecat_ai-0.0.82.dev815.dist-info}/RECORD +7 -6
- pipecat/serializers/asterisk.py +129 -0
- pipecat/services/vistaar/llm.py +56 -2
- {dv_pipecat_ai-0.0.82.dev807.dist-info → dv_pipecat_ai-0.0.82.dev815.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.82.dev807.dist-info → dv_pipecat_ai-0.0.82.dev815.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.82.dev807.dist-info → dv_pipecat_ai-0.0.82.dev815.dist-info}/top_level.txt +0 -0
{dv_pipecat_ai-0.0.82.dev807.dist-info → dv_pipecat_ai-0.0.82.dev815.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-dv_pipecat_ai-0.0.82.
+dv_pipecat_ai-0.0.82.dev815.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
 pipecat/__init__.py,sha256=j0Xm6adxHhd7D06dIyyPV_GlBYLlBnTAERVvD_jAARQ,861
 pipecat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -123,6 +123,7 @@ pipecat/runner/run.py,sha256=BuVI9-cpnQHOBxymkPoqpGaSaZImWZKLeu1g0JsvS8E,18818
 pipecat/runner/types.py,sha256=iG9A1ox1ePXiEo2bWANsi6RxpGOb5n_Am5O3enbojBM,1599
 pipecat/runner/utils.py,sha256=cT4G46skiIuZexm-KJ9ltrtufcGxPCAk7HW95rCy3tA,17724
 pipecat/serializers/__init__.py,sha256=OV61GQX5ZVU7l7Dt7UTBdv2wUF7ZvtbCoXryo7nnoGY,734
+pipecat/serializers/asterisk.py,sha256=rDb8qMYNGgHzRC_EyCgxB6p99d_1YXXoIm-YW7aHtAc,5236
 pipecat/serializers/base_serializer.py,sha256=OyBUZccs2ZT9mfkBbq2tGsUJMvci6o-j90Cl1sicPaI,2030
 pipecat/serializers/convox.py,sha256=MXCLhV6GMnoP8bI6-EVrObhrftEyTGOmzVeIU5ywmPo,9536
 pipecat/serializers/exotel.py,sha256=LB4wYoXDjPmtkydrZ0G4H4u-SXpQw9KjyRzBZCYloEE,5907
@@ -280,7 +281,7 @@ pipecat/services/together/llm.py,sha256=VSayO-U6g9Ld0xK9CXRQPUsd5gWJKtiA8qDAyXgs
 pipecat/services/ultravox/__init__.py,sha256=EoHCSXI2o0DFQslELgkhAGZtxDj63gZi-9ZEhXljaKE,259
 pipecat/services/ultravox/stt.py,sha256=uCQm_-LbycXdXRV6IE1a6Mymis6tyww7V8PnPzAQtx8,16586
 pipecat/services/vistaar/__init__.py,sha256=UFfSWFN5rbzl6NN-E_OH_MFaSYodZWNlenAU0wk-rAI,110
-pipecat/services/vistaar/llm.py,sha256=
+pipecat/services/vistaar/llm.py,sha256=yXo6hQ_YscxYZZbQLGoXy5X1Pt-lMPpGQc5MrnjYqpI,17332
 pipecat/services/whisper/__init__.py,sha256=smADmw0Fv98k7cGRuHTEcljKTO2WdZqLpJd0qsTCwH8,281
 pipecat/services/whisper/base_stt.py,sha256=VhslESPnYIeVbmnQTzmlZPV35TH49duxYTvJe0epNnE,7850
 pipecat/services/whisper/stt.py,sha256=9Qd56vWMzg3LtHikQnfgyMtl4odE6BCHDbpAn3HSWjw,17480
@@ -336,7 +337,7 @@ pipecat/utils/tracing/service_decorators.py,sha256=HwDCqLGijhYD3F8nxDuQmEw-YkRw0
 pipecat/utils/tracing/setup.py,sha256=7TEgPNpq6M8lww8OQvf0P9FzYc5A30xICGklVA-fua0,2892
 pipecat/utils/tracing/turn_context_provider.py,sha256=ikon3plFOx0XbMrH6DdeHttNpb-U0gzMZIm3bWLc9eI,2485
 pipecat/utils/tracing/turn_trace_observer.py,sha256=dma16SBJpYSOE58YDWy89QzHyQFc_9gQZszKeWixuwc,9725
-dv_pipecat_ai-0.0.82.
-dv_pipecat_ai-0.0.82.
-dv_pipecat_ai-0.0.82.
-dv_pipecat_ai-0.0.82.
+dv_pipecat_ai-0.0.82.dev815.dist-info/METADATA,sha256=L2lkaXtlnUH8zjhRFqwqSQS-0pfyy90Ym9bOFeAfchU,32457
+dv_pipecat_ai-0.0.82.dev815.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dv_pipecat_ai-0.0.82.dev815.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
+dv_pipecat_ai-0.0.82.dev815.dist-info/RECORD,,
pipecat/serializers/asterisk.py
ADDED
@@ -0,0 +1,129 @@
+# asterisk_ws_serializer.py
+import base64
+import json
+from typing import Literal, Optional
+
+from pydantic import BaseModel
+
+from pipecat.audio.utils import create_stream_resampler, pcm_to_ulaw, ulaw_to_pcm
+from pipecat.frames.frames import (
+    AudioRawFrame,
+    CancelFrame,
+    EndFrame,
+    Frame,
+    InputAudioRawFrame,
+    StartFrame,
+    StartInterruptionFrame,
+    TransportMessageFrame,
+    TransportMessageUrgentFrame,
+)
+from pipecat.serializers.base_serializer import FrameSerializer, FrameSerializerType
+
+
+class AsteriskFrameSerializer(FrameSerializer):
+    class InputParams(BaseModel):
+        """Configuration parameters for AsteriskFrameSerializer.
+
+        Parameters:
+            telephony_encoding: The encoding used by the telephony system (e.g., "pcmu" for μ-law).
+            telephony_sample_rate: The sample rate used by the telephony system (e.g., 8000 Hz).
+            sample_rate: Optional override for pipeline input sample rate.
+            auto_hang_up: Whether to automatically terminate call on EndFrame.
+        """
+
+        # What the ADAPTER/Asterisk is sending/expecting on the wire:
+        # "pcmu" -> μ-law @ 8k; "pcm16" -> signed 16-bit @ 8k
+        telephony_encoding: Literal["pcmu", "pcm16"] = "pcmu"
+        telephony_sample_rate: int = 8000
+        sample_rate: Optional[int] = None  # pipeline input rate
+        auto_hang_up: bool = False  # no-op here; adapter handles hangup
+
+    def __init__(self, stream_id: str, params: Optional[InputParams] = None):
+        self._stream_id = stream_id
+        self._params = params or AsteriskFrameSerializer.InputParams()
+        self._tel_rate = self._params.telephony_sample_rate
+        self._sample_rate = 0
+        self._in_resampler = create_stream_resampler()
+        self._out_resampler = create_stream_resampler()
+
+    @property
+    def type(self) -> FrameSerializerType:
+        return FrameSerializerType.TEXT  # we send/recv JSON strings
+
+    async def setup(self, frame: StartFrame):
+        self._sample_rate = self._params.sample_rate or frame.audio_in_sample_rate
+
+    # Pipecat -> Adapter (play to caller)
+    async def serialize(self, frame: Frame) -> str | bytes | None:
+        # On pipeline end, ask bridge to hang up
+        if (
+            self._params.auto_hang_up
+            and not self._hangup_sent
+            and isinstance(frame, (EndFrame, CancelFrame))
+        ):
+            self._hangup_sent = True
+            return json.dumps({"event": "hangup"})
+        if isinstance(frame, StartInterruptionFrame):
+            return json.dumps({"event": "clear", "streamId": self._stream_id})
+        if isinstance(frame, AudioRawFrame):
+            pcm = frame.audio
+            if self._params.telephony_encoding == "pcmu":
+                ul = await pcm_to_ulaw(pcm, frame.sample_rate, self._tel_rate, self._out_resampler)
+                if not ul:
+                    return None
+                payload = base64.b64encode(ul).decode("utf-8")
+                return json.dumps(
+                    {
+                        "event": "media",
+                        "encoding": "pcmu",
+                        "sampleRate": self._tel_rate,
+                        "payload": payload,
+                    }
+                )
+            else:  # "pcm16"
+                # resample to 8k if needed, but data stays PCM16 bytes
+                pcm8 = await self._out_resampler.resample(pcm, frame.sample_rate, self._tel_rate)
+                if not pcm8:
+                    return None
+                payload = base64.b64encode(pcm8).decode("utf-8")
+                return json.dumps(
+                    {
+                        "event": "media",
+                        "encoding": "pcm16",
+                        "sampleRate": self._tel_rate,
+                        "payload": payload,
+                    }
+                )
+        if isinstance(frame, (TransportMessageFrame, TransportMessageUrgentFrame)):
+            return json.dumps(frame.message)
+        return None
+
+    # Adapter -> Pipecat (audio from caller)
+    async def deserialize(self, data: str | bytes) -> Frame | None:
+        try:
+            msg = json.loads(data)
+        except Exception:
+            return None
+        if msg.get("event") == "media":
+            enc = msg.get("encoding")
+            sr = int(msg.get("sampleRate", self._tel_rate))
+            raw = base64.b64decode(msg.get("payload", ""))
+            if not raw:
+                return None
+            if enc == "pcmu":
+                pcm = await ulaw_to_pcm(raw, sr, self._sample_rate, self._in_resampler)
+            elif enc == "pcm16":
+                # resample if pipeline rate != 8k
+                pcm = await self._in_resampler.resample(raw, sr, self._sample_rate)
+            else:
+                return None
+            if not pcm:
+                return None
+            return InputAudioRawFrame(audio=pcm, num_channels=1, sample_rate=self._sample_rate)
+        elif msg.get("event") == "dtmf":
+            # optional: map to InputDTMFFrame if you want
+            return None
+        elif msg.get("event") == "hangup":
+            # Bridge is hanging up; you can treat as EndFrame if you want.
+            return CancelFrame()
+        return None
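For orientation, the sketch below reconstructs the JSON wire protocol that the new serializer implements, taken directly from the event and field names in serialize() and deserialize(); the stream id and audio bytes are invented for illustration. Note also that serialize() reads self._hangup_sent, which __init__ never initializes, so enabling auto_hang_up as shipped would raise AttributeError on the first EndFrame or CancelFrame (it is harmless at the default auto_hang_up=False).

# Illustrative protocol sketch (not part of the package): the JSON messages
# AsteriskFrameSerializer exchanges with the Asterisk-side WebSocket adapter.
# "my-stream-id" and the audio bytes are made up for the example.
import base64
import json

# Pipecat -> adapter: 20 ms of 8 kHz μ-law audio for the caller (160 samples, 1 byte each).
outbound_media = json.dumps(
    {
        "event": "media",
        "encoding": "pcmu",
        "sampleRate": 8000,
        "payload": base64.b64encode(b"\xff" * 160).decode("utf-8"),
    }
)

# Pipecat -> adapter: flush queued audio when the caller interrupts the bot.
clear_message = json.dumps({"event": "clear", "streamId": "my-stream-id"})

# Adapter -> Pipecat: caller audio; deserialize() converts this into an
# InputAudioRawFrame at the pipeline's input sample rate.
inbound_media = json.dumps(
    {
        "event": "media",
        "encoding": "pcmu",
        "sampleRate": 8000,
        "payload": base64.b64encode(b"\x7f" * 160).decode("utf-8"),
    }
)

# Adapter -> Pipecat: the bridge hung up; deserialize() returns CancelFrame().
hangup_message = json.dumps({"event": "hangup"})

print(outbound_media, clear_message, inbound_media, hangup_message, sep="\n")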
pipecat/services/vistaar/llm.py
CHANGED
@@ -13,6 +13,8 @@ from loguru import logger
 from pydantic import BaseModel, Field
 
 from pipecat.frames.frames import (
+    CancelFrame,
+    EndFrame,
     Frame,
     LLMFullResponseEndFrame,
     LLMFullResponseStartFrame,
@@ -68,6 +70,8 @@ class VistaarLLMService(LLMService):
         base_url: str = "https://vistaar.kenpath.ai/api",
         params: Optional[InputParams] = None,
         timeout: float = 30.0,
+        interim_timeout: float = 5.0,
+        interim_message: str = "एक क्षण थांबा, मी बघतो. ",
         **kwargs,
     ):
         """Initialize Vistaar LLM service.
@@ -76,6 +80,8 @@ class VistaarLLMService(LLMService):
             base_url: The base URL for Vistaar API. Defaults to "https://vistaar.kenpath.ai/api".
             params: Input parameters for model configuration and behavior.
             timeout: Request timeout in seconds. Defaults to 30.0 seconds.
+            interim_timeout: Time in seconds before sending interim message. Defaults to 3.0 seconds.
+            interim_message: Message to send if API takes longer than interim_timeout. Defaults to "एक क्षण थांबा, मी बघतो. ".
             **kwargs: Additional arguments passed to the parent LLMService.
         """
         super().__init__(**kwargs)
@@ -88,14 +94,18 @@ class VistaarLLMService(LLMService):
         self._session_id = params.session_id or str(uuid.uuid4())
         self._extra = params.extra if isinstance(params.extra, dict) else {}
         self._timeout = timeout
+        self._interim_timeout = interim_timeout
+        self._interim_message = interim_message
 
         # Create an async HTTP client
-        self._client = httpx.AsyncClient(timeout=httpx.Timeout(self._timeout))
+        self._client = httpx.AsyncClient(timeout=httpx.Timeout(self._timeout), verify=False)
 
         # Interruption handling state
         self._current_response = None  # Track current HTTP response stream
         self._is_interrupted = False  # Track if current generation was interrupted
         self._partial_response = []  # Track what was actually sent before interruption
+        self._interim_sent = False  # Track if interim message was sent
+        self._interim_task = None  # Track interim message task
 
         logger.info(
             f"Vistaar LLM initialized - Base URL: {self._base_url}, Session ID: {self._session_id}, Source Lang: {self._source_lang}, Target Lang: {self._target_lang}, Timeout: {self._timeout}s"
@@ -154,6 +164,11 @@ class VistaarLLMService(LLMService):
         # Set interruption flag
         self._is_interrupted = True
 
+        # Cancel interim message task if active
+        await self._cancel_interim_message_task(
+            "Cancelled interim message task - handling interruption"
+        )
+
         # Cancel ongoing HTTP response stream if active
         if self._current_response:
             try:
@@ -174,6 +189,19 @@ class VistaarLLMService(LLMService):
         # Clear current partial response
         self._partial_response = []
 
+    async def _send_interim_message(self):
+        """Send interim message after timeout."""
+        try:
+            await asyncio.sleep(self._interim_timeout)
+            if not self._is_interrupted and not self._interim_sent:
+                logger.info(f"Sending interim message after {self._interim_timeout}s timeout")
+                self._interim_sent = True
+                await self.push_frame(LLMTextFrame(text=self._interim_message))
+        except asyncio.CancelledError:
+            logger.debug("Interim message task cancelled")
+        except Exception as e:
+            logger.error(f"Error sending interim message: {e}")
+
     async def _stream_response(self, query: str) -> AsyncGenerator[str, None]:
         """Stream response from Vistaar API using Server-Sent Events.
 
@@ -205,6 +233,7 @@ class VistaarLLMService(LLMService):
         # Reset interruption state and partial response for new request
         self._is_interrupted = False
         self._partial_response = []
+        self._interim_sent = False
 
         try:
             # Use httpx to handle SSE streaming
@@ -268,6 +297,11 @@ class VistaarLLMService(LLMService):
         await self.start_processing_metrics()
         await self.start_ttfb_metrics()
 
+        # Start interim message task
+        self._interim_task = self.create_task(
+            self._send_interim_message(), "Vistaar LLM - _send_interim_message"
+        )
+
         first_chunk = True
         full_response = []
 
@@ -276,6 +310,10 @@ class VistaarLLMService(LLMService):
             if first_chunk:
                 await self.stop_ttfb_metrics()
                 first_chunk = False
+                # Cancel interim message task since we got first response
+                await self._cancel_interim_message_task(
+                    "Cancelled interim message task - got first response"
+                )
 
             # Push each text chunk as it arrives
             await self.push_frame(LLMTextFrame(text=text_chunk))
@@ -293,6 +331,10 @@ class VistaarLLMService(LLMService):
             logger.error(f"Vistaar traceback: {traceback.format_exc()}")
             raise
         finally:
+            # Clean up interim message task
+            await self._cancel_interim_message_task(
+                "Cancelled interim message task in finally block"
+            )
             await self.stop_processing_metrics()
             await self.push_frame(LLMFullResponseEndFrame())
 
@@ -308,7 +350,13 @@ class VistaarLLMService(LLMService):
         """
         await super().process_frame(frame, direction)
         context = None
-        if isinstance(frame,
+        if isinstance(frame, (EndFrame, CancelFrame)):
+            await self._cancel_interim_message_task(
+                f"Cancelled interim message task - received {type(frame).__name__}"
+            )
+            await self.push_frame(frame, direction)
+            return
+        elif isinstance(frame, StartInterruptionFrame):
             await self._handle_interruption()
             await self.push_frame(frame, direction)
             return
@@ -375,3 +423,9 @@ class VistaarLLMService(LLMService):
             asyncio.create_task(self._client.aclose())
         except:
             pass
+
+    async def _cancel_interim_message_task(self, message: str = "Cancelled interim message task"):
+        if self._interim_task and not self._interim_task.done():
+            await self.cancel_task(self._interim_task)
+            self._interim_task = None
+            logger.debug(message)
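The substance of this change is an interim-message watchdog: when a Vistaar request starts, the service spawns a task that sleeps for interim_timeout seconds and, if no interruption and no first streamed chunk has arrived, pushes interim_message (Marathi for roughly "wait a moment, let me check") as an LLMTextFrame; the task is cancelled on the first chunk, on interruption, on EndFrame/CancelFrame, and in the finally block. Two observations from the diff itself: the new docstring says interim_timeout "Defaults to 3.0 seconds" while the parameter default is 5.0, and the httpx client now disables TLS certificate verification (verify=False). The following is a minimal, self-contained asyncio sketch of the same watchdog pattern with invented names; it is not pipecat code.

# Standalone sketch of the interim-message watchdog pattern (illustrative only).
import asyncio
from typing import AsyncIterator


async def interim_watchdog(timeout: float, filler: str) -> None:
    """Emit a filler line if the first LLM chunk has not arrived in time."""
    try:
        await asyncio.sleep(timeout)
        print(f"[filler] {filler}")
    except asyncio.CancelledError:
        pass  # first chunk arrived (or the turn ended) before the timeout


async def stream_with_filler(chunks: AsyncIterator[str], timeout: float = 5.0) -> None:
    # Start the watchdog alongside the streaming request.
    watchdog = asyncio.create_task(interim_watchdog(timeout, "एक क्षण थांबा, मी बघतो."))
    try:
        first = True
        async for chunk in chunks:
            if first:
                watchdog.cancel()  # got the first token; no filler needed
                first = False
            print(chunk, end="", flush=True)
    finally:
        watchdog.cancel()  # mirror the cleanup in the diff's finally block


async def slow_llm() -> AsyncIterator[str]:
    await asyncio.sleep(6)  # longer than the watchdog timeout, so the filler fires
    for word in ["Hello", " there", "!"]:
        yield word


if __name__ == "__main__":
    asyncio.run(stream_with_filler(slow_llm()))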
{dv_pipecat_ai-0.0.82.dev807.dist-info → dv_pipecat_ai-0.0.82.dev815.dist-info}/WHEEL
RENAMED
File without changes
{dv_pipecat_ai-0.0.82.dev807.dist-info → dv_pipecat_ai-0.0.82.dev815.dist-info}/licenses/LICENSE
RENAMED
File without changes
{dv_pipecat_ai-0.0.82.dev807.dist-info → dv_pipecat_ai-0.0.82.dev815.dist-info}/top_level.txt
RENAMED
File without changes