dv-pipecat-ai 0.0.82.dev807__py3-none-any.whl → 0.0.82.dev816__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dv-pipecat-ai
- Version: 0.0.82.dev807
+ Version: 0.0.82.dev816
  Summary: An open source framework for voice (and multimodal) assistants
  License-Expression: BSD-2-Clause
  Project-URL: Source, https://github.com/pipecat-ai/pipecat
@@ -1,4 +1,4 @@
- dv_pipecat_ai-0.0.82.dev807.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
+ dv_pipecat_ai-0.0.82.dev816.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
  pipecat/__init__.py,sha256=j0Xm6adxHhd7D06dIyyPV_GlBYLlBnTAERVvD_jAARQ,861
  pipecat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pipecat/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -123,6 +123,7 @@ pipecat/runner/run.py,sha256=BuVI9-cpnQHOBxymkPoqpGaSaZImWZKLeu1g0JsvS8E,18818
  pipecat/runner/types.py,sha256=iG9A1ox1ePXiEo2bWANsi6RxpGOb5n_Am5O3enbojBM,1599
  pipecat/runner/utils.py,sha256=cT4G46skiIuZexm-KJ9ltrtufcGxPCAk7HW95rCy3tA,17724
  pipecat/serializers/__init__.py,sha256=OV61GQX5ZVU7l7Dt7UTBdv2wUF7ZvtbCoXryo7nnoGY,734
+ pipecat/serializers/asterisk.py,sha256=7CgIRRawVXgolyACGvTm7TlcmBrmb5KxSocY0zeidM0,5270
  pipecat/serializers/base_serializer.py,sha256=OyBUZccs2ZT9mfkBbq2tGsUJMvci6o-j90Cl1sicPaI,2030
  pipecat/serializers/convox.py,sha256=MXCLhV6GMnoP8bI6-EVrObhrftEyTGOmzVeIU5ywmPo,9536
  pipecat/serializers/exotel.py,sha256=LB4wYoXDjPmtkydrZ0G4H4u-SXpQw9KjyRzBZCYloEE,5907
@@ -140,7 +141,7 @@ pipecat/services/llm_service.py,sha256=VUzdf9mP8P9hdKwi2P544vwZOJHzB8ExfTAaKYKcm
  pipecat/services/mcp_service.py,sha256=OYftGfdfGlDmjsWbF2b3CuMhPw8B1jcgaZUUYZPIA_o,14298
  pipecat/services/openai.py,sha256=fg5-MIvwqgKTN6i5Kp7GD6XUvMRo3nlughuNt9QqLGA,27546
  pipecat/services/stt_service.py,sha256=tShjVEl374j1Sc3qsdhTuWaT-8NJsAn-3yFw0XLRm4A,11163
- pipecat/services/tts_service.py,sha256=WQZfK8o50XH11VDnaqKsOnk770K97pDHaezoASoX1r0,34380
+ pipecat/services/tts_service.py,sha256=9Qvo_k9bXlI7oq_OlMPLDk5uAlFuLYuwSlpzXgjj174,34426
  pipecat/services/vision_service.py,sha256=dtI3U5RX30R6i97d6Rh7bVMqeh5ogWuwnM9j6djeXQ8,2519
  pipecat/services/websocket_service.py,sha256=AWv7CL6G_XAh815xVaKNPpjP5escp8Q880SYHG7kCoI,5745
  pipecat/services/anthropic/__init__.py,sha256=NfRQFoNZcUHsJA4mggeLalEmgM08TZdBjkRRjmyp6jE,261
@@ -280,7 +281,7 @@ pipecat/services/together/llm.py,sha256=VSayO-U6g9Ld0xK9CXRQPUsd5gWJKtiA8qDAyXgs
  pipecat/services/ultravox/__init__.py,sha256=EoHCSXI2o0DFQslELgkhAGZtxDj63gZi-9ZEhXljaKE,259
  pipecat/services/ultravox/stt.py,sha256=uCQm_-LbycXdXRV6IE1a6Mymis6tyww7V8PnPzAQtx8,16586
  pipecat/services/vistaar/__init__.py,sha256=UFfSWFN5rbzl6NN-E_OH_MFaSYodZWNlenAU0wk-rAI,110
- pipecat/services/vistaar/llm.py,sha256=O-sznJDPivnhY_XUsr5xYcwkCqXpMv_zOuZ1rJBfn9Y,14631
+ pipecat/services/vistaar/llm.py,sha256=5SGrt36yF1yLXhJEstXsg3P1qL1yADHO0kS8kzC8jCE,17301
  pipecat/services/whisper/__init__.py,sha256=smADmw0Fv98k7cGRuHTEcljKTO2WdZqLpJd0qsTCwH8,281
  pipecat/services/whisper/base_stt.py,sha256=VhslESPnYIeVbmnQTzmlZPV35TH49duxYTvJe0epNnE,7850
  pipecat/services/whisper/stt.py,sha256=9Qd56vWMzg3LtHikQnfgyMtl4odE6BCHDbpAn3HSWjw,17480
@@ -336,7 +337,7 @@ pipecat/utils/tracing/service_decorators.py,sha256=HwDCqLGijhYD3F8nxDuQmEw-YkRw0
  pipecat/utils/tracing/setup.py,sha256=7TEgPNpq6M8lww8OQvf0P9FzYc5A30xICGklVA-fua0,2892
  pipecat/utils/tracing/turn_context_provider.py,sha256=ikon3plFOx0XbMrH6DdeHttNpb-U0gzMZIm3bWLc9eI,2485
  pipecat/utils/tracing/turn_trace_observer.py,sha256=dma16SBJpYSOE58YDWy89QzHyQFc_9gQZszKeWixuwc,9725
- dv_pipecat_ai-0.0.82.dev807.dist-info/METADATA,sha256=KiRlQndV2W1crKYJlr_ksFAeOJOee9sac40jX_hbyHg,32457
- dv_pipecat_ai-0.0.82.dev807.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dv_pipecat_ai-0.0.82.dev807.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
- dv_pipecat_ai-0.0.82.dev807.dist-info/RECORD,,
+ dv_pipecat_ai-0.0.82.dev816.dist-info/METADATA,sha256=mbNsHiiroNzd8-CN_3561ma11w1IgV51pDL4cQd-n24,32457
+ dv_pipecat_ai-0.0.82.dev816.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dv_pipecat_ai-0.0.82.dev816.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
+ dv_pipecat_ai-0.0.82.dev816.dist-info/RECORD,,
@@ -0,0 +1,130 @@
+ # asterisk_ws_serializer.py
+ import base64
+ import json
+ from typing import Literal, Optional
+ 
+ from pydantic import BaseModel
+ 
+ from pipecat.audio.utils import create_stream_resampler, pcm_to_ulaw, ulaw_to_pcm
+ from pipecat.frames.frames import (
+     AudioRawFrame,
+     CancelFrame,
+     EndFrame,
+     Frame,
+     InputAudioRawFrame,
+     StartFrame,
+     StartInterruptionFrame,
+     TransportMessageFrame,
+     TransportMessageUrgentFrame,
+ )
+ from pipecat.serializers.base_serializer import FrameSerializer, FrameSerializerType
+ 
+ 
+ class AsteriskFrameSerializer(FrameSerializer):
+     class InputParams(BaseModel):
+         """Configuration parameters for AsteriskFrameSerializer.
+ 
+         Parameters:
+             telephony_encoding: The encoding used by the telephony system (e.g., "pcmu" for μ-law).
+             telephony_sample_rate: The sample rate used by the telephony system (e.g., 8000 Hz).
+             sample_rate: Optional override for pipeline input sample rate.
+             auto_hang_up: Whether to automatically terminate call on EndFrame.
+         """
+ 
+         # What the ADAPTER/Asterisk is sending/expecting on the wire:
+         # "pcmu" -> μ-law @ 8k; "pcm16" -> signed 16-bit @ 8k
+         telephony_encoding: Literal["pcmu", "pcm16"] = "pcmu"
+         telephony_sample_rate: int = 8000
+         sample_rate: Optional[int] = None  # pipeline input rate
+         auto_hang_up: bool = False  # no-op here; adapter handles hangup
+ 
+     def __init__(self, stream_id: str, params: Optional[InputParams] = None):
+         self._stream_id = stream_id
+         self._params = params or AsteriskFrameSerializer.InputParams()
+         self._tel_rate = self._params.telephony_sample_rate
+         self._sample_rate = 0
+         self._in_resampler = create_stream_resampler()
+         self._out_resampler = create_stream_resampler()
+         self._hangup_sent = False
+ 
+     @property
+     def type(self) -> FrameSerializerType:
+         return FrameSerializerType.TEXT  # we send/recv JSON strings
+ 
+     async def setup(self, frame: StartFrame):
+         self._sample_rate = self._params.sample_rate or frame.audio_in_sample_rate
+ 
+     # Pipecat -> Adapter (play to caller)
+     async def serialize(self, frame: Frame) -> str | bytes | None:
+         # On pipeline end, ask bridge to hang up
+         if (
+             self._params.auto_hang_up
+             and not self._hangup_sent
+             and isinstance(frame, (EndFrame, CancelFrame))
+         ):
+             self._hangup_sent = True
+             return json.dumps({"event": "hangup"})
+         if isinstance(frame, StartInterruptionFrame):
+             return json.dumps({"event": "clear", "streamId": self._stream_id})
+         if isinstance(frame, AudioRawFrame):
+             pcm = frame.audio
+             if self._params.telephony_encoding == "pcmu":
+                 ul = await pcm_to_ulaw(pcm, frame.sample_rate, self._tel_rate, self._out_resampler)
+                 if not ul:
+                     return None
+                 payload = base64.b64encode(ul).decode("utf-8")
+                 return json.dumps(
+                     {
+                         "event": "media",
+                         "encoding": "pcmu",
+                         "sampleRate": self._tel_rate,
+                         "payload": payload,
+                     }
+                 )
+             else:  # "pcm16"
+                 # resample to 8k if needed, but data stays PCM16 bytes
+                 pcm8 = await self._out_resampler.resample(pcm, frame.sample_rate, self._tel_rate)
+                 if not pcm8:
+                     return None
+                 payload = base64.b64encode(pcm8).decode("utf-8")
+                 return json.dumps(
+                     {
+                         "event": "media",
+                         "encoding": "pcm16",
+                         "sampleRate": self._tel_rate,
+                         "payload": payload,
+                     }
+                 )
+         if isinstance(frame, (TransportMessageFrame, TransportMessageUrgentFrame)):
+             return json.dumps(frame.message)
+         return None
+ 
+     # Adapter -> Pipecat (audio from caller)
+     async def deserialize(self, data: str | bytes) -> Frame | None:
+         try:
+             msg = json.loads(data)
+         except Exception:
+             return None
+         if msg.get("event") == "media":
+             enc = msg.get("encoding")
+             sr = int(msg.get("sampleRate", self._tel_rate))
+             raw = base64.b64decode(msg.get("payload", ""))
+             if not raw:
+                 return None
+             if enc == "pcmu":
+                 pcm = await ulaw_to_pcm(raw, sr, self._sample_rate, self._in_resampler)
+             elif enc == "pcm16":
+                 # resample if pipeline rate != 8k
+                 pcm = await self._in_resampler.resample(raw, sr, self._sample_rate)
+             else:
+                 return None
+             if not pcm:
+                 return None
+             return InputAudioRawFrame(audio=pcm, num_channels=1, sample_rate=self._sample_rate)
+         elif msg.get("event") == "dtmf":
+             # optional: map to InputDTMFFrame if you want
+             return None
+         elif msg.get("event") == "hangup":
+             # Bridge is hanging up; you can treat as EndFrame if you want.
+             return CancelFrame()
+         return None
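
The new serializer plugs into Pipecat's websocket transports the same way the existing telephony serializers (convox, exotel) do. A minimal wiring sketch, assuming a FastAPI websocket bridge and a "start" handshake message carrying the stream id; both are assumptions about the Asterisk-side adapter, not something this diff defines:

# Sketch only: wiring AsteriskFrameSerializer into a FastAPI websocket transport.
# The "start"/"streamId" handshake shape is an assumed adapter protocol.
import json

from pipecat.serializers.asterisk import AsteriskFrameSerializer
from pipecat.transports.network.fastapi_websocket import (
    FastAPIWebsocketParams,
    FastAPIWebsocketTransport,
)


async def make_transport(websocket):
    # Learn the stream id from the adapter's first message (assumed protocol).
    start_msg = json.loads(await websocket.receive_text())
    stream_id = start_msg.get("streamId", "default")

    serializer = AsteriskFrameSerializer(
        stream_id,
        params=AsteriskFrameSerializer.InputParams(telephony_encoding="pcmu"),
    )
    return FastAPIWebsocketTransport(
        websocket=websocket,
        params=FastAPIWebsocketParams(
            audio_in_enabled=True,
            audio_out_enabled=True,
            serializer=serializer,
        ),
    )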
@@ -122,6 +122,9 @@ class TTSService(AIService):
          self._voice = None
          self._voice_clone_params = None
 
+         self._tracing_enabled: bool = False
+ 
+ 
          if text_filter:
              import warnings
 
@@ -30,13 +30,13 @@ from pipecat.processors.aggregators.openai_llm_context import (
      OpenAILLMContext,
      OpenAILLMContextFrame,
  )
+ from pipecat.processors.frame_processor import FrameDirection
+ from pipecat.services.llm_service import LLMService
  from pipecat.services.openai.llm import (
      OpenAIAssistantContextAggregator,
      OpenAIContextAggregatorPair,
      OpenAIUserContextAggregator,
  )
- from pipecat.processors.frame_processor import FrameDirection
- from pipecat.services.llm_service import LLMService
 
 
  class VistaarLLMService(LLMService):
@@ -68,6 +68,8 @@ class VistaarLLMService(LLMService):
          base_url: str = "https://vistaar.kenpath.ai/api",
          params: Optional[InputParams] = None,
          timeout: float = 30.0,
+         interim_timeout: float = 5.0,
+         interim_message: str = "एक क्षण थांबा, मी बघतो. ",
          **kwargs,
      ):
          """Initialize Vistaar LLM service.
@@ -76,6 +78,8 @@ class VistaarLLMService(LLMService):
              base_url: The base URL for Vistaar API. Defaults to "https://vistaar.kenpath.ai/api".
              params: Input parameters for model configuration and behavior.
              timeout: Request timeout in seconds. Defaults to 30.0 seconds.
+             interim_timeout: Time in seconds to wait before sending the interim message. Defaults to 5.0 seconds.
+             interim_message: Message to send if the API takes longer than interim_timeout. Defaults to "एक क्षण थांबा, मी बघतो. ".
              **kwargs: Additional arguments passed to the parent LLMService.
          """
          super().__init__(**kwargs)
@@ -88,14 +92,18 @@ class VistaarLLMService(LLMService):
          self._session_id = params.session_id or str(uuid.uuid4())
          self._extra = params.extra if isinstance(params.extra, dict) else {}
          self._timeout = timeout
+         self._interim_timeout = interim_timeout
+         self._interim_message = interim_message
 
          # Create an async HTTP client
-         self._client = httpx.AsyncClient(timeout=httpx.Timeout(self._timeout))
+         self._client = httpx.AsyncClient(timeout=httpx.Timeout(self._timeout), verify=False)
 
          # Interruption handling state
          self._current_response = None  # Track current HTTP response stream
          self._is_interrupted = False  # Track if current generation was interrupted
          self._partial_response = []  # Track what was actually sent before interruption
+         self._interim_sent = False  # Track if interim message was sent
+         self._interim_task = None  # Track interim message task
 
          logger.info(
              f"Vistaar LLM initialized - Base URL: {self._base_url}, Session ID: {self._session_id}, Source Lang: {self._source_lang}, Target Lang: {self._target_lang}, Timeout: {self._timeout}s"
@@ -154,6 +162,11 @@ class VistaarLLMService(LLMService):
          # Set interruption flag
          self._is_interrupted = True
 
+         # Cancel interim message task if active
+         await self._cancel_interim_message_task(
+             "Cancelled interim message task - handling interruption"
+         )
+ 
          # Cancel ongoing HTTP response stream if active
          if self._current_response:
              try:
@@ -174,6 +187,19 @@ class VistaarLLMService(LLMService):
          # Clear current partial response
          self._partial_response = []
 
+     async def _send_interim_message(self):
+         """Send interim message after timeout."""
+         try:
+             await asyncio.sleep(self._interim_timeout)
+             if not self._is_interrupted and not self._interim_sent:
+                 logger.info(f"Sending interim message after {self._interim_timeout}s timeout")
+                 self._interim_sent = True
+                 await self.push_frame(LLMTextFrame(text=self._interim_message))
+         except asyncio.CancelledError:
+             logger.debug("Interim message task cancelled")
+         except Exception as e:
+             logger.error(f"Error sending interim message: {e}")
+ 
      async def _stream_response(self, query: str) -> AsyncGenerator[str, None]:
          """Stream response from Vistaar API using Server-Sent Events.
 
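
The interim-message logic added in this diff is a watchdog pattern: start a timer alongside the request and cancel it as soon as the first chunk arrives, so the caller only hears the holding message when the API is slow. Stripped of the Pipecat frame machinery, the shape is roughly this (a standalone asyncio sketch; stream and send are stand-ins, not the service's actual API):

import asyncio


async def watchdog(delay: float, send):
    # Fires once unless cancelled before the delay elapses.
    try:
        await asyncio.sleep(delay)
        await send("One moment, let me check.")  # holding message
    except asyncio.CancelledError:
        pass  # first chunk arrived in time


async def stream_with_interim(stream, send, delay: float = 5.0):
    task = asyncio.create_task(watchdog(delay, send))
    try:
        async for chunk in stream:
            if not task.done():
                task.cancel()  # real output arrived; drop the holding message
            await send(chunk)
    finally:
        if not task.done():
            task.cancel()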
@@ -205,6 +231,7 @@ class VistaarLLMService(LLMService):
          # Reset interruption state and partial response for new request
          self._is_interrupted = False
          self._partial_response = []
+         self._interim_sent = False
 
          try:
              # Use httpx to handle SSE streaming
@@ -268,6 +295,11 @@ class VistaarLLMService(LLMService):
              await self.start_processing_metrics()
              await self.start_ttfb_metrics()
 
+             # Start interim message task
+             self._interim_task = self.create_task(
+                 self._send_interim_message(), "Vistaar LLM - _send_interim_message"
+             )
+ 
              first_chunk = True
              full_response = []
 
@@ -276,6 +308,10 @@ class VistaarLLMService(LLMService):
                  if first_chunk:
                      await self.stop_ttfb_metrics()
                      first_chunk = False
+                     # Cancel interim message task since we got first response
+                     await self._cancel_interim_message_task(
+                         "Cancelled interim message task - got first response"
+                     )
 
                  # Push each text chunk as it arrives
                  await self.push_frame(LLMTextFrame(text=text_chunk))
@@ -293,6 +329,10 @@ class VistaarLLMService(LLMService):
              logger.error(f"Vistaar traceback: {traceback.format_exc()}")
              raise
          finally:
+             # Clean up interim message task
+             await self._cancel_interim_message_task(
+                 "Cancelled interim message task in finally block"
+             )
              await self.stop_processing_metrics()
              await self.push_frame(LLMFullResponseEndFrame())
 
@@ -308,7 +348,13 @@ class VistaarLLMService(LLMService):
          """
          await super().process_frame(frame, direction)
          context = None
-         if isinstance(frame, StartInterruptionFrame):
+         if isinstance(frame, (EndFrame, CancelFrame)):
+             await self._cancel_interim_message_task(
+                 f"Cancelled interim message task - received {type(frame).__name__}"
+             )
+             await self.push_frame(frame, direction)
+             return
+         elif isinstance(frame, StartInterruptionFrame):
              await self._handle_interruption()
              await self.push_frame(frame, direction)
              return
@@ -375,3 +421,9 @@ class VistaarLLMService(LLMService):
                  asyncio.create_task(self._client.aclose())
              except:
                  pass
+ 
+     async def _cancel_interim_message_task(self, message: str = "Cancelled interim message task"):
+         if self._interim_task and not self._interim_task.done():
+             await self.cancel_task(self._interim_task)
+             self._interim_task = None
+             logger.debug(message)
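
For callers, the new behavior surfaces as two constructor arguments. A construction example with illustrative values (the defaults shown are the ones added in this diff; the interim message is Marathi for "Wait a moment, let me check."):

from pipecat.services.vistaar.llm import VistaarLLMService

llm = VistaarLLMService(
    timeout=30.0,
    interim_timeout=5.0,  # seconds to wait before the holding message
    interim_message="एक क्षण थांबा, मी बघतो. ",
)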