dv-pipecat-ai 0.0.82.dev815__py3-none-any.whl → 0.0.82.dev857__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
This version of dv-pipecat-ai has been flagged as potentially problematic.
- {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/METADATA +8 -3
- {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/RECORD +106 -79
- pipecat/adapters/base_llm_adapter.py +44 -6
- pipecat/adapters/services/anthropic_adapter.py +302 -2
- pipecat/adapters/services/aws_nova_sonic_adapter.py +40 -2
- pipecat/adapters/services/bedrock_adapter.py +40 -2
- pipecat/adapters/services/gemini_adapter.py +276 -6
- pipecat/adapters/services/open_ai_adapter.py +88 -7
- pipecat/adapters/services/open_ai_realtime_adapter.py +39 -1
- pipecat/audio/dtmf/__init__.py +0 -0
- pipecat/audio/dtmf/types.py +47 -0
- pipecat/audio/dtmf/utils.py +70 -0
- pipecat/audio/filters/aic_filter.py +199 -0
- pipecat/audio/utils.py +9 -7
- pipecat/extensions/ivr/__init__.py +0 -0
- pipecat/extensions/ivr/ivr_navigator.py +452 -0
- pipecat/frames/frames.py +156 -43
- pipecat/pipeline/llm_switcher.py +76 -0
- pipecat/pipeline/parallel_pipeline.py +3 -3
- pipecat/pipeline/service_switcher.py +144 -0
- pipecat/pipeline/task.py +68 -28
- pipecat/pipeline/task_observer.py +10 -0
- pipecat/processors/aggregators/dtmf_aggregator.py +2 -2
- pipecat/processors/aggregators/llm_context.py +277 -0
- pipecat/processors/aggregators/llm_response.py +48 -15
- pipecat/processors/aggregators/llm_response_universal.py +840 -0
- pipecat/processors/aggregators/openai_llm_context.py +3 -3
- pipecat/processors/dtmf_aggregator.py +0 -2
- pipecat/processors/filters/stt_mute_filter.py +0 -2
- pipecat/processors/frame_processor.py +18 -11
- pipecat/processors/frameworks/rtvi.py +17 -10
- pipecat/processors/metrics/sentry.py +2 -0
- pipecat/runner/daily.py +137 -36
- pipecat/runner/run.py +1 -1
- pipecat/runner/utils.py +7 -7
- pipecat/serializers/asterisk.py +20 -4
- pipecat/serializers/exotel.py +1 -1
- pipecat/serializers/plivo.py +1 -1
- pipecat/serializers/telnyx.py +1 -1
- pipecat/serializers/twilio.py +1 -1
- pipecat/services/__init__.py +2 -2
- pipecat/services/anthropic/llm.py +113 -28
- pipecat/services/asyncai/tts.py +4 -0
- pipecat/services/aws/llm.py +82 -8
- pipecat/services/aws/tts.py +0 -10
- pipecat/services/aws_nova_sonic/aws.py +5 -0
- pipecat/services/cartesia/tts.py +28 -16
- pipecat/services/cerebras/llm.py +15 -10
- pipecat/services/deepgram/stt.py +8 -0
- pipecat/services/deepseek/llm.py +13 -8
- pipecat/services/fireworks/llm.py +13 -8
- pipecat/services/fish/tts.py +8 -6
- pipecat/services/gemini_multimodal_live/gemini.py +5 -0
- pipecat/services/gladia/config.py +7 -1
- pipecat/services/gladia/stt.py +23 -15
- pipecat/services/google/llm.py +159 -59
- pipecat/services/google/llm_openai.py +18 -3
- pipecat/services/grok/llm.py +2 -1
- pipecat/services/llm_service.py +38 -3
- pipecat/services/mem0/memory.py +2 -1
- pipecat/services/mistral/llm.py +5 -6
- pipecat/services/nim/llm.py +2 -1
- pipecat/services/openai/base_llm.py +88 -26
- pipecat/services/openai/image.py +6 -1
- pipecat/services/openai_realtime_beta/openai.py +5 -2
- pipecat/services/openpipe/llm.py +6 -8
- pipecat/services/perplexity/llm.py +13 -8
- pipecat/services/playht/tts.py +9 -6
- pipecat/services/rime/tts.py +1 -1
- pipecat/services/sambanova/llm.py +18 -13
- pipecat/services/sarvam/tts.py +415 -10
- pipecat/services/speechmatics/stt.py +2 -2
- pipecat/services/tavus/video.py +1 -1
- pipecat/services/tts_service.py +15 -5
- pipecat/services/vistaar/llm.py +2 -5
- pipecat/transports/base_input.py +32 -19
- pipecat/transports/base_output.py +39 -5
- pipecat/transports/daily/__init__.py +0 -0
- pipecat/transports/daily/transport.py +2371 -0
- pipecat/transports/daily/utils.py +410 -0
- pipecat/transports/livekit/__init__.py +0 -0
- pipecat/transports/livekit/transport.py +1042 -0
- pipecat/transports/network/fastapi_websocket.py +12 -546
- pipecat/transports/network/small_webrtc.py +12 -922
- pipecat/transports/network/webrtc_connection.py +9 -595
- pipecat/transports/network/websocket_client.py +12 -481
- pipecat/transports/network/websocket_server.py +12 -487
- pipecat/transports/services/daily.py +9 -2334
- pipecat/transports/services/helpers/daily_rest.py +12 -396
- pipecat/transports/services/livekit.py +12 -975
- pipecat/transports/services/tavus.py +12 -757
- pipecat/transports/smallwebrtc/__init__.py +0 -0
- pipecat/transports/smallwebrtc/connection.py +612 -0
- pipecat/transports/smallwebrtc/transport.py +936 -0
- pipecat/transports/tavus/__init__.py +0 -0
- pipecat/transports/tavus/transport.py +770 -0
- pipecat/transports/websocket/__init__.py +0 -0
- pipecat/transports/websocket/client.py +494 -0
- pipecat/transports/websocket/fastapi.py +559 -0
- pipecat/transports/websocket/server.py +500 -0
- pipecat/transports/whatsapp/__init__.py +0 -0
- pipecat/transports/whatsapp/api.py +345 -0
- pipecat/transports/whatsapp/client.py +364 -0
- {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/top_level.txt +0 -0
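The file list shows a transport-layer reorganization: the old modules under pipecat/transports/network/ and pipecat/transports/services/ shrink to thin shims (+12 lines each), while new packages (daily/, livekit/, smallwebrtc/, tavus/, websocket/, whatsapp/) carry the implementations. A hedged migration sketch, assuming the public classes are re-exported unchanged by the new packages:

```python
# Migration sketch (assumption: SmallWebRTCTransport and DailyTransport are
# re-exported unchanged from the new module paths; the old paths still work
# but now emit a DeprecationWarning via the shim modules).

# Before (deprecated in this release):
# from pipecat.transports.network.small_webrtc import SmallWebRTCTransport
# from pipecat.transports.services.daily import DailyTransport

# After:
from pipecat.transports.smallwebrtc.transport import SmallWebRTCTransport
from pipecat.transports.daily.transport import DailyTransport
```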
pipecat/transports/network/small_webrtc.py (+12 -922) — the module body is removed in favor of a deprecation shim. The rendered diff is reconstructed below; the import block at old lines 14-25 is truncated in the registry rendering and is left as-is.

@@ -11,925 +11,15 @@ real-time audio and video communication. It supports bidirectional media
 streaming, application messaging, and client connection management.
 """
 
-import
-
-import
-
-
-
-
-
-
-
-
-
-    EndFrame,
-    Frame,
-    InputAudioRawFrame,
-    OutputAudioRawFrame,
-    OutputImageRawFrame,
-    SpriteFrame,
-    StartFrame,
-    TransportMessageFrame,
-    TransportMessageUrgentFrame,
-    UserImageRawFrame,
-    UserImageRequestFrame,
-)
-from pipecat.processors.frame_processor import FrameDirection
-from pipecat.transports.base_input import BaseInputTransport
-from pipecat.transports.base_output import BaseOutputTransport
-from pipecat.transports.base_transport import BaseTransport, TransportParams
-from pipecat.transports.network.webrtc_connection import SmallWebRTCConnection
-
-try:
-    import cv2
-    from aiortc import VideoStreamTrack
-    from aiortc.mediastreams import AudioStreamTrack, MediaStreamError
-    from av import AudioFrame, AudioResampler, VideoFrame
-except ModuleNotFoundError as e:
-    logger.error(f"Exception: {e}")
-    logger.error("In order to use the SmallWebRTC, you need to `pip install pipecat-ai[webrtc]`.")
-    raise Exception(f"Missing module: {e}")
-
-CAM_VIDEO_SOURCE = "camera"
-SCREEN_VIDEO_SOURCE = "screenVideo"
-MIC_AUDIO_SOURCE = "microphone"
-
-
-class SmallWebRTCCallbacks(BaseModel):
-    """Callback handlers for SmallWebRTC events.
-
-    Parameters:
-        on_app_message: Called when an application message is received.
-        on_client_connected: Called when a client establishes connection.
-        on_client_disconnected: Called when a client disconnects.
-    """
-
-    on_app_message: Callable[[Any], Awaitable[None]]
-    on_client_connected: Callable[[SmallWebRTCConnection], Awaitable[None]]
-    on_client_disconnected: Callable[[SmallWebRTCConnection], Awaitable[None]]
-
-
-class RawAudioTrack(AudioStreamTrack):
-    """Custom audio stream track for WebRTC output.
-
-    Handles audio frame generation and timing for WebRTC transmission,
-    supporting queued audio data with proper synchronization.
-    """
-
-    def __init__(self, sample_rate):
-        """Initialize the raw audio track.
-
-        Args:
-            sample_rate: The audio sample rate in Hz.
-        """
-        super().__init__()
-        self._sample_rate = sample_rate
-        self._samples_per_10ms = sample_rate * 10 // 1000
-        self._bytes_per_10ms = self._samples_per_10ms * 2  # 16-bit (2 bytes per sample)
-        self._timestamp = 0
-        self._start = time.time()
-        # Queue of (bytes, future), broken into 10ms sub chunks as needed
-        self._chunk_queue = deque()
-
-    def add_audio_bytes(self, audio_bytes: bytes):
-        """Add audio bytes to the buffer for transmission.
-
-        Args:
-            audio_bytes: Raw audio data to queue for transmission.
-
-        Returns:
-            A Future that completes when the data is processed.
-
-        Raises:
-            ValueError: If audio bytes are not a multiple of 10ms size.
-        """
-        if len(audio_bytes) % self._bytes_per_10ms != 0:
-            raise ValueError("Audio bytes must be a multiple of 10ms size.")
-        future = asyncio.get_running_loop().create_future()
-
-        # Break input into 10ms chunks
-        for i in range(0, len(audio_bytes), self._bytes_per_10ms):
-            chunk = audio_bytes[i : i + self._bytes_per_10ms]
-            # Only the last chunk carries the future to be resolved once fully consumed
-            fut = future if i + self._bytes_per_10ms >= len(audio_bytes) else None
-            self._chunk_queue.append((chunk, fut))
-
-        return future
-
-    async def recv(self):
-        """Return the next audio frame for WebRTC transmission.
-
-        Returns:
-            An AudioFrame containing the next audio data or silence.
-        """
-        # Compute required wait time for synchronization
-        if self._timestamp > 0:
-            wait = self._start + (self._timestamp / self._sample_rate) - time.time()
-            if wait > 0:
-                await asyncio.sleep(wait)
-
-        if self._chunk_queue:
-            chunk, future = self._chunk_queue.popleft()
-            if future and not future.done():
-                future.set_result(True)
-        else:
-            chunk = bytes(self._bytes_per_10ms)  # silence
-
-        # Convert the byte data to an ndarray of int16 samples
-        samples = np.frombuffer(chunk, dtype=np.int16)
-
-        # Create AudioFrame
-        frame = AudioFrame.from_ndarray(samples[None, :], layout="mono")
-        frame.sample_rate = self._sample_rate
-        frame.pts = self._timestamp
-        frame.time_base = fractions.Fraction(1, self._sample_rate)
-        self._timestamp += self._samples_per_10ms
-        return frame
-
-
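Worked numbers for the 10 ms chunk contract above, as a minimal sketch (not part of the diff; assumes RawAudioTrack is importable as defined here): at 16 kHz mono 16-bit PCM, one 10 ms chunk is 160 samples = 320 bytes, so `add_audio_bytes` only accepts buffers whose length is a multiple of 320.

```python
import asyncio

SAMPLE_RATE = 16000
SAMPLES_PER_10MS = SAMPLE_RATE * 10 // 1000  # 160 samples
BYTES_PER_10MS = SAMPLES_PER_10MS * 2        # 320 bytes of 16-bit mono PCM

async def main():
    track = RawAudioTrack(sample_rate=SAMPLE_RATE)  # class from the removed module above
    # 200 ms of silence: a whole number of 10 ms chunks (20 x 320 bytes).
    done = track.add_audio_bytes(bytes(20 * BYTES_PER_10MS))
    for _ in range(20):                 # recv() paces itself against wall-clock time
        frame = await track.recv()      # one 10 ms AudioFrame per call
    print(frame.samples, frame.sample_rate)  # 160 16000
    assert done.done()                  # future resolves when the last chunk is consumed

asyncio.run(main())
```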
-class RawVideoTrack(VideoStreamTrack):
-    """Custom video stream track for WebRTC output.
-
-    Handles video frame queuing and conversion for WebRTC transmission.
-    """
-
-    def __init__(self, width, height):
-        """Initialize the raw video track.
-
-        Args:
-            width: Video frame width in pixels.
-            height: Video frame height in pixels.
-        """
-        super().__init__()
-        self._width = width
-        self._height = height
-        self._video_buffer = asyncio.Queue()
-
-    def add_video_frame(self, frame):
-        """Add a video frame to the transmission buffer.
-
-        Args:
-            frame: The video frame to queue for transmission.
-        """
-        self._video_buffer.put_nowait(frame)
-
-    async def recv(self):
-        """Return the next video frame for WebRTC transmission.
-
-        Returns:
-            A VideoFrame ready for WebRTC transmission.
-        """
-        raw_frame = await self._video_buffer.get()
-
-        # Convert bytes to NumPy array
-        frame_data = np.frombuffer(raw_frame.image, dtype=np.uint8).reshape(
-            (self._height, self._width, 3)
-        )
-
-        frame = VideoFrame.from_ndarray(frame_data, format="rgb24")
-
-        # Assign timestamp
-        frame.pts, frame.time_base = await self.next_timestamp()
-
-        return frame
-
-
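A small check of the buffer contract in RawVideoTrack.recv() above: the queued frame's `.image` must be exactly height x width x 3 bytes of RGB24 data, or the reshape fails.

```python
import numpy as np

width, height = 640, 480
image_bytes = bytes(height * width * 3)  # one black RGB24 frame
frame_data = np.frombuffer(image_bytes, dtype=np.uint8).reshape((height, width, 3))
print(frame_data.shape)  # (480, 640, 3)
```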
-class SmallWebRTCClient:
-    """WebRTC client implementation for handling connections and media streams.
-
-    Manages WebRTC peer connections, audio/video streaming, and application
-    messaging through the SmallWebRTCConnection interface.
-    """
-
-    FORMAT_CONVERSIONS = {
-        "yuv420p": cv2.COLOR_YUV2RGB_I420,
-        "yuvj420p": cv2.COLOR_YUV2RGB_I420,  # OpenCV treats both the same
-        "nv12": cv2.COLOR_YUV2RGB_NV12,
-        "gray": cv2.COLOR_GRAY2RGB,
-    }
-
-    def __init__(self, webrtc_connection: SmallWebRTCConnection, callbacks: SmallWebRTCCallbacks):
-        """Initialize the WebRTC client.
-
-        Args:
-            webrtc_connection: The underlying WebRTC connection handler.
-            callbacks: Event callbacks for connection and message handling.
-        """
-        self._webrtc_connection = webrtc_connection
-        self._closing = False
-        self._callbacks = callbacks
-
-        self._audio_output_track = None
-        self._video_output_track = None
-        self._audio_input_track: Optional[AudioStreamTrack] = None
-        self._video_input_track: Optional[VideoStreamTrack] = None
-        self._screen_video_track: Optional[VideoStreamTrack] = None
-
-        self._params = None
-        self._audio_in_channels = None
-        self._in_sample_rate = None
-        self._out_sample_rate = None
-        self._leave_counter = 0
-
-        # We are always resampling it for 16000 if the sample_rate that we receive is bigger than that.
-        # otherwise we face issues with Silero VAD
-        self._pipecat_resampler = AudioResampler("s16", "mono", 16000)
-
-        @self._webrtc_connection.event_handler("connected")
-        async def on_connected(connection: SmallWebRTCConnection):
-            logger.debug("Peer connection established.")
-            await self._handle_client_connected()
-
-        @self._webrtc_connection.event_handler("disconnected")
-        async def on_disconnected(connection: SmallWebRTCConnection):
-            logger.debug("Peer connection lost.")
-            await self._handle_peer_disconnected()
-
-        @self._webrtc_connection.event_handler("closed")
-        async def on_closed(connection: SmallWebRTCConnection):
-            logger.debug("Client connection closed.")
-            await self._handle_client_closed()
-
-        @self._webrtc_connection.event_handler("app-message")
-        async def on_app_message(connection: SmallWebRTCConnection, message: Any):
-            await self._handle_app_message(message)
-
-    def _convert_frame(self, frame_array: np.ndarray, format_name: str) -> np.ndarray:
-        """Convert a video frame to RGB format based on the input format.
-
-        Args:
-            frame_array: The input frame as a NumPy array.
-            format_name: The format of the input frame.
-
-        Returns:
-            The converted RGB frame as a NumPy array.
-
-        Raises:
-            ValueError: If the format is unsupported.
-        """
-        if format_name.startswith("rgb"):  # Already in RGB, no conversion needed
-            return frame_array
-
-        conversion_code = SmallWebRTCClient.FORMAT_CONVERSIONS.get(format_name)
-
-        if conversion_code is None:
-            raise ValueError(f"Unsupported format: {format_name}")
-
-        return cv2.cvtColor(frame_array, conversion_code)
-
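A sketch of the conversion _convert_frame() performs for a yuv420p (I420) frame: PyAV's to_ndarray() hands back a planar array of shape (height * 3 // 2, width), which OpenCV turns into an (height, width, 3) RGB image.

```python
import cv2
import numpy as np

width, height = 640, 480
yuv = np.zeros((height * 3 // 2, width), dtype=np.uint8)  # planar I420 layout
rgb = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB_I420)
print(rgb.shape)  # (480, 640, 3)
```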
-    async def read_video_frame(self, video_source: str):
-        """Read video frames from the WebRTC connection.
-
-        Reads a video frame from the given MediaStreamTrack, converts it to RGB,
-        and creates an InputImageRawFrame.
-
-        Args:
-            video_source: Video source to capture ("camera" or "screenVideo").
-
-        Yields:
-            UserImageRawFrame objects containing video data from the peer.
-        """
-        while True:
-            video_track = (
-                self._video_input_track
-                if video_source == CAM_VIDEO_SOURCE
-                else self._screen_video_track
-            )
-            if video_track is None:
-                await asyncio.sleep(0.01)
-                continue
-
-            try:
-                frame = await asyncio.wait_for(video_track.recv(), timeout=2.0)
-            except asyncio.TimeoutError:
-                if self._webrtc_connection.is_connected():
-                    logger.warning("Timeout: No video frame received within the specified time.")
-                    # self._webrtc_connection.ask_to_renegotiate()
-                frame = None
-            except MediaStreamError:
-                logger.warning("Received an unexpected media stream error while reading the audio.")
-                frame = None
-
-            if frame is None or not isinstance(frame, VideoFrame):
-                # If no valid frame, sleep for a bit
-                await asyncio.sleep(0.01)
-                continue
-
-            format_name = frame.format.name
-            # Convert frame to NumPy array in its native format
-            frame_array = frame.to_ndarray(format=format_name)
-            frame_rgb = self._convert_frame(frame_array, format_name)
-
-            image_frame = UserImageRawFrame(
-                user_id=self._webrtc_connection.pc_id,
-                image=frame_rgb.tobytes(),
-                size=(frame.width, frame.height),
-                format="RGB",
-            )
-            image_frame.transport_source = video_source
-
-            yield image_frame
-
-    async def read_audio_frame(self):
-        """Read audio frames from the WebRTC connection.
-
-        Reads 20ms of audio from the given MediaStreamTrack and creates an InputAudioRawFrame.
-
-        Yields:
-            InputAudioRawFrame objects containing audio data from the peer.
-        """
-        while True:
-            if self._audio_input_track is None:
-                await asyncio.sleep(0.01)
-                continue
-
-            try:
-                frame = await asyncio.wait_for(self._audio_input_track.recv(), timeout=2.0)
-            except asyncio.TimeoutError:
-                if self._webrtc_connection.is_connected():
-                    logger.warning("Timeout: No audio frame received within the specified time.")
-                frame = None
-            except MediaStreamError:
-                logger.warning("Received an unexpected media stream error while reading the audio.")
-                frame = None
-
-            if frame is None or not isinstance(frame, AudioFrame):
-                # If we don't read any audio let's sleep for a little bit (i.e. busy wait).
-                await asyncio.sleep(0.01)
-                continue
-
-            if frame.sample_rate > self._in_sample_rate:
-                resampled_frames = self._pipecat_resampler.resample(frame)
-                for resampled_frame in resampled_frames:
-                    # 16-bit PCM bytes
-                    pcm_bytes = resampled_frame.to_ndarray().astype(np.int16).tobytes()
-                    audio_frame = InputAudioRawFrame(
-                        audio=pcm_bytes,
-                        sample_rate=resampled_frame.sample_rate,
-                        num_channels=self._audio_in_channels,
-                    )
-                    yield audio_frame
-            else:
-                # 16-bit PCM bytes
-                pcm_bytes = frame.to_ndarray().astype(np.int16).tobytes()
-                audio_frame = InputAudioRawFrame(
-                    audio=pcm_bytes,
-                    sample_rate=frame.sample_rate,
-                    num_channels=self._audio_in_channels,
-                )
-                yield audio_frame
-
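A minimal sketch of the downsampling path in read_audio_frame() above, assuming a 20 ms, 48 kHz mono input frame (note: in recent PyAV versions resample() returns a list of frames, and the resampler may buffer a few samples across calls):

```python
import numpy as np
from av import AudioFrame, AudioResampler

resampler = AudioResampler("s16", "mono", 16000)  # same arguments as the client above

samples = np.zeros((1, 960), dtype=np.int16)  # 20 ms at 48 kHz, one channel
frame = AudioFrame.from_ndarray(samples, format="s16", layout="mono")
frame.sample_rate = 48000

for resampled in resampler.resample(frame):
    pcm_bytes = resampled.to_ndarray().astype(np.int16).tobytes()
    print(resampled.sample_rate, len(pcm_bytes))  # 16000, about 640 bytes (20 ms)
```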
-    async def write_audio_frame(self, frame: OutputAudioRawFrame):
-        """Write an audio frame to the WebRTC connection.
-
-        Args:
-            frame: The audio frame to transmit.
-        """
-        if self._can_send() and self._audio_output_track:
-            await self._audio_output_track.add_audio_bytes(frame.audio)
-
-    async def write_video_frame(self, frame: OutputImageRawFrame):
-        """Write a video frame to the WebRTC connection.
-
-        Args:
-            frame: The video frame to transmit.
-        """
-        if self._can_send() and self._video_output_track:
-            self._video_output_track.add_video_frame(frame)
-
-    async def setup(self, _params: TransportParams, frame):
-        """Set up the client with transport parameters.
-
-        Args:
-            _params: Transport configuration parameters.
-            frame: The initialization frame containing setup data.
-        """
-        self._audio_in_channels = _params.audio_in_channels
-        self._in_sample_rate = _params.audio_in_sample_rate or frame.audio_in_sample_rate
-        self._out_sample_rate = _params.audio_out_sample_rate or frame.audio_out_sample_rate
-        self._params = _params
-        self._leave_counter += 1
-
-    async def connect(self):
-        """Establish the WebRTC connection."""
-        if self._webrtc_connection.is_connected():
-            # already initialized
-            return
-
-        logger.info(f"Connecting to Small WebRTC")
-        await self._webrtc_connection.connect()
-
-    async def disconnect(self):
-        """Disconnect from the WebRTC peer."""
-        self._leave_counter -= 1
-        if self._leave_counter > 0:
-            return
-
-        if self.is_connected and not self.is_closing:
-            logger.info(f"Disconnecting to Small WebRTC")
-            self._closing = True
-            await self._webrtc_connection.disconnect()
-            await self._handle_peer_disconnected()
-
-    async def send_message(self, frame: TransportMessageFrame | TransportMessageUrgentFrame):
-        """Send an application message through the WebRTC connection.
-
-        Args:
-            frame: The message frame to send.
-        """
-        if self._can_send():
-            self._webrtc_connection.send_app_message(frame.message)
-
-    async def _handle_client_connected(self):
-        """Handle client connection establishment."""
-        # There is nothing to do here yet, the pipeline is still not ready
-        if not self._params:
-            return
-
-        self._audio_input_track = self._webrtc_connection.audio_input_track()
-        self._video_input_track = self._webrtc_connection.video_input_track()
-        self._screen_video_track = self._webrtc_connection.screen_video_input_track()
-        if self._params.audio_out_enabled:
-            self._audio_output_track = RawAudioTrack(sample_rate=self._out_sample_rate)
-            self._webrtc_connection.replace_audio_track(self._audio_output_track)
-
-        if self._params.video_out_enabled:
-            self._video_output_track = RawVideoTrack(
-                width=self._params.video_out_width, height=self._params.video_out_height
-            )
-            self._webrtc_connection.replace_video_track(self._video_output_track)
-
-        await self._callbacks.on_client_connected(self._webrtc_connection)
-
-    async def _handle_peer_disconnected(self):
-        """Handle peer disconnection cleanup."""
-        self._audio_input_track = None
-        self._video_input_track = None
-        self._screen_video_track = None
-        self._audio_output_track = None
-        self._video_output_track = None
-
-    async def _handle_client_closed(self):
-        """Handle client connection closure."""
-        self._audio_input_track = None
-        self._video_input_track = None
-        self._screen_video_track = None
-        self._audio_output_track = None
-        self._video_output_track = None
-        await self._callbacks.on_client_disconnected(self._webrtc_connection)
-
-    async def _handle_app_message(self, message: Any):
-        """Handle incoming application messages."""
-        await self._callbacks.on_app_message(message)
-
-    def _can_send(self):
-        """Check if the connection is ready for sending data."""
-        return self.is_connected and not self.is_closing
-
-    @property
-    def is_connected(self) -> bool:
-        """Check if the WebRTC connection is established.
-
-        Returns:
-            True if connected to the peer.
-        """
-        return self._webrtc_connection.is_connected()
-
-    @property
-    def is_closing(self) -> bool:
-        """Check if the connection is in the process of closing.
-
-        Returns:
-            True if the connection is closing.
-        """
-        return self._closing
-
-
-class SmallWebRTCInputTransport(BaseInputTransport):
-    """Input transport implementation for SmallWebRTC.
-
-    Handles incoming audio and video streams from WebRTC peers,
-    including user image requests and application message handling.
-    """
-
-    def __init__(
-        self,
-        client: SmallWebRTCClient,
-        params: TransportParams,
-        **kwargs,
-    ):
-        """Initialize the WebRTC input transport.
-
-        Args:
-            client: The WebRTC client instance.
-            params: Transport configuration parameters.
-            **kwargs: Additional arguments passed to parent class.
-        """
-        super().__init__(params, **kwargs)
-        self._client = client
-        self._params = params
-        self._receive_audio_task = None
-        self._receive_video_task = None
-        self._receive_screen_video_task = None
-        self._image_requests = {}
-
-        # Whether we have seen a StartFrame already.
-        self._initialized = False
-
-    async def process_frame(self, frame: Frame, direction: FrameDirection):
-        """Process incoming frames including user image requests.
-
-        Args:
-            frame: The frame to process.
-            direction: The direction of frame flow in the pipeline.
-        """
-        await super().process_frame(frame, direction)
-
-        if isinstance(frame, UserImageRequestFrame):
-            await self.request_participant_image(frame)
-
-    async def start(self, frame: StartFrame):
-        """Start the input transport and establish WebRTC connection.
-
-        Args:
-            frame: The start frame containing initialization parameters.
-        """
-        await super().start(frame)
-
-        if self._initialized:
-            return
-
-        self._initialized = True
-
-        await self._client.setup(self._params, frame)
-        await self._client.connect()
-        await self.set_transport_ready(frame)
-        if not self._receive_audio_task and self._params.audio_in_enabled:
-            self._receive_audio_task = self.create_task(self._receive_audio())
-        if not self._receive_video_task and self._params.video_in_enabled:
-            self._receive_video_task = self.create_task(self._receive_video(CAM_VIDEO_SOURCE))
-
-    async def _stop_tasks(self):
-        """Stop all background tasks."""
-        if self._receive_audio_task:
-            await self.cancel_task(self._receive_audio_task)
-            self._receive_audio_task = None
-        if self._receive_video_task:
-            await self.cancel_task(self._receive_video_task)
-            self._receive_video_task = None
-
-    async def stop(self, frame: EndFrame):
-        """Stop the input transport and disconnect from WebRTC.
-
-        Args:
-            frame: The end frame signaling transport shutdown.
-        """
-        await super().stop(frame)
-        await self._stop_tasks()
-        await self._client.disconnect()
-
-    async def cancel(self, frame: CancelFrame):
-        """Cancel the input transport and disconnect immediately.
-
-        Args:
-            frame: The cancel frame signaling immediate cancellation.
-        """
-        await super().cancel(frame)
-        await self._stop_tasks()
-        await self._client.disconnect()
-
-    async def _receive_audio(self):
-        """Background task for receiving audio frames from WebRTC."""
-        try:
-            audio_iterator = self._client.read_audio_frame()
-            async for audio_frame in audio_iterator:
-                if audio_frame:
-                    await self.push_audio_frame(audio_frame)
-
-        except Exception as e:
-            logger.error(f"{self} exception receiving data: {e.__class__.__name__} ({e})")
-
-    async def _receive_video(self, video_source: str):
-        """Background task for receiving video frames from WebRTC.
-
-        Args:
-            video_source: Video source to capture ("camera" or "screenVideo").
-        """
-        try:
-            video_iterator = self._client.read_video_frame(video_source)
-            async for video_frame in video_iterator:
-                if video_frame:
-                    await self.push_video_frame(video_frame)
-
-                    # Check if there are any pending image requests and create UserImageRawFrame
-                    if self._image_requests:
-                        for req_id, request_frame in list(self._image_requests.items()):
-                            if request_frame.video_source == video_source:
-                                # Create UserImageRawFrame using the current video frame
-                                image_frame = UserImageRawFrame(
-                                    user_id=request_frame.user_id,
-                                    request=request_frame,
-                                    image=video_frame.image,
-                                    size=video_frame.size,
-                                    format=video_frame.format,
-                                )
-                                image_frame.transport_source = video_source
-                                # Push the frame to the pipeline
-                                await self.push_video_frame(image_frame)
-                                # Remove from pending requests
-                                del self._image_requests[req_id]
-
-        except Exception as e:
-            logger.error(f"{self} exception receiving data: {e.__class__.__name__} ({e})")
-
-    async def push_app_message(self, message: Any):
-        """Push an application message into the pipeline.
-
-        Args:
-            message: The application message to process.
-        """
-        logger.debug(f"Received app message inside SmallWebRTCInputTransport {message}")
-        frame = TransportMessageUrgentFrame(message=message)
-        await self.push_frame(frame)
-
-    # Add this method similar to DailyInputTransport.request_participant_image
-    async def request_participant_image(self, frame: UserImageRequestFrame):
-        """Request an image frame from the participant's video stream.
-
-        When a UserImageRequestFrame is received, this method will store the request
-        and the next video frame received will be converted to a UserImageRawFrame.
-
-        Args:
-            frame: The user image request frame.
-        """
-        logger.debug(f"Requesting image from participant: {frame.user_id}")
-
-        # Store the request
-        request_id = f"{frame.function_name}:{frame.tool_call_id}"
-        self._image_requests[request_id] = frame
-
-        # Default to camera if no source specified
-        if frame.video_source is None:
-            frame.video_source = CAM_VIDEO_SOURCE
-        # If we're not already receiving video, try to get a frame now
-        if (
-            frame.video_source == CAM_VIDEO_SOURCE
-            and not self._receive_video_task
-            and self._params.video_in_enabled
-        ):
-            # Start video reception if it's not already running
-            self._receive_video_task = self.create_task(self._receive_video(CAM_VIDEO_SOURCE))
-        elif (
-            frame.video_source == SCREEN_VIDEO_SOURCE
-            and not self._receive_screen_video_task
-            and self._params.video_in_enabled
-        ):
-            # Start screen video reception if it's not already running
-            self._receive_screen_video_task = self.create_task(
-                self._receive_video(SCREEN_VIDEO_SOURCE)
-            )
-
-    async def capture_participant_media(
-        self,
-        source: str = CAM_VIDEO_SOURCE,
-    ):
-        """Capture media from a specific participant.
-
-        Args:
-            source: Media source to capture from. ("camera", "microphone", or "screenVideo")
-        """
-        # If we're not already receiving video, try to get a frame now
-        if (
-            source == MIC_AUDIO_SOURCE
-            and not self._receive_audio_task
-            and self._params.audio_in_enabled
-        ):
-            # Start audio reception if it's not already running
-            self._receive_audio_task = self.create_task(self._receive_audio())
-        elif (
-            source == CAM_VIDEO_SOURCE
-            and not self._receive_video_task
-            and self._params.video_in_enabled
-        ):
-            # Start video reception if it's not already running
-            self._receive_video_task = self.create_task(self._receive_video(CAM_VIDEO_SOURCE))
-        elif (
-            source == SCREEN_VIDEO_SOURCE
-            and not self._receive_screen_video_task
-            and self._params.video_in_enabled
-        ):
-            # Start screen video reception if it's not already running
-            self._receive_screen_video_task = self.create_task(
-                self._receive_video(SCREEN_VIDEO_SOURCE)
-            )
-
-
-class SmallWebRTCOutputTransport(BaseOutputTransport):
-    """Output transport implementation for SmallWebRTC.
-
-    Handles outgoing audio and video streams to WebRTC peers,
-    including transport message sending.
-    """
-
-    def __init__(
-        self,
-        client: SmallWebRTCClient,
-        params: TransportParams,
-        **kwargs,
-    ):
-        """Initialize the WebRTC output transport.
-
-        Args:
-            client: The WebRTC client instance.
-            params: Transport configuration parameters.
-            **kwargs: Additional arguments passed to parent class.
-        """
-        super().__init__(params, **kwargs)
-        self._client = client
-        self._params = params
-
-        # Whether we have seen a StartFrame already.
-        self._initialized = False
-
-    async def start(self, frame: StartFrame):
-        """Start the output transport and establish WebRTC connection.
-
-        Args:
-            frame: The start frame containing initialization parameters.
-        """
-        await super().start(frame)
-
-        if self._initialized:
-            return
-
-        self._initialized = True
-
-        await self._client.setup(self._params, frame)
-        await self._client.connect()
-        await self.set_transport_ready(frame)
-
-    async def stop(self, frame: EndFrame):
-        """Stop the output transport and disconnect from WebRTC.
-
-        Args:
-            frame: The end frame signaling transport shutdown.
-        """
-        await super().stop(frame)
-        await self._client.disconnect()
-
-    async def cancel(self, frame: CancelFrame):
-        """Cancel the output transport and disconnect immediately.
-
-        Args:
-            frame: The cancel frame signaling immediate cancellation.
-        """
-        await super().cancel(frame)
-        await self._client.disconnect()
-
-    async def send_message(self, frame: TransportMessageFrame | TransportMessageUrgentFrame):
-        """Send a transport message through the WebRTC connection.
-
-        Args:
-            frame: The transport message frame to send.
-        """
-        await self._client.send_message(frame)
-
-    async def write_audio_frame(self, frame: OutputAudioRawFrame):
-        """Write an audio frame to the WebRTC connection.
-
-        Args:
-            frame: The output audio frame to transmit.
-        """
-        await self._client.write_audio_frame(frame)
-
-    async def write_video_frame(self, frame: OutputImageRawFrame):
-        """Write a video frame to the WebRTC connection.
-
-        Args:
-            frame: The output video frame to transmit.
-        """
-        await self._client.write_video_frame(frame)
-
-
-class SmallWebRTCTransport(BaseTransport):
-    """WebRTC transport implementation for real-time communication.
-
-    Provides bidirectional audio and video streaming over WebRTC connections
-    with support for application messaging and connection event handling.
-    """
-
-    def __init__(
-        self,
-        webrtc_connection: SmallWebRTCConnection,
-        params: TransportParams,
-        input_name: Optional[str] = None,
-        output_name: Optional[str] = None,
-    ):
-        """Initialize the WebRTC transport.
-
-        Args:
-            webrtc_connection: The underlying WebRTC connection handler.
-            params: Transport configuration parameters.
-            input_name: Optional name for the input processor.
-            output_name: Optional name for the output processor.
-        """
-        super().__init__(input_name=input_name, output_name=output_name)
-        self._params = params
-
-        self._callbacks = SmallWebRTCCallbacks(
-            on_app_message=self._on_app_message,
-            on_client_connected=self._on_client_connected,
-            on_client_disconnected=self._on_client_disconnected,
-        )
-
-        self._client = SmallWebRTCClient(webrtc_connection, self._callbacks)
-
-        self._input: Optional[SmallWebRTCInputTransport] = None
-        self._output: Optional[SmallWebRTCOutputTransport] = None
-
-        # Register supported handlers. The user will only be able to register
-        # these handlers.
-        self._register_event_handler("on_app_message")
-        self._register_event_handler("on_client_connected")
-        self._register_event_handler("on_client_disconnected")
-
-    def input(self) -> SmallWebRTCInputTransport:
-        """Get the input transport processor.
-
-        Returns:
-            The input transport for handling incoming media streams.
-        """
-        if not self._input:
-            self._input = SmallWebRTCInputTransport(
-                self._client, self._params, name=self._input_name
-            )
-        return self._input
-
-    def output(self) -> SmallWebRTCOutputTransport:
-        """Get the output transport processor.
-
-        Returns:
-            The output transport for handling outgoing media streams.
-        """
-        if not self._output:
-            self._output = SmallWebRTCOutputTransport(
-                self._client, self._params, name=self._input_name
-            )
-        return self._output
-
-    async def send_image(self, frame: OutputImageRawFrame | SpriteFrame):
-        """Send an image frame through the transport.
-
-        Args:
-            frame: The image frame to send.
-        """
-        if self._output:
-            await self._output.queue_frame(frame, FrameDirection.DOWNSTREAM)
-
-    async def send_audio(self, frame: OutputAudioRawFrame):
-        """Send an audio frame through the transport.
-
-        Args:
-            frame: The audio frame to send.
-        """
-        if self._output:
-            await self._output.queue_frame(frame, FrameDirection.DOWNSTREAM)
-
-    async def _on_app_message(self, message: Any):
-        """Handle incoming application messages."""
-        if self._input:
-            await self._input.push_app_message(message)
-        await self._call_event_handler("on_app_message", message)
-
-    async def _on_client_connected(self, webrtc_connection):
-        """Handle client connection events."""
-        await self._call_event_handler("on_client_connected", webrtc_connection)
-
-    async def _on_client_disconnected(self, webrtc_connection):
-        """Handle client disconnection events."""
-        await self._call_event_handler("on_client_disconnected", webrtc_connection)
-
-    async def capture_participant_video(
-        self,
-        video_source: str = CAM_VIDEO_SOURCE,
-    ):
-        """Capture video from a specific participant.
-
-        Args:
-            video_source: Video source to capture from ("camera" or "screenVideo").
-        """
-        if self._input:
-            await self._input.capture_participant_media(source=video_source)
-
-    async def capture_participant_audio(
-        self,
-        audio_source: str = MIC_AUDIO_SOURCE,
-    ):
-        """Capture audio from a specific participant.
-
-        Args:
-            audio_source: Audio source to capture from. (currently, "microphone" is the only supported option)
-        """
-        if self._input:
-            await self._input.capture_participant_media(source=audio_source)
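The transport above registers three user-facing events (on_app_message, on_client_connected, on_client_disconnected). A minimal usage sketch, assuming a SmallWebRTCConnection produced by your own signaling code and handler signatures matching the _call_event_handler() calls shown:

```python
from pipecat.transports.base_transport import TransportParams
from pipecat.transports.smallwebrtc.transport import SmallWebRTCTransport

def build_transport(connection) -> SmallWebRTCTransport:
    # `connection` is assumed to be a SmallWebRTCConnection from your signaling layer.
    transport = SmallWebRTCTransport(webrtc_connection=connection, params=TransportParams())

    @transport.event_handler("on_client_connected")
    async def on_client_connected(transport, webrtc_connection):
        print(f"client connected: {webrtc_connection.pc_id}")

    @transport.event_handler("on_app_message")
    async def on_app_message(transport, message):
        print(f"app message: {message}")

    return transport
```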
The replacement module body added in dev857 (new lines 14-25):

+import warnings
+
+from pipecat.transports.smallwebrtc.transport import *
+
+with warnings.catch_warnings():
+    warnings.simplefilter("always")
+    warnings.warn(
+        "Module `pipecat.transports.network.small_webrtc` is deprecated, "
+        "use `pipecat.transports.smallwebrtc.transport` instead.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
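A quick sketch to confirm the shim fires: the warning is emitted at import time, so the module must not already be cached in sys.modules, and the webrtc extra must be installed for the star-import to succeed.

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    import pipecat.transports.network.small_webrtc  # noqa: F401  (deprecated path)

assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```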