dv-pipecat-ai 0.0.82.dev815__py3-none-any.whl → 0.0.82.dev857__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of dv-pipecat-ai might be problematic.
- {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/METADATA +8 -3
- {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/RECORD +106 -79
- pipecat/adapters/base_llm_adapter.py +44 -6
- pipecat/adapters/services/anthropic_adapter.py +302 -2
- pipecat/adapters/services/aws_nova_sonic_adapter.py +40 -2
- pipecat/adapters/services/bedrock_adapter.py +40 -2
- pipecat/adapters/services/gemini_adapter.py +276 -6
- pipecat/adapters/services/open_ai_adapter.py +88 -7
- pipecat/adapters/services/open_ai_realtime_adapter.py +39 -1
- pipecat/audio/dtmf/__init__.py +0 -0
- pipecat/audio/dtmf/types.py +47 -0
- pipecat/audio/dtmf/utils.py +70 -0
- pipecat/audio/filters/aic_filter.py +199 -0
- pipecat/audio/utils.py +9 -7
- pipecat/extensions/ivr/__init__.py +0 -0
- pipecat/extensions/ivr/ivr_navigator.py +452 -0
- pipecat/frames/frames.py +156 -43
- pipecat/pipeline/llm_switcher.py +76 -0
- pipecat/pipeline/parallel_pipeline.py +3 -3
- pipecat/pipeline/service_switcher.py +144 -0
- pipecat/pipeline/task.py +68 -28
- pipecat/pipeline/task_observer.py +10 -0
- pipecat/processors/aggregators/dtmf_aggregator.py +2 -2
- pipecat/processors/aggregators/llm_context.py +277 -0
- pipecat/processors/aggregators/llm_response.py +48 -15
- pipecat/processors/aggregators/llm_response_universal.py +840 -0
- pipecat/processors/aggregators/openai_llm_context.py +3 -3
- pipecat/processors/dtmf_aggregator.py +0 -2
- pipecat/processors/filters/stt_mute_filter.py +0 -2
- pipecat/processors/frame_processor.py +18 -11
- pipecat/processors/frameworks/rtvi.py +17 -10
- pipecat/processors/metrics/sentry.py +2 -0
- pipecat/runner/daily.py +137 -36
- pipecat/runner/run.py +1 -1
- pipecat/runner/utils.py +7 -7
- pipecat/serializers/asterisk.py +20 -4
- pipecat/serializers/exotel.py +1 -1
- pipecat/serializers/plivo.py +1 -1
- pipecat/serializers/telnyx.py +1 -1
- pipecat/serializers/twilio.py +1 -1
- pipecat/services/__init__.py +2 -2
- pipecat/services/anthropic/llm.py +113 -28
- pipecat/services/asyncai/tts.py +4 -0
- pipecat/services/aws/llm.py +82 -8
- pipecat/services/aws/tts.py +0 -10
- pipecat/services/aws_nova_sonic/aws.py +5 -0
- pipecat/services/cartesia/tts.py +28 -16
- pipecat/services/cerebras/llm.py +15 -10
- pipecat/services/deepgram/stt.py +8 -0
- pipecat/services/deepseek/llm.py +13 -8
- pipecat/services/fireworks/llm.py +13 -8
- pipecat/services/fish/tts.py +8 -6
- pipecat/services/gemini_multimodal_live/gemini.py +5 -0
- pipecat/services/gladia/config.py +7 -1
- pipecat/services/gladia/stt.py +23 -15
- pipecat/services/google/llm.py +159 -59
- pipecat/services/google/llm_openai.py +18 -3
- pipecat/services/grok/llm.py +2 -1
- pipecat/services/llm_service.py +38 -3
- pipecat/services/mem0/memory.py +2 -1
- pipecat/services/mistral/llm.py +5 -6
- pipecat/services/nim/llm.py +2 -1
- pipecat/services/openai/base_llm.py +88 -26
- pipecat/services/openai/image.py +6 -1
- pipecat/services/openai_realtime_beta/openai.py +5 -2
- pipecat/services/openpipe/llm.py +6 -8
- pipecat/services/perplexity/llm.py +13 -8
- pipecat/services/playht/tts.py +9 -6
- pipecat/services/rime/tts.py +1 -1
- pipecat/services/sambanova/llm.py +18 -13
- pipecat/services/sarvam/tts.py +415 -10
- pipecat/services/speechmatics/stt.py +2 -2
- pipecat/services/tavus/video.py +1 -1
- pipecat/services/tts_service.py +15 -5
- pipecat/services/vistaar/llm.py +2 -5
- pipecat/transports/base_input.py +32 -19
- pipecat/transports/base_output.py +39 -5
- pipecat/transports/daily/__init__.py +0 -0
- pipecat/transports/daily/transport.py +2371 -0
- pipecat/transports/daily/utils.py +410 -0
- pipecat/transports/livekit/__init__.py +0 -0
- pipecat/transports/livekit/transport.py +1042 -0
- pipecat/transports/network/fastapi_websocket.py +12 -546
- pipecat/transports/network/small_webrtc.py +12 -922
- pipecat/transports/network/webrtc_connection.py +9 -595
- pipecat/transports/network/websocket_client.py +12 -481
- pipecat/transports/network/websocket_server.py +12 -487
- pipecat/transports/services/daily.py +9 -2334
- pipecat/transports/services/helpers/daily_rest.py +12 -396
- pipecat/transports/services/livekit.py +12 -975
- pipecat/transports/services/tavus.py +12 -757
- pipecat/transports/smallwebrtc/__init__.py +0 -0
- pipecat/transports/smallwebrtc/connection.py +612 -0
- pipecat/transports/smallwebrtc/transport.py +936 -0
- pipecat/transports/tavus/__init__.py +0 -0
- pipecat/transports/tavus/transport.py +770 -0
- pipecat/transports/websocket/__init__.py +0 -0
- pipecat/transports/websocket/client.py +494 -0
- pipecat/transports/websocket/fastapi.py +559 -0
- pipecat/transports/websocket/server.py +500 -0
- pipecat/transports/whatsapp/__init__.py +0 -0
- pipecat/transports/whatsapp/api.py +345 -0
- pipecat/transports/whatsapp/client.py +364 -0
- {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.82.dev815.dist-info → dv_pipecat_ai-0.0.82.dev857.dist-info}/top_level.txt +0 -0
pipecat/transports/livekit/transport.py
@@ -0,0 +1,1042 @@
+#
+# Copyright (c) 2024–2025, Daily
+#
+# SPDX-License-Identifier: BSD 2-Clause License
+#
+
+"""LiveKit transport implementation for Pipecat.
+
+This module provides comprehensive LiveKit real-time communication integration
+including audio streaming, data messaging, participant management, and room
+event handling for conversational AI applications.
+"""
+
+import asyncio
+from dataclasses import dataclass
+from typing import Any, Awaitable, Callable, List, Optional
+
+from loguru import logger
+from pydantic import BaseModel
+
+from pipecat.audio.utils import create_stream_resampler
+from pipecat.audio.vad.vad_analyzer import VADAnalyzer
+from pipecat.frames.frames import (
+    AudioRawFrame,
+    CancelFrame,
+    EndFrame,
+    OutputAudioRawFrame,
+    OutputDTMFFrame,
+    OutputDTMFUrgentFrame,
+    StartFrame,
+    TransportMessageFrame,
+    TransportMessageUrgentFrame,
+    UserAudioRawFrame,
+)
+from pipecat.processors.frame_processor import FrameDirection, FrameProcessorSetup
+from pipecat.transports.base_input import BaseInputTransport
+from pipecat.transports.base_output import BaseOutputTransport
+from pipecat.transports.base_transport import BaseTransport, TransportParams
+from pipecat.utils.asyncio.task_manager import BaseTaskManager
+
+try:
+    from livekit import rtc
+    from tenacity import retry, stop_after_attempt, wait_exponential
+except ModuleNotFoundError as e:
+    logger.error(f"Exception: {e}")
+    logger.error("In order to use LiveKit, you need to `pip install pipecat-ai[livekit]`.")
+    raise Exception(f"Missing module: {e}")
+
+# DTMF mapping according to RFC 4733
+DTMF_CODE_MAP = {
+    "0": 0,
+    "1": 1,
+    "2": 2,
+    "3": 3,
+    "4": 4,
+    "5": 5,
+    "6": 6,
+    "7": 7,
+    "8": 8,
+    "9": 9,
+    "*": 10,
+    "#": 11,
+}
+
+
+@dataclass
+class LiveKitTransportMessageFrame(TransportMessageFrame):
+    """Frame for transport messages in LiveKit rooms.
+
+    Parameters:
+        participant_id: Optional ID of the participant this message is for/from.
+    """
+
+    participant_id: Optional[str] = None
+
+
+@dataclass
+class LiveKitTransportMessageUrgentFrame(TransportMessageUrgentFrame):
+    """Frame for urgent transport messages in LiveKit rooms.
+
+    Parameters:
+        participant_id: Optional ID of the participant this message is for/from.
+    """
+
+    participant_id: Optional[str] = None
+
+
+class LiveKitParams(TransportParams):
+    """Configuration parameters for LiveKit transport.
+
+    Inherits all parameters from TransportParams without additional configuration.
+    """
+
+    pass
+
+
+class LiveKitCallbacks(BaseModel):
+    """Callback handlers for LiveKit events.
+
+    Parameters:
+        on_connected: Called when connected to the LiveKit room.
+        on_disconnected: Called when disconnected from the LiveKit room.
+        on_participant_connected: Called when a participant joins the room.
+        on_participant_disconnected: Called when a participant leaves the room.
+        on_audio_track_subscribed: Called when an audio track is subscribed.
+        on_audio_track_unsubscribed: Called when an audio track is unsubscribed.
+        on_data_received: Called when data is received from a participant.
+        on_first_participant_joined: Called when the first participant joins.
+    """
+
+    on_connected: Callable[[], Awaitable[None]]
+    on_disconnected: Callable[[], Awaitable[None]]
+    on_participant_connected: Callable[[str], Awaitable[None]]
+    on_participant_disconnected: Callable[[str], Awaitable[None]]
+    on_audio_track_subscribed: Callable[[str], Awaitable[None]]
+    on_audio_track_unsubscribed: Callable[[str], Awaitable[None]]
+    on_data_received: Callable[[bytes, str], Awaitable[None]]
+    on_first_participant_joined: Callable[[str], Awaitable[None]]
+
+
+class LiveKitTransportClient:
+    """Core client for interacting with LiveKit rooms.
+
+    Manages the connection to LiveKit rooms and handles all low-level API interactions
+    including room management, audio streaming, data messaging, and event handling.
+    """
+
+    def __init__(
+        self,
+        url: str,
+        token: str,
+        room_name: str,
+        params: LiveKitParams,
+        callbacks: LiveKitCallbacks,
+        transport_name: str,
+    ):
+        """Initialize the LiveKit transport client.
+
+        Args:
+            url: LiveKit server URL to connect to.
+            token: Authentication token for the room.
+            room_name: Name of the LiveKit room to join.
+            params: Configuration parameters for the transport.
+            callbacks: Event callback handlers.
+            transport_name: Name identifier for the transport.
+        """
+        self._url = url
+        self._token = token
+        self._room_name = room_name
+        self._params = params
+        self._callbacks = callbacks
+        self._transport_name = transport_name
+        self._room: Optional[rtc.Room] = None
+        self._participant_id: str = ""
+        self._connected = False
+        self._disconnect_counter = 0
+        self._audio_source: Optional[rtc.AudioSource] = None
+        self._audio_track: Optional[rtc.LocalAudioTrack] = None
+        self._audio_tracks = {}
+        self._audio_queue = asyncio.Queue()
+        self._other_participant_has_joined = False
+        self._task_manager: Optional[BaseTaskManager] = None
+
+    @property
+    def participant_id(self) -> str:
+        """Get the participant ID for this client.
+
+        Returns:
+            The participant ID assigned by LiveKit.
+        """
+        return self._participant_id
+
+    @property
+    def room(self) -> rtc.Room:
+        """Get the LiveKit room instance.
+
+        Returns:
+            The LiveKit room object.
+
+        Raises:
+            Exception: If room object is not available.
+        """
+        if not self._room:
+            raise Exception(f"{self}: missing room object (pipeline not started?)")
+        return self._room
+
+    async def setup(self, setup: FrameProcessorSetup):
+        """Setup the client with task manager and room initialization.
+
+        Args:
+            setup: The frame processor setup configuration.
+        """
+        if self._task_manager:
+            return
+
+        self._task_manager = setup.task_manager
+        self._room = rtc.Room(loop=self._task_manager.get_event_loop())
+
+        # Set up room event handlers
+        self.room.on("participant_connected")(self._on_participant_connected_wrapper)
+        self.room.on("participant_disconnected")(self._on_participant_disconnected_wrapper)
+        self.room.on("track_subscribed")(self._on_track_subscribed_wrapper)
+        self.room.on("track_unsubscribed")(self._on_track_unsubscribed_wrapper)
+        self.room.on("data_received")(self._on_data_received_wrapper)
+        self.room.on("connected")(self._on_connected_wrapper)
+        self.room.on("disconnected")(self._on_disconnected_wrapper)
+
+    async def cleanup(self):
+        """Cleanup client resources."""
+        await self.disconnect()
+
+    async def start(self, frame: StartFrame):
+        """Start the client and initialize audio components.
+
+        Args:
+            frame: The start frame containing initialization parameters.
+        """
+        self._out_sample_rate = self._params.audio_out_sample_rate or frame.audio_out_sample_rate
+
+    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10))
+    async def connect(self):
+        """Connect to the LiveKit room with retry logic."""
+        if self._connected:
+            # Increment disconnect counter if already connected.
+            self._disconnect_counter += 1
+            return
+
+        logger.info(f"Connecting to {self._room_name}")
+
+        try:
+            await self.room.connect(
+                self._url,
+                self._token,
+                options=rtc.RoomOptions(auto_subscribe=True),
+            )
+            self._connected = True
+            # Increment disconnect counter if we successfully connected.
+            self._disconnect_counter += 1
+
+            self._participant_id = self.room.local_participant.sid
+            logger.info(f"Connected to {self._room_name}")
+
+            # Set up audio source and track
+            self._audio_source = rtc.AudioSource(
+                self._out_sample_rate, self._params.audio_out_channels
+            )
+            self._audio_track = rtc.LocalAudioTrack.create_audio_track(
+                "pipecat-audio", self._audio_source
+            )
+            options = rtc.TrackPublishOptions()
+            options.source = rtc.TrackSource.SOURCE_MICROPHONE
+            await self.room.local_participant.publish_track(self._audio_track, options)
+
+            await self._callbacks.on_connected()
+
+            # Check if there are already participants in the room
+            participants = self.get_participants()
+            if participants and not self._other_participant_has_joined:
+                self._other_participant_has_joined = True
+                await self._callbacks.on_first_participant_joined(participants[0])
+        except Exception as e:
+            logger.error(f"Error connecting to {self._room_name}: {e}")
+            raise
+
+    async def disconnect(self):
+        """Disconnect from the LiveKit room."""
+        # Decrement leave counter when leaving.
+        self._disconnect_counter -= 1
+
+        if not self._connected or self._disconnect_counter > 0:
+            return
+
+        logger.info(f"Disconnecting from {self._room_name}")
+        await self.room.disconnect()
+        self._connected = False
+        logger.info(f"Disconnected from {self._room_name}")
+        await self._callbacks.on_disconnected()
+
+    async def send_data(self, data: bytes, participant_id: Optional[str] = None):
+        """Send data to participants in the room.
+
+        Args:
+            data: The data bytes to send.
+            participant_id: Optional specific participant to send to.
+        """
+        if not self._connected:
+            return
+
+        try:
+            if participant_id:
+                await self.room.local_participant.publish_data(
+                    data, reliable=True, destination_identities=[participant_id]
+                )
+            else:
+                await self.room.local_participant.publish_data(data, reliable=True)
+        except Exception as e:
+            logger.error(f"Error sending data: {e}")
+
+    async def send_dtmf(self, digit: str):
+        """Send DTMF tone to the room.
+
+        Args:
+            digit: The DTMF digit to send (0-9, *, #).
+        """
+        if not self._connected:
+            return
+
+        if digit not in DTMF_CODE_MAP:
+            logger.warning(f"Invalid DTMF digit: {digit}")
+            return
+
+        code = DTMF_CODE_MAP[digit]
+
+        try:
+            await self.room.local_participant.publish_dtmf(code=code, digit=digit)
+        except Exception as e:
+            logger.error(f"Error sending DTMF tone {digit}: {e}")
+
+    async def publish_audio(self, audio_frame: rtc.AudioFrame):
+        """Publish an audio frame to the room.
+
+        Args:
+            audio_frame: The LiveKit audio frame to publish.
+        """
+        if not self._connected or not self._audio_source:
+            return
+
+        try:
+            await self._audio_source.capture_frame(audio_frame)
+        except Exception as e:
+            logger.error(f"Error publishing audio: {e}")
+
+    def get_participants(self) -> List[str]:
+        """Get list of participant IDs in the room.
+
+        Returns:
+            List of participant IDs.
+        """
+        return [p.sid for p in self.room.remote_participants.values()]
+
+    async def get_participant_metadata(self, participant_id: str) -> dict:
+        """Get metadata for a specific participant.
+
+        Args:
+            participant_id: ID of the participant to get metadata for.
+
+        Returns:
+            Dictionary containing participant metadata.
+        """
+        participant = self.room.remote_participants.get(participant_id)
+        if participant:
+            return {
+                "id": participant.sid,
+                "name": participant.name,
+                "metadata": participant.metadata,
+                "is_speaking": participant.is_speaking,
+            }
+        return {}
+
+    async def set_participant_metadata(self, metadata: str):
+        """Set metadata for the local participant.
+
+        Args:
+            metadata: Metadata string to set.
+        """
+        await self.room.local_participant.set_metadata(metadata)
+
+    async def mute_participant(self, participant_id: str):
+        """Mute a specific participant's audio tracks.
+
+        Args:
+            participant_id: ID of the participant to mute.
+        """
+        participant = self.room.remote_participants.get(participant_id)
+        if participant:
+            for track in participant.tracks.values():
+                if track.kind == "audio":
+                    await track.set_enabled(False)
+
+    async def unmute_participant(self, participant_id: str):
+        """Unmute a specific participant's audio tracks.
+
+        Args:
+            participant_id: ID of the participant to unmute.
+        """
+        participant = self.room.remote_participants.get(participant_id)
+        if participant:
+            for track in participant.tracks.values():
+                if track.kind == "audio":
+                    await track.set_enabled(True)
+
+    # Wrapper methods for event handlers
+    def _on_participant_connected_wrapper(self, participant: rtc.RemoteParticipant):
+        """Wrapper for participant connected events."""
+        self._task_manager.create_task(
+            self._async_on_participant_connected(participant),
+            f"{self}::_async_on_participant_connected",
+        )
+
+    def _on_participant_disconnected_wrapper(self, participant: rtc.RemoteParticipant):
+        """Wrapper for participant disconnected events."""
+        self._task_manager.create_task(
+            self._async_on_participant_disconnected(participant),
+            f"{self}::_async_on_participant_disconnected",
+        )
+
+    def _on_track_subscribed_wrapper(
+        self,
+        track: rtc.Track,
+        publication: rtc.RemoteTrackPublication,
+        participant: rtc.RemoteParticipant,
+    ):
+        """Wrapper for track subscribed events."""
+        self._task_manager.create_task(
+            self._async_on_track_subscribed(track, publication, participant),
+            f"{self}::_async_on_track_subscribed",
+        )
+
+    def _on_track_unsubscribed_wrapper(
+        self,
+        track: rtc.Track,
+        publication: rtc.RemoteTrackPublication,
+        participant: rtc.RemoteParticipant,
+    ):
+        """Wrapper for track unsubscribed events."""
+        self._task_manager.create_task(
+            self._async_on_track_unsubscribed(track, publication, participant),
+            f"{self}::_async_on_track_unsubscribed",
+        )
+
+    def _on_data_received_wrapper(self, data: rtc.DataPacket):
+        """Wrapper for data received events."""
+        self._task_manager.create_task(
+            self._async_on_data_received(data),
+            f"{self}::_async_on_data_received",
+        )
+
+    def _on_connected_wrapper(self):
+        """Wrapper for connected events."""
+        self._task_manager.create_task(self._async_on_connected(), f"{self}::_async_on_connected")
+
+    def _on_disconnected_wrapper(self):
+        """Wrapper for disconnected events."""
+        self._task_manager.create_task(
+            self._async_on_disconnected(), f"{self}::_async_on_disconnected"
+        )
+
+    # Async methods for event handling
+    async def _async_on_participant_connected(self, participant: rtc.RemoteParticipant):
+        """Handle participant connected events."""
+        logger.info(f"Participant connected: {participant.identity}")
+        await self._callbacks.on_participant_connected(participant.sid)
+        if not self._other_participant_has_joined:
+            self._other_participant_has_joined = True
+            await self._callbacks.on_first_participant_joined(participant.sid)
+
+    async def _async_on_participant_disconnected(self, participant: rtc.RemoteParticipant):
+        """Handle participant disconnected events."""
+        logger.info(f"Participant disconnected: {participant.identity}")
+        await self._callbacks.on_participant_disconnected(participant.sid)
+        if len(self.get_participants()) == 0:
+            self._other_participant_has_joined = False
+
+    async def _async_on_track_subscribed(
+        self,
+        track: rtc.Track,
+        publication: rtc.RemoteTrackPublication,
+        participant: rtc.RemoteParticipant,
+    ):
+        """Handle track subscribed events."""
+        if track.kind == rtc.TrackKind.KIND_AUDIO:
+            logger.info(f"Audio track subscribed: {track.sid} from participant {participant.sid}")
+            self._audio_tracks[participant.sid] = track
+            audio_stream = rtc.AudioStream(track)
+            self._task_manager.create_task(
+                self._process_audio_stream(audio_stream, participant.sid),
+                f"{self}::_process_audio_stream",
+            )
+            await self._callbacks.on_audio_track_subscribed(participant.sid)
+
+    async def _async_on_track_unsubscribed(
+        self,
+        track: rtc.Track,
+        publication: rtc.RemoteTrackPublication,
+        participant: rtc.RemoteParticipant,
+    ):
+        """Handle track unsubscribed events."""
+        logger.info(f"Track unsubscribed: {publication.sid} from {participant.identity}")
+        if track.kind == rtc.TrackKind.KIND_AUDIO:
+            await self._callbacks.on_audio_track_unsubscribed(participant.sid)
+
+    async def _async_on_data_received(self, data: rtc.DataPacket):
+        """Handle data received events."""
+        await self._callbacks.on_data_received(data.data, data.participant.sid)
+
+    async def _async_on_connected(self):
+        """Handle connected events."""
+        await self._callbacks.on_connected()
+
+    async def _async_on_disconnected(self, reason=None):
+        """Handle disconnected events."""
+        self._connected = False
+        logger.info(f"Disconnected from {self._room_name}. Reason: {reason}")
+        await self._callbacks.on_disconnected()
+
+    async def _process_audio_stream(self, audio_stream: rtc.AudioStream, participant_id: str):
+        """Process incoming audio stream from a participant."""
+        logger.info(f"Started processing audio stream for participant {participant_id}")
+        async for event in audio_stream:
+            if isinstance(event, rtc.AudioFrameEvent):
+                await self._audio_queue.put((event, participant_id))
+            else:
+                logger.warning(f"Received unexpected event type: {type(event)}")
+
+    async def get_next_audio_frame(self):
+        """Get the next audio frame from the queue."""
+        while True:
+            frame, participant_id = await self._audio_queue.get()
+            yield frame, participant_id
+
+    def __str__(self):
+        """String representation of the LiveKit transport client."""
+        return f"{self._transport_name}::LiveKitTransportClient"
+
+
+class LiveKitInputTransport(BaseInputTransport):
+    """Handles incoming media streams and events from LiveKit rooms.
+
+    Processes incoming audio streams from room participants and forwards them
+    as Pipecat frames, including audio resampling and VAD integration.
+    """
+
+    def __init__(
+        self,
+        transport: BaseTransport,
+        client: LiveKitTransportClient,
+        params: LiveKitParams,
+        **kwargs,
+    ):
+        """Initialize the LiveKit input transport.
+
+        Args:
+            transport: The parent transport instance.
+            client: LiveKitTransportClient instance.
+            params: Configuration parameters.
+            **kwargs: Additional arguments passed to parent class.
+        """
+        super().__init__(params, **kwargs)
+        self._transport = transport
+        self._client = client
+
+        self._audio_in_task = None
+        self._vad_analyzer: Optional[VADAnalyzer] = params.vad_analyzer
+        self._resampler = create_stream_resampler()
+
+        # Whether we have seen a StartFrame already.
+        self._initialized = False
+
+    @property
+    def vad_analyzer(self) -> Optional[VADAnalyzer]:
+        """Get the Voice Activity Detection analyzer.
+
+        Returns:
+            The VAD analyzer instance if configured.
+        """
+        return self._vad_analyzer
+
+    async def start(self, frame: StartFrame):
+        """Start the input transport and connect to LiveKit room.
+
+        Args:
+            frame: The start frame containing initialization parameters.
+        """
+        await super().start(frame)
+
+        if self._initialized:
+            return
+
+        self._initialized = True
+
+        await self._client.start(frame)
+        await self._client.connect()
+        if not self._audio_in_task and self._params.audio_in_enabled:
+            self._audio_in_task = self.create_task(self._audio_in_task_handler())
+        await self.set_transport_ready(frame)
+        logger.info("LiveKitInputTransport started")
+
+    async def stop(self, frame: EndFrame):
+        """Stop the input transport and disconnect from LiveKit room.
+
+        Args:
+            frame: The end frame signaling transport shutdown.
+        """
+        await super().stop(frame)
+        await self._client.disconnect()
+        if self._audio_in_task:
+            await self.cancel_task(self._audio_in_task)
+        logger.info("LiveKitInputTransport stopped")
+
+    async def cancel(self, frame: CancelFrame):
+        """Cancel the input transport and disconnect from LiveKit room.
+
+        Args:
+            frame: The cancel frame signaling immediate cancellation.
+        """
+        await super().cancel(frame)
+        await self._client.disconnect()
+        if self._audio_in_task and self._params.audio_in_enabled:
+            await self.cancel_task(self._audio_in_task)
+
+    async def setup(self, setup: FrameProcessorSetup):
+        """Setup the input transport with shared client setup.
+
+        Args:
+            setup: The frame processor setup configuration.
+        """
+        await super().setup(setup)
+        await self._client.setup(setup)
+
+    async def cleanup(self):
+        """Cleanup input transport and shared resources."""
+        await super().cleanup()
+        await self._transport.cleanup()
+
+    async def push_app_message(self, message: Any, sender: str):
+        """Push an application message as an urgent transport frame.
+
+        Args:
+            message: The message data to send.
+            sender: ID of the message sender.
+        """
+        frame = LiveKitTransportMessageUrgentFrame(message=message, participant_id=sender)
+        await self.push_frame(frame)
+
+    async def _audio_in_task_handler(self):
+        """Handle incoming audio frames from participants."""
+        logger.info("Audio input task started")
+        audio_iterator = self._client.get_next_audio_frame()
+        async for audio_data in audio_iterator:
+            if audio_data:
+                audio_frame_event, participant_id = audio_data
+                pipecat_audio_frame = await self._convert_livekit_audio_to_pipecat(
+                    audio_frame_event
+                )
+
+                # Skip frames with no audio data
+                if len(pipecat_audio_frame.audio) == 0:
+                    continue
+
+                input_audio_frame = UserAudioRawFrame(
+                    user_id=participant_id,
+                    audio=pipecat_audio_frame.audio,
+                    sample_rate=pipecat_audio_frame.sample_rate,
+                    num_channels=pipecat_audio_frame.num_channels,
+                )
+                await self.push_audio_frame(input_audio_frame)
+
+    async def _convert_livekit_audio_to_pipecat(
+        self, audio_frame_event: rtc.AudioFrameEvent
+    ) -> AudioRawFrame:
+        """Convert LiveKit audio frame to Pipecat audio frame."""
+        audio_frame = audio_frame_event.frame
+
+        audio_data = await self._resampler.resample(
+            audio_frame.data.tobytes(), audio_frame.sample_rate, self.sample_rate
+        )
+
+        return AudioRawFrame(
+            audio=audio_data,
+            sample_rate=self.sample_rate,
+            num_channels=audio_frame.num_channels,
+        )
+
+
+class LiveKitOutputTransport(BaseOutputTransport):
+    """Handles outgoing media streams and events to LiveKit rooms.
+
+    Manages sending audio frames and data messages to LiveKit room participants,
+    including audio format conversion for LiveKit compatibility.
+    """
+
+    def __init__(
+        self,
+        transport: BaseTransport,
+        client: LiveKitTransportClient,
+        params: LiveKitParams,
+        **kwargs,
+    ):
+        """Initialize the LiveKit output transport.
+
+        Args:
+            transport: The parent transport instance.
+            client: LiveKitTransportClient instance.
+            params: Configuration parameters.
+            **kwargs: Additional arguments passed to parent class.
+        """
+        super().__init__(params, **kwargs)
+        self._transport = transport
+        self._client = client
+
+        # Whether we have seen a StartFrame already.
+        self._initialized = False
+
+    async def start(self, frame: StartFrame):
+        """Start the output transport and connect to LiveKit room.
+
+        Args:
+            frame: The start frame containing initialization parameters.
+        """
+        await super().start(frame)
+
+        if self._initialized:
+            return
+
+        self._initialized = True
+
+        await self._client.start(frame)
+        await self._client.connect()
+        await self.set_transport_ready(frame)
+        logger.info("LiveKitOutputTransport started")
+
+    async def stop(self, frame: EndFrame):
+        """Stop the output transport and disconnect from LiveKit room.
+
+        Args:
+            frame: The end frame signaling transport shutdown.
+        """
+        await super().stop(frame)
+        await self._client.disconnect()
+        logger.info("LiveKitOutputTransport stopped")
+
+    async def cancel(self, frame: CancelFrame):
+        """Cancel the output transport and disconnect from LiveKit room.
+
+        Args:
+            frame: The cancel frame signaling immediate cancellation.
+        """
+        await super().cancel(frame)
+        await self._client.disconnect()
+
+    async def setup(self, setup: FrameProcessorSetup):
+        """Setup the output transport with shared client setup.
+
+        Args:
+            setup: The frame processor setup configuration.
+        """
+        await super().setup(setup)
+        await self._client.setup(setup)
+
+    async def cleanup(self):
+        """Cleanup output transport and shared resources."""
+        await super().cleanup()
+        await self._transport.cleanup()
+
+    async def send_message(self, frame: TransportMessageFrame | TransportMessageUrgentFrame):
+        """Send a transport message to participants.
+
+        Args:
+            frame: The transport message frame to send.
+        """
+        if isinstance(frame, (LiveKitTransportMessageFrame, LiveKitTransportMessageUrgentFrame)):
+            await self._client.send_data(frame.message.encode(), frame.participant_id)
+        else:
+            await self._client.send_data(frame.message.encode())
+
+    async def write_audio_frame(self, frame: OutputAudioRawFrame):
+        """Write an audio frame to the LiveKit room.
+
+        Args:
+            frame: The audio frame to write.
+        """
+        livekit_audio = self._convert_pipecat_audio_to_livekit(frame.audio)
+        await self._client.publish_audio(livekit_audio)
+
+    def _supports_native_dtmf(self) -> bool:
+        """LiveKit supports native DTMF via telephone events.
+
+        Returns:
+            True, as LiveKit supports native DTMF transmission.
+        """
+        return True
+
+    async def _write_dtmf_native(self, frame: OutputDTMFFrame | OutputDTMFUrgentFrame):
+        """Use LiveKit's native publish_dtmf method for telephone events.
+
+        Args:
+            frame: The DTMF frame to write.
+        """
+        await self._client.send_dtmf(frame.button.value)
+
+    def _convert_pipecat_audio_to_livekit(self, pipecat_audio: bytes) -> rtc.AudioFrame:
+        """Convert Pipecat audio data to LiveKit audio frame."""
+        bytes_per_sample = 2  # Assuming 16-bit audio
+        total_samples = len(pipecat_audio) // bytes_per_sample
+        samples_per_channel = total_samples // self._params.audio_out_channels
+
+        return rtc.AudioFrame(
+            data=pipecat_audio,
+            sample_rate=self.sample_rate,
+            num_channels=self._params.audio_out_channels,
+            samples_per_channel=samples_per_channel,
+        )
+
+
+class LiveKitTransport(BaseTransport):
+    """Transport implementation for LiveKit real-time communication.
+
+    Provides comprehensive LiveKit integration including audio streaming, data
+    messaging, participant management, and room event handling for conversational
+    AI applications.
+    """
+
+    def __init__(
+        self,
+        url: str,
+        token: str,
+        room_name: str,
+        params: Optional[LiveKitParams] = None,
+        input_name: Optional[str] = None,
+        output_name: Optional[str] = None,
+    ):
+        """Initialize the LiveKit transport.
+
+        Args:
+            url: LiveKit server URL to connect to.
+            token: Authentication token for the room.
+            room_name: Name of the LiveKit room to join.
+            params: Configuration parameters for the transport.
+            input_name: Optional name for the input transport.
+            output_name: Optional name for the output transport.
+        """
+        super().__init__(input_name=input_name, output_name=output_name)
+
+        callbacks = LiveKitCallbacks(
+            on_connected=self._on_connected,
+            on_disconnected=self._on_disconnected,
+            on_participant_connected=self._on_participant_connected,
+            on_participant_disconnected=self._on_participant_disconnected,
+            on_audio_track_subscribed=self._on_audio_track_subscribed,
+            on_audio_track_unsubscribed=self._on_audio_track_unsubscribed,
+            on_data_received=self._on_data_received,
+            on_first_participant_joined=self._on_first_participant_joined,
+        )
+        self._params = params or LiveKitParams()
+
+        self._client = LiveKitTransportClient(
+            url, token, room_name, self._params, callbacks, self.name
+        )
+        self._input: Optional[LiveKitInputTransport] = None
+        self._output: Optional[LiveKitOutputTransport] = None
+
+        self._register_event_handler("on_connected")
+        self._register_event_handler("on_disconnected")
+        self._register_event_handler("on_participant_connected")
+        self._register_event_handler("on_participant_disconnected")
+        self._register_event_handler("on_audio_track_subscribed")
+        self._register_event_handler("on_audio_track_unsubscribed")
+        self._register_event_handler("on_data_received")
+        self._register_event_handler("on_first_participant_joined")
+        self._register_event_handler("on_participant_left")
+        self._register_event_handler("on_call_state_updated")
+
+    def input(self) -> LiveKitInputTransport:
+        """Get the input transport for receiving media and events.
+
+        Returns:
+            The LiveKit input transport instance.
+        """
+        if not self._input:
+            self._input = LiveKitInputTransport(
+                self, self._client, self._params, name=self._input_name
+            )
+        return self._input
+
+    def output(self) -> LiveKitOutputTransport:
+        """Get the output transport for sending media and events.
+
+        Returns:
+            The LiveKit output transport instance.
+        """
+        if not self._output:
+            self._output = LiveKitOutputTransport(
+                self, self._client, self._params, name=self._output_name
+            )
+        return self._output
+
+    @property
+    def participant_id(self) -> str:
+        """Get the participant ID for this transport.
+
+        Returns:
+            The participant ID assigned by LiveKit.
+        """
+        return self._client.participant_id
+
+    async def send_audio(self, frame: OutputAudioRawFrame):
+        """Send an audio frame to the LiveKit room.
+
+        Args:
+            frame: The audio frame to send.
+        """
+        if self._output:
+            await self._output.queue_frame(frame, FrameDirection.DOWNSTREAM)
+
+    def get_participants(self) -> List[str]:
+        """Get list of participant IDs in the room.
+
+        Returns:
+            List of participant IDs.
+        """
+        return self._client.get_participants()
+
+    async def get_participant_metadata(self, participant_id: str) -> dict:
+        """Get metadata for a specific participant.
+
+        Args:
+            participant_id: ID of the participant to get metadata for.
+
+        Returns:
+            Dictionary containing participant metadata.
+        """
+        return await self._client.get_participant_metadata(participant_id)
+
+    async def set_metadata(self, metadata: str):
+        """Set metadata for the local participant.
+
+        Args:
+            metadata: Metadata string to set.
+        """
+        await self._client.set_participant_metadata(metadata)
+
+    async def mute_participant(self, participant_id: str):
+        """Mute a specific participant's audio tracks.
+
+        Args:
+            participant_id: ID of the participant to mute.
+        """
+        await self._client.mute_participant(participant_id)
+
+    async def unmute_participant(self, participant_id: str):
+        """Unmute a specific participant's audio tracks.
+
+        Args:
+            participant_id: ID of the participant to unmute.
+        """
+        await self._client.unmute_participant(participant_id)
+
+    async def _on_connected(self):
+        """Handle room connected events."""
+        await self._call_event_handler("on_connected")
+
+    async def _on_disconnected(self):
+        """Handle room disconnected events."""
+        await self._call_event_handler("on_disconnected")
+
+    async def _on_participant_connected(self, participant_id: str):
+        """Handle participant connected events."""
+        await self._call_event_handler("on_participant_connected", participant_id)
+
+    async def _on_participant_disconnected(self, participant_id: str):
+        """Handle participant disconnected events."""
+        await self._call_event_handler("on_participant_disconnected", participant_id)
+        await self._call_event_handler("on_participant_left", participant_id, "disconnected")
+
+    async def _on_audio_track_subscribed(self, participant_id: str):
+        """Handle audio track subscribed events."""
+        await self._call_event_handler("on_audio_track_subscribed", participant_id)
+        participant = self._client.room.remote_participants.get(participant_id)
+        if participant:
+            for publication in participant.audio_tracks.values():
+                self._client._on_track_subscribed_wrapper(
+                    publication.track, publication, participant
+                )
+
+    async def _on_audio_track_unsubscribed(self, participant_id: str):
+        """Handle audio track unsubscribed events."""
+        await self._call_event_handler("on_audio_track_unsubscribed", participant_id)
+
+    async def _on_data_received(self, data: bytes, participant_id: str):
+        """Handle data received events."""
+        if self._input:
+            await self._input.push_app_message(data.decode(), participant_id)
+        await self._call_event_handler("on_data_received", data, participant_id)
+
+    async def send_message(self, message: str, participant_id: Optional[str] = None):
+        """Send a message to participants in the room.
+
+        Args:
+            message: The message string to send.
+            participant_id: Optional specific participant to send to.
+        """
+        if self._output:
+            frame = LiveKitTransportMessageFrame(message=message, participant_id=participant_id)
+            await self._output.send_message(frame)
+
+    async def send_message_urgent(self, message: str, participant_id: Optional[str] = None):
+        """Send an urgent message to participants in the room.
+
+        Args:
+            message: The urgent message string to send.
+            participant_id: Optional specific participant to send to.
+        """
+        if self._output:
+            frame = LiveKitTransportMessageUrgentFrame(
+                message=message, participant_id=participant_id
+            )
+            await self._output.send_message(frame)
+
+    async def on_room_event(self, event):
+        """Handle room events.
+
+        Args:
+            event: The room event to handle.
+        """
+        # Handle room events
+        pass
+
+    async def on_participant_event(self, event):
+        """Handle participant events.
+
+        Args:
+            event: The participant event to handle.
+        """
+        # Handle participant events
+        pass
+
+    async def on_track_event(self, event):
+        """Handle track events.
+
+        Args:
+            event: The track event to handle.
+        """
+        # Handle track events
+        pass
+
+    async def _on_call_state_updated(self, state: str):
+        """Handle call state update events."""
+        await self._call_event_handler("on_call_state_updated", self, state)
+
+    async def _on_first_participant_joined(self, participant_id: str):
+        """Handle first participant joined events."""
+        await self._call_event_handler("on_first_participant_joined", participant_id)