dv-pipecat-ai 0.0.74.dev770__py3-none-any.whl → 0.0.82.dev776__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
This version of dv-pipecat-ai might be problematic.
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/METADATA +137 -93
- dv_pipecat_ai-0.0.82.dev776.dist-info/RECORD +340 -0
- pipecat/__init__.py +17 -0
- pipecat/adapters/base_llm_adapter.py +36 -1
- pipecat/adapters/schemas/direct_function.py +296 -0
- pipecat/adapters/schemas/function_schema.py +15 -6
- pipecat/adapters/schemas/tools_schema.py +55 -7
- pipecat/adapters/services/anthropic_adapter.py +22 -3
- pipecat/adapters/services/aws_nova_sonic_adapter.py +23 -3
- pipecat/adapters/services/bedrock_adapter.py +22 -3
- pipecat/adapters/services/gemini_adapter.py +16 -3
- pipecat/adapters/services/open_ai_adapter.py +17 -2
- pipecat/adapters/services/open_ai_realtime_adapter.py +23 -3
- pipecat/audio/filters/base_audio_filter.py +30 -6
- pipecat/audio/filters/koala_filter.py +37 -2
- pipecat/audio/filters/krisp_filter.py +59 -6
- pipecat/audio/filters/noisereduce_filter.py +37 -0
- pipecat/audio/interruptions/base_interruption_strategy.py +25 -5
- pipecat/audio/interruptions/min_words_interruption_strategy.py +21 -4
- pipecat/audio/mixers/base_audio_mixer.py +30 -7
- pipecat/audio/mixers/soundfile_mixer.py +53 -6
- pipecat/audio/resamplers/base_audio_resampler.py +17 -9
- pipecat/audio/resamplers/resampy_resampler.py +26 -1
- pipecat/audio/resamplers/soxr_resampler.py +32 -1
- pipecat/audio/resamplers/soxr_stream_resampler.py +101 -0
- pipecat/audio/utils.py +194 -1
- pipecat/audio/vad/silero.py +60 -3
- pipecat/audio/vad/vad_analyzer.py +114 -30
- pipecat/clocks/base_clock.py +19 -0
- pipecat/clocks/system_clock.py +25 -0
- pipecat/extensions/voicemail/__init__.py +0 -0
- pipecat/extensions/voicemail/voicemail_detector.py +707 -0
- pipecat/frames/frames.py +590 -156
- pipecat/metrics/metrics.py +64 -1
- pipecat/observers/base_observer.py +58 -19
- pipecat/observers/loggers/debug_log_observer.py +56 -64
- pipecat/observers/loggers/llm_log_observer.py +8 -1
- pipecat/observers/loggers/transcription_log_observer.py +19 -7
- pipecat/observers/loggers/user_bot_latency_log_observer.py +32 -5
- pipecat/observers/turn_tracking_observer.py +26 -1
- pipecat/pipeline/base_pipeline.py +5 -7
- pipecat/pipeline/base_task.py +52 -9
- pipecat/pipeline/parallel_pipeline.py +121 -177
- pipecat/pipeline/pipeline.py +129 -20
- pipecat/pipeline/runner.py +50 -1
- pipecat/pipeline/sync_parallel_pipeline.py +132 -32
- pipecat/pipeline/task.py +263 -280
- pipecat/pipeline/task_observer.py +85 -34
- pipecat/pipeline/to_be_updated/merge_pipeline.py +32 -2
- pipecat/processors/aggregators/dtmf_aggregator.py +29 -22
- pipecat/processors/aggregators/gated.py +25 -24
- pipecat/processors/aggregators/gated_openai_llm_context.py +22 -2
- pipecat/processors/aggregators/llm_response.py +398 -89
- pipecat/processors/aggregators/openai_llm_context.py +161 -13
- pipecat/processors/aggregators/sentence.py +25 -14
- pipecat/processors/aggregators/user_response.py +28 -3
- pipecat/processors/aggregators/vision_image_frame.py +24 -14
- pipecat/processors/async_generator.py +28 -0
- pipecat/processors/audio/audio_buffer_processor.py +78 -37
- pipecat/processors/consumer_processor.py +25 -6
- pipecat/processors/filters/frame_filter.py +23 -0
- pipecat/processors/filters/function_filter.py +30 -0
- pipecat/processors/filters/identity_filter.py +17 -2
- pipecat/processors/filters/null_filter.py +24 -1
- pipecat/processors/filters/stt_mute_filter.py +56 -21
- pipecat/processors/filters/wake_check_filter.py +46 -3
- pipecat/processors/filters/wake_notifier_filter.py +21 -3
- pipecat/processors/frame_processor.py +488 -131
- pipecat/processors/frameworks/langchain.py +38 -3
- pipecat/processors/frameworks/rtvi.py +719 -34
- pipecat/processors/gstreamer/pipeline_source.py +41 -0
- pipecat/processors/idle_frame_processor.py +26 -3
- pipecat/processors/logger.py +23 -0
- pipecat/processors/metrics/frame_processor_metrics.py +77 -4
- pipecat/processors/metrics/sentry.py +42 -4
- pipecat/processors/producer_processor.py +34 -14
- pipecat/processors/text_transformer.py +22 -10
- pipecat/processors/transcript_processor.py +48 -29
- pipecat/processors/user_idle_processor.py +31 -21
- pipecat/runner/__init__.py +1 -0
- pipecat/runner/daily.py +132 -0
- pipecat/runner/livekit.py +148 -0
- pipecat/runner/run.py +543 -0
- pipecat/runner/types.py +67 -0
- pipecat/runner/utils.py +515 -0
- pipecat/serializers/base_serializer.py +42 -0
- pipecat/serializers/exotel.py +17 -6
- pipecat/serializers/genesys.py +95 -0
- pipecat/serializers/livekit.py +33 -0
- pipecat/serializers/plivo.py +16 -15
- pipecat/serializers/protobuf.py +37 -1
- pipecat/serializers/telnyx.py +18 -17
- pipecat/serializers/twilio.py +32 -16
- pipecat/services/ai_service.py +5 -3
- pipecat/services/anthropic/llm.py +113 -43
- pipecat/services/assemblyai/models.py +63 -5
- pipecat/services/assemblyai/stt.py +64 -11
- pipecat/services/asyncai/__init__.py +0 -0
- pipecat/services/asyncai/tts.py +501 -0
- pipecat/services/aws/llm.py +185 -111
- pipecat/services/aws/stt.py +217 -23
- pipecat/services/aws/tts.py +118 -52
- pipecat/services/aws/utils.py +101 -5
- pipecat/services/aws_nova_sonic/aws.py +82 -64
- pipecat/services/aws_nova_sonic/context.py +15 -6
- pipecat/services/azure/common.py +10 -2
- pipecat/services/azure/image.py +32 -0
- pipecat/services/azure/llm.py +9 -7
- pipecat/services/azure/stt.py +65 -2
- pipecat/services/azure/tts.py +154 -23
- pipecat/services/cartesia/stt.py +125 -8
- pipecat/services/cartesia/tts.py +102 -38
- pipecat/services/cerebras/llm.py +15 -23
- pipecat/services/deepgram/stt.py +19 -11
- pipecat/services/deepgram/tts.py +36 -0
- pipecat/services/deepseek/llm.py +14 -23
- pipecat/services/elevenlabs/tts.py +330 -64
- pipecat/services/fal/image.py +43 -0
- pipecat/services/fal/stt.py +48 -10
- pipecat/services/fireworks/llm.py +14 -21
- pipecat/services/fish/tts.py +109 -9
- pipecat/services/gemini_multimodal_live/__init__.py +1 -0
- pipecat/services/gemini_multimodal_live/events.py +83 -2
- pipecat/services/gemini_multimodal_live/file_api.py +189 -0
- pipecat/services/gemini_multimodal_live/gemini.py +218 -21
- pipecat/services/gladia/config.py +17 -10
- pipecat/services/gladia/stt.py +82 -36
- pipecat/services/google/frames.py +40 -0
- pipecat/services/google/google.py +2 -0
- pipecat/services/google/image.py +39 -2
- pipecat/services/google/llm.py +176 -58
- pipecat/services/google/llm_openai.py +26 -4
- pipecat/services/google/llm_vertex.py +37 -15
- pipecat/services/google/rtvi.py +41 -0
- pipecat/services/google/stt.py +65 -17
- pipecat/services/google/test-google-chirp.py +45 -0
- pipecat/services/google/tts.py +390 -19
- pipecat/services/grok/llm.py +8 -6
- pipecat/services/groq/llm.py +8 -6
- pipecat/services/groq/stt.py +13 -9
- pipecat/services/groq/tts.py +40 -0
- pipecat/services/hamsa/__init__.py +9 -0
- pipecat/services/hamsa/stt.py +241 -0
- pipecat/services/heygen/__init__.py +5 -0
- pipecat/services/heygen/api.py +281 -0
- pipecat/services/heygen/client.py +620 -0
- pipecat/services/heygen/video.py +338 -0
- pipecat/services/image_service.py +5 -3
- pipecat/services/inworld/__init__.py +1 -0
- pipecat/services/inworld/tts.py +592 -0
- pipecat/services/llm_service.py +127 -45
- pipecat/services/lmnt/tts.py +80 -7
- pipecat/services/mcp_service.py +85 -44
- pipecat/services/mem0/memory.py +42 -13
- pipecat/services/minimax/tts.py +74 -15
- pipecat/services/mistral/__init__.py +0 -0
- pipecat/services/mistral/llm.py +185 -0
- pipecat/services/moondream/vision.py +55 -10
- pipecat/services/neuphonic/tts.py +275 -48
- pipecat/services/nim/llm.py +8 -6
- pipecat/services/ollama/llm.py +27 -7
- pipecat/services/openai/base_llm.py +54 -16
- pipecat/services/openai/image.py +30 -0
- pipecat/services/openai/llm.py +7 -5
- pipecat/services/openai/stt.py +13 -9
- pipecat/services/openai/tts.py +42 -10
- pipecat/services/openai_realtime_beta/azure.py +11 -9
- pipecat/services/openai_realtime_beta/context.py +7 -5
- pipecat/services/openai_realtime_beta/events.py +10 -7
- pipecat/services/openai_realtime_beta/openai.py +37 -18
- pipecat/services/openpipe/llm.py +30 -24
- pipecat/services/openrouter/llm.py +9 -7
- pipecat/services/perplexity/llm.py +15 -19
- pipecat/services/piper/tts.py +26 -12
- pipecat/services/playht/tts.py +227 -65
- pipecat/services/qwen/llm.py +8 -6
- pipecat/services/rime/tts.py +128 -17
- pipecat/services/riva/stt.py +160 -22
- pipecat/services/riva/tts.py +67 -2
- pipecat/services/sambanova/llm.py +19 -17
- pipecat/services/sambanova/stt.py +14 -8
- pipecat/services/sarvam/tts.py +60 -13
- pipecat/services/simli/video.py +82 -21
- pipecat/services/soniox/__init__.py +0 -0
- pipecat/services/soniox/stt.py +398 -0
- pipecat/services/speechmatics/stt.py +29 -17
- pipecat/services/stt_service.py +47 -11
- pipecat/services/tavus/video.py +94 -25
- pipecat/services/together/llm.py +8 -6
- pipecat/services/tts_service.py +77 -53
- pipecat/services/ultravox/stt.py +46 -43
- pipecat/services/vision_service.py +5 -3
- pipecat/services/websocket_service.py +12 -11
- pipecat/services/whisper/base_stt.py +58 -12
- pipecat/services/whisper/stt.py +69 -58
- pipecat/services/xtts/tts.py +59 -2
- pipecat/sync/base_notifier.py +19 -0
- pipecat/sync/event_notifier.py +24 -0
- pipecat/tests/utils.py +73 -5
- pipecat/transcriptions/language.py +24 -0
- pipecat/transports/base_input.py +112 -8
- pipecat/transports/base_output.py +235 -13
- pipecat/transports/base_transport.py +119 -0
- pipecat/transports/local/audio.py +76 -0
- pipecat/transports/local/tk.py +84 -0
- pipecat/transports/network/fastapi_websocket.py +174 -15
- pipecat/transports/network/small_webrtc.py +383 -39
- pipecat/transports/network/webrtc_connection.py +214 -8
- pipecat/transports/network/websocket_client.py +171 -1
- pipecat/transports/network/websocket_server.py +147 -9
- pipecat/transports/services/daily.py +792 -70
- pipecat/transports/services/helpers/daily_rest.py +122 -129
- pipecat/transports/services/livekit.py +339 -4
- pipecat/transports/services/tavus.py +273 -38
- pipecat/utils/asyncio/task_manager.py +92 -186
- pipecat/utils/base_object.py +83 -1
- pipecat/utils/network.py +2 -0
- pipecat/utils/string.py +114 -58
- pipecat/utils/text/base_text_aggregator.py +44 -13
- pipecat/utils/text/base_text_filter.py +46 -0
- pipecat/utils/text/markdown_text_filter.py +70 -14
- pipecat/utils/text/pattern_pair_aggregator.py +18 -14
- pipecat/utils/text/simple_text_aggregator.py +43 -2
- pipecat/utils/text/skip_tags_aggregator.py +21 -13
- pipecat/utils/time.py +36 -0
- pipecat/utils/tracing/class_decorators.py +32 -7
- pipecat/utils/tracing/conversation_context_provider.py +12 -2
- pipecat/utils/tracing/service_attributes.py +80 -64
- pipecat/utils/tracing/service_decorators.py +48 -21
- pipecat/utils/tracing/setup.py +13 -7
- pipecat/utils/tracing/turn_context_provider.py +12 -2
- pipecat/utils/tracing/turn_trace_observer.py +27 -0
- pipecat/utils/utils.py +14 -14
- dv_pipecat_ai-0.0.74.dev770.dist-info/RECORD +0 -319
- pipecat/examples/daily_runner.py +0 -64
- pipecat/examples/run.py +0 -265
- pipecat/utils/asyncio/watchdog_async_iterator.py +0 -72
- pipecat/utils/asyncio/watchdog_event.py +0 -42
- pipecat/utils/asyncio/watchdog_priority_queue.py +0 -48
- pipecat/utils/asyncio/watchdog_queue.py +0 -48
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/top_level.txt +0 -0
- /pipecat/{examples → extensions}/__init__.py +0 -0
pipecat/transports/network/webrtc_connection.py

@@ -4,6 +4,13 @@
 # SPDX-License-Identifier: BSD 2-Clause License
 #
 
+"""Small WebRTC connection implementation for Pipecat.
+
+This module provides a WebRTC connection implementation using aiortc,
+with support for audio/video tracks, data channels, and signaling
+for real-time communication applications.
+"""
+
 import asyncio
 import json
 import time
@@ -32,39 +39,90 @@ except ModuleNotFoundError as e:
 SIGNALLING_TYPE = "signalling"
 AUDIO_TRANSCEIVER_INDEX = 0
 VIDEO_TRANSCEIVER_INDEX = 1
+SCREEN_VIDEO_TRANSCEIVER_INDEX = 2
 
 
 class TrackStatusMessage(BaseModel):
+    """Message for updating track enabled/disabled status.
+
+    Parameters:
+        type: Message type identifier.
+        receiver_index: Index of the track receiver to update.
+        enabled: Whether the track should be enabled or disabled.
+    """
+
     type: Literal["trackStatus"]
     receiver_index: int
     enabled: bool
 
 
 class RenegotiateMessage(BaseModel):
+    """Message requesting WebRTC renegotiation.
+
+    Parameters:
+        type: Message type identifier for renegotiation requests.
+    """
+
     type: Literal["renegotiate"] = "renegotiate"
 
 
 class PeerLeftMessage(BaseModel):
+    """Message indicating a peer has left the connection.
+
+    Parameters:
+        type: Message type identifier for peer departure.
+    """
+
     type: Literal["peerLeft"] = "peerLeft"
 
 
 class SignallingMessage:
+    """Union types for signaling message handling.
+
+    Parameters:
+        Inbound: Types of messages that can be received from peers.
+        outbound: Types of messages that can be sent to peers.
+    """
+
     Inbound = Union[TrackStatusMessage]  # in case we need to add new messages in the future
     outbound = Union[RenegotiateMessage]
 
 
 class SmallWebRTCTrack:
+    """Wrapper for WebRTC media tracks with enabled/disabled state management.
+
+    Provides additional functionality on top of aiortc MediaStreamTrack including
+    enable/disable control and frame discarding for audio and video streams.
+    """
+
     def __init__(self, track: MediaStreamTrack):
+        """Initialize the WebRTC track wrapper.
+
+        Args:
+            track: The underlying MediaStreamTrack to wrap.
+            index: The index of the track in the transceiver (0 for mic, 1 for cam, 2 for screen)
+        """
         self._track = track
         self._enabled = True
 
     def set_enabled(self, enabled: bool) -> None:
+        """Enable or disable the track.
+
+        Args:
+            enabled: Whether the track should be enabled for receiving frames.
+        """
        self._enabled = enabled
 
     def is_enabled(self) -> bool:
+        """Check if the track is currently enabled.
+
+        Returns:
+            True if the track is enabled for receiving frames.
+        """
        return self._enabled
 
     async def discard_old_frames(self):
+        """Discard old frames from the track queue to reduce latency."""
         remote_track = self._track
         if isinstance(remote_track, RemoteStreamTrack):
             if not hasattr(remote_track, "_queue") or not isinstance(
@@ -78,11 +136,24 @@ class SmallWebRTCTrack:
                 remote_track._queue.task_done()
 
     async def recv(self) -> Optional[Frame]:
+        """Receive the next frame from the track.
+
+        Returns:
+            The next frame, except for video tracks, where it returns the frame only if the track is enabled, otherwise, returns None.
+        """
         if not self._enabled and self._track.kind == "video":
             return None
         return await self._track.recv()
 
     def __getattr__(self, name):
+        """Forward attribute access to the underlying track.
+
+        Args:
+            name: The attribute name to access.
+
+        Returns:
+            The attribute value from the underlying track.
+        """
         # Forward other attribute/method calls to the underlying track
         return getattr(self._track, name)
 
@@ -92,7 +163,22 @@ IceServer = RTCIceServer
 
 
 class SmallWebRTCConnection(BaseObject):
+    """WebRTC connection implementation using aiortc.
+
+    Provides WebRTC peer connection functionality including ICE server configuration,
+    track management, data channel communication, and connection state handling
+    for real-time audio/video communication.
+    """
+
     def __init__(self, ice_servers: Optional[Union[List[str], List[IceServer]]] = None):
+        """Initialize the WebRTC connection.
+
+        Args:
+            ice_servers: List of ICE servers as URLs or IceServer objects.
+
+        Raises:
+            TypeError: If ice_servers contains mixed types or unsupported types.
+        """
         super().__init__()
         if not ice_servers:
             self.ice_servers: List[IceServer] = []
@@ -107,6 +193,7 @@ class SmallWebRTCConnection(BaseObject):
         self._track_getters = {
             AUDIO_TRANSCEIVER_INDEX: self.audio_input_track,
             VIDEO_TRANSCEIVER_INDEX: self.video_input_track,
+            SCREEN_VIDEO_TRANSCEIVER_INDEX: self.screen_video_input_track,
         }
 
         self._initialize()
@@ -126,13 +213,24 @@ class SmallWebRTCConnection(BaseObject):
 
     @property
     def pc(self) -> RTCPeerConnection:
+        """Get the underlying RTCPeerConnection.
+
+        Returns:
+            The aiortc RTCPeerConnection instance.
+        """
         return self._pc
 
     @property
     def pc_id(self) -> str:
+        """Get the peer connection identifier.
+
+        Returns:
+            The unique identifier for this peer connection.
+        """
         return self._pc_id
 
     def _initialize(self):
+        """Initialize the peer connection and associated components."""
         logger.debug("Initializing new peer connection")
         rtc_config = RTCConfiguration(iceServers=self.ice_servers)
 
@@ -147,6 +245,8 @@ class SmallWebRTCConnection(BaseObject):
         self._pending_app_messages = []
 
     def _setup_listeners(self):
+        """Set up event listeners for the peer connection."""
+
         @self._pc.on("datachannel")
         def on_datachannel(channel):
             self._data_channel = channel
@@ -208,6 +308,7 @@ class SmallWebRTCConnection(BaseObject):
             await self._call_event_handler("track-ended", track)
 
     async def _create_answer(self, sdp: str, type: str):
+        """Create an SDP answer for the given offer."""
         offer = RTCSessionDescription(sdp=sdp, type=type)
         await self._pc.setRemoteDescription(offer)
 
@@ -223,9 +324,16 @@ class SmallWebRTCConnection(BaseObject):
         self._answer = self._pc.localDescription
 
     async def initialize(self, sdp: str, type: str):
+        """Initialize the connection with an SDP offer.
+
+        Args:
+            sdp: The SDP offer string.
+            type: The SDP type (usually "offer").
+        """
         await self._create_answer(sdp, type)
 
     async def connect(self):
+        """Connect the WebRTC peer connection and handle initial setup."""
         self._connect_invoked = True
         # If we already connected, trigger again the connected event
         if self.is_connected():
@@ -238,9 +346,23 @@ class SmallWebRTCConnection(BaseObject):
             video_input_track = self.video_input_track()
             if video_input_track:
                 await self.video_input_track().discard_old_frames()
-                self.ask_to_renegotiate()
+            screen_video_input_track = self.screen_video_input_track()
+            if screen_video_input_track:
+                await self.screen_video_input_track().discard_old_frames()
+            if video_input_track or screen_video_input_track:
+                # This prevents an issue where sometimes the WebRTC connection can be established
+                # before the bot is ready to receive video. When that happens, we can lose a couple
+                # of seconds of video before we received a key frame to finally start displaying it.
+                self.ask_to_renegotiate()
 
     async def renegotiate(self, sdp: str, type: str, restart_pc: bool = False):
+        """Renegotiate the WebRTC connection with new parameters.
+
+        Args:
+            sdp: The new SDP offer string.
+            type: The SDP type (usually "offer").
+            restart_pc: Whether to restart the peer connection entirely.
+        """
         logger.debug(f"Renegotiating {self._pc_id}")
 
         if restart_pc:
@@ -264,14 +386,24 @@ class SmallWebRTCConnection(BaseObject):
         asyncio.create_task(delayed_task())
 
     def force_transceivers_to_send_recv(self):
+        """Force all transceivers to bidirectional send/receive mode."""
         for transceiver in self._pc.getTransceivers():
-            transceiver.direction = "sendrecv"
+            # For now, we only support sendrecv for camera audio and video (the first two transceivers)
+            if transceiver.mid == "0" or transceiver.mid == "1":
+                transceiver.direction = "sendrecv"
+            else:
+                transceiver.direction = "recvonly"
             # logger.debug(
             #     f"Transceiver: {transceiver}, Mid: {transceiver.mid}, Direction: {transceiver.direction}"
             # )
            # logger.debug(f"Sender track: {transceiver.sender.track}")
 
     def replace_audio_track(self, track):
+        """Replace the audio track in the first transceiver.
+
+        Args:
+            track: The new audio track to use for sending.
+        """
         logger.debug(f"Replacing audio track {track.kind}")
         # Transceivers always appear in creation-order for both peers
         # For now we are only considering that we are going to have 02 transceivers,
@@ -283,6 +415,11 @@ class SmallWebRTCConnection(BaseObject):
             logger.warning("Audio transceiver not found. Cannot replace audio track.")
 
     def replace_video_track(self, track):
+        """Replace the video track in the second transceiver.
+
+        Args:
+            track: The new video track to use for sending.
+        """
         logger.debug(f"Replacing video track {track.kind}")
         # Transceivers always appear in creation-order for both peers
         # For now we are only considering that we are going to have 02 transceivers,
@@ -293,11 +430,29 @@ class SmallWebRTCConnection(BaseObject):
         else:
             logger.warning("Video transceiver not found. Cannot replace video track.")
 
+    def replace_screen_video_track(self, track):
+        """Replace the screen video track in the second transceiver.
+
+        Args:
+            track: The new screen video track to use for sending.
+        """
+        logger.debug(f"Replacing screen video track {track.kind}")
+        # Transceivers always appear in creation-order for both peers
+        # For now we are only considering that we are going to have 02 transceivers,
+        # one for audio and one for video
+        transceivers = self._pc.getTransceivers()
+        if len(transceivers) > 2 and transceivers[2].sender:
+            transceivers[2].sender.replaceTrack(track)
+        else:
+            logger.warning("Screen video transceiver not found. Cannot replace screen video track.")
+
     async def disconnect(self):
+        """Disconnect from the WebRTC peer connection."""
         self.send_app_message({"type": SIGNALLING_TYPE, "message": PeerLeftMessage().model_dump()})
         await self._close()
 
     async def _close(self):
+        """Close the peer connection and cleanup resources."""
         if self._pc:
             await self._pc.close()
         self._message_queue.clear()
@@ -305,6 +460,12 @@ class SmallWebRTCConnection(BaseObject):
         self._track_map = {}
 
     def get_answer(self):
+        """Get the SDP answer for the current connection.
+
+        Returns:
+            Dictionary containing SDP answer, type, and peer connection ID,
+            or None if no answer is available.
+        """
         if not self._answer:
             return None
 
@@ -315,6 +476,7 @@ class SmallWebRTCConnection(BaseObject):
         }
 
     async def _handle_new_connection_state(self):
+        """Handle changes in the peer connection state."""
         state = self._pc.connectionState
         if state == "connected" and not self._connect_invoked:
             # We are going to wait until the pipeline is ready before triggering the event
@@ -328,7 +490,12 @@ class SmallWebRTCConnection(BaseObject):
     # Despite the fact that aiortc provides this listener, they don't have a status for "disconnected"
     # So, there is no advantage in looking at self._pc.connectionState
     # That is why we are trying to keep our own state
-    def is_connected(self):
+    def is_connected(self) -> bool:
+        """Check if the WebRTC connection is currently active.
+
+        Returns:
+            True if the connection is active and receiving data.
+        """
         # If the small webrtc transport has never invoked to connect
         # we are acting like if we are not connected
         if not self._connect_invoked:
@@ -342,12 +509,17 @@ class SmallWebRTCConnection(BaseObject):
         return (time.time() - self._last_received_time) < 3
 
     def audio_input_track(self):
+        """Get the audio input track wrapper.
+
+        Returns:
+            SmallWebRTCTrack wrapper for the audio track, or None if unavailable.
+        """
         if self._track_map.get(AUDIO_TRANSCEIVER_INDEX):
             return self._track_map[AUDIO_TRANSCEIVER_INDEX]
 
         # Transceivers always appear in creation-order for both peers
-        # For now we are only considering that we are going to have 02 transceivers,
-        # one for audio and one for video
+        # For support 3 receivers in the following order:
+        # audio, video, screenVideo
         transceivers = self._pc.getTransceivers()
         if len(transceivers) == 0 or not transceivers[AUDIO_TRANSCEIVER_INDEX].receiver:
             logger.warning("No audio transceiver is available")
@@ -359,12 +531,17 @@ class SmallWebRTCConnection(BaseObject):
         return audio_track
 
     def video_input_track(self):
+        """Get the video input track wrapper.
+
+        Returns:
+            SmallWebRTCTrack wrapper for the video track, or None if unavailable.
+        """
         if self._track_map.get(VIDEO_TRANSCEIVER_INDEX):
             return self._track_map[VIDEO_TRANSCEIVER_INDEX]
 
         # Transceivers always appear in creation-order for both peers
-        # For now we are only considering that we are going to have 02 transceivers,
-        # one for audio and one for video
+        # For support 3 receivers in the following order:
+        # audio, video, screenVideo
         transceivers = self._pc.getTransceivers()
         if len(transceivers) <= 1 or not transceivers[VIDEO_TRANSCEIVER_INDEX].receiver:
             logger.warning("No video transceiver is available")
@@ -375,7 +552,34 @@ class SmallWebRTCConnection(BaseObject):
         self._track_map[VIDEO_TRANSCEIVER_INDEX] = video_track
         return video_track
 
+    def screen_video_input_track(self):
+        """Get the screen video input track wrapper.
+
+        Returns:
+            SmallWebRTCTrack wrapper for the screen video track, or None if unavailable.
+        """
+        if self._track_map.get(SCREEN_VIDEO_TRANSCEIVER_INDEX):
+            return self._track_map[SCREEN_VIDEO_TRANSCEIVER_INDEX]
+
+        # Transceivers always appear in creation-order for both peers
+        # For support 3 receivers in the following order:
+        # audio, video, screenVideo
+        transceivers = self._pc.getTransceivers()
+        if len(transceivers) <= 2 or not transceivers[SCREEN_VIDEO_TRANSCEIVER_INDEX].receiver:
+            logger.warning("No screen video transceiver is available")
+            return None
+
+        track = transceivers[SCREEN_VIDEO_TRANSCEIVER_INDEX].receiver.track
+        video_track = SmallWebRTCTrack(track) if track else None
+        self._track_map[SCREEN_VIDEO_TRANSCEIVER_INDEX] = video_track
+        return video_track
+
     def send_app_message(self, message: Any):
+        """Send an application message through the data channel.
+
+        Args:
+            message: The message to send (will be JSON serialized).
+        """
         json_message = json.dumps(message)
         if self._data_channel and self._data_channel.readyState == "open":
             self._data_channel.send(json_message)
@@ -384,6 +588,7 @@ class SmallWebRTCConnection(BaseObject):
             self._message_queue.append(json_message)
 
     def ask_to_renegotiate(self):
+        """Request renegotiation of the WebRTC connection."""
         if self._renegotiation_in_progress:
             return
 
@@ -393,6 +598,7 @@ class SmallWebRTCConnection(BaseObject):
         )
 
     def _handle_signalling_message(self, message):
+        """Handle incoming signaling messages."""
         logger.debug(f"Signalling message received: {message}")
         inbound_adapter = TypeAdapter(SignallingMessage.Inbound)
         signalling_message = inbound_adapter.validate_python(message)
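
For context, below is a minimal sketch of how the API documented in this diff might be driven. It is not taken from the package: the STUN server URL, the function and variable names, and the surrounding signaling flow (how offer_sdp arrives and how the answer reaches the remote peer) are illustrative assumptions. The module path, class, and method names come from the diff above.

# Hypothetical usage sketch based on the signatures shown in the diff above.
from pipecat.transports.network.webrtc_connection import SmallWebRTCConnection


async def answer_offer(offer_sdp: str) -> dict:
    # ice_servers accepts plain STUN/TURN URLs or IceServer objects; the
    # Google STUN URL here is an assumption, not a package default.
    connection = SmallWebRTCConnection(ice_servers=["stun:stun.l.google.com:19302"])

    # Feed the remote SDP offer in; the connection builds and stores a local answer.
    await connection.initialize(sdp=offer_sdp, type="offer")
    answer = connection.get_answer()  # dict with the SDP answer, type, and peer connection ID

    # connect() marks the connection as invoked; if the peer is already connected it
    # re-emits the "connected" event, discards stale frames and, when video or screen
    # video is present, asks the client to renegotiate (per the connect() hunk above).
    await connection.connect()

    # The new third transceiver exposes screen sharing alongside mic and camera.
    screen_track = connection.screen_video_input_track()
    if screen_track:
        first_frame = await screen_track.recv()  # returns None while a video track is disabled

    return answer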
|