dv-pipecat-ai 0.0.74.dev770__py3-none-any.whl → 0.0.82.dev776__py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Potentially problematic release.
This version of dv-pipecat-ai might be problematic.
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/METADATA +137 -93
- dv_pipecat_ai-0.0.82.dev776.dist-info/RECORD +340 -0
- pipecat/__init__.py +17 -0
- pipecat/adapters/base_llm_adapter.py +36 -1
- pipecat/adapters/schemas/direct_function.py +296 -0
- pipecat/adapters/schemas/function_schema.py +15 -6
- pipecat/adapters/schemas/tools_schema.py +55 -7
- pipecat/adapters/services/anthropic_adapter.py +22 -3
- pipecat/adapters/services/aws_nova_sonic_adapter.py +23 -3
- pipecat/adapters/services/bedrock_adapter.py +22 -3
- pipecat/adapters/services/gemini_adapter.py +16 -3
- pipecat/adapters/services/open_ai_adapter.py +17 -2
- pipecat/adapters/services/open_ai_realtime_adapter.py +23 -3
- pipecat/audio/filters/base_audio_filter.py +30 -6
- pipecat/audio/filters/koala_filter.py +37 -2
- pipecat/audio/filters/krisp_filter.py +59 -6
- pipecat/audio/filters/noisereduce_filter.py +37 -0
- pipecat/audio/interruptions/base_interruption_strategy.py +25 -5
- pipecat/audio/interruptions/min_words_interruption_strategy.py +21 -4
- pipecat/audio/mixers/base_audio_mixer.py +30 -7
- pipecat/audio/mixers/soundfile_mixer.py +53 -6
- pipecat/audio/resamplers/base_audio_resampler.py +17 -9
- pipecat/audio/resamplers/resampy_resampler.py +26 -1
- pipecat/audio/resamplers/soxr_resampler.py +32 -1
- pipecat/audio/resamplers/soxr_stream_resampler.py +101 -0
- pipecat/audio/utils.py +194 -1
- pipecat/audio/vad/silero.py +60 -3
- pipecat/audio/vad/vad_analyzer.py +114 -30
- pipecat/clocks/base_clock.py +19 -0
- pipecat/clocks/system_clock.py +25 -0
- pipecat/extensions/voicemail/__init__.py +0 -0
- pipecat/extensions/voicemail/voicemail_detector.py +707 -0
- pipecat/frames/frames.py +590 -156
- pipecat/metrics/metrics.py +64 -1
- pipecat/observers/base_observer.py +58 -19
- pipecat/observers/loggers/debug_log_observer.py +56 -64
- pipecat/observers/loggers/llm_log_observer.py +8 -1
- pipecat/observers/loggers/transcription_log_observer.py +19 -7
- pipecat/observers/loggers/user_bot_latency_log_observer.py +32 -5
- pipecat/observers/turn_tracking_observer.py +26 -1
- pipecat/pipeline/base_pipeline.py +5 -7
- pipecat/pipeline/base_task.py +52 -9
- pipecat/pipeline/parallel_pipeline.py +121 -177
- pipecat/pipeline/pipeline.py +129 -20
- pipecat/pipeline/runner.py +50 -1
- pipecat/pipeline/sync_parallel_pipeline.py +132 -32
- pipecat/pipeline/task.py +263 -280
- pipecat/pipeline/task_observer.py +85 -34
- pipecat/pipeline/to_be_updated/merge_pipeline.py +32 -2
- pipecat/processors/aggregators/dtmf_aggregator.py +29 -22
- pipecat/processors/aggregators/gated.py +25 -24
- pipecat/processors/aggregators/gated_openai_llm_context.py +22 -2
- pipecat/processors/aggregators/llm_response.py +398 -89
- pipecat/processors/aggregators/openai_llm_context.py +161 -13
- pipecat/processors/aggregators/sentence.py +25 -14
- pipecat/processors/aggregators/user_response.py +28 -3
- pipecat/processors/aggregators/vision_image_frame.py +24 -14
- pipecat/processors/async_generator.py +28 -0
- pipecat/processors/audio/audio_buffer_processor.py +78 -37
- pipecat/processors/consumer_processor.py +25 -6
- pipecat/processors/filters/frame_filter.py +23 -0
- pipecat/processors/filters/function_filter.py +30 -0
- pipecat/processors/filters/identity_filter.py +17 -2
- pipecat/processors/filters/null_filter.py +24 -1
- pipecat/processors/filters/stt_mute_filter.py +56 -21
- pipecat/processors/filters/wake_check_filter.py +46 -3
- pipecat/processors/filters/wake_notifier_filter.py +21 -3
- pipecat/processors/frame_processor.py +488 -131
- pipecat/processors/frameworks/langchain.py +38 -3
- pipecat/processors/frameworks/rtvi.py +719 -34
- pipecat/processors/gstreamer/pipeline_source.py +41 -0
- pipecat/processors/idle_frame_processor.py +26 -3
- pipecat/processors/logger.py +23 -0
- pipecat/processors/metrics/frame_processor_metrics.py +77 -4
- pipecat/processors/metrics/sentry.py +42 -4
- pipecat/processors/producer_processor.py +34 -14
- pipecat/processors/text_transformer.py +22 -10
- pipecat/processors/transcript_processor.py +48 -29
- pipecat/processors/user_idle_processor.py +31 -21
- pipecat/runner/__init__.py +1 -0
- pipecat/runner/daily.py +132 -0
- pipecat/runner/livekit.py +148 -0
- pipecat/runner/run.py +543 -0
- pipecat/runner/types.py +67 -0
- pipecat/runner/utils.py +515 -0
- pipecat/serializers/base_serializer.py +42 -0
- pipecat/serializers/exotel.py +17 -6
- pipecat/serializers/genesys.py +95 -0
- pipecat/serializers/livekit.py +33 -0
- pipecat/serializers/plivo.py +16 -15
- pipecat/serializers/protobuf.py +37 -1
- pipecat/serializers/telnyx.py +18 -17
- pipecat/serializers/twilio.py +32 -16
- pipecat/services/ai_service.py +5 -3
- pipecat/services/anthropic/llm.py +113 -43
- pipecat/services/assemblyai/models.py +63 -5
- pipecat/services/assemblyai/stt.py +64 -11
- pipecat/services/asyncai/__init__.py +0 -0
- pipecat/services/asyncai/tts.py +501 -0
- pipecat/services/aws/llm.py +185 -111
- pipecat/services/aws/stt.py +217 -23
- pipecat/services/aws/tts.py +118 -52
- pipecat/services/aws/utils.py +101 -5
- pipecat/services/aws_nova_sonic/aws.py +82 -64
- pipecat/services/aws_nova_sonic/context.py +15 -6
- pipecat/services/azure/common.py +10 -2
- pipecat/services/azure/image.py +32 -0
- pipecat/services/azure/llm.py +9 -7
- pipecat/services/azure/stt.py +65 -2
- pipecat/services/azure/tts.py +154 -23
- pipecat/services/cartesia/stt.py +125 -8
- pipecat/services/cartesia/tts.py +102 -38
- pipecat/services/cerebras/llm.py +15 -23
- pipecat/services/deepgram/stt.py +19 -11
- pipecat/services/deepgram/tts.py +36 -0
- pipecat/services/deepseek/llm.py +14 -23
- pipecat/services/elevenlabs/tts.py +330 -64
- pipecat/services/fal/image.py +43 -0
- pipecat/services/fal/stt.py +48 -10
- pipecat/services/fireworks/llm.py +14 -21
- pipecat/services/fish/tts.py +109 -9
- pipecat/services/gemini_multimodal_live/__init__.py +1 -0
- pipecat/services/gemini_multimodal_live/events.py +83 -2
- pipecat/services/gemini_multimodal_live/file_api.py +189 -0
- pipecat/services/gemini_multimodal_live/gemini.py +218 -21
- pipecat/services/gladia/config.py +17 -10
- pipecat/services/gladia/stt.py +82 -36
- pipecat/services/google/frames.py +40 -0
- pipecat/services/google/google.py +2 -0
- pipecat/services/google/image.py +39 -2
- pipecat/services/google/llm.py +176 -58
- pipecat/services/google/llm_openai.py +26 -4
- pipecat/services/google/llm_vertex.py +37 -15
- pipecat/services/google/rtvi.py +41 -0
- pipecat/services/google/stt.py +65 -17
- pipecat/services/google/test-google-chirp.py +45 -0
- pipecat/services/google/tts.py +390 -19
- pipecat/services/grok/llm.py +8 -6
- pipecat/services/groq/llm.py +8 -6
- pipecat/services/groq/stt.py +13 -9
- pipecat/services/groq/tts.py +40 -0
- pipecat/services/hamsa/__init__.py +9 -0
- pipecat/services/hamsa/stt.py +241 -0
- pipecat/services/heygen/__init__.py +5 -0
- pipecat/services/heygen/api.py +281 -0
- pipecat/services/heygen/client.py +620 -0
- pipecat/services/heygen/video.py +338 -0
- pipecat/services/image_service.py +5 -3
- pipecat/services/inworld/__init__.py +1 -0
- pipecat/services/inworld/tts.py +592 -0
- pipecat/services/llm_service.py +127 -45
- pipecat/services/lmnt/tts.py +80 -7
- pipecat/services/mcp_service.py +85 -44
- pipecat/services/mem0/memory.py +42 -13
- pipecat/services/minimax/tts.py +74 -15
- pipecat/services/mistral/__init__.py +0 -0
- pipecat/services/mistral/llm.py +185 -0
- pipecat/services/moondream/vision.py +55 -10
- pipecat/services/neuphonic/tts.py +275 -48
- pipecat/services/nim/llm.py +8 -6
- pipecat/services/ollama/llm.py +27 -7
- pipecat/services/openai/base_llm.py +54 -16
- pipecat/services/openai/image.py +30 -0
- pipecat/services/openai/llm.py +7 -5
- pipecat/services/openai/stt.py +13 -9
- pipecat/services/openai/tts.py +42 -10
- pipecat/services/openai_realtime_beta/azure.py +11 -9
- pipecat/services/openai_realtime_beta/context.py +7 -5
- pipecat/services/openai_realtime_beta/events.py +10 -7
- pipecat/services/openai_realtime_beta/openai.py +37 -18
- pipecat/services/openpipe/llm.py +30 -24
- pipecat/services/openrouter/llm.py +9 -7
- pipecat/services/perplexity/llm.py +15 -19
- pipecat/services/piper/tts.py +26 -12
- pipecat/services/playht/tts.py +227 -65
- pipecat/services/qwen/llm.py +8 -6
- pipecat/services/rime/tts.py +128 -17
- pipecat/services/riva/stt.py +160 -22
- pipecat/services/riva/tts.py +67 -2
- pipecat/services/sambanova/llm.py +19 -17
- pipecat/services/sambanova/stt.py +14 -8
- pipecat/services/sarvam/tts.py +60 -13
- pipecat/services/simli/video.py +82 -21
- pipecat/services/soniox/__init__.py +0 -0
- pipecat/services/soniox/stt.py +398 -0
- pipecat/services/speechmatics/stt.py +29 -17
- pipecat/services/stt_service.py +47 -11
- pipecat/services/tavus/video.py +94 -25
- pipecat/services/together/llm.py +8 -6
- pipecat/services/tts_service.py +77 -53
- pipecat/services/ultravox/stt.py +46 -43
- pipecat/services/vision_service.py +5 -3
- pipecat/services/websocket_service.py +12 -11
- pipecat/services/whisper/base_stt.py +58 -12
- pipecat/services/whisper/stt.py +69 -58
- pipecat/services/xtts/tts.py +59 -2
- pipecat/sync/base_notifier.py +19 -0
- pipecat/sync/event_notifier.py +24 -0
- pipecat/tests/utils.py +73 -5
- pipecat/transcriptions/language.py +24 -0
- pipecat/transports/base_input.py +112 -8
- pipecat/transports/base_output.py +235 -13
- pipecat/transports/base_transport.py +119 -0
- pipecat/transports/local/audio.py +76 -0
- pipecat/transports/local/tk.py +84 -0
- pipecat/transports/network/fastapi_websocket.py +174 -15
- pipecat/transports/network/small_webrtc.py +383 -39
- pipecat/transports/network/webrtc_connection.py +214 -8
- pipecat/transports/network/websocket_client.py +171 -1
- pipecat/transports/network/websocket_server.py +147 -9
- pipecat/transports/services/daily.py +792 -70
- pipecat/transports/services/helpers/daily_rest.py +122 -129
- pipecat/transports/services/livekit.py +339 -4
- pipecat/transports/services/tavus.py +273 -38
- pipecat/utils/asyncio/task_manager.py +92 -186
- pipecat/utils/base_object.py +83 -1
- pipecat/utils/network.py +2 -0
- pipecat/utils/string.py +114 -58
- pipecat/utils/text/base_text_aggregator.py +44 -13
- pipecat/utils/text/base_text_filter.py +46 -0
- pipecat/utils/text/markdown_text_filter.py +70 -14
- pipecat/utils/text/pattern_pair_aggregator.py +18 -14
- pipecat/utils/text/simple_text_aggregator.py +43 -2
- pipecat/utils/text/skip_tags_aggregator.py +21 -13
- pipecat/utils/time.py +36 -0
- pipecat/utils/tracing/class_decorators.py +32 -7
- pipecat/utils/tracing/conversation_context_provider.py +12 -2
- pipecat/utils/tracing/service_attributes.py +80 -64
- pipecat/utils/tracing/service_decorators.py +48 -21
- pipecat/utils/tracing/setup.py +13 -7
- pipecat/utils/tracing/turn_context_provider.py +12 -2
- pipecat/utils/tracing/turn_trace_observer.py +27 -0
- pipecat/utils/utils.py +14 -14
- dv_pipecat_ai-0.0.74.dev770.dist-info/RECORD +0 -319
- pipecat/examples/daily_runner.py +0 -64
- pipecat/examples/run.py +0 -265
- pipecat/utils/asyncio/watchdog_async_iterator.py +0 -72
- pipecat/utils/asyncio/watchdog_event.py +0 -42
- pipecat/utils/asyncio/watchdog_priority_queue.py +0 -48
- pipecat/utils/asyncio/watchdog_queue.py +0 -48
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/top_level.txt +0 -0
- /pipecat/{examples → extensions}/__init__.py +0 -0
pipecat/processors/gstreamer/pipeline_source.py
CHANGED
```diff
@@ -4,6 +4,8 @@
 # SPDX-License-Identifier: BSD 2-Clause License
 #
 
+"""GStreamer pipeline source integration for Pipecat."""
+
 import asyncio
 from typing import Optional
 
@@ -36,7 +38,24 @@ except ModuleNotFoundError as e:
 
 
 class GStreamerPipelineSource(FrameProcessor):
+    """A frame processor that uses GStreamer pipelines as media sources.
+
+    This processor creates and manages GStreamer pipelines to generate audio and video
+    output frames. It handles pipeline lifecycle, decoding, format conversion, and
+    frame generation with configurable output parameters.
+    """
+
     class OutputParams(BaseModel):
+        """Output configuration parameters for GStreamer pipeline.
+
+        Parameters:
+            video_width: Width of output video frames in pixels.
+            video_height: Height of output video frames in pixels.
+            audio_sample_rate: Sample rate for audio output. If None, uses frame sample rate.
+            audio_channels: Number of audio channels for output.
+            clock_sync: Whether to synchronize output with pipeline clock.
+        """
+
         video_width: int = 1280
         video_height: int = 720
         audio_sample_rate: Optional[int] = None
@@ -44,6 +63,13 @@ class GStreamerPipelineSource(FrameProcessor):
         clock_sync: bool = True
 
     def __init__(self, *, pipeline: str, out_params: Optional[OutputParams] = None, **kwargs):
+        """Initialize the GStreamer pipeline source.
+
+        Args:
+            pipeline: GStreamer pipeline description string for the source.
+            out_params: Output configuration parameters. If None, uses defaults.
+            **kwargs: Additional arguments passed to parent FrameProcessor.
+        """
         super().__init__(**kwargs)
 
         self._out_params = out_params or GStreamerPipelineSource.OutputParams()
@@ -67,6 +93,12 @@ class GStreamerPipelineSource(FrameProcessor):
         bus.connect("message", self._on_gstreamer_message)
 
     async def process_frame(self, frame: Frame, direction: FrameDirection):
+        """Process incoming frames and manage GStreamer pipeline lifecycle.
+
+        Args:
+            frame: The frame to process.
+            direction: The direction of frame processing.
+        """
         await super().process_frame(frame, direction)
 
         # Specific system frames
@@ -92,13 +124,16 @@ class GStreamerPipelineSource(FrameProcessor):
             await self.push_frame(frame, direction)
 
     async def _start(self, frame: StartFrame):
+        """Start the GStreamer pipeline."""
         self._sample_rate = self._out_params.audio_sample_rate or frame.audio_out_sample_rate
         self._player.set_state(Gst.State.PLAYING)
 
     async def _stop(self, frame: EndFrame):
+        """Stop the GStreamer pipeline."""
         self._player.set_state(Gst.State.NULL)
 
     async def _cancel(self, frame: CancelFrame):
+        """Cancel the GStreamer pipeline."""
         self._player.set_state(Gst.State.NULL)
 
     #
@@ -106,6 +141,7 @@ class GStreamerPipelineSource(FrameProcessor):
     #
 
     def _on_gstreamer_message(self, bus: Gst.Bus, message: Gst.Message):
+        """Handle GStreamer bus messages."""
         t = message.type
         if t == Gst.MessageType.ERROR:
             err, debug = message.parse_error()
@@ -113,6 +149,7 @@ class GStreamerPipelineSource(FrameProcessor):
         return True
 
     def _decodebin_callback(self, decodebin: Gst.Element, pad: Gst.Pad):
+        """Handle new pads from decodebin element."""
         caps_string = pad.get_current_caps().to_string()
         if caps_string.startswith("audio"):
             self._decodebin_audio(pad)
@@ -120,6 +157,7 @@ class GStreamerPipelineSource(FrameProcessor):
             self._decodebin_video(pad)
 
     def _decodebin_audio(self, pad: Gst.Pad):
+        """Set up audio processing pipeline from decoded audio pad."""
         queue_audio = Gst.ElementFactory.make("queue", None)
         audioconvert = Gst.ElementFactory.make("audioconvert", None)
         audioresample = Gst.ElementFactory.make("audioresample", None)
@@ -153,6 +191,7 @@ class GStreamerPipelineSource(FrameProcessor):
         pad.link(queue_pad)
 
     def _decodebin_video(self, pad: Gst.Pad):
+        """Set up video processing pipeline from decoded video pad."""
         queue_video = Gst.ElementFactory.make("queue", None)
         videoconvert = Gst.ElementFactory.make("videoconvert", None)
         videoscale = Gst.ElementFactory.make("videoscale", None)
@@ -187,6 +226,7 @@ class GStreamerPipelineSource(FrameProcessor):
         pad.link(queue_pad)
 
     def _appsink_audio_new_sample(self, appsink: GstApp.AppSink):
+        """Handle new audio samples from GStreamer appsink."""
         buffer = appsink.pull_sample().get_buffer()
         (_, info) = buffer.map(Gst.MapFlags.READ)
         frame = OutputAudioRawFrame(
@@ -199,6 +239,7 @@ class GStreamerPipelineSource(FrameProcessor):
         return Gst.FlowReturn.OK
 
     def _appsink_video_new_sample(self, appsink: GstApp.AppSink):
+        """Handle new video samples from GStreamer appsink."""
        buffer = appsink.pull_sample().get_buffer()
         (_, info) = buffer.map(Gst.MapFlags.READ)
         frame = OutputImageRawFrame(
```
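The docstrings added above define the public surface: a keyword-only `pipeline` description string plus an optional nested `OutputParams`. A minimal, hedged construction sketch follows; the GStreamer description string and values are illustrative, and importing the module requires the optional GStreamer dependencies:

```python
# Sketch only: constructing the source with explicit output parameters.
from pipecat.processors.gstreamer.pipeline_source import GStreamerPipelineSource

gst_source = GStreamerPipelineSource(
    pipeline="videotestsrc is-live=true",  # illustrative GStreamer description
    out_params=GStreamerPipelineSource.OutputParams(
        video_width=1280,        # defaults documented in the diff
        video_height=720,
        audio_sample_rate=None,  # None -> fall back to StartFrame.audio_out_sample_rate
        clock_sync=True,
    ),
)
```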
pipecat/processors/idle_frame_processor.py
CHANGED
```diff
@@ -4,6 +4,8 @@
 # SPDX-License-Identifier: BSD 2-Clause License
 #
 
+"""Idle frame processor for timeout-based callback execution."""
+
 import asyncio
 from typing import Awaitable, Callable, List, Optional
 
@@ -12,9 +14,11 @@ from pipecat.processors.frame_processor import FrameDirection, FrameProcessor
 
 
 class IdleFrameProcessor(FrameProcessor):
-    """
-
-
+    """Monitors frame activity and triggers callbacks on timeout.
+
+    This processor waits to receive any frame or specific frame types within a
+    given timeout period. If the timeout is reached before receiving the expected
+    frames, the provided callback will be executed.
     """
 
     def __init__(
@@ -25,6 +29,16 @@ class IdleFrameProcessor(FrameProcessor):
         types: Optional[List[type]] = None,
         **kwargs,
     ):
+        """Initialize the idle frame processor.
+
+        Args:
+            callback: Async callback function to execute on timeout. Receives
+                this processor instance as an argument.
+            timeout: Timeout duration in seconds before triggering the callback.
+            types: Optional list of frame types to monitor. If None, monitors
+                all frames.
+            **kwargs: Additional arguments passed to parent class.
+        """
         super().__init__(**kwargs)
 
         self._callback = callback
@@ -33,6 +47,12 @@ class IdleFrameProcessor(FrameProcessor):
         self._idle_task = None
 
     async def process_frame(self, frame: Frame, direction: FrameDirection):
+        """Process incoming frames and manage idle timeout monitoring.
+
+        Args:
+            frame: The frame to process.
+            direction: The direction of frame flow in the pipeline.
+        """
         await super().process_frame(frame, direction)
 
         if isinstance(frame, StartFrame):
@@ -50,15 +70,18 @@ class IdleFrameProcessor(FrameProcessor):
             self._idle_event.set()
 
     async def cleanup(self):
+        """Clean up resources and cancel pending tasks."""
         if self._idle_task:
             await self.cancel_task(self._idle_task)
 
     def _create_idle_task(self):
+        """Create and start the idle monitoring task."""
         if not self._idle_task:
             self._idle_event = asyncio.Event()
             self._idle_task = self.create_task(self._idle_task_handler())
 
     async def _idle_task_handler(self):
+        """Handle idle timeout monitoring and callback execution."""
         while True:
             try:
                 await asyncio.wait_for(self._idle_event.wait(), timeout=self._timeout)
```
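As a quick illustration of the documented constructor, a hedged sketch that fires a callback when no user speech frame arrives in time; the frame type and timeout are illustrative:

```python
# Sketch only: run a callback if no UserStartedSpeakingFrame is seen within 10 s.
from pipecat.frames.frames import UserStartedSpeakingFrame
from pipecat.processors.idle_frame_processor import IdleFrameProcessor


async def on_idle(processor: IdleFrameProcessor):
    # The processor passes itself to the callback, per the new docstring.
    print("No user speech before the timeout")


idle = IdleFrameProcessor(
    callback=on_idle,
    timeout=10.0,
    types=[UserStartedSpeakingFrame],  # None would monitor every frame
)
```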
pipecat/processors/logger.py
CHANGED
```diff
@@ -4,6 +4,8 @@
 # SPDX-License-Identifier: BSD 2-Clause License
 #
 
+"""Frame logging utilities for debugging and monitoring frame flow in Pipecat pipelines."""
+
 from typing import Optional, Tuple, Type
 
 from loguru import logger
@@ -21,6 +23,13 @@ logger = logger.opt(ansi=True)
 
 
 class FrameLogger(FrameProcessor):
+    """A frame processor that logs frame information for debugging purposes.
+
+    This processor intercepts frames passing through the pipeline and logs
+    their details with configurable formatting and filtering. Useful for
+    debugging frame flow and understanding pipeline behavior.
+    """
+
     def __init__(
         self,
         prefix="Frame",
@@ -32,12 +41,26 @@ class FrameLogger(FrameProcessor):
             TransportMessageFrame,
         ),
     ):
+        """Initialize the frame logger.
+
+        Args:
+            prefix: Text prefix to add to log messages. Defaults to "Frame".
+            color: ANSI color code for log message formatting. If None, no coloring is applied.
+            ignored_frame_types: Tuple of frame types to exclude from logging.
+                Defaults to common high-frequency frames like audio and speaking frames.
+        """
         super().__init__()
         self._prefix = prefix
         self._color = color
         self._ignored_frame_types = ignored_frame_types
 
     async def process_frame(self, frame: Frame, direction: FrameDirection):
+        """Process and log frame information.
+
+        Args:
+            frame: The frame to process and potentially log.
+            direction: The direction of frame flow in the pipeline.
+        """
         await super().process_frame(frame, direction)
 
         if self._ignored_frame_types and not isinstance(frame, self._ignored_frame_types):
```
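For context, a hedged sketch of dropping a FrameLogger between two pipeline stages; IdentityFilter is used purely as a stand-in for real processors and is assumed to take no required constructor arguments:

```python
# Sketch only: log frames flowing between two placeholder stages.
from pipecat.pipeline.pipeline import Pipeline
from pipecat.processors.filters.identity_filter import IdentityFilter
from pipecat.processors.logger import FrameLogger

pipeline = Pipeline(
    [
        IdentityFilter(),                      # placeholder upstream stage
        FrameLogger(prefix="After upstream"),  # default ignored_frame_types applies
        IdentityFilter(),                      # placeholder downstream stage
    ]
)
```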
pipecat/processors/metrics/frame_processor_metrics.py
CHANGED
```diff
@@ -4,6 +4,8 @@
 # SPDX-License-Identifier: BSD 2-Clause License
 #
 
+"""Frame processor metrics collection and reporting."""
+
 import time
 from typing import Optional
 
@@ -23,7 +25,20 @@ from pipecat.utils.base_object import BaseObject
 
 
 class FrameProcessorMetrics(BaseObject):
+    """Metrics collection and reporting for frame processors.
+
+    Provides comprehensive metrics tracking for frame processing operations,
+    including timing measurements, resource usage, and performance analytics.
+    Supports TTFB tracking, processing duration metrics, and usage statistics
+    for LLM and TTS operations.
+    """
+
     def __init__(self):
+        """Initialize the frame processor metrics collector.
+
+        Sets up internal state for tracking various metrics including TTFB,
+        processing times, and usage statistics.
+        """
         super().__init__()
         self._task_manager = None
         self._start_ttfb_time = 0
@@ -33,13 +48,24 @@ class FrameProcessorMetrics(BaseObject):
         self._logger = logger
 
     async def setup(self, task_manager: BaseTaskManager):
+        """Set up the metrics collector with a task manager.
+
+        Args:
+            task_manager: The task manager for handling async operations.
+        """
         self._task_manager = task_manager
 
     async def cleanup(self):
+        """Clean up metrics collection resources."""
         await super().cleanup()
 
     @property
     def task_manager(self) -> BaseTaskManager:
+        """Get the associated task manager.
+
+        Returns:
+            The task manager instance for async operations.
+        """
         return self._task_manager
 
     @property
@@ -47,7 +73,7 @@ class FrameProcessorMetrics(BaseObject):
         """Get the current TTFB value in seconds.
 
         Returns:
-
+            The TTFB value in seconds, or None if not measured.
         """
         if self._last_ttfb_time > 0:
             return self._last_ttfb_time
@@ -59,9 +85,11 @@ class FrameProcessorMetrics(BaseObject):
         return None
 
     def _processor_name(self):
+        """Get the processor name from core metrics data."""
         return self._core_metrics_data.processor
 
     def _model_name(self):
+        """Get the model name from core metrics data."""
         return self._core_metrics_data.model
 
     def set_logger(self, logger_instance):
@@ -69,18 +97,38 @@ class FrameProcessorMetrics(BaseObject):
         self._logger = logger_instance
 
     def set_core_metrics_data(self, data: MetricsData):
+        """Set the core metrics data for this collector.
+
+        Args:
+            data: The core metrics data containing processor and model information.
+        """
         self._core_metrics_data = data
 
     def set_processor_name(self, name: str):
+        """Set the processor name for metrics reporting.
+
+        Args:
+            name: The name of the processor to use in metrics.
+        """
         self._core_metrics_data = MetricsData(processor=name)
 
     async def start_ttfb_metrics(self, report_only_initial_ttfb):
+        """Start measuring time-to-first-byte (TTFB).
+
+        Args:
+            report_only_initial_ttfb: Whether to report only the first TTFB measurement.
+        """
         if self._should_report_ttfb:
             self._start_ttfb_time = time.time()
             self._last_ttfb_time = 0
             self._should_report_ttfb = not report_only_initial_ttfb
 
     async def stop_ttfb_metrics(self):
+        """Stop TTFB measurement and generate metrics frame.
+
+        Returns:
+            MetricsFrame containing TTFB data, or None if not measuring.
+        """
         if self._start_ttfb_time == 0:
             return None
 
@@ -93,9 +141,15 @@ class FrameProcessorMetrics(BaseObject):
         return MetricsFrame(data=[ttfb])
 
     async def start_processing_metrics(self):
+        """Start measuring processing time."""
         self._start_processing_time = time.time()
 
     async def stop_processing_metrics(self):
+        """Stop processing time measurement and generate metrics frame.
+
+        Returns:
+            MetricsFrame containing processing duration data, or None if not measuring.
+        """
         if self._start_processing_time == 0:
             return None
 
@@ -108,15 +162,34 @@ class FrameProcessorMetrics(BaseObject):
         return MetricsFrame(data=[processing])
 
     async def start_llm_usage_metrics(self, tokens: LLMTokenUsage):
-
-
-
+        """Record LLM token usage metrics.
+
+        Args:
+            tokens: Token usage information including prompt and completion tokens.
+
+        Returns:
+            MetricsFrame containing LLM usage data.
+        """
+        logstr = f"{self._processor_name()} prompt tokens: {tokens.prompt_tokens}, completion tokens: {tokens.completion_tokens}"
+        if tokens.cache_read_input_tokens:
+            logstr += f", cache read input tokens: {tokens.cache_read_input_tokens}"
+        if tokens.reasoning_tokens:
+            logstr += f", reasoning tokens: {tokens.reasoning_tokens}"
+        self._logger.debug(logstr)
         value = LLMUsageMetricsData(
             processor=self._processor_name(), model=self._model_name(), value=tokens
         )
         return MetricsFrame(data=[value])
 
     async def start_tts_usage_metrics(self, text: str):
+        """Record TTS character usage metrics.
+
+        Args:
+            text: The text being processed by TTS.
+
+        Returns:
+            MetricsFrame containing TTS usage data.
+        """
         characters = TTSUsageMetricsData(
             processor=self._processor_name(), model=self._model_name(), value=len(text)
        )
```
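The main behavioral change here is the richer debug log in start_llm_usage_metrics, which now includes cache-read and reasoning token counts. A rough sketch of that call path follows, assuming LLMTokenUsage accepts these fields and that setting a processor name is enough configuration; real services call this internally:

```python
# Rough sketch, not production wiring: exercise the LLM usage metrics path.
import asyncio

from pipecat.metrics.metrics import LLMTokenUsage
from pipecat.processors.metrics.frame_processor_metrics import FrameProcessorMetrics


async def main():
    metrics = FrameProcessorMetrics()
    metrics.set_processor_name("ExampleLLMService")  # illustrative name

    usage = LLMTokenUsage(prompt_tokens=120, completion_tokens=45, total_tokens=165)
    frame = await metrics.start_llm_usage_metrics(usage)
    # The new code also appends cache-read and reasoning token counts to the
    # debug log when those optional fields are set on the usage object.
    print(frame.data)


asyncio.run(main())
```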
pipecat/processors/metrics/sentry.py
CHANGED
```diff
@@ -4,12 +4,11 @@
 # SPDX-License-Identifier: BSD 2-Clause License
 #
 
-
+"""Sentry integration for frame processor metrics."""
 
 from loguru import logger
 
 from pipecat.utils.asyncio.task_manager import BaseTaskManager
-from pipecat.utils.asyncio.watchdog_queue import WatchdogQueue
 
 try:
     import sentry_sdk
@@ -22,7 +21,19 @@ from pipecat.processors.metrics.frame_processor_metrics import FrameProcessorMetrics
 
 
 class SentryMetrics(FrameProcessorMetrics):
+    """Frame processor metrics integration with Sentry monitoring.
+
+    Extends FrameProcessorMetrics to send time-to-first-byte (TTFB) and
+    processing metrics as Sentry transactions for performance monitoring
+    and debugging.
+    """
+
     def __init__(self):
+        """Initialize the Sentry metrics collector.
+
+        Sets up internal state for tracking transactions and verifies
+        Sentry SDK initialization status.
+        """
         super().__init__()
         self._ttfb_metrics_tx = None
         self._processing_metrics_tx = None
@@ -32,23 +43,37 @@ class SentryMetrics(FrameProcessorMetrics):
         self._sentry_task = None
 
     async def setup(self, task_manager: BaseTaskManager):
+        """Setup the Sentry metrics system.
+
+        Args:
+            task_manager: The task manager to use for background operations.
+        """
         await super().setup(task_manager)
         if self._sentry_available:
-            self._sentry_queue =
+            self._sentry_queue = asyncio.Queue()
             self._sentry_task = self.task_manager.create_task(
                 self._sentry_task_handler(), name=f"{self}::_sentry_task_handler"
             )
 
     async def cleanup(self):
+        """Clean up Sentry resources and flush pending transactions.
+
+        Ensures all pending transactions are sent to Sentry before shutdown.
+        """
         await super().cleanup()
         if self._sentry_task:
             await self._sentry_queue.put(None)
-            await self.
+            await self._sentry_task
             self._sentry_task = None
             logger.trace(f"{self} Flushing Sentry metrics")
             sentry_sdk.flush(timeout=5.0)
 
     async def start_ttfb_metrics(self, report_only_initial_ttfb):
+        """Start tracking time-to-first-byte metrics.
+
+        Args:
+            report_only_initial_ttfb: Whether to report only the initial TTFB measurement.
+        """
         await super().start_ttfb_metrics(report_only_initial_ttfb)
 
         if self._should_report_ttfb and self._sentry_available:
@@ -61,6 +86,10 @@ class SentryMetrics(FrameProcessorMetrics):
             )
 
     async def stop_ttfb_metrics(self):
+        """Stop tracking time-to-first-byte metrics.
+
+        Queues the TTFB transaction for completion and transmission to Sentry.
+        """
         await super().stop_ttfb_metrics()
 
         if self._sentry_available and self._ttfb_metrics_tx:
@@ -68,6 +97,10 @@ class SentryMetrics(FrameProcessorMetrics):
             self._ttfb_metrics_tx = None
 
     async def start_processing_metrics(self):
+        """Start tracking frame processing metrics.
+
+        Creates a new Sentry transaction to track processing performance.
+        """
         await super().start_processing_metrics()
 
         if self._sentry_available:
@@ -80,6 +113,10 @@ class SentryMetrics(FrameProcessorMetrics):
             )
 
     async def stop_processing_metrics(self):
+        """Stop tracking frame processing metrics.
+
+        Queues the processing transaction for completion and transmission to Sentry.
+        """
         await super().stop_processing_metrics()
 
         if self._sentry_available and self._processing_metrics_tx:
@@ -87,6 +124,7 @@ class SentryMetrics(FrameProcessorMetrics):
             self._processing_metrics_tx = None
 
     async def _sentry_task_handler(self):
+        """Background task handler for completing Sentry transactions."""
         running = True
         while running:
             tx = await self._sentry_queue.get()
```
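The switch from WatchdogQueue to a plain asyncio.Queue keeps the same shutdown protocol: a background task drains transactions until it sees a None sentinel, and cleanup enqueues the sentinel, awaits the task, then flushes. A standalone sketch of that pattern (not pipecat-specific):

```python
# Standalone sketch of the queue-plus-sentinel shutdown pattern used above.
import asyncio


async def worker(queue: asyncio.Queue) -> None:
    while True:
        item = await queue.get()
        if item is None:  # sentinel: stop draining
            break
        item()            # stand-in for finishing a Sentry transaction


async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    task = asyncio.create_task(worker(queue))

    await queue.put(lambda: print("tx.finish()"))  # queue one "transaction"

    # Mirrors SentryMetrics.cleanup(): enqueue the sentinel, then await the task.
    await queue.put(None)
    await task


asyncio.run(main())
```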
pipecat/processors/producer_processor.py
CHANGED
```diff
@@ -4,24 +4,34 @@
 # SPDX-License-Identifier: BSD 2-Clause License
 #
 
+"""Producer processor for frame filtering and distribution."""
+
 import asyncio
 from typing import Awaitable, Callable, List
 
 from pipecat.frames.frames import Frame
 from pipecat.processors.frame_processor import FrameDirection, FrameProcessor
-from pipecat.utils.asyncio.watchdog_queue import WatchdogQueue
 
 
 async def identity_transformer(frame: Frame):
+    """Default transformer that returns the frame unchanged.
+
+    Args:
+        frame: The frame to transform.
+
+    Returns:
+        The same frame without modifications.
+    """
     return frame
 
 
 class ProducerProcessor(FrameProcessor):
-    """
-    frames should be sent to consumers based on a user-defined filter. The
-    frames can be transformed into a different type of frame before being
-    sending them to the consumers. More than one consumer can be added.
+    """A processor that filters frames and distributes them to multiple consumers.
 
+    This processor receives frames, applies a filter to determine which frames
+    should be sent to consumers (ConsumerProcessor), optionally transforms those
+    frames, and distributes them to registered consumer queues. It can also pass
+    frames through to the next processor in the pipeline.
     """
 
     def __init__(
@@ -31,6 +41,16 @@ class ProducerProcessor(FrameProcessor):
         transformer: Callable[[Frame], Awaitable[Frame]] = identity_transformer,
         passthrough: bool = True,
     ):
+        """Initialize the producer processor.
+
+        Args:
+            filter: Async function that determines if a frame should be produced.
+                Must return True for frames to be sent to consumers.
+            transformer: Async function to transform frames before sending to consumers.
+                Defaults to identity_transformer which returns frames unchanged.
+            passthrough: Whether to pass frames through to the next processor.
+                If True, all frames continue downstream regardless of filter result.
+        """
         super().__init__()
         self._filter = filter
         self._transformer = transformer
@@ -38,26 +58,25 @@ class ProducerProcessor(FrameProcessor):
         self._consumers: List[asyncio.Queue] = []
 
     def add_consumer(self):
-        """
-        Adds a new consumer and returns its associated queue.
+        """Add a new consumer and return its associated queue.
 
         Returns:
             asyncio.Queue: The queue for the newly added consumer.
         """
-        queue =
+        queue = asyncio.Queue()
         self._consumers.append(queue)
         return queue
 
     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        """
-        Processes an incoming frame and determines whether to produce it as a ProducerItem.
+        """Process an incoming frame and determine whether to produce it.
 
-        If the frame meets the
-        If passthrough is enabled, the frame
+        If the frame meets the filter criteria, it will be transformed and added
+        to all consumer queues. If passthrough is enabled, the original frame
+        will also be sent downstream.
 
         Args:
-            frame
-            direction
+            frame: The frame to process.
+            direction: The direction of the frame flow.
         """
         await super().process_frame(frame, direction)
 
@@ -69,6 +88,7 @@ class ProducerProcessor(FrameProcessor):
             await self.push_frame(frame, direction)
 
     async def _produce(self, frame: Frame):
+        """Produce a frame to all consumers."""
         for consumer in self._consumers:
             new_frame = await self._transformer(frame)
             await consumer.put(new_frame)
```
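The rewritten docstrings make the producer/consumer contract explicit. A hedged sketch grounded in them follows; the filter signature mirrors the new docstring, and a ConsumerProcessor placed elsewhere in the pipeline would read from the returned queue:

```python
# Sketch only: produce TextFrames to interested consumers, pass everything downstream.
from pipecat.frames.frames import Frame, TextFrame
from pipecat.processors.producer_processor import ProducerProcessor


async def is_text(frame: Frame) -> bool:
    return isinstance(frame, TextFrame)


producer = ProducerProcessor(filter=is_text, passthrough=True)

# Each call registers a consumer and returns the asyncio.Queue it will be fed from.
queue = producer.add_consumer()
```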
pipecat/processors/text_transformer.py
CHANGED
```diff
@@ -4,29 +4,41 @@
 # SPDX-License-Identifier: BSD 2-Clause License
 #
 
-
+"""Stateless text transformation processor for Pipecat."""
+
+from typing import Callable, Coroutine, Union
 
 from pipecat.frames.frames import Frame, TextFrame
 from pipecat.processors.frame_processor import FrameDirection, FrameProcessor
 
 
 class StatelessTextTransformer(FrameProcessor):
-    """
-
-    >>> async def print_frames(aggregator, frame):
-    ...     async for frame in aggregator.process_frame(frame):
-    ...         print(frame.text)
+    """Processor that applies transformation functions to text frames.
 
-
-
-
+    This processor intercepts TextFrame objects and applies a user-provided
+    transformation function to the text content. The function can be either
+    synchronous or asynchronous (coroutine).
     """
 
-    def __init__(
+    def __init__(
+        self, transform_fn: Union[Callable[[str], str], Callable[[str], Coroutine[None, None, str]]]
+    ):
+        """Initialize the text transformer.
+
+        Args:
+            transform_fn: Function to apply to text content. Can be synchronous
+                (str -> str) or asynchronous (str -> Coroutine[None, None, str]).
+        """
         super().__init__()
         self._transform_fn = transform_fn
 
     async def process_frame(self, frame: Frame, direction: FrameDirection):
+        """Process frames, applying transformation to text frames.
+
+        Args:
+            frame: The frame to process.
+            direction: The direction of frame flow in the pipeline.
+        """
         await super().process_frame(frame, direction)
 
         if isinstance(frame, TextFrame):
```