dv-pipecat-ai 0.0.74.dev770__py3-none-any.whl → 0.0.82.dev776__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dv-pipecat-ai might be problematic.
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/METADATA +137 -93
- dv_pipecat_ai-0.0.82.dev776.dist-info/RECORD +340 -0
- pipecat/__init__.py +17 -0
- pipecat/adapters/base_llm_adapter.py +36 -1
- pipecat/adapters/schemas/direct_function.py +296 -0
- pipecat/adapters/schemas/function_schema.py +15 -6
- pipecat/adapters/schemas/tools_schema.py +55 -7
- pipecat/adapters/services/anthropic_adapter.py +22 -3
- pipecat/adapters/services/aws_nova_sonic_adapter.py +23 -3
- pipecat/adapters/services/bedrock_adapter.py +22 -3
- pipecat/adapters/services/gemini_adapter.py +16 -3
- pipecat/adapters/services/open_ai_adapter.py +17 -2
- pipecat/adapters/services/open_ai_realtime_adapter.py +23 -3
- pipecat/audio/filters/base_audio_filter.py +30 -6
- pipecat/audio/filters/koala_filter.py +37 -2
- pipecat/audio/filters/krisp_filter.py +59 -6
- pipecat/audio/filters/noisereduce_filter.py +37 -0
- pipecat/audio/interruptions/base_interruption_strategy.py +25 -5
- pipecat/audio/interruptions/min_words_interruption_strategy.py +21 -4
- pipecat/audio/mixers/base_audio_mixer.py +30 -7
- pipecat/audio/mixers/soundfile_mixer.py +53 -6
- pipecat/audio/resamplers/base_audio_resampler.py +17 -9
- pipecat/audio/resamplers/resampy_resampler.py +26 -1
- pipecat/audio/resamplers/soxr_resampler.py +32 -1
- pipecat/audio/resamplers/soxr_stream_resampler.py +101 -0
- pipecat/audio/utils.py +194 -1
- pipecat/audio/vad/silero.py +60 -3
- pipecat/audio/vad/vad_analyzer.py +114 -30
- pipecat/clocks/base_clock.py +19 -0
- pipecat/clocks/system_clock.py +25 -0
- pipecat/extensions/voicemail/__init__.py +0 -0
- pipecat/extensions/voicemail/voicemail_detector.py +707 -0
- pipecat/frames/frames.py +590 -156
- pipecat/metrics/metrics.py +64 -1
- pipecat/observers/base_observer.py +58 -19
- pipecat/observers/loggers/debug_log_observer.py +56 -64
- pipecat/observers/loggers/llm_log_observer.py +8 -1
- pipecat/observers/loggers/transcription_log_observer.py +19 -7
- pipecat/observers/loggers/user_bot_latency_log_observer.py +32 -5
- pipecat/observers/turn_tracking_observer.py +26 -1
- pipecat/pipeline/base_pipeline.py +5 -7
- pipecat/pipeline/base_task.py +52 -9
- pipecat/pipeline/parallel_pipeline.py +121 -177
- pipecat/pipeline/pipeline.py +129 -20
- pipecat/pipeline/runner.py +50 -1
- pipecat/pipeline/sync_parallel_pipeline.py +132 -32
- pipecat/pipeline/task.py +263 -280
- pipecat/pipeline/task_observer.py +85 -34
- pipecat/pipeline/to_be_updated/merge_pipeline.py +32 -2
- pipecat/processors/aggregators/dtmf_aggregator.py +29 -22
- pipecat/processors/aggregators/gated.py +25 -24
- pipecat/processors/aggregators/gated_openai_llm_context.py +22 -2
- pipecat/processors/aggregators/llm_response.py +398 -89
- pipecat/processors/aggregators/openai_llm_context.py +161 -13
- pipecat/processors/aggregators/sentence.py +25 -14
- pipecat/processors/aggregators/user_response.py +28 -3
- pipecat/processors/aggregators/vision_image_frame.py +24 -14
- pipecat/processors/async_generator.py +28 -0
- pipecat/processors/audio/audio_buffer_processor.py +78 -37
- pipecat/processors/consumer_processor.py +25 -6
- pipecat/processors/filters/frame_filter.py +23 -0
- pipecat/processors/filters/function_filter.py +30 -0
- pipecat/processors/filters/identity_filter.py +17 -2
- pipecat/processors/filters/null_filter.py +24 -1
- pipecat/processors/filters/stt_mute_filter.py +56 -21
- pipecat/processors/filters/wake_check_filter.py +46 -3
- pipecat/processors/filters/wake_notifier_filter.py +21 -3
- pipecat/processors/frame_processor.py +488 -131
- pipecat/processors/frameworks/langchain.py +38 -3
- pipecat/processors/frameworks/rtvi.py +719 -34
- pipecat/processors/gstreamer/pipeline_source.py +41 -0
- pipecat/processors/idle_frame_processor.py +26 -3
- pipecat/processors/logger.py +23 -0
- pipecat/processors/metrics/frame_processor_metrics.py +77 -4
- pipecat/processors/metrics/sentry.py +42 -4
- pipecat/processors/producer_processor.py +34 -14
- pipecat/processors/text_transformer.py +22 -10
- pipecat/processors/transcript_processor.py +48 -29
- pipecat/processors/user_idle_processor.py +31 -21
- pipecat/runner/__init__.py +1 -0
- pipecat/runner/daily.py +132 -0
- pipecat/runner/livekit.py +148 -0
- pipecat/runner/run.py +543 -0
- pipecat/runner/types.py +67 -0
- pipecat/runner/utils.py +515 -0
- pipecat/serializers/base_serializer.py +42 -0
- pipecat/serializers/exotel.py +17 -6
- pipecat/serializers/genesys.py +95 -0
- pipecat/serializers/livekit.py +33 -0
- pipecat/serializers/plivo.py +16 -15
- pipecat/serializers/protobuf.py +37 -1
- pipecat/serializers/telnyx.py +18 -17
- pipecat/serializers/twilio.py +32 -16
- pipecat/services/ai_service.py +5 -3
- pipecat/services/anthropic/llm.py +113 -43
- pipecat/services/assemblyai/models.py +63 -5
- pipecat/services/assemblyai/stt.py +64 -11
- pipecat/services/asyncai/__init__.py +0 -0
- pipecat/services/asyncai/tts.py +501 -0
- pipecat/services/aws/llm.py +185 -111
- pipecat/services/aws/stt.py +217 -23
- pipecat/services/aws/tts.py +118 -52
- pipecat/services/aws/utils.py +101 -5
- pipecat/services/aws_nova_sonic/aws.py +82 -64
- pipecat/services/aws_nova_sonic/context.py +15 -6
- pipecat/services/azure/common.py +10 -2
- pipecat/services/azure/image.py +32 -0
- pipecat/services/azure/llm.py +9 -7
- pipecat/services/azure/stt.py +65 -2
- pipecat/services/azure/tts.py +154 -23
- pipecat/services/cartesia/stt.py +125 -8
- pipecat/services/cartesia/tts.py +102 -38
- pipecat/services/cerebras/llm.py +15 -23
- pipecat/services/deepgram/stt.py +19 -11
- pipecat/services/deepgram/tts.py +36 -0
- pipecat/services/deepseek/llm.py +14 -23
- pipecat/services/elevenlabs/tts.py +330 -64
- pipecat/services/fal/image.py +43 -0
- pipecat/services/fal/stt.py +48 -10
- pipecat/services/fireworks/llm.py +14 -21
- pipecat/services/fish/tts.py +109 -9
- pipecat/services/gemini_multimodal_live/__init__.py +1 -0
- pipecat/services/gemini_multimodal_live/events.py +83 -2
- pipecat/services/gemini_multimodal_live/file_api.py +189 -0
- pipecat/services/gemini_multimodal_live/gemini.py +218 -21
- pipecat/services/gladia/config.py +17 -10
- pipecat/services/gladia/stt.py +82 -36
- pipecat/services/google/frames.py +40 -0
- pipecat/services/google/google.py +2 -0
- pipecat/services/google/image.py +39 -2
- pipecat/services/google/llm.py +176 -58
- pipecat/services/google/llm_openai.py +26 -4
- pipecat/services/google/llm_vertex.py +37 -15
- pipecat/services/google/rtvi.py +41 -0
- pipecat/services/google/stt.py +65 -17
- pipecat/services/google/test-google-chirp.py +45 -0
- pipecat/services/google/tts.py +390 -19
- pipecat/services/grok/llm.py +8 -6
- pipecat/services/groq/llm.py +8 -6
- pipecat/services/groq/stt.py +13 -9
- pipecat/services/groq/tts.py +40 -0
- pipecat/services/hamsa/__init__.py +9 -0
- pipecat/services/hamsa/stt.py +241 -0
- pipecat/services/heygen/__init__.py +5 -0
- pipecat/services/heygen/api.py +281 -0
- pipecat/services/heygen/client.py +620 -0
- pipecat/services/heygen/video.py +338 -0
- pipecat/services/image_service.py +5 -3
- pipecat/services/inworld/__init__.py +1 -0
- pipecat/services/inworld/tts.py +592 -0
- pipecat/services/llm_service.py +127 -45
- pipecat/services/lmnt/tts.py +80 -7
- pipecat/services/mcp_service.py +85 -44
- pipecat/services/mem0/memory.py +42 -13
- pipecat/services/minimax/tts.py +74 -15
- pipecat/services/mistral/__init__.py +0 -0
- pipecat/services/mistral/llm.py +185 -0
- pipecat/services/moondream/vision.py +55 -10
- pipecat/services/neuphonic/tts.py +275 -48
- pipecat/services/nim/llm.py +8 -6
- pipecat/services/ollama/llm.py +27 -7
- pipecat/services/openai/base_llm.py +54 -16
- pipecat/services/openai/image.py +30 -0
- pipecat/services/openai/llm.py +7 -5
- pipecat/services/openai/stt.py +13 -9
- pipecat/services/openai/tts.py +42 -10
- pipecat/services/openai_realtime_beta/azure.py +11 -9
- pipecat/services/openai_realtime_beta/context.py +7 -5
- pipecat/services/openai_realtime_beta/events.py +10 -7
- pipecat/services/openai_realtime_beta/openai.py +37 -18
- pipecat/services/openpipe/llm.py +30 -24
- pipecat/services/openrouter/llm.py +9 -7
- pipecat/services/perplexity/llm.py +15 -19
- pipecat/services/piper/tts.py +26 -12
- pipecat/services/playht/tts.py +227 -65
- pipecat/services/qwen/llm.py +8 -6
- pipecat/services/rime/tts.py +128 -17
- pipecat/services/riva/stt.py +160 -22
- pipecat/services/riva/tts.py +67 -2
- pipecat/services/sambanova/llm.py +19 -17
- pipecat/services/sambanova/stt.py +14 -8
- pipecat/services/sarvam/tts.py +60 -13
- pipecat/services/simli/video.py +82 -21
- pipecat/services/soniox/__init__.py +0 -0
- pipecat/services/soniox/stt.py +398 -0
- pipecat/services/speechmatics/stt.py +29 -17
- pipecat/services/stt_service.py +47 -11
- pipecat/services/tavus/video.py +94 -25
- pipecat/services/together/llm.py +8 -6
- pipecat/services/tts_service.py +77 -53
- pipecat/services/ultravox/stt.py +46 -43
- pipecat/services/vision_service.py +5 -3
- pipecat/services/websocket_service.py +12 -11
- pipecat/services/whisper/base_stt.py +58 -12
- pipecat/services/whisper/stt.py +69 -58
- pipecat/services/xtts/tts.py +59 -2
- pipecat/sync/base_notifier.py +19 -0
- pipecat/sync/event_notifier.py +24 -0
- pipecat/tests/utils.py +73 -5
- pipecat/transcriptions/language.py +24 -0
- pipecat/transports/base_input.py +112 -8
- pipecat/transports/base_output.py +235 -13
- pipecat/transports/base_transport.py +119 -0
- pipecat/transports/local/audio.py +76 -0
- pipecat/transports/local/tk.py +84 -0
- pipecat/transports/network/fastapi_websocket.py +174 -15
- pipecat/transports/network/small_webrtc.py +383 -39
- pipecat/transports/network/webrtc_connection.py +214 -8
- pipecat/transports/network/websocket_client.py +171 -1
- pipecat/transports/network/websocket_server.py +147 -9
- pipecat/transports/services/daily.py +792 -70
- pipecat/transports/services/helpers/daily_rest.py +122 -129
- pipecat/transports/services/livekit.py +339 -4
- pipecat/transports/services/tavus.py +273 -38
- pipecat/utils/asyncio/task_manager.py +92 -186
- pipecat/utils/base_object.py +83 -1
- pipecat/utils/network.py +2 -0
- pipecat/utils/string.py +114 -58
- pipecat/utils/text/base_text_aggregator.py +44 -13
- pipecat/utils/text/base_text_filter.py +46 -0
- pipecat/utils/text/markdown_text_filter.py +70 -14
- pipecat/utils/text/pattern_pair_aggregator.py +18 -14
- pipecat/utils/text/simple_text_aggregator.py +43 -2
- pipecat/utils/text/skip_tags_aggregator.py +21 -13
- pipecat/utils/time.py +36 -0
- pipecat/utils/tracing/class_decorators.py +32 -7
- pipecat/utils/tracing/conversation_context_provider.py +12 -2
- pipecat/utils/tracing/service_attributes.py +80 -64
- pipecat/utils/tracing/service_decorators.py +48 -21
- pipecat/utils/tracing/setup.py +13 -7
- pipecat/utils/tracing/turn_context_provider.py +12 -2
- pipecat/utils/tracing/turn_trace_observer.py +27 -0
- pipecat/utils/utils.py +14 -14
- dv_pipecat_ai-0.0.74.dev770.dist-info/RECORD +0 -319
- pipecat/examples/daily_runner.py +0 -64
- pipecat/examples/run.py +0 -265
- pipecat/utils/asyncio/watchdog_async_iterator.py +0 -72
- pipecat/utils/asyncio/watchdog_event.py +0 -42
- pipecat/utils/asyncio/watchdog_priority_queue.py +0 -48
- pipecat/utils/asyncio/watchdog_queue.py +0 -48
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/top_level.txt +0 -0
- /pipecat/{examples → extensions}/__init__.py +0 -0
pipecat/pipeline/task.py
CHANGED
```diff
@@ -4,6 +4,13 @@
 # SPDX-License-Identifier: BSD 2-Clause License
 #
 
+"""Pipeline task implementation for managing frame processing pipelines.
+
+This module provides the main PipelineTask class that orchestrates pipeline
+execution, frame routing, lifecycle management, and monitoring capabilities
+including heartbeats, idle detection, and observer integration.
+"""
+
 import asyncio
 import time
 from collections import deque
@@ -25,26 +32,24 @@ from pipecat.frames.frames import (
     Frame,
     HeartbeatFrame,
     InputAudioRawFrame,
+    InterimTranscriptionFrame,
     LLMFullResponseEndFrame,
     MetricsFrame,
     StartFrame,
     StopFrame,
     StopTaskFrame,
+    TranscriptionFrame,
+    UserStartedSpeakingFrame,
+    UserStoppedSpeakingFrame,
 )
 from pipecat.metrics.metrics import ProcessingMetricsData, TTFBMetricsData
 from pipecat.observers.base_observer import BaseObserver
 from pipecat.observers.turn_tracking_observer import TurnTrackingObserver
-from pipecat.pipeline.base_pipeline import BasePipeline
 from pipecat.pipeline.base_task import BasePipelineTask, PipelineTaskParams
+from pipecat.pipeline.pipeline import Pipeline, PipelineSink, PipelineSource
 from pipecat.pipeline.task_observer import TaskObserver
 from pipecat.processors.frame_processor import FrameDirection, FrameProcessor, FrameProcessorSetup
-from pipecat.utils.asyncio.task_manager import (
-    WATCHDOG_TIMEOUT,
-    BaseTaskManager,
-    TaskManager,
-    TaskManagerParams,
-)
-from pipecat.utils.asyncio.watchdog_queue import WatchdogQueue
+from pipecat.utils.asyncio.task_manager import BaseTaskManager, TaskManager, TaskManagerParams
 from pipecat.utils.tracing.setup import is_tracing_available
 from pipecat.utils.tracing.turn_trace_observer import TurnTraceObserver
 
@@ -53,12 +58,13 @@ HEARTBEAT_MONITOR_SECONDS = HEARTBEAT_SECONDS * 10
 
 
 class PipelineParams(BaseModel):
-    """Configuration parameters for pipeline execution.
-    usually passed to all frame processors using through `StartFrame`. For other
-    generic pipeline task parameters use `PipelineTask` constructor arguments
-    instead.
+    """Configuration parameters for pipeline execution.
 
-
+    These parameters are usually passed to all frame processors through
+    StartFrame. For other generic pipeline task parameters use PipelineTask
+    constructor arguments instead.
+
+    Parameters:
         allow_interruptions: Whether to allow pipeline interruptions.
         audio_in_sample_rate: Input audio sample rate in Hz.
         audio_out_sample_rate: Output audio sample rate in Hz.
@@ -66,12 +72,15 @@ class PipelineParams(BaseModel):
         enable_metrics: Whether to enable metrics collection.
         enable_usage_metrics: Whether to enable usage metrics.
         heartbeats_period_secs: Period between heartbeats in seconds.
+        interruption_strategies: Strategies for bot interruption behavior.
         observers: [deprecated] Use `observers` arg in `PipelineTask` class.
+
+            .. deprecated:: 0.0.58
+                Use the `observers` argument in the `PipelineTask` class instead.
+
         report_only_initial_ttfb: Whether to report only initial time to first byte.
         send_initial_empty_metrics: Whether to send initial empty metrics.
         start_metadata: Additional metadata for pipeline start.
-        interruption_strategies: Strategies for bot interruption behavior.
-
     """
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -90,124 +99,38 @@
     start_metadata: Dict[str, Any] = Field(default_factory=dict)
 
 
-class
-    """
-
-    This is the source processor that is linked at the beginning of the
-    pipeline given to the pipeline task. It allows us to easily push frames
-    downstream to the pipeline and also receive upstream frames coming from the
-    pipeline.
-
-    Args:
-        up_queue: Queue for upstream frame processing.
-
-    """
-
-    def __init__(self, up_queue: asyncio.Queue, **kwargs):
-        super().__init__(**kwargs)
-        self._up_queue = up_queue
-
-    async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
-        match direction:
-            case FrameDirection.UPSTREAM:
-                await self._up_queue.put(frame)
-            case FrameDirection.DOWNSTREAM:
-                await self.push_frame(frame, direction)
-
-
-class PipelineTaskSink(FrameProcessor):
-    """Sink processor for pipeline tasks that handles final frame processing.
-
-    This is the sink processor that is linked at the end of the pipeline
-    given to the pipeline task. It allows us to receive downstream frames and
-    act on them, for example, waiting to receive an EndFrame.
+class PipelineTask(BasePipelineTask):
+    """Manages the execution of a pipeline, handling frame processing and task lifecycle.
 
-
-
-
+    This class orchestrates pipeline execution with comprehensive monitoring,
+    event handling, and lifecycle management. It provides event handlers for
+    various pipeline states and frame types, idle detection, heartbeat monitoring,
+    and observer integration.
 
-
-        super().__init__(**kwargs)
-        self._down_queue = down_queue
+    Event handlers available:
 
-
-
-
+    - on_frame_reached_upstream: Called when upstream frames reach the source
+    - on_frame_reached_downstream: Called when downstream frames reach the sink
+    - on_idle_timeout: Called when pipeline is idle beyond timeout threshold
+    - on_pipeline_started: Called when pipeline starts with StartFrame
+    - on_pipeline_stopped: Called when pipeline stops with StopFrame
+    - on_pipeline_ended: Called when pipeline ends with EndFrame
+    - on_pipeline_cancelled: Called when pipeline is cancelled
 
+    Example::
 
-
-
+        @task.event_handler("on_frame_reached_upstream")
+        async def on_frame_reached_upstream(task, frame):
+            ...
 
-
-
-
-    handlers will not be called unless some filters are set using
-    `set_reached_upstream_filter` and `set_reached_downstream_filter`.
-
-        @task.event_handler("on_frame_reached_upstream")
-        async def on_frame_reached_upstream(task, frame):
-            ...
-
-        @task.event_handler("on_frame_reached_downstream")
-        async def on_frame_reached_downstream(task, frame):
-            ...
-
-    It also has an event handler that detects when the pipeline is idle. By
-    default, a pipeline is idle if no `BotSpeakingFrame` or
-    `LLMFullResponseEndFrame` are received within `idle_timeout_secs`.
-
-        @task.event_handler("on_idle_timeout")
-        async def on_pipeline_idle_timeout(task):
-            ...
-
-    There are also events to know if a pipeline has been started, stopped, ended
-    or cancelled.
-
-        @task.event_handler("on_pipeline_started")
-        async def on_pipeline_started(task, frame: StartFrame):
-            ...
-
-        @task.event_handler("on_pipeline_stopped")
-        async def on_pipeline_stopped(task, frame: StopFrame):
-            ...
-
-        @task.event_handler("on_pipeline_ended")
-        async def on_pipeline_ended(task, frame: EndFrame):
-            ...
-
-        @task.event_handler("on_pipeline_cancelled")
-        async def on_pipeline_cancelled(task, frame: CancelFrame):
-            ...
-
-    Args:
-        pipeline: The pipeline to execute.
-        params: Configuration parameters for the pipeline.
-        additional_span_attributes: Optional dictionary of attributes to propagate as
-            OpenTelemetry conversation span attributes.
-        cancel_on_idle_timeout: Whether the pipeline task should be cancelled if
-            the idle timeout is reached.
-        check_dangling_tasks: Whether to check for processors' tasks finishing properly.
-        clock: Clock implementation for timing operations.
-        conversation_id: Optional custom ID for the conversation.
-        enable_tracing: Whether to enable tracing.
-        enable_turn_tracking: Whether to enable turn tracking.
-        enable_watchdog_logging: Whether to print task processing times.
-        enable_watchdog_timers: Whether to enable task watchdog timers.
-        idle_timeout_frames: A tuple with the frames that should trigger an idle
-            timeout if not received withing `idle_timeout_seconds`.
-        idle_timeout_secs: Timeout (in seconds) to consider pipeline idle or
-            None. If a pipeline is idle the pipeline task will be cancelled
-            automatically.
-        observers: List of observers for monitoring pipeline execution.
-        watchdog_timeout_secs: Watchdog timer timeout (in seconds). A warning
-            will be logged if the watchdog timer is not reset before this timeout.
+        @task.event_handler("on_idle_timeout")
+        async def on_pipeline_idle_timeout(task):
+            ...
     """
 
     def __init__(
         self,
-        pipeline:
+        pipeline: FrameProcessor,
         *,
         params: Optional[PipelineParams] = None,
         additional_span_attributes: Optional[dict] = None,
@@ -217,19 +140,41 @@ class PipelineTask(BasePipelineTask):
         conversation_id: Optional[str] = None,
         enable_tracing: bool = False,
         enable_turn_tracking: bool = True,
-        enable_watchdog_logging: bool = False,
-        enable_watchdog_timers: bool = False,
         idle_timeout_frames: Tuple[Type[Frame], ...] = (
             BotSpeakingFrame,
+            InterimTranscriptionFrame,
             LLMFullResponseEndFrame,
+            TranscriptionFrame,
+            UserStartedSpeakingFrame,
+            UserStoppedSpeakingFrame,
         ),
         idle_timeout_secs: Optional[float] = 300,
         observers: Optional[List[BaseObserver]] = None,
         task_manager: Optional[BaseTaskManager] = None,
-        watchdog_timeout_secs: float = WATCHDOG_TIMEOUT,
     ):
+        """Initialize the PipelineTask.
+
+        Args:
+            pipeline: The pipeline to execute.
+            params: Configuration parameters for the pipeline.
+            additional_span_attributes: Optional dictionary of attributes to propagate as
+                OpenTelemetry conversation span attributes.
+            cancel_on_idle_timeout: Whether the pipeline task should be cancelled if
+                the idle timeout is reached.
+            check_dangling_tasks: Whether to check for processors' tasks finishing properly.
+            clock: Clock implementation for timing operations.
+            conversation_id: Optional custom ID for the conversation.
+            enable_tracing: Whether to enable tracing.
+            enable_turn_tracking: Whether to enable turn tracking.
+            idle_timeout_frames: A tuple with the frames that should trigger an idle
+                timeout if not received within `idle_timeout_seconds`.
+            idle_timeout_secs: Timeout (in seconds) to consider pipeline idle or
+                None. If a pipeline is idle the pipeline task will be cancelled
+                automatically.
+            observers: List of observers for monitoring pipeline execution.
+            task_manager: Optional task manager for handling asyncio tasks.
+        """
         super().__init__()
-        self._pipeline = pipeline
         self._params = params or PipelineParams()
         self._additional_span_attributes = additional_span_attributes or {}
         self._cancel_on_idle_timeout = cancel_on_idle_timeout
@@ -238,11 +183,8 @@ class PipelineTask(BasePipelineTask):
         self._conversation_id = conversation_id
         self._enable_tracing = enable_tracing and is_tracing_available()
         self._enable_turn_tracking = enable_turn_tracking
-        self._enable_watchdog_logging = enable_watchdog_logging
-        self._enable_watchdog_timers = enable_watchdog_timers
         self._idle_timeout_frames = idle_timeout_frames
         self._idle_timeout_secs = idle_timeout_secs
-        self._watchdog_timeout_secs = watchdog_timeout_secs
         if self._params.observers:
             import warnings
 
@@ -273,40 +215,30 @@ class PipelineTask(BasePipelineTask):
         # PipelineTask and its frame processors.
         self._task_manager = task_manager or TaskManager(conversation_id)
 
-        # This queue receives frames coming from the pipeline upstream.
-        self._up_queue = WatchdogQueue(self._task_manager)
-        self._process_up_task: Optional[asyncio.Task] = None
-        # This queue receives frames coming from the pipeline downstream.
-        self._down_queue = WatchdogQueue(self._task_manager)
-        self._process_down_task: Optional[asyncio.Task] = None
         # This queue is the queue used to push frames to the pipeline.
-        self._push_queue =
+        self._push_queue = asyncio.Queue()
         self._process_push_task: Optional[asyncio.Task] = None
         # This is the heartbeat queue. When a heartbeat frame is received in the
         # down queue we add it to the heartbeat queue for processing.
-        self._heartbeat_queue =
+        self._heartbeat_queue = asyncio.Queue()
         self._heartbeat_push_task: Optional[asyncio.Task] = None
         self._heartbeat_monitor_task: Optional[asyncio.Task] = None
         # This is the idle queue. When frames are received downstream they are
         # put in the queue. If no frame is received the pipeline is considered
         # idle.
-        self._idle_queue =
+        self._idle_queue = asyncio.Queue()
         self._idle_monitor_task: Optional[asyncio.Task] = None
         # This event is used to indicate a finalize frame (e.g. EndFrame,
         # StopFrame) has been received in the down queue.
         self._pipeline_end_event = asyncio.Event()
 
-        # This is
-        #
-        # upstream frames
-
-        self.
-
-
-        # pipeline. This sink processor allows up to receive and react to
-        # downstream frames.
-        self._sink = PipelineTaskSink(self._down_queue)
-        pipeline.link(self._sink)
+        # This is the final pipeline. It is composed of a source processor,
+        # followed by the user pipeline, and ending with a sink processor. The
+        # source allows us to receive and react to upstream frames, and the sink
+        # allows us to receive and react to downstream frames.
+        source = PipelineSource(self._source_push_frame, name=f"{self}::Source")
+        sink = PipelineSink(self._sink_push_frame, name=f"{self}::Sink")
+        self._pipeline = Pipeline([pipeline], source=source, sink=sink)
 
         # The task observer acts as a proxy to the provided observers. This way,
         # we only need to pass a single observer (using the StartFrame) which
@@ -331,60 +263,97 @@ class PipelineTask(BasePipelineTask):
 
     @property
     def params(self) -> PipelineParams:
-        """
+        """Get the pipeline parameters for this task.
+
+        Returns:
+            The pipeline parameters configuration.
+        """
         return self._params
 
     @property
     def turn_tracking_observer(self) -> Optional[TurnTrackingObserver]:
-        """
+        """Get the turn tracking observer if enabled.
+
+        Returns:
+            The turn tracking observer instance or None if not enabled.
+        """
         return self._turn_tracking_observer
 
    @property
     def turn_trace_observer(self) -> Optional[TurnTraceObserver]:
-        """
+        """Get the turn trace observer if enabled.
+
+        Returns:
+            The turn trace observer instance or None if not enabled.
+        """
         return self._turn_trace_observer
 
     def add_observer(self, observer: BaseObserver):
+        """Add an observer to monitor pipeline execution.
+
+        Args:
+            observer: The observer to add to the pipeline monitoring.
+        """
         self._observer.add_observer(observer)
 
     async def remove_observer(self, observer: BaseObserver):
+        """Remove an observer from pipeline monitoring.
+
+        Args:
+            observer: The observer to remove from pipeline monitoring.
+        """
         await self._observer.remove_observer(observer)
 
     def set_reached_upstream_filter(self, types: Tuple[Type[Frame], ...]):
-        """
-        on_frame_reached_upstream event handler.
+        """Set which frame types trigger the on_frame_reached_upstream event.
 
+        Args:
+            types: Tuple of frame types to monitor for upstream events.
         """
         self._reached_upstream_types = types
 
     def set_reached_downstream_filter(self, types: Tuple[Type[Frame], ...]):
-        """
-        on_frame_reached_downstream event handler.
+        """Set which frame types trigger the on_frame_reached_downstream event.
 
+        Args:
+            types: Tuple of frame types to monitor for downstream events.
         """
         self._reached_downstream_types = types
 
     def has_finished(self) -> bool:
-        """
+        """Check if the pipeline task has finished execution.
+
+        This indicates whether the tasks has finished, meaninig all processors
        have stopped.
 
+        Returns:
+            True if all processors have stopped and the task is complete.
         """
         return self._finished
 
     async def stop_when_done(self):
-        """
-        order to stop the task after everything in it has been processed.
+        """Schedule the pipeline to stop after processing all queued frames.
 
+        Sends an EndFrame to gracefully terminate the pipeline once all
+        current processing is complete.
         """
         logger.debug(f"Task {self} scheduled to stop when done")
         await self.queue_frame(EndFrame())
 
     async def cancel(self):
-        """
+        """Immediately stop the running pipeline.
+
+        Cancels all running tasks and stops frame processing without
+        waiting for completion.
+        """
         await self._cancel()
 
     async def run(self, params: PipelineTaskParams):
-        """
+        """Start and manage the pipeline execution until completion or cancellation.
+
+        Args:
+            params: Configuration parameters for pipeline execution.
+        """
         if self.has_finished():
             return
         cleanup_pipeline = True
@@ -394,29 +363,43 @@ class PipelineTask(BasePipelineTask):
 
             # Create all main tasks and wait of the main push task. This is the
             # task that pushes frames to the very beginning of our pipeline (our
-            # controlled
+            # controlled source processor).
             push_task = await self._create_tasks()
-            await
+            await push_task
 
             # We have already cleaned up the pipeline inside the task.
             cleanup_pipeline = False
+
+            # Pipeline has finished nicely.
+            self._finished = True
         except asyncio.CancelledError:
-            #
-            #
-
-            # awaiting a task.
-            pass
+            # Raise exception back to the pipeline runner so it can cancel this
+            # task properly.
+            raise
         finally:
-            #
-            #
-            # properly.
-
-
-
-
-
-
-
+            # We can reach this point for different reasons:
+            #
+            # 1. The task has finished properly (e.g. `EndFrame`).
+            # 2. By calling `PipelineTask.cancel()`.
+            # 3. By asyncio task cancellation.
+            #
+            # Case (1) will execute the code below without issues because
+            # `self._finished` is true.
+            #
+            # Case (2) will execute the code below without issues because
+            # `self._cancelled` is true.
+            #
+            # Case (3) will raise the exception above (because we are cancelling
+            # the asyncio task). This will be then captured by the
+            # `PipelineRunner` which will call `PipelineTask.cancel()` and
+            # therefore becoming case (2).
+            if self._finished or self._cancelled:
+                logger.debug(f"Pipeline task {self} has finished, cleaning up resources")
+                await self._cancel_tasks()
+                await self._cleanup(cleanup_pipeline)
+                if self._check_dangling_tasks:
+                    self._print_dangling_tasks()
+                self._finished = True
 
     async def queue_frame(self, frame: Frame):
         """Queue a single frame to be pushed down the pipeline.
@@ -440,25 +423,25 @@ class PipelineTask(BasePipelineTask):
             await self.queue_frame(frame)
 
     async def _cancel(self):
+        """Internal cancellation logic for the pipeline task."""
         if not self._cancelled:
             logger.debug(f"Canceling pipeline task {self}", call_id=self._conversation_id)
             self._cancelled = True
             # Make sure everything is cleaned up downstream. This is sent
             # out-of-band from the main streaming task which is what we want since
             # we want to cancel right away.
-            await self.
-            #
+            await self._pipeline.queue_frame(CancelFrame())
+            # Wait for CancelFrame to make it throught the pipeline.
+            await self._wait_for_pipeline_end()
+            # Only cancel the push task, we don't want to be able to process any
+            # other frame after cancel. Everything else will be cancelled in
+            # run().
            if self._process_push_task:
                 await self._task_manager.cancel_task(self._process_push_task)
                 self._process_push_task = None
 
     async def _create_tasks(self):
-
-            self._process_up_queue(), f"{self}::_process_up_queue"
-        )
-        self._process_down_task = self._task_manager.create_task(
-            self._process_down_queue(), f"{self}::_process_down_queue"
-        )
+        """Create and start all pipeline processing tasks."""
         self._process_push_task = self._task_manager.create_task(
             self._process_push_queue(), f"{self}::_process_push_queue"
         )
@@ -468,6 +451,7 @@ class PipelineTask(BasePipelineTask):
         return self._process_push_task
 
     def _maybe_start_heartbeat_tasks(self):
+        """Start heartbeat tasks if heartbeats are enabled and not already running."""
         if self._params.enable_heartbeats and self._heartbeat_push_task is None:
             self._heartbeat_push_task = self._task_manager.create_task(
                 self._heartbeat_push_handler(), f"{self}::_heartbeat_push_handler"
@@ -477,26 +461,25 @@ class PipelineTask(BasePipelineTask):
             )
 
     def _maybe_start_idle_task(self):
+        """Start idle monitoring task if idle timeout is configured."""
         if self._idle_timeout_secs:
             self._idle_monitor_task = self._task_manager.create_task(
                 self._idle_monitor_handler(), f"{self}::_idle_monitor_handler"
             )
 
     async def _cancel_tasks(self):
+        """Cancel all running pipeline tasks."""
         await self._observer.stop()
 
-        if self.
-            await self._task_manager.cancel_task(self.
-            self.
-
-        if self._process_down_task:
-            await self._task_manager.cancel_task(self._process_down_task)
-            self._process_down_task = None
+        if self._process_push_task:
+            await self._task_manager.cancel_task(self._process_push_task)
+            self._process_push_task = None
 
         await self._maybe_cancel_heartbeat_tasks()
         await self._maybe_cancel_idle_task()
 
     async def _maybe_cancel_heartbeat_tasks(self):
+        """Cancel heartbeat tasks if they are running."""
         if not self._params.enable_heartbeats:
             return
 
@@ -509,11 +492,13 @@ class PipelineTask(BasePipelineTask):
             self._heartbeat_monitor_task = None
 
     async def _maybe_cancel_idle_task(self):
+        """Cancel idle monitoring task if it is running."""
         if self._idle_timeout_secs and self._idle_monitor_task:
             await self._task_manager.cancel_task(self._idle_monitor_task)
             self._idle_monitor_task = None
 
     def _initial_metrics_frame(self) -> MetricsFrame:
+        """Create an initial metrics frame with zero values for all processors."""
         processors = self._pipeline.processors_with_metrics()
         data = []
         for p in processors:
@@ -522,29 +507,24 @@ class PipelineTask(BasePipelineTask):
         return MetricsFrame(data=data)
 
     async def _wait_for_pipeline_end(self):
+        """Wait for the pipeline to signal completion."""
         await self._pipeline_end_event.wait()
         self._pipeline_end_event.clear()
 
     async def _setup(self, params: PipelineTaskParams):
-
-
-            enable_watchdog_logging=self._enable_watchdog_logging,
-            enable_watchdog_timers=self._enable_watchdog_timers,
-            watchdog_timeout=self._watchdog_timeout_secs,
-        )
+        """Set up the pipeline task and all processors."""
+        mgr_params = TaskManagerParams(loop=params.loop)
         self._task_manager.setup(mgr_params)
 
         setup = FrameProcessorSetup(
             clock=self._clock,
             task_manager=self._task_manager,
             observer=self._observer,
-            watchdog_timers_enabled=self._enable_watchdog_timers,
         )
-        await self._source.setup(setup)
         await self._pipeline.setup(setup)
-        await self._sink.setup(setup)
 
     async def _cleanup(self, cleanup_pipeline: bool):
+        """Clean up the pipeline task and processors."""
         # Cleanup base object.
         await self.cleanup()
 
@@ -553,16 +533,15 @@ class PipelineTask(BasePipelineTask):
             self._turn_trace_observer.end_conversation_tracing()
 
         # Cleanup pipeline processors.
-        await self._source.cleanup()
         if cleanup_pipeline:
             await self._pipeline.cleanup()
-        await self._sink.cleanup()
 
     async def _process_push_queue(self):
-        """
+        """Process frames from the push queue and send them through the pipeline.
+
+        This is the task that runs the pipeline for the first time by sending
         a StartFrame and by pushing any other frames queued by the user. It runs
         until the tasks is cancelled or stopped (e.g. with an EndFrame).
-
         """
         self._clock.start()
 
@@ -573,115 +552,108 @@ class PipelineTask(BasePipelineTask):
             audio_in_sample_rate=self._params.audio_in_sample_rate,
             audio_out_sample_rate=self._params.audio_out_sample_rate,
             enable_metrics=self._params.enable_metrics,
+            enable_tracing=self._enable_tracing,
             enable_usage_metrics=self._params.enable_usage_metrics,
             report_only_initial_ttfb=self._params.report_only_initial_ttfb,
             interruption_strategies=self._params.interruption_strategies,
         )
         start_frame.metadata = self._params.start_metadata
-        await self.
+        await self._pipeline.queue_frame(start_frame)
 
         if self._params.enable_metrics and self._params.send_initial_empty_metrics:
-            await self.
+            await self._pipeline.queue_frame(self._initial_metrics_frame())
 
         running = True
         cleanup_pipeline = True
         while running:
             frame = await self._push_queue.get()
-            await self.
-            if isinstance(frame, (EndFrame, StopFrame)):
+            await self._pipeline.queue_frame(frame)
+            if isinstance(frame, (CancelFrame, EndFrame, StopFrame)):
                 await self._wait_for_pipeline_end()
             running = not isinstance(frame, (CancelFrame, EndFrame, StopFrame))
             cleanup_pipeline = not isinstance(frame, StopFrame)
             self._push_queue.task_done()
         await self._cleanup(cleanup_pipeline)
 
-    async def
-        """
+    async def _source_push_frame(self, frame: Frame, direction: FrameDirection):
+        """Process frames coming upstream from the pipeline.
+
+        This is the task that processes frames coming upstream from the
         pipeline. These frames might indicate, for example, that we want the
         pipeline to be stopped (e.g. EndTaskFrame) in which case we would send
         an EndFrame down the pipeline.
-
         """
-
-
-
-
-
-
-
-
-
-
-
+        if isinstance(frame, self._reached_upstream_types):
+            await self._call_event_handler("on_frame_reached_upstream", frame)
+
+        if isinstance(frame, EndTaskFrame):
+            # Tell the task we should end nicely.
+            await self.queue_frame(EndFrame())
+        elif isinstance(frame, CancelTaskFrame):
+            # Tell the task we should end right away.
+            await self.queue_frame(CancelFrame())
+        elif isinstance(frame, StopTaskFrame):
+            # Tell the task we should stop nicely.
+            await self.queue_frame(StopFrame())
+        elif isinstance(frame, ErrorFrame):
+            if frame.fatal:
+                logger.error(f"A fatal error occurred: {frame}")
+                # Cancel all tasks downstream.
                 await self.queue_frame(CancelFrame())
-
-
-
-
-                if frame.fatal:
-                    logger.error(
-                        "A fatal error occurred: {}", str(frame), call_id=self._conversation_id
-                    )
-                    # Cancel all tasks downstream.
-                    await self.queue_frame(CancelFrame())
-                    # Tell the task we should stop.
-                    await self.queue_frame(StopTaskFrame())
-                else:
-                    logger.warning(
-                        "Something went wrong: {}", str(frame), call_id=self._conversation_id
-                    )
-            self._up_queue.task_done()
+                # Tell the task we should stop.
+                await self.queue_frame(StopTaskFrame())
+            else:
+                logger.warning(f"Something went wrong: {frame}")
 
-    async def
-        """
+    async def _sink_push_frame(self, frame: Frame, direction: FrameDirection):
+        """Process frames coming downstream from the pipeline.
+
+        This tasks process frames coming downstream from the pipeline. For
         example, heartbeat frames or an EndFrame which would indicate all
         processors have handled the EndFrame and therefore we can exit the task
         cleanly.
-
         """
-
-
-
-
-
-        await self.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            elif isinstance(frame, HeartbeatFrame):
-                await self._heartbeat_queue.put(frame)
-            self._down_queue.task_done()
+        # Queue received frame to the idle queue so we can monitor idle
+        # pipelines.
+        await self._idle_queue.put(frame)
+
+        if isinstance(frame, self._reached_downstream_types):
+            await self._call_event_handler("on_frame_reached_downstream", frame)
+
+        if isinstance(frame, StartFrame):
+            await self._call_event_handler("on_pipeline_started", frame)
+
+            # Start heartbeat tasks now that StartFrame has been processed
+            # by all processors in the pipeline
+            self._maybe_start_heartbeat_tasks()
+        elif isinstance(frame, EndFrame):
+            await self._call_event_handler("on_pipeline_ended", frame)
+            self._pipeline_end_event.set()
+        elif isinstance(frame, StopFrame):
+            await self._call_event_handler("on_pipeline_stopped", frame)
+            self._pipeline_end_event.set()
+        elif isinstance(frame, CancelFrame):
+            await self._call_event_handler("on_pipeline_cancelled", frame)
+            self._pipeline_end_event.set()
+        elif isinstance(frame, HeartbeatFrame):
+            await self._heartbeat_queue.put(frame)
 
     async def _heartbeat_push_handler(self):
-        """
+        """Push heartbeat frames at regular intervals."""
         while True:
             # Don't use `queue_frame()` because if an EndFrame is queued the
             # task will just stop waiting for the pipeline to finish not
             # allowing more frames to be pushed.
-            await self.
+            await self._pipeline.queue_frame(HeartbeatFrame(timestamp=self._clock.get_time()))
             await asyncio.sleep(self._params.heartbeats_period_secs)
 
     async def _heartbeat_monitor_handler(self):
-        """
+        """Monitor heartbeat frames for processing time and timeout detection.
+
+        This task monitors heartbeat frames. If a heartbeat frame has not
         been received for a long period a warning will be logged. It also logs
         the time that a heartbeat frame takes to processes, that is how long it
         takes for the heartbeat frame to traverse all the pipeline.
-
         """
         wait_time = HEARTBEAT_MONITOR_SECONDS
         while True:
@@ -697,9 +669,12 @@ class PipelineTask(BasePipelineTask):
                 )
 
     async def _idle_monitor_handler(self):
-        """
-        received (heartbeats don't count) the pipeline is considered idle.
+        """Monitor pipeline activity and detect idle conditions.
 
+        Tracks frame activity and triggers idle timeout events when the
+        pipeline hasn't received relevant frames within the timeout period.
+
+        Note: Heartbeats are excluded from idle detection.
         """
         running = True
         last_frame_time = 0
@@ -737,11 +712,18 @@ class PipelineTask(BasePipelineTask):
                 running = await self._idle_timeout_detected(frame_buffer)
 
     async def _idle_timeout_detected(self, last_frames: Deque[Frame]) -> bool:
-        """
+        """Handle idle timeout detection and optional cancellation.
+
+        Args:
+            last_frames: Recent frames received before timeout for debugging.
 
         Returns:
-
+            Whether the pipeline task should continue running.
         """
+        # If we are cancelling, just exit the task.
+        if self._cancelled:
+            return True
+
         logger.warning("Idle timeout detected. Last 10 frames received:")
         for i, frame in enumerate(last_frames, 1):
             logger.warning(f"Frame {i}: {frame}")
@@ -757,6 +739,7 @@ class PipelineTask(BasePipelineTask):
         return True
 
     def _print_dangling_tasks(self):
+        """Log any dangling tasks that haven't been properly cleaned up."""
         tasks = [t.get_name() for t in self._task_manager.current_tasks()]
         if tasks:
             logger.warning(f"Dangling tasks detected: {tasks}", call_id=self._conversation_id)
```