dv-pipecat-ai 0.0.74.dev770__py3-none-any.whl → 0.0.82.dev776__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dv-pipecat-ai might be problematic.

Files changed (244)
  1. {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/METADATA +137 -93
  2. dv_pipecat_ai-0.0.82.dev776.dist-info/RECORD +340 -0
  3. pipecat/__init__.py +17 -0
  4. pipecat/adapters/base_llm_adapter.py +36 -1
  5. pipecat/adapters/schemas/direct_function.py +296 -0
  6. pipecat/adapters/schemas/function_schema.py +15 -6
  7. pipecat/adapters/schemas/tools_schema.py +55 -7
  8. pipecat/adapters/services/anthropic_adapter.py +22 -3
  9. pipecat/adapters/services/aws_nova_sonic_adapter.py +23 -3
  10. pipecat/adapters/services/bedrock_adapter.py +22 -3
  11. pipecat/adapters/services/gemini_adapter.py +16 -3
  12. pipecat/adapters/services/open_ai_adapter.py +17 -2
  13. pipecat/adapters/services/open_ai_realtime_adapter.py +23 -3
  14. pipecat/audio/filters/base_audio_filter.py +30 -6
  15. pipecat/audio/filters/koala_filter.py +37 -2
  16. pipecat/audio/filters/krisp_filter.py +59 -6
  17. pipecat/audio/filters/noisereduce_filter.py +37 -0
  18. pipecat/audio/interruptions/base_interruption_strategy.py +25 -5
  19. pipecat/audio/interruptions/min_words_interruption_strategy.py +21 -4
  20. pipecat/audio/mixers/base_audio_mixer.py +30 -7
  21. pipecat/audio/mixers/soundfile_mixer.py +53 -6
  22. pipecat/audio/resamplers/base_audio_resampler.py +17 -9
  23. pipecat/audio/resamplers/resampy_resampler.py +26 -1
  24. pipecat/audio/resamplers/soxr_resampler.py +32 -1
  25. pipecat/audio/resamplers/soxr_stream_resampler.py +101 -0
  26. pipecat/audio/utils.py +194 -1
  27. pipecat/audio/vad/silero.py +60 -3
  28. pipecat/audio/vad/vad_analyzer.py +114 -30
  29. pipecat/clocks/base_clock.py +19 -0
  30. pipecat/clocks/system_clock.py +25 -0
  31. pipecat/extensions/voicemail/__init__.py +0 -0
  32. pipecat/extensions/voicemail/voicemail_detector.py +707 -0
  33. pipecat/frames/frames.py +590 -156
  34. pipecat/metrics/metrics.py +64 -1
  35. pipecat/observers/base_observer.py +58 -19
  36. pipecat/observers/loggers/debug_log_observer.py +56 -64
  37. pipecat/observers/loggers/llm_log_observer.py +8 -1
  38. pipecat/observers/loggers/transcription_log_observer.py +19 -7
  39. pipecat/observers/loggers/user_bot_latency_log_observer.py +32 -5
  40. pipecat/observers/turn_tracking_observer.py +26 -1
  41. pipecat/pipeline/base_pipeline.py +5 -7
  42. pipecat/pipeline/base_task.py +52 -9
  43. pipecat/pipeline/parallel_pipeline.py +121 -177
  44. pipecat/pipeline/pipeline.py +129 -20
  45. pipecat/pipeline/runner.py +50 -1
  46. pipecat/pipeline/sync_parallel_pipeline.py +132 -32
  47. pipecat/pipeline/task.py +263 -280
  48. pipecat/pipeline/task_observer.py +85 -34
  49. pipecat/pipeline/to_be_updated/merge_pipeline.py +32 -2
  50. pipecat/processors/aggregators/dtmf_aggregator.py +29 -22
  51. pipecat/processors/aggregators/gated.py +25 -24
  52. pipecat/processors/aggregators/gated_openai_llm_context.py +22 -2
  53. pipecat/processors/aggregators/llm_response.py +398 -89
  54. pipecat/processors/aggregators/openai_llm_context.py +161 -13
  55. pipecat/processors/aggregators/sentence.py +25 -14
  56. pipecat/processors/aggregators/user_response.py +28 -3
  57. pipecat/processors/aggregators/vision_image_frame.py +24 -14
  58. pipecat/processors/async_generator.py +28 -0
  59. pipecat/processors/audio/audio_buffer_processor.py +78 -37
  60. pipecat/processors/consumer_processor.py +25 -6
  61. pipecat/processors/filters/frame_filter.py +23 -0
  62. pipecat/processors/filters/function_filter.py +30 -0
  63. pipecat/processors/filters/identity_filter.py +17 -2
  64. pipecat/processors/filters/null_filter.py +24 -1
  65. pipecat/processors/filters/stt_mute_filter.py +56 -21
  66. pipecat/processors/filters/wake_check_filter.py +46 -3
  67. pipecat/processors/filters/wake_notifier_filter.py +21 -3
  68. pipecat/processors/frame_processor.py +488 -131
  69. pipecat/processors/frameworks/langchain.py +38 -3
  70. pipecat/processors/frameworks/rtvi.py +719 -34
  71. pipecat/processors/gstreamer/pipeline_source.py +41 -0
  72. pipecat/processors/idle_frame_processor.py +26 -3
  73. pipecat/processors/logger.py +23 -0
  74. pipecat/processors/metrics/frame_processor_metrics.py +77 -4
  75. pipecat/processors/metrics/sentry.py +42 -4
  76. pipecat/processors/producer_processor.py +34 -14
  77. pipecat/processors/text_transformer.py +22 -10
  78. pipecat/processors/transcript_processor.py +48 -29
  79. pipecat/processors/user_idle_processor.py +31 -21
  80. pipecat/runner/__init__.py +1 -0
  81. pipecat/runner/daily.py +132 -0
  82. pipecat/runner/livekit.py +148 -0
  83. pipecat/runner/run.py +543 -0
  84. pipecat/runner/types.py +67 -0
  85. pipecat/runner/utils.py +515 -0
  86. pipecat/serializers/base_serializer.py +42 -0
  87. pipecat/serializers/exotel.py +17 -6
  88. pipecat/serializers/genesys.py +95 -0
  89. pipecat/serializers/livekit.py +33 -0
  90. pipecat/serializers/plivo.py +16 -15
  91. pipecat/serializers/protobuf.py +37 -1
  92. pipecat/serializers/telnyx.py +18 -17
  93. pipecat/serializers/twilio.py +32 -16
  94. pipecat/services/ai_service.py +5 -3
  95. pipecat/services/anthropic/llm.py +113 -43
  96. pipecat/services/assemblyai/models.py +63 -5
  97. pipecat/services/assemblyai/stt.py +64 -11
  98. pipecat/services/asyncai/__init__.py +0 -0
  99. pipecat/services/asyncai/tts.py +501 -0
  100. pipecat/services/aws/llm.py +185 -111
  101. pipecat/services/aws/stt.py +217 -23
  102. pipecat/services/aws/tts.py +118 -52
  103. pipecat/services/aws/utils.py +101 -5
  104. pipecat/services/aws_nova_sonic/aws.py +82 -64
  105. pipecat/services/aws_nova_sonic/context.py +15 -6
  106. pipecat/services/azure/common.py +10 -2
  107. pipecat/services/azure/image.py +32 -0
  108. pipecat/services/azure/llm.py +9 -7
  109. pipecat/services/azure/stt.py +65 -2
  110. pipecat/services/azure/tts.py +154 -23
  111. pipecat/services/cartesia/stt.py +125 -8
  112. pipecat/services/cartesia/tts.py +102 -38
  113. pipecat/services/cerebras/llm.py +15 -23
  114. pipecat/services/deepgram/stt.py +19 -11
  115. pipecat/services/deepgram/tts.py +36 -0
  116. pipecat/services/deepseek/llm.py +14 -23
  117. pipecat/services/elevenlabs/tts.py +330 -64
  118. pipecat/services/fal/image.py +43 -0
  119. pipecat/services/fal/stt.py +48 -10
  120. pipecat/services/fireworks/llm.py +14 -21
  121. pipecat/services/fish/tts.py +109 -9
  122. pipecat/services/gemini_multimodal_live/__init__.py +1 -0
  123. pipecat/services/gemini_multimodal_live/events.py +83 -2
  124. pipecat/services/gemini_multimodal_live/file_api.py +189 -0
  125. pipecat/services/gemini_multimodal_live/gemini.py +218 -21
  126. pipecat/services/gladia/config.py +17 -10
  127. pipecat/services/gladia/stt.py +82 -36
  128. pipecat/services/google/frames.py +40 -0
  129. pipecat/services/google/google.py +2 -0
  130. pipecat/services/google/image.py +39 -2
  131. pipecat/services/google/llm.py +176 -58
  132. pipecat/services/google/llm_openai.py +26 -4
  133. pipecat/services/google/llm_vertex.py +37 -15
  134. pipecat/services/google/rtvi.py +41 -0
  135. pipecat/services/google/stt.py +65 -17
  136. pipecat/services/google/test-google-chirp.py +45 -0
  137. pipecat/services/google/tts.py +390 -19
  138. pipecat/services/grok/llm.py +8 -6
  139. pipecat/services/groq/llm.py +8 -6
  140. pipecat/services/groq/stt.py +13 -9
  141. pipecat/services/groq/tts.py +40 -0
  142. pipecat/services/hamsa/__init__.py +9 -0
  143. pipecat/services/hamsa/stt.py +241 -0
  144. pipecat/services/heygen/__init__.py +5 -0
  145. pipecat/services/heygen/api.py +281 -0
  146. pipecat/services/heygen/client.py +620 -0
  147. pipecat/services/heygen/video.py +338 -0
  148. pipecat/services/image_service.py +5 -3
  149. pipecat/services/inworld/__init__.py +1 -0
  150. pipecat/services/inworld/tts.py +592 -0
  151. pipecat/services/llm_service.py +127 -45
  152. pipecat/services/lmnt/tts.py +80 -7
  153. pipecat/services/mcp_service.py +85 -44
  154. pipecat/services/mem0/memory.py +42 -13
  155. pipecat/services/minimax/tts.py +74 -15
  156. pipecat/services/mistral/__init__.py +0 -0
  157. pipecat/services/mistral/llm.py +185 -0
  158. pipecat/services/moondream/vision.py +55 -10
  159. pipecat/services/neuphonic/tts.py +275 -48
  160. pipecat/services/nim/llm.py +8 -6
  161. pipecat/services/ollama/llm.py +27 -7
  162. pipecat/services/openai/base_llm.py +54 -16
  163. pipecat/services/openai/image.py +30 -0
  164. pipecat/services/openai/llm.py +7 -5
  165. pipecat/services/openai/stt.py +13 -9
  166. pipecat/services/openai/tts.py +42 -10
  167. pipecat/services/openai_realtime_beta/azure.py +11 -9
  168. pipecat/services/openai_realtime_beta/context.py +7 -5
  169. pipecat/services/openai_realtime_beta/events.py +10 -7
  170. pipecat/services/openai_realtime_beta/openai.py +37 -18
  171. pipecat/services/openpipe/llm.py +30 -24
  172. pipecat/services/openrouter/llm.py +9 -7
  173. pipecat/services/perplexity/llm.py +15 -19
  174. pipecat/services/piper/tts.py +26 -12
  175. pipecat/services/playht/tts.py +227 -65
  176. pipecat/services/qwen/llm.py +8 -6
  177. pipecat/services/rime/tts.py +128 -17
  178. pipecat/services/riva/stt.py +160 -22
  179. pipecat/services/riva/tts.py +67 -2
  180. pipecat/services/sambanova/llm.py +19 -17
  181. pipecat/services/sambanova/stt.py +14 -8
  182. pipecat/services/sarvam/tts.py +60 -13
  183. pipecat/services/simli/video.py +82 -21
  184. pipecat/services/soniox/__init__.py +0 -0
  185. pipecat/services/soniox/stt.py +398 -0
  186. pipecat/services/speechmatics/stt.py +29 -17
  187. pipecat/services/stt_service.py +47 -11
  188. pipecat/services/tavus/video.py +94 -25
  189. pipecat/services/together/llm.py +8 -6
  190. pipecat/services/tts_service.py +77 -53
  191. pipecat/services/ultravox/stt.py +46 -43
  192. pipecat/services/vision_service.py +5 -3
  193. pipecat/services/websocket_service.py +12 -11
  194. pipecat/services/whisper/base_stt.py +58 -12
  195. pipecat/services/whisper/stt.py +69 -58
  196. pipecat/services/xtts/tts.py +59 -2
  197. pipecat/sync/base_notifier.py +19 -0
  198. pipecat/sync/event_notifier.py +24 -0
  199. pipecat/tests/utils.py +73 -5
  200. pipecat/transcriptions/language.py +24 -0
  201. pipecat/transports/base_input.py +112 -8
  202. pipecat/transports/base_output.py +235 -13
  203. pipecat/transports/base_transport.py +119 -0
  204. pipecat/transports/local/audio.py +76 -0
  205. pipecat/transports/local/tk.py +84 -0
  206. pipecat/transports/network/fastapi_websocket.py +174 -15
  207. pipecat/transports/network/small_webrtc.py +383 -39
  208. pipecat/transports/network/webrtc_connection.py +214 -8
  209. pipecat/transports/network/websocket_client.py +171 -1
  210. pipecat/transports/network/websocket_server.py +147 -9
  211. pipecat/transports/services/daily.py +792 -70
  212. pipecat/transports/services/helpers/daily_rest.py +122 -129
  213. pipecat/transports/services/livekit.py +339 -4
  214. pipecat/transports/services/tavus.py +273 -38
  215. pipecat/utils/asyncio/task_manager.py +92 -186
  216. pipecat/utils/base_object.py +83 -1
  217. pipecat/utils/network.py +2 -0
  218. pipecat/utils/string.py +114 -58
  219. pipecat/utils/text/base_text_aggregator.py +44 -13
  220. pipecat/utils/text/base_text_filter.py +46 -0
  221. pipecat/utils/text/markdown_text_filter.py +70 -14
  222. pipecat/utils/text/pattern_pair_aggregator.py +18 -14
  223. pipecat/utils/text/simple_text_aggregator.py +43 -2
  224. pipecat/utils/text/skip_tags_aggregator.py +21 -13
  225. pipecat/utils/time.py +36 -0
  226. pipecat/utils/tracing/class_decorators.py +32 -7
  227. pipecat/utils/tracing/conversation_context_provider.py +12 -2
  228. pipecat/utils/tracing/service_attributes.py +80 -64
  229. pipecat/utils/tracing/service_decorators.py +48 -21
  230. pipecat/utils/tracing/setup.py +13 -7
  231. pipecat/utils/tracing/turn_context_provider.py +12 -2
  232. pipecat/utils/tracing/turn_trace_observer.py +27 -0
  233. pipecat/utils/utils.py +14 -14
  234. dv_pipecat_ai-0.0.74.dev770.dist-info/RECORD +0 -319
  235. pipecat/examples/daily_runner.py +0 -64
  236. pipecat/examples/run.py +0 -265
  237. pipecat/utils/asyncio/watchdog_async_iterator.py +0 -72
  238. pipecat/utils/asyncio/watchdog_event.py +0 -42
  239. pipecat/utils/asyncio/watchdog_priority_queue.py +0 -48
  240. pipecat/utils/asyncio/watchdog_queue.py +0 -48
  241. {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/WHEEL +0 -0
  242. {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/licenses/LICENSE +0 -0
  243. {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/top_level.txt +0 -0
  244. /pipecat/{examples → extensions}/__init__.py +0 -0
pipecat/pipeline/task.py CHANGED
@@ -4,6 +4,13 @@
 # SPDX-License-Identifier: BSD 2-Clause License
 #

+"""Pipeline task implementation for managing frame processing pipelines.
+
+This module provides the main PipelineTask class that orchestrates pipeline
+execution, frame routing, lifecycle management, and monitoring capabilities
+including heartbeats, idle detection, and observer integration.
+"""
+
 import asyncio
 import time
 from collections import deque
@@ -25,26 +32,24 @@ from pipecat.frames.frames import (
     Frame,
     HeartbeatFrame,
     InputAudioRawFrame,
+    InterimTranscriptionFrame,
     LLMFullResponseEndFrame,
     MetricsFrame,
     StartFrame,
     StopFrame,
     StopTaskFrame,
+    TranscriptionFrame,
+    UserStartedSpeakingFrame,
+    UserStoppedSpeakingFrame,
 )
 from pipecat.metrics.metrics import ProcessingMetricsData, TTFBMetricsData
 from pipecat.observers.base_observer import BaseObserver
 from pipecat.observers.turn_tracking_observer import TurnTrackingObserver
-from pipecat.pipeline.base_pipeline import BasePipeline
 from pipecat.pipeline.base_task import BasePipelineTask, PipelineTaskParams
+from pipecat.pipeline.pipeline import Pipeline, PipelineSink, PipelineSource
 from pipecat.pipeline.task_observer import TaskObserver
 from pipecat.processors.frame_processor import FrameDirection, FrameProcessor, FrameProcessorSetup
-from pipecat.utils.asyncio.task_manager import (
-    WATCHDOG_TIMEOUT,
-    BaseTaskManager,
-    TaskManager,
-    TaskManagerParams,
-)
-from pipecat.utils.asyncio.watchdog_queue import WatchdogQueue
+from pipecat.utils.asyncio.task_manager import BaseTaskManager, TaskManager, TaskManagerParams
 from pipecat.utils.tracing.setup import is_tracing_available
 from pipecat.utils.tracing.turn_trace_observer import TurnTraceObserver

@@ -53,12 +58,13 @@ HEARTBEAT_MONITOR_SECONDS = HEARTBEAT_SECONDS * 10


 class PipelineParams(BaseModel):
-    """Configuration parameters for pipeline execution. These parameters are
-    usually passed to all frame processors using through `StartFrame`. For other
-    generic pipeline task parameters use `PipelineTask` constructor arguments
-    instead.
+    """Configuration parameters for pipeline execution.

-    Attributes:
+    These parameters are usually passed to all frame processors through
+    StartFrame. For other generic pipeline task parameters use PipelineTask
+    constructor arguments instead.
+
+    Parameters:
         allow_interruptions: Whether to allow pipeline interruptions.
         audio_in_sample_rate: Input audio sample rate in Hz.
         audio_out_sample_rate: Output audio sample rate in Hz.
@@ -66,12 +72,15 @@ class PipelineParams(BaseModel):
         enable_metrics: Whether to enable metrics collection.
         enable_usage_metrics: Whether to enable usage metrics.
         heartbeats_period_secs: Period between heartbeats in seconds.
+        interruption_strategies: Strategies for bot interruption behavior.
         observers: [deprecated] Use `observers` arg in `PipelineTask` class.
+
+            .. deprecated:: 0.0.58
+                Use the `observers` argument in the `PipelineTask` class instead.
+
         report_only_initial_ttfb: Whether to report only initial time to first byte.
         send_initial_empty_metrics: Whether to send initial empty metrics.
         start_metadata: Additional metadata for pipeline start.
-        interruption_strategies: Strategies for bot interruption behavior.
-
     """

     model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -90,124 +99,38 @@ class PipelineParams(BaseModel):
     start_metadata: Dict[str, Any] = Field(default_factory=dict)


-class PipelineTaskSource(FrameProcessor):
-    """Source processor for pipeline tasks that handles frame routing.
-
-    This is the source processor that is linked at the beginning of the
-    pipeline given to the pipeline task. It allows us to easily push frames
-    downstream to the pipeline and also receive upstream frames coming from the
-    pipeline.
-
-    Args:
-        up_queue: Queue for upstream frame processing.
-
-    """
-
-    def __init__(self, up_queue: asyncio.Queue, **kwargs):
-        super().__init__(**kwargs)
-        self._up_queue = up_queue
-
-    async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-
-        match direction:
-            case FrameDirection.UPSTREAM:
-                await self._up_queue.put(frame)
-            case FrameDirection.DOWNSTREAM:
-                await self.push_frame(frame, direction)
-
-
-class PipelineTaskSink(FrameProcessor):
-    """Sink processor for pipeline tasks that handles final frame processing.
-
-    This is the sink processor that is linked at the end of the pipeline
-    given to the pipeline task. It allows us to receive downstream frames and
-    act on them, for example, waiting to receive an EndFrame.
+class PipelineTask(BasePipelineTask):
+    """Manages the execution of a pipeline, handling frame processing and task lifecycle.

-    Args:
-        down_queue: Queue for downstream frame processing.
-    """
+    This class orchestrates pipeline execution with comprehensive monitoring,
+    event handling, and lifecycle management. It provides event handlers for
+    various pipeline states and frame types, idle detection, heartbeat monitoring,
+    and observer integration.

-    def __init__(self, down_queue: asyncio.Queue, **kwargs):
-        super().__init__(**kwargs)
-        self._down_queue = down_queue
+    Event handlers available:

-    async def process_frame(self, frame: Frame, direction: FrameDirection):
-        await super().process_frame(frame, direction)
-        await self._down_queue.put(frame)
+    - on_frame_reached_upstream: Called when upstream frames reach the source
+    - on_frame_reached_downstream: Called when downstream frames reach the sink
+    - on_idle_timeout: Called when pipeline is idle beyond timeout threshold
+    - on_pipeline_started: Called when pipeline starts with StartFrame
+    - on_pipeline_stopped: Called when pipeline stops with StopFrame
+    - on_pipeline_ended: Called when pipeline ends with EndFrame
+    - on_pipeline_cancelled: Called when pipeline is cancelled

+    Example::

-class PipelineTask(BasePipelineTask):
-    """Manages the execution of a pipeline, handling frame processing and task lifecycle.
+        @task.event_handler("on_frame_reached_upstream")
+        async def on_frame_reached_upstream(task, frame):
+            ...

-    It has a couple of event handlers `on_frame_reached_upstream` and
-    `on_frame_reached_downstream` that are called when upstream frames or
-    downstream frames reach both ends of pipeline. By default, the events
-    handlers will not be called unless some filters are set using
-    `set_reached_upstream_filter` and `set_reached_downstream_filter`.
-
-    @task.event_handler("on_frame_reached_upstream")
-    async def on_frame_reached_upstream(task, frame):
-        ...
-
-    @task.event_handler("on_frame_reached_downstream")
-    async def on_frame_reached_downstream(task, frame):
-        ...
-
-    It also has an event handler that detects when the pipeline is idle. By
-    default, a pipeline is idle if no `BotSpeakingFrame` or
-    `LLMFullResponseEndFrame` are received within `idle_timeout_secs`.
-
-    @task.event_handler("on_idle_timeout")
-    async def on_pipeline_idle_timeout(task):
-        ...
-
-    There are also events to know if a pipeline has been started, stopped, ended
-    or cancelled.
-
-    @task.event_handler("on_pipeline_started")
-    async def on_pipeline_started(task, frame: StartFrame):
-        ...
-
-    @task.event_handler("on_pipeline_stopped")
-    async def on_pipeline_stopped(task, frame: StopFrame):
-        ...
-
-    @task.event_handler("on_pipeline_ended")
-    async def on_pipeline_ended(task, frame: EndFrame):
-        ...
-
-    @task.event_handler("on_pipeline_cancelled")
-    async def on_pipeline_cancelled(task, frame: CancelFrame):
-        ...
-
-    Args:
-        pipeline: The pipeline to execute.
-        params: Configuration parameters for the pipeline.
-        additional_span_attributes: Optional dictionary of attributes to propagate as
-            OpenTelemetry conversation span attributes.
-        cancel_on_idle_timeout: Whether the pipeline task should be cancelled if
-            the idle timeout is reached.
-        check_dangling_tasks: Whether to check for processors' tasks finishing properly.
-        clock: Clock implementation for timing operations.
-        conversation_id: Optional custom ID for the conversation.
-        enable_tracing: Whether to enable tracing.
-        enable_turn_tracking: Whether to enable turn tracking.
-        enable_watchdog_logging: Whether to print task processing times.
-        enable_watchdog_timers: Whether to enable task watchdog timers.
-        idle_timeout_frames: A tuple with the frames that should trigger an idle
-            timeout if not received withing `idle_timeout_seconds`.
-        idle_timeout_secs: Timeout (in seconds) to consider pipeline idle or
-            None. If a pipeline is idle the pipeline task will be cancelled
-            automatically.
-        observers: List of observers for monitoring pipeline execution.
-        watchdog_timeout_secs: Watchdog timer timeout (in seconds). A warning
-            will be logged if the watchdog timer is not reset before this timeout.
+        @task.event_handler("on_idle_timeout")
+        async def on_pipeline_idle_timeout(task):
+            ...
     """

     def __init__(
         self,
-        pipeline: BasePipeline,
+        pipeline: FrameProcessor,
         *,
         params: Optional[PipelineParams] = None,
         additional_span_attributes: Optional[dict] = None,
@@ -217,19 +140,41 @@ class PipelineTask(BasePipelineTask):
         conversation_id: Optional[str] = None,
         enable_tracing: bool = False,
         enable_turn_tracking: bool = True,
-        enable_watchdog_logging: bool = False,
-        enable_watchdog_timers: bool = False,
         idle_timeout_frames: Tuple[Type[Frame], ...] = (
             BotSpeakingFrame,
+            InterimTranscriptionFrame,
             LLMFullResponseEndFrame,
+            TranscriptionFrame,
+            UserStartedSpeakingFrame,
+            UserStoppedSpeakingFrame,
         ),
         idle_timeout_secs: Optional[float] = 300,
         observers: Optional[List[BaseObserver]] = None,
         task_manager: Optional[BaseTaskManager] = None,
-        watchdog_timeout_secs: float = WATCHDOG_TIMEOUT,
     ):
+        """Initialize the PipelineTask.
+
+        Args:
+            pipeline: The pipeline to execute.
+            params: Configuration parameters for the pipeline.
+            additional_span_attributes: Optional dictionary of attributes to propagate as
+                OpenTelemetry conversation span attributes.
+            cancel_on_idle_timeout: Whether the pipeline task should be cancelled if
+                the idle timeout is reached.
+            check_dangling_tasks: Whether to check for processors' tasks finishing properly.
+            clock: Clock implementation for timing operations.
+            conversation_id: Optional custom ID for the conversation.
+            enable_tracing: Whether to enable tracing.
+            enable_turn_tracking: Whether to enable turn tracking.
+            idle_timeout_frames: A tuple with the frames that should trigger an idle
+                timeout if not received within `idle_timeout_seconds`.
+            idle_timeout_secs: Timeout (in seconds) to consider pipeline idle or
+                None. If a pipeline is idle the pipeline task will be cancelled
+                automatically.
+            observers: List of observers for monitoring pipeline execution.
+            task_manager: Optional task manager for handling asyncio tasks.
+        """
         super().__init__()
-        self._pipeline = pipeline
         self._params = params or PipelineParams()
         self._additional_span_attributes = additional_span_attributes or {}
         self._cancel_on_idle_timeout = cancel_on_idle_timeout
@@ -238,11 +183,8 @@ class PipelineTask(BasePipelineTask):
         self._conversation_id = conversation_id
         self._enable_tracing = enable_tracing and is_tracing_available()
         self._enable_turn_tracking = enable_turn_tracking
-        self._enable_watchdog_logging = enable_watchdog_logging
-        self._enable_watchdog_timers = enable_watchdog_timers
         self._idle_timeout_frames = idle_timeout_frames
         self._idle_timeout_secs = idle_timeout_secs
-        self._watchdog_timeout_secs = watchdog_timeout_secs
         if self._params.observers:
             import warnings

@@ -273,40 +215,30 @@ class PipelineTask(BasePipelineTask):
         # PipelineTask and its frame processors.
         self._task_manager = task_manager or TaskManager(conversation_id)

-        # This queue receives frames coming from the pipeline upstream.
-        self._up_queue = WatchdogQueue(self._task_manager)
-        self._process_up_task: Optional[asyncio.Task] = None
-        # This queue receives frames coming from the pipeline downstream.
-        self._down_queue = WatchdogQueue(self._task_manager)
-        self._process_down_task: Optional[asyncio.Task] = None
         # This queue is the queue used to push frames to the pipeline.
-        self._push_queue = WatchdogQueue(self._task_manager)
+        self._push_queue = asyncio.Queue()
         self._process_push_task: Optional[asyncio.Task] = None
         # This is the heartbeat queue. When a heartbeat frame is received in the
         # down queue we add it to the heartbeat queue for processing.
-        self._heartbeat_queue = WatchdogQueue(self._task_manager)
+        self._heartbeat_queue = asyncio.Queue()
         self._heartbeat_push_task: Optional[asyncio.Task] = None
         self._heartbeat_monitor_task: Optional[asyncio.Task] = None
         # This is the idle queue. When frames are received downstream they are
         # put in the queue. If no frame is received the pipeline is considered
         # idle.
-        self._idle_queue = WatchdogQueue(self._task_manager)
+        self._idle_queue = asyncio.Queue()
         self._idle_monitor_task: Optional[asyncio.Task] = None
         # This event is used to indicate a finalize frame (e.g. EndFrame,
         # StopFrame) has been received in the down queue.
         self._pipeline_end_event = asyncio.Event()

-        # This is a source processor that we connect to the provided
-        # pipeline. This source processor allows up to receive and react to
-        # upstream frames.
-        self._source = PipelineTaskSource(self._up_queue)
-        self._source.link(pipeline)
-
-        # This is a sink processor that we connect to the provided
-        # pipeline. This sink processor allows up to receive and react to
-        # downstream frames.
-        self._sink = PipelineTaskSink(self._down_queue)
-        pipeline.link(self._sink)
+        # This is the final pipeline. It is composed of a source processor,
+        # followed by the user pipeline, and ending with a sink processor. The
+        # source allows us to receive and react to upstream frames, and the sink
+        # allows us to receive and react to downstream frames.
+        source = PipelineSource(self._source_push_frame, name=f"{self}::Source")
+        sink = PipelineSink(self._sink_push_frame, name=f"{self}::Sink")
+        self._pipeline = Pipeline([pipeline], source=source, sink=sink)

         # The task observer acts as a proxy to the provided observers. This way,
         # we only need to pass a single observer (using the StartFrame) which
@@ -331,60 +263,97 @@ class PipelineTask(BasePipelineTask):

     @property
     def params(self) -> PipelineParams:
-        """Returns the pipeline parameters of this task."""
+        """Get the pipeline parameters for this task.
+
+        Returns:
+            The pipeline parameters configuration.
+        """
         return self._params

     @property
     def turn_tracking_observer(self) -> Optional[TurnTrackingObserver]:
-        """Return the turn tracking observer if enabled."""
+        """Get the turn tracking observer if enabled.
+
+        Returns:
+            The turn tracking observer instance or None if not enabled.
+        """
         return self._turn_tracking_observer

     @property
     def turn_trace_observer(self) -> Optional[TurnTraceObserver]:
-        """Return the turn trace observer if enabled."""
+        """Get the turn trace observer if enabled.
+
+        Returns:
+            The turn trace observer instance or None if not enabled.
+        """
         return self._turn_trace_observer

     def add_observer(self, observer: BaseObserver):
+        """Add an observer to monitor pipeline execution.
+
+        Args:
+            observer: The observer to add to the pipeline monitoring.
+        """
         self._observer.add_observer(observer)

     async def remove_observer(self, observer: BaseObserver):
+        """Remove an observer from pipeline monitoring.
+
+        Args:
+            observer: The observer to remove from pipeline monitoring.
+        """
         await self._observer.remove_observer(observer)

     def set_reached_upstream_filter(self, types: Tuple[Type[Frame], ...]):
-        """Sets which frames will be checked before calling the
-        on_frame_reached_upstream event handler.
+        """Set which frame types trigger the on_frame_reached_upstream event.

+        Args:
+            types: Tuple of frame types to monitor for upstream events.
         """
         self._reached_upstream_types = types

     def set_reached_downstream_filter(self, types: Tuple[Type[Frame], ...]):
-        """Sets which frames will be checked before calling the
-        on_frame_reached_downstream event handler.
+        """Set which frame types trigger the on_frame_reached_downstream event.

+        Args:
+            types: Tuple of frame types to monitor for downstream events.
         """
         self._reached_downstream_types = types

     def has_finished(self) -> bool:
-        """Indicates whether the tasks has finished. That is, all processors
+        """Check if the pipeline task has finished execution.
+
+        This indicates whether the tasks has finished, meaninig all processors
         have stopped.

+        Returns:
+            True if all processors have stopped and the task is complete.
         """
         return self._finished

     async def stop_when_done(self):
-        """This is a helper function that sends an EndFrame to the pipeline in
-        order to stop the task after everything in it has been processed.
+        """Schedule the pipeline to stop after processing all queued frames.

+        Sends an EndFrame to gracefully terminate the pipeline once all
+        current processing is complete.
         """
         logger.debug(f"Task {self} scheduled to stop when done")
         await self.queue_frame(EndFrame())

     async def cancel(self):
-        """Stops the running pipeline immediately."""
+        """Immediately stop the running pipeline.
+
+        Cancels all running tasks and stops frame processing without
+        waiting for completion.
+        """
         await self._cancel()

     async def run(self, params: PipelineTaskParams):
-        """Starts and manages the pipeline execution until completion or cancellation."""
+        """Start and manage the pipeline execution until completion or cancellation.
+
+        Args:
+            params: Configuration parameters for pipeline execution.
+        """
         if self.has_finished():
             return
         cleanup_pipeline = True
@@ -394,29 +363,43 @@ class PipelineTask(BasePipelineTask):

             # Create all main tasks and wait of the main push task. This is the
             # task that pushes frames to the very beginning of our pipeline (our
-            # controlled PipelineTaskSource processor).
+            # controlled source processor).
             push_task = await self._create_tasks()
-            await self._task_manager.wait_for_task(push_task)
+            await push_task

             # We have already cleaned up the pipeline inside the task.
             cleanup_pipeline = False
+
+            # Pipeline has finished nicely.
+            self._finished = True
         except asyncio.CancelledError:
-            # We are awaiting on the push task and it might be cancelled
-            # (e.g. Ctrl-C). This means we will get a CancelledError here as
-            # well, because you get a CancelledError in every place you are
-            # awaiting a task.
-            pass
+            # Raise exception back to the pipeline runner so it can cancel this
+            # task properly.
+            raise
         finally:
-            # It's possibe that we get an asyncio.CancelledError from the
-            # outside, if so we need to make sure everything gets cancelled
-            # properly.
-            if cleanup_pipeline:
-                await self._cancel()
-            await self._cancel_tasks()
-            await self._cleanup(cleanup_pipeline)
-            if self._check_dangling_tasks:
-                self._print_dangling_tasks()
-            self._finished = True
+            # We can reach this point for different reasons:
+            #
+            # 1. The task has finished properly (e.g. `EndFrame`).
+            # 2. By calling `PipelineTask.cancel()`.
+            # 3. By asyncio task cancellation.
+            #
+            # Case (1) will execute the code below without issues because
+            # `self._finished` is true.
+            #
+            # Case (2) will execute the code below without issues because
+            # `self._cancelled` is true.
+            #
+            # Case (3) will raise the exception above (because we are cancelling
+            # the asyncio task). This will be then captured by the
+            # `PipelineRunner` which will call `PipelineTask.cancel()` and
+            # therefore becoming case (2).
+            if self._finished or self._cancelled:
+                logger.debug(f"Pipeline task {self} has finished, cleaning up resources")
+                await self._cancel_tasks()
+                await self._cleanup(cleanup_pipeline)
+                if self._check_dangling_tasks:
+                    self._print_dangling_tasks()
+                self._finished = True

     async def queue_frame(self, frame: Frame):
         """Queue a single frame to be pushed down the pipeline.
@@ -440,25 +423,25 @@ class PipelineTask(BasePipelineTask):
             await self.queue_frame(frame)

     async def _cancel(self):
+        """Internal cancellation logic for the pipeline task."""
         if not self._cancelled:
             logger.debug(f"Canceling pipeline task {self}", call_id=self._conversation_id)
             self._cancelled = True
             # Make sure everything is cleaned up downstream. This is sent
             # out-of-band from the main streaming task which is what we want since
             # we want to cancel right away.
-            await self._source.push_frame(CancelFrame())
-            # Only cancel the push task. Everything else will be cancelled in run().
+            await self._pipeline.queue_frame(CancelFrame())
+            # Wait for CancelFrame to make it throught the pipeline.
+            await self._wait_for_pipeline_end()
+            # Only cancel the push task, we don't want to be able to process any
+            # other frame after cancel. Everything else will be cancelled in
+            # run().
             if self._process_push_task:
                 await self._task_manager.cancel_task(self._process_push_task)
                 self._process_push_task = None

     async def _create_tasks(self):
-        self._process_up_task = self._task_manager.create_task(
-            self._process_up_queue(), f"{self}::_process_up_queue"
-        )
-        self._process_down_task = self._task_manager.create_task(
-            self._process_down_queue(), f"{self}::_process_down_queue"
-        )
+        """Create and start all pipeline processing tasks."""
         self._process_push_task = self._task_manager.create_task(
             self._process_push_queue(), f"{self}::_process_push_queue"
         )
@@ -468,6 +451,7 @@ class PipelineTask(BasePipelineTask):
         return self._process_push_task

     def _maybe_start_heartbeat_tasks(self):
+        """Start heartbeat tasks if heartbeats are enabled and not already running."""
         if self._params.enable_heartbeats and self._heartbeat_push_task is None:
             self._heartbeat_push_task = self._task_manager.create_task(
                 self._heartbeat_push_handler(), f"{self}::_heartbeat_push_handler"
@@ -477,26 +461,25 @@ class PipelineTask(BasePipelineTask):
             )

     def _maybe_start_idle_task(self):
+        """Start idle monitoring task if idle timeout is configured."""
         if self._idle_timeout_secs:
             self._idle_monitor_task = self._task_manager.create_task(
                 self._idle_monitor_handler(), f"{self}::_idle_monitor_handler"
             )

     async def _cancel_tasks(self):
+        """Cancel all running pipeline tasks."""
         await self._observer.stop()

-        if self._process_up_task:
-            await self._task_manager.cancel_task(self._process_up_task)
-            self._process_up_task = None
-
-        if self._process_down_task:
-            await self._task_manager.cancel_task(self._process_down_task)
-            self._process_down_task = None
+        if self._process_push_task:
+            await self._task_manager.cancel_task(self._process_push_task)
+            self._process_push_task = None

         await self._maybe_cancel_heartbeat_tasks()
         await self._maybe_cancel_idle_task()

     async def _maybe_cancel_heartbeat_tasks(self):
+        """Cancel heartbeat tasks if they are running."""
         if not self._params.enable_heartbeats:
             return

@@ -509,11 +492,13 @@ class PipelineTask(BasePipelineTask):
             self._heartbeat_monitor_task = None

     async def _maybe_cancel_idle_task(self):
+        """Cancel idle monitoring task if it is running."""
         if self._idle_timeout_secs and self._idle_monitor_task:
             await self._task_manager.cancel_task(self._idle_monitor_task)
             self._idle_monitor_task = None

     def _initial_metrics_frame(self) -> MetricsFrame:
+        """Create an initial metrics frame with zero values for all processors."""
         processors = self._pipeline.processors_with_metrics()
         data = []
         for p in processors:
@@ -522,29 +507,24 @@ class PipelineTask(BasePipelineTask):
         return MetricsFrame(data=data)

     async def _wait_for_pipeline_end(self):
+        """Wait for the pipeline to signal completion."""
         await self._pipeline_end_event.wait()
         self._pipeline_end_event.clear()

     async def _setup(self, params: PipelineTaskParams):
-        mgr_params = TaskManagerParams(
-            loop=params.loop,
-            enable_watchdog_logging=self._enable_watchdog_logging,
-            enable_watchdog_timers=self._enable_watchdog_timers,
-            watchdog_timeout=self._watchdog_timeout_secs,
-        )
+        """Set up the pipeline task and all processors."""
+        mgr_params = TaskManagerParams(loop=params.loop)
         self._task_manager.setup(mgr_params)

         setup = FrameProcessorSetup(
             clock=self._clock,
             task_manager=self._task_manager,
             observer=self._observer,
-            watchdog_timers_enabled=self._enable_watchdog_timers,
         )
-        await self._source.setup(setup)
         await self._pipeline.setup(setup)
-        await self._sink.setup(setup)

     async def _cleanup(self, cleanup_pipeline: bool):
+        """Clean up the pipeline task and processors."""
         # Cleanup base object.
         await self.cleanup()

@@ -553,16 +533,15 @@ class PipelineTask(BasePipelineTask):
             self._turn_trace_observer.end_conversation_tracing()

         # Cleanup pipeline processors.
-        await self._source.cleanup()
         if cleanup_pipeline:
             await self._pipeline.cleanup()
-        await self._sink.cleanup()

     async def _process_push_queue(self):
-        """This is the task that runs the pipeline for the first time by sending
+        """Process frames from the push queue and send them through the pipeline.
+
+        This is the task that runs the pipeline for the first time by sending
         a StartFrame and by pushing any other frames queued by the user. It runs
         until the tasks is cancelled or stopped (e.g. with an EndFrame).
-
         """
         self._clock.start()

@@ -573,115 +552,108 @@ class PipelineTask(BasePipelineTask):
             audio_in_sample_rate=self._params.audio_in_sample_rate,
             audio_out_sample_rate=self._params.audio_out_sample_rate,
             enable_metrics=self._params.enable_metrics,
+            enable_tracing=self._enable_tracing,
             enable_usage_metrics=self._params.enable_usage_metrics,
             report_only_initial_ttfb=self._params.report_only_initial_ttfb,
             interruption_strategies=self._params.interruption_strategies,
         )
         start_frame.metadata = self._params.start_metadata
-        await self._source.queue_frame(start_frame, FrameDirection.DOWNSTREAM)
+        await self._pipeline.queue_frame(start_frame)

         if self._params.enable_metrics and self._params.send_initial_empty_metrics:
-            await self._source.queue_frame(self._initial_metrics_frame(), FrameDirection.DOWNSTREAM)
+            await self._pipeline.queue_frame(self._initial_metrics_frame())

         running = True
         cleanup_pipeline = True
         while running:
             frame = await self._push_queue.get()
-            await self._source.queue_frame(frame, FrameDirection.DOWNSTREAM)
-            if isinstance(frame, (EndFrame, StopFrame)):
+            await self._pipeline.queue_frame(frame)
+            if isinstance(frame, (CancelFrame, EndFrame, StopFrame)):
                 await self._wait_for_pipeline_end()
             running = not isinstance(frame, (CancelFrame, EndFrame, StopFrame))
             cleanup_pipeline = not isinstance(frame, StopFrame)
             self._push_queue.task_done()
         await self._cleanup(cleanup_pipeline)

-    async def _process_up_queue(self):
-        """This is the task that processes frames coming upstream from the
+    async def _source_push_frame(self, frame: Frame, direction: FrameDirection):
+        """Process frames coming upstream from the pipeline.
+
+        This is the task that processes frames coming upstream from the
         pipeline. These frames might indicate, for example, that we want the
         pipeline to be stopped (e.g. EndTaskFrame) in which case we would send
         an EndFrame down the pipeline.
-
         """
-        while True:
-            frame = await self._up_queue.get()
-
-            if isinstance(frame, self._reached_upstream_types):
-                await self._call_event_handler("on_frame_reached_upstream", frame)
-
-            if isinstance(frame, EndTaskFrame):
-                # Tell the task we should end nicely.
-                await self.queue_frame(EndFrame())
-            elif isinstance(frame, CancelTaskFrame):
-                # Tell the task we should end right away.
+        if isinstance(frame, self._reached_upstream_types):
+            await self._call_event_handler("on_frame_reached_upstream", frame)
+
+        if isinstance(frame, EndTaskFrame):
+            # Tell the task we should end nicely.
+            await self.queue_frame(EndFrame())
+        elif isinstance(frame, CancelTaskFrame):
+            # Tell the task we should end right away.
+            await self.queue_frame(CancelFrame())
+        elif isinstance(frame, StopTaskFrame):
+            # Tell the task we should stop nicely.
+            await self.queue_frame(StopFrame())
+        elif isinstance(frame, ErrorFrame):
+            if frame.fatal:
+                logger.error(f"A fatal error occurred: {frame}")
+                # Cancel all tasks downstream.
                 await self.queue_frame(CancelFrame())
-            elif isinstance(frame, StopTaskFrame):
-                # Tell the task we should stop nicely.
-                await self.queue_frame(StopFrame())
-            elif isinstance(frame, ErrorFrame):
-                if frame.fatal:
-                    logger.error(
-                        "A fatal error occurred: {}", str(frame), call_id=self._conversation_id
-                    )
-                    # Cancel all tasks downstream.
-                    await self.queue_frame(CancelFrame())
-                    # Tell the task we should stop.
-                    await self.queue_frame(StopTaskFrame())
-                else:
-                    logger.warning(
-                        "Something went wrong: {}", str(frame), call_id=self._conversation_id
-                    )
-            self._up_queue.task_done()
+                # Tell the task we should stop.
+                await self.queue_frame(StopTaskFrame())
+            else:
+                logger.warning(f"Something went wrong: {frame}")

-    async def _process_down_queue(self):
-        """This tasks process frames coming downstream from the pipeline. For
+    async def _sink_push_frame(self, frame: Frame, direction: FrameDirection):
+        """Process frames coming downstream from the pipeline.
+
+        This tasks process frames coming downstream from the pipeline. For
         example, heartbeat frames or an EndFrame which would indicate all
         processors have handled the EndFrame and therefore we can exit the task
         cleanly.
-
         """
-        while True:
-            frame = await self._down_queue.get()
-
-            # Queue received frame to the idle queue so we can monitor idle
-            # pipelines.
-            await self._idle_queue.put(frame)
-
-            if isinstance(frame, self._reached_downstream_types):
-                await self._call_event_handler("on_frame_reached_downstream", frame)
-
-            if isinstance(frame, StartFrame):
-                await self._call_event_handler("on_pipeline_started", frame)
-
-                # Start heartbeat tasks now that StartFrame has been processed
-                # by all processors in the pipeline
-                self._maybe_start_heartbeat_tasks()
-            elif isinstance(frame, EndFrame):
-                await self._call_event_handler("on_pipeline_ended", frame)
-                self._pipeline_end_event.set()
-            elif isinstance(frame, StopFrame):
-                await self._call_event_handler("on_pipeline_stopped", frame)
-                self._pipeline_end_event.set()
-            elif isinstance(frame, CancelFrame):
-                await self._call_event_handler("on_pipeline_cancelled", frame)
-            elif isinstance(frame, HeartbeatFrame):
-                await self._heartbeat_queue.put(frame)
-            self._down_queue.task_done()
+        # Queue received frame to the idle queue so we can monitor idle
+        # pipelines.
+        await self._idle_queue.put(frame)
+
+        if isinstance(frame, self._reached_downstream_types):
+            await self._call_event_handler("on_frame_reached_downstream", frame)
+
+        if isinstance(frame, StartFrame):
+            await self._call_event_handler("on_pipeline_started", frame)
+
+            # Start heartbeat tasks now that StartFrame has been processed
+            # by all processors in the pipeline
+            self._maybe_start_heartbeat_tasks()
+        elif isinstance(frame, EndFrame):
+            await self._call_event_handler("on_pipeline_ended", frame)
+            self._pipeline_end_event.set()
+        elif isinstance(frame, StopFrame):
+            await self._call_event_handler("on_pipeline_stopped", frame)
+            self._pipeline_end_event.set()
+        elif isinstance(frame, CancelFrame):
+            await self._call_event_handler("on_pipeline_cancelled", frame)
+            self._pipeline_end_event.set()
+        elif isinstance(frame, HeartbeatFrame):
+            await self._heartbeat_queue.put(frame)

     async def _heartbeat_push_handler(self):
-        """This tasks pushes a heartbeat frame every heartbeat period."""
+        """Push heartbeat frames at regular intervals."""
         while True:
             # Don't use `queue_frame()` because if an EndFrame is queued the
             # task will just stop waiting for the pipeline to finish not
             # allowing more frames to be pushed.
-            await self._source.queue_frame(HeartbeatFrame(timestamp=self._clock.get_time()))
+            await self._pipeline.queue_frame(HeartbeatFrame(timestamp=self._clock.get_time()))
             await asyncio.sleep(self._params.heartbeats_period_secs)

     async def _heartbeat_monitor_handler(self):
-        """This tasks monitors heartbeat frames. If a heartbeat frame has not
+        """Monitor heartbeat frames for processing time and timeout detection.
+
+        This task monitors heartbeat frames. If a heartbeat frame has not
         been received for a long period a warning will be logged. It also logs
         the time that a heartbeat frame takes to processes, that is how long it
         takes for the heartbeat frame to traverse all the pipeline.
-
         """
         wait_time = HEARTBEAT_MONITOR_SECONDS
         while True:
@@ -697,9 +669,12 @@ class PipelineTask(BasePipelineTask):
                 )

     async def _idle_monitor_handler(self):
-        """This tasks monitors activity in the pipeline. If no frames are
-        received (heartbeats don't count) the pipeline is considered idle.
+        """Monitor pipeline activity and detect idle conditions.

+        Tracks frame activity and triggers idle timeout events when the
+        pipeline hasn't received relevant frames within the timeout period.
+
+        Note: Heartbeats are excluded from idle detection.
         """
         running = True
         last_frame_time = 0
@@ -737,11 +712,18 @@ class PipelineTask(BasePipelineTask):
                 running = await self._idle_timeout_detected(frame_buffer)

     async def _idle_timeout_detected(self, last_frames: Deque[Frame]) -> bool:
-        """Logic for when the pipeline is idle.
+        """Handle idle timeout detection and optional cancellation.
+
+        Args:
+            last_frames: Recent frames received before timeout for debugging.

         Returns:
-            bool: Whther the pipeline task is being cancelled or not.
+            Whether the pipeline task should continue running.
         """
+        # If we are cancelling, just exit the task.
+        if self._cancelled:
+            return True
+
         logger.warning("Idle timeout detected. Last 10 frames received:")
         for i, frame in enumerate(last_frames, 1):
             logger.warning(f"Frame {i}: {frame}")
@@ -757,6 +739,7 @@ class PipelineTask(BasePipelineTask):
         return True

     def _print_dangling_tasks(self):
+        """Log any dangling tasks that haven't been properly cleaned up."""
         tasks = [t.get_name() for t in self._task_manager.current_tasks()]
         if tasks:
             logger.warning(f"Dangling tasks detected: {tasks}", call_id=self._conversation_id)