dv-pipecat-ai 0.0.74.dev770__py3-none-any.whl → 0.0.82.dev776__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dv-pipecat-ai might be problematic.

Files changed (244)
  1. {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/METADATA +137 -93
  2. dv_pipecat_ai-0.0.82.dev776.dist-info/RECORD +340 -0
  3. pipecat/__init__.py +17 -0
  4. pipecat/adapters/base_llm_adapter.py +36 -1
  5. pipecat/adapters/schemas/direct_function.py +296 -0
  6. pipecat/adapters/schemas/function_schema.py +15 -6
  7. pipecat/adapters/schemas/tools_schema.py +55 -7
  8. pipecat/adapters/services/anthropic_adapter.py +22 -3
  9. pipecat/adapters/services/aws_nova_sonic_adapter.py +23 -3
  10. pipecat/adapters/services/bedrock_adapter.py +22 -3
  11. pipecat/adapters/services/gemini_adapter.py +16 -3
  12. pipecat/adapters/services/open_ai_adapter.py +17 -2
  13. pipecat/adapters/services/open_ai_realtime_adapter.py +23 -3
  14. pipecat/audio/filters/base_audio_filter.py +30 -6
  15. pipecat/audio/filters/koala_filter.py +37 -2
  16. pipecat/audio/filters/krisp_filter.py +59 -6
  17. pipecat/audio/filters/noisereduce_filter.py +37 -0
  18. pipecat/audio/interruptions/base_interruption_strategy.py +25 -5
  19. pipecat/audio/interruptions/min_words_interruption_strategy.py +21 -4
  20. pipecat/audio/mixers/base_audio_mixer.py +30 -7
  21. pipecat/audio/mixers/soundfile_mixer.py +53 -6
  22. pipecat/audio/resamplers/base_audio_resampler.py +17 -9
  23. pipecat/audio/resamplers/resampy_resampler.py +26 -1
  24. pipecat/audio/resamplers/soxr_resampler.py +32 -1
  25. pipecat/audio/resamplers/soxr_stream_resampler.py +101 -0
  26. pipecat/audio/utils.py +194 -1
  27. pipecat/audio/vad/silero.py +60 -3
  28. pipecat/audio/vad/vad_analyzer.py +114 -30
  29. pipecat/clocks/base_clock.py +19 -0
  30. pipecat/clocks/system_clock.py +25 -0
  31. pipecat/extensions/voicemail/__init__.py +0 -0
  32. pipecat/extensions/voicemail/voicemail_detector.py +707 -0
  33. pipecat/frames/frames.py +590 -156
  34. pipecat/metrics/metrics.py +64 -1
  35. pipecat/observers/base_observer.py +58 -19
  36. pipecat/observers/loggers/debug_log_observer.py +56 -64
  37. pipecat/observers/loggers/llm_log_observer.py +8 -1
  38. pipecat/observers/loggers/transcription_log_observer.py +19 -7
  39. pipecat/observers/loggers/user_bot_latency_log_observer.py +32 -5
  40. pipecat/observers/turn_tracking_observer.py +26 -1
  41. pipecat/pipeline/base_pipeline.py +5 -7
  42. pipecat/pipeline/base_task.py +52 -9
  43. pipecat/pipeline/parallel_pipeline.py +121 -177
  44. pipecat/pipeline/pipeline.py +129 -20
  45. pipecat/pipeline/runner.py +50 -1
  46. pipecat/pipeline/sync_parallel_pipeline.py +132 -32
  47. pipecat/pipeline/task.py +263 -280
  48. pipecat/pipeline/task_observer.py +85 -34
  49. pipecat/pipeline/to_be_updated/merge_pipeline.py +32 -2
  50. pipecat/processors/aggregators/dtmf_aggregator.py +29 -22
  51. pipecat/processors/aggregators/gated.py +25 -24
  52. pipecat/processors/aggregators/gated_openai_llm_context.py +22 -2
  53. pipecat/processors/aggregators/llm_response.py +398 -89
  54. pipecat/processors/aggregators/openai_llm_context.py +161 -13
  55. pipecat/processors/aggregators/sentence.py +25 -14
  56. pipecat/processors/aggregators/user_response.py +28 -3
  57. pipecat/processors/aggregators/vision_image_frame.py +24 -14
  58. pipecat/processors/async_generator.py +28 -0
  59. pipecat/processors/audio/audio_buffer_processor.py +78 -37
  60. pipecat/processors/consumer_processor.py +25 -6
  61. pipecat/processors/filters/frame_filter.py +23 -0
  62. pipecat/processors/filters/function_filter.py +30 -0
  63. pipecat/processors/filters/identity_filter.py +17 -2
  64. pipecat/processors/filters/null_filter.py +24 -1
  65. pipecat/processors/filters/stt_mute_filter.py +56 -21
  66. pipecat/processors/filters/wake_check_filter.py +46 -3
  67. pipecat/processors/filters/wake_notifier_filter.py +21 -3
  68. pipecat/processors/frame_processor.py +488 -131
  69. pipecat/processors/frameworks/langchain.py +38 -3
  70. pipecat/processors/frameworks/rtvi.py +719 -34
  71. pipecat/processors/gstreamer/pipeline_source.py +41 -0
  72. pipecat/processors/idle_frame_processor.py +26 -3
  73. pipecat/processors/logger.py +23 -0
  74. pipecat/processors/metrics/frame_processor_metrics.py +77 -4
  75. pipecat/processors/metrics/sentry.py +42 -4
  76. pipecat/processors/producer_processor.py +34 -14
  77. pipecat/processors/text_transformer.py +22 -10
  78. pipecat/processors/transcript_processor.py +48 -29
  79. pipecat/processors/user_idle_processor.py +31 -21
  80. pipecat/runner/__init__.py +1 -0
  81. pipecat/runner/daily.py +132 -0
  82. pipecat/runner/livekit.py +148 -0
  83. pipecat/runner/run.py +543 -0
  84. pipecat/runner/types.py +67 -0
  85. pipecat/runner/utils.py +515 -0
  86. pipecat/serializers/base_serializer.py +42 -0
  87. pipecat/serializers/exotel.py +17 -6
  88. pipecat/serializers/genesys.py +95 -0
  89. pipecat/serializers/livekit.py +33 -0
  90. pipecat/serializers/plivo.py +16 -15
  91. pipecat/serializers/protobuf.py +37 -1
  92. pipecat/serializers/telnyx.py +18 -17
  93. pipecat/serializers/twilio.py +32 -16
  94. pipecat/services/ai_service.py +5 -3
  95. pipecat/services/anthropic/llm.py +113 -43
  96. pipecat/services/assemblyai/models.py +63 -5
  97. pipecat/services/assemblyai/stt.py +64 -11
  98. pipecat/services/asyncai/__init__.py +0 -0
  99. pipecat/services/asyncai/tts.py +501 -0
  100. pipecat/services/aws/llm.py +185 -111
  101. pipecat/services/aws/stt.py +217 -23
  102. pipecat/services/aws/tts.py +118 -52
  103. pipecat/services/aws/utils.py +101 -5
  104. pipecat/services/aws_nova_sonic/aws.py +82 -64
  105. pipecat/services/aws_nova_sonic/context.py +15 -6
  106. pipecat/services/azure/common.py +10 -2
  107. pipecat/services/azure/image.py +32 -0
  108. pipecat/services/azure/llm.py +9 -7
  109. pipecat/services/azure/stt.py +65 -2
  110. pipecat/services/azure/tts.py +154 -23
  111. pipecat/services/cartesia/stt.py +125 -8
  112. pipecat/services/cartesia/tts.py +102 -38
  113. pipecat/services/cerebras/llm.py +15 -23
  114. pipecat/services/deepgram/stt.py +19 -11
  115. pipecat/services/deepgram/tts.py +36 -0
  116. pipecat/services/deepseek/llm.py +14 -23
  117. pipecat/services/elevenlabs/tts.py +330 -64
  118. pipecat/services/fal/image.py +43 -0
  119. pipecat/services/fal/stt.py +48 -10
  120. pipecat/services/fireworks/llm.py +14 -21
  121. pipecat/services/fish/tts.py +109 -9
  122. pipecat/services/gemini_multimodal_live/__init__.py +1 -0
  123. pipecat/services/gemini_multimodal_live/events.py +83 -2
  124. pipecat/services/gemini_multimodal_live/file_api.py +189 -0
  125. pipecat/services/gemini_multimodal_live/gemini.py +218 -21
  126. pipecat/services/gladia/config.py +17 -10
  127. pipecat/services/gladia/stt.py +82 -36
  128. pipecat/services/google/frames.py +40 -0
  129. pipecat/services/google/google.py +2 -0
  130. pipecat/services/google/image.py +39 -2
  131. pipecat/services/google/llm.py +176 -58
  132. pipecat/services/google/llm_openai.py +26 -4
  133. pipecat/services/google/llm_vertex.py +37 -15
  134. pipecat/services/google/rtvi.py +41 -0
  135. pipecat/services/google/stt.py +65 -17
  136. pipecat/services/google/test-google-chirp.py +45 -0
  137. pipecat/services/google/tts.py +390 -19
  138. pipecat/services/grok/llm.py +8 -6
  139. pipecat/services/groq/llm.py +8 -6
  140. pipecat/services/groq/stt.py +13 -9
  141. pipecat/services/groq/tts.py +40 -0
  142. pipecat/services/hamsa/__init__.py +9 -0
  143. pipecat/services/hamsa/stt.py +241 -0
  144. pipecat/services/heygen/__init__.py +5 -0
  145. pipecat/services/heygen/api.py +281 -0
  146. pipecat/services/heygen/client.py +620 -0
  147. pipecat/services/heygen/video.py +338 -0
  148. pipecat/services/image_service.py +5 -3
  149. pipecat/services/inworld/__init__.py +1 -0
  150. pipecat/services/inworld/tts.py +592 -0
  151. pipecat/services/llm_service.py +127 -45
  152. pipecat/services/lmnt/tts.py +80 -7
  153. pipecat/services/mcp_service.py +85 -44
  154. pipecat/services/mem0/memory.py +42 -13
  155. pipecat/services/minimax/tts.py +74 -15
  156. pipecat/services/mistral/__init__.py +0 -0
  157. pipecat/services/mistral/llm.py +185 -0
  158. pipecat/services/moondream/vision.py +55 -10
  159. pipecat/services/neuphonic/tts.py +275 -48
  160. pipecat/services/nim/llm.py +8 -6
  161. pipecat/services/ollama/llm.py +27 -7
  162. pipecat/services/openai/base_llm.py +54 -16
  163. pipecat/services/openai/image.py +30 -0
  164. pipecat/services/openai/llm.py +7 -5
  165. pipecat/services/openai/stt.py +13 -9
  166. pipecat/services/openai/tts.py +42 -10
  167. pipecat/services/openai_realtime_beta/azure.py +11 -9
  168. pipecat/services/openai_realtime_beta/context.py +7 -5
  169. pipecat/services/openai_realtime_beta/events.py +10 -7
  170. pipecat/services/openai_realtime_beta/openai.py +37 -18
  171. pipecat/services/openpipe/llm.py +30 -24
  172. pipecat/services/openrouter/llm.py +9 -7
  173. pipecat/services/perplexity/llm.py +15 -19
  174. pipecat/services/piper/tts.py +26 -12
  175. pipecat/services/playht/tts.py +227 -65
  176. pipecat/services/qwen/llm.py +8 -6
  177. pipecat/services/rime/tts.py +128 -17
  178. pipecat/services/riva/stt.py +160 -22
  179. pipecat/services/riva/tts.py +67 -2
  180. pipecat/services/sambanova/llm.py +19 -17
  181. pipecat/services/sambanova/stt.py +14 -8
  182. pipecat/services/sarvam/tts.py +60 -13
  183. pipecat/services/simli/video.py +82 -21
  184. pipecat/services/soniox/__init__.py +0 -0
  185. pipecat/services/soniox/stt.py +398 -0
  186. pipecat/services/speechmatics/stt.py +29 -17
  187. pipecat/services/stt_service.py +47 -11
  188. pipecat/services/tavus/video.py +94 -25
  189. pipecat/services/together/llm.py +8 -6
  190. pipecat/services/tts_service.py +77 -53
  191. pipecat/services/ultravox/stt.py +46 -43
  192. pipecat/services/vision_service.py +5 -3
  193. pipecat/services/websocket_service.py +12 -11
  194. pipecat/services/whisper/base_stt.py +58 -12
  195. pipecat/services/whisper/stt.py +69 -58
  196. pipecat/services/xtts/tts.py +59 -2
  197. pipecat/sync/base_notifier.py +19 -0
  198. pipecat/sync/event_notifier.py +24 -0
  199. pipecat/tests/utils.py +73 -5
  200. pipecat/transcriptions/language.py +24 -0
  201. pipecat/transports/base_input.py +112 -8
  202. pipecat/transports/base_output.py +235 -13
  203. pipecat/transports/base_transport.py +119 -0
  204. pipecat/transports/local/audio.py +76 -0
  205. pipecat/transports/local/tk.py +84 -0
  206. pipecat/transports/network/fastapi_websocket.py +174 -15
  207. pipecat/transports/network/small_webrtc.py +383 -39
  208. pipecat/transports/network/webrtc_connection.py +214 -8
  209. pipecat/transports/network/websocket_client.py +171 -1
  210. pipecat/transports/network/websocket_server.py +147 -9
  211. pipecat/transports/services/daily.py +792 -70
  212. pipecat/transports/services/helpers/daily_rest.py +122 -129
  213. pipecat/transports/services/livekit.py +339 -4
  214. pipecat/transports/services/tavus.py +273 -38
  215. pipecat/utils/asyncio/task_manager.py +92 -186
  216. pipecat/utils/base_object.py +83 -1
  217. pipecat/utils/network.py +2 -0
  218. pipecat/utils/string.py +114 -58
  219. pipecat/utils/text/base_text_aggregator.py +44 -13
  220. pipecat/utils/text/base_text_filter.py +46 -0
  221. pipecat/utils/text/markdown_text_filter.py +70 -14
  222. pipecat/utils/text/pattern_pair_aggregator.py +18 -14
  223. pipecat/utils/text/simple_text_aggregator.py +43 -2
  224. pipecat/utils/text/skip_tags_aggregator.py +21 -13
  225. pipecat/utils/time.py +36 -0
  226. pipecat/utils/tracing/class_decorators.py +32 -7
  227. pipecat/utils/tracing/conversation_context_provider.py +12 -2
  228. pipecat/utils/tracing/service_attributes.py +80 -64
  229. pipecat/utils/tracing/service_decorators.py +48 -21
  230. pipecat/utils/tracing/setup.py +13 -7
  231. pipecat/utils/tracing/turn_context_provider.py +12 -2
  232. pipecat/utils/tracing/turn_trace_observer.py +27 -0
  233. pipecat/utils/utils.py +14 -14
  234. dv_pipecat_ai-0.0.74.dev770.dist-info/RECORD +0 -319
  235. pipecat/examples/daily_runner.py +0 -64
  236. pipecat/examples/run.py +0 -265
  237. pipecat/utils/asyncio/watchdog_async_iterator.py +0 -72
  238. pipecat/utils/asyncio/watchdog_event.py +0 -42
  239. pipecat/utils/asyncio/watchdog_priority_queue.py +0 -48
  240. pipecat/utils/asyncio/watchdog_queue.py +0 -48
  241. {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/WHEEL +0 -0
  242. {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/licenses/LICENSE +0 -0
  243. {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/top_level.txt +0 -0
  244. /pipecat/{examples → extensions}/__init__.py +0 -0

pipecat/processors/filters/stt_mute_filter.py

@@ -32,6 +32,8 @@ from pipecat.frames.frames import (
     TranscriptionFrame,
     UserStartedSpeakingFrame,
     UserStoppedSpeakingFrame,
+    VADUserStartedSpeakingFrame,
+    VADUserStoppedSpeakingFrame,
 )
 from pipecat.processors.frame_processor import FrameDirection, FrameProcessor
 
@@ -39,12 +41,17 @@ from pipecat.processors.frame_processor import FrameDirection, FrameProcessor
 class STTMuteStrategy(Enum):
     """Strategies determining when STT should be muted.
 
-    Attributes:
-        FIRST_SPEECH: Mute only during first detected bot speech
-        MUTE_UNTIL_FIRST_BOT_COMPLETE: Start muted and remain muted until first bot speech completes
-        FUNCTION_CALL: Mute during function calls
-        ALWAYS: Mute during all bot speech
-        CUSTOM: Allow custom logic via callback
+    Each strategy defines different conditions under which speech-to-text
+    processing should be temporarily disabled to prevent unwanted audio
+    processing during specific conversation states.
+
+    Parameters:
+        FIRST_SPEECH: Mute STT until the first bot speech is detected.
+        MUTE_UNTIL_FIRST_BOT_COMPLETE: Mute STT until the first bot completes speaking,
+            regardless of whether it is the first speech.
+        FUNCTION_CALL: Mute STT during function calls to prevent interruptions.
+        ALWAYS: Always mute STT when the bot is speaking.
+        CUSTOM: Use a custom callback to determine muting logic dynamically.
     """
 
     FIRST_SPEECH = "first_speech"
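
For context, a minimal sketch of how these strategies can be combined into a config and filter (module path taken from the file list above; the strategy choice is illustrative):

from pipecat.processors.filters.stt_mute_filter import (
    STTMuteConfig,
    STTMuteFilter,
    STTMuteStrategy,
)

# Start muted until the bot finishes its first utterance, and also mute
# while function calls are in flight.
config = STTMuteConfig(
    strategies={
        STTMuteStrategy.MUTE_UNTIL_FIRST_BOT_COMPLETE,
        STTMuteStrategy.FUNCTION_CALL,
    }
)
stt_mute_filter = STTMuteFilter(config=config)
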
@@ -58,10 +65,15 @@ class STTMuteStrategy(Enum):
 class STTMuteConfig:
     """Configuration for STT muting behavior.
 
-    Args:
-        strategies: Set of muting strategies to apply
+    Defines which muting strategies to apply and provides optional custom
+    callback for advanced muting logic. Multiple strategies can be combined
+    to create sophisticated muting behavior.
+
+    Parameters:
+        strategies: Set of muting strategies to apply simultaneously.
         should_mute_callback: Optional callback for custom muting logic.
-            Only required when using STTMuteStrategy.CUSTOM
+            Only required when using STTMuteStrategy.CUSTOM. Called with
+            the STTMuteFilter instance to determine muting state.
 
     Note:
         MUTE_UNTIL_FIRST_BOT_COMPLETE and FIRST_SPEECH strategies should not be used together
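
For the CUSTOM strategy documented above, the callback receives the STTMuteFilter instance and returns an awaitable bool; a hedged sketch (the external flag is hypothetical):

from pipecat.processors.filters.stt_mute_filter import (
    STTMuteConfig,
    STTMuteFilter,
    STTMuteStrategy,
)

muted_by_operator = False  # hypothetical application-level flag


async def should_mute(stt_filter: STTMuteFilter) -> bool:
    # Called by the filter when evaluating STTMuteStrategy.CUSTOM;
    # return True to mute STT, False to unmute.
    return muted_by_operator


config = STTMuteConfig(
    strategies={STTMuteStrategy.CUSTOM},
    should_mute_callback=should_mute,
)
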
@@ -69,10 +81,14 @@ class STTMuteConfig:
     """
 
     strategies: set[STTMuteStrategy]
-    # Optional callback for custom muting logic
     should_mute_callback: Optional[Callable[["STTMuteFilter"], Awaitable[bool]]] = None
 
     def __post_init__(self):
+        """Validate configuration after initialization.
+
+        Raises:
+            ValueError: If incompatible strategies are used together.
+        """
         if (
             STTMuteStrategy.MUTE_UNTIL_FIRST_BOT_COMPLETE in self.strategies
             and STTMuteStrategy.FIRST_SPEECH in self.strategies
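
Per the __post_init__ docstring added here, the two first-bot strategies are rejected at construction time; a minimal illustration (imports as in the sketches above):

# MUTE_UNTIL_FIRST_BOT_COMPLETE and FIRST_SPEECH are mutually exclusive.
try:
    STTMuteConfig(
        strategies={
            STTMuteStrategy.FIRST_SPEECH,
            STTMuteStrategy.MUTE_UNTIL_FIRST_BOT_COMPLETE,
        }
    )
except ValueError as e:
    print(f"Invalid STTMuteConfig: {e}")
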
@@ -86,15 +102,18 @@ class STTMuteFilter(FrameProcessor):
     """A processor that handles STT muting and interruption control.
 
     This processor combines STT muting and interruption control as a coordinated
-    feature. When STT is muted, interruptions are automatically disabled.
-
-    Args:
-        config: Configuration specifying muting strategies
-        stt_service: STT service instance (deprecated, will be removed in future version)
-        **kwargs: Additional arguments passed to parent class
+    feature. When STT is muted, interruptions are automatically disabled by
+    suppressing VAD-related frames. This prevents unwanted speech detection
+    during bot speech, function calls, or other specified conditions.
     """
 
     def __init__(self, *, config: STTMuteConfig, **kwargs):
+        """Initialize the STT mute filter.
+
+        Args:
+            config: Configuration specifying muting strategies and behavior.
+            **kwargs: Additional arguments passed to parent class.
+        """
         super().__init__(**kwargs)
         self._config = config
         self._first_speech_handled = False
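
A sketch of where the filter usually sits in a pipeline, assuming transport, stt, llm, and tts are already-constructed Pipecat components (the ordering shown is illustrative, not prescribed by this diff):

from pipecat.pipeline.pipeline import Pipeline

pipeline = Pipeline(
    [
        transport.input(),   # audio and VAD frames from the transport
        stt_mute_filter,     # STTMuteFilter configured as in the sketches above
        stt,                 # STT service receives STTMuteFrame downstream
        llm,
        tts,
        transport.output(),
    ]
)
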
@@ -105,18 +124,23 @@ class STTMuteFilter(FrameProcessor):
 
     @property
     def is_muted(self) -> bool:
-        """Returns whether STT is currently muted."""
+        """Check if STT is currently muted.
+
+        Returns:
+            True if STT is currently muted and audio frames are being suppressed.
+        """
         return self._is_muted
 
     async def _handle_mute_state(self, should_mute: bool):
-        """Handles both STT muting and interruption control."""
+        """Handle STT muting and interruption control state changes."""
         if should_mute != self.is_muted:
             self.logger.debug(f"STTMuteFilter {'muting' if should_mute else 'unmuting'}")
             self._is_muted = should_mute
-            await self.push_frame(STTMuteFrame(mute=should_mute))
+            await self.push_frame(STTMuteFrame(mute=should_mute), FrameDirection.UPSTREAM)
+            await self.push_frame(STTMuteFrame(mute=should_mute), FrameDirection.DOWNSTREAM)
 
     async def _should_mute(self) -> bool:
-        """Determines if STT should be muted based on current state and strategy."""
+        """Determine if STT should be muted based on current state and strategies."""
         for strategy in self._config.strategies:
             match strategy:
                 case STTMuteStrategy.FUNCTION_CALL:
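
Because the mute-state change is now pushed both upstream and downstream, neighboring processors can react to it; a hedged sketch of an observer-style processor (the class below is illustrative, not part of this release):

from pipecat.frames.frames import Frame, STTMuteFrame
from pipecat.processors.frame_processor import FrameDirection, FrameProcessor


class MuteStateLogger(FrameProcessor):
    """Illustrative processor that logs STT mute state changes."""

    async def process_frame(self, frame: Frame, direction: FrameDirection):
        await super().process_frame(frame, direction)
        if isinstance(frame, STTMuteFrame):
            print(f"STT mute -> {frame.mute} ({direction})")
        await self.push_frame(frame, direction)
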
@@ -145,7 +169,16 @@ class STTMuteFilter(FrameProcessor):
         return False
 
     async def process_frame(self, frame: Frame, direction: FrameDirection):
-        """Processes incoming frames and manages muting state."""
+        """Process incoming frames and manage muting state.
+
+        Monitors conversation state through frame types and applies muting
+        strategies accordingly. Suppresses VAD-related frames when muted
+        while allowing other frames to pass through.
+
+        Args:
+            frame: The incoming frame to process.
+            direction: The direction of frame flow in the pipeline.
+        """
         await super().process_frame(frame, direction)
 
         # Determine if we need to change mute state based on frame type
@@ -181,6 +214,8 @@ class STTMuteFilter(FrameProcessor):
             suppression_types = (
                 StartInterruptionFrame,
                 StopInterruptionFrame,
+                VADUserStartedSpeakingFrame,
+                VADUserStoppedSpeakingFrame,
                 UserStartedSpeakingFrame,
                 UserStoppedSpeakingFrame,
                 InterimTranscriptionFrame,

pipecat/processors/filters/wake_check_filter.py

@@ -4,6 +4,13 @@
 # SPDX-License-Identifier: BSD 2-Clause License
 #
 
+"""Wake phrase detection filter for Pipecat transcription processing.
+
+This module provides a frame processor that filters transcription frames,
+only allowing them through after wake phrases have been detected. Includes
+keepalive functionality to maintain conversation flow after wake detection.
+"""
+
 import re
 import time
 from enum import Enum
@@ -16,23 +23,53 @@ from pipecat.processors.frame_processor import FrameDirection, FrameProcessor
 
 
 class WakeCheckFilter(FrameProcessor):
-    """This filter looks for wake phrases in the transcription frames and only passes through frames
-    after a wake phrase has been detected. It also has a keepalive timeout to allow for a brief
-    period of continued conversation after a wake phrase has been detected.
+    """Frame processor that filters transcription frames based on wake phrase detection.
+
+    This filter monitors transcription frames for configured wake phrases and only
+    passes frames through after a wake phrase has been detected. Maintains a
+    keepalive timeout to allow continued conversation after wake detection.
     """
 
     class WakeState(Enum):
+        """Enumeration of wake detection states.
+
+        Parameters:
+            IDLE: No wake phrase detected, filtering active.
+            AWAKE: Wake phrase detected, allowing frames through.
+        """
+
         IDLE = 1
         AWAKE = 2
 
     class ParticipantState:
+        """State tracking for individual participants.
+
+        Parameters:
+            participant_id: Unique identifier for the participant.
+            state: Current wake state (IDLE or AWAKE).
+            wake_timer: Timestamp of last wake phrase detection.
+            accumulator: Accumulated text for wake phrase matching.
+        """
+
         def __init__(self, participant_id: str):
+            """Initialize participant state.
+
+            Args:
+                participant_id: Unique identifier for the participant.
+            """
             self.participant_id = participant_id
             self.state = WakeCheckFilter.WakeState.IDLE
             self.wake_timer = 0.0
             self.accumulator = ""
 
     def __init__(self, wake_phrases: List[str], keepalive_timeout: float = 3):
+        """Initialize the wake phrase filter.
+
+        Args:
+            wake_phrases: List of wake phrases to detect in transcriptions.
+            keepalive_timeout: Duration in seconds to keep passing frames after
+                wake detection. Defaults to 3 seconds.
+        """
         super().__init__()
         self._participant_states = {}
         self._keepalive_timeout = keepalive_timeout
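
A short usage sketch for the wake filter as documented above (wake phrases and timeout are illustrative; place it where it can see TranscriptionFrames, typically downstream of the STT service):

from pipecat.processors.filters.wake_check_filter import WakeCheckFilter

wake_filter = WakeCheckFilter(
    wake_phrases=["hey pipecat", "okay assistant"],
    keepalive_timeout=5.0,  # keep passing frames for 5 s after a wake phrase
)
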
@@ -44,6 +81,12 @@ class WakeCheckFilter(FrameProcessor):
             self._wake_patterns.append(pattern)
 
     async def process_frame(self, frame: Frame, direction: FrameDirection):
+        """Process incoming frames, filtering transcriptions based on wake detection.
+
+        Args:
+            frame: The frame to process.
+            direction: The direction of frame flow in the pipeline.
+        """
         await super().process_frame(frame, direction)
 
         try:

pipecat/processors/filters/wake_notifier_filter.py

@@ -4,6 +4,8 @@
 # SPDX-License-Identifier: BSD 2-Clause License
 #
 
+"""Wake notifier filter for conditional frame-based notifications."""
+
 from typing import Awaitable, Callable, Tuple, Type
 
 from pipecat.frames.frames import Frame
@@ -12,10 +14,11 @@ from pipecat.sync.base_notifier import BaseNotifier
 
 
 class WakeNotifierFilter(FrameProcessor):
-    """This processor expects a list of frame types and will execute a given
-    callback predicate when a frame of any of those type is being processed. If
-    the callback returns true the notifier will be notified.
+    """Frame processor that conditionally triggers notifications based on frame types and filters.
 
+    This processor monitors frames of specified types and executes a callback predicate
+    when such frames are processed. If the callback returns True, the associated
+    notifier is triggered, allowing for conditional wake-up or notification scenarios.
     """
 
     def __init__(
@@ -26,12 +29,27 @@ class WakeNotifierFilter(FrameProcessor):
         filter: Callable[[Frame], Awaitable[bool]],
         **kwargs,
     ):
+        """Initialize the wake notifier filter.
+
+        Args:
+            notifier: The notifier to trigger when conditions are met.
+            types: Tuple of frame types to monitor for potential notifications.
+            filter: Async callback that determines whether to trigger notification.
+                Should return True to trigger notification, False otherwise.
+            **kwargs: Additional arguments passed to parent FrameProcessor.
+        """
         super().__init__(**kwargs)
         self._notifier = notifier
         self._types = types
         self._filter = filter
 
     async def process_frame(self, frame: Frame, direction: FrameDirection):
+        """Process frames and conditionally trigger notifications.
+
+        Args:
+            frame: The frame to process.
+            direction: The direction of frame flow in the pipeline.
+        """
         await super().process_frame(frame, direction)
 
         if isinstance(frame, self._types) and await self._filter(frame):
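
Finally, a hedged sketch of the notifier filter, assuming the EventNotifier in pipecat/sync/event_notifier.py (listed in this release) and an illustrative predicate:

from pipecat.frames.frames import TranscriptionFrame
from pipecat.processors.filters.wake_notifier_filter import WakeNotifierFilter
from pipecat.sync.event_notifier import EventNotifier

notifier = EventNotifier()


async def mentions_wake_word(frame) -> bool:
    # Illustrative predicate: trigger the notifier when a transcription
    # contains the phrase "wake up".
    return "wake up" in frame.text.lower()


wake_notifier = WakeNotifierFilter(
    notifier=notifier,
    types=(TranscriptionFrame,),
    filter=mentions_wake_word,
)
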