dv-pipecat-ai 0.0.85.dev7__py3-none-any.whl → 0.0.85.dev698__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in the public registry, and is provided for informational purposes only.

Potentially problematic release: this version of dv-pipecat-ai might be problematic.
Files changed (156)
  1. {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/METADATA +78 -117
  2. {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/RECORD +156 -122
  3. pipecat/adapters/base_llm_adapter.py +38 -1
  4. pipecat/adapters/services/anthropic_adapter.py +9 -14
  5. pipecat/adapters/services/aws_nova_sonic_adapter.py +5 -0
  6. pipecat/adapters/services/bedrock_adapter.py +236 -13
  7. pipecat/adapters/services/gemini_adapter.py +12 -8
  8. pipecat/adapters/services/open_ai_adapter.py +19 -7
  9. pipecat/adapters/services/open_ai_realtime_adapter.py +5 -0
  10. pipecat/audio/filters/krisp_viva_filter.py +193 -0
  11. pipecat/audio/filters/noisereduce_filter.py +15 -0
  12. pipecat/audio/turn/base_turn_analyzer.py +9 -1
  13. pipecat/audio/turn/smart_turn/base_smart_turn.py +14 -8
  14. pipecat/audio/turn/smart_turn/data/__init__.py +0 -0
  15. pipecat/audio/turn/smart_turn/data/smart-turn-v3.0.onnx +0 -0
  16. pipecat/audio/turn/smart_turn/http_smart_turn.py +6 -2
  17. pipecat/audio/turn/smart_turn/local_smart_turn.py +1 -1
  18. pipecat/audio/turn/smart_turn/local_smart_turn_v2.py +1 -1
  19. pipecat/audio/turn/smart_turn/local_smart_turn_v3.py +124 -0
  20. pipecat/audio/vad/data/README.md +10 -0
  21. pipecat/audio/vad/vad_analyzer.py +13 -1
  22. pipecat/extensions/voicemail/voicemail_detector.py +5 -5
  23. pipecat/frames/frames.py +120 -87
  24. pipecat/observers/loggers/debug_log_observer.py +3 -3
  25. pipecat/observers/loggers/llm_log_observer.py +7 -3
  26. pipecat/observers/loggers/user_bot_latency_log_observer.py +22 -10
  27. pipecat/pipeline/runner.py +12 -4
  28. pipecat/pipeline/service_switcher.py +64 -36
  29. pipecat/pipeline/task.py +85 -24
  30. pipecat/processors/aggregators/dtmf_aggregator.py +28 -22
  31. pipecat/processors/aggregators/{gated_openai_llm_context.py → gated_llm_context.py} +9 -9
  32. pipecat/processors/aggregators/gated_open_ai_llm_context.py +12 -0
  33. pipecat/processors/aggregators/llm_response.py +6 -7
  34. pipecat/processors/aggregators/llm_response_universal.py +19 -15
  35. pipecat/processors/aggregators/user_response.py +6 -6
  36. pipecat/processors/aggregators/vision_image_frame.py +24 -2
  37. pipecat/processors/audio/audio_buffer_processor.py +43 -8
  38. pipecat/processors/filters/stt_mute_filter.py +2 -0
  39. pipecat/processors/frame_processor.py +103 -17
  40. pipecat/processors/frameworks/langchain.py +8 -2
  41. pipecat/processors/frameworks/rtvi.py +209 -68
  42. pipecat/processors/frameworks/strands_agents.py +170 -0
  43. pipecat/processors/logger.py +2 -2
  44. pipecat/processors/transcript_processor.py +4 -4
  45. pipecat/processors/user_idle_processor.py +3 -6
  46. pipecat/runner/run.py +270 -50
  47. pipecat/runner/types.py +2 -0
  48. pipecat/runner/utils.py +51 -10
  49. pipecat/serializers/exotel.py +5 -5
  50. pipecat/serializers/livekit.py +20 -0
  51. pipecat/serializers/plivo.py +6 -9
  52. pipecat/serializers/protobuf.py +6 -5
  53. pipecat/serializers/telnyx.py +2 -2
  54. pipecat/serializers/twilio.py +43 -23
  55. pipecat/services/ai_service.py +2 -6
  56. pipecat/services/anthropic/llm.py +2 -25
  57. pipecat/services/asyncai/tts.py +2 -3
  58. pipecat/services/aws/__init__.py +1 -0
  59. pipecat/services/aws/llm.py +122 -97
  60. pipecat/services/aws/nova_sonic/__init__.py +0 -0
  61. pipecat/services/aws/nova_sonic/context.py +367 -0
  62. pipecat/services/aws/nova_sonic/frames.py +25 -0
  63. pipecat/services/aws/nova_sonic/llm.py +1155 -0
  64. pipecat/services/aws/stt.py +1 -3
  65. pipecat/services/aws_nova_sonic/__init__.py +19 -1
  66. pipecat/services/aws_nova_sonic/aws.py +11 -1151
  67. pipecat/services/aws_nova_sonic/context.py +13 -355
  68. pipecat/services/aws_nova_sonic/frames.py +13 -17
  69. pipecat/services/azure/realtime/__init__.py +0 -0
  70. pipecat/services/azure/realtime/llm.py +65 -0
  71. pipecat/services/azure/stt.py +15 -0
  72. pipecat/services/cartesia/tts.py +2 -2
  73. pipecat/services/deepgram/__init__.py +1 -0
  74. pipecat/services/deepgram/flux/__init__.py +0 -0
  75. pipecat/services/deepgram/flux/stt.py +636 -0
  76. pipecat/services/elevenlabs/__init__.py +2 -1
  77. pipecat/services/elevenlabs/stt.py +254 -276
  78. pipecat/services/elevenlabs/tts.py +5 -5
  79. pipecat/services/fish/tts.py +2 -2
  80. pipecat/services/gemini_multimodal_live/events.py +38 -524
  81. pipecat/services/gemini_multimodal_live/file_api.py +23 -173
  82. pipecat/services/gemini_multimodal_live/gemini.py +41 -1403
  83. pipecat/services/gladia/stt.py +56 -72
  84. pipecat/services/google/__init__.py +1 -0
  85. pipecat/services/google/gemini_live/__init__.py +3 -0
  86. pipecat/services/google/gemini_live/file_api.py +189 -0
  87. pipecat/services/google/gemini_live/llm.py +1582 -0
  88. pipecat/services/google/gemini_live/llm_vertex.py +184 -0
  89. pipecat/services/google/llm.py +15 -11
  90. pipecat/services/google/llm_openai.py +3 -3
  91. pipecat/services/google/llm_vertex.py +86 -16
  92. pipecat/services/google/tts.py +7 -3
  93. pipecat/services/heygen/api.py +2 -0
  94. pipecat/services/heygen/client.py +8 -4
  95. pipecat/services/heygen/video.py +2 -0
  96. pipecat/services/hume/__init__.py +5 -0
  97. pipecat/services/hume/tts.py +220 -0
  98. pipecat/services/inworld/tts.py +6 -6
  99. pipecat/services/llm_service.py +15 -5
  100. pipecat/services/lmnt/tts.py +2 -2
  101. pipecat/services/mcp_service.py +4 -2
  102. pipecat/services/mem0/memory.py +6 -5
  103. pipecat/services/mistral/llm.py +29 -8
  104. pipecat/services/moondream/vision.py +42 -16
  105. pipecat/services/neuphonic/tts.py +2 -2
  106. pipecat/services/openai/__init__.py +1 -0
  107. pipecat/services/openai/base_llm.py +27 -20
  108. pipecat/services/openai/realtime/__init__.py +0 -0
  109. pipecat/services/openai/realtime/context.py +272 -0
  110. pipecat/services/openai/realtime/events.py +1106 -0
  111. pipecat/services/openai/realtime/frames.py +37 -0
  112. pipecat/services/openai/realtime/llm.py +829 -0
  113. pipecat/services/openai/tts.py +16 -8
  114. pipecat/services/openai_realtime/__init__.py +27 -0
  115. pipecat/services/openai_realtime/azure.py +21 -0
  116. pipecat/services/openai_realtime/context.py +21 -0
  117. pipecat/services/openai_realtime/events.py +21 -0
  118. pipecat/services/openai_realtime/frames.py +21 -0
  119. pipecat/services/openai_realtime_beta/azure.py +16 -0
  120. pipecat/services/openai_realtime_beta/openai.py +17 -5
  121. pipecat/services/playht/tts.py +31 -4
  122. pipecat/services/rime/tts.py +3 -4
  123. pipecat/services/sarvam/tts.py +2 -6
  124. pipecat/services/simli/video.py +2 -2
  125. pipecat/services/speechmatics/stt.py +1 -7
  126. pipecat/services/stt_service.py +34 -0
  127. pipecat/services/tavus/video.py +2 -2
  128. pipecat/services/tts_service.py +9 -9
  129. pipecat/services/vision_service.py +7 -6
  130. pipecat/tests/utils.py +4 -4
  131. pipecat/transcriptions/language.py +41 -1
  132. pipecat/transports/base_input.py +17 -42
  133. pipecat/transports/base_output.py +42 -26
  134. pipecat/transports/daily/transport.py +199 -26
  135. pipecat/transports/heygen/__init__.py +0 -0
  136. pipecat/transports/heygen/transport.py +381 -0
  137. pipecat/transports/livekit/transport.py +228 -63
  138. pipecat/transports/local/audio.py +6 -1
  139. pipecat/transports/local/tk.py +11 -2
  140. pipecat/transports/network/fastapi_websocket.py +1 -1
  141. pipecat/transports/smallwebrtc/connection.py +98 -19
  142. pipecat/transports/smallwebrtc/request_handler.py +204 -0
  143. pipecat/transports/smallwebrtc/transport.py +65 -23
  144. pipecat/transports/tavus/transport.py +23 -12
  145. pipecat/transports/websocket/client.py +41 -5
  146. pipecat/transports/websocket/fastapi.py +21 -11
  147. pipecat/transports/websocket/server.py +14 -7
  148. pipecat/transports/whatsapp/api.py +8 -0
  149. pipecat/transports/whatsapp/client.py +47 -0
  150. pipecat/utils/base_object.py +54 -22
  151. pipecat/utils/string.py +12 -1
  152. pipecat/utils/tracing/service_decorators.py +21 -21
  153. {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/WHEEL +0 -0
  154. {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/licenses/LICENSE +0 -0
  155. {dv_pipecat_ai-0.0.85.dev7.dist-info → dv_pipecat_ai-0.0.85.dev698.dist-info}/top_level.txt +0 -0
  156. /pipecat/services/{aws_nova_sonic → aws/nova_sonic}/ready.wav +0 -0
pipecat/services/aws/nova_sonic/context.py (new file)
@@ -0,0 +1,367 @@
+ #
+ # Copyright (c) 2025, Daily
+ #
+ # SPDX-License-Identifier: BSD 2-Clause License
+ #
+
+ """Context management for AWS Nova Sonic LLM service.
+
+ This module provides specialized context aggregators and message handling for AWS Nova Sonic,
+ including conversation history management and role-specific message processing.
+ """
+
+ import copy
+ from dataclasses import dataclass, field
+ from enum import Enum
+
+ from loguru import logger
+
+ from pipecat.frames.frames import (
+     BotStoppedSpeakingFrame,
+     DataFrame,
+     Frame,
+     FunctionCallResultFrame,
+     InterruptionFrame,
+     LLMFullResponseEndFrame,
+     LLMFullResponseStartFrame,
+     LLMMessagesAppendFrame,
+     LLMMessagesUpdateFrame,
+     LLMSetToolChoiceFrame,
+     LLMSetToolsFrame,
+     TextFrame,
+     UserImageRawFrame,
+ )
+ from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
+ from pipecat.processors.frame_processor import FrameDirection
+ from pipecat.services.aws.nova_sonic.frames import AWSNovaSonicFunctionCallResultFrame
+ from pipecat.services.openai.llm import (
+     OpenAIAssistantContextAggregator,
+     OpenAIUserContextAggregator,
+ )
+
+
+ class Role(Enum):
+     """Roles supported in AWS Nova Sonic conversations.
+
+     Parameters:
+         SYSTEM: System-level messages (not used in conversation history).
+         USER: Messages sent by the user.
+         ASSISTANT: Messages sent by the assistant.
+         TOOL: Messages sent by tools (not used in conversation history).
+     """
+
+     SYSTEM = "SYSTEM"
+     USER = "USER"
+     ASSISTANT = "ASSISTANT"
+     TOOL = "TOOL"
+
+
+ @dataclass
+ class AWSNovaSonicConversationHistoryMessage:
+     """A single message in AWS Nova Sonic conversation history.
+
+     Parameters:
+         role: The role of the message sender (USER or ASSISTANT only).
+         text: The text content of the message.
+     """
+
+     role: Role  # only USER and ASSISTANT
+     text: str
+
+
+ @dataclass
+ class AWSNovaSonicConversationHistory:
+     """Complete conversation history for AWS Nova Sonic initialization.
+
+     Parameters:
+         system_instruction: System-level instruction for the conversation.
+         messages: List of conversation messages between user and assistant.
+     """
+
+     system_instruction: str = None
+     messages: list[AWSNovaSonicConversationHistoryMessage] = field(default_factory=list)
+
+
+ class AWSNovaSonicLLMContext(OpenAILLMContext):
+     """Specialized LLM context for AWS Nova Sonic service.
+
+     Extends OpenAI context with Nova Sonic-specific message handling,
+     conversation history management, and text buffering capabilities.
+     """
+
+     def __init__(self, messages=None, tools=None, **kwargs):
+         """Initialize AWS Nova Sonic LLM context.
+
+         Args:
+             messages: Initial messages for the context.
+             tools: Available tools for the context.
+             **kwargs: Additional arguments passed to parent class.
+         """
+         super().__init__(messages=messages, tools=tools, **kwargs)
+         self.__setup_local()
+
+     def __setup_local(self, system_instruction: str = ""):
+         self._assistant_text = ""
+         self._user_text = ""
+         self._system_instruction = system_instruction
+
+     @staticmethod
+     def upgrade_to_nova_sonic(
+         obj: OpenAILLMContext, system_instruction: str
+     ) -> "AWSNovaSonicLLMContext":
+         """Upgrade an OpenAI context to AWS Nova Sonic context.
+
+         Args:
+             obj: The OpenAI context to upgrade.
+             system_instruction: System instruction for the context.
+
+         Returns:
+             The upgraded AWS Nova Sonic context.
+         """
+         if isinstance(obj, OpenAILLMContext) and not isinstance(obj, AWSNovaSonicLLMContext):
+             obj.__class__ = AWSNovaSonicLLMContext
+             obj.__setup_local(system_instruction)
+         return obj
+
+     # NOTE: this method has the side-effect of updating _system_instruction from messages
+     def get_messages_for_initializing_history(self) -> AWSNovaSonicConversationHistory:
+         """Get conversation history for initializing AWS Nova Sonic session.
+
+         Processes stored messages and extracts system instruction and conversation
+         history in the format expected by AWS Nova Sonic.
+
+         Returns:
+             Formatted conversation history with system instruction and messages.
+         """
+         history = AWSNovaSonicConversationHistory(system_instruction=self._system_instruction)
+
+         # Bail if there are no messages
+         if not self.messages:
+             return history
+
+         messages = copy.deepcopy(self.messages)
+
+         # If we have a "system" message as our first message, let's pull that out into "instruction"
+         if messages[0].get("role") == "system":
+             system = messages.pop(0)
+             content = system.get("content")
+             if isinstance(content, str):
+                 history.system_instruction = content
+             elif isinstance(content, list):
+                 history.system_instruction = content[0].get("text")
+             if history.system_instruction:
+                 self._system_instruction = history.system_instruction
+
+         # Process remaining messages to fill out conversation history.
+         # Nova Sonic supports "user" and "assistant" messages in history.
+         for message in messages:
+             history_message = self.from_standard_message(message)
+             if history_message:
+                 history.messages.append(history_message)
+
+         return history
+
+     def get_messages_for_persistent_storage(self):
+         """Get messages formatted for persistent storage.
+
+         Returns:
+             List of messages including system instruction if present.
+         """
+         messages = super().get_messages_for_persistent_storage()
+         # If we have a system instruction and messages doesn't already contain it, add it
+         if self._system_instruction and not (messages and messages[0].get("role") == "system"):
+             messages.insert(0, {"role": "system", "content": self._system_instruction})
+         return messages
+
+     def from_standard_message(self, message) -> AWSNovaSonicConversationHistoryMessage:
+         """Convert standard message format to Nova Sonic format.
+
+         Args:
+             message: Standard message dictionary to convert.
+
+         Returns:
+             Nova Sonic conversation history message, or None if not convertible.
+         """
+         role = message.get("role")
+         if message.get("role") == "user" or message.get("role") == "assistant":
+             content = message.get("content")
+             if isinstance(message.get("content"), list):
+                 content = ""
+                 for c in message.get("content"):
+                     if c.get("type") == "text":
+                         content += " " + c.get("text")
+                     else:
+                         logger.error(
+                             f"Unhandled content type in context message: {c.get('type')} - {message}"
+                         )
+             # There won't be content if this is an assistant tool call entry.
+             # We're ignoring those since they can't be loaded into AWS Nova Sonic conversation
+             # history
+             if content:
+                 return AWSNovaSonicConversationHistoryMessage(role=Role[role.upper()], text=content)
+         # NOTE: we're ignoring messages with role "tool" since they can't be loaded into AWS Nova
+         # Sonic conversation history
+
+     def buffer_user_text(self, text):
+         """Buffer user text for later flushing to context.
+
+         Args:
+             text: User text to buffer.
+         """
+         self._user_text += f" {text}" if self._user_text else text
+         # logger.debug(f"User text buffered: {self._user_text}")
+
+     def flush_aggregated_user_text(self) -> str:
+         """Flush buffered user text to context as a complete message.
+
+         Returns:
+             The flushed user text, or empty string if no text was buffered.
+         """
+         if not self._user_text:
+             return ""
+         user_text = self._user_text
+         message = {
+             "role": "user",
+             "content": [{"type": "text", "text": user_text}],
+         }
+         self._user_text = ""
+         self.add_message(message)
+         # logger.debug(f"Context updated (user): {self.get_messages_for_logging()}")
+         return user_text
+
+     def buffer_assistant_text(self, text):
+         """Buffer assistant text for later flushing to context.
+
+         Args:
+             text: Assistant text to buffer.
+         """
+         self._assistant_text += text
+         # logger.debug(f"Assistant text buffered: {self._assistant_text}")
+
+     def flush_aggregated_assistant_text(self):
+         """Flush buffered assistant text to context as a complete message."""
+         if not self._assistant_text:
+             return
+         message = {
+             "role": "assistant",
+             "content": [{"type": "text", "text": self._assistant_text}],
+         }
+         self._assistant_text = ""
+         self.add_message(message)
+         # logger.debug(f"Context updated (assistant): {self.get_messages_for_logging()}")
+
+
+ @dataclass
+ class AWSNovaSonicMessagesUpdateFrame(DataFrame):
+     """Frame containing updated AWS Nova Sonic context.
+
+     Parameters:
+         context: The updated AWS Nova Sonic LLM context.
+     """
+
+     context: AWSNovaSonicLLMContext
+
+
+ class AWSNovaSonicUserContextAggregator(OpenAIUserContextAggregator):
+     """Context aggregator for user messages in AWS Nova Sonic conversations.
+
+     Extends the OpenAI user context aggregator to emit Nova Sonic-specific
+     context update frames.
+     """
+
+     async def process_frame(
+         self, frame: Frame, direction: FrameDirection = FrameDirection.DOWNSTREAM
+     ):
+         """Process frames and emit Nova Sonic-specific context updates.
+
+         Args:
+             frame: The frame to process.
+             direction: The direction the frame is traveling.
+         """
+         await super().process_frame(frame, direction)
+
+         # Parent does not push LLMMessagesUpdateFrame
+         if isinstance(frame, LLMMessagesUpdateFrame):
+             await self.push_frame(AWSNovaSonicMessagesUpdateFrame(context=self._context))
+
+
+ class AWSNovaSonicAssistantContextAggregator(OpenAIAssistantContextAggregator):
+     """Context aggregator for assistant messages in AWS Nova Sonic conversations.
+
+     Provides specialized handling for assistant responses and function calls
+     in AWS Nova Sonic context, with custom frame processing logic.
+     """
+
+     async def process_frame(self, frame: Frame, direction: FrameDirection):
+         """Process frames with Nova Sonic-specific logic.
+
+         Args:
+             frame: The frame to process.
+             direction: The direction the frame is traveling.
+         """
+         # HACK: For now, disable the context aggregator by making it just pass through all frames
+         # that the parent handles (except the function call stuff, which we still need).
+         # For an explanation of this hack, see
+         # AWSNovaSonicLLMService._report_assistant_response_text_added.
+         if isinstance(
+             frame,
+             (
+                 InterruptionFrame,
+                 LLMFullResponseStartFrame,
+                 LLMFullResponseEndFrame,
+                 TextFrame,
+                 LLMMessagesAppendFrame,
+                 LLMMessagesUpdateFrame,
+                 LLMSetToolsFrame,
+                 LLMSetToolChoiceFrame,
+                 UserImageRawFrame,
+                 BotStoppedSpeakingFrame,
+             ),
+         ):
+             await self.push_frame(frame, direction)
+         else:
+             await super().process_frame(frame, direction)
+
+     async def handle_function_call_result(self, frame: FunctionCallResultFrame):
+         """Handle function call results for AWS Nova Sonic.
+
+         Args:
+             frame: The function call result frame to handle.
+         """
+         await super().handle_function_call_result(frame)
+
+         # The standard function callback code path pushes the FunctionCallResultFrame from the LLM
+         # itself, so we didn't have a chance to add the result to the AWS Nova Sonic server-side
+         # context. Let's push a special frame to do that.
+         await self.push_frame(
+             AWSNovaSonicFunctionCallResultFrame(result_frame=frame), FrameDirection.UPSTREAM
+         )
+
+
+ @dataclass
+ class AWSNovaSonicContextAggregatorPair:
+     """Pair of user and assistant context aggregators for AWS Nova Sonic.
+
+     Parameters:
+         _user: The user context aggregator.
+         _assistant: The assistant context aggregator.
+     """
+
+     _user: AWSNovaSonicUserContextAggregator
+     _assistant: AWSNovaSonicAssistantContextAggregator
+
+     def user(self) -> AWSNovaSonicUserContextAggregator:
+         """Get the user context aggregator.
+
+         Returns:
+             The user context aggregator instance.
+         """
+         return self._user
+
+     def assistant(self) -> AWSNovaSonicAssistantContextAggregator:
+         """Get the assistant context aggregator.
+
+         Returns:
+             The assistant context aggregator instance.
+         """
+         return self._assistant
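
For orientation, here is a minimal usage sketch of the new context class, based only on the methods shown in the hunk above. The example conversation is invented and the snippet is not part of the package or this diff:

from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
from pipecat.services.aws.nova_sonic.context import AWSNovaSonicLLMContext

# Build a plain OpenAI-style context, then re-class it for Nova Sonic.
context = OpenAILLMContext(
    messages=[
        {"role": "system", "content": "You are a helpful voice assistant."},
        {"role": "user", "content": "What's the weather like today?"},
    ]
)
nova_context = AWSNovaSonicLLMContext.upgrade_to_nova_sonic(
    context, system_instruction="You are a helpful voice assistant."
)

# The history object exposes the system instruction plus USER/ASSISTANT messages
# in the shape Nova Sonic expects for session initialization.
history = nova_context.get_messages_for_initializing_history()
print(history.system_instruction)
for message in history.messages:
    print(message.role, message.text)

Note that upgrade_to_nova_sonic re-classes the existing OpenAILLMContext in place rather than copying it, so any processor already holding a reference to the context sees the Nova Sonic behavior after the upgrade.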
pipecat/services/aws/nova_sonic/frames.py (new file)
@@ -0,0 +1,25 @@
+ #
+ # Copyright (c) 2025, Daily
+ #
+ # SPDX-License-Identifier: BSD 2-Clause License
+ #
+
+ """Custom frames for AWS Nova Sonic LLM service."""
+
+ from dataclasses import dataclass
+
+ from pipecat.frames.frames import DataFrame, FunctionCallResultFrame
+
+
+ @dataclass
+ class AWSNovaSonicFunctionCallResultFrame(DataFrame):
+     """Frame containing function call result for AWS Nova Sonic processing.
+
+     This frame wraps a standard function call result frame to enable
+     AWS Nova Sonic-specific handling and context updates.
+
+     Parameters:
+         result_frame: The underlying function call result frame.
+     """
+
+     result_frame: FunctionCallResultFrame
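
As a rough sketch of how a pipeline element might consume this wrapper frame, here is a hypothetical processor (FunctionResultLogger is illustrative, not part of this release) that unwraps the underlying result. It assumes only the result_frame field defined above and the standard FrameProcessor process_frame/push_frame pattern:

from loguru import logger

from pipecat.frames.frames import Frame
from pipecat.processors.frame_processor import FrameDirection, FrameProcessor
from pipecat.services.aws.nova_sonic.frames import AWSNovaSonicFunctionCallResultFrame


class FunctionResultLogger(FrameProcessor):
    """Hypothetical processor that logs wrapped Nova Sonic function call results."""

    async def process_frame(self, frame: Frame, direction: FrameDirection):
        await super().process_frame(frame, direction)

        if isinstance(frame, AWSNovaSonicFunctionCallResultFrame):
            # The wrapped FunctionCallResultFrame carries the original call result.
            logger.info(f"Function call result frame: {frame.result_frame}")

        # Pass everything through unchanged.
        await self.push_frame(frame, direction)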