dv-pipecat-ai 0.0.74.dev770__py3-none-any.whl → 0.0.82.dev776__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dv-pipecat-ai has been flagged as potentially problematic; see the registry's advisory page for details.

Files changed (244)
  1. {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/METADATA +137 -93
  2. dv_pipecat_ai-0.0.82.dev776.dist-info/RECORD +340 -0
  3. pipecat/__init__.py +17 -0
  4. pipecat/adapters/base_llm_adapter.py +36 -1
  5. pipecat/adapters/schemas/direct_function.py +296 -0
  6. pipecat/adapters/schemas/function_schema.py +15 -6
  7. pipecat/adapters/schemas/tools_schema.py +55 -7
  8. pipecat/adapters/services/anthropic_adapter.py +22 -3
  9. pipecat/adapters/services/aws_nova_sonic_adapter.py +23 -3
  10. pipecat/adapters/services/bedrock_adapter.py +22 -3
  11. pipecat/adapters/services/gemini_adapter.py +16 -3
  12. pipecat/adapters/services/open_ai_adapter.py +17 -2
  13. pipecat/adapters/services/open_ai_realtime_adapter.py +23 -3
  14. pipecat/audio/filters/base_audio_filter.py +30 -6
  15. pipecat/audio/filters/koala_filter.py +37 -2
  16. pipecat/audio/filters/krisp_filter.py +59 -6
  17. pipecat/audio/filters/noisereduce_filter.py +37 -0
  18. pipecat/audio/interruptions/base_interruption_strategy.py +25 -5
  19. pipecat/audio/interruptions/min_words_interruption_strategy.py +21 -4
  20. pipecat/audio/mixers/base_audio_mixer.py +30 -7
  21. pipecat/audio/mixers/soundfile_mixer.py +53 -6
  22. pipecat/audio/resamplers/base_audio_resampler.py +17 -9
  23. pipecat/audio/resamplers/resampy_resampler.py +26 -1
  24. pipecat/audio/resamplers/soxr_resampler.py +32 -1
  25. pipecat/audio/resamplers/soxr_stream_resampler.py +101 -0
  26. pipecat/audio/utils.py +194 -1
  27. pipecat/audio/vad/silero.py +60 -3
  28. pipecat/audio/vad/vad_analyzer.py +114 -30
  29. pipecat/clocks/base_clock.py +19 -0
  30. pipecat/clocks/system_clock.py +25 -0
  31. pipecat/extensions/voicemail/__init__.py +0 -0
  32. pipecat/extensions/voicemail/voicemail_detector.py +707 -0
  33. pipecat/frames/frames.py +590 -156
  34. pipecat/metrics/metrics.py +64 -1
  35. pipecat/observers/base_observer.py +58 -19
  36. pipecat/observers/loggers/debug_log_observer.py +56 -64
  37. pipecat/observers/loggers/llm_log_observer.py +8 -1
  38. pipecat/observers/loggers/transcription_log_observer.py +19 -7
  39. pipecat/observers/loggers/user_bot_latency_log_observer.py +32 -5
  40. pipecat/observers/turn_tracking_observer.py +26 -1
  41. pipecat/pipeline/base_pipeline.py +5 -7
  42. pipecat/pipeline/base_task.py +52 -9
  43. pipecat/pipeline/parallel_pipeline.py +121 -177
  44. pipecat/pipeline/pipeline.py +129 -20
  45. pipecat/pipeline/runner.py +50 -1
  46. pipecat/pipeline/sync_parallel_pipeline.py +132 -32
  47. pipecat/pipeline/task.py +263 -280
  48. pipecat/pipeline/task_observer.py +85 -34
  49. pipecat/pipeline/to_be_updated/merge_pipeline.py +32 -2
  50. pipecat/processors/aggregators/dtmf_aggregator.py +29 -22
  51. pipecat/processors/aggregators/gated.py +25 -24
  52. pipecat/processors/aggregators/gated_openai_llm_context.py +22 -2
  53. pipecat/processors/aggregators/llm_response.py +398 -89
  54. pipecat/processors/aggregators/openai_llm_context.py +161 -13
  55. pipecat/processors/aggregators/sentence.py +25 -14
  56. pipecat/processors/aggregators/user_response.py +28 -3
  57. pipecat/processors/aggregators/vision_image_frame.py +24 -14
  58. pipecat/processors/async_generator.py +28 -0
  59. pipecat/processors/audio/audio_buffer_processor.py +78 -37
  60. pipecat/processors/consumer_processor.py +25 -6
  61. pipecat/processors/filters/frame_filter.py +23 -0
  62. pipecat/processors/filters/function_filter.py +30 -0
  63. pipecat/processors/filters/identity_filter.py +17 -2
  64. pipecat/processors/filters/null_filter.py +24 -1
  65. pipecat/processors/filters/stt_mute_filter.py +56 -21
  66. pipecat/processors/filters/wake_check_filter.py +46 -3
  67. pipecat/processors/filters/wake_notifier_filter.py +21 -3
  68. pipecat/processors/frame_processor.py +488 -131
  69. pipecat/processors/frameworks/langchain.py +38 -3
  70. pipecat/processors/frameworks/rtvi.py +719 -34
  71. pipecat/processors/gstreamer/pipeline_source.py +41 -0
  72. pipecat/processors/idle_frame_processor.py +26 -3
  73. pipecat/processors/logger.py +23 -0
  74. pipecat/processors/metrics/frame_processor_metrics.py +77 -4
  75. pipecat/processors/metrics/sentry.py +42 -4
  76. pipecat/processors/producer_processor.py +34 -14
  77. pipecat/processors/text_transformer.py +22 -10
  78. pipecat/processors/transcript_processor.py +48 -29
  79. pipecat/processors/user_idle_processor.py +31 -21
  80. pipecat/runner/__init__.py +1 -0
  81. pipecat/runner/daily.py +132 -0
  82. pipecat/runner/livekit.py +148 -0
  83. pipecat/runner/run.py +543 -0
  84. pipecat/runner/types.py +67 -0
  85. pipecat/runner/utils.py +515 -0
  86. pipecat/serializers/base_serializer.py +42 -0
  87. pipecat/serializers/exotel.py +17 -6
  88. pipecat/serializers/genesys.py +95 -0
  89. pipecat/serializers/livekit.py +33 -0
  90. pipecat/serializers/plivo.py +16 -15
  91. pipecat/serializers/protobuf.py +37 -1
  92. pipecat/serializers/telnyx.py +18 -17
  93. pipecat/serializers/twilio.py +32 -16
  94. pipecat/services/ai_service.py +5 -3
  95. pipecat/services/anthropic/llm.py +113 -43
  96. pipecat/services/assemblyai/models.py +63 -5
  97. pipecat/services/assemblyai/stt.py +64 -11
  98. pipecat/services/asyncai/__init__.py +0 -0
  99. pipecat/services/asyncai/tts.py +501 -0
  100. pipecat/services/aws/llm.py +185 -111
  101. pipecat/services/aws/stt.py +217 -23
  102. pipecat/services/aws/tts.py +118 -52
  103. pipecat/services/aws/utils.py +101 -5
  104. pipecat/services/aws_nova_sonic/aws.py +82 -64
  105. pipecat/services/aws_nova_sonic/context.py +15 -6
  106. pipecat/services/azure/common.py +10 -2
  107. pipecat/services/azure/image.py +32 -0
  108. pipecat/services/azure/llm.py +9 -7
  109. pipecat/services/azure/stt.py +65 -2
  110. pipecat/services/azure/tts.py +154 -23
  111. pipecat/services/cartesia/stt.py +125 -8
  112. pipecat/services/cartesia/tts.py +102 -38
  113. pipecat/services/cerebras/llm.py +15 -23
  114. pipecat/services/deepgram/stt.py +19 -11
  115. pipecat/services/deepgram/tts.py +36 -0
  116. pipecat/services/deepseek/llm.py +14 -23
  117. pipecat/services/elevenlabs/tts.py +330 -64
  118. pipecat/services/fal/image.py +43 -0
  119. pipecat/services/fal/stt.py +48 -10
  120. pipecat/services/fireworks/llm.py +14 -21
  121. pipecat/services/fish/tts.py +109 -9
  122. pipecat/services/gemini_multimodal_live/__init__.py +1 -0
  123. pipecat/services/gemini_multimodal_live/events.py +83 -2
  124. pipecat/services/gemini_multimodal_live/file_api.py +189 -0
  125. pipecat/services/gemini_multimodal_live/gemini.py +218 -21
  126. pipecat/services/gladia/config.py +17 -10
  127. pipecat/services/gladia/stt.py +82 -36
  128. pipecat/services/google/frames.py +40 -0
  129. pipecat/services/google/google.py +2 -0
  130. pipecat/services/google/image.py +39 -2
  131. pipecat/services/google/llm.py +176 -58
  132. pipecat/services/google/llm_openai.py +26 -4
  133. pipecat/services/google/llm_vertex.py +37 -15
  134. pipecat/services/google/rtvi.py +41 -0
  135. pipecat/services/google/stt.py +65 -17
  136. pipecat/services/google/test-google-chirp.py +45 -0
  137. pipecat/services/google/tts.py +390 -19
  138. pipecat/services/grok/llm.py +8 -6
  139. pipecat/services/groq/llm.py +8 -6
  140. pipecat/services/groq/stt.py +13 -9
  141. pipecat/services/groq/tts.py +40 -0
  142. pipecat/services/hamsa/__init__.py +9 -0
  143. pipecat/services/hamsa/stt.py +241 -0
  144. pipecat/services/heygen/__init__.py +5 -0
  145. pipecat/services/heygen/api.py +281 -0
  146. pipecat/services/heygen/client.py +620 -0
  147. pipecat/services/heygen/video.py +338 -0
  148. pipecat/services/image_service.py +5 -3
  149. pipecat/services/inworld/__init__.py +1 -0
  150. pipecat/services/inworld/tts.py +592 -0
  151. pipecat/services/llm_service.py +127 -45
  152. pipecat/services/lmnt/tts.py +80 -7
  153. pipecat/services/mcp_service.py +85 -44
  154. pipecat/services/mem0/memory.py +42 -13
  155. pipecat/services/minimax/tts.py +74 -15
  156. pipecat/services/mistral/__init__.py +0 -0
  157. pipecat/services/mistral/llm.py +185 -0
  158. pipecat/services/moondream/vision.py +55 -10
  159. pipecat/services/neuphonic/tts.py +275 -48
  160. pipecat/services/nim/llm.py +8 -6
  161. pipecat/services/ollama/llm.py +27 -7
  162. pipecat/services/openai/base_llm.py +54 -16
  163. pipecat/services/openai/image.py +30 -0
  164. pipecat/services/openai/llm.py +7 -5
  165. pipecat/services/openai/stt.py +13 -9
  166. pipecat/services/openai/tts.py +42 -10
  167. pipecat/services/openai_realtime_beta/azure.py +11 -9
  168. pipecat/services/openai_realtime_beta/context.py +7 -5
  169. pipecat/services/openai_realtime_beta/events.py +10 -7
  170. pipecat/services/openai_realtime_beta/openai.py +37 -18
  171. pipecat/services/openpipe/llm.py +30 -24
  172. pipecat/services/openrouter/llm.py +9 -7
  173. pipecat/services/perplexity/llm.py +15 -19
  174. pipecat/services/piper/tts.py +26 -12
  175. pipecat/services/playht/tts.py +227 -65
  176. pipecat/services/qwen/llm.py +8 -6
  177. pipecat/services/rime/tts.py +128 -17
  178. pipecat/services/riva/stt.py +160 -22
  179. pipecat/services/riva/tts.py +67 -2
  180. pipecat/services/sambanova/llm.py +19 -17
  181. pipecat/services/sambanova/stt.py +14 -8
  182. pipecat/services/sarvam/tts.py +60 -13
  183. pipecat/services/simli/video.py +82 -21
  184. pipecat/services/soniox/__init__.py +0 -0
  185. pipecat/services/soniox/stt.py +398 -0
  186. pipecat/services/speechmatics/stt.py +29 -17
  187. pipecat/services/stt_service.py +47 -11
  188. pipecat/services/tavus/video.py +94 -25
  189. pipecat/services/together/llm.py +8 -6
  190. pipecat/services/tts_service.py +77 -53
  191. pipecat/services/ultravox/stt.py +46 -43
  192. pipecat/services/vision_service.py +5 -3
  193. pipecat/services/websocket_service.py +12 -11
  194. pipecat/services/whisper/base_stt.py +58 -12
  195. pipecat/services/whisper/stt.py +69 -58
  196. pipecat/services/xtts/tts.py +59 -2
  197. pipecat/sync/base_notifier.py +19 -0
  198. pipecat/sync/event_notifier.py +24 -0
  199. pipecat/tests/utils.py +73 -5
  200. pipecat/transcriptions/language.py +24 -0
  201. pipecat/transports/base_input.py +112 -8
  202. pipecat/transports/base_output.py +235 -13
  203. pipecat/transports/base_transport.py +119 -0
  204. pipecat/transports/local/audio.py +76 -0
  205. pipecat/transports/local/tk.py +84 -0
  206. pipecat/transports/network/fastapi_websocket.py +174 -15
  207. pipecat/transports/network/small_webrtc.py +383 -39
  208. pipecat/transports/network/webrtc_connection.py +214 -8
  209. pipecat/transports/network/websocket_client.py +171 -1
  210. pipecat/transports/network/websocket_server.py +147 -9
  211. pipecat/transports/services/daily.py +792 -70
  212. pipecat/transports/services/helpers/daily_rest.py +122 -129
  213. pipecat/transports/services/livekit.py +339 -4
  214. pipecat/transports/services/tavus.py +273 -38
  215. pipecat/utils/asyncio/task_manager.py +92 -186
  216. pipecat/utils/base_object.py +83 -1
  217. pipecat/utils/network.py +2 -0
  218. pipecat/utils/string.py +114 -58
  219. pipecat/utils/text/base_text_aggregator.py +44 -13
  220. pipecat/utils/text/base_text_filter.py +46 -0
  221. pipecat/utils/text/markdown_text_filter.py +70 -14
  222. pipecat/utils/text/pattern_pair_aggregator.py +18 -14
  223. pipecat/utils/text/simple_text_aggregator.py +43 -2
  224. pipecat/utils/text/skip_tags_aggregator.py +21 -13
  225. pipecat/utils/time.py +36 -0
  226. pipecat/utils/tracing/class_decorators.py +32 -7
  227. pipecat/utils/tracing/conversation_context_provider.py +12 -2
  228. pipecat/utils/tracing/service_attributes.py +80 -64
  229. pipecat/utils/tracing/service_decorators.py +48 -21
  230. pipecat/utils/tracing/setup.py +13 -7
  231. pipecat/utils/tracing/turn_context_provider.py +12 -2
  232. pipecat/utils/tracing/turn_trace_observer.py +27 -0
  233. pipecat/utils/utils.py +14 -14
  234. dv_pipecat_ai-0.0.74.dev770.dist-info/RECORD +0 -319
  235. pipecat/examples/daily_runner.py +0 -64
  236. pipecat/examples/run.py +0 -265
  237. pipecat/utils/asyncio/watchdog_async_iterator.py +0 -72
  238. pipecat/utils/asyncio/watchdog_event.py +0 -42
  239. pipecat/utils/asyncio/watchdog_priority_queue.py +0 -48
  240. pipecat/utils/asyncio/watchdog_queue.py +0 -48
  241. {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/WHEEL +0 -0
  242. {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/licenses/LICENSE +0 -0
  243. {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/top_level.txt +0 -0
  244. /pipecat/{examples → extensions}/__init__.py +0 -0
@@ -13,14 +13,13 @@ enabling integration with OpenPipe's fine-tuning and monitoring capabilities.
13
13
  from typing import Dict, List, Optional
14
14
 
15
15
  from loguru import logger
16
- from openai.types.chat import ChatCompletionChunk, ChatCompletionMessageParam
16
+ from openai.types.chat import ChatCompletionMessageParam
17
17
 
18
18
  from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
19
19
  from pipecat.services.openai.llm import OpenAILLMService
20
20
 
21
21
  try:
22
22
  from openpipe import AsyncOpenAI as OpenPipeAI
23
- from openpipe import AsyncStream
24
23
  except ModuleNotFoundError as e:
25
24
  logger.error(f"Exception: {e}")
26
25
  logger.error("In order to use OpenPipe, you need to `pip install pipecat-ai[openpipe]`.")
@@ -33,15 +32,6 @@ class OpenPipeLLMService(OpenAILLMService):
33
32
  Extends OpenAI's LLM service to integrate with OpenPipe's fine-tuning and
34
33
  monitoring platform. Provides enhanced request logging and tagging capabilities
35
34
  for model training and evaluation.
36
-
37
- Args:
38
- model: The model name to use. Defaults to "gpt-4.1".
39
- api_key: OpenAI API key for authentication. If None, reads from environment.
40
- base_url: Custom OpenAI API endpoint URL. Uses default if None.
41
- openpipe_api_key: OpenPipe API key for enhanced features. If None, reads from environment.
42
- openpipe_base_url: OpenPipe API endpoint URL. Defaults to "https://app.openpipe.ai/api/v1".
43
- tags: Optional dictionary of tags to apply to all requests for tracking.
44
- **kwargs: Additional arguments passed to parent OpenAILLMService.
45
35
  """
46
36
 
47
37
  def __init__(
@@ -55,6 +45,17 @@ class OpenPipeLLMService(OpenAILLMService):
55
45
  tags: Optional[Dict[str, str]] = None,
56
46
  **kwargs,
57
47
  ):
48
+ """Initialize OpenPipe LLM service.
49
+
50
+ Args:
51
+ model: The model name to use. Defaults to "gpt-4.1".
52
+ api_key: OpenAI API key for authentication. If None, reads from environment.
53
+ base_url: Custom OpenAI API endpoint URL. Uses default if None.
54
+ openpipe_api_key: OpenPipe API key for enhanced features. If None, reads from environment.
55
+ openpipe_base_url: OpenPipe API endpoint URL. Defaults to "https://app.openpipe.ai/api/v1".
56
+ tags: Optional dictionary of tags to apply to all requests for tracking.
57
+ **kwargs: Additional arguments passed to parent OpenAILLMService.
58
+ """
58
59
  super().__init__(
59
60
  model=model,
60
61
  api_key=api_key,
@@ -85,22 +86,27 @@ class OpenPipeLLMService(OpenAILLMService):
85
86
  )
86
87
  return client
87
88
 
88
- async def get_chat_completions(
89
+ def build_chat_completion_params(
89
90
  self, context: OpenAILLMContext, messages: List[ChatCompletionMessageParam]
90
- ) -> AsyncStream[ChatCompletionChunk]:
91
- """Generate streaming chat completions with OpenPipe logging.
91
+ ) -> dict:
92
+ """Build parameters for OpenPipe chat completion request.
93
+
94
+ Adds OpenPipe-specific logging and tagging parameters.
92
95
 
93
96
  Args:
94
- context: The OpenAI LLM context containing conversation state.
95
- messages: List of chat completion message parameters.
97
+ context: The LLM context containing tools and configuration.
98
+ messages: List of chat completion messages to send.
96
99
 
97
100
  Returns:
98
- Async stream of chat completion chunks.
101
+ Dictionary of parameters for the chat completion request.
99
102
  """
100
- chunks = await self._client.chat.completions.create(
101
- model=self.model_name,
102
- stream=True,
103
- messages=messages,
104
- openpipe={"tags": self._tags, "log_request": True},
105
- )
106
- return chunks
103
+ # Start with base parameters
104
+ params = super().build_chat_completion_params(context, messages)
105
+
106
+ # Add OpenPipe-specific parameters
107
+ params["openpipe"] = {
108
+ "tags": self._tags,
109
+ "log_request": True,
110
+ }
111
+
112
+ return params
@@ -22,13 +22,6 @@ class OpenRouterLLMService(OpenAILLMService):
22
22
 
23
23
  This service extends OpenAILLMService to connect to OpenRouter's API endpoint while
24
24
  maintaining full compatibility with OpenAI's interface and functionality.
25
-
26
- Args:
27
- api_key: The API key for accessing OpenRouter's API. If None, will attempt
28
- to read from environment variables.
29
- model: The model identifier to use. Defaults to "openai/gpt-4o-2024-11-20".
30
- base_url: The base URL for OpenRouter API. Defaults to "https://openrouter.ai/api/v1".
31
- **kwargs: Additional keyword arguments passed to OpenAILLMService.
32
25
  """
33
26
 
34
27
  def __init__(
@@ -39,6 +32,15 @@ class OpenRouterLLMService(OpenAILLMService):
39
32
  base_url: str = "https://openrouter.ai/api/v1",
40
33
  **kwargs,
41
34
  ):
35
+ """Initialize the OpenRouter LLM service.
36
+
37
+ Args:
38
+ api_key: The API key for accessing OpenRouter's API. If None, will attempt
39
+ to read from environment variables.
40
+ model: The model identifier to use. Defaults to "openai/gpt-4o-2024-11-20".
41
+ base_url: The base URL for OpenRouter API. Defaults to "https://openrouter.ai/api/v1".
42
+ **kwargs: Additional keyword arguments passed to OpenAILLMService.
43
+ """
42
44
  super().__init__(
43
45
  api_key=api_key,
44
46
  base_url=base_url,
@@ -13,8 +13,8 @@ reporting patterns while maintaining compatibility with the Pipecat framework.
13
13
 
14
14
  from typing import List
15
15
 
16
- from openai import NOT_GIVEN, AsyncStream
17
- from openai.types.chat import ChatCompletionChunk, ChatCompletionMessageParam
16
+ from openai import NOT_GIVEN
17
+ from openai.types.chat import ChatCompletionMessageParam
18
18
 
19
19
  from pipecat.metrics.metrics import LLMTokenUsage
20
20
  from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
@@ -27,12 +27,6 @@ class PerplexityLLMService(OpenAILLMService):
27
27
  This service extends OpenAILLMService to work with Perplexity's API while maintaining
28
28
  compatibility with the OpenAI-style interface. It specifically handles the difference
29
29
  in token usage reporting between Perplexity (incremental) and OpenAI (final summary).
30
-
31
- Args:
32
- api_key: The API key for accessing Perplexity's API.
33
- base_url: The base URL for Perplexity's API. Defaults to "https://api.perplexity.ai".
34
- model: The model identifier to use. Defaults to "sonar".
35
- **kwargs: Additional keyword arguments passed to OpenAILLMService.
36
30
  """
37
31
 
38
32
  def __init__(
@@ -43,6 +37,14 @@ class PerplexityLLMService(OpenAILLMService):
43
37
  model: str = "sonar",
44
38
  **kwargs,
45
39
  ):
40
+ """Initialize the Perplexity LLM service.
41
+
42
+ Args:
43
+ api_key: The API key for accessing Perplexity's API.
44
+ base_url: The base URL for Perplexity's API. Defaults to "https://api.perplexity.ai".
45
+ model: The model identifier to use. Defaults to "sonar".
46
+ **kwargs: Additional keyword arguments passed to OpenAILLMService.
47
+ """
46
48
  super().__init__(api_key=api_key, base_url=base_url, model=model, **kwargs)
47
49
  # Counters for accumulating token usage metrics
48
50
  self._prompt_tokens = 0
@@ -51,17 +53,12 @@ class PerplexityLLMService(OpenAILLMService):
51
53
  self._has_reported_prompt_tokens = False
52
54
  self._is_processing = False
53
55
 
54
- async def get_chat_completions(
56
+ def build_chat_completion_params(
55
57
  self, context: OpenAILLMContext, messages: List[ChatCompletionMessageParam]
56
- ) -> AsyncStream[ChatCompletionChunk]:
57
- """Get chat completions from Perplexity API using OpenAI-compatible parameters.
58
-
59
- Args:
60
- context: The context containing conversation history and settings.
61
- messages: The messages to send to the API.
58
+ ) -> dict:
59
+ """Build parameters for Perplexity chat completion request.
62
60
 
63
- Returns:
64
- A stream of chat completion chunks from the Perplexity API.
61
+ Perplexity uses a subset of OpenAI parameters and doesn't support tools.
65
62
  """
66
63
  params = {
67
64
  "model": self.model_name,
@@ -81,8 +78,7 @@ class PerplexityLLMService(OpenAILLMService):
81
78
  if self._settings["max_tokens"] is not NOT_GIVEN:
82
79
  params["max_tokens"] = self._settings["max_tokens"]
83
80
 
84
- chunks = await self._client.chat.completions.create(**params)
85
- return chunks
81
+ return params
86
82
 
87
83
  async def _process_context(self, context: OpenAILLMContext):
88
84
  """Process a context through the LLM and accumulate token usage metrics.
@@ -4,6 +4,8 @@
4
4
  # SPDX-License-Identifier: BSD 2-Clause License
5
5
  #
6
6
 
7
+ """Piper TTS service implementation."""
8
+
7
9
  from typing import AsyncGenerator, Optional
8
10
 
9
11
  import aiohttp
@@ -20,16 +22,13 @@ from pipecat.services.tts_service import TTSService
20
22
  from pipecat.utils.tracing.service_decorators import traced_tts
21
23
 
22
24
 
23
- # This assumes a running TTS service running: https://github.com/rhasspy/piper/blob/master/src/python_run/README_http.md
25
+ # This assumes a running TTS service running: https://github.com/OHF-Voice/piper1-gpl/blob/main/docs/API_HTTP.md
24
26
  class PiperTTSService(TTSService):
25
27
  """Piper TTS service implementation.
26
28
 
27
- Provides integration with Piper's TTS server.
28
-
29
- Args:
30
- base_url: API base URL
31
- aiohttp_session: aiohttp ClientSession
32
- sample_rate: Output sample rate
29
+ Provides integration with Piper's HTTP TTS server for text-to-speech
30
+ synthesis. Supports streaming audio generation with configurable sample
31
+ rates and automatic WAV header removal.
33
32
  """
34
33
 
35
34
  def __init__(
@@ -42,6 +41,14 @@ class PiperTTSService(TTSService):
42
41
  sample_rate: Optional[int] = None,
43
42
  **kwargs,
44
43
  ):
44
+ """Initialize the Piper TTS service.
45
+
46
+ Args:
47
+ base_url: Base URL for the Piper TTS HTTP server.
48
+ aiohttp_session: aiohttp ClientSession for making HTTP requests.
49
+ sample_rate: Output sample rate. If None, uses the voice model's native rate.
50
+ **kwargs: Additional arguments passed to the parent TTSService.
51
+ """
45
52
  super().__init__(sample_rate=sample_rate, **kwargs)
46
53
 
47
54
  if base_url.endswith("/"):
@@ -53,26 +60,33 @@ class PiperTTSService(TTSService):
53
60
  self._settings = {"base_url": base_url}
54
61
 
55
62
  def can_generate_metrics(self) -> bool:
63
+ """Check if this service can generate processing metrics.
64
+
65
+ Returns:
66
+ True, as Piper service supports metrics generation.
67
+ """
56
68
  return True
57
69
 
58
70
  @traced_tts
59
71
  async def run_tts(self, text: str) -> AsyncGenerator[Frame, None]:
60
- """Generate speech from text using Piper API.
72
+ """Generate speech from text using Piper's HTTP API.
61
73
 
62
74
  Args:
63
- text: The text to convert to speech
75
+ text: The text to convert to speech.
64
76
 
65
77
  Yields:
66
- Frames containing audio data and status information
78
+ Frame: Audio frames containing the synthesized speech and status frames.
67
79
  """
68
80
  logger.debug(f"{self}: Generating TTS [{text}]")
69
81
  headers = {
70
- "Content-Type": "text/plain",
82
+ "Content-Type": "application/json",
71
83
  }
72
84
  try:
73
85
  await self.start_ttfb_metrics()
74
86
 
75
- async with self._session.post(self._base_url, data=text, headers=headers) as response:
87
+ async with self._session.post(
88
+ self._base_url, json={"text": text}, headers=headers
89
+ ) as response:
76
90
  if response.status != 200:
77
91
  error = await response.text()
78
92
  logger.error(