dv-pipecat-ai 0.0.74.dev770__py3-none-any.whl → 0.0.82.dev776__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of dv-pipecat-ai might be problematic.
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/METADATA +137 -93
- dv_pipecat_ai-0.0.82.dev776.dist-info/RECORD +340 -0
- pipecat/__init__.py +17 -0
- pipecat/adapters/base_llm_adapter.py +36 -1
- pipecat/adapters/schemas/direct_function.py +296 -0
- pipecat/adapters/schemas/function_schema.py +15 -6
- pipecat/adapters/schemas/tools_schema.py +55 -7
- pipecat/adapters/services/anthropic_adapter.py +22 -3
- pipecat/adapters/services/aws_nova_sonic_adapter.py +23 -3
- pipecat/adapters/services/bedrock_adapter.py +22 -3
- pipecat/adapters/services/gemini_adapter.py +16 -3
- pipecat/adapters/services/open_ai_adapter.py +17 -2
- pipecat/adapters/services/open_ai_realtime_adapter.py +23 -3
- pipecat/audio/filters/base_audio_filter.py +30 -6
- pipecat/audio/filters/koala_filter.py +37 -2
- pipecat/audio/filters/krisp_filter.py +59 -6
- pipecat/audio/filters/noisereduce_filter.py +37 -0
- pipecat/audio/interruptions/base_interruption_strategy.py +25 -5
- pipecat/audio/interruptions/min_words_interruption_strategy.py +21 -4
- pipecat/audio/mixers/base_audio_mixer.py +30 -7
- pipecat/audio/mixers/soundfile_mixer.py +53 -6
- pipecat/audio/resamplers/base_audio_resampler.py +17 -9
- pipecat/audio/resamplers/resampy_resampler.py +26 -1
- pipecat/audio/resamplers/soxr_resampler.py +32 -1
- pipecat/audio/resamplers/soxr_stream_resampler.py +101 -0
- pipecat/audio/utils.py +194 -1
- pipecat/audio/vad/silero.py +60 -3
- pipecat/audio/vad/vad_analyzer.py +114 -30
- pipecat/clocks/base_clock.py +19 -0
- pipecat/clocks/system_clock.py +25 -0
- pipecat/extensions/voicemail/__init__.py +0 -0
- pipecat/extensions/voicemail/voicemail_detector.py +707 -0
- pipecat/frames/frames.py +590 -156
- pipecat/metrics/metrics.py +64 -1
- pipecat/observers/base_observer.py +58 -19
- pipecat/observers/loggers/debug_log_observer.py +56 -64
- pipecat/observers/loggers/llm_log_observer.py +8 -1
- pipecat/observers/loggers/transcription_log_observer.py +19 -7
- pipecat/observers/loggers/user_bot_latency_log_observer.py +32 -5
- pipecat/observers/turn_tracking_observer.py +26 -1
- pipecat/pipeline/base_pipeline.py +5 -7
- pipecat/pipeline/base_task.py +52 -9
- pipecat/pipeline/parallel_pipeline.py +121 -177
- pipecat/pipeline/pipeline.py +129 -20
- pipecat/pipeline/runner.py +50 -1
- pipecat/pipeline/sync_parallel_pipeline.py +132 -32
- pipecat/pipeline/task.py +263 -280
- pipecat/pipeline/task_observer.py +85 -34
- pipecat/pipeline/to_be_updated/merge_pipeline.py +32 -2
- pipecat/processors/aggregators/dtmf_aggregator.py +29 -22
- pipecat/processors/aggregators/gated.py +25 -24
- pipecat/processors/aggregators/gated_openai_llm_context.py +22 -2
- pipecat/processors/aggregators/llm_response.py +398 -89
- pipecat/processors/aggregators/openai_llm_context.py +161 -13
- pipecat/processors/aggregators/sentence.py +25 -14
- pipecat/processors/aggregators/user_response.py +28 -3
- pipecat/processors/aggregators/vision_image_frame.py +24 -14
- pipecat/processors/async_generator.py +28 -0
- pipecat/processors/audio/audio_buffer_processor.py +78 -37
- pipecat/processors/consumer_processor.py +25 -6
- pipecat/processors/filters/frame_filter.py +23 -0
- pipecat/processors/filters/function_filter.py +30 -0
- pipecat/processors/filters/identity_filter.py +17 -2
- pipecat/processors/filters/null_filter.py +24 -1
- pipecat/processors/filters/stt_mute_filter.py +56 -21
- pipecat/processors/filters/wake_check_filter.py +46 -3
- pipecat/processors/filters/wake_notifier_filter.py +21 -3
- pipecat/processors/frame_processor.py +488 -131
- pipecat/processors/frameworks/langchain.py +38 -3
- pipecat/processors/frameworks/rtvi.py +719 -34
- pipecat/processors/gstreamer/pipeline_source.py +41 -0
- pipecat/processors/idle_frame_processor.py +26 -3
- pipecat/processors/logger.py +23 -0
- pipecat/processors/metrics/frame_processor_metrics.py +77 -4
- pipecat/processors/metrics/sentry.py +42 -4
- pipecat/processors/producer_processor.py +34 -14
- pipecat/processors/text_transformer.py +22 -10
- pipecat/processors/transcript_processor.py +48 -29
- pipecat/processors/user_idle_processor.py +31 -21
- pipecat/runner/__init__.py +1 -0
- pipecat/runner/daily.py +132 -0
- pipecat/runner/livekit.py +148 -0
- pipecat/runner/run.py +543 -0
- pipecat/runner/types.py +67 -0
- pipecat/runner/utils.py +515 -0
- pipecat/serializers/base_serializer.py +42 -0
- pipecat/serializers/exotel.py +17 -6
- pipecat/serializers/genesys.py +95 -0
- pipecat/serializers/livekit.py +33 -0
- pipecat/serializers/plivo.py +16 -15
- pipecat/serializers/protobuf.py +37 -1
- pipecat/serializers/telnyx.py +18 -17
- pipecat/serializers/twilio.py +32 -16
- pipecat/services/ai_service.py +5 -3
- pipecat/services/anthropic/llm.py +113 -43
- pipecat/services/assemblyai/models.py +63 -5
- pipecat/services/assemblyai/stt.py +64 -11
- pipecat/services/asyncai/__init__.py +0 -0
- pipecat/services/asyncai/tts.py +501 -0
- pipecat/services/aws/llm.py +185 -111
- pipecat/services/aws/stt.py +217 -23
- pipecat/services/aws/tts.py +118 -52
- pipecat/services/aws/utils.py +101 -5
- pipecat/services/aws_nova_sonic/aws.py +82 -64
- pipecat/services/aws_nova_sonic/context.py +15 -6
- pipecat/services/azure/common.py +10 -2
- pipecat/services/azure/image.py +32 -0
- pipecat/services/azure/llm.py +9 -7
- pipecat/services/azure/stt.py +65 -2
- pipecat/services/azure/tts.py +154 -23
- pipecat/services/cartesia/stt.py +125 -8
- pipecat/services/cartesia/tts.py +102 -38
- pipecat/services/cerebras/llm.py +15 -23
- pipecat/services/deepgram/stt.py +19 -11
- pipecat/services/deepgram/tts.py +36 -0
- pipecat/services/deepseek/llm.py +14 -23
- pipecat/services/elevenlabs/tts.py +330 -64
- pipecat/services/fal/image.py +43 -0
- pipecat/services/fal/stt.py +48 -10
- pipecat/services/fireworks/llm.py +14 -21
- pipecat/services/fish/tts.py +109 -9
- pipecat/services/gemini_multimodal_live/__init__.py +1 -0
- pipecat/services/gemini_multimodal_live/events.py +83 -2
- pipecat/services/gemini_multimodal_live/file_api.py +189 -0
- pipecat/services/gemini_multimodal_live/gemini.py +218 -21
- pipecat/services/gladia/config.py +17 -10
- pipecat/services/gladia/stt.py +82 -36
- pipecat/services/google/frames.py +40 -0
- pipecat/services/google/google.py +2 -0
- pipecat/services/google/image.py +39 -2
- pipecat/services/google/llm.py +176 -58
- pipecat/services/google/llm_openai.py +26 -4
- pipecat/services/google/llm_vertex.py +37 -15
- pipecat/services/google/rtvi.py +41 -0
- pipecat/services/google/stt.py +65 -17
- pipecat/services/google/test-google-chirp.py +45 -0
- pipecat/services/google/tts.py +390 -19
- pipecat/services/grok/llm.py +8 -6
- pipecat/services/groq/llm.py +8 -6
- pipecat/services/groq/stt.py +13 -9
- pipecat/services/groq/tts.py +40 -0
- pipecat/services/hamsa/__init__.py +9 -0
- pipecat/services/hamsa/stt.py +241 -0
- pipecat/services/heygen/__init__.py +5 -0
- pipecat/services/heygen/api.py +281 -0
- pipecat/services/heygen/client.py +620 -0
- pipecat/services/heygen/video.py +338 -0
- pipecat/services/image_service.py +5 -3
- pipecat/services/inworld/__init__.py +1 -0
- pipecat/services/inworld/tts.py +592 -0
- pipecat/services/llm_service.py +127 -45
- pipecat/services/lmnt/tts.py +80 -7
- pipecat/services/mcp_service.py +85 -44
- pipecat/services/mem0/memory.py +42 -13
- pipecat/services/minimax/tts.py +74 -15
- pipecat/services/mistral/__init__.py +0 -0
- pipecat/services/mistral/llm.py +185 -0
- pipecat/services/moondream/vision.py +55 -10
- pipecat/services/neuphonic/tts.py +275 -48
- pipecat/services/nim/llm.py +8 -6
- pipecat/services/ollama/llm.py +27 -7
- pipecat/services/openai/base_llm.py +54 -16
- pipecat/services/openai/image.py +30 -0
- pipecat/services/openai/llm.py +7 -5
- pipecat/services/openai/stt.py +13 -9
- pipecat/services/openai/tts.py +42 -10
- pipecat/services/openai_realtime_beta/azure.py +11 -9
- pipecat/services/openai_realtime_beta/context.py +7 -5
- pipecat/services/openai_realtime_beta/events.py +10 -7
- pipecat/services/openai_realtime_beta/openai.py +37 -18
- pipecat/services/openpipe/llm.py +30 -24
- pipecat/services/openrouter/llm.py +9 -7
- pipecat/services/perplexity/llm.py +15 -19
- pipecat/services/piper/tts.py +26 -12
- pipecat/services/playht/tts.py +227 -65
- pipecat/services/qwen/llm.py +8 -6
- pipecat/services/rime/tts.py +128 -17
- pipecat/services/riva/stt.py +160 -22
- pipecat/services/riva/tts.py +67 -2
- pipecat/services/sambanova/llm.py +19 -17
- pipecat/services/sambanova/stt.py +14 -8
- pipecat/services/sarvam/tts.py +60 -13
- pipecat/services/simli/video.py +82 -21
- pipecat/services/soniox/__init__.py +0 -0
- pipecat/services/soniox/stt.py +398 -0
- pipecat/services/speechmatics/stt.py +29 -17
- pipecat/services/stt_service.py +47 -11
- pipecat/services/tavus/video.py +94 -25
- pipecat/services/together/llm.py +8 -6
- pipecat/services/tts_service.py +77 -53
- pipecat/services/ultravox/stt.py +46 -43
- pipecat/services/vision_service.py +5 -3
- pipecat/services/websocket_service.py +12 -11
- pipecat/services/whisper/base_stt.py +58 -12
- pipecat/services/whisper/stt.py +69 -58
- pipecat/services/xtts/tts.py +59 -2
- pipecat/sync/base_notifier.py +19 -0
- pipecat/sync/event_notifier.py +24 -0
- pipecat/tests/utils.py +73 -5
- pipecat/transcriptions/language.py +24 -0
- pipecat/transports/base_input.py +112 -8
- pipecat/transports/base_output.py +235 -13
- pipecat/transports/base_transport.py +119 -0
- pipecat/transports/local/audio.py +76 -0
- pipecat/transports/local/tk.py +84 -0
- pipecat/transports/network/fastapi_websocket.py +174 -15
- pipecat/transports/network/small_webrtc.py +383 -39
- pipecat/transports/network/webrtc_connection.py +214 -8
- pipecat/transports/network/websocket_client.py +171 -1
- pipecat/transports/network/websocket_server.py +147 -9
- pipecat/transports/services/daily.py +792 -70
- pipecat/transports/services/helpers/daily_rest.py +122 -129
- pipecat/transports/services/livekit.py +339 -4
- pipecat/transports/services/tavus.py +273 -38
- pipecat/utils/asyncio/task_manager.py +92 -186
- pipecat/utils/base_object.py +83 -1
- pipecat/utils/network.py +2 -0
- pipecat/utils/string.py +114 -58
- pipecat/utils/text/base_text_aggregator.py +44 -13
- pipecat/utils/text/base_text_filter.py +46 -0
- pipecat/utils/text/markdown_text_filter.py +70 -14
- pipecat/utils/text/pattern_pair_aggregator.py +18 -14
- pipecat/utils/text/simple_text_aggregator.py +43 -2
- pipecat/utils/text/skip_tags_aggregator.py +21 -13
- pipecat/utils/time.py +36 -0
- pipecat/utils/tracing/class_decorators.py +32 -7
- pipecat/utils/tracing/conversation_context_provider.py +12 -2
- pipecat/utils/tracing/service_attributes.py +80 -64
- pipecat/utils/tracing/service_decorators.py +48 -21
- pipecat/utils/tracing/setup.py +13 -7
- pipecat/utils/tracing/turn_context_provider.py +12 -2
- pipecat/utils/tracing/turn_trace_observer.py +27 -0
- pipecat/utils/utils.py +14 -14
- dv_pipecat_ai-0.0.74.dev770.dist-info/RECORD +0 -319
- pipecat/examples/daily_runner.py +0 -64
- pipecat/examples/run.py +0 -265
- pipecat/utils/asyncio/watchdog_async_iterator.py +0 -72
- pipecat/utils/asyncio/watchdog_event.py +0 -42
- pipecat/utils/asyncio/watchdog_priority_queue.py +0 -48
- pipecat/utils/asyncio/watchdog_queue.py +0 -48
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.74.dev770.dist-info → dv_pipecat_ai-0.0.82.dev776.dist-info}/top_level.txt +0 -0
- /pipecat/{examples → extensions}/__init__.py +0 -0
pipecat/services/openpipe/llm.py
CHANGED

@@ -13,14 +13,13 @@ enabling integration with OpenPipe's fine-tuning and monitoring capabilities.
 from typing import Dict, List, Optional
 
 from loguru import logger
-from openai.types.chat import
+from openai.types.chat import ChatCompletionMessageParam
 
 from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
 from pipecat.services.openai.llm import OpenAILLMService
 
 try:
     from openpipe import AsyncOpenAI as OpenPipeAI
-    from openpipe import AsyncStream
 except ModuleNotFoundError as e:
     logger.error(f"Exception: {e}")
     logger.error("In order to use OpenPipe, you need to `pip install pipecat-ai[openpipe]`.")

@@ -33,15 +32,6 @@ class OpenPipeLLMService(OpenAILLMService):
     Extends OpenAI's LLM service to integrate with OpenPipe's fine-tuning and
     monitoring platform. Provides enhanced request logging and tagging capabilities
     for model training and evaluation.
-
-    Args:
-        model: The model name to use. Defaults to "gpt-4.1".
-        api_key: OpenAI API key for authentication. If None, reads from environment.
-        base_url: Custom OpenAI API endpoint URL. Uses default if None.
-        openpipe_api_key: OpenPipe API key for enhanced features. If None, reads from environment.
-        openpipe_base_url: OpenPipe API endpoint URL. Defaults to "https://app.openpipe.ai/api/v1".
-        tags: Optional dictionary of tags to apply to all requests for tracking.
-        **kwargs: Additional arguments passed to parent OpenAILLMService.
     """
 
     def __init__(

@@ -55,6 +45,17 @@ class OpenPipeLLMService(OpenAILLMService):
         tags: Optional[Dict[str, str]] = None,
         **kwargs,
     ):
+        """Initialize OpenPipe LLM service.
+
+        Args:
+            model: The model name to use. Defaults to "gpt-4.1".
+            api_key: OpenAI API key for authentication. If None, reads from environment.
+            base_url: Custom OpenAI API endpoint URL. Uses default if None.
+            openpipe_api_key: OpenPipe API key for enhanced features. If None, reads from environment.
+            openpipe_base_url: OpenPipe API endpoint URL. Defaults to "https://app.openpipe.ai/api/v1".
+            tags: Optional dictionary of tags to apply to all requests for tracking.
+            **kwargs: Additional arguments passed to parent OpenAILLMService.
+        """
         super().__init__(
             model=model,
             api_key=api_key,

@@ -85,22 +86,27 @@ class OpenPipeLLMService(OpenAILLMService):
         )
         return client
 
-
+    def build_chat_completion_params(
         self, context: OpenAILLMContext, messages: List[ChatCompletionMessageParam]
-    ) ->
-        """
+    ) -> dict:
+        """Build parameters for OpenPipe chat completion request.
+
+        Adds OpenPipe-specific logging and tagging parameters.
 
         Args:
-            context: The
-            messages: List of chat completion
+            context: The LLM context containing tools and configuration.
+            messages: List of chat completion messages to send.
 
         Returns:
-
+            Dictionary of parameters for the chat completion request.
         """
-
-
-
-
-
-
-
+        # Start with base parameters
+        params = super().build_chat_completion_params(context, messages)
+
+        # Add OpenPipe-specific parameters
+        params["openpipe"] = {
+            "tags": self._tags,
+            "log_request": True,
+        }
+
+        return params
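Note on the OpenPipe change: the constructor documentation moved into the `__init__` docstring, and the new `build_chat_completion_params()` override layers an `openpipe` block (tags plus `log_request`) on top of the base OpenAI parameters. As a rough usage sketch, assuming only the arguments documented in the docstring above (the key values and tag names below are placeholders):

    # Sketch only: keys and tags are placeholders; argument names follow the
    # docstring added in this diff.
    from pipecat.services.openpipe.llm import OpenPipeLLMService

    llm = OpenPipeLLMService(
        model="gpt-4.1",
        api_key="sk-...",            # OpenAI API key (placeholder)
        openpipe_api_key="opk-...",  # OpenPipe API key (placeholder)
        tags={"env": "staging", "bot": "support"},
    )

Every completion request built by this service then carries params["openpipe"] = {"tags": ..., "log_request": True} in addition to the parameters produced by the parent OpenAILLMService.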
pipecat/services/openrouter/llm.py
CHANGED

@@ -22,13 +22,6 @@ class OpenRouterLLMService(OpenAILLMService):
 
     This service extends OpenAILLMService to connect to OpenRouter's API endpoint while
     maintaining full compatibility with OpenAI's interface and functionality.
-
-    Args:
-        api_key: The API key for accessing OpenRouter's API. If None, will attempt
-            to read from environment variables.
-        model: The model identifier to use. Defaults to "openai/gpt-4o-2024-11-20".
-        base_url: The base URL for OpenRouter API. Defaults to "https://openrouter.ai/api/v1".
-        **kwargs: Additional keyword arguments passed to OpenAILLMService.
     """
 
     def __init__(

@@ -39,6 +32,15 @@ class OpenRouterLLMService(OpenAILLMService):
         base_url: str = "https://openrouter.ai/api/v1",
         **kwargs,
     ):
+        """Initialize the OpenRouter LLM service.
+
+        Args:
+            api_key: The API key for accessing OpenRouter's API. If None, will attempt
+                to read from environment variables.
+            model: The model identifier to use. Defaults to "openai/gpt-4o-2024-11-20".
+            base_url: The base URL for OpenRouter API. Defaults to "https://openrouter.ai/api/v1".
+            **kwargs: Additional keyword arguments passed to OpenAILLMService.
+        """
         super().__init__(
             api_key=api_key,
             base_url=base_url,
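The OpenRouter change is documentation-only (the Args section moved from the class docstring into `__init__`), so construction is unchanged. A minimal sketch using the defaults named in the docstring; the key is a placeholder:

    # Sketch only: defaults are the ones documented in this diff; key is a placeholder.
    from pipecat.services.openrouter.llm import OpenRouterLLMService

    llm = OpenRouterLLMService(
        api_key="sk-or-...",                   # or None to read from the environment
        model="openai/gpt-4o-2024-11-20",      # documented default
        base_url="https://openrouter.ai/api/v1",
    )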
pipecat/services/perplexity/llm.py
CHANGED

@@ -13,8 +13,8 @@ reporting patterns while maintaining compatibility with the Pipecat framework.
 
 from typing import List
 
-from openai import NOT_GIVEN
-from openai.types.chat import
+from openai import NOT_GIVEN
+from openai.types.chat import ChatCompletionMessageParam
 
 from pipecat.metrics.metrics import LLMTokenUsage
 from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext

@@ -27,12 +27,6 @@ class PerplexityLLMService(OpenAILLMService):
     This service extends OpenAILLMService to work with Perplexity's API while maintaining
     compatibility with the OpenAI-style interface. It specifically handles the difference
     in token usage reporting between Perplexity (incremental) and OpenAI (final summary).
-
-    Args:
-        api_key: The API key for accessing Perplexity's API.
-        base_url: The base URL for Perplexity's API. Defaults to "https://api.perplexity.ai".
-        model: The model identifier to use. Defaults to "sonar".
-        **kwargs: Additional keyword arguments passed to OpenAILLMService.
     """
 
     def __init__(

@@ -43,6 +37,14 @@ class PerplexityLLMService(OpenAILLMService):
         model: str = "sonar",
         **kwargs,
     ):
+        """Initialize the Perplexity LLM service.
+
+        Args:
+            api_key: The API key for accessing Perplexity's API.
+            base_url: The base URL for Perplexity's API. Defaults to "https://api.perplexity.ai".
+            model: The model identifier to use. Defaults to "sonar".
+            **kwargs: Additional keyword arguments passed to OpenAILLMService.
+        """
         super().__init__(api_key=api_key, base_url=base_url, model=model, **kwargs)
         # Counters for accumulating token usage metrics
         self._prompt_tokens = 0

@@ -51,17 +53,12 @@ class PerplexityLLMService(OpenAILLMService):
         self._has_reported_prompt_tokens = False
         self._is_processing = False
 
-
+    def build_chat_completion_params(
         self, context: OpenAILLMContext, messages: List[ChatCompletionMessageParam]
-    ) ->
-        """
-
-        Args:
-            context: The context containing conversation history and settings.
-            messages: The messages to send to the API.
+    ) -> dict:
+        """Build parameters for Perplexity chat completion request.
 
-
-            A stream of chat completion chunks from the Perplexity API.
+        Perplexity uses a subset of OpenAI parameters and doesn't support tools.
         """
         params = {
             "model": self.model_name,

@@ -81,8 +78,7 @@ class PerplexityLLMService(OpenAILLMService):
         if self._settings["max_tokens"] is not NOT_GIVEN:
             params["max_tokens"] = self._settings["max_tokens"]
 
-
-        return chunks
+        return params
 
     async def _process_context(self, context: OpenAILLMContext):
         """Process a context through the LLM and accumulate token usage metrics.
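For Perplexity, besides the docstring move, the old helper that returned chunks is replaced by `build_chat_completion_params()`, which returns a plain parameter dict (no tools, since Perplexity does not support them). A hedged construction sketch based on the documented defaults; the key is a placeholder:

    # Sketch only: argument names and defaults come from the docstring in this diff.
    from pipecat.services.perplexity.llm import PerplexityLLMService

    llm = PerplexityLLMService(
        api_key="pplx-...",   # placeholder Perplexity API key
        model="sonar",        # documented default
    )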
pipecat/services/piper/tts.py
CHANGED
|
@@ -4,6 +4,8 @@
|
|
|
4
4
|
# SPDX-License-Identifier: BSD 2-Clause License
|
|
5
5
|
#
|
|
6
6
|
|
|
7
|
+
"""Piper TTS service implementation."""
|
|
8
|
+
|
|
7
9
|
from typing import AsyncGenerator, Optional
|
|
8
10
|
|
|
9
11
|
import aiohttp
|
|
@@ -20,16 +22,13 @@ from pipecat.services.tts_service import TTSService
|
|
|
20
22
|
from pipecat.utils.tracing.service_decorators import traced_tts
|
|
21
23
|
|
|
22
24
|
|
|
23
|
-
# This assumes a running TTS service running: https://github.com/
|
|
25
|
+
# This assumes a running TTS service running: https://github.com/OHF-Voice/piper1-gpl/blob/main/docs/API_HTTP.md
|
|
24
26
|
class PiperTTSService(TTSService):
|
|
25
27
|
"""Piper TTS service implementation.
|
|
26
28
|
|
|
27
|
-
Provides integration with Piper's TTS server
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
base_url: API base URL
|
|
31
|
-
aiohttp_session: aiohttp ClientSession
|
|
32
|
-
sample_rate: Output sample rate
|
|
29
|
+
Provides integration with Piper's HTTP TTS server for text-to-speech
|
|
30
|
+
synthesis. Supports streaming audio generation with configurable sample
|
|
31
|
+
rates and automatic WAV header removal.
|
|
33
32
|
"""
|
|
34
33
|
|
|
35
34
|
def __init__(
|
|
@@ -42,6 +41,14 @@ class PiperTTSService(TTSService):
|
|
|
42
41
|
sample_rate: Optional[int] = None,
|
|
43
42
|
**kwargs,
|
|
44
43
|
):
|
|
44
|
+
"""Initialize the Piper TTS service.
|
|
45
|
+
|
|
46
|
+
Args:
|
|
47
|
+
base_url: Base URL for the Piper TTS HTTP server.
|
|
48
|
+
aiohttp_session: aiohttp ClientSession for making HTTP requests.
|
|
49
|
+
sample_rate: Output sample rate. If None, uses the voice model's native rate.
|
|
50
|
+
**kwargs: Additional arguments passed to the parent TTSService.
|
|
51
|
+
"""
|
|
45
52
|
super().__init__(sample_rate=sample_rate, **kwargs)
|
|
46
53
|
|
|
47
54
|
if base_url.endswith("/"):
|
|
@@ -53,26 +60,33 @@ class PiperTTSService(TTSService):
|
|
|
53
60
|
self._settings = {"base_url": base_url}
|
|
54
61
|
|
|
55
62
|
def can_generate_metrics(self) -> bool:
|
|
63
|
+
"""Check if this service can generate processing metrics.
|
|
64
|
+
|
|
65
|
+
Returns:
|
|
66
|
+
True, as Piper service supports metrics generation.
|
|
67
|
+
"""
|
|
56
68
|
return True
|
|
57
69
|
|
|
58
70
|
@traced_tts
|
|
59
71
|
async def run_tts(self, text: str) -> AsyncGenerator[Frame, None]:
|
|
60
|
-
"""Generate speech from text using Piper API.
|
|
72
|
+
"""Generate speech from text using Piper's HTTP API.
|
|
61
73
|
|
|
62
74
|
Args:
|
|
63
|
-
text: The text to convert to speech
|
|
75
|
+
text: The text to convert to speech.
|
|
64
76
|
|
|
65
77
|
Yields:
|
|
66
|
-
|
|
78
|
+
Frame: Audio frames containing the synthesized speech and status frames.
|
|
67
79
|
"""
|
|
68
80
|
logger.debug(f"{self}: Generating TTS [{text}]")
|
|
69
81
|
headers = {
|
|
70
|
-
"Content-Type": "
|
|
82
|
+
"Content-Type": "application/json",
|
|
71
83
|
}
|
|
72
84
|
try:
|
|
73
85
|
await self.start_ttfb_metrics()
|
|
74
86
|
|
|
75
|
-
async with self._session.post(
|
|
87
|
+
async with self._session.post(
|
|
88
|
+
self._base_url, json={"text": text}, headers=headers
|
|
89
|
+
) as response:
|
|
76
90
|
if response.status != 200:
|
|
77
91
|
error = await response.text()
|
|
78
92
|
logger.error(
|