dv-pipecat-ai 0.0.82.dev857__py3-none-any.whl → 0.0.85.dev837__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries and is provided for informational purposes only.
- {dv_pipecat_ai-0.0.82.dev857.dist-info → dv_pipecat_ai-0.0.85.dev837.dist-info}/METADATA +98 -130
- {dv_pipecat_ai-0.0.82.dev857.dist-info → dv_pipecat_ai-0.0.85.dev837.dist-info}/RECORD +192 -140
- pipecat/adapters/base_llm_adapter.py +38 -1
- pipecat/adapters/services/anthropic_adapter.py +9 -14
- pipecat/adapters/services/aws_nova_sonic_adapter.py +120 -5
- pipecat/adapters/services/bedrock_adapter.py +236 -13
- pipecat/adapters/services/gemini_adapter.py +12 -8
- pipecat/adapters/services/open_ai_adapter.py +19 -7
- pipecat/adapters/services/open_ai_realtime_adapter.py +5 -0
- pipecat/audio/dtmf/dtmf-0.wav +0 -0
- pipecat/audio/dtmf/dtmf-1.wav +0 -0
- pipecat/audio/dtmf/dtmf-2.wav +0 -0
- pipecat/audio/dtmf/dtmf-3.wav +0 -0
- pipecat/audio/dtmf/dtmf-4.wav +0 -0
- pipecat/audio/dtmf/dtmf-5.wav +0 -0
- pipecat/audio/dtmf/dtmf-6.wav +0 -0
- pipecat/audio/dtmf/dtmf-7.wav +0 -0
- pipecat/audio/dtmf/dtmf-8.wav +0 -0
- pipecat/audio/dtmf/dtmf-9.wav +0 -0
- pipecat/audio/dtmf/dtmf-pound.wav +0 -0
- pipecat/audio/dtmf/dtmf-star.wav +0 -0
- pipecat/audio/filters/krisp_viva_filter.py +193 -0
- pipecat/audio/filters/noisereduce_filter.py +15 -0
- pipecat/audio/turn/base_turn_analyzer.py +9 -1
- pipecat/audio/turn/smart_turn/base_smart_turn.py +14 -8
- pipecat/audio/turn/smart_turn/data/__init__.py +0 -0
- pipecat/audio/turn/smart_turn/data/smart-turn-v3.0.onnx +0 -0
- pipecat/audio/turn/smart_turn/http_smart_turn.py +6 -2
- pipecat/audio/turn/smart_turn/local_smart_turn.py +1 -1
- pipecat/audio/turn/smart_turn/local_smart_turn_v2.py +1 -1
- pipecat/audio/turn/smart_turn/local_smart_turn_v3.py +124 -0
- pipecat/audio/vad/data/README.md +10 -0
- pipecat/audio/vad/data/silero_vad_v2.onnx +0 -0
- pipecat/audio/vad/silero.py +9 -3
- pipecat/audio/vad/vad_analyzer.py +13 -1
- pipecat/extensions/voicemail/voicemail_detector.py +5 -5
- pipecat/frames/frames.py +277 -86
- pipecat/observers/loggers/debug_log_observer.py +3 -3
- pipecat/observers/loggers/llm_log_observer.py +7 -3
- pipecat/observers/loggers/user_bot_latency_log_observer.py +22 -10
- pipecat/pipeline/runner.py +18 -6
- pipecat/pipeline/service_switcher.py +64 -36
- pipecat/pipeline/task.py +125 -79
- pipecat/pipeline/tts_switcher.py +30 -0
- pipecat/processors/aggregators/dtmf_aggregator.py +2 -3
- pipecat/processors/aggregators/{gated_openai_llm_context.py → gated_llm_context.py} +9 -9
- pipecat/processors/aggregators/gated_open_ai_llm_context.py +12 -0
- pipecat/processors/aggregators/llm_context.py +40 -2
- pipecat/processors/aggregators/llm_response.py +32 -15
- pipecat/processors/aggregators/llm_response_universal.py +19 -15
- pipecat/processors/aggregators/user_response.py +6 -6
- pipecat/processors/aggregators/vision_image_frame.py +24 -2
- pipecat/processors/audio/audio_buffer_processor.py +43 -8
- pipecat/processors/dtmf_aggregator.py +174 -77
- pipecat/processors/filters/stt_mute_filter.py +17 -0
- pipecat/processors/frame_processor.py +110 -24
- pipecat/processors/frameworks/langchain.py +8 -2
- pipecat/processors/frameworks/rtvi.py +210 -68
- pipecat/processors/frameworks/strands_agents.py +170 -0
- pipecat/processors/logger.py +2 -2
- pipecat/processors/transcript_processor.py +26 -5
- pipecat/processors/user_idle_processor.py +35 -11
- pipecat/runner/daily.py +59 -20
- pipecat/runner/run.py +395 -93
- pipecat/runner/types.py +6 -4
- pipecat/runner/utils.py +51 -10
- pipecat/serializers/__init__.py +5 -1
- pipecat/serializers/asterisk.py +16 -2
- pipecat/serializers/convox.py +41 -4
- pipecat/serializers/custom.py +257 -0
- pipecat/serializers/exotel.py +5 -5
- pipecat/serializers/livekit.py +20 -0
- pipecat/serializers/plivo.py +5 -5
- pipecat/serializers/protobuf.py +6 -5
- pipecat/serializers/telnyx.py +2 -2
- pipecat/serializers/twilio.py +43 -23
- pipecat/serializers/vi.py +324 -0
- pipecat/services/ai_service.py +2 -6
- pipecat/services/anthropic/llm.py +2 -25
- pipecat/services/assemblyai/models.py +6 -0
- pipecat/services/assemblyai/stt.py +13 -5
- pipecat/services/asyncai/tts.py +5 -3
- pipecat/services/aws/__init__.py +1 -0
- pipecat/services/aws/llm.py +147 -105
- pipecat/services/aws/nova_sonic/__init__.py +0 -0
- pipecat/services/aws/nova_sonic/context.py +436 -0
- pipecat/services/aws/nova_sonic/frames.py +25 -0
- pipecat/services/aws/nova_sonic/llm.py +1265 -0
- pipecat/services/aws/stt.py +3 -3
- pipecat/services/aws_nova_sonic/__init__.py +19 -1
- pipecat/services/aws_nova_sonic/aws.py +11 -1151
- pipecat/services/aws_nova_sonic/context.py +8 -354
- pipecat/services/aws_nova_sonic/frames.py +13 -17
- pipecat/services/azure/llm.py +51 -1
- pipecat/services/azure/realtime/__init__.py +0 -0
- pipecat/services/azure/realtime/llm.py +65 -0
- pipecat/services/azure/stt.py +15 -0
- pipecat/services/cartesia/stt.py +77 -70
- pipecat/services/cartesia/tts.py +80 -13
- pipecat/services/deepgram/__init__.py +1 -0
- pipecat/services/deepgram/flux/__init__.py +0 -0
- pipecat/services/deepgram/flux/stt.py +640 -0
- pipecat/services/elevenlabs/__init__.py +4 -1
- pipecat/services/elevenlabs/stt.py +339 -0
- pipecat/services/elevenlabs/tts.py +87 -46
- pipecat/services/fish/tts.py +5 -2
- pipecat/services/gemini_multimodal_live/events.py +38 -524
- pipecat/services/gemini_multimodal_live/file_api.py +23 -173
- pipecat/services/gemini_multimodal_live/gemini.py +41 -1403
- pipecat/services/gladia/stt.py +56 -72
- pipecat/services/google/__init__.py +1 -0
- pipecat/services/google/gemini_live/__init__.py +3 -0
- pipecat/services/google/gemini_live/file_api.py +189 -0
- pipecat/services/google/gemini_live/llm.py +1582 -0
- pipecat/services/google/gemini_live/llm_vertex.py +184 -0
- pipecat/services/google/llm.py +15 -11
- pipecat/services/google/llm_openai.py +3 -3
- pipecat/services/google/llm_vertex.py +86 -16
- pipecat/services/google/stt.py +4 -0
- pipecat/services/google/tts.py +7 -3
- pipecat/services/heygen/api.py +2 -0
- pipecat/services/heygen/client.py +8 -4
- pipecat/services/heygen/video.py +2 -0
- pipecat/services/hume/__init__.py +5 -0
- pipecat/services/hume/tts.py +220 -0
- pipecat/services/inworld/tts.py +6 -6
- pipecat/services/llm_service.py +15 -5
- pipecat/services/lmnt/tts.py +4 -2
- pipecat/services/mcp_service.py +4 -2
- pipecat/services/mem0/memory.py +6 -5
- pipecat/services/mistral/llm.py +29 -8
- pipecat/services/moondream/vision.py +42 -16
- pipecat/services/neuphonic/tts.py +5 -2
- pipecat/services/openai/__init__.py +1 -0
- pipecat/services/openai/base_llm.py +27 -20
- pipecat/services/openai/realtime/__init__.py +0 -0
- pipecat/services/openai/realtime/context.py +272 -0
- pipecat/services/openai/realtime/events.py +1106 -0
- pipecat/services/openai/realtime/frames.py +37 -0
- pipecat/services/openai/realtime/llm.py +829 -0
- pipecat/services/openai/tts.py +49 -10
- pipecat/services/openai_realtime/__init__.py +27 -0
- pipecat/services/openai_realtime/azure.py +21 -0
- pipecat/services/openai_realtime/context.py +21 -0
- pipecat/services/openai_realtime/events.py +21 -0
- pipecat/services/openai_realtime/frames.py +21 -0
- pipecat/services/openai_realtime_beta/azure.py +16 -0
- pipecat/services/openai_realtime_beta/openai.py +17 -5
- pipecat/services/piper/tts.py +7 -9
- pipecat/services/playht/tts.py +34 -4
- pipecat/services/rime/tts.py +12 -12
- pipecat/services/riva/stt.py +3 -1
- pipecat/services/salesforce/__init__.py +9 -0
- pipecat/services/salesforce/llm.py +700 -0
- pipecat/services/sarvam/__init__.py +7 -0
- pipecat/services/sarvam/stt.py +540 -0
- pipecat/services/sarvam/tts.py +97 -13
- pipecat/services/simli/video.py +2 -2
- pipecat/services/speechmatics/stt.py +22 -10
- pipecat/services/stt_service.py +47 -0
- pipecat/services/tavus/video.py +2 -2
- pipecat/services/tts_service.py +75 -22
- pipecat/services/vision_service.py +7 -6
- pipecat/services/vistaar/llm.py +51 -9
- pipecat/tests/utils.py +4 -4
- pipecat/transcriptions/language.py +41 -1
- pipecat/transports/base_input.py +13 -34
- pipecat/transports/base_output.py +140 -104
- pipecat/transports/daily/transport.py +199 -26
- pipecat/transports/heygen/__init__.py +0 -0
- pipecat/transports/heygen/transport.py +381 -0
- pipecat/transports/livekit/transport.py +228 -63
- pipecat/transports/local/audio.py +6 -1
- pipecat/transports/local/tk.py +11 -2
- pipecat/transports/network/fastapi_websocket.py +1 -1
- pipecat/transports/smallwebrtc/connection.py +103 -19
- pipecat/transports/smallwebrtc/request_handler.py +246 -0
- pipecat/transports/smallwebrtc/transport.py +65 -23
- pipecat/transports/tavus/transport.py +23 -12
- pipecat/transports/websocket/client.py +41 -5
- pipecat/transports/websocket/fastapi.py +21 -11
- pipecat/transports/websocket/server.py +14 -7
- pipecat/transports/whatsapp/api.py +8 -0
- pipecat/transports/whatsapp/client.py +47 -0
- pipecat/utils/base_object.py +54 -22
- pipecat/utils/redis.py +58 -0
- pipecat/utils/string.py +13 -1
- pipecat/utils/tracing/service_decorators.py +21 -21
- pipecat/serializers/genesys.py +0 -95
- pipecat/services/google/test-google-chirp.py +0 -45
- pipecat/services/openai.py +0 -698
- {dv_pipecat_ai-0.0.82.dev857.dist-info → dv_pipecat_ai-0.0.85.dev837.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.82.dev857.dist-info → dv_pipecat_ai-0.0.85.dev837.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.82.dev857.dist-info → dv_pipecat_ai-0.0.85.dev837.dist-info}/top_level.txt +0 -0
- /pipecat/services/{aws_nova_sonic → aws/nova_sonic}/ready.wav +0 -0
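The METADATA diff below replaces several unpinned `websockets` requirements with a shared `websockets-base` extra (pulled in via `pipecat-ai[websockets-base]`) and adds new extras such as `hume`, `strands`, and `local-smart-turn-v3`. A minimal install sketch using two of the declared extras (extra names and version are taken from the metadata below; adjust to the services you actually use):

```bash
# Install the new release with the elevenlabs and cartesia extras.
# Both extras now resolve websockets through the shared
# pipecat-ai[websockets-base] extra, per the METADATA diff below.
pip install "dv-pipecat-ai[elevenlabs,cartesia]==0.0.85.dev837"
```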
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dv-pipecat-ai
-Version: 0.0.82.dev857
+Version: 0.0.85.dev837
 Summary: An open source framework for voice (and multimodal) assistants
 License-Expression: BSD-2-Clause
 Project-URL: Source, https://github.com/pipecat-ai/pipecat
@@ -29,7 +29,7 @@ Requires-Dist: pydantic<3,>=2.10.6
 Requires-Dist: pyloudnorm~=0.1.1
 Requires-Dist: resampy~=0.4.3
 Requires-Dist: soxr~=0.5.0
-Requires-Dist: openai
+Requires-Dist: openai<3,>=1.74.0
 Requires-Dist: numba==0.61.2
 Requires-Dist: wait_for2>=0.4.1; python_version < "3.12"
 Provides-Extra: aic
@@ -37,48 +37,50 @@ Requires-Dist: aic-sdk~=1.0.1; extra == "aic"
 Provides-Extra: anthropic
 Requires-Dist: anthropic~=0.49.0; extra == "anthropic"
 Provides-Extra: assemblyai
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "assemblyai"
 Provides-Extra: asyncai
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "asyncai"
 Provides-Extra: aws
 Requires-Dist: aioboto3~=15.0.0; extra == "aws"
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "aws"
 Provides-Extra: aws-nova-sonic
-Requires-Dist: aws_sdk_bedrock_runtime~=0.0
+Requires-Dist: aws_sdk_bedrock_runtime~=0.1.0; python_version >= "3.12" and extra == "aws-nova-sonic"
 Provides-Extra: azure
 Requires-Dist: azure-cognitiveservices-speech~=1.42.0; extra == "azure"
 Provides-Extra: cartesia
 Requires-Dist: cartesia~=2.0.3; extra == "cartesia"
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "cartesia"
 Provides-Extra: cerebras
 Provides-Extra: deepseek
 Provides-Extra: daily
-Requires-Dist: daily-python~=0.
+Requires-Dist: daily-python~=0.20.0; extra == "daily"
 Provides-Extra: deepgram
 Requires-Dist: deepgram-sdk~=4.7.0; extra == "deepgram"
 Provides-Extra: elevenlabs
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "elevenlabs"
 Provides-Extra: fal
 Requires-Dist: fal-client~=0.5.9; extra == "fal"
 Provides-Extra: fireworks
 Provides-Extra: fish
 Requires-Dist: ormsgpack~=1.7.0; extra == "fish"
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "fish"
 Provides-Extra: gladia
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "gladia"
 Provides-Extra: google
-Requires-Dist: google-cloud-speech
-Requires-Dist: google-cloud-texttospeech
-Requires-Dist: google-genai
-Requires-Dist: websockets
+Requires-Dist: google-cloud-speech<3,>=2.33.0; extra == "google"
+Requires-Dist: google-cloud-texttospeech<3,>=2.31.0; extra == "google"
+Requires-Dist: google-genai<2,>=1.41.0; extra == "google"
+Requires-Dist: pipecat-ai[websockets-base]; extra == "google"
 Provides-Extra: grok
 Provides-Extra: groq
 Requires-Dist: groq~=0.23.0; extra == "groq"
 Provides-Extra: gstreamer
 Requires-Dist: pygobject~=3.50.0; extra == "gstreamer"
 Provides-Extra: heygen
-Requires-Dist: livekit>=0.
-Requires-Dist: websockets
+Requires-Dist: livekit>=1.0.13; extra == "heygen"
+Requires-Dist: pipecat-ai[websockets-base]; extra == "heygen"
+Provides-Extra: hume
+Requires-Dist: hume>=0.11.2; extra == "hume"
 Provides-Extra: inworld
 Provides-Extra: krisp
 Requires-Dist: pipecat-ai-krisp~=0.4.0; extra == "krisp"
@@ -89,15 +91,15 @@ Requires-Dist: langchain~=0.3.20; extra == "langchain"
 Requires-Dist: langchain-community~=0.3.20; extra == "langchain"
 Requires-Dist: langchain-openai~=0.3.9; extra == "langchain"
 Provides-Extra: livekit
-Requires-Dist: livekit~=0.
-Requires-Dist: livekit-api~=0.
+Requires-Dist: livekit~=1.0.13; extra == "livekit"
+Requires-Dist: livekit-api~=1.0.5; extra == "livekit"
 Requires-Dist: tenacity<10.0.0,>=8.2.3; extra == "livekit"
 Provides-Extra: lmnt
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "lmnt"
 Provides-Extra: local
 Requires-Dist: pyaudio~=0.2.14; extra == "local"
 Provides-Extra: mcp
-Requires-Dist: mcp[cli]
+Requires-Dist: mcp[cli]<2,>=1.11.0; extra == "mcp"
 Provides-Extra: mem0
 Requires-Dist: mem0ai~=0.1.94; extra == "mem0"
 Provides-Extra: mistral
@@ -111,20 +113,20 @@ Requires-Dist: timm~=1.0.13; extra == "moondream"
 Requires-Dist: transformers>=4.48.0; extra == "moondream"
 Provides-Extra: nim
 Provides-Extra: neuphonic
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "neuphonic"
 Provides-Extra: noisereduce
 Requires-Dist: noisereduce~=3.0.3; extra == "noisereduce"
 Provides-Extra: openai
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "openai"
 Provides-Extra: openpipe
-Requires-Dist: openpipe
+Requires-Dist: openpipe<6,>=4.50.0; extra == "openpipe"
 Provides-Extra: openrouter
 Provides-Extra: perplexity
 Provides-Extra: playht
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "playht"
 Provides-Extra: qwen
 Provides-Extra: rime
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "rime"
 Provides-Extra: riva
 Requires-Dist: nvidia-riva-client~=2.21.1; extra == "riva"
 Provides-Extra: runner
@@ -134,25 +136,31 @@ Requires-Dist: fastapi<0.117.0,>=0.115.6; extra == "runner"
 Requires-Dist: pipecat-ai-small-webrtc-prebuilt>=1.0.0; extra == "runner"
 Provides-Extra: sambanova
 Provides-Extra: sarvam
+Requires-Dist: sarvamai<1,>=0.1.19; extra == "sarvam"
 Requires-Dist: websockets<15.0,>=13.1; extra == "sarvam"
 Provides-Extra: sentry
-Requires-Dist: sentry-sdk
+Requires-Dist: sentry-sdk<3,>=2.28.0; extra == "sentry"
 Provides-Extra: local-smart-turn
 Requires-Dist: coremltools>=8.0; extra == "local-smart-turn"
 Requires-Dist: transformers; extra == "local-smart-turn"
 Requires-Dist: torch<3,>=2.5.0; extra == "local-smart-turn"
 Requires-Dist: torchaudio<3,>=2.5.0; extra == "local-smart-turn"
+Provides-Extra: local-smart-turn-v3
+Requires-Dist: transformers; extra == "local-smart-turn-v3"
+Requires-Dist: onnxruntime<2,>=1.20.1; extra == "local-smart-turn-v3"
 Provides-Extra: remote-smart-turn
 Provides-Extra: silero
-Requires-Dist: onnxruntime
+Requires-Dist: onnxruntime<2,>=1.20.1; extra == "silero"
 Provides-Extra: simli
 Requires-Dist: simli-ai~=0.1.10; extra == "simli"
 Provides-Extra: soniox
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "soniox"
 Provides-Extra: soundfile
 Requires-Dist: soundfile~=0.13.0; extra == "soundfile"
 Provides-Extra: speechmatics
-Requires-Dist: speechmatics-rt>=0.
+Requires-Dist: speechmatics-rt>=0.5.0; extra == "speechmatics"
+Provides-Extra: strands
+Requires-Dist: strands-agents<2,>=1.9.1; extra == "strands"
 Provides-Extra: tavus
 Provides-Extra: together
 Provides-Extra: tracing
@@ -163,11 +171,13 @@ Provides-Extra: ultravox
 Requires-Dist: transformers>=4.48.0; extra == "ultravox"
 Requires-Dist: vllm>=0.9.0; extra == "ultravox"
 Provides-Extra: webrtc
-Requires-Dist: aiortc
-Requires-Dist: opencv-python
+Requires-Dist: aiortc<2,>=1.13.0; extra == "webrtc"
+Requires-Dist: opencv-python<5,>=4.11.0.86; extra == "webrtc"
 Provides-Extra: websocket
-Requires-Dist: websockets
+Requires-Dist: pipecat-ai[websockets-base]; extra == "websocket"
 Requires-Dist: fastapi<0.117.0,>=0.115.6; extra == "websocket"
+Provides-Extra: websockets-base
+Requires-Dist: websockets<16.0,>=13.1; extra == "websockets-base"
 Provides-Extra: whisper
 Requires-Dist: faster-whisper~=1.1.1; extra == "whisper"
 Dynamic: license-file
@@ -176,11 +186,12 @@ Dynamic: license-file
 <img alt="pipecat" width="300px" height="auto" src="https://raw.githubusercontent.com/pipecat-ai/pipecat/main/pipecat.png">
 </div></h1>
 
-[](https://pypi.org/project/pipecat-ai)  [](https://codecov.io/gh/pipecat-ai/pipecat) [](https://docs.pipecat.ai) [](https://discord.gg/pipecat)
+[](https://pypi.org/project/pipecat-ai)  [](https://codecov.io/gh/pipecat-ai/pipecat) [](https://docs.pipecat.ai) [](https://discord.gg/pipecat) [](https://deepwiki.com/pipecat-ai/pipecat)
+[](https://getmanta.ai/pipecat)
 
 # 🎙️ Pipecat: Real-Time Voice & Multimodal AI Agents
 
-**Pipecat** is an open-source Python framework for building real-time voice and multimodal conversational agents. Orchestrate audio and video, AI services, different transports, and conversation pipelines effortlessly—so you can focus on what makes your agent unique
+**Pipecat** is an open-source Python framework for building real-time voice and multimodal conversational agents. Orchestrate audio and video, AI services, different transports, and conversation pipelines effortlessly—so you can focus on what makes your agent unique.
 
 > Want to dive right in? Try the [quickstart](https://docs.pipecat.ai/getting-started/quickstart).
 
@@ -193,8 +204,6 @@ Dynamic: license-file
 - **Business Agents** – customer intake, support bots, guided flows
 - **Complex Dialog Systems** – design logic with structured conversations
 
-🧭 Looking to build structured conversations? Check out [Pipecat Flows](https://github.com/pipecat-ai/pipecat-flows) for managing complex conversational states and transitions.
-
 ## 🧠 Why Pipecat?
 
 - **Voice-first**: Integrates speech recognition, text-to-speech, and conversation handling
@@ -202,6 +211,39 @@ Dynamic: license-file
 - **Composable Pipelines**: Build complex behavior from modular components
 - **Real-Time**: Ultra-low latency interaction with different transports (e.g. WebSockets or WebRTC)
 
+## 🌐 Pipecat Ecosystem
+
+### 📱 Client SDKs
+
+Building client applications? You can connect to Pipecat from any platform using our official SDKs:
+
+<a href="https://docs.pipecat.ai/client/js/introduction">JavaScript</a> | <a href="https://docs.pipecat.ai/client/react/introduction">React</a> | <a href="https://docs.pipecat.ai/client/react-native/introduction">React Native</a> |
+<a href="https://docs.pipecat.ai/client/ios/introduction">Swift</a> | <a href="https://docs.pipecat.ai/client/android/introduction">Kotlin</a> | <a href="https://docs.pipecat.ai/client/c++/introduction">C++</a> | <a href="https://github.com/pipecat-ai/pipecat-esp32">ESP32</a>
+
+### 🧭 Structured conversations
+
+Looking to build structured conversations? Check out [Pipecat Flows](https://github.com/pipecat-ai/pipecat-flows) for managing complex conversational states and transitions.
+
+### 🪄 Beautiful UIs
+
+Want to build beautiful and engaging experiences? Checkout the [Voice UI Kit](https://github.com/pipecat-ai/voice-ui-kit), a collection of components, hooks and templates for building voice AI applications quickly.
+
+### 🛠️ Create and deploy projects
+
+Create a new project in under a minute with the [Pipecat CLI](https://github.com/pipecat-ai/pipecat-cli). Then use the CLI to monitor and deploy your agent to production.
+
+### 🔍 Debugging
+
+Looking for help debugging your pipeline and processors? Check out [Whisker](https://github.com/pipecat-ai/whisker), a real-time Pipecat debugger.
+
+### 🖥️ Terminal
+
+Love terminal applications? Check out [Tail](https://github.com/pipecat-ai/tail), a terminal dashboard for Pipecat.
+
+### 📺️ Pipecat TV Channel
+
+Catch new features, interviews, and how-tos on our [Pipecat TV](https://www.youtube.com/playlist?list=PLzU2zoMTQIHjqC3v4q2XVSR3hGSzwKFwH) channel.
+
 ## 🎬 See it in action
 
 <p float="left">
@@ -209,35 +251,24 @@ Dynamic: license-file
 <a href="https://github.com/pipecat-ai/pipecat-examples/tree/main/storytelling-chatbot"><img src="https://raw.githubusercontent.com/pipecat-ai/pipecat-examples/main/storytelling-chatbot/image.png" width="400" /></a>
 <br/>
 <a href="https://github.com/pipecat-ai/pipecat-examples/tree/main/translation-chatbot"><img src="https://raw.githubusercontent.com/pipecat-ai/pipecat-examples/main/translation-chatbot/image.png" width="400" /></a>
-<a href="https://github.com/pipecat-ai/pipecat
+<a href="https://github.com/pipecat-ai/pipecat/blob/main/examples/foundational/12-describe-video.py"><img src="https://github.com/pipecat-ai/pipecat/blob/main/examples/foundational/assets/moondream.png" width="400" /></a>
 </p>
 
-## 📱 Client SDKs
-
-You can connect to Pipecat from any platform using our official SDKs:
-
-| Platform | SDK Repo | Description |
-| -------- | ------------------------------------------------------------------------------ | -------------------------------- |
-| Web | [pipecat-client-web](https://github.com/pipecat-ai/pipecat-client-web) | JavaScript and React client SDKs |
-| iOS | [pipecat-client-ios](https://github.com/pipecat-ai/pipecat-client-ios) | Swift SDK for iOS |
-| Android | [pipecat-client-android](https://github.com/pipecat-ai/pipecat-client-android) | Kotlin SDK for Android |
-| C++ | [pipecat-client-cxx](https://github.com/pipecat-ai/pipecat-client-cxx) | C++ client SDK |
-
 ## 🧩 Available services
 
-| Category | Services
-| ------------------- |
-| Speech-to-Text | [AssemblyAI](https://docs.pipecat.ai/server/services/stt/assemblyai), [AWS](https://docs.pipecat.ai/server/services/stt/aws), [Azure](https://docs.pipecat.ai/server/services/stt/azure), [Cartesia](https://docs.pipecat.ai/server/services/stt/cartesia), [Deepgram](https://docs.pipecat.ai/server/services/stt/deepgram), [Fal Wizper](https://docs.pipecat.ai/server/services/stt/fal), [Gladia](https://docs.pipecat.ai/server/services/stt/gladia), [Google](https://docs.pipecat.ai/server/services/stt/google), [Groq (Whisper)](https://docs.pipecat.ai/server/services/stt/groq), [NVIDIA Riva](https://docs.pipecat.ai/server/services/stt/riva), [OpenAI (Whisper)](https://docs.pipecat.ai/server/services/stt/openai), [SambaNova (Whisper)](https://docs.pipecat.ai/server/services/stt/sambanova), [Soniox](https://docs.pipecat.ai/server/services/stt/soniox), [Speechmatics](https://docs.pipecat.ai/server/services/stt/speechmatics), [Ultravox](https://docs.pipecat.ai/server/services/stt/ultravox), [Whisper](https://docs.pipecat.ai/server/services/stt/whisper)
-| LLMs | [Anthropic](https://docs.pipecat.ai/server/services/llm/anthropic), [AWS](https://docs.pipecat.ai/server/services/llm/aws), [Azure](https://docs.pipecat.ai/server/services/llm/azure), [Cerebras](https://docs.pipecat.ai/server/services/llm/cerebras), [DeepSeek](https://docs.pipecat.ai/server/services/llm/deepseek), [Fireworks AI](https://docs.pipecat.ai/server/services/llm/fireworks), [Gemini](https://docs.pipecat.ai/server/services/llm/gemini), [Grok](https://docs.pipecat.ai/server/services/llm/grok), [Groq](https://docs.pipecat.ai/server/services/llm/groq), [Mistral](https://docs.pipecat.ai/server/services/llm/mistral), [NVIDIA NIM](https://docs.pipecat.ai/server/services/llm/nim), [Ollama](https://docs.pipecat.ai/server/services/llm/ollama), [OpenAI](https://docs.pipecat.ai/server/services/llm/openai), [OpenRouter](https://docs.pipecat.ai/server/services/llm/openrouter), [Perplexity](https://docs.pipecat.ai/server/services/llm/perplexity), [Qwen](https://docs.pipecat.ai/server/services/llm/qwen), [SambaNova](https://docs.pipecat.ai/server/services/llm/sambanova) [Together AI](https://docs.pipecat.ai/server/services/llm/together)
-| Text-to-Speech | [Async](https://docs.pipecat.ai/server/services/tts/asyncai), [AWS](https://docs.pipecat.ai/server/services/tts/aws), [Azure](https://docs.pipecat.ai/server/services/tts/azure), [Cartesia](https://docs.pipecat.ai/server/services/tts/cartesia), [Deepgram](https://docs.pipecat.ai/server/services/tts/deepgram), [ElevenLabs](https://docs.pipecat.ai/server/services/tts/elevenlabs), [Fish](https://docs.pipecat.ai/server/services/tts/fish), [Google](https://docs.pipecat.ai/server/services/tts/google), [Groq](https://docs.pipecat.ai/server/services/tts/groq), [Inworld](https://docs.pipecat.ai/server/services/tts/inworld), [LMNT](https://docs.pipecat.ai/server/services/tts/lmnt), [MiniMax](https://docs.pipecat.ai/server/services/tts/minimax), [Neuphonic](https://docs.pipecat.ai/server/services/tts/neuphonic), [NVIDIA Riva](https://docs.pipecat.ai/server/services/tts/riva), [OpenAI](https://docs.pipecat.ai/server/services/tts/openai), [Piper](https://docs.pipecat.ai/server/services/tts/piper), [PlayHT](https://docs.pipecat.ai/server/services/tts/playht), [Rime](https://docs.pipecat.ai/server/services/tts/rime), [Sarvam](https://docs.pipecat.ai/server/services/tts/sarvam), [XTTS](https://docs.pipecat.ai/server/services/tts/xtts) |
-| Speech-to-Speech | [AWS Nova Sonic](https://docs.pipecat.ai/server/services/s2s/aws), [Gemini Multimodal Live](https://docs.pipecat.ai/server/services/s2s/gemini), [OpenAI Realtime](https://docs.pipecat.ai/server/services/s2s/openai)
-| Transport | [Daily (WebRTC)](https://docs.pipecat.ai/server/services/transport/daily), [FastAPI Websocket](https://docs.pipecat.ai/server/services/transport/fastapi-websocket), [SmallWebRTCTransport](https://docs.pipecat.ai/server/services/transport/small-webrtc), [WebSocket Server](https://docs.pipecat.ai/server/services/transport/websocket-server), Local
-| Serializers | [Plivo](https://docs.pipecat.ai/server/utilities/serializers/plivo), [Twilio](https://docs.pipecat.ai/server/utilities/serializers/twilio), [Telnyx](https://docs.pipecat.ai/server/utilities/serializers/telnyx)
-| Video | [HeyGen](https://docs.pipecat.ai/server/services/video/heygen), [Tavus](https://docs.pipecat.ai/server/services/video/tavus), [Simli](https://docs.pipecat.ai/server/services/video/simli)
-| Memory | [mem0](https://docs.pipecat.ai/server/services/memory/mem0)
-| Vision & Image | [fal](https://docs.pipecat.ai/server/services/image-generation/fal), [Google Imagen](https://docs.pipecat.ai/server/services/image-generation/fal), [Moondream](https://docs.pipecat.ai/server/services/vision/moondream)
-| Audio Processing | [Silero VAD](https://docs.pipecat.ai/server/utilities/audio/silero-vad-analyzer), [Krisp](https://docs.pipecat.ai/server/utilities/audio/krisp-filter), [Koala](https://docs.pipecat.ai/server/utilities/audio/koala-filter), [ai-coustics](https://docs.pipecat.ai/server/utilities/audio/aic-filter)
-| Analytics & Metrics | [OpenTelemetry](https://docs.pipecat.ai/server/utilities/opentelemetry), [Sentry](https://docs.pipecat.ai/server/services/analytics/sentry)
+| Category | Services |
+| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Speech-to-Text | [AssemblyAI](https://docs.pipecat.ai/server/services/stt/assemblyai), [AWS](https://docs.pipecat.ai/server/services/stt/aws), [Azure](https://docs.pipecat.ai/server/services/stt/azure), [Cartesia](https://docs.pipecat.ai/server/services/stt/cartesia), [Deepgram](https://docs.pipecat.ai/server/services/stt/deepgram), [ElevenLabs](https://docs.pipecat.ai/server/services/stt/elevenlabs), [Fal Wizper](https://docs.pipecat.ai/server/services/stt/fal), [Gladia](https://docs.pipecat.ai/server/services/stt/gladia), [Google](https://docs.pipecat.ai/server/services/stt/google), [Groq (Whisper)](https://docs.pipecat.ai/server/services/stt/groq), [NVIDIA Riva](https://docs.pipecat.ai/server/services/stt/riva), [OpenAI (Whisper)](https://docs.pipecat.ai/server/services/stt/openai), [SambaNova (Whisper)](https://docs.pipecat.ai/server/services/stt/sambanova), [Soniox](https://docs.pipecat.ai/server/services/stt/soniox), [Speechmatics](https://docs.pipecat.ai/server/services/stt/speechmatics), [Ultravox](https://docs.pipecat.ai/server/services/stt/ultravox), [Whisper](https://docs.pipecat.ai/server/services/stt/whisper) |
+| LLMs | [Anthropic](https://docs.pipecat.ai/server/services/llm/anthropic), [AWS](https://docs.pipecat.ai/server/services/llm/aws), [Azure](https://docs.pipecat.ai/server/services/llm/azure), [Cerebras](https://docs.pipecat.ai/server/services/llm/cerebras), [DeepSeek](https://docs.pipecat.ai/server/services/llm/deepseek), [Fireworks AI](https://docs.pipecat.ai/server/services/llm/fireworks), [Gemini](https://docs.pipecat.ai/server/services/llm/gemini), [Grok](https://docs.pipecat.ai/server/services/llm/grok), [Groq](https://docs.pipecat.ai/server/services/llm/groq), [Mistral](https://docs.pipecat.ai/server/services/llm/mistral), [NVIDIA NIM](https://docs.pipecat.ai/server/services/llm/nim), [Ollama](https://docs.pipecat.ai/server/services/llm/ollama), [OpenAI](https://docs.pipecat.ai/server/services/llm/openai), [OpenRouter](https://docs.pipecat.ai/server/services/llm/openrouter), [Perplexity](https://docs.pipecat.ai/server/services/llm/perplexity), [Qwen](https://docs.pipecat.ai/server/services/llm/qwen), [SambaNova](https://docs.pipecat.ai/server/services/llm/sambanova) [Together AI](https://docs.pipecat.ai/server/services/llm/together) |
+| Text-to-Speech | [Async](https://docs.pipecat.ai/server/services/tts/asyncai), [AWS](https://docs.pipecat.ai/server/services/tts/aws), [Azure](https://docs.pipecat.ai/server/services/tts/azure), [Cartesia](https://docs.pipecat.ai/server/services/tts/cartesia), [Deepgram](https://docs.pipecat.ai/server/services/tts/deepgram), [ElevenLabs](https://docs.pipecat.ai/server/services/tts/elevenlabs), [Fish](https://docs.pipecat.ai/server/services/tts/fish), [Google](https://docs.pipecat.ai/server/services/tts/google), [Groq](https://docs.pipecat.ai/server/services/tts/groq), [Hume](https://docs.pipecat.ai/server/services/tts/hume), [Inworld](https://docs.pipecat.ai/server/services/tts/inworld), [LMNT](https://docs.pipecat.ai/server/services/tts/lmnt), [MiniMax](https://docs.pipecat.ai/server/services/tts/minimax), [Neuphonic](https://docs.pipecat.ai/server/services/tts/neuphonic), [NVIDIA Riva](https://docs.pipecat.ai/server/services/tts/riva), [OpenAI](https://docs.pipecat.ai/server/services/tts/openai), [Piper](https://docs.pipecat.ai/server/services/tts/piper), [PlayHT](https://docs.pipecat.ai/server/services/tts/playht), [Rime](https://docs.pipecat.ai/server/services/tts/rime), [Sarvam](https://docs.pipecat.ai/server/services/tts/sarvam), [XTTS](https://docs.pipecat.ai/server/services/tts/xtts) |
+| Speech-to-Speech | [AWS Nova Sonic](https://docs.pipecat.ai/server/services/s2s/aws), [Gemini Multimodal Live](https://docs.pipecat.ai/server/services/s2s/gemini), [OpenAI Realtime](https://docs.pipecat.ai/server/services/s2s/openai) |
+| Transport | [Daily (WebRTC)](https://docs.pipecat.ai/server/services/transport/daily), [FastAPI Websocket](https://docs.pipecat.ai/server/services/transport/fastapi-websocket), [SmallWebRTCTransport](https://docs.pipecat.ai/server/services/transport/small-webrtc), [WebSocket Server](https://docs.pipecat.ai/server/services/transport/websocket-server), Local |
+| Serializers | [Plivo](https://docs.pipecat.ai/server/utilities/serializers/plivo), [Twilio](https://docs.pipecat.ai/server/utilities/serializers/twilio), [Telnyx](https://docs.pipecat.ai/server/utilities/serializers/telnyx) |
+| Video | [HeyGen](https://docs.pipecat.ai/server/services/video/heygen), [Tavus](https://docs.pipecat.ai/server/services/video/tavus), [Simli](https://docs.pipecat.ai/server/services/video/simli) |
+| Memory | [mem0](https://docs.pipecat.ai/server/services/memory/mem0) |
+| Vision & Image | [fal](https://docs.pipecat.ai/server/services/image-generation/fal), [Google Imagen](https://docs.pipecat.ai/server/services/image-generation/fal), [Moondream](https://docs.pipecat.ai/server/services/vision/moondream) |
+| Audio Processing | [Silero VAD](https://docs.pipecat.ai/server/utilities/audio/silero-vad-analyzer), [Krisp](https://docs.pipecat.ai/server/utilities/audio/krisp-filter), [Koala](https://docs.pipecat.ai/server/utilities/audio/koala-filter), [ai-coustics](https://docs.pipecat.ai/server/utilities/audio/aic-filter) |
+| Analytics & Metrics | [OpenTelemetry](https://docs.pipecat.ai/server/utilities/opentelemetry), [Sentry](https://docs.pipecat.ai/server/services/analytics/sentry) |
 
 📚 [View full services documentation →](https://docs.pipecat.ai/server/services/supported-services)
 
@@ -303,7 +334,11 @@ You can get started with Pipecat running on your local machine, then move your a
 2. Install development and testing dependencies:
 
 ```bash
-uv sync --group dev --all-extras
+uv sync --group dev --all-extras \
+--no-extra gstreamer \
+--no-extra krisp \
+--no-extra local \
+--no-extra ultravox # (ultravox not fully supported on macOS)
 ```
 
 3. Install the git pre-commit hooks:
@@ -312,23 +347,6 @@ You can get started with Pipecat running on your local machine, then move your a
 uv run pre-commit install
 ```
 
-### Python 3.13+ Compatibility
-
-Some features require PyTorch, which doesn't yet support Python 3.13+. Install using:
-
-```bash
-uv sync --group dev --all-extras \
---no-extra gstreamer \
---no-extra krisp \
---no-extra local \
---no-extra local-smart-turn \
---no-extra mlx-whisper \
---no-extra moondream \
---no-extra ultravox
-```
-
-> **Tip:** For full compatibility, use Python 3.12: `uv python pin 3.12`
-
 > **Note**: Some extras (local, gstreamer) require system dependencies. See documentation if you encounter build errors.
 
 ### Running tests
@@ -345,54 +363,6 @@ Run a specific test suite:
 uv run pytest tests/test_name.py
 ```
 
-### Setting up your editor
-
-This project uses strict [PEP 8](https://peps.python.org/pep-0008/) formatting via [Ruff](https://github.com/astral-sh/ruff).
-
-#### Emacs
-
-You can use [use-package](https://github.com/jwiegley/use-package) to install [emacs-lazy-ruff](https://github.com/christophermadsen/emacs-lazy-ruff) package and configure `ruff` arguments:
-
-```elisp
-(use-package lazy-ruff
-:ensure t
-:hook ((python-mode . lazy-ruff-mode))
-:config
-(setq lazy-ruff-format-command "ruff format")
-(setq lazy-ruff-check-command "ruff check --select I"))
-```
-
-`ruff` was installed in the `venv` environment described before, so you should be able to use [pyvenv-auto](https://github.com/ryotaro612/pyvenv-auto) to automatically load that environment inside Emacs.
-
-```elisp
-(use-package pyvenv-auto
-:ensure t
-:defer t
-:hook ((python-mode . pyvenv-auto-run)))
-```
-
-#### Visual Studio Code
-
-Install the
-[Ruff](https://marketplace.visualstudio.com/items?itemName=charliermarsh.ruff) extension. Then edit the user settings (_Ctrl-Shift-P_ `Open User Settings (JSON)`) and set it as the default Python formatter, and enable formatting on save:
-
-```json
-"[python]": {
-"editor.defaultFormatter": "charliermarsh.ruff",
-"editor.formatOnSave": true
-}
-```
-
-#### PyCharm
-
-`ruff` was installed in the `venv` environment described before, now to enable autoformatting on save, go to `File` -> `Settings` -> `Tools` -> `File Watchers` and add a new watcher with the following settings:
-
-1. **Name**: `Ruff formatter`
-2. **File type**: `Python`
-3. **Working directory**: `$ContentRoot$`
-4. **Arguments**: `format $FilePath$`
-5. **Program**: `$PyInterpreterDirectory$/ruff`
-
 ## 🤝 Contributing
 
 We welcome contributions from the community! Whether you're fixing bugs, improving documentation, or adding new features, here's how you can help:
@@ -413,5 +383,3 @@ We aim to review all contributions promptly and provide constructive feedback to
 ➡️ [Read the docs](https://docs.pipecat.ai)
 
 ➡️ [Reach us on X](https://x.com/pipecat_ai)
-
-