dv-pipecat-ai 0.0.85.dev852__py3-none-any.whl → 0.0.85.dev854__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dv-pipecat-ai might be problematic.
- {dv_pipecat_ai-0.0.85.dev852.dist-info → dv_pipecat_ai-0.0.85.dev854.dist-info}/METADATA +1 -1
- {dv_pipecat_ai-0.0.85.dev852.dist-info → dv_pipecat_ai-0.0.85.dev854.dist-info}/RECORD +7 -7
- pipecat/serializers/vi.py +2 -0
- pipecat/services/openai/base_llm.py +28 -7
- {dv_pipecat_ai-0.0.85.dev852.dist-info → dv_pipecat_ai-0.0.85.dev854.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.85.dev852.dist-info → dv_pipecat_ai-0.0.85.dev854.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.85.dev852.dist-info → dv_pipecat_ai-0.0.85.dev854.dist-info}/top_level.txt +0 -0
{dv_pipecat_ai-0.0.85.dev852.dist-info → dv_pipecat_ai-0.0.85.dev854.dist-info}/RECORD
RENAMED

@@ -1,4 +1,4 @@
-dv_pipecat_ai-0.0.85.
+dv_pipecat_ai-0.0.85.dev854.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
 pipecat/__init__.py,sha256=j0Xm6adxHhd7D06dIyyPV_GlBYLlBnTAERVvD_jAARQ,861
 pipecat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pipecat/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -164,7 +164,7 @@ pipecat/serializers/plivo.py,sha256=ie6VUhZDTJ7KlAuJyHNeIeMtJ3ScDq_2js1SZtz7jLI,
 pipecat/serializers/protobuf.py,sha256=L0jSqvgTdkfxsu6JWjYK8QSTVji9nhzmgRsEEbGU7xY,5223
 pipecat/serializers/telnyx.py,sha256=eFkC7dExDFildYLR8DPvgfHbgXlCwdSPd1vc11yxyok,10847
 pipecat/serializers/twilio.py,sha256=0emSzXVw8DU_N5RPruMekbBKku9Q429-0z1PMuYejSk,10823
-pipecat/serializers/vi.py,sha256=
+pipecat/serializers/vi.py,sha256=XdnyNwhpxo8CvgS4v1_9DmetF0BkRmx8KoSVdjCdPPQ,11247
 pipecat/services/__init__.py,sha256=8e3Ta-8_BOPozhDB3l0GJkNXs5PWhib6yqZQUof2Kvw,1209
 pipecat/services/ai_service.py,sha256=yE386fm2Id-yD4fCNfkmEMtg0lTA7PB17n2x_A_jwTg,5896
 pipecat/services/ai_services.py,sha256=_RrDWfM8adV17atzY9RxK0nXRVM5kbUkKrvN90GAWYM,795
@@ -280,7 +280,7 @@ pipecat/services/nim/llm.py,sha256=o4WPGI6kOmSiMV7WwOZ0cNEAoq9hW4Aqs2R8X7c9i94,4
 pipecat/services/ollama/__init__.py,sha256=aw-25zYsR8LR74OFFlMKMTnJjaKwOzdPWVsClueNRkI,255
 pipecat/services/ollama/llm.py,sha256=rfpG92LRHGJlpENKhF6ld8CLVS9DxlKW-WRVNldOIGs,1605
 pipecat/services/openai/__init__.py,sha256=V0ZVa8PzEm3hmcStYICbAsYwfgk4ytZ6kiQoq9UZPmI,354
-pipecat/services/openai/base_llm.py,sha256=
+pipecat/services/openai/base_llm.py,sha256=mrHRwYL-0lYr0BhJMg_ByTfqY2no6T8DOrhqMDztBlY,21518
 pipecat/services/openai/image.py,sha256=3e3h-dVQ6DQuQE7fp8akXwRMd-oYOdGuZg7RCOjHu9A,2994
 pipecat/services/openai/llm.py,sha256=_aKtz1VebSFUUenT3tH6mBW9pSCm65_u45cDu_dkTzs,7396
 pipecat/services/openai/stt.py,sha256=Idf0k73kxFyDgNRBt62MFpoKKNsBV9bwvJteJ6MGWzQ,2419
@@ -416,7 +416,7 @@ pipecat/utils/tracing/service_decorators.py,sha256=fwzxFpi8DJl6BJbK74G0UEB4ccMJg
 pipecat/utils/tracing/setup.py,sha256=7TEgPNpq6M8lww8OQvf0P9FzYc5A30xICGklVA-fua0,2892
 pipecat/utils/tracing/turn_context_provider.py,sha256=ikon3plFOx0XbMrH6DdeHttNpb-U0gzMZIm3bWLc9eI,2485
 pipecat/utils/tracing/turn_trace_observer.py,sha256=dma16SBJpYSOE58YDWy89QzHyQFc_9gQZszKeWixuwc,9725
-dv_pipecat_ai-0.0.85.
-dv_pipecat_ai-0.0.85.
-dv_pipecat_ai-0.0.85.
-dv_pipecat_ai-0.0.85.
+dv_pipecat_ai-0.0.85.dev854.dist-info/METADATA,sha256=bO3NZWI6qyHGO6asSfaEk4uIxnYPnOAp3BQDzr-sf1w,32955
+dv_pipecat_ai-0.0.85.dev854.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dv_pipecat_ai-0.0.85.dev854.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
+dv_pipecat_ai-0.0.85.dev854.dist-info/RECORD,,

pipecat/serializers/vi.py
CHANGED
@@ -173,6 +173,8 @@ class VIFrameSerializer(FrameSerializer):
                 },
             }
 
+            logger.debug(f"VI: Sending media event {message} for stream_id: {self._stream_id}")
+
             return json.dumps(message)
 
         elif isinstance(frame, (TransportMessageFrame, TransportMessageUrgentFrame)):

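The only functional change in vi.py is the DEBUG-level log above. A minimal sketch for surfacing it, assuming the module-level logger in vi.py is Loguru's, as in the rest of pipecat:

import sys

from loguru import logger

logger.remove()                        # drop the default sink
logger.add(sys.stderr, level="DEBUG")  # DEBUG so the "VI: Sending media event ..." lines show up
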
pipecat/services/openai/base_llm.py
CHANGED

@@ -18,6 +18,7 @@ from openai import (
     APITimeoutError,
     AsyncOpenAI,
     AsyncStream,
+    BadRequestError,
     DefaultAsyncHttpxClient,
 )
 from openai.types.chat import ChatCompletionChunk, ChatCompletionMessageParam
@@ -100,6 +101,7 @@ class BaseOpenAILLMService(LLMService):
         params: Optional[InputParams] = None,
         retry_timeout_secs: Optional[float] = 5.0,
         retry_on_timeout: Optional[bool] = False,
+        enable_warmup: bool = False,
         **kwargs,
     ):
         """Initialize the BaseOpenAILLMService.
@@ -114,6 +116,7 @@ class BaseOpenAILLMService(LLMService):
             params: Input parameters for model configuration and behavior.
             retry_timeout_secs: Request timeout in seconds. Defaults to 5.0 seconds.
             retry_on_timeout: Whether to retry the request once if it times out.
+            enable_warmup: Whether to enable LLM cache warmup. Defaults to False.
             **kwargs: Additional arguments passed to the parent LLMService.
         """
         super().__init__(**kwargs)
@@ -133,6 +136,7 @@ class BaseOpenAILLMService(LLMService):
         }
         self._retry_timeout_secs = retry_timeout_secs
         self._retry_on_timeout = retry_on_timeout
+        self._enable_warmup = enable_warmup
         self.set_model_name(model)
         self._client = self.create_client(
             api_key=api_key,
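The three hunks above wire a new opt-in enable_warmup flag through the constructor. A minimal usage sketch, assuming the concrete OpenAILLMService subclass forwards extra keyword arguments on to BaseOpenAILLMService:

import os

from pipecat.services.openai.llm import OpenAILLMService

llm = OpenAILLMService(
    api_key=os.environ["OPENAI_API_KEY"],
    model="gpt-4o",       # placeholder model id
    enable_warmup=True,   # new flag in this release; defaults to False, leaving warmup a no-op
)
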
@@ -477,6 +481,11 @@ class BaseOpenAILLMService(LLMService):
         await self.stop_processing_metrics()
         await self.push_frame(LLMFullResponseEndFrame())
 
+    def _is_gpt5_model(self) -> bool:
+        """Check if the current model is a GPT-5 series model that requires max_completion_tokens."""
+        model = (self.model_name or "").lower()
+        return model.startswith("gpt-5")
+
     async def _handle_warmup_frame(self, frame: WarmupLLMFrame):
         """Handle WarmupLLMFrame to prime the LLM cache without emitting responses.
 
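For reference, the prefix check added above routes any model id beginning with "gpt-5" (case-insensitively) to max_completion_tokens; the ids below are examples only:

# Same prefix logic as _is_gpt5_model(), applied to illustrative model ids.
for name in ("gpt-5", "gpt-5-mini", "GPT-5-NANO", "gpt-4o"):
    print(name, name.lower().startswith("gpt-5"))  # True, True, True, False
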
@@ -486,18 +495,30 @@ class BaseOpenAILLMService(LLMService):
         Args:
             frame: WarmupLLMFrame containing the messages to cache.
         """
+        # Skip warmup if disabled
+        if not self._enable_warmup:
+            self.logger.debug("LLM warmup is disabled, skipping")
+            return
+
         try:
             # Use the provided messages for warmup
             messages: List[ChatCompletionMessageParam] = frame.messages  # type: ignore
 
             # Make a non-streaming call to warm the cache
-            # We use a minimal
-
-
-
-
-            stream
-
+            # We use a minimal token limit to reduce latency and cost
+            # GPT-5 series models require max_completion_tokens instead of max_tokens
+            warmup_params = {
+                "model": self.model_name,
+                "messages": messages,
+                "stream": False,
+            }
+
+            if self._is_gpt5_model():
+                warmup_params["max_completion_tokens"] = 10
+            else:
+                warmup_params["max_tokens"] = 10
+
+            await self._client.chat.completions.create(**warmup_params)
 
             self.logger.info("LLM cache warmed successfully")
             # Intentionally don't emit any frames - this is a silent warmup
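The warmup call above is just a non-streaming chat completion with a tiny token cap, switching to max_completion_tokens for GPT-5 series models. A standalone sketch of the same branching against the OpenAI SDK directly (model id and messages are placeholders):

import asyncio
import os

from openai import AsyncOpenAI


async def warm_cache(model: str, messages: list) -> None:
    # Mirror the branching used by the warmup handler above.
    client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])
    params = {"model": model, "messages": messages, "stream": False}
    if model.lower().startswith("gpt-5"):
        params["max_completion_tokens"] = 10  # GPT-5 series expects this instead of max_tokens
    else:
        params["max_tokens"] = 10  # keep the warmup response tiny and cheap
    await client.chat.completions.create(**params)


asyncio.run(warm_cache("gpt-4o", [{"role": "user", "content": "ping"}]))
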
{dv_pipecat_ai-0.0.85.dev852.dist-info → dv_pipecat_ai-0.0.85.dev854.dist-info}/WHEEL
RENAMED
File without changes

{dv_pipecat_ai-0.0.85.dev852.dist-info → dv_pipecat_ai-0.0.85.dev854.dist-info}/licenses/LICENSE
RENAMED
File without changes

{dv_pipecat_ai-0.0.85.dev852.dist-info → dv_pipecat_ai-0.0.85.dev854.dist-info}/top_level.txt
RENAMED
File without changes