dv-pipecat-ai 0.0.82.dev870__py3-none-any.whl → 0.0.82.dev878__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of dv-pipecat-ai might be problematic. Click here for more details.
- {dv_pipecat_ai-0.0.82.dev870.dist-info → dv_pipecat_ai-0.0.82.dev878.dist-info}/METADATA +1 -1
- {dv_pipecat_ai-0.0.82.dev870.dist-info → dv_pipecat_ai-0.0.82.dev878.dist-info}/RECORD +6 -6
- pipecat/services/azure/llm.py +77 -1
- {dv_pipecat_ai-0.0.82.dev870.dist-info → dv_pipecat_ai-0.0.82.dev878.dist-info}/WHEEL +0 -0
- {dv_pipecat_ai-0.0.82.dev870.dist-info → dv_pipecat_ai-0.0.82.dev878.dist-info}/licenses/LICENSE +0 -0
- {dv_pipecat_ai-0.0.82.dev870.dist-info → dv_pipecat_ai-0.0.82.dev878.dist-info}/top_level.txt +0 -0
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
dv_pipecat_ai-0.0.82.
|
|
1
|
+
dv_pipecat_ai-0.0.82.dev878.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
|
|
2
2
|
pipecat/__init__.py,sha256=j0Xm6adxHhd7D06dIyyPV_GlBYLlBnTAERVvD_jAARQ,861
|
|
3
3
|
pipecat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
4
4
|
pipecat/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
@@ -186,7 +186,7 @@ pipecat/services/aws_nova_sonic/ready.wav,sha256=pxdKxZtYRV2IVv63v7K1EPkxyV_Oxoc
|
|
|
186
186
|
pipecat/services/azure/__init__.py,sha256=mgnoJYeqKqwRvr18UZJhFZ2FTkGyob7r6IdtEiOeT3k,301
|
|
187
187
|
pipecat/services/azure/common.py,sha256=JKGDYYW1zpRaWy_l_5ZL6viHj2Ch-mKMoVx2gdCKpeo,9893
|
|
188
188
|
pipecat/services/azure/image.py,sha256=yP7_Uelz9gq2-nhRbjTNOJ6s-DrsjsGaqXPq-8Ud4q4,4191
|
|
189
|
-
pipecat/services/azure/llm.py,sha256=
|
|
189
|
+
pipecat/services/azure/llm.py,sha256=rsysuFtC3oL2ozYaP0SrF1QKqAzK9b3MCwwzzMAmAbk,5489
|
|
190
190
|
pipecat/services/azure/stt.py,sha256=POhS5XTS-Z0SlKJDdGf18eR_5Nvbq0SnjG3R2xRcykg,12772
|
|
191
191
|
pipecat/services/azure/tts.py,sha256=ytgXcYvdVkshC30K88ZGbYFtK8SmSV22h9jQEYKf9ew,19233
|
|
192
192
|
pipecat/services/cartesia/__init__.py,sha256=vzh0jBnfPwWdxFfV-tu0x1HFoOTgr9s91GYmD-CJUtY,284
|
|
@@ -377,7 +377,7 @@ pipecat/utils/tracing/service_decorators.py,sha256=HwDCqLGijhYD3F8nxDuQmEw-YkRw0
|
|
|
377
377
|
pipecat/utils/tracing/setup.py,sha256=7TEgPNpq6M8lww8OQvf0P9FzYc5A30xICGklVA-fua0,2892
|
|
378
378
|
pipecat/utils/tracing/turn_context_provider.py,sha256=ikon3plFOx0XbMrH6DdeHttNpb-U0gzMZIm3bWLc9eI,2485
|
|
379
379
|
pipecat/utils/tracing/turn_trace_observer.py,sha256=dma16SBJpYSOE58YDWy89QzHyQFc_9gQZszKeWixuwc,9725
|
|
380
|
-
dv_pipecat_ai-0.0.82.
|
|
381
|
-
dv_pipecat_ai-0.0.82.
|
|
382
|
-
dv_pipecat_ai-0.0.82.
|
|
383
|
-
dv_pipecat_ai-0.0.82.
|
|
380
|
+
dv_pipecat_ai-0.0.82.dev878.dist-info/METADATA,sha256=79zfMdkh5ZK6UwXYUi8LCnQ0PAOzLIHmhweMTd3F0AE,32639
|
|
381
|
+
dv_pipecat_ai-0.0.82.dev878.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
382
|
+
dv_pipecat_ai-0.0.82.dev878.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
|
|
383
|
+
dv_pipecat_ai-0.0.82.dev878.dist-info/RECORD,,
|
pipecat/services/azure/llm.py
CHANGED
|
@@ -1,4 +1,3 @@
|
|
|
1
|
-
#
|
|
2
1
|
# Copyright (c) 2024–2025, Daily
|
|
3
2
|
#
|
|
4
3
|
# SPDX-License-Identifier: BSD 2-Clause License
|
|
@@ -6,9 +5,14 @@
|
|
|
6
5
|
|
|
7
6
|
"""Azure OpenAI service implementation for the Pipecat AI framework."""
|
|
8
7
|
|
|
8
|
+
from typing import Any, Dict, List, Optional
|
|
9
|
+
|
|
9
10
|
from loguru import logger
|
|
10
11
|
from openai import AsyncAzureOpenAI
|
|
12
|
+
from openai._streaming import AsyncStream
|
|
13
|
+
from openai.types.chat import ChatCompletionChunk, ChatCompletionMessageParam
|
|
11
14
|
|
|
15
|
+
from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
|
|
12
16
|
from pipecat.services.openai.llm import OpenAILLMService
|
|
13
17
|
|
|
14
18
|
|
|
@@ -17,6 +21,16 @@ class AzureLLMService(OpenAILLMService):
|
|
|
17
21
|
|
|
18
22
|
This service extends OpenAILLMService to connect to Azure's OpenAI endpoint while
|
|
19
23
|
maintaining full compatibility with OpenAI's interface and functionality.
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
Args:
|
|
27
|
+
api_key: The API key for accessing Azure OpenAI.
|
|
28
|
+
endpoint: The Azure endpoint URL.
|
|
29
|
+
model: The model identifier to use.
|
|
30
|
+
api_version: Azure API version. Defaults to "2024-09-01-preview".
|
|
31
|
+
reasoning_effort: If provided for reasoning models, sets the effort (e.g. "minimal").
|
|
32
|
+
**kwargs: Additional keyword arguments passed to OpenAILLMService.
|
|
33
|
+
|
|
20
34
|
"""
|
|
21
35
|
|
|
22
36
|
def __init__(
|
|
@@ -26,6 +40,7 @@ class AzureLLMService(OpenAILLMService):
|
|
|
26
40
|
endpoint: str,
|
|
27
41
|
model: str,
|
|
28
42
|
api_version: str = "2024-09-01-preview",
|
|
43
|
+
reasoning_effort: Optional[str] = None,
|
|
29
44
|
**kwargs,
|
|
30
45
|
):
|
|
31
46
|
"""Initialize the Azure LLM service.
|
|
@@ -41,6 +56,7 @@ class AzureLLMService(OpenAILLMService):
|
|
|
41
56
|
# will call create_client() and we need those values there.
|
|
42
57
|
self._endpoint = endpoint
|
|
43
58
|
self._api_version = api_version
|
|
59
|
+
self._reasoning_effort = reasoning_effort
|
|
44
60
|
super().__init__(api_key=api_key, model=model, **kwargs)
|
|
45
61
|
|
|
46
62
|
def create_client(self, api_key=None, base_url=None, **kwargs):
|
|
@@ -62,3 +78,63 @@ class AzureLLMService(OpenAILLMService):
|
|
|
62
78
|
api_version=self._api_version,
|
|
63
79
|
azure_deployment=azure_deployment,
|
|
64
80
|
)
|
|
81
|
+
|
|
82
|
+
def _is_reasoning_model(self) -> bool:
|
|
83
|
+
"""Check if the current model supports reasoning parameters.
|
|
84
|
+
|
|
85
|
+
Based on search results:
|
|
86
|
+
- GPT-5, GPT-5-mini, and GPT-5-nano are reasoning models
|
|
87
|
+
- GPT-5-chat is a standard chat model that doesn't use reasoning by default
|
|
88
|
+
|
|
89
|
+
Returns:
|
|
90
|
+
True if model supports reasoning parameters.
|
|
91
|
+
"""
|
|
92
|
+
model_name_lower = self.model_name.lower()
|
|
93
|
+
|
|
94
|
+
# Reasoning-capable models
|
|
95
|
+
reasoning_models = {"gpt-5-nano", "gpt-5", "gpt-5-mini"}
|
|
96
|
+
return model_name_lower in reasoning_models
|
|
97
|
+
|
|
98
|
+
async def get_chat_completions(
    self, context: OpenAILLMContext, messages: List[ChatCompletionMessageParam]
) -> AsyncStream[ChatCompletionChunk]:
    """Request a streaming chat completion from Azure OpenAI.

    Builds the request parameters according to the model family: reasoning
    models get an optional ``reasoning_effort`` (and ``seed`` when set) but
    none of the sampling parameters, while standard chat models receive the
    full set of sampling settings. Extra parameters from settings are
    applied last and override anything assembled before them.

    Args:
        context: The LLM context supplying ``tools`` and ``tool_choice``.
        messages: The chat messages to send.

    Returns:
        An async stream of chat completion chunks.
    """
    settings = self._settings
    # NOTE(review): both max_tokens and max_completion_tokens are always
    # sent; presumably the unused one defaults to a not-given sentinel in
    # settings — confirm, since Azure reasoning models reject max_tokens.
    request: Dict[str, Any] = {
        "model": self.model_name,
        "stream": True,
        "messages": messages,
        "tools": context.tools,
        "tool_choice": context.tool_choice,
        "stream_options": {"include_usage": True},
        "max_tokens": settings["max_tokens"],
        "max_completion_tokens": settings["max_completion_tokens"],
    }

    if self._is_reasoning_model():
        # Reasoning models do not accept temperature/top_p/penalties;
        # pass only the reasoning effort and, when truthy, the seed.
        if self._reasoning_effort:
            request["reasoning_effort"] = self._reasoning_effort
        if settings.get("seed"):
            request["seed"] = settings["seed"]
    else:
        # Standard chat models take the full set of sampling parameters.
        for name in ("frequency_penalty", "presence_penalty", "seed", "temperature", "top_p"):
            request[name] = settings[name]

    # Caller-provided extras win over everything assembled above.
    request.update(settings.get("extra", {}))

    return await self._client.chat.completions.create(**request)
|
|
File without changes
|
{dv_pipecat_ai-0.0.82.dev870.dist-info → dv_pipecat_ai-0.0.82.dev878.dist-info}/licenses/LICENSE
RENAMED
|
File without changes
|
{dv_pipecat_ai-0.0.82.dev870.dist-info → dv_pipecat_ai-0.0.82.dev878.dist-info}/top_level.txt
RENAMED
|
File without changes
|