dv-pipecat-ai 0.0.82.dev19__py3-none-any.whl → 0.0.82.dev23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dv-pipecat-ai might be problematic; see the registry's advisory page for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dv-pipecat-ai
3
- Version: 0.0.82.dev19
3
+ Version: 0.0.82.dev23
4
4
  Summary: An open source framework for voice (and multimodal) assistants
5
5
  License-Expression: BSD-2-Clause
6
6
  Project-URL: Source, https://github.com/pipecat-ai/pipecat
@@ -1,4 +1,4 @@
1
- dv_pipecat_ai-0.0.82.dev19.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
1
+ dv_pipecat_ai-0.0.82.dev23.dist-info/licenses/LICENSE,sha256=DWY2QGf2eMCFhuu2ChairtT6CB7BEFffNVhXWc4Od08,1301
2
2
  pipecat/__init__.py,sha256=j0Xm6adxHhd7D06dIyyPV_GlBYLlBnTAERVvD_jAARQ,861
3
3
  pipecat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  pipecat/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -185,7 +185,7 @@ pipecat/services/aws_nova_sonic/ready.wav,sha256=pxdKxZtYRV2IVv63v7K1EPkxyV_Oxoc
185
185
  pipecat/services/azure/__init__.py,sha256=mgnoJYeqKqwRvr18UZJhFZ2FTkGyob7r6IdtEiOeT3k,301
186
186
  pipecat/services/azure/common.py,sha256=JKGDYYW1zpRaWy_l_5ZL6viHj2Ch-mKMoVx2gdCKpeo,9893
187
187
  pipecat/services/azure/image.py,sha256=yP7_Uelz9gq2-nhRbjTNOJ6s-DrsjsGaqXPq-8Ud4q4,4191
188
- pipecat/services/azure/llm.py,sha256=rsysuFtC3oL2ozYaP0SrF1QKqAzK9b3MCwwzzMAmAbk,5489
188
+ pipecat/services/azure/llm.py,sha256=6nQrAsAVdCY5V7Tj_pbt7tMj2zvbeqEFjoZuoeWFpIE,4504
189
189
  pipecat/services/azure/stt.py,sha256=POhS5XTS-Z0SlKJDdGf18eR_5Nvbq0SnjG3R2xRcykg,12772
190
190
  pipecat/services/azure/tts.py,sha256=ytgXcYvdVkshC30K88ZGbYFtK8SmSV22h9jQEYKf9ew,19233
191
191
  pipecat/services/cartesia/__init__.py,sha256=vzh0jBnfPwWdxFfV-tu0x1HFoOTgr9s91GYmD-CJUtY,284
@@ -375,7 +375,7 @@ pipecat/utils/tracing/service_decorators.py,sha256=HwDCqLGijhYD3F8nxDuQmEw-YkRw0
375
375
  pipecat/utils/tracing/setup.py,sha256=7TEgPNpq6M8lww8OQvf0P9FzYc5A30xICGklVA-fua0,2892
376
376
  pipecat/utils/tracing/turn_context_provider.py,sha256=ikon3plFOx0XbMrH6DdeHttNpb-U0gzMZIm3bWLc9eI,2485
377
377
  pipecat/utils/tracing/turn_trace_observer.py,sha256=dma16SBJpYSOE58YDWy89QzHyQFc_9gQZszKeWixuwc,9725
378
- dv_pipecat_ai-0.0.82.dev19.dist-info/METADATA,sha256=inDBZhQuCRU6ePiT-sC4mFRgeofQG5hahqD0ywQbZJI,32638
379
- dv_pipecat_ai-0.0.82.dev19.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
380
- dv_pipecat_ai-0.0.82.dev19.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
381
- dv_pipecat_ai-0.0.82.dev19.dist-info/RECORD,,
378
+ dv_pipecat_ai-0.0.82.dev23.dist-info/METADATA,sha256=lmTcfrxN9Q-wacbMamUvgViyO-Yk29eONFWJ-gGzHh4,32638
379
+ dv_pipecat_ai-0.0.82.dev23.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
380
+ dv_pipecat_ai-0.0.82.dev23.dist-info/top_level.txt,sha256=kQzG20CxGf-nSsHmtXHx3hY2-8zHA3jYg8jk0TajqXc,8
381
+ dv_pipecat_ai-0.0.82.dev23.dist-info/RECORD,,
@@ -4,7 +4,7 @@
4
4
  #
5
5
 
6
6
  """Azure OpenAI service implementation for the Pipecat AI framework."""
7
-
7
+ from pipecat.adapters.services.open_ai_adapter import OpenAILLMInvocationParams
8
8
  from typing import Any, Dict, List, Optional
9
9
 
10
10
  from loguru import logger
@@ -95,46 +95,22 @@ class AzureLLMService(OpenAILLMService):
95
95
  reasoning_models = {"gpt-5-nano", "gpt-5", "gpt-5-mini"}
96
96
  return model_name_lower in reasoning_models
97
97
 
98
- async def get_chat_completions(
99
- self, context: OpenAILLMContext, messages: List[ChatCompletionMessageParam]
100
- ) -> AsyncStream[ChatCompletionChunk]:
101
- """Get streaming chat completions from Azure OpenAI API.
102
-
103
- Handles both reasoning and standard models according to Azure AI Foundry documentation.
104
- Reasoning models use automatic chain of thought and have parameter limitations.
105
- """
106
- params = {
107
- "model": self.model_name,
108
- "stream": True,
109
- "messages": messages,
110
- "tools": context.tools,
111
- "tool_choice": context.tool_choice,
112
- "stream_options": {"include_usage": True},
113
- "max_tokens": self._settings["max_tokens"],
114
- "max_completion_tokens": self._settings["max_completion_tokens"],
115
- }
98
+ def build_chat_completion_params(self, params_from_context: OpenAILLMInvocationParams) -> dict:
99
+ #include base params
100
+ params = super().build_chat_completion_params(params_from_context)
116
101
 
117
102
  if self._is_reasoning_model():
118
- # Reasoning models generally do NOT support temperature, presence_penalty, top_p
103
+ #not required for reasoning models
104
+ for k in ("frequency_penalty", "presence_penalty", "temperature", "top_p"):
105
+ if k in params:
106
+ params.pop(k, None)
119
107
  if self._reasoning_effort:
120
108
  params["reasoning_effort"] = self._reasoning_effort
121
- if self._settings.get("seed"):
122
- params["seed"] = self._settings["seed"]
109
+ seed = self._settings.get("seed")
110
+ if seed is not None:
111
+ params["seed"] = seed
123
112
  else:
124
- # Standard models support all parameters
125
- params.update(
126
- {
127
- "frequency_penalty": self._settings["frequency_penalty"],
128
- "presence_penalty": self._settings["presence_penalty"],
129
- "seed": self._settings["seed"],
130
- "temperature": self._settings["temperature"],
131
- "top_p": self._settings["top_p"],
132
- }
133
- )
134
-
135
- # Add any extra parameters from settings
136
- extra_params = self._settings.get("extra", {})
137
- params.update(extra_params)
138
-
139
- chunks = await self._client.chat.completions.create(**params)
140
- return chunks
113
+ # Standard models are fine with the defaults from the base class
114
+ pass
115
+
116
+ return params