mito-ai 0.1.28__py3-none-any.whl → 0.1.30__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of mito-ai might be problematic.
- mito_ai/_version.py +1 -1
- mito_ai/openai_client.py +22 -6
- mito_ai/tests/providers/test_azure.py +635 -0
- mito_ai/utils/anthropic_utils.py +3 -0
- mito_ai/utils/open_ai_utils.py +0 -4
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +1 -1
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
- mito_ai-0.1.28.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.114d2b34bc18a45df338.js → mito_ai-0.1.30.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.5c78616c48ffde147e05.js +225 -72
- mito_ai-0.1.30.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.5c78616c48ffde147e05.js.map +1 -0
- mito_ai-0.1.28.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.92c6411fdc4075df549b.js → mito_ai-0.1.30.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.017b9e90c5534a403f4b.js +3 -3
- mito_ai-0.1.28.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.92c6411fdc4075df549b.js.map → mito_ai-0.1.30.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.017b9e90c5534a403f4b.js.map +1 -1
- {mito_ai-0.1.28.dist-info → mito_ai-0.1.30.dist-info}/METADATA +1 -1
- {mito_ai-0.1.28.dist-info → mito_ai-0.1.30.dist-info}/RECORD +28 -27
- mito_ai-0.1.28.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.114d2b34bc18a45df338.js.map +0 -1
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js +0 -0
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js.map +0 -0
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js +0 -0
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js.map +0 -0
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js +0 -0
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -0
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
- {mito_ai-0.1.28.data → mito_ai-0.1.30.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
- {mito_ai-0.1.28.dist-info → mito_ai-0.1.30.dist-info}/WHEEL +0 -0
- {mito_ai-0.1.28.dist-info → mito_ai-0.1.30.dist-info}/entry_points.txt +0 -0
- {mito_ai-0.1.28.dist-info → mito_ai-0.1.30.dist-info}/licenses/LICENSE +0 -0
mito_ai/_version.py
CHANGED
mito_ai/openai_client.py
CHANGED
@@ -197,7 +197,7 @@ This attribute is observed by the websocket provider to push the error to the cl
         return openai.AsyncAzureOpenAI(
             api_key=constants.AZURE_OPENAI_API_KEY,
             api_version=constants.AZURE_OPENAI_API_VERSION,
-            azure_endpoint=constants.AZURE_OPENAI_ENDPOINT,
+            azure_endpoint=constants.AZURE_OPENAI_ENDPOINT, # type: ignore
             max_retries=self.max_retries,
             timeout=self.timeout,
         )
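The only change in this hunk is the trailing type: ignore comment, most likely because constants.AZURE_OPENAI_ENDPOINT is declared as Optional[str] while the AsyncAzureOpenAI signature in use expects a plain str. A minimal sketch of a type-narrowing alternative, with hypothetical stand-ins for mito_ai's constants module:

from typing import Optional

import openai

# Hypothetical stand-ins for mito_ai's constants module.
AZURE_OPENAI_API_KEY: Optional[str] = "<api-key>"
AZURE_OPENAI_API_VERSION: Optional[str] = "2024-02-01"
AZURE_OPENAI_ENDPOINT: Optional[str] = "https://example.openai.azure.com"

def build_azure_client() -> openai.AsyncAzureOpenAI:
    # Raising on a missing endpoint narrows Optional[str] to str, which
    # satisfies the AsyncAzureOpenAI signature without a type: ignore.
    if AZURE_OPENAI_ENDPOINT is None:
        raise ValueError("AZURE_OPENAI_ENDPOINT is not configured")
    return openai.AsyncAzureOpenAI(
        api_key=AZURE_OPENAI_API_KEY,
        api_version=AZURE_OPENAI_API_VERSION,
        azure_endpoint=AZURE_OPENAI_ENDPOINT,
        max_retries=1,
        timeout=30.0,
    )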
@@ -222,13 +222,25 @@ This attribute is observed by the websocket provider to push the error to the cl
         )
         return client
 
-    def _resolve_model(self, model: Optional[str] = None) -> str:
+    def _resolve_model(self, model: Optional[str] = None, response_format_info: Optional[ResponseFormatInfo] = None) -> str:
+
+        # If they have set an Azure OpenAI model, then we always use it
         if is_azure_openai_configured() and constants.AZURE_OPENAI_MODEL is not None:
+            self.log.debug(f"Resolving to Azure OpenAI model: {constants.AZURE_OPENAI_MODEL}")
             return constants.AZURE_OPENAI_MODEL
-        if constants.OLLAMA_MODEL is not None:
+
+        # Otherwise, we use the fast model for anything other than the agent mode
+        if response_format_info:
+            return OPENAI_FAST_MODEL
+
+        # If they have set an Ollama model, then we use it
+        if constants.OLLAMA_MODEL is not None:
             return constants.OLLAMA_MODEL
-        if model:
+
+        # If they have set a model, then we use it
+        if model:
             return model
+
         return OPENAI_MODEL_FALLBACK
 
     async def request_completions(
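Read top to bottom, the rewritten _resolve_model establishes a precedence order: a configured Azure OpenAI model always wins, any request carrying response_format_info goes to the fast model, then a configured Ollama model, then the caller's explicit model, and finally the fallback. A standalone sketch of that order, with assumed values for the constants (the real ones live in mito_ai's constants module, and the is_azure_openai_configured() check is elided):

from typing import Optional

# Assumed stand-ins; the real constants live in mito_ai's constants module.
AZURE_OPENAI_MODEL: Optional[str] = None   # set when Azure OpenAI is configured
OLLAMA_MODEL: Optional[str] = None         # set when Ollama is configured
OPENAI_FAST_MODEL = "gpt-4o-mini"          # assumed value
OPENAI_MODEL_FALLBACK = "gpt-4o"           # assumed value

def resolve_model(model: Optional[str], wants_structured_response: bool) -> str:
    # 1. A configured Azure OpenAI model always wins.
    if AZURE_OPENAI_MODEL is not None:
        return AZURE_OPENAI_MODEL
    # 2. Structured-output requests (response_format_info) use the fast model.
    if wants_structured_response:
        return OPENAI_FAST_MODEL
    # 3. A configured Ollama model comes next.
    if OLLAMA_MODEL is not None:
        return OLLAMA_MODEL
    # 4. Then whatever model the caller asked for.
    if model:
        return model
    # 5. Otherwise fall back to the default.
    return OPENAI_MODEL_FALLBACK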
@@ -253,7 +265,11 @@ This attribute is observed by the websocket provider to push the error to the cl
         completion = None
 
         try:
-
+
+            # Make sure we are using the correct model
+            # TODO: If we bring back inline completions or another action that needs to
+            # respond fast, we must require the user to configure a fast model with Azure as well.
+            model = self._resolve_model(model, response_format_info)
 
             # Handle other providers as before
             completion_function_params = get_open_ai_completion_function_params(
@@ -301,7 +317,7 @@ This attribute is observed by the websocket provider to push the error to the cl
         accumulated_response = ""
 
         # Validate that the model is supported.
-        model = self._resolve_model(model)
+        model = self._resolve_model(model, response_format_info)
 
         # Send initial acknowledgment
         reply_fn(CompletionReply(