mistralai 1.9.11__py3-none-any.whl → 1.10.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_hooks/registration.py +5 -0
- mistralai/_hooks/tracing.py +75 -0
- mistralai/_version.py +2 -2
- mistralai/accesses.py +8 -8
- mistralai/agents.py +29 -17
- mistralai/chat.py +41 -29
- mistralai/classifiers.py +13 -1
- mistralai/conversations.py +294 -62
- mistralai/documents.py +19 -3
- mistralai/embeddings.py +13 -7
- mistralai/extra/README.md +1 -1
- mistralai/extra/mcp/auth.py +10 -11
- mistralai/extra/mcp/base.py +17 -16
- mistralai/extra/mcp/sse.py +13 -15
- mistralai/extra/mcp/stdio.py +5 -6
- mistralai/extra/observability/__init__.py +15 -0
- mistralai/extra/observability/otel.py +372 -0
- mistralai/extra/run/context.py +33 -43
- mistralai/extra/run/result.py +29 -30
- mistralai/extra/run/tools.py +34 -23
- mistralai/extra/struct_chat.py +15 -8
- mistralai/extra/utils/response_format.py +5 -3
- mistralai/files.py +6 -0
- mistralai/fim.py +17 -5
- mistralai/mistral_agents.py +229 -1
- mistralai/mistral_jobs.py +39 -13
- mistralai/models/__init__.py +99 -3
- mistralai/models/agent.py +15 -2
- mistralai/models/agentconversation.py +11 -3
- mistralai/models/agentcreationrequest.py +6 -2
- mistralai/models/agents_api_v1_agents_deleteop.py +16 -0
- mistralai/models/agents_api_v1_agents_getop.py +40 -3
- mistralai/models/agents_api_v1_agents_listop.py +72 -2
- mistralai/models/agents_api_v1_conversations_deleteop.py +18 -0
- mistralai/models/agents_api_v1_conversations_listop.py +39 -2
- mistralai/models/agentscompletionrequest.py +21 -6
- mistralai/models/agentscompletionstreamrequest.py +21 -6
- mistralai/models/agentupdaterequest.py +18 -2
- mistralai/models/audioencoding.py +13 -0
- mistralai/models/audioformat.py +19 -0
- mistralai/models/audiotranscriptionrequest.py +2 -0
- mistralai/models/batchjobin.py +26 -5
- mistralai/models/batchjobout.py +5 -0
- mistralai/models/batchrequest.py +48 -0
- mistralai/models/chatcompletionrequest.py +22 -5
- mistralai/models/chatcompletionstreamrequest.py +22 -5
- mistralai/models/classificationrequest.py +37 -3
- mistralai/models/conversationrequest.py +15 -4
- mistralai/models/conversationrestartrequest.py +50 -2
- mistralai/models/conversationrestartstreamrequest.py +50 -2
- mistralai/models/conversationstreamrequest.py +15 -4
- mistralai/models/documentout.py +26 -10
- mistralai/models/documentupdatein.py +24 -3
- mistralai/models/embeddingrequest.py +19 -11
- mistralai/models/files_api_routes_list_filesop.py +7 -0
- mistralai/models/fimcompletionrequest.py +8 -9
- mistralai/models/fimcompletionstreamrequest.py +8 -9
- mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +40 -3
- mistralai/models/libraries_documents_list_v1op.py +15 -2
- mistralai/models/libraryout.py +10 -7
- mistralai/models/listfilesout.py +35 -4
- mistralai/models/modelcapabilities.py +13 -4
- mistralai/models/modelconversation.py +8 -2
- mistralai/models/ocrpageobject.py +26 -5
- mistralai/models/ocrrequest.py +17 -1
- mistralai/models/ocrtableobject.py +31 -0
- mistralai/models/prediction.py +4 -0
- mistralai/models/requestsource.py +7 -0
- mistralai/models/responseformat.py +4 -2
- mistralai/models/responseformats.py +0 -1
- mistralai/models/sharingdelete.py +36 -5
- mistralai/models/sharingin.py +36 -5
- mistralai/models/sharingout.py +3 -3
- mistralai/models/toolexecutiondeltaevent.py +13 -4
- mistralai/models/toolexecutiondoneevent.py +13 -4
- mistralai/models/toolexecutionentry.py +9 -4
- mistralai/models/toolexecutionstartedevent.py +13 -4
- mistralai/models/toolfilechunk.py +11 -4
- mistralai/models/toolreferencechunk.py +13 -4
- mistralai/models_.py +2 -14
- mistralai/ocr.py +18 -0
- mistralai/transcriptions.py +4 -4
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/METADATA +162 -152
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/RECORD +168 -144
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/WHEEL +1 -1
- mistralai_azure/_version.py +3 -3
- mistralai_azure/basesdk.py +15 -5
- mistralai_azure/chat.py +59 -98
- mistralai_azure/models/__init__.py +50 -3
- mistralai_azure/models/chatcompletionrequest.py +16 -4
- mistralai_azure/models/chatcompletionstreamrequest.py +16 -4
- mistralai_azure/models/httpvalidationerror.py +11 -6
- mistralai_azure/models/mistralazureerror.py +26 -0
- mistralai_azure/models/no_response_error.py +13 -0
- mistralai_azure/models/prediction.py +4 -0
- mistralai_azure/models/responseformat.py +4 -2
- mistralai_azure/models/responseformats.py +0 -1
- mistralai_azure/models/responsevalidationerror.py +25 -0
- mistralai_azure/models/sdkerror.py +30 -14
- mistralai_azure/models/systemmessage.py +7 -3
- mistralai_azure/models/systemmessagecontentchunks.py +21 -0
- mistralai_azure/models/thinkchunk.py +35 -0
- mistralai_azure/ocr.py +15 -36
- mistralai_azure/utils/__init__.py +18 -5
- mistralai_azure/utils/eventstreaming.py +10 -0
- mistralai_azure/utils/serializers.py +3 -2
- mistralai_azure/utils/unmarshal_json_response.py +24 -0
- mistralai_gcp/_hooks/types.py +7 -0
- mistralai_gcp/_version.py +4 -4
- mistralai_gcp/basesdk.py +27 -25
- mistralai_gcp/chat.py +75 -98
- mistralai_gcp/fim.py +39 -74
- mistralai_gcp/httpclient.py +6 -16
- mistralai_gcp/models/__init__.py +321 -116
- mistralai_gcp/models/assistantmessage.py +1 -1
- mistralai_gcp/models/chatcompletionrequest.py +36 -7
- mistralai_gcp/models/chatcompletionresponse.py +6 -6
- mistralai_gcp/models/chatcompletionstreamrequest.py +36 -7
- mistralai_gcp/models/completionresponsestreamchoice.py +1 -1
- mistralai_gcp/models/deltamessage.py +1 -1
- mistralai_gcp/models/fimcompletionrequest.py +3 -9
- mistralai_gcp/models/fimcompletionresponse.py +6 -6
- mistralai_gcp/models/fimcompletionstreamrequest.py +3 -9
- mistralai_gcp/models/httpvalidationerror.py +11 -6
- mistralai_gcp/models/imageurl.py +1 -1
- mistralai_gcp/models/jsonschema.py +1 -1
- mistralai_gcp/models/mistralgcperror.py +26 -0
- mistralai_gcp/models/mistralpromptmode.py +8 -0
- mistralai_gcp/models/no_response_error.py +13 -0
- mistralai_gcp/models/prediction.py +4 -0
- mistralai_gcp/models/responseformat.py +5 -3
- mistralai_gcp/models/responseformats.py +0 -1
- mistralai_gcp/models/responsevalidationerror.py +25 -0
- mistralai_gcp/models/sdkerror.py +30 -14
- mistralai_gcp/models/systemmessage.py +7 -3
- mistralai_gcp/models/systemmessagecontentchunks.py +21 -0
- mistralai_gcp/models/thinkchunk.py +35 -0
- mistralai_gcp/models/toolmessage.py +1 -1
- mistralai_gcp/models/usageinfo.py +71 -8
- mistralai_gcp/models/usermessage.py +1 -1
- mistralai_gcp/sdk.py +12 -10
- mistralai_gcp/sdkconfiguration.py +0 -7
- mistralai_gcp/types/basemodel.py +3 -3
- mistralai_gcp/utils/__init__.py +143 -45
- mistralai_gcp/utils/datetimes.py +23 -0
- mistralai_gcp/utils/enums.py +67 -27
- mistralai_gcp/utils/eventstreaming.py +10 -0
- mistralai_gcp/utils/forms.py +49 -28
- mistralai_gcp/utils/serializers.py +33 -3
- mistralai_gcp/utils/unmarshal_json_response.py +24 -0
- {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/licenses/LICENSE +0 -0
mistralai_gcp/fim.py
CHANGED
@@ -5,6 +5,7 @@ from mistralai_gcp import models, utils
 from mistralai_gcp._hooks import HookContext
 from mistralai_gcp.types import OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
+from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response
 from typing import Any, Mapping, Optional, Union


@@ -38,7 +39,7 @@ class Fim(BaseSDK):

         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

-        :param model: ID of the model to use.
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.

@@ -105,6 +106,7 @@ class Fim(BaseSDK):

         http_res = self.do_request(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_fim",
                 oauth2_scopes=[],

@@ -122,32 +124,23 @@ class Fim(BaseSDK):
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
+                client_ref=self,
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = utils.stream_to_text(http_res)
-            response_data =
-
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res, http_res_text
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res, http_res_text)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-        content_type = http_res.headers.get("Content-Type")
         http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res, http_res_text)

     async def stream_async(
         self,

@@ -176,7 +169,7 @@ class Fim(BaseSDK):

         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

-        :param model: ID of the model to use.
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.

@@ -243,6 +236,7 @@ class Fim(BaseSDK):

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_fim",
                 oauth2_scopes=[],

@@ -260,32 +254,23 @@ class Fim(BaseSDK):
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
+                client_ref=self,
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            response_data =
-
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res, http_res_text
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res, http_res_text)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-        content_type = http_res.headers.get("Content-Type")
         http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res, http_res_text)

     def complete(
         self,

@@ -314,7 +299,7 @@ class Fim(BaseSDK):

         FIM completion.

-        :param model: ID of the model to use.
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.

@@ -381,6 +366,7 @@ class Fim(BaseSDK):

         http_res = self.do_request(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="fim_completion_v1_fim_completions_post",
                 oauth2_scopes=[],

@@ -393,33 +379,22 @@ class Fim(BaseSDK):

         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return
-
+            return unmarshal_json_response(
+                Optional[models.FIMCompletionResponse], http_res
             )
         if utils.match_response(http_res, "422", "application/json"):
-            response_data =
-
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)

     async def complete_async(
         self,

@@ -448,7 +423,7 @@ class Fim(BaseSDK):

         FIM completion.

-        :param model: ID of the model to use.
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.

@@ -515,6 +490,7 @@ class Fim(BaseSDK):

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="fim_completion_v1_fim_completions_post",
                 oauth2_scopes=[],

@@ -527,30 +503,19 @@ class Fim(BaseSDK):

         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return
-
+            return unmarshal_json_response(
+                Optional[models.FIMCompletionResponse], http_res
             )
         if utils.match_response(http_res, "422", "application/json"):
-            response_data =
-
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)
mistralai_gcp/httpclient.py
CHANGED
@@ -2,7 +2,6 @@

 # pyright: reportReturnType = false
 import asyncio
-from concurrent.futures import ThreadPoolExecutor
 from typing_extensions import Protocol, runtime_checkable
 import httpx
 from typing import Any, Optional, Union

@@ -116,21 +115,12 @@ def close_clients(
             pass

     if async_client is not None and not async_client_supplied:
-        is_async = False
         try:
-            asyncio.get_running_loop()
-
+            loop = asyncio.get_running_loop()
+            asyncio.run_coroutine_threadsafe(async_client.aclose(), loop)
         except RuntimeError:
-
-
-            try:
-                # If this function is called in an async loop then start another
-                # loop in a separate thread to close the async http client.
-                if is_async:
-                    with ThreadPoolExecutor(max_workers=1) as executor:
-                        future = executor.submit(asyncio.run, async_client.aclose())
-                        future.result()
-                else:
+            try:
                 asyncio.run(async_client.aclose())
-
-
+            except RuntimeError:
+                # best effort
+                pass