mistralai 1.9.11__py3-none-any.whl → 1.10.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (151)
  1. mistralai/_hooks/registration.py +5 -0
  2. mistralai/_hooks/tracing.py +75 -0
  3. mistralai/_version.py +2 -2
  4. mistralai/accesses.py +8 -8
  5. mistralai/agents.py +29 -17
  6. mistralai/chat.py +41 -29
  7. mistralai/classifiers.py +13 -1
  8. mistralai/conversations.py +294 -62
  9. mistralai/documents.py +19 -3
  10. mistralai/embeddings.py +13 -7
  11. mistralai/extra/README.md +1 -1
  12. mistralai/extra/mcp/auth.py +10 -11
  13. mistralai/extra/mcp/base.py +17 -16
  14. mistralai/extra/mcp/sse.py +13 -15
  15. mistralai/extra/mcp/stdio.py +5 -6
  16. mistralai/extra/observability/__init__.py +15 -0
  17. mistralai/extra/observability/otel.py +372 -0
  18. mistralai/extra/run/context.py +33 -43
  19. mistralai/extra/run/result.py +29 -30
  20. mistralai/extra/run/tools.py +34 -23
  21. mistralai/extra/struct_chat.py +15 -8
  22. mistralai/extra/utils/response_format.py +5 -3
  23. mistralai/files.py +6 -0
  24. mistralai/fim.py +17 -5
  25. mistralai/mistral_agents.py +229 -1
  26. mistralai/mistral_jobs.py +39 -13
  27. mistralai/models/__init__.py +99 -3
  28. mistralai/models/agent.py +15 -2
  29. mistralai/models/agentconversation.py +11 -3
  30. mistralai/models/agentcreationrequest.py +6 -2
  31. mistralai/models/agents_api_v1_agents_deleteop.py +16 -0
  32. mistralai/models/agents_api_v1_agents_getop.py +40 -3
  33. mistralai/models/agents_api_v1_agents_listop.py +72 -2
  34. mistralai/models/agents_api_v1_conversations_deleteop.py +18 -0
  35. mistralai/models/agents_api_v1_conversations_listop.py +39 -2
  36. mistralai/models/agentscompletionrequest.py +21 -6
  37. mistralai/models/agentscompletionstreamrequest.py +21 -6
  38. mistralai/models/agentupdaterequest.py +18 -2
  39. mistralai/models/audioencoding.py +13 -0
  40. mistralai/models/audioformat.py +19 -0
  41. mistralai/models/audiotranscriptionrequest.py +2 -0
  42. mistralai/models/batchjobin.py +26 -5
  43. mistralai/models/batchjobout.py +5 -0
  44. mistralai/models/batchrequest.py +48 -0
  45. mistralai/models/chatcompletionrequest.py +22 -5
  46. mistralai/models/chatcompletionstreamrequest.py +22 -5
  47. mistralai/models/classificationrequest.py +37 -3
  48. mistralai/models/conversationrequest.py +15 -4
  49. mistralai/models/conversationrestartrequest.py +50 -2
  50. mistralai/models/conversationrestartstreamrequest.py +50 -2
  51. mistralai/models/conversationstreamrequest.py +15 -4
  52. mistralai/models/documentout.py +26 -10
  53. mistralai/models/documentupdatein.py +24 -3
  54. mistralai/models/embeddingrequest.py +19 -11
  55. mistralai/models/files_api_routes_list_filesop.py +7 -0
  56. mistralai/models/fimcompletionrequest.py +8 -9
  57. mistralai/models/fimcompletionstreamrequest.py +8 -9
  58. mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +40 -3
  59. mistralai/models/libraries_documents_list_v1op.py +15 -2
  60. mistralai/models/libraryout.py +10 -7
  61. mistralai/models/listfilesout.py +35 -4
  62. mistralai/models/modelcapabilities.py +13 -4
  63. mistralai/models/modelconversation.py +8 -2
  64. mistralai/models/ocrpageobject.py +26 -5
  65. mistralai/models/ocrrequest.py +17 -1
  66. mistralai/models/ocrtableobject.py +31 -0
  67. mistralai/models/prediction.py +4 -0
  68. mistralai/models/requestsource.py +7 -0
  69. mistralai/models/responseformat.py +4 -2
  70. mistralai/models/responseformats.py +0 -1
  71. mistralai/models/sharingdelete.py +36 -5
  72. mistralai/models/sharingin.py +36 -5
  73. mistralai/models/sharingout.py +3 -3
  74. mistralai/models/toolexecutiondeltaevent.py +13 -4
  75. mistralai/models/toolexecutiondoneevent.py +13 -4
  76. mistralai/models/toolexecutionentry.py +9 -4
  77. mistralai/models/toolexecutionstartedevent.py +13 -4
  78. mistralai/models/toolfilechunk.py +11 -4
  79. mistralai/models/toolreferencechunk.py +13 -4
  80. mistralai/models_.py +2 -14
  81. mistralai/ocr.py +18 -0
  82. mistralai/transcriptions.py +4 -4
  83. {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/METADATA +162 -152
  84. {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/RECORD +168 -144
  85. {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/WHEEL +1 -1
  86. mistralai_azure/_version.py +3 -3
  87. mistralai_azure/basesdk.py +15 -5
  88. mistralai_azure/chat.py +59 -98
  89. mistralai_azure/models/__init__.py +50 -3
  90. mistralai_azure/models/chatcompletionrequest.py +16 -4
  91. mistralai_azure/models/chatcompletionstreamrequest.py +16 -4
  92. mistralai_azure/models/httpvalidationerror.py +11 -6
  93. mistralai_azure/models/mistralazureerror.py +26 -0
  94. mistralai_azure/models/no_response_error.py +13 -0
  95. mistralai_azure/models/prediction.py +4 -0
  96. mistralai_azure/models/responseformat.py +4 -2
  97. mistralai_azure/models/responseformats.py +0 -1
  98. mistralai_azure/models/responsevalidationerror.py +25 -0
  99. mistralai_azure/models/sdkerror.py +30 -14
  100. mistralai_azure/models/systemmessage.py +7 -3
  101. mistralai_azure/models/systemmessagecontentchunks.py +21 -0
  102. mistralai_azure/models/thinkchunk.py +35 -0
  103. mistralai_azure/ocr.py +15 -36
  104. mistralai_azure/utils/__init__.py +18 -5
  105. mistralai_azure/utils/eventstreaming.py +10 -0
  106. mistralai_azure/utils/serializers.py +3 -2
  107. mistralai_azure/utils/unmarshal_json_response.py +24 -0
  108. mistralai_gcp/_hooks/types.py +7 -0
  109. mistralai_gcp/_version.py +4 -4
  110. mistralai_gcp/basesdk.py +27 -25
  111. mistralai_gcp/chat.py +75 -98
  112. mistralai_gcp/fim.py +39 -74
  113. mistralai_gcp/httpclient.py +6 -16
  114. mistralai_gcp/models/__init__.py +321 -116
  115. mistralai_gcp/models/assistantmessage.py +1 -1
  116. mistralai_gcp/models/chatcompletionrequest.py +36 -7
  117. mistralai_gcp/models/chatcompletionresponse.py +6 -6
  118. mistralai_gcp/models/chatcompletionstreamrequest.py +36 -7
  119. mistralai_gcp/models/completionresponsestreamchoice.py +1 -1
  120. mistralai_gcp/models/deltamessage.py +1 -1
  121. mistralai_gcp/models/fimcompletionrequest.py +3 -9
  122. mistralai_gcp/models/fimcompletionresponse.py +6 -6
  123. mistralai_gcp/models/fimcompletionstreamrequest.py +3 -9
  124. mistralai_gcp/models/httpvalidationerror.py +11 -6
  125. mistralai_gcp/models/imageurl.py +1 -1
  126. mistralai_gcp/models/jsonschema.py +1 -1
  127. mistralai_gcp/models/mistralgcperror.py +26 -0
  128. mistralai_gcp/models/mistralpromptmode.py +8 -0
  129. mistralai_gcp/models/no_response_error.py +13 -0
  130. mistralai_gcp/models/prediction.py +4 -0
  131. mistralai_gcp/models/responseformat.py +5 -3
  132. mistralai_gcp/models/responseformats.py +0 -1
  133. mistralai_gcp/models/responsevalidationerror.py +25 -0
  134. mistralai_gcp/models/sdkerror.py +30 -14
  135. mistralai_gcp/models/systemmessage.py +7 -3
  136. mistralai_gcp/models/systemmessagecontentchunks.py +21 -0
  137. mistralai_gcp/models/thinkchunk.py +35 -0
  138. mistralai_gcp/models/toolmessage.py +1 -1
  139. mistralai_gcp/models/usageinfo.py +71 -8
  140. mistralai_gcp/models/usermessage.py +1 -1
  141. mistralai_gcp/sdk.py +12 -10
  142. mistralai_gcp/sdkconfiguration.py +0 -7
  143. mistralai_gcp/types/basemodel.py +3 -3
  144. mistralai_gcp/utils/__init__.py +143 -45
  145. mistralai_gcp/utils/datetimes.py +23 -0
  146. mistralai_gcp/utils/enums.py +67 -27
  147. mistralai_gcp/utils/eventstreaming.py +10 -0
  148. mistralai_gcp/utils/forms.py +49 -28
  149. mistralai_gcp/utils/serializers.py +33 -3
  150. mistralai_gcp/utils/unmarshal_json_response.py +24 -0
  151. {mistralai-1.9.11.dist-info → mistralai-1.10.1.dist-info}/licenses/LICENSE +0 -0
mistralai_gcp/fim.py CHANGED
@@ -5,6 +5,7 @@ from mistralai_gcp import models, utils
 from mistralai_gcp._hooks import HookContext
 from mistralai_gcp.types import OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
+from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response
 from typing import Any, Mapping, Optional, Union
 
 
@@ -38,7 +39,7 @@ class Fim(BaseSDK):
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -105,6 +106,7 @@
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_fim",
                 oauth2_scopes=[],
@@ -122,32 +124,23 @@
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
+                client_ref=self,
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = utils.stream_to_text(http_res)
-            response_data = utils.unmarshal_json(
-                http_res_text, models.HTTPValidationErrorData
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res, http_res_text
             )
-            raise models.HTTPValidationError(data=response_data)
+            raise models.HTTPValidationError(response_data, http_res, http_res_text)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-        content_type = http_res.headers.get("Content-Type")
         http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res, http_res_text)
 
     async def stream_async(
         self,
@@ -176,7 +169,7 @@
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -243,6 +236,7 @@
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_fim",
                 oauth2_scopes=[],
@@ -260,32 +254,23 @@
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
+                client_ref=self,
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            response_data = utils.unmarshal_json(
-                http_res_text, models.HTTPValidationErrorData
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res, http_res_text
             )
-            raise models.HTTPValidationError(data=response_data)
+            raise models.HTTPValidationError(response_data, http_res, http_res_text)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-        content_type = http_res.headers.get("Content-Type")
         http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res, http_res_text)
 
     def complete(
         self,
@@ -314,7 +299,7 @@
 
         FIM completion.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -381,6 +366,7 @@
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="fim_completion_v1_fim_completions_post",
                 oauth2_scopes=[],
@@ -393,33 +379,22 @@
 
         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(
-                http_res.text, Optional[models.FIMCompletionResponse]
+            return unmarshal_json_response(
+                Optional[models.FIMCompletionResponse], http_res
             )
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
             )
-            raise models.HTTPValidationError(data=response_data)
+            raise models.HTTPValidationError(response_data, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-        content_type = http_res.headers.get("Content-Type")
-        http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)
 
     async def complete_async(
         self,
@@ -448,7 +423,7 @@
 
         FIM completion.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -515,6 +490,7 @@
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="fim_completion_v1_fim_completions_post",
                 oauth2_scopes=[],
@@ -527,30 +503,19 @@
 
         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(
-                http_res.text, Optional[models.FIMCompletionResponse]
+            return unmarshal_json_response(
+                Optional[models.FIMCompletionResponse], http_res
            )
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
             )
-            raise models.HTTPValidationError(data=response_data)
+            raise models.HTTPValidationError(response_data, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-        content_type = http_res.headers.get("Content-Type")
-        http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)
mistralai_gcp/httpclient.py CHANGED
@@ -2,7 +2,6 @@
 
 # pyright: reportReturnType = false
 import asyncio
-from concurrent.futures import ThreadPoolExecutor
 from typing_extensions import Protocol, runtime_checkable
 import httpx
 from typing import Any, Optional, Union
@@ -116,21 +115,12 @@ def close_clients(
             pass
 
     if async_client is not None and not async_client_supplied:
-        is_async = False
         try:
-            asyncio.get_running_loop()
-            is_async = True
+            loop = asyncio.get_running_loop()
+            asyncio.run_coroutine_threadsafe(async_client.aclose(), loop)
         except RuntimeError:
-            pass
-
-        try:
-            # If this function is called in an async loop then start another
-            # loop in a separate thread to close the async http client.
-            if is_async:
-                with ThreadPoolExecutor(max_workers=1) as executor:
-                    future = executor.submit(asyncio.run, async_client.aclose())
-                    future.result()
-            else:
+            try:
                 asyncio.run(async_client.aclose())
-        except Exception:
-            pass
+            except RuntimeError:
+                # best effort
+                pass
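The rewritten `close_clients` drops the thread-pool workaround: when a loop is already running, the coroutine is scheduled onto it with `asyncio.run_coroutine_threadsafe` and the function returns without waiting; only when no loop exists does it fall back to `asyncio.run`. A standalone sketch of the same pattern, with a stub async resource standing in for `httpx.AsyncClient`:

```python
import asyncio


class StubAsyncClient:
    # Stand-in for httpx.AsyncClient; only the aclose() shape matters here.
    async def aclose(self) -> None:
        print("closed")


def close_best_effort(client: StubAsyncClient) -> None:
    try:
        # Inside async code: schedule the close on the running loop and
        # return immediately (fire-and-forget, same as the new SDK code).
        loop = asyncio.get_running_loop()
        asyncio.run_coroutine_threadsafe(client.aclose(), loop)
    except RuntimeError:
        # No running loop: safe to spin one up just for the close.
        try:
            asyncio.run(client.aclose())
        except RuntimeError:
            # best effort, mirroring the SDK
            pass


close_best_effort(StubAsyncClient())  # no running loop -> asyncio.run path
```

Note that blocking on the future returned by `run_coroutine_threadsafe` from the loop's own thread would deadlock, which is presumably why the new code never calls `future.result()` and treats the close as best effort.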