mistralai 1.10.0__py3-none-any.whl → 1.10.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. mistralai/_hooks/tracing.py +28 -3
  2. mistralai/_version.py +2 -2
  3. mistralai/classifiers.py +13 -1
  4. mistralai/embeddings.py +7 -1
  5. mistralai/extra/README.md +1 -1
  6. mistralai/extra/mcp/auth.py +10 -11
  7. mistralai/extra/mcp/base.py +17 -16
  8. mistralai/extra/mcp/sse.py +13 -15
  9. mistralai/extra/mcp/stdio.py +5 -6
  10. mistralai/extra/observability/otel.py +47 -68
  11. mistralai/extra/run/context.py +33 -43
  12. mistralai/extra/run/result.py +29 -30
  13. mistralai/extra/run/tools.py +8 -9
  14. mistralai/extra/struct_chat.py +15 -8
  15. mistralai/extra/utils/response_format.py +5 -3
  16. mistralai/mistral_jobs.py +31 -5
  17. mistralai/models/__init__.py +30 -1
  18. mistralai/models/agents_api_v1_agents_listop.py +1 -1
  19. mistralai/models/agents_api_v1_conversations_listop.py +1 -1
  20. mistralai/models/audioencoding.py +13 -0
  21. mistralai/models/audioformat.py +19 -0
  22. mistralai/models/batchjobin.py +17 -6
  23. mistralai/models/batchjobout.py +5 -0
  24. mistralai/models/batchrequest.py +48 -0
  25. mistralai/models/classificationrequest.py +37 -3
  26. mistralai/models/embeddingrequest.py +11 -3
  27. mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +40 -3
  28. mistralai/models/toolfilechunk.py +11 -4
  29. mistralai/models/toolreferencechunk.py +13 -4
  30. {mistralai-1.10.0.dist-info → mistralai-1.10.1.dist-info}/METADATA +142 -150
  31. {mistralai-1.10.0.dist-info → mistralai-1.10.1.dist-info}/RECORD +122 -105
  32. {mistralai-1.10.0.dist-info → mistralai-1.10.1.dist-info}/WHEEL +1 -1
  33. mistralai_azure/_version.py +3 -3
  34. mistralai_azure/basesdk.py +15 -5
  35. mistralai_azure/chat.py +59 -98
  36. mistralai_azure/models/__init__.py +50 -3
  37. mistralai_azure/models/chatcompletionrequest.py +16 -4
  38. mistralai_azure/models/chatcompletionstreamrequest.py +16 -4
  39. mistralai_azure/models/httpvalidationerror.py +11 -6
  40. mistralai_azure/models/mistralazureerror.py +26 -0
  41. mistralai_azure/models/no_response_error.py +13 -0
  42. mistralai_azure/models/prediction.py +4 -0
  43. mistralai_azure/models/responseformat.py +4 -2
  44. mistralai_azure/models/responseformats.py +0 -1
  45. mistralai_azure/models/responsevalidationerror.py +25 -0
  46. mistralai_azure/models/sdkerror.py +30 -14
  47. mistralai_azure/models/systemmessage.py +7 -3
  48. mistralai_azure/models/systemmessagecontentchunks.py +21 -0
  49. mistralai_azure/models/thinkchunk.py +35 -0
  50. mistralai_azure/ocr.py +15 -36
  51. mistralai_azure/utils/__init__.py +18 -5
  52. mistralai_azure/utils/eventstreaming.py +10 -0
  53. mistralai_azure/utils/serializers.py +3 -2
  54. mistralai_azure/utils/unmarshal_json_response.py +24 -0
  55. mistralai_gcp/_hooks/types.py +7 -0
  56. mistralai_gcp/_version.py +4 -4
  57. mistralai_gcp/basesdk.py +27 -25
  58. mistralai_gcp/chat.py +75 -98
  59. mistralai_gcp/fim.py +39 -74
  60. mistralai_gcp/httpclient.py +6 -16
  61. mistralai_gcp/models/__init__.py +321 -116
  62. mistralai_gcp/models/assistantmessage.py +1 -1
  63. mistralai_gcp/models/chatcompletionrequest.py +36 -7
  64. mistralai_gcp/models/chatcompletionresponse.py +6 -6
  65. mistralai_gcp/models/chatcompletionstreamrequest.py +36 -7
  66. mistralai_gcp/models/completionresponsestreamchoice.py +1 -1
  67. mistralai_gcp/models/deltamessage.py +1 -1
  68. mistralai_gcp/models/fimcompletionrequest.py +3 -9
  69. mistralai_gcp/models/fimcompletionresponse.py +6 -6
  70. mistralai_gcp/models/fimcompletionstreamrequest.py +3 -9
  71. mistralai_gcp/models/httpvalidationerror.py +11 -6
  72. mistralai_gcp/models/imageurl.py +1 -1
  73. mistralai_gcp/models/jsonschema.py +1 -1
  74. mistralai_gcp/models/mistralgcperror.py +26 -0
  75. mistralai_gcp/models/mistralpromptmode.py +8 -0
  76. mistralai_gcp/models/no_response_error.py +13 -0
  77. mistralai_gcp/models/prediction.py +4 -0
  78. mistralai_gcp/models/responseformat.py +5 -3
  79. mistralai_gcp/models/responseformats.py +0 -1
  80. mistralai_gcp/models/responsevalidationerror.py +25 -0
  81. mistralai_gcp/models/sdkerror.py +30 -14
  82. mistralai_gcp/models/systemmessage.py +7 -3
  83. mistralai_gcp/models/systemmessagecontentchunks.py +21 -0
  84. mistralai_gcp/models/thinkchunk.py +35 -0
  85. mistralai_gcp/models/toolmessage.py +1 -1
  86. mistralai_gcp/models/usageinfo.py +71 -8
  87. mistralai_gcp/models/usermessage.py +1 -1
  88. mistralai_gcp/sdk.py +12 -10
  89. mistralai_gcp/sdkconfiguration.py +0 -7
  90. mistralai_gcp/types/basemodel.py +3 -3
  91. mistralai_gcp/utils/__init__.py +143 -45
  92. mistralai_gcp/utils/datetimes.py +23 -0
  93. mistralai_gcp/utils/enums.py +67 -27
  94. mistralai_gcp/utils/eventstreaming.py +10 -0
  95. mistralai_gcp/utils/forms.py +49 -28
  96. mistralai_gcp/utils/serializers.py +33 -3
  97. mistralai_gcp/utils/unmarshal_json_response.py +24 -0
  98. {mistralai-1.10.0.dist-info → mistralai-1.10.1.dist-info}/licenses/LICENSE +0 -0
mistralai_gcp/chat.py CHANGED
@@ -5,6 +5,7 @@ from mistralai_gcp import models, utils
  from mistralai_gcp._hooks import HookContext
  from mistralai_gcp.types import OptionalNullable, UNSET
  from mistralai_gcp.utils import eventstreaming
+ from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response
  from typing import Any, List, Mapping, Optional, Union


@@ -41,6 +42,7 @@ class Chat(BaseSDK):
  Union[models.Prediction, models.PredictionTypedDict]
  ] = None,
  parallel_tool_calls: Optional[bool] = None,
+ prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
@@ -58,14 +60,15 @@ class Chat(BaseSDK):
  :param stream:
  :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
  :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
- :param response_format:
- :param tools:
- :param tool_choice:
- :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
- :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
+ :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
+ :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
+ :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
+ :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
  :param n: Number of completions to return for each request, input tokens are only billed once.
- :param prediction:
- :param parallel_tool_calls:
+ :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
+ :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
  :param retries: Override the default retry configuration for this method
  :param server_url: Override the default server URL for this method
  :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -104,6 +107,7 @@ class Chat(BaseSDK):
  prediction, Optional[models.Prediction]
  ),
  parallel_tool_calls=parallel_tool_calls,
+ prompt_mode=prompt_mode,
  )

  req = self._build_request(
@@ -135,6 +139,7 @@ class Chat(BaseSDK):

  http_res = self.do_request(
  hook_ctx=HookContext(
+ config=self.sdk_configuration,
  base_url=base_url or "",
  operation_id="stream_chat",
  oauth2_scopes=[],
@@ -152,32 +157,23 @@ class Chat(BaseSDK):
  http_res,
  lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
  sentinel="[DONE]",
+ client_ref=self,
  )
  if utils.match_response(http_res, "422", "application/json"):
  http_res_text = utils.stream_to_text(http_res)
- response_data = utils.unmarshal_json(
- http_res_text, models.HTTPValidationErrorData
+ response_data = unmarshal_json_response(
+ models.HTTPValidationErrorData, http_res, http_res_text
  )
- raise models.HTTPValidationError(data=response_data)
+ raise models.HTTPValidationError(response_data, http_res, http_res_text)
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = utils.stream_to_text(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)
  if utils.match_response(http_res, "5XX", "*"):
  http_res_text = utils.stream_to_text(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)

- content_type = http_res.headers.get("Content-Type")
  http_res_text = utils.stream_to_text(http_res)
- raise models.SDKError(
- f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
- http_res.status_code,
- http_res_text,
- http_res,
- )
+ raise models.SDKError("Unexpected response received", http_res, http_res_text)

  async def stream_async(
  self,
@@ -209,6 +205,7 @@ class Chat(BaseSDK):
  Union[models.Prediction, models.PredictionTypedDict]
  ] = None,
  parallel_tool_calls: Optional[bool] = None,
+ prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
@@ -226,14 +223,15 @@ class Chat(BaseSDK):
  :param stream:
  :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
  :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
- :param response_format:
- :param tools:
- :param tool_choice:
- :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
- :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
+ :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
+ :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
+ :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
+ :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
  :param n: Number of completions to return for each request, input tokens are only billed once.
- :param prediction:
- :param parallel_tool_calls:
+ :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
+ :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
  :param retries: Override the default retry configuration for this method
  :param server_url: Override the default server URL for this method
  :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -272,6 +270,7 @@ class Chat(BaseSDK):
  prediction, Optional[models.Prediction]
  ),
  parallel_tool_calls=parallel_tool_calls,
+ prompt_mode=prompt_mode,
  )

  req = self._build_request_async(
@@ -303,6 +302,7 @@ class Chat(BaseSDK):

  http_res = await self.do_request_async(
  hook_ctx=HookContext(
+ config=self.sdk_configuration,
  base_url=base_url or "",
  operation_id="stream_chat",
  oauth2_scopes=[],
@@ -320,32 +320,23 @@ class Chat(BaseSDK):
  http_res,
  lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
  sentinel="[DONE]",
+ client_ref=self,
  )
  if utils.match_response(http_res, "422", "application/json"):
  http_res_text = await utils.stream_to_text_async(http_res)
- response_data = utils.unmarshal_json(
- http_res_text, models.HTTPValidationErrorData
+ response_data = unmarshal_json_response(
+ models.HTTPValidationErrorData, http_res, http_res_text
  )
- raise models.HTTPValidationError(data=response_data)
+ raise models.HTTPValidationError(response_data, http_res, http_res_text)
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = await utils.stream_to_text_async(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)
  if utils.match_response(http_res, "5XX", "*"):
  http_res_text = await utils.stream_to_text_async(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)

- content_type = http_res.headers.get("Content-Type")
  http_res_text = await utils.stream_to_text_async(http_res)
- raise models.SDKError(
- f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
- http_res.status_code,
- http_res_text,
- http_res,
- )
+ raise models.SDKError("Unexpected response received", http_res, http_res_text)

  def complete(
  self,
@@ -385,6 +376,7 @@ class Chat(BaseSDK):
  Union[models.Prediction, models.PredictionTypedDict]
  ] = None,
  parallel_tool_calls: Optional[bool] = None,
+ prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
@@ -400,14 +392,15 @@ class Chat(BaseSDK):
  :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
  :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
  :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
- :param response_format:
- :param tools:
- :param tool_choice:
- :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
- :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
+ :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
+ :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
+ :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
+ :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
  :param n: Number of completions to return for each request, input tokens are only billed once.
- :param prediction:
- :param parallel_tool_calls:
+ :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
+ :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
  :param retries: Override the default retry configuration for this method
  :param server_url: Override the default server URL for this method
  :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -448,6 +441,7 @@ class Chat(BaseSDK):
  prediction, Optional[models.Prediction]
  ),
  parallel_tool_calls=parallel_tool_calls,
+ prompt_mode=prompt_mode,
  )

  req = self._build_request(
@@ -479,6 +473,7 @@ class Chat(BaseSDK):

  http_res = self.do_request(
  hook_ctx=HookContext(
+ config=self.sdk_configuration,
  base_url=base_url or "",
  operation_id="chat_completion_v1_chat_completions_post",
  oauth2_scopes=[],
@@ -491,33 +486,22 @@ class Chat(BaseSDK):

  response_data: Any = None
  if utils.match_response(http_res, "200", "application/json"):
- return utils.unmarshal_json(
- http_res.text, Optional[models.ChatCompletionResponse]
+ return unmarshal_json_response(
+ Optional[models.ChatCompletionResponse], http_res
  )
  if utils.match_response(http_res, "422", "application/json"):
- response_data = utils.unmarshal_json(
- http_res.text, models.HTTPValidationErrorData
+ response_data = unmarshal_json_response(
+ models.HTTPValidationErrorData, http_res
  )
- raise models.HTTPValidationError(data=response_data)
+ raise models.HTTPValidationError(response_data, http_res)
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = utils.stream_to_text(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)
  if utils.match_response(http_res, "5XX", "*"):
  http_res_text = utils.stream_to_text(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)

- content_type = http_res.headers.get("Content-Type")
- http_res_text = utils.stream_to_text(http_res)
- raise models.SDKError(
- f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
- http_res.status_code,
- http_res_text,
- http_res,
- )
+ raise models.SDKError("Unexpected response received", http_res)

  async def complete_async(
  self,
@@ -557,6 +541,7 @@ class Chat(BaseSDK):
  Union[models.Prediction, models.PredictionTypedDict]
  ] = None,
  parallel_tool_calls: Optional[bool] = None,
+ prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
@@ -572,14 +557,15 @@ class Chat(BaseSDK):
  :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
  :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
  :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
- :param response_format:
- :param tools:
- :param tool_choice:
- :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
- :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
+ :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
+ :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
+ :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
+ :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
  :param n: Number of completions to return for each request, input tokens are only billed once.
- :param prediction:
- :param parallel_tool_calls:
+ :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.
+ :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.
+ :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.
  :param retries: Override the default retry configuration for this method
  :param server_url: Override the default server URL for this method
  :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -620,6 +606,7 @@ class Chat(BaseSDK):
  prediction, Optional[models.Prediction]
  ),
  parallel_tool_calls=parallel_tool_calls,
+ prompt_mode=prompt_mode,
  )

  req = self._build_request_async(
@@ -651,6 +638,7 @@ class Chat(BaseSDK):

  http_res = await self.do_request_async(
  hook_ctx=HookContext(
+ config=self.sdk_configuration,
  base_url=base_url or "",
  operation_id="chat_completion_v1_chat_completions_post",
  oauth2_scopes=[],
@@ -663,30 +651,19 @@ class Chat(BaseSDK):

  response_data: Any = None
  if utils.match_response(http_res, "200", "application/json"):
- return utils.unmarshal_json(
- http_res.text, Optional[models.ChatCompletionResponse]
+ return unmarshal_json_response(
+ Optional[models.ChatCompletionResponse], http_res
  )
  if utils.match_response(http_res, "422", "application/json"):
- response_data = utils.unmarshal_json(
- http_res.text, models.HTTPValidationErrorData
+ response_data = unmarshal_json_response(
+ models.HTTPValidationErrorData, http_res
  )
- raise models.HTTPValidationError(data=response_data)
+ raise models.HTTPValidationError(response_data, http_res)
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = await utils.stream_to_text_async(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)
  if utils.match_response(http_res, "5XX", "*"):
  http_res_text = await utils.stream_to_text_async(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)

- content_type = http_res.headers.get("Content-Type")
- http_res_text = await utils.stream_to_text_async(http_res)
- raise models.SDKError(
- f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
- http_res.status_code,
- http_res_text,
- http_res,
- )
+ raise models.SDKError("Unexpected response received", http_res)
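Taken together, the chat.py changes add a `prompt_mode` request field, thread the SDK configuration into `HookContext`, and route response parsing and error reporting through the new `unmarshal_json_response` helper and the slimmer `SDKError(message, http_res, body)` constructor. A minimal sketch of how a caller might exercise the new parameter is below; the client construction (`MistralGoogleCloud`, `project_id`/`region`) and the model id follow the package README and are assumptions, not part of this diff.

```python
# Illustrative sketch only: client construction and model id are assumptions,
# not taken from this diff.
from mistralai_gcp import MistralGoogleCloud, models

client = MistralGoogleCloud(project_id="my-gcp-project", region="europe-west4")

try:
    res = client.chat.complete(
        model="mistral-large-2411",  # assumed model id
        messages=[{"role": "user", "content": "Summarize fill-in-the-middle in one sentence."}],
        prompt_mode="reasoning",     # new in 1.10.1: opt into the reasoning system prompt
    )
    if res and res.choices:
        print(res.choices[0].message.content)
except models.HTTPValidationError as err:
    # 422 bodies are now parsed via unmarshal_json_response and the exception
    # carries the raw HTTP response alongside the parsed data.
    print(err)
except models.SDKError as err:
    # Other non-2XX responses now raise SDKError(message, http_res, body).
    print(err)
```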
mistralai_gcp/fim.py CHANGED
@@ -5,6 +5,7 @@ from mistralai_gcp import models, utils
  from mistralai_gcp._hooks import HookContext
  from mistralai_gcp.types import OptionalNullable, UNSET
  from mistralai_gcp.utils import eventstreaming
+ from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response
  from typing import Any, Mapping, Optional, Union


@@ -38,7 +39,7 @@ class Fim(BaseSDK):

  Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

- :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+ :param model: ID of the model with FIM to use.
  :param prompt: The text/code to complete.
  :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
  :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -105,6 +106,7 @@ class Fim(BaseSDK):

  http_res = self.do_request(
  hook_ctx=HookContext(
+ config=self.sdk_configuration,
  base_url=base_url or "",
  operation_id="stream_fim",
  oauth2_scopes=[],
@@ -122,32 +124,23 @@ class Fim(BaseSDK):
  http_res,
  lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
  sentinel="[DONE]",
+ client_ref=self,
  )
  if utils.match_response(http_res, "422", "application/json"):
  http_res_text = utils.stream_to_text(http_res)
- response_data = utils.unmarshal_json(
- http_res_text, models.HTTPValidationErrorData
+ response_data = unmarshal_json_response(
+ models.HTTPValidationErrorData, http_res, http_res_text
  )
- raise models.HTTPValidationError(data=response_data)
+ raise models.HTTPValidationError(response_data, http_res, http_res_text)
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = utils.stream_to_text(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)
  if utils.match_response(http_res, "5XX", "*"):
  http_res_text = utils.stream_to_text(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)

- content_type = http_res.headers.get("Content-Type")
  http_res_text = utils.stream_to_text(http_res)
- raise models.SDKError(
- f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
- http_res.status_code,
- http_res_text,
- http_res,
- )
+ raise models.SDKError("Unexpected response received", http_res, http_res_text)

  async def stream_async(
  self,
@@ -176,7 +169,7 @@ class Fim(BaseSDK):

  Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

- :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+ :param model: ID of the model with FIM to use.
  :param prompt: The text/code to complete.
  :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
  :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -243,6 +236,7 @@ class Fim(BaseSDK):

  http_res = await self.do_request_async(
  hook_ctx=HookContext(
+ config=self.sdk_configuration,
  base_url=base_url or "",
  operation_id="stream_fim",
  oauth2_scopes=[],
@@ -260,32 +254,23 @@ class Fim(BaseSDK):
  http_res,
  lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
  sentinel="[DONE]",
+ client_ref=self,
  )
  if utils.match_response(http_res, "422", "application/json"):
  http_res_text = await utils.stream_to_text_async(http_res)
- response_data = utils.unmarshal_json(
- http_res_text, models.HTTPValidationErrorData
+ response_data = unmarshal_json_response(
+ models.HTTPValidationErrorData, http_res, http_res_text
  )
- raise models.HTTPValidationError(data=response_data)
+ raise models.HTTPValidationError(response_data, http_res, http_res_text)
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = await utils.stream_to_text_async(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)
  if utils.match_response(http_res, "5XX", "*"):
  http_res_text = await utils.stream_to_text_async(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)

- content_type = http_res.headers.get("Content-Type")
  http_res_text = await utils.stream_to_text_async(http_res)
- raise models.SDKError(
- f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
- http_res.status_code,
- http_res_text,
- http_res,
- )
+ raise models.SDKError("Unexpected response received", http_res, http_res_text)

  def complete(
  self,
@@ -314,7 +299,7 @@ class Fim(BaseSDK):

  FIM completion.

- :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+ :param model: ID of the model with FIM to use.
  :param prompt: The text/code to complete.
  :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
  :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -381,6 +366,7 @@ class Fim(BaseSDK):

  http_res = self.do_request(
  hook_ctx=HookContext(
+ config=self.sdk_configuration,
  base_url=base_url or "",
  operation_id="fim_completion_v1_fim_completions_post",
  oauth2_scopes=[],
@@ -393,33 +379,22 @@ class Fim(BaseSDK):

  response_data: Any = None
  if utils.match_response(http_res, "200", "application/json"):
- return utils.unmarshal_json(
- http_res.text, Optional[models.FIMCompletionResponse]
+ return unmarshal_json_response(
+ Optional[models.FIMCompletionResponse], http_res
  )
  if utils.match_response(http_res, "422", "application/json"):
- response_data = utils.unmarshal_json(
- http_res.text, models.HTTPValidationErrorData
+ response_data = unmarshal_json_response(
+ models.HTTPValidationErrorData, http_res
  )
- raise models.HTTPValidationError(data=response_data)
+ raise models.HTTPValidationError(response_data, http_res)
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = utils.stream_to_text(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)
  if utils.match_response(http_res, "5XX", "*"):
  http_res_text = utils.stream_to_text(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)

- content_type = http_res.headers.get("Content-Type")
- http_res_text = utils.stream_to_text(http_res)
- raise models.SDKError(
- f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
- http_res.status_code,
- http_res_text,
- http_res,
- )
+ raise models.SDKError("Unexpected response received", http_res)

  async def complete_async(
  self,
@@ -448,7 +423,7 @@ class Fim(BaseSDK):

  FIM completion.

- :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+ :param model: ID of the model with FIM to use.
  :param prompt: The text/code to complete.
  :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
  :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -515,6 +490,7 @@ class Fim(BaseSDK):

  http_res = await self.do_request_async(
  hook_ctx=HookContext(
+ config=self.sdk_configuration,
  base_url=base_url or "",
  operation_id="fim_completion_v1_fim_completions_post",
  oauth2_scopes=[],
@@ -527,30 +503,19 @@ class Fim(BaseSDK):

  response_data: Any = None
  if utils.match_response(http_res, "200", "application/json"):
- return utils.unmarshal_json(
- http_res.text, Optional[models.FIMCompletionResponse]
+ return unmarshal_json_response(
+ Optional[models.FIMCompletionResponse], http_res
  )
  if utils.match_response(http_res, "422", "application/json"):
- response_data = utils.unmarshal_json(
- http_res.text, models.HTTPValidationErrorData
+ response_data = unmarshal_json_response(
+ models.HTTPValidationErrorData, http_res
  )
- raise models.HTTPValidationError(data=response_data)
+ raise models.HTTPValidationError(response_data, http_res)
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = await utils.stream_to_text_async(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)
  if utils.match_response(http_res, "5XX", "*"):
  http_res_text = await utils.stream_to_text_async(http_res)
- raise models.SDKError(
- "API error occurred", http_res.status_code, http_res_text, http_res
- )
+ raise models.SDKError("API error occurred", http_res, http_res_text)

- content_type = http_res.headers.get("Content-Type")
- http_res_text = await utils.stream_to_text_async(http_res)
- raise models.SDKError(
- f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
- http_res.status_code,
- http_res_text,
- http_res,
- )
+ raise models.SDKError("Unexpected response received", http_res)
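The fim.py changes mirror chat.py (the `HookContext` config field, `client_ref` on the event stream, `unmarshal_json_response` for parsing) and relax the `model` docstring from the codestral-only wording to any FIM-capable model. A hedged usage sketch follows, with the same assumed client setup as above; the model id and `suffix` value are illustrative.

```python
# Illustrative sketch only: client setup and model id are assumptions.
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(project_id="my-gcp-project", region="europe-west4")

res = client.fim.complete(
    model="codestral-2405",                 # any FIM-capable model per the 1.10.1 docstring
    prompt="def fibonacci(n: int) -> int:\n    ",
    suffix="\n\nprint(fibonacci(10))",      # optional text after the insertion point
)
if res and res.choices:
    print(res.choices[0].message.content)
```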
mistralai_gcp/httpclient.py CHANGED
@@ -2,7 +2,6 @@

  # pyright: reportReturnType = false
  import asyncio
- from concurrent.futures import ThreadPoolExecutor
  from typing_extensions import Protocol, runtime_checkable
  import httpx
  from typing import Any, Optional, Union
@@ -116,21 +115,12 @@ def close_clients(
  pass

  if async_client is not None and not async_client_supplied:
- is_async = False
  try:
- asyncio.get_running_loop()
- is_async = True
+ loop = asyncio.get_running_loop()
+ asyncio.run_coroutine_threadsafe(async_client.aclose(), loop)
  except RuntimeError:
- pass
-
-
- try:
- # If this function is called in an async loop then start another
- # loop in a separate thread to close the async http client.
- if is_async:
- with ThreadPoolExecutor(max_workers=1) as executor:
- future = executor.submit(asyncio.run, async_client.aclose())
- future.result()
- else:
+ try:
  asyncio.run(async_client.aclose())
- except Exception:
- pass
+ except RuntimeError:
+ # best effort