mistralai 1.10.1__py3-none-any.whl → 1.11.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (233)
  1. mistralai/_version.py +3 -3
  2. mistralai/accesses.py +22 -12
  3. mistralai/agents.py +88 -44
  4. mistralai/basesdk.py +6 -0
  5. mistralai/chat.py +96 -40
  6. mistralai/classifiers.py +35 -22
  7. mistralai/conversations.py +186 -64
  8. mistralai/documents.py +72 -26
  9. mistralai/embeddings.py +17 -8
  10. mistralai/files.py +58 -24
  11. mistralai/fim.py +20 -12
  12. mistralai/httpclient.py +0 -1
  13. mistralai/jobs.py +65 -26
  14. mistralai/libraries.py +20 -10
  15. mistralai/mistral_agents.py +438 -30
  16. mistralai/mistral_jobs.py +33 -14
  17. mistralai/models/__init__.py +16 -0
  18. mistralai/models/agent.py +1 -1
  19. mistralai/models/agentconversation.py +1 -1
  20. mistralai/models/agenthandoffdoneevent.py +1 -1
  21. mistralai/models/agenthandoffentry.py +3 -2
  22. mistralai/models/agenthandoffstartedevent.py +1 -1
  23. mistralai/models/agents_api_v1_agents_get_versionop.py +21 -0
  24. mistralai/models/agents_api_v1_agents_list_versionsop.py +33 -0
  25. mistralai/models/agents_api_v1_agents_listop.py +4 -0
  26. mistralai/models/agentscompletionrequest.py +2 -5
  27. mistralai/models/agentscompletionstreamrequest.py +2 -5
  28. mistralai/models/archiveftmodelout.py +1 -1
  29. mistralai/models/assistantmessage.py +1 -1
  30. mistralai/models/audiochunk.py +1 -1
  31. mistralai/models/audioencoding.py +6 -1
  32. mistralai/models/audioformat.py +2 -4
  33. mistralai/models/basemodelcard.py +1 -1
  34. mistralai/models/batchjobin.py +2 -4
  35. mistralai/models/batchjobout.py +1 -1
  36. mistralai/models/batchjobsout.py +1 -1
  37. mistralai/models/chatcompletionchoice.py +10 -5
  38. mistralai/models/chatcompletionrequest.py +2 -5
  39. mistralai/models/chatcompletionstreamrequest.py +2 -5
  40. mistralai/models/classifierdetailedjobout.py +4 -2
  41. mistralai/models/classifierftmodelout.py +3 -2
  42. mistralai/models/classifierjobout.py +4 -2
  43. mistralai/models/codeinterpretertool.py +1 -1
  44. mistralai/models/completiondetailedjobout.py +5 -2
  45. mistralai/models/completionftmodelout.py +3 -2
  46. mistralai/models/completionjobout.py +5 -2
  47. mistralai/models/completionresponsestreamchoice.py +9 -8
  48. mistralai/models/conversationappendrequest.py +4 -1
  49. mistralai/models/conversationappendstreamrequest.py +4 -1
  50. mistralai/models/conversationhistory.py +2 -1
  51. mistralai/models/conversationmessages.py +1 -1
  52. mistralai/models/conversationrequest.py +5 -1
  53. mistralai/models/conversationresponse.py +2 -1
  54. mistralai/models/conversationrestartrequest.py +4 -1
  55. mistralai/models/conversationrestartstreamrequest.py +4 -1
  56. mistralai/models/conversationstreamrequest.py +5 -1
  57. mistralai/models/documentlibrarytool.py +1 -1
  58. mistralai/models/documenturlchunk.py +1 -1
  59. mistralai/models/embeddingdtype.py +7 -1
  60. mistralai/models/encodingformat.py +4 -1
  61. mistralai/models/entitytype.py +8 -1
  62. mistralai/models/filepurpose.py +8 -1
  63. mistralai/models/files_api_routes_list_filesop.py +4 -11
  64. mistralai/models/files_api_routes_upload_fileop.py +2 -6
  65. mistralai/models/fileschema.py +3 -5
  66. mistralai/models/finetuneablemodeltype.py +4 -1
  67. mistralai/models/ftclassifierlossfunction.py +4 -1
  68. mistralai/models/ftmodelcard.py +1 -1
  69. mistralai/models/functioncallentry.py +3 -2
  70. mistralai/models/functioncallevent.py +1 -1
  71. mistralai/models/functionresultentry.py +3 -2
  72. mistralai/models/functiontool.py +1 -1
  73. mistralai/models/githubrepositoryin.py +1 -1
  74. mistralai/models/githubrepositoryout.py +1 -1
  75. mistralai/models/httpvalidationerror.py +4 -2
  76. mistralai/models/imagegenerationtool.py +1 -1
  77. mistralai/models/imageurlchunk.py +1 -1
  78. mistralai/models/jobsout.py +1 -1
  79. mistralai/models/legacyjobmetadataout.py +1 -1
  80. mistralai/models/messageinputentry.py +9 -3
  81. mistralai/models/messageoutputentry.py +6 -3
  82. mistralai/models/messageoutputevent.py +4 -2
  83. mistralai/models/mistralerror.py +11 -7
  84. mistralai/models/mistralpromptmode.py +1 -1
  85. mistralai/models/modelconversation.py +1 -1
  86. mistralai/models/no_response_error.py +5 -1
  87. mistralai/models/ocrrequest.py +11 -1
  88. mistralai/models/ocrtableobject.py +4 -1
  89. mistralai/models/referencechunk.py +1 -1
  90. mistralai/models/requestsource.py +5 -1
  91. mistralai/models/responsedoneevent.py +1 -1
  92. mistralai/models/responseerrorevent.py +1 -1
  93. mistralai/models/responseformats.py +5 -1
  94. mistralai/models/responsestartedevent.py +1 -1
  95. mistralai/models/responsevalidationerror.py +2 -0
  96. mistralai/models/retrievefileout.py +3 -5
  97. mistralai/models/sampletype.py +7 -1
  98. mistralai/models/sdkerror.py +2 -0
  99. mistralai/models/shareenum.py +7 -1
  100. mistralai/models/sharingdelete.py +2 -4
  101. mistralai/models/sharingin.py +3 -5
  102. mistralai/models/source.py +8 -1
  103. mistralai/models/systemmessage.py +1 -1
  104. mistralai/models/textchunk.py +1 -1
  105. mistralai/models/thinkchunk.py +1 -1
  106. mistralai/models/timestampgranularity.py +1 -1
  107. mistralai/models/tool.py +2 -6
  108. mistralai/models/toolcall.py +2 -6
  109. mistralai/models/toolchoice.py +2 -6
  110. mistralai/models/toolchoiceenum.py +6 -1
  111. mistralai/models/toolexecutiondeltaevent.py +2 -1
  112. mistralai/models/toolexecutiondoneevent.py +2 -1
  113. mistralai/models/toolexecutionentry.py +4 -2
  114. mistralai/models/toolexecutionstartedevent.py +2 -1
  115. mistralai/models/toolfilechunk.py +2 -1
  116. mistralai/models/toolmessage.py +1 -1
  117. mistralai/models/toolreferencechunk.py +2 -1
  118. mistralai/models/tooltypes.py +1 -1
  119. mistralai/models/transcriptionsegmentchunk.py +1 -1
  120. mistralai/models/transcriptionstreamdone.py +1 -1
  121. mistralai/models/transcriptionstreamlanguage.py +1 -1
  122. mistralai/models/transcriptionstreamsegmentdelta.py +1 -1
  123. mistralai/models/transcriptionstreamtextdelta.py +1 -1
  124. mistralai/models/unarchiveftmodelout.py +1 -1
  125. mistralai/models/uploadfileout.py +3 -5
  126. mistralai/models/usermessage.py +1 -1
  127. mistralai/models/wandbintegration.py +1 -1
  128. mistralai/models/wandbintegrationout.py +1 -1
  129. mistralai/models/websearchpremiumtool.py +1 -1
  130. mistralai/models/websearchtool.py +1 -1
  131. mistralai/models_.py +24 -12
  132. mistralai/ocr.py +38 -10
  133. mistralai/sdk.py +2 -2
  134. mistralai/transcriptions.py +28 -12
  135. mistralai/types/basemodel.py +41 -3
  136. mistralai/utils/__init__.py +0 -3
  137. mistralai/utils/annotations.py +32 -8
  138. mistralai/utils/enums.py +60 -0
  139. mistralai/utils/forms.py +21 -10
  140. mistralai/utils/queryparams.py +14 -2
  141. mistralai/utils/requestbodies.py +3 -3
  142. mistralai/utils/retries.py +69 -5
  143. mistralai/utils/serializers.py +0 -20
  144. mistralai/utils/unmarshal_json_response.py +15 -1
  145. {mistralai-1.10.1.dist-info → mistralai-1.11.1.dist-info}/METADATA +24 -31
  146. {mistralai-1.10.1.dist-info → mistralai-1.11.1.dist-info}/RECORD +233 -230
  147. mistralai_azure/_version.py +3 -3
  148. mistralai_azure/basesdk.py +6 -0
  149. mistralai_azure/chat.py +27 -15
  150. mistralai_azure/httpclient.py +0 -1
  151. mistralai_azure/models/__init__.py +16 -1
  152. mistralai_azure/models/assistantmessage.py +1 -1
  153. mistralai_azure/models/chatcompletionchoice.py +10 -7
  154. mistralai_azure/models/chatcompletionrequest.py +8 -6
  155. mistralai_azure/models/chatcompletionstreamrequest.py +8 -6
  156. mistralai_azure/models/completionresponsestreamchoice.py +11 -7
  157. mistralai_azure/models/documenturlchunk.py +1 -1
  158. mistralai_azure/models/httpvalidationerror.py +4 -2
  159. mistralai_azure/models/imageurlchunk.py +1 -1
  160. mistralai_azure/models/mistralazureerror.py +11 -7
  161. mistralai_azure/models/mistralpromptmode.py +1 -1
  162. mistralai_azure/models/no_response_error.py +5 -1
  163. mistralai_azure/models/ocrpageobject.py +32 -5
  164. mistralai_azure/models/ocrrequest.py +20 -1
  165. mistralai_azure/models/ocrtableobject.py +34 -0
  166. mistralai_azure/models/referencechunk.py +1 -1
  167. mistralai_azure/models/responseformats.py +5 -1
  168. mistralai_azure/models/responsevalidationerror.py +2 -0
  169. mistralai_azure/models/sdkerror.py +2 -0
  170. mistralai_azure/models/systemmessage.py +1 -1
  171. mistralai_azure/models/textchunk.py +1 -1
  172. mistralai_azure/models/thinkchunk.py +1 -1
  173. mistralai_azure/models/tool.py +2 -6
  174. mistralai_azure/models/toolcall.py +2 -6
  175. mistralai_azure/models/toolchoice.py +2 -6
  176. mistralai_azure/models/toolchoiceenum.py +6 -1
  177. mistralai_azure/models/toolmessage.py +1 -1
  178. mistralai_azure/models/tooltypes.py +1 -1
  179. mistralai_azure/models/usermessage.py +1 -1
  180. mistralai_azure/ocr.py +26 -6
  181. mistralai_azure/types/basemodel.py +41 -3
  182. mistralai_azure/utils/__init__.py +0 -3
  183. mistralai_azure/utils/annotations.py +32 -8
  184. mistralai_azure/utils/enums.py +60 -0
  185. mistralai_azure/utils/forms.py +21 -10
  186. mistralai_azure/utils/queryparams.py +14 -2
  187. mistralai_azure/utils/requestbodies.py +3 -3
  188. mistralai_azure/utils/retries.py +69 -5
  189. mistralai_azure/utils/serializers.py +0 -20
  190. mistralai_azure/utils/unmarshal_json_response.py +15 -1
  191. mistralai_gcp/_version.py +3 -3
  192. mistralai_gcp/basesdk.py +6 -0
  193. mistralai_gcp/chat.py +27 -15
  194. mistralai_gcp/fim.py +27 -15
  195. mistralai_gcp/httpclient.py +0 -1
  196. mistralai_gcp/models/assistantmessage.py +1 -1
  197. mistralai_gcp/models/chatcompletionchoice.py +10 -7
  198. mistralai_gcp/models/chatcompletionrequest.py +8 -6
  199. mistralai_gcp/models/chatcompletionstreamrequest.py +8 -6
  200. mistralai_gcp/models/completionresponsestreamchoice.py +11 -7
  201. mistralai_gcp/models/fimcompletionrequest.py +6 -1
  202. mistralai_gcp/models/fimcompletionstreamrequest.py +6 -1
  203. mistralai_gcp/models/httpvalidationerror.py +4 -2
  204. mistralai_gcp/models/imageurlchunk.py +1 -1
  205. mistralai_gcp/models/mistralgcperror.py +11 -7
  206. mistralai_gcp/models/mistralpromptmode.py +1 -1
  207. mistralai_gcp/models/no_response_error.py +5 -1
  208. mistralai_gcp/models/referencechunk.py +1 -1
  209. mistralai_gcp/models/responseformats.py +5 -1
  210. mistralai_gcp/models/responsevalidationerror.py +2 -0
  211. mistralai_gcp/models/sdkerror.py +2 -0
  212. mistralai_gcp/models/systemmessage.py +1 -1
  213. mistralai_gcp/models/textchunk.py +1 -1
  214. mistralai_gcp/models/thinkchunk.py +1 -1
  215. mistralai_gcp/models/tool.py +2 -6
  216. mistralai_gcp/models/toolcall.py +2 -6
  217. mistralai_gcp/models/toolchoice.py +2 -6
  218. mistralai_gcp/models/toolchoiceenum.py +6 -1
  219. mistralai_gcp/models/toolmessage.py +1 -1
  220. mistralai_gcp/models/tooltypes.py +1 -1
  221. mistralai_gcp/models/usermessage.py +1 -1
  222. mistralai_gcp/types/basemodel.py +41 -3
  223. mistralai_gcp/utils/__init__.py +0 -3
  224. mistralai_gcp/utils/annotations.py +32 -8
  225. mistralai_gcp/utils/enums.py +60 -0
  226. mistralai_gcp/utils/forms.py +21 -10
  227. mistralai_gcp/utils/queryparams.py +14 -2
  228. mistralai_gcp/utils/requestbodies.py +3 -3
  229. mistralai_gcp/utils/retries.py +69 -5
  230. mistralai_gcp/utils/serializers.py +0 -20
  231. mistralai_gcp/utils/unmarshal_json_response.py +15 -1
  232. {mistralai-1.10.1.dist-info → mistralai-1.11.1.dist-info}/WHEEL +0 -0
  233. {mistralai-1.10.1.dist-info → mistralai-1.11.1.dist-info}/licenses/LICENSE +0 -0
mistralai_gcp/fim.py CHANGED
@@ -6,7 +6,7 @@ from mistralai_gcp._hooks import HookContext
  from mistralai_gcp.types import OptionalNullable, UNSET
  from mistralai_gcp.utils import eventstreaming
  from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response
- from typing import Any, Mapping, Optional, Union
+ from typing import Any, Dict, Mapping, Optional, Union


  class Fim(BaseSDK):
@@ -28,13 +28,14 @@ class Fim(BaseSDK):
  ]
  ] = None,
  random_seed: OptionalNullable[int] = UNSET,
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET,
  suffix: OptionalNullable[str] = UNSET,
  min_tokens: OptionalNullable[int] = UNSET,
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
  http_headers: Optional[Mapping[str, str]] = None,
- ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]:
+ ) -> eventstreaming.EventStream[models.CompletionEvent]:
  r"""Stream fim completion

  Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
@@ -47,6 +48,7 @@ class Fim(BaseSDK):
  :param stream:
  :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
  :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+ :param metadata:
  :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
  :param min_tokens: The minimum number of tokens to generate in the completion.
  :param retries: Override the default retry configuration for this method
@@ -72,6 +74,7 @@ class Fim(BaseSDK):
  stream=stream,
  stop=stop,
  random_seed=random_seed,
+ metadata=metadata,
  prompt=prompt,
  suffix=suffix,
  min_tokens=min_tokens,
@@ -93,6 +96,7 @@ class Fim(BaseSDK):
  get_serialized_body=lambda: utils.serialize_request_body(
  request, False, False, "json", models.FIMCompletionStreamRequest
  ),
+ allow_empty_value=None,
  timeout_ms=timeout_ms,
  )

@@ -109,7 +113,7 @@ class Fim(BaseSDK):
  config=self.sdk_configuration,
  base_url=base_url or "",
  operation_id="stream_fim",
- oauth2_scopes=[],
+ oauth2_scopes=None,
  security_source=self.sdk_configuration.security,
  ),
  request=req,
@@ -158,13 +162,14 @@ class Fim(BaseSDK):
  ]
  ] = None,
  random_seed: OptionalNullable[int] = UNSET,
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET,
  suffix: OptionalNullable[str] = UNSET,
  min_tokens: OptionalNullable[int] = UNSET,
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
  http_headers: Optional[Mapping[str, str]] = None,
- ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]:
+ ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]:
  r"""Stream fim completion

  Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
@@ -177,6 +182,7 @@ class Fim(BaseSDK):
  :param stream:
  :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
  :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+ :param metadata:
  :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
  :param min_tokens: The minimum number of tokens to generate in the completion.
  :param retries: Override the default retry configuration for this method
@@ -202,6 +208,7 @@ class Fim(BaseSDK):
  stream=stream,
  stop=stop,
  random_seed=random_seed,
+ metadata=metadata,
  prompt=prompt,
  suffix=suffix,
  min_tokens=min_tokens,
@@ -223,6 +230,7 @@ class Fim(BaseSDK):
  get_serialized_body=lambda: utils.serialize_request_body(
  request, False, False, "json", models.FIMCompletionStreamRequest
  ),
+ allow_empty_value=None,
  timeout_ms=timeout_ms,
  )

@@ -239,7 +247,7 @@ class Fim(BaseSDK):
  config=self.sdk_configuration,
  base_url=base_url or "",
  operation_id="stream_fim",
- oauth2_scopes=[],
+ oauth2_scopes=None,
  security_source=self.sdk_configuration.security,
  ),
  request=req,
@@ -288,13 +296,14 @@ class Fim(BaseSDK):
  ]
  ] = None,
  random_seed: OptionalNullable[int] = UNSET,
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET,
  suffix: OptionalNullable[str] = UNSET,
  min_tokens: OptionalNullable[int] = UNSET,
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
  http_headers: Optional[Mapping[str, str]] = None,
- ) -> Optional[models.FIMCompletionResponse]:
+ ) -> models.FIMCompletionResponse:
  r"""Fim Completion

  FIM completion.
@@ -307,6 +316,7 @@ class Fim(BaseSDK):
  :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
  :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
  :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+ :param metadata:
  :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
  :param min_tokens: The minimum number of tokens to generate in the completion.
  :param retries: Override the default retry configuration for this method
@@ -332,6 +342,7 @@ class Fim(BaseSDK):
  stream=stream,
  stop=stop,
  random_seed=random_seed,
+ metadata=metadata,
  prompt=prompt,
  suffix=suffix,
  min_tokens=min_tokens,
@@ -353,6 +364,7 @@ class Fim(BaseSDK):
  get_serialized_body=lambda: utils.serialize_request_body(
  request, False, False, "json", models.FIMCompletionRequest
  ),
+ allow_empty_value=None,
  timeout_ms=timeout_ms,
  )

@@ -369,7 +381,7 @@ class Fim(BaseSDK):
  config=self.sdk_configuration,
  base_url=base_url or "",
  operation_id="fim_completion_v1_fim_completions_post",
- oauth2_scopes=[],
+ oauth2_scopes=None,
  security_source=self.sdk_configuration.security,
  ),
  request=req,
@@ -379,9 +391,7 @@ class Fim(BaseSDK):

  response_data: Any = None
  if utils.match_response(http_res, "200", "application/json"):
- return unmarshal_json_response(
- Optional[models.FIMCompletionResponse], http_res
- )
+ return unmarshal_json_response(models.FIMCompletionResponse, http_res)
  if utils.match_response(http_res, "422", "application/json"):
  response_data = unmarshal_json_response(
  models.HTTPValidationErrorData, http_res
@@ -412,13 +422,14 @@ class Fim(BaseSDK):
  ]
  ] = None,
  random_seed: OptionalNullable[int] = UNSET,
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET,
  suffix: OptionalNullable[str] = UNSET,
  min_tokens: OptionalNullable[int] = UNSET,
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
  http_headers: Optional[Mapping[str, str]] = None,
- ) -> Optional[models.FIMCompletionResponse]:
+ ) -> models.FIMCompletionResponse:
  r"""Fim Completion

  FIM completion.
@@ -431,6 +442,7 @@ class Fim(BaseSDK):
  :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
  :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
  :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+ :param metadata:
  :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
  :param min_tokens: The minimum number of tokens to generate in the completion.
  :param retries: Override the default retry configuration for this method
@@ -456,6 +468,7 @@ class Fim(BaseSDK):
  stream=stream,
  stop=stop,
  random_seed=random_seed,
+ metadata=metadata,
  prompt=prompt,
  suffix=suffix,
  min_tokens=min_tokens,
@@ -477,6 +490,7 @@ class Fim(BaseSDK):
  get_serialized_body=lambda: utils.serialize_request_body(
  request, False, False, "json", models.FIMCompletionRequest
  ),
+ allow_empty_value=None,
  timeout_ms=timeout_ms,
  )

@@ -493,7 +507,7 @@ class Fim(BaseSDK):
  config=self.sdk_configuration,
  base_url=base_url or "",
  operation_id="fim_completion_v1_fim_completions_post",
- oauth2_scopes=[],
+ oauth2_scopes=None,
  security_source=self.sdk_configuration.security,
  ),
  request=req,
@@ -503,9 +517,7 @@ class Fim(BaseSDK):

  response_data: Any = None
  if utils.match_response(http_res, "200", "application/json"):
- return unmarshal_json_response(
- Optional[models.FIMCompletionResponse], http_res
- )
+ return unmarshal_json_response(models.FIMCompletionResponse, http_res)
  if utils.match_response(http_res, "422", "application/json"):
  response_data = unmarshal_json_response(
  models.HTTPValidationErrorData, http_res
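
For context, a minimal usage sketch of what the fim.py changes above mean for callers (not part of the diff): the `metadata` keyword is new in 1.11.x, and the sync/async `complete` and `stream` methods are no longer typed as returning Optional values. The `MistralGoogleCloud` entry point, the constructor arguments, the method names, the model name, and the metadata values below are assumptions based on the package's usual layout, not taken from this diff.

from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")

# complete() is now typed to return FIMCompletionResponse directly, so no None check is needed.
res = client.fim.complete(
    model="codestral-2405",
    prompt="def fibonacci(n):",
    suffix="return result",
    metadata={"trace_id": "abc-123"},  # new optional, nullable request field
)
print(res.choices[0].message.content)

# stream() is likewise typed to return an EventStream that can be iterated directly.
for event in client.fim.stream(
    model="codestral-2405",
    prompt="def fibonacci(n):",
    metadata={"trace_id": "abc-123"},
):
    print(event.data)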
mistralai_gcp/httpclient.py CHANGED
@@ -107,7 +107,6 @@ def close_clients(
  # to them from the owning SDK instance and they can be reaped.
  owner.client = None
  owner.async_client = None
-
  if sync_client is not None and not sync_client_supplied:
  try:
  sync_client.close()
mistralai_gcp/models/assistantmessage.py CHANGED
@@ -25,7 +25,7 @@ AssistantMessageContent = TypeAliasType(
  )


- AssistantMessageRole = Literal["assistant"]
+ AssistantMessageRole = Literal["assistant",]


  class AssistantMessageTypedDict(TypedDict):
mistralai_gcp/models/chatcompletionchoice.py CHANGED
@@ -3,14 +3,19 @@
  from __future__ import annotations
  from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
  from mistralai_gcp.types import BaseModel, UnrecognizedStr
- from mistralai_gcp.utils import validate_open_enum
- from pydantic.functional_validators import PlainValidator
  from typing import Literal, Union
- from typing_extensions import Annotated, TypedDict
+ from typing_extensions import TypedDict


  ChatCompletionChoiceFinishReason = Union[
- Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr
+ Literal[
+ "stop",
+ "length",
+ "model_length",
+ "error",
+ "tool_calls",
+ ],
+ UnrecognizedStr,
  ]


@@ -25,6 +30,4 @@ class ChatCompletionChoice(BaseModel):

  message: AssistantMessage

- finish_reason: Annotated[
- ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False))
- ]
+ finish_reason: ChatCompletionChoiceFinishReason
mistralai_gcp/models/chatcompletionrequest.py CHANGED
@@ -18,10 +18,9 @@ from mistralai_gcp.types import (
  UNSET,
  UNSET_SENTINEL,
  )
- from mistralai_gcp.utils import get_discriminator, validate_open_enum
+ from mistralai_gcp.utils import get_discriminator
  from pydantic import Discriminator, Tag, model_serializer
- from pydantic.functional_validators import PlainValidator
- from typing import List, Optional, Union
+ from typing import Any, Dict, List, Optional, Union
  from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


@@ -89,6 +88,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
  r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
  random_seed: NotRequired[Nullable[int]]
  r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+ metadata: NotRequired[Nullable[Dict[str, Any]]]
  response_format: NotRequired[ResponseFormatTypedDict]
  r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
  tools: NotRequired[Nullable[List[ToolTypedDict]]]
@@ -134,6 +134,8 @@ class ChatCompletionRequest(BaseModel):
  random_seed: OptionalNullable[int] = UNSET
  r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+ metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
  response_format: Optional[ResponseFormat] = None
  r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""

@@ -158,9 +160,7 @@ class ChatCompletionRequest(BaseModel):
  parallel_tool_calls: Optional[bool] = None
  r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""

- prompt_mode: Annotated[
- OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
- ] = UNSET
+ prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
  r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

  @model_serializer(mode="wrap")
@@ -172,6 +172,7 @@ class ChatCompletionRequest(BaseModel):
  "stream",
  "stop",
  "random_seed",
+ "metadata",
  "response_format",
  "tools",
  "tool_choice",
@@ -186,6 +187,7 @@ class ChatCompletionRequest(BaseModel):
  "temperature",
  "max_tokens",
  "random_seed",
+ "metadata",
  "tools",
  "n",
  "prompt_mode",
mistralai_gcp/models/chatcompletionstreamrequest.py CHANGED
@@ -18,10 +18,9 @@ from mistralai_gcp.types import (
  UNSET,
  UNSET_SENTINEL,
  )
- from mistralai_gcp.utils import get_discriminator, validate_open_enum
+ from mistralai_gcp.utils import get_discriminator
  from pydantic import Discriminator, Tag, model_serializer
- from pydantic.functional_validators import PlainValidator
- from typing import List, Optional, Union
+ from typing import Any, Dict, List, Optional, Union
  from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


@@ -84,6 +83,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
  r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
  random_seed: NotRequired[Nullable[int]]
  r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+ metadata: NotRequired[Nullable[Dict[str, Any]]]
  response_format: NotRequired[ResponseFormatTypedDict]
  r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
  tools: NotRequired[Nullable[List[ToolTypedDict]]]
@@ -128,6 +128,8 @@ class ChatCompletionStreamRequest(BaseModel):
  random_seed: OptionalNullable[int] = UNSET
  r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+ metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
  response_format: Optional[ResponseFormat] = None
  r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""

@@ -152,9 +154,7 @@ class ChatCompletionStreamRequest(BaseModel):
  parallel_tool_calls: Optional[bool] = None
  r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""

- prompt_mode: Annotated[
- OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
- ] = UNSET
+ prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
  r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

  @model_serializer(mode="wrap")
@@ -166,6 +166,7 @@ class ChatCompletionStreamRequest(BaseModel):
  "stream",
  "stop",
  "random_seed",
+ "metadata",
  "response_format",
  "tools",
  "tool_choice",
@@ -180,6 +181,7 @@ class ChatCompletionStreamRequest(BaseModel):
  "temperature",
  "max_tokens",
  "random_seed",
+ "metadata",
  "tools",
  "n",
  "prompt_mode",
mistralai_gcp/models/completionresponsestreamchoice.py CHANGED
@@ -3,14 +3,20 @@
  from __future__ import annotations
  from .deltamessage import DeltaMessage, DeltaMessageTypedDict
  from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr
- from mistralai_gcp.utils import validate_open_enum
  from pydantic import model_serializer
- from pydantic.functional_validators import PlainValidator
  from typing import Literal, Union
- from typing_extensions import Annotated, TypedDict
+ from typing_extensions import TypedDict


- FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr]
+ FinishReason = Union[
+ Literal[
+ "stop",
+ "length",
+ "error",
+ "tool_calls",
+ ],
+ UnrecognizedStr,
+ ]


  class CompletionResponseStreamChoiceTypedDict(TypedDict):
@@ -24,9 +30,7 @@ class CompletionResponseStreamChoice(BaseModel):

  delta: DeltaMessage

- finish_reason: Annotated[
- Nullable[FinishReason], PlainValidator(validate_open_enum(False))
- ]
+ finish_reason: Nullable[FinishReason]

  @model_serializer(mode="wrap")
  def serialize_model(self, handler):
mistralai_gcp/models/fimcompletionrequest.py CHANGED
@@ -9,7 +9,7 @@ from mistralai_gcp.types import (
  UNSET_SENTINEL,
  )
  from pydantic import model_serializer
- from typing import List, Optional, Union
+ from typing import Any, Dict, List, Optional, Union
  from typing_extensions import NotRequired, TypeAliasType, TypedDict


@@ -42,6 +42,7 @@ class FIMCompletionRequestTypedDict(TypedDict):
  r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
  random_seed: NotRequired[Nullable[int]]
  r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+ metadata: NotRequired[Nullable[Dict[str, Any]]]
  suffix: NotRequired[Nullable[str]]
  r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
  min_tokens: NotRequired[Nullable[int]]
@@ -73,6 +74,8 @@ class FIMCompletionRequest(BaseModel):
  random_seed: OptionalNullable[int] = UNSET
  r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+ metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
  suffix: OptionalNullable[str] = UNSET
  r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""

@@ -88,6 +91,7 @@ class FIMCompletionRequest(BaseModel):
  "stream",
  "stop",
  "random_seed",
+ "metadata",
  "suffix",
  "min_tokens",
  ]
@@ -95,6 +99,7 @@ class FIMCompletionRequest(BaseModel):
  "temperature",
  "max_tokens",
  "random_seed",
+ "metadata",
  "suffix",
  "min_tokens",
  ]
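
The model-level counterpart of the fim.py change: `metadata` is declared as `OptionalNullable[Dict[str, Any]] = UNSET` and listed in both the optional and nullable sets of the wrap-mode serializer. A small sketch of what that declaration implies when constructing the model directly (illustrative only; values are made up, and the exact wire payload is produced by the generated serialize_model hook):

from mistralai_gcp.models import FIMCompletionRequest

# Left at the UNSET sentinel: listed as optional, so the serializer can omit it from the payload.
req_default = FIMCompletionRequest(model="codestral-2405", prompt="def add(a, b):")

# Explicit None: listed as nullable, so it is emitted as JSON null rather than dropped.
req_null = FIMCompletionRequest(model="codestral-2405", prompt="def add(a, b):", metadata=None)

# A dict value is passed through unchanged.
req_tagged = FIMCompletionRequest(
    model="codestral-2405",
    prompt="def add(a, b):",
    metadata={"trace_id": "abc-123"},
)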
mistralai_gcp/models/fimcompletionstreamrequest.py CHANGED
@@ -9,7 +9,7 @@ from mistralai_gcp.types import (
  UNSET_SENTINEL,
  )
  from pydantic import model_serializer
- from typing import List, Optional, Union
+ from typing import Any, Dict, List, Optional, Union
  from typing_extensions import NotRequired, TypeAliasType, TypedDict


@@ -41,6 +41,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict):
  r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
  random_seed: NotRequired[Nullable[int]]
  r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+ metadata: NotRequired[Nullable[Dict[str, Any]]]
  suffix: NotRequired[Nullable[str]]
  r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
  min_tokens: NotRequired[Nullable[int]]
@@ -71,6 +72,8 @@ class FIMCompletionStreamRequest(BaseModel):
  random_seed: OptionalNullable[int] = UNSET
  r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+ metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
  suffix: OptionalNullable[str] = UNSET
  r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""

@@ -86,6 +89,7 @@ class FIMCompletionStreamRequest(BaseModel):
  "stream",
  "stop",
  "random_seed",
+ "metadata",
  "suffix",
  "min_tokens",
  ]
@@ -93,6 +97,7 @@ class FIMCompletionStreamRequest(BaseModel):
  "temperature",
  "max_tokens",
  "random_seed",
+ "metadata",
  "suffix",
  "min_tokens",
  ]
mistralai_gcp/models/httpvalidationerror.py CHANGED
@@ -2,6 +2,7 @@

  from __future__ import annotations
  from .validationerror import ValidationError
+ from dataclasses import dataclass, field
  import httpx
  from mistralai_gcp.models import MistralGcpError
  from mistralai_gcp.types import BaseModel
@@ -12,8 +13,9 @@ class HTTPValidationErrorData(BaseModel):
  detail: Optional[List[ValidationError]] = None


+ @dataclass(unsafe_hash=True)
  class HTTPValidationError(MistralGcpError):
- data: HTTPValidationErrorData
+ data: HTTPValidationErrorData = field(hash=False)

  def __init__(
  self,
@@ -23,4 +25,4 @@ class HTTPValidationError(MistralGcpError):
  ):
  message = body or raw_response.text
  super().__init__(message, raw_response, body)
- self.data = data
+ object.__setattr__(self, "data", data)
mistralai_gcp/models/imageurlchunk.py CHANGED
@@ -15,7 +15,7 @@ ImageURLChunkImageURLTypedDict = TypeAliasType(
  ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str])


- ImageURLChunkType = Literal["image_url"]
+ ImageURLChunkType = Literal["image_url",]


  class ImageURLChunkTypedDict(TypedDict):
mistralai_gcp/models/mistralgcperror.py CHANGED
@@ -2,25 +2,29 @@

  import httpx
  from typing import Optional
+ from dataclasses import dataclass, field


+ @dataclass(unsafe_hash=True)
  class MistralGcpError(Exception):
  """The base class for all HTTP error responses."""

  message: str
  status_code: int
  body: str
- headers: httpx.Headers
- raw_response: httpx.Response
+ headers: httpx.Headers = field(hash=False)
+ raw_response: httpx.Response = field(hash=False)

  def __init__(
  self, message: str, raw_response: httpx.Response, body: Optional[str] = None
  ):
- self.message = message
- self.status_code = raw_response.status_code
- self.body = body if body is not None else raw_response.text
- self.headers = raw_response.headers
- self.raw_response = raw_response
+ object.__setattr__(self, "message", message)
+ object.__setattr__(self, "status_code", raw_response.status_code)
+ object.__setattr__(
+ self, "body", body if body is not None else raw_response.text
+ )
+ object.__setattr__(self, "headers", raw_response.headers)
+ object.__setattr__(self, "raw_response", raw_response)

  def __str__(self):
  return self.message
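
The error hierarchy is reworked into dataclasses with `unsafe_hash=True`: non-hashable members (`headers`, `raw_response`, and subclass payloads such as `data`) are excluded via `field(hash=False)` and assigned through `object.__setattr__`. A hedged sketch of the practical effect for callers; the client construction and method names are assumptions, as in the earlier sketch, and error handling itself is unchanged:

from mistralai_gcp import MistralGoogleCloud
from mistralai_gcp.models import HTTPValidationError, SDKError

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")
seen = set()

try:
    client.chat.complete(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": "hello"}],
    )
except HTTPValidationError as e:
    # e.data still carries the parsed 422 body; it is now set via object.__setattr__
    # and kept out of the hash with field(hash=False).
    print(e.status_code, e.data.detail)
    seen.add(e)  # error instances are hashable thanks to @dataclass(unsafe_hash=True)
except SDKError as e:
    print(e.status_code, e.body)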
mistralai_gcp/models/mistralpromptmode.py CHANGED
@@ -5,4 +5,4 @@ from mistralai_gcp.types import UnrecognizedStr
  from typing import Literal, Union


- MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr]
+ MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr]
mistralai_gcp/models/no_response_error.py CHANGED
@@ -1,12 +1,16 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

+ from dataclasses import dataclass
+
+
+ @dataclass(unsafe_hash=True)
  class NoResponseError(Exception):
  """Error raised when no HTTP response is received from the server."""

  message: str

  def __init__(self, message: str = "No response received"):
- self.message = message
+ object.__setattr__(self, "message", message)
  super().__init__(message)

  def __str__(self):
mistralai_gcp/models/referencechunk.py CHANGED
@@ -6,7 +6,7 @@ from typing import List, Literal, Optional
  from typing_extensions import NotRequired, TypedDict


- ReferenceChunkType = Literal["reference"]
+ ReferenceChunkType = Literal["reference",]


  class ReferenceChunkTypedDict(TypedDict):
mistralai_gcp/models/responseformats.py CHANGED
@@ -4,4 +4,8 @@ from __future__ import annotations
  from typing import Literal


- ResponseFormats = Literal["text", "json_object", "json_schema"]
+ ResponseFormats = Literal[
+ "text",
+ "json_object",
+ "json_schema",
+ ]
mistralai_gcp/models/responsevalidationerror.py CHANGED
@@ -2,10 +2,12 @@

  import httpx
  from typing import Optional
+ from dataclasses import dataclass

  from mistralai_gcp.models import MistralGcpError


+ @dataclass(unsafe_hash=True)
  class ResponseValidationError(MistralGcpError):
  """Error raised when there is a type mismatch between the response data and the expected Pydantic model."""

mistralai_gcp/models/sdkerror.py CHANGED
@@ -2,12 +2,14 @@

  import httpx
  from typing import Optional
+ from dataclasses import dataclass

  from mistralai_gcp.models import MistralGcpError

  MAX_MESSAGE_LEN = 10_000


+ @dataclass(unsafe_hash=True)
  class SDKError(MistralGcpError):
  """The fallback error class if no more specific error class is matched."""