mistralai 1.10.0__py3-none-any.whl → 1.11.1__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
Files changed (270)
  1. mistralai/_hooks/tracing.py +28 -3
  2. mistralai/_version.py +3 -3
  3. mistralai/accesses.py +22 -12
  4. mistralai/agents.py +88 -44
  5. mistralai/basesdk.py +6 -0
  6. mistralai/chat.py +96 -40
  7. mistralai/classifiers.py +48 -23
  8. mistralai/conversations.py +186 -64
  9. mistralai/documents.py +72 -26
  10. mistralai/embeddings.py +24 -9
  11. mistralai/extra/README.md +1 -1
  12. mistralai/extra/mcp/auth.py +10 -11
  13. mistralai/extra/mcp/base.py +17 -16
  14. mistralai/extra/mcp/sse.py +13 -15
  15. mistralai/extra/mcp/stdio.py +5 -6
  16. mistralai/extra/observability/otel.py +47 -68
  17. mistralai/extra/run/context.py +33 -43
  18. mistralai/extra/run/result.py +29 -30
  19. mistralai/extra/run/tools.py +8 -9
  20. mistralai/extra/struct_chat.py +15 -8
  21. mistralai/extra/utils/response_format.py +5 -3
  22. mistralai/files.py +58 -24
  23. mistralai/fim.py +20 -12
  24. mistralai/httpclient.py +0 -1
  25. mistralai/jobs.py +65 -26
  26. mistralai/libraries.py +20 -10
  27. mistralai/mistral_agents.py +438 -30
  28. mistralai/mistral_jobs.py +62 -17
  29. mistralai/models/__init__.py +46 -1
  30. mistralai/models/agent.py +1 -1
  31. mistralai/models/agentconversation.py +1 -1
  32. mistralai/models/agenthandoffdoneevent.py +1 -1
  33. mistralai/models/agenthandoffentry.py +3 -2
  34. mistralai/models/agenthandoffstartedevent.py +1 -1
  35. mistralai/models/agents_api_v1_agents_get_versionop.py +21 -0
  36. mistralai/models/agents_api_v1_agents_list_versionsop.py +33 -0
  37. mistralai/models/agents_api_v1_agents_listop.py +5 -1
  38. mistralai/models/agents_api_v1_conversations_listop.py +1 -1
  39. mistralai/models/agentscompletionrequest.py +2 -5
  40. mistralai/models/agentscompletionstreamrequest.py +2 -5
  41. mistralai/models/archiveftmodelout.py +1 -1
  42. mistralai/models/assistantmessage.py +1 -1
  43. mistralai/models/audiochunk.py +1 -1
  44. mistralai/models/audioencoding.py +18 -0
  45. mistralai/models/audioformat.py +17 -0
  46. mistralai/models/basemodelcard.py +1 -1
  47. mistralai/models/batchjobin.py +18 -9
  48. mistralai/models/batchjobout.py +6 -1
  49. mistralai/models/batchjobsout.py +1 -1
  50. mistralai/models/batchrequest.py +48 -0
  51. mistralai/models/chatcompletionchoice.py +10 -5
  52. mistralai/models/chatcompletionrequest.py +2 -5
  53. mistralai/models/chatcompletionstreamrequest.py +2 -5
  54. mistralai/models/classificationrequest.py +37 -3
  55. mistralai/models/classifierdetailedjobout.py +4 -2
  56. mistralai/models/classifierftmodelout.py +3 -2
  57. mistralai/models/classifierjobout.py +4 -2
  58. mistralai/models/codeinterpretertool.py +1 -1
  59. mistralai/models/completiondetailedjobout.py +5 -2
  60. mistralai/models/completionftmodelout.py +3 -2
  61. mistralai/models/completionjobout.py +5 -2
  62. mistralai/models/completionresponsestreamchoice.py +9 -8
  63. mistralai/models/conversationappendrequest.py +4 -1
  64. mistralai/models/conversationappendstreamrequest.py +4 -1
  65. mistralai/models/conversationhistory.py +2 -1
  66. mistralai/models/conversationmessages.py +1 -1
  67. mistralai/models/conversationrequest.py +5 -1
  68. mistralai/models/conversationresponse.py +2 -1
  69. mistralai/models/conversationrestartrequest.py +4 -1
  70. mistralai/models/conversationrestartstreamrequest.py +4 -1
  71. mistralai/models/conversationstreamrequest.py +5 -1
  72. mistralai/models/documentlibrarytool.py +1 -1
  73. mistralai/models/documenturlchunk.py +1 -1
  74. mistralai/models/embeddingdtype.py +7 -1
  75. mistralai/models/embeddingrequest.py +11 -3
  76. mistralai/models/encodingformat.py +4 -1
  77. mistralai/models/entitytype.py +8 -1
  78. mistralai/models/filepurpose.py +8 -1
  79. mistralai/models/files_api_routes_list_filesop.py +4 -11
  80. mistralai/models/files_api_routes_upload_fileop.py +2 -6
  81. mistralai/models/fileschema.py +3 -5
  82. mistralai/models/finetuneablemodeltype.py +4 -1
  83. mistralai/models/ftclassifierlossfunction.py +4 -1
  84. mistralai/models/ftmodelcard.py +1 -1
  85. mistralai/models/functioncallentry.py +3 -2
  86. mistralai/models/functioncallevent.py +1 -1
  87. mistralai/models/functionresultentry.py +3 -2
  88. mistralai/models/functiontool.py +1 -1
  89. mistralai/models/githubrepositoryin.py +1 -1
  90. mistralai/models/githubrepositoryout.py +1 -1
  91. mistralai/models/httpvalidationerror.py +4 -2
  92. mistralai/models/imagegenerationtool.py +1 -1
  93. mistralai/models/imageurlchunk.py +1 -1
  94. mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +40 -3
  95. mistralai/models/jobsout.py +1 -1
  96. mistralai/models/legacyjobmetadataout.py +1 -1
  97. mistralai/models/messageinputentry.py +9 -3
  98. mistralai/models/messageoutputentry.py +6 -3
  99. mistralai/models/messageoutputevent.py +4 -2
  100. mistralai/models/mistralerror.py +11 -7
  101. mistralai/models/mistralpromptmode.py +1 -1
  102. mistralai/models/modelconversation.py +1 -1
  103. mistralai/models/no_response_error.py +5 -1
  104. mistralai/models/ocrrequest.py +11 -1
  105. mistralai/models/ocrtableobject.py +4 -1
  106. mistralai/models/referencechunk.py +1 -1
  107. mistralai/models/requestsource.py +5 -1
  108. mistralai/models/responsedoneevent.py +1 -1
  109. mistralai/models/responseerrorevent.py +1 -1
  110. mistralai/models/responseformats.py +5 -1
  111. mistralai/models/responsestartedevent.py +1 -1
  112. mistralai/models/responsevalidationerror.py +2 -0
  113. mistralai/models/retrievefileout.py +3 -5
  114. mistralai/models/sampletype.py +7 -1
  115. mistralai/models/sdkerror.py +2 -0
  116. mistralai/models/shareenum.py +7 -1
  117. mistralai/models/sharingdelete.py +2 -4
  118. mistralai/models/sharingin.py +3 -5
  119. mistralai/models/source.py +8 -1
  120. mistralai/models/systemmessage.py +1 -1
  121. mistralai/models/textchunk.py +1 -1
  122. mistralai/models/thinkchunk.py +1 -1
  123. mistralai/models/timestampgranularity.py +1 -1
  124. mistralai/models/tool.py +2 -6
  125. mistralai/models/toolcall.py +2 -6
  126. mistralai/models/toolchoice.py +2 -6
  127. mistralai/models/toolchoiceenum.py +6 -1
  128. mistralai/models/toolexecutiondeltaevent.py +2 -1
  129. mistralai/models/toolexecutiondoneevent.py +2 -1
  130. mistralai/models/toolexecutionentry.py +4 -2
  131. mistralai/models/toolexecutionstartedevent.py +2 -1
  132. mistralai/models/toolfilechunk.py +13 -5
  133. mistralai/models/toolmessage.py +1 -1
  134. mistralai/models/toolreferencechunk.py +15 -5
  135. mistralai/models/tooltypes.py +1 -1
  136. mistralai/models/transcriptionsegmentchunk.py +1 -1
  137. mistralai/models/transcriptionstreamdone.py +1 -1
  138. mistralai/models/transcriptionstreamlanguage.py +1 -1
  139. mistralai/models/transcriptionstreamsegmentdelta.py +1 -1
  140. mistralai/models/transcriptionstreamtextdelta.py +1 -1
  141. mistralai/models/unarchiveftmodelout.py +1 -1
  142. mistralai/models/uploadfileout.py +3 -5
  143. mistralai/models/usermessage.py +1 -1
  144. mistralai/models/wandbintegration.py +1 -1
  145. mistralai/models/wandbintegrationout.py +1 -1
  146. mistralai/models/websearchpremiumtool.py +1 -1
  147. mistralai/models/websearchtool.py +1 -1
  148. mistralai/models_.py +24 -12
  149. mistralai/ocr.py +38 -10
  150. mistralai/sdk.py +2 -2
  151. mistralai/transcriptions.py +28 -12
  152. mistralai/types/basemodel.py +41 -3
  153. mistralai/utils/__init__.py +0 -3
  154. mistralai/utils/annotations.py +32 -8
  155. mistralai/utils/enums.py +60 -0
  156. mistralai/utils/forms.py +21 -10
  157. mistralai/utils/queryparams.py +14 -2
  158. mistralai/utils/requestbodies.py +3 -3
  159. mistralai/utils/retries.py +69 -5
  160. mistralai/utils/serializers.py +0 -20
  161. mistralai/utils/unmarshal_json_response.py +15 -1
  162. {mistralai-1.10.0.dist-info → mistralai-1.11.1.dist-info}/METADATA +144 -159
  163. mistralai-1.11.1.dist-info/RECORD +495 -0
  164. {mistralai-1.10.0.dist-info → mistralai-1.11.1.dist-info}/WHEEL +1 -1
  165. mistralai_azure/_version.py +3 -3
  166. mistralai_azure/basesdk.py +21 -5
  167. mistralai_azure/chat.py +82 -109
  168. mistralai_azure/httpclient.py +0 -1
  169. mistralai_azure/models/__init__.py +66 -4
  170. mistralai_azure/models/assistantmessage.py +1 -1
  171. mistralai_azure/models/chatcompletionchoice.py +10 -7
  172. mistralai_azure/models/chatcompletionrequest.py +24 -10
  173. mistralai_azure/models/chatcompletionstreamrequest.py +24 -10
  174. mistralai_azure/models/completionresponsestreamchoice.py +11 -7
  175. mistralai_azure/models/documenturlchunk.py +1 -1
  176. mistralai_azure/models/httpvalidationerror.py +15 -8
  177. mistralai_azure/models/imageurlchunk.py +1 -1
  178. mistralai_azure/models/mistralazureerror.py +30 -0
  179. mistralai_azure/models/mistralpromptmode.py +1 -1
  180. mistralai_azure/models/no_response_error.py +17 -0
  181. mistralai_azure/models/ocrpageobject.py +32 -5
  182. mistralai_azure/models/ocrrequest.py +20 -1
  183. mistralai_azure/models/ocrtableobject.py +34 -0
  184. mistralai_azure/models/prediction.py +4 -0
  185. mistralai_azure/models/referencechunk.py +1 -1
  186. mistralai_azure/models/responseformat.py +4 -2
  187. mistralai_azure/models/responseformats.py +5 -2
  188. mistralai_azure/models/responsevalidationerror.py +27 -0
  189. mistralai_azure/models/sdkerror.py +32 -14
  190. mistralai_azure/models/systemmessage.py +8 -4
  191. mistralai_azure/models/systemmessagecontentchunks.py +21 -0
  192. mistralai_azure/models/textchunk.py +1 -1
  193. mistralai_azure/models/thinkchunk.py +35 -0
  194. mistralai_azure/models/tool.py +2 -6
  195. mistralai_azure/models/toolcall.py +2 -6
  196. mistralai_azure/models/toolchoice.py +2 -6
  197. mistralai_azure/models/toolchoiceenum.py +6 -1
  198. mistralai_azure/models/toolmessage.py +1 -1
  199. mistralai_azure/models/tooltypes.py +1 -1
  200. mistralai_azure/models/usermessage.py +1 -1
  201. mistralai_azure/ocr.py +39 -40
  202. mistralai_azure/types/basemodel.py +41 -3
  203. mistralai_azure/utils/__init__.py +18 -8
  204. mistralai_azure/utils/annotations.py +32 -8
  205. mistralai_azure/utils/enums.py +60 -0
  206. mistralai_azure/utils/eventstreaming.py +10 -0
  207. mistralai_azure/utils/forms.py +21 -10
  208. mistralai_azure/utils/queryparams.py +14 -2
  209. mistralai_azure/utils/requestbodies.py +3 -3
  210. mistralai_azure/utils/retries.py +69 -5
  211. mistralai_azure/utils/serializers.py +3 -22
  212. mistralai_azure/utils/unmarshal_json_response.py +38 -0
  213. mistralai_gcp/_hooks/types.py +7 -0
  214. mistralai_gcp/_version.py +4 -4
  215. mistralai_gcp/basesdk.py +33 -25
  216. mistralai_gcp/chat.py +98 -109
  217. mistralai_gcp/fim.py +62 -85
  218. mistralai_gcp/httpclient.py +6 -17
  219. mistralai_gcp/models/__init__.py +321 -116
  220. mistralai_gcp/models/assistantmessage.py +2 -2
  221. mistralai_gcp/models/chatcompletionchoice.py +10 -7
  222. mistralai_gcp/models/chatcompletionrequest.py +38 -7
  223. mistralai_gcp/models/chatcompletionresponse.py +6 -6
  224. mistralai_gcp/models/chatcompletionstreamrequest.py +38 -7
  225. mistralai_gcp/models/completionresponsestreamchoice.py +12 -8
  226. mistralai_gcp/models/deltamessage.py +1 -1
  227. mistralai_gcp/models/fimcompletionrequest.py +9 -10
  228. mistralai_gcp/models/fimcompletionresponse.py +6 -6
  229. mistralai_gcp/models/fimcompletionstreamrequest.py +9 -10
  230. mistralai_gcp/models/httpvalidationerror.py +15 -8
  231. mistralai_gcp/models/imageurl.py +1 -1
  232. mistralai_gcp/models/imageurlchunk.py +1 -1
  233. mistralai_gcp/models/jsonschema.py +1 -1
  234. mistralai_gcp/models/mistralgcperror.py +30 -0
  235. mistralai_gcp/models/mistralpromptmode.py +8 -0
  236. mistralai_gcp/models/no_response_error.py +17 -0
  237. mistralai_gcp/models/prediction.py +4 -0
  238. mistralai_gcp/models/referencechunk.py +1 -1
  239. mistralai_gcp/models/responseformat.py +5 -3
  240. mistralai_gcp/models/responseformats.py +5 -2
  241. mistralai_gcp/models/responsevalidationerror.py +27 -0
  242. mistralai_gcp/models/sdkerror.py +32 -14
  243. mistralai_gcp/models/systemmessage.py +8 -4
  244. mistralai_gcp/models/systemmessagecontentchunks.py +21 -0
  245. mistralai_gcp/models/textchunk.py +1 -1
  246. mistralai_gcp/models/thinkchunk.py +35 -0
  247. mistralai_gcp/models/tool.py +2 -6
  248. mistralai_gcp/models/toolcall.py +2 -6
  249. mistralai_gcp/models/toolchoice.py +2 -6
  250. mistralai_gcp/models/toolchoiceenum.py +6 -1
  251. mistralai_gcp/models/toolmessage.py +2 -2
  252. mistralai_gcp/models/tooltypes.py +1 -1
  253. mistralai_gcp/models/usageinfo.py +71 -8
  254. mistralai_gcp/models/usermessage.py +2 -2
  255. mistralai_gcp/sdk.py +12 -10
  256. mistralai_gcp/sdkconfiguration.py +0 -7
  257. mistralai_gcp/types/basemodel.py +41 -3
  258. mistralai_gcp/utils/__init__.py +141 -46
  259. mistralai_gcp/utils/annotations.py +32 -8
  260. mistralai_gcp/utils/datetimes.py +23 -0
  261. mistralai_gcp/utils/enums.py +125 -25
  262. mistralai_gcp/utils/eventstreaming.py +10 -0
  263. mistralai_gcp/utils/forms.py +62 -30
  264. mistralai_gcp/utils/queryparams.py +14 -2
  265. mistralai_gcp/utils/requestbodies.py +3 -3
  266. mistralai_gcp/utils/retries.py +69 -5
  267. mistralai_gcp/utils/serializers.py +33 -23
  268. mistralai_gcp/utils/unmarshal_json_response.py +38 -0
  269. mistralai-1.10.0.dist-info/RECORD +0 -475
  270. {mistralai-1.10.0.dist-info → mistralai-1.11.1.dist-info}/licenses/LICENSE +0 -0
mistralai/chat.py CHANGED
@@ -3,6 +3,14 @@
 from .basesdk import BaseSDK
 from mistralai import models, utils
 from mistralai._hooks import HookContext
+from mistralai.models import (
+    chatcompletionrequest as models_chatcompletionrequest,
+    chatcompletionstreamrequest as models_chatcompletionstreamrequest,
+    mistralpromptmode as models_mistralpromptmode,
+    prediction as models_prediction,
+    responseformat as models_responseformat,
+    tool as models_tool,
+)
 from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import eventstreaming, get_security_from_env
 from mistralai.utils.unmarshal_json_response import unmarshal_json_response
@@ -98,34 +106,47 @@ class Chat(BaseSDK):
         self,
         *,
         model: str,
-        messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
+        messages: Union[
+            List[models_chatcompletionrequest.Messages],
+            List[models_chatcompletionrequest.MessagesTypedDict],
+        ],
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = None,
         max_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = False,
-        stop: Optional[Union[models.Stop, models.StopTypedDict]] = None,
+        stop: Optional[
+            Union[
+                models_chatcompletionrequest.Stop,
+                models_chatcompletionrequest.StopTypedDict,
+            ]
+        ] = None,
         random_seed: OptionalNullable[int] = UNSET,
         metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         response_format: Optional[
-            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
+            Union[
+                models_responseformat.ResponseFormat,
+                models_responseformat.ResponseFormatTypedDict,
+            ]
         ] = None,
         tools: OptionalNullable[
-            Union[List[models.Tool], List[models.ToolTypedDict]]
+            Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]]
         ] = UNSET,
         tool_choice: Optional[
             Union[
-                models.ChatCompletionRequestToolChoice,
-                models.ChatCompletionRequestToolChoiceTypedDict,
+                models_chatcompletionrequest.ChatCompletionRequestToolChoice,
+                models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict,
             ]
         ] = None,
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         n: OptionalNullable[int] = UNSET,
         prediction: Optional[
-            Union[models.Prediction, models.PredictionTypedDict]
+            Union[models_prediction.Prediction, models_prediction.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
-        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
+        prompt_mode: OptionalNullable[
+            models_mistralpromptmode.MistralPromptMode
+        ] = UNSET,
         safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -212,6 +233,7 @@ class Chat(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatCompletionRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -228,7 +250,7 @@ class Chat(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="chat_completion_v1_chat_completions_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
@@ -259,34 +281,47 @@ class Chat(BaseSDK):
         self,
         *,
         model: str,
-        messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
+        messages: Union[
+            List[models_chatcompletionrequest.Messages],
+            List[models_chatcompletionrequest.MessagesTypedDict],
+        ],
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = None,
         max_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = False,
-        stop: Optional[Union[models.Stop, models.StopTypedDict]] = None,
+        stop: Optional[
+            Union[
+                models_chatcompletionrequest.Stop,
+                models_chatcompletionrequest.StopTypedDict,
+            ]
+        ] = None,
         random_seed: OptionalNullable[int] = UNSET,
         metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         response_format: Optional[
-            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
+            Union[
+                models_responseformat.ResponseFormat,
+                models_responseformat.ResponseFormatTypedDict,
+            ]
         ] = None,
         tools: OptionalNullable[
-            Union[List[models.Tool], List[models.ToolTypedDict]]
+            Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]]
         ] = UNSET,
         tool_choice: Optional[
             Union[
-                models.ChatCompletionRequestToolChoice,
-                models.ChatCompletionRequestToolChoiceTypedDict,
+                models_chatcompletionrequest.ChatCompletionRequestToolChoice,
+                models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict,
             ]
         ] = None,
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         n: OptionalNullable[int] = UNSET,
         prediction: Optional[
-            Union[models.Prediction, models.PredictionTypedDict]
+            Union[models_prediction.Prediction, models_prediction.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
-        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
+        prompt_mode: OptionalNullable[
+            models_mistralpromptmode.MistralPromptMode
+        ] = UNSET,
         safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -373,6 +408,7 @@ class Chat(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatCompletionRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -389,7 +425,7 @@ class Chat(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="chat_completion_v1_chat_completions_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
@@ -421,8 +457,12 @@ class Chat(BaseSDK):
         *,
         model: str,
         messages: Union[
-            List[models.ChatCompletionStreamRequestMessages],
-            List[models.ChatCompletionStreamRequestMessagesTypedDict],
+            List[
+                models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages
+            ],
+            List[
+                models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict
+            ],
         ],
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = None,
@@ -430,32 +470,37 @@ class Chat(BaseSDK):
         stream: Optional[bool] = True,
         stop: Optional[
             Union[
-                models.ChatCompletionStreamRequestStop,
-                models.ChatCompletionStreamRequestStopTypedDict,
+                models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop,
+                models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict,
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
         metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         response_format: Optional[
-            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
+            Union[
+                models_responseformat.ResponseFormat,
+                models_responseformat.ResponseFormatTypedDict,
+            ]
         ] = None,
         tools: OptionalNullable[
-            Union[List[models.Tool], List[models.ToolTypedDict]]
+            Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]]
         ] = UNSET,
         tool_choice: Optional[
             Union[
-                models.ChatCompletionStreamRequestToolChoice,
-                models.ChatCompletionStreamRequestToolChoiceTypedDict,
+                models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice,
+                models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict,
             ]
         ] = None,
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         n: OptionalNullable[int] = UNSET,
         prediction: Optional[
-            Union[models.Prediction, models.PredictionTypedDict]
+            Union[models_prediction.Prediction, models_prediction.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
-        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
+        prompt_mode: OptionalNullable[
+            models_mistralpromptmode.MistralPromptMode
+        ] = UNSET,
         safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -546,6 +591,7 @@ class Chat(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatCompletionStreamRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -562,7 +608,7 @@ class Chat(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_chat",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
@@ -602,8 +648,12 @@ class Chat(BaseSDK):
         *,
         model: str,
         messages: Union[
-            List[models.ChatCompletionStreamRequestMessages],
-            List[models.ChatCompletionStreamRequestMessagesTypedDict],
+            List[
+                models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages
+            ],
+            List[
+                models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict
+            ],
         ],
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = None,
@@ -611,32 +661,37 @@ class Chat(BaseSDK):
         stream: Optional[bool] = True,
         stop: Optional[
             Union[
-                models.ChatCompletionStreamRequestStop,
-                models.ChatCompletionStreamRequestStopTypedDict,
+                models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop,
+                models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict,
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
         metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         response_format: Optional[
-            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
+            Union[
+                models_responseformat.ResponseFormat,
+                models_responseformat.ResponseFormatTypedDict,
+            ]
         ] = None,
         tools: OptionalNullable[
-            Union[List[models.Tool], List[models.ToolTypedDict]]
+            Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]]
         ] = UNSET,
         tool_choice: Optional[
             Union[
-                models.ChatCompletionStreamRequestToolChoice,
-                models.ChatCompletionStreamRequestToolChoiceTypedDict,
+                models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice,
+                models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict,
             ]
         ] = None,
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         n: OptionalNullable[int] = UNSET,
         prediction: Optional[
-            Union[models.Prediction, models.PredictionTypedDict]
+            Union[models_prediction.Prediction, models_prediction.PredictionTypedDict]
         ] = None,
         parallel_tool_calls: Optional[bool] = None,
-        prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET,
+        prompt_mode: OptionalNullable[
+            models_mistralpromptmode.MistralPromptMode
+        ] = UNSET,
         safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -727,6 +782,7 @@ class Chat(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatCompletionStreamRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -743,7 +799,7 @@ class Chat(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_chat",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
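Judging from the hunks shown above, the chat.py changes are annotation and plumbing updates: type hints now reference the concrete models submodules, and the request builder passes allow_empty_value=None and oauth2_scopes=None internally. A minimal sketch, not taken from the diff, of a call that should look the same on 1.10.0 and 1.11.1 (the model id, prompt, and environment variable are illustrative assumptions):

```python
# Minimal sketch, assuming MISTRAL_API_KEY is set; not part of the diff itself.
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
res = client.chat.complete(
    model="mistral-small-latest",  # illustrative model id
    messages=[{"role": "user", "content": "Say hello."}],
)
print(res.choices[0].message.content)
```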
mistralai/classifiers.py CHANGED
@@ -3,10 +3,15 @@
 from .basesdk import BaseSDK
 from mistralai import models, utils
 from mistralai._hooks import HookContext
+from mistralai.models import (
+    chatmoderationrequest as models_chatmoderationrequest,
+    classificationrequest as models_classificationrequest,
+    inputs as models_inputs,
+)
 from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import get_security_from_env
 from mistralai.utils.unmarshal_json_response import unmarshal_json_response
-from typing import Any, Mapping, Optional, Union
+from typing import Any, Dict, Mapping, Optional, Union


 class Classifiers(BaseSDK):
@@ -17,9 +22,10 @@ class Classifiers(BaseSDK):
         *,
         model: str,
         inputs: Union[
-            models.ClassificationRequestInputs,
-            models.ClassificationRequestInputsTypedDict,
+            models_classificationrequest.ClassificationRequestInputs,
+            models_classificationrequest.ClassificationRequestInputsTypedDict,
         ],
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -29,6 +35,7 @@ class Classifiers(BaseSDK):

         :param model: ID of the model to use.
         :param inputs: Text to classify.
+        :param metadata:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -46,6 +53,7 @@ class Classifiers(BaseSDK):

         request = models.ClassificationRequest(
             model=model,
+            metadata=metadata,
             inputs=inputs,
         )

@@ -65,6 +73,7 @@ class Classifiers(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ClassificationRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -81,7 +90,7 @@ class Classifiers(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="moderations_v1_moderations_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
@@ -113,9 +122,10 @@ class Classifiers(BaseSDK):
         *,
         model: str,
         inputs: Union[
-            models.ClassificationRequestInputs,
-            models.ClassificationRequestInputsTypedDict,
+            models_classificationrequest.ClassificationRequestInputs,
+            models_classificationrequest.ClassificationRequestInputsTypedDict,
         ],
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -125,6 +135,7 @@ class Classifiers(BaseSDK):

         :param model: ID of the model to use.
         :param inputs: Text to classify.
+        :param metadata:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -142,6 +153,7 @@ class Classifiers(BaseSDK):

         request = models.ClassificationRequest(
             model=model,
+            metadata=metadata,
             inputs=inputs,
         )

@@ -161,6 +173,7 @@ class Classifiers(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ClassificationRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -177,7 +190,7 @@ class Classifiers(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="moderations_v1_moderations_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
@@ -208,8 +221,8 @@ class Classifiers(BaseSDK):
         self,
         *,
         inputs: Union[
-            models.ChatModerationRequestInputs,
-            models.ChatModerationRequestInputsTypedDict,
+            models_chatmoderationrequest.ChatModerationRequestInputs,
+            models_chatmoderationrequest.ChatModerationRequestInputsTypedDict,
         ],
         model: str,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -257,6 +270,7 @@ class Classifiers(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatModerationRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -273,7 +287,7 @@ class Classifiers(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="chat_moderations_v1_chat_moderations_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
@@ -304,8 +318,8 @@ class Classifiers(BaseSDK):
         self,
         *,
         inputs: Union[
-            models.ChatModerationRequestInputs,
-            models.ChatModerationRequestInputsTypedDict,
+            models_chatmoderationrequest.ChatModerationRequestInputs,
+            models_chatmoderationrequest.ChatModerationRequestInputsTypedDict,
         ],
         model: str,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -353,6 +367,7 @@ class Classifiers(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatModerationRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -369,7 +384,7 @@ class Classifiers(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="chat_moderations_v1_chat_moderations_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
@@ -401,9 +416,10 @@ class Classifiers(BaseSDK):
         *,
         model: str,
         inputs: Union[
-            models.ClassificationRequestInputs,
-            models.ClassificationRequestInputsTypedDict,
+            models_classificationrequest.ClassificationRequestInputs,
+            models_classificationrequest.ClassificationRequestInputsTypedDict,
         ],
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -413,6 +429,7 @@ class Classifiers(BaseSDK):

         :param model: ID of the model to use.
         :param inputs: Text to classify.
+        :param metadata:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -430,6 +447,7 @@ class Classifiers(BaseSDK):

         request = models.ClassificationRequest(
             model=model,
+            metadata=metadata,
             inputs=inputs,
         )

@@ -449,6 +467,7 @@ class Classifiers(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ClassificationRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -465,7 +484,7 @@ class Classifiers(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="classifications_v1_classifications_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
@@ -497,9 +516,10 @@ class Classifiers(BaseSDK):
         *,
         model: str,
         inputs: Union[
-            models.ClassificationRequestInputs,
-            models.ClassificationRequestInputsTypedDict,
+            models_classificationrequest.ClassificationRequestInputs,
+            models_classificationrequest.ClassificationRequestInputsTypedDict,
         ],
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -509,6 +529,7 @@ class Classifiers(BaseSDK):

         :param model: ID of the model to use.
         :param inputs: Text to classify.
+        :param metadata:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -526,6 +547,7 @@ class Classifiers(BaseSDK):

         request = models.ClassificationRequest(
             model=model,
+            metadata=metadata,
             inputs=inputs,
         )

@@ -545,6 +567,7 @@ class Classifiers(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ClassificationRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -561,7 +584,7 @@ class Classifiers(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="classifications_v1_classifications_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
@@ -592,7 +615,7 @@ class Classifiers(BaseSDK):
         self,
         *,
         model: str,
-        inputs: Union[models.Inputs, models.InputsTypedDict],
+        inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict],
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -638,6 +661,7 @@ class Classifiers(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatClassificationRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -654,7 +678,7 @@ class Classifiers(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="chat_classifications_v1_chat_classifications_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
@@ -685,7 +709,7 @@ class Classifiers(BaseSDK):
         self,
         *,
         model: str,
-        inputs: Union[models.Inputs, models.InputsTypedDict],
+        inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict],
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -731,6 +755,7 @@ class Classifiers(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatClassificationRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -747,7 +772,7 @@ class Classifiers(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="chat_classifications_v1_chat_classifications_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
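Beyond the same annotation and plumbing updates, the classifiers.py hunks add an optional metadata keyword to the classification and moderation request paths. A minimal sketch, not taken from the diff, of how the new parameter might be passed; the model id, input text, and metadata keys are illustrative assumptions:

```python
# Minimal sketch, assuming MISTRAL_API_KEY is set; not part of the diff itself.
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
res = client.classifiers.classify(
    model="my-classifier-model",       # placeholder model id
    inputs=["Text to classify."],
    metadata={"source": "example"},    # new optional keyword in 1.11.1; keys are illustrative
)
print(res)
```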