mistralai 1.10.1__py3-none-any.whl → 1.12.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (251)
  1. mistralai/_version.py +3 -3
  2. mistralai/accesses.py +22 -12
  3. mistralai/agents.py +88 -44
  4. mistralai/audio.py +20 -0
  5. mistralai/basesdk.py +6 -0
  6. mistralai/chat.py +96 -40
  7. mistralai/classifiers.py +35 -22
  8. mistralai/conversations.py +234 -72
  9. mistralai/documents.py +72 -26
  10. mistralai/embeddings.py +17 -8
  11. mistralai/extra/__init__.py +48 -0
  12. mistralai/extra/exceptions.py +49 -4
  13. mistralai/extra/realtime/__init__.py +25 -0
  14. mistralai/extra/realtime/connection.py +207 -0
  15. mistralai/extra/realtime/transcription.py +271 -0
  16. mistralai/files.py +64 -24
  17. mistralai/fim.py +20 -12
  18. mistralai/httpclient.py +0 -1
  19. mistralai/jobs.py +65 -26
  20. mistralai/libraries.py +20 -10
  21. mistralai/mistral_agents.py +825 -34
  22. mistralai/mistral_jobs.py +33 -14
  23. mistralai/models/__init__.py +119 -0
  24. mistralai/models/agent.py +1 -1
  25. mistralai/models/agentaliasresponse.py +23 -0
  26. mistralai/models/agentconversation.py +15 -5
  27. mistralai/models/agenthandoffdoneevent.py +1 -1
  28. mistralai/models/agenthandoffentry.py +3 -2
  29. mistralai/models/agenthandoffstartedevent.py +1 -1
  30. mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py +26 -0
  31. mistralai/models/agents_api_v1_agents_get_versionop.py +21 -0
  32. mistralai/models/agents_api_v1_agents_getop.py +12 -3
  33. mistralai/models/agents_api_v1_agents_list_version_aliasesop.py +16 -0
  34. mistralai/models/agents_api_v1_agents_list_versionsop.py +33 -0
  35. mistralai/models/agents_api_v1_agents_listop.py +4 -0
  36. mistralai/models/agentscompletionrequest.py +2 -5
  37. mistralai/models/agentscompletionstreamrequest.py +2 -5
  38. mistralai/models/archiveftmodelout.py +1 -1
  39. mistralai/models/assistantmessage.py +1 -1
  40. mistralai/models/audiochunk.py +1 -1
  41. mistralai/models/audioencoding.py +6 -1
  42. mistralai/models/audioformat.py +2 -4
  43. mistralai/models/audiotranscriptionrequest.py +8 -0
  44. mistralai/models/audiotranscriptionrequeststream.py +8 -0
  45. mistralai/models/basemodelcard.py +1 -1
  46. mistralai/models/batchjobin.py +2 -4
  47. mistralai/models/batchjobout.py +1 -1
  48. mistralai/models/batchjobsout.py +1 -1
  49. mistralai/models/chatcompletionchoice.py +10 -5
  50. mistralai/models/chatcompletionrequest.py +2 -5
  51. mistralai/models/chatcompletionstreamrequest.py +2 -5
  52. mistralai/models/classifierdetailedjobout.py +4 -2
  53. mistralai/models/classifierftmodelout.py +3 -2
  54. mistralai/models/classifierjobout.py +4 -2
  55. mistralai/models/codeinterpretertool.py +1 -1
  56. mistralai/models/completiondetailedjobout.py +5 -2
  57. mistralai/models/completionftmodelout.py +3 -2
  58. mistralai/models/completionjobout.py +5 -2
  59. mistralai/models/completionresponsestreamchoice.py +9 -8
  60. mistralai/models/conversationappendrequest.py +4 -1
  61. mistralai/models/conversationappendstreamrequest.py +4 -1
  62. mistralai/models/conversationhistory.py +2 -1
  63. mistralai/models/conversationmessages.py +1 -1
  64. mistralai/models/conversationrequest.py +13 -3
  65. mistralai/models/conversationresponse.py +2 -1
  66. mistralai/models/conversationrestartrequest.py +22 -5
  67. mistralai/models/conversationrestartstreamrequest.py +24 -5
  68. mistralai/models/conversationstreamrequest.py +17 -3
  69. mistralai/models/documentlibrarytool.py +1 -1
  70. mistralai/models/documenturlchunk.py +1 -1
  71. mistralai/models/embeddingdtype.py +7 -1
  72. mistralai/models/encodingformat.py +4 -1
  73. mistralai/models/entitytype.py +8 -1
  74. mistralai/models/filepurpose.py +8 -1
  75. mistralai/models/files_api_routes_list_filesop.py +12 -12
  76. mistralai/models/files_api_routes_upload_fileop.py +2 -6
  77. mistralai/models/fileschema.py +3 -5
  78. mistralai/models/finetuneablemodeltype.py +4 -1
  79. mistralai/models/ftclassifierlossfunction.py +4 -1
  80. mistralai/models/ftmodelcard.py +1 -1
  81. mistralai/models/functioncallentry.py +3 -2
  82. mistralai/models/functioncallevent.py +1 -1
  83. mistralai/models/functionresultentry.py +3 -2
  84. mistralai/models/functiontool.py +1 -1
  85. mistralai/models/githubrepositoryin.py +1 -1
  86. mistralai/models/githubrepositoryout.py +1 -1
  87. mistralai/models/httpvalidationerror.py +4 -2
  88. mistralai/models/imagegenerationtool.py +1 -1
  89. mistralai/models/imageurlchunk.py +1 -1
  90. mistralai/models/jobsout.py +1 -1
  91. mistralai/models/legacyjobmetadataout.py +1 -1
  92. mistralai/models/messageinputentry.py +9 -3
  93. mistralai/models/messageoutputentry.py +6 -3
  94. mistralai/models/messageoutputevent.py +4 -2
  95. mistralai/models/mistralerror.py +11 -7
  96. mistralai/models/mistralpromptmode.py +5 -1
  97. mistralai/models/modelcapabilities.py +3 -0
  98. mistralai/models/modelconversation.py +1 -1
  99. mistralai/models/no_response_error.py +5 -1
  100. mistralai/models/ocrrequest.py +11 -1
  101. mistralai/models/ocrtableobject.py +4 -1
  102. mistralai/models/realtimetranscriptionerror.py +27 -0
  103. mistralai/models/realtimetranscriptionerrordetail.py +29 -0
  104. mistralai/models/realtimetranscriptionsession.py +20 -0
  105. mistralai/models/realtimetranscriptionsessioncreated.py +30 -0
  106. mistralai/models/realtimetranscriptionsessionupdated.py +30 -0
  107. mistralai/models/referencechunk.py +1 -1
  108. mistralai/models/requestsource.py +5 -1
  109. mistralai/models/responsedoneevent.py +1 -1
  110. mistralai/models/responseerrorevent.py +1 -1
  111. mistralai/models/responseformats.py +5 -1
  112. mistralai/models/responsestartedevent.py +1 -1
  113. mistralai/models/responsevalidationerror.py +2 -0
  114. mistralai/models/retrievefileout.py +3 -5
  115. mistralai/models/sampletype.py +7 -1
  116. mistralai/models/sdkerror.py +2 -0
  117. mistralai/models/shareenum.py +7 -1
  118. mistralai/models/sharingdelete.py +2 -4
  119. mistralai/models/sharingin.py +3 -5
  120. mistralai/models/source.py +8 -1
  121. mistralai/models/systemmessage.py +1 -1
  122. mistralai/models/textchunk.py +1 -1
  123. mistralai/models/thinkchunk.py +1 -1
  124. mistralai/models/timestampgranularity.py +4 -1
  125. mistralai/models/tool.py +2 -6
  126. mistralai/models/toolcall.py +2 -6
  127. mistralai/models/toolchoice.py +2 -6
  128. mistralai/models/toolchoiceenum.py +6 -1
  129. mistralai/models/toolexecutiondeltaevent.py +2 -1
  130. mistralai/models/toolexecutiondoneevent.py +2 -1
  131. mistralai/models/toolexecutionentry.py +4 -2
  132. mistralai/models/toolexecutionstartedevent.py +2 -1
  133. mistralai/models/toolfilechunk.py +2 -1
  134. mistralai/models/toolmessage.py +1 -1
  135. mistralai/models/toolreferencechunk.py +2 -1
  136. mistralai/models/tooltypes.py +1 -1
  137. mistralai/models/transcriptionsegmentchunk.py +42 -3
  138. mistralai/models/transcriptionstreamdone.py +1 -1
  139. mistralai/models/transcriptionstreamlanguage.py +1 -1
  140. mistralai/models/transcriptionstreamsegmentdelta.py +39 -3
  141. mistralai/models/transcriptionstreamtextdelta.py +1 -1
  142. mistralai/models/unarchiveftmodelout.py +1 -1
  143. mistralai/models/uploadfileout.py +3 -5
  144. mistralai/models/usermessage.py +1 -1
  145. mistralai/models/wandbintegration.py +1 -1
  146. mistralai/models/wandbintegrationout.py +1 -1
  147. mistralai/models/websearchpremiumtool.py +1 -1
  148. mistralai/models/websearchtool.py +1 -1
  149. mistralai/models_.py +24 -12
  150. mistralai/ocr.py +38 -10
  151. mistralai/sdk.py +2 -2
  152. mistralai/transcriptions.py +52 -12
  153. mistralai/types/basemodel.py +41 -3
  154. mistralai/utils/__init__.py +0 -3
  155. mistralai/utils/annotations.py +32 -8
  156. mistralai/utils/enums.py +60 -0
  157. mistralai/utils/forms.py +21 -10
  158. mistralai/utils/queryparams.py +14 -2
  159. mistralai/utils/requestbodies.py +3 -3
  160. mistralai/utils/retries.py +69 -5
  161. mistralai/utils/serializers.py +0 -20
  162. mistralai/utils/unmarshal_json_response.py +15 -1
  163. {mistralai-1.10.1.dist-info → mistralai-1.12.0.dist-info}/METADATA +28 -31
  164. {mistralai-1.10.1.dist-info → mistralai-1.12.0.dist-info}/RECORD +251 -237
  165. mistralai_azure/_version.py +3 -3
  166. mistralai_azure/basesdk.py +6 -0
  167. mistralai_azure/chat.py +27 -15
  168. mistralai_azure/httpclient.py +0 -1
  169. mistralai_azure/models/__init__.py +16 -1
  170. mistralai_azure/models/assistantmessage.py +1 -1
  171. mistralai_azure/models/chatcompletionchoice.py +10 -7
  172. mistralai_azure/models/chatcompletionrequest.py +8 -6
  173. mistralai_azure/models/chatcompletionstreamrequest.py +8 -6
  174. mistralai_azure/models/completionresponsestreamchoice.py +11 -7
  175. mistralai_azure/models/documenturlchunk.py +1 -1
  176. mistralai_azure/models/httpvalidationerror.py +4 -2
  177. mistralai_azure/models/imageurlchunk.py +1 -1
  178. mistralai_azure/models/mistralazureerror.py +11 -7
  179. mistralai_azure/models/mistralpromptmode.py +1 -1
  180. mistralai_azure/models/no_response_error.py +5 -1
  181. mistralai_azure/models/ocrpageobject.py +32 -5
  182. mistralai_azure/models/ocrrequest.py +20 -1
  183. mistralai_azure/models/ocrtableobject.py +34 -0
  184. mistralai_azure/models/referencechunk.py +1 -1
  185. mistralai_azure/models/responseformats.py +5 -1
  186. mistralai_azure/models/responsevalidationerror.py +2 -0
  187. mistralai_azure/models/sdkerror.py +2 -0
  188. mistralai_azure/models/systemmessage.py +1 -1
  189. mistralai_azure/models/textchunk.py +1 -1
  190. mistralai_azure/models/thinkchunk.py +1 -1
  191. mistralai_azure/models/tool.py +2 -6
  192. mistralai_azure/models/toolcall.py +2 -6
  193. mistralai_azure/models/toolchoice.py +2 -6
  194. mistralai_azure/models/toolchoiceenum.py +6 -1
  195. mistralai_azure/models/toolmessage.py +1 -1
  196. mistralai_azure/models/tooltypes.py +1 -1
  197. mistralai_azure/models/usermessage.py +1 -1
  198. mistralai_azure/ocr.py +26 -6
  199. mistralai_azure/types/basemodel.py +41 -3
  200. mistralai_azure/utils/__init__.py +0 -3
  201. mistralai_azure/utils/annotations.py +32 -8
  202. mistralai_azure/utils/enums.py +60 -0
  203. mistralai_azure/utils/forms.py +21 -10
  204. mistralai_azure/utils/queryparams.py +14 -2
  205. mistralai_azure/utils/requestbodies.py +3 -3
  206. mistralai_azure/utils/retries.py +69 -5
  207. mistralai_azure/utils/serializers.py +0 -20
  208. mistralai_azure/utils/unmarshal_json_response.py +15 -1
  209. mistralai_gcp/_version.py +3 -3
  210. mistralai_gcp/basesdk.py +6 -0
  211. mistralai_gcp/chat.py +27 -15
  212. mistralai_gcp/fim.py +27 -15
  213. mistralai_gcp/httpclient.py +0 -1
  214. mistralai_gcp/models/assistantmessage.py +1 -1
  215. mistralai_gcp/models/chatcompletionchoice.py +10 -7
  216. mistralai_gcp/models/chatcompletionrequest.py +8 -6
  217. mistralai_gcp/models/chatcompletionstreamrequest.py +8 -6
  218. mistralai_gcp/models/completionresponsestreamchoice.py +11 -7
  219. mistralai_gcp/models/fimcompletionrequest.py +6 -1
  220. mistralai_gcp/models/fimcompletionstreamrequest.py +6 -1
  221. mistralai_gcp/models/httpvalidationerror.py +4 -2
  222. mistralai_gcp/models/imageurlchunk.py +1 -1
  223. mistralai_gcp/models/mistralgcperror.py +11 -7
  224. mistralai_gcp/models/mistralpromptmode.py +1 -1
  225. mistralai_gcp/models/no_response_error.py +5 -1
  226. mistralai_gcp/models/referencechunk.py +1 -1
  227. mistralai_gcp/models/responseformats.py +5 -1
  228. mistralai_gcp/models/responsevalidationerror.py +2 -0
  229. mistralai_gcp/models/sdkerror.py +2 -0
  230. mistralai_gcp/models/systemmessage.py +1 -1
  231. mistralai_gcp/models/textchunk.py +1 -1
  232. mistralai_gcp/models/thinkchunk.py +1 -1
  233. mistralai_gcp/models/tool.py +2 -6
  234. mistralai_gcp/models/toolcall.py +2 -6
  235. mistralai_gcp/models/toolchoice.py +2 -6
  236. mistralai_gcp/models/toolchoiceenum.py +6 -1
  237. mistralai_gcp/models/toolmessage.py +1 -1
  238. mistralai_gcp/models/tooltypes.py +1 -1
  239. mistralai_gcp/models/usermessage.py +1 -1
  240. mistralai_gcp/types/basemodel.py +41 -3
  241. mistralai_gcp/utils/__init__.py +0 -3
  242. mistralai_gcp/utils/annotations.py +32 -8
  243. mistralai_gcp/utils/enums.py +60 -0
  244. mistralai_gcp/utils/forms.py +21 -10
  245. mistralai_gcp/utils/queryparams.py +14 -2
  246. mistralai_gcp/utils/requestbodies.py +3 -3
  247. mistralai_gcp/utils/retries.py +69 -5
  248. mistralai_gcp/utils/serializers.py +0 -20
  249. mistralai_gcp/utils/unmarshal_json_response.py +15 -1
  250. {mistralai-1.10.1.dist-info → mistralai-1.12.0.dist-info}/WHEEL +0 -0
  251. {mistralai-1.10.1.dist-info → mistralai-1.12.0.dist-info}/licenses/LICENSE +0 -0
mistralai_azure/_version.py CHANGED
@@ -3,10 +3,10 @@
 import importlib.metadata
 
 __title__: str = "mistralai_azure"
-__version__: str = "1.7.0"
+__version__: str = "1.8.1"
 __openapi_doc_version__: str = "1.0.0"
-__gen_version__: str = "2.687.13"
-__user_agent__: str = "speakeasy-sdk/python 1.7.0 2.687.13 1.0.0 mistralai_azure"
+__gen_version__: str = "2.794.1"
+__user_agent__: str = "speakeasy-sdk/python 1.8.1 2.794.1 1.0.0 mistralai_azure"
 
 try:
     if __package__ is not None:
mistralai_azure/basesdk.py CHANGED
@@ -60,6 +60,7 @@ class BaseSDK:
         ] = None,
         url_override: Optional[str] = None,
         http_headers: Optional[Mapping[str, str]] = None,
+        allow_empty_value: Optional[List[str]] = None,
     ) -> httpx.Request:
         client = self.sdk_configuration.async_client
         return self._build_request_with_client(
@@ -80,6 +81,7 @@ class BaseSDK:
             get_serialized_body,
             url_override,
             http_headers,
+            allow_empty_value,
         )
 
     def _build_request(
@@ -102,6 +104,7 @@ class BaseSDK:
         ] = None,
         url_override: Optional[str] = None,
         http_headers: Optional[Mapping[str, str]] = None,
+        allow_empty_value: Optional[List[str]] = None,
     ) -> httpx.Request:
         client = self.sdk_configuration.client
         return self._build_request_with_client(
@@ -122,6 +125,7 @@ class BaseSDK:
             get_serialized_body,
             url_override,
             http_headers,
+            allow_empty_value,
         )
 
     def _build_request_with_client(
@@ -145,6 +149,7 @@ class BaseSDK:
         ] = None,
         url_override: Optional[str] = None,
         http_headers: Optional[Mapping[str, str]] = None,
+        allow_empty_value: Optional[List[str]] = None,
     ) -> httpx.Request:
         query_params = {}
 
@@ -160,6 +165,7 @@ class BaseSDK:
             query_params = utils.get_query_params(
                 request if request_has_query_params else None,
                 _globals if request_has_query_params else None,
+                allow_empty_value,
            )
         else:
             # Pick up the query parameter from the override so they can be
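The new `allow_empty_value` argument is threaded from each request builder down to `utils.get_query_params` (the matching `queryparams.py +14 -2` change appears in the file list above). As a hedged, hypothetical illustration of what such an allow-list typically controls, here is a self-contained sketch of keeping named query keys even when their values are empty; this is not the SDK's actual implementation:

# Hypothetical sketch, not mistralai_azure's real code: an allow-list that
# preserves selected query parameters whose values are empty strings.
from typing import Dict, List, Optional

def build_query(params: Dict[str, str],
                allow_empty_value: Optional[List[str]] = None) -> Dict[str, str]:
    allow = set(allow_empty_value or [])
    out: Dict[str, str] = {}
    for key, value in params.items():
        if value or key in allow:  # empty values survive only when allowed
            out[key] = value
    return out

print(build_query({"page": "2", "filter": ""}))              # {'page': '2'}
print(build_query({"page": "2", "filter": ""}, ["filter"]))  # keeps filter=""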
mistralai_azure/chat.py CHANGED
@@ -6,7 +6,7 @@ from mistralai_azure._hooks import HookContext
 from mistralai_azure.types import OptionalNullable, UNSET
 from mistralai_azure.utils import eventstreaming
 from mistralai_azure.utils.unmarshal_json_response import unmarshal_json_response
-from typing import Any, List, Mapping, Optional, Union
+from typing import Any, Dict, List, Mapping, Optional, Union
 
 
 class Chat(BaseSDK):
@@ -23,6 +23,7 @@ class Chat(BaseSDK):
         stream: Optional[bool] = True,
         stop: Optional[Union[models.Stop, models.StopTypedDict]] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         response_format: Optional[
             Union[models.ResponseFormat, models.ResponseFormatTypedDict]
         ] = None,
@@ -48,7 +49,7 @@
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]:
+    ) -> eventstreaming.EventStream[models.CompletionEvent]:
         r"""Stream chat completion
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
@@ -61,6 +62,7 @@ class Chat(BaseSDK):
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
         :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
         :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
@@ -94,6 +96,7 @@ class Chat(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             messages=utils.get_pydantic_model(messages, List[models.Messages]),
             response_format=utils.get_pydantic_model(
                 response_format, Optional[models.ResponseFormat]
@@ -129,6 +132,7 @@ class Chat(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatCompletionStreamRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )
 
@@ -145,7 +149,7 @@
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_chat",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=self.sdk_configuration.security,
             ),
             request=req,
@@ -189,6 +193,7 @@ class Chat(BaseSDK):
         stream: Optional[bool] = True,
         stop: Optional[Union[models.Stop, models.StopTypedDict]] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         response_format: Optional[
             Union[models.ResponseFormat, models.ResponseFormatTypedDict]
         ] = None,
@@ -214,7 +219,7 @@
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]:
+    ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]:
         r"""Stream chat completion
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
@@ -227,6 +232,7 @@ class Chat(BaseSDK):
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
         :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
         :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
@@ -260,6 +266,7 @@ class Chat(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             messages=utils.get_pydantic_model(messages, List[models.Messages]),
             response_format=utils.get_pydantic_model(
                 response_format, Optional[models.ResponseFormat]
@@ -295,6 +302,7 @@ class Chat(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatCompletionStreamRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )
 
@@ -311,7 +319,7 @@
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_chat",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=self.sdk_configuration.security,
             ),
             request=req,
@@ -363,6 +371,7 @@
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         response_format: Optional[
             Union[models.ResponseFormat, models.ResponseFormatTypedDict]
         ] = None,
@@ -388,7 +397,7 @@
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[models.ChatCompletionResponse]:
+    ) -> models.ChatCompletionResponse:
         r"""Chat Completion
 
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
@@ -399,6 +408,7 @@
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
         :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
         :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
@@ -432,6 +442,7 @@
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             messages=utils.get_pydantic_model(
                 messages, List[models.ChatCompletionRequestMessages]
             ),
@@ -469,6 +480,7 @@
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatCompletionRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )
 
@@ -485,7 +497,7 @@
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="chat_completion_v1_chat_completions_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=self.sdk_configuration.security,
             ),
             request=req,
@@ -495,9 +507,7 @@
 
         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(
-                Optional[models.ChatCompletionResponse], http_res
-            )
+            return unmarshal_json_response(models.ChatCompletionResponse, http_res)
         if utils.match_response(http_res, "422", "application/json"):
             response_data = unmarshal_json_response(
                 models.HTTPValidationErrorData, http_res
@@ -531,6 +541,7 @@
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         response_format: Optional[
             Union[models.ResponseFormat, models.ResponseFormatTypedDict]
         ] = None,
@@ -556,7 +567,7 @@
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[models.ChatCompletionResponse]:
+    ) -> models.ChatCompletionResponse:
         r"""Chat Completion
 
         :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content.
@@ -567,6 +578,7 @@
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.
         :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.
         :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.
@@ -600,6 +612,7 @@
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             messages=utils.get_pydantic_model(
                 messages, List[models.ChatCompletionRequestMessages]
             ),
@@ -637,6 +650,7 @@
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.ChatCompletionRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )
 
@@ -653,7 +667,7 @@
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="chat_completion_v1_chat_completions_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=self.sdk_configuration.security,
             ),
             request=req,
@@ -663,9 +677,7 @@
 
         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(
-                Optional[models.ChatCompletionResponse], http_res
-            )
+            return unmarshal_json_response(models.ChatCompletionResponse, http_res)
         if utils.match_response(http_res, "422", "application/json"):
             response_data = unmarshal_json_response(
                 models.HTTPValidationErrorData, http_res
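Net effect of the chat.py changes: both chat operations accept a pass-through `metadata` dict, and the success paths now return concrete values rather than Optionals, so callers can drop their None checks. A hedged usage sketch against the new signatures; the endpoint, key, message content, and metadata values are placeholders, not taken from this diff:

# Hedged usage sketch for the updated Chat surface; placeholders throughout.
from mistralai_azure import MistralAzure

with MistralAzure(
    azure_api_key="<AZURE_API_KEY>",
    azure_endpoint="<AZURE_ENDPOINT>",
) as sdk:
    # complete() now returns models.ChatCompletionResponse, not Optional[...].
    res = sdk.chat.complete(
        messages=[{"role": "user", "content": "Say hi"}],
        metadata={"trace_id": "abc-123"},  # new pass-through field in this release
    )
    print(res.choices[0].message.content)

    # stream() likewise returns an EventStream directly (no None check needed).
    with sdk.chat.stream(
        messages=[{"role": "user", "content": "Tell me a joke"}]
    ) as event_stream:
        for event in event_stream:
            print(event.data.choices[0].delta.content or "", end="")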
mistralai_azure/httpclient.py CHANGED
@@ -107,7 +107,6 @@ def close_clients(
     # to them from the owning SDK instance and they can be reaped.
     owner.client = None
     owner.async_client = None
-
     if sync_client is not None and not sync_client_supplied:
         try:
             sync_client.close()
mistralai_azure/models/__init__.py CHANGED
@@ -86,8 +86,15 @@ if TYPE_CHECKING:
     from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict
     from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict
     from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict
-    from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict
+    from .ocrrequest import (
+        Document,
+        DocumentTypedDict,
+        OCRRequest,
+        OCRRequestTypedDict,
+        TableFormat,
+    )
     from .ocrresponse import OCRResponse, OCRResponseTypedDict
+    from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict
     from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict
     from .prediction import Prediction, PredictionTypedDict
     from .referencechunk import (
@@ -191,6 +198,7 @@ __all__ = [
     "FileChunk",
     "FileChunkTypedDict",
     "FinishReason",
+    "Format",
     "Function",
     "FunctionCall",
     "FunctionCallTypedDict",
@@ -225,6 +233,8 @@ __all__ = [
     "OCRRequestTypedDict",
     "OCRResponse",
     "OCRResponseTypedDict",
+    "OCRTableObject",
+    "OCRTableObjectTypedDict",
     "OCRUsageInfo",
     "OCRUsageInfoTypedDict",
     "Prediction",
@@ -248,6 +258,7 @@ __all__ = [
     "SystemMessageContentChunksTypedDict",
     "SystemMessageContentTypedDict",
     "SystemMessageTypedDict",
+    "TableFormat",
     "TextChunk",
     "TextChunkTypedDict",
     "ThinkChunk",
@@ -356,8 +367,12 @@ _dynamic_imports: dict[str, str] = {
     "DocumentTypedDict": ".ocrrequest",
     "OCRRequest": ".ocrrequest",
     "OCRRequestTypedDict": ".ocrrequest",
+    "TableFormat": ".ocrrequest",
     "OCRResponse": ".ocrresponse",
     "OCRResponseTypedDict": ".ocrresponse",
+    "Format": ".ocrtableobject",
+    "OCRTableObject": ".ocrtableobject",
+    "OCRTableObjectTypedDict": ".ocrtableobject",
     "OCRUsageInfo": ".ocrusageinfo",
     "OCRUsageInfoTypedDict": ".ocrusageinfo",
     "Prediction": ".prediction",
mistralai_azure/models/assistantmessage.py CHANGED
@@ -25,7 +25,7 @@ AssistantMessageContent = TypeAliasType(
 )
 
 
-AssistantMessageRole = Literal["assistant"]
+AssistantMessageRole = Literal["assistant",]
 
 
 class AssistantMessageTypedDict(TypedDict):
mistralai_azure/models/chatcompletionchoice.py CHANGED
@@ -3,14 +3,19 @@
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
 from mistralai_azure.types import BaseModel, UnrecognizedStr
-from mistralai_azure.utils import validate_open_enum
-from pydantic.functional_validators import PlainValidator
 from typing import Literal, Union
-from typing_extensions import Annotated, TypedDict
+from typing_extensions import TypedDict
 
 
 ChatCompletionChoiceFinishReason = Union[
-    Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr
+    Literal[
+        "stop",
+        "length",
+        "model_length",
+        "error",
+        "tool_calls",
+    ],
+    UnrecognizedStr,
 ]
 
 
@@ -25,6 +30,4 @@ class ChatCompletionChoice(BaseModel):
 
     message: AssistantMessage
 
-    finish_reason: Annotated[
-        ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False))
-    ]
+    finish_reason: ChatCompletionChoiceFinishReason
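Even with the validator annotation gone, `ChatCompletionChoiceFinishReason` stays an open enum: the `UnrecognizedStr` arm means values outside the listed literals still deserialize instead of raising. A hedged sketch of defensive handling (a plain helper function, no SDK calls):

# Hedged sketch: branch on the known finish_reason literals and fall back
# gracefully for any UnrecognizedStr the server may add later.
def describe_finish(finish_reason: str) -> str:
    if finish_reason == "stop":
        return "the model finished naturally"
    if finish_reason in ("length", "model_length"):
        return "output hit a token limit"
    if finish_reason == "tool_calls":
        return "the model requested tool execution"
    if finish_reason == "error":
        return "generation ended with an error"
    return f"unrecognized finish reason: {finish_reason!r}"

print(describe_finish("tool_calls"))
print(describe_finish("some_future_reason"))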
mistralai_azure/models/chatcompletionrequest.py CHANGED
@@ -18,10 +18,9 @@ from mistralai_azure.types import (
     UNSET,
     UNSET_SENTINEL,
 )
-from mistralai_azure.utils import get_discriminator, validate_open_enum
+from mistralai_azure.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from pydantic.functional_validators import PlainValidator
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
 
@@ -89,6 +88,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+    metadata: NotRequired[Nullable[Dict[str, Any]]]
     response_format: NotRequired[ResponseFormatTypedDict]
     r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
@@ -136,6 +136,8 @@ class ChatCompletionRequest(BaseModel):
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
 
+    metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
     response_format: Optional[ResponseFormat] = None
     r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
 
@@ -160,9 +162,7 @@
     parallel_tool_calls: Optional[bool] = None
     r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""
 
-    prompt_mode: Annotated[
-        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
-    ] = UNSET
+    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
     r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
 
     safe_prompt: Optional[bool] = None
@@ -178,6 +178,7 @@ class ChatCompletionRequest(BaseModel):
         "stream",
         "stop",
         "random_seed",
+        "metadata",
         "response_format",
         "tools",
         "tool_choice",
@@ -193,6 +194,7 @@ class ChatCompletionRequest(BaseModel):
         "temperature",
         "max_tokens",
         "random_seed",
+        "metadata",
         "tools",
         "n",
         "prompt_mode",
mistralai_azure/models/chatcompletionstreamrequest.py CHANGED
@@ -18,10 +18,9 @@ from mistralai_azure.types import (
     UNSET,
     UNSET_SENTINEL,
 )
-from mistralai_azure.utils import get_discriminator, validate_open_enum
+from mistralai_azure.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from pydantic.functional_validators import PlainValidator
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
 
@@ -84,6 +83,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+    metadata: NotRequired[Nullable[Dict[str, Any]]]
     response_format: NotRequired[ResponseFormatTypedDict]
     r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
@@ -130,6 +130,8 @@ class ChatCompletionStreamRequest(BaseModel):
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
 
+    metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
     response_format: Optional[ResponseFormat] = None
     r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
 
@@ -154,9 +156,7 @@ class ChatCompletionStreamRequest(BaseModel):
     parallel_tool_calls: Optional[bool] = None
     r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""
 
-    prompt_mode: Annotated[
-        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
-    ] = UNSET
+    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
     r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
 
     safe_prompt: Optional[bool] = None
@@ -172,6 +172,7 @@ class ChatCompletionStreamRequest(BaseModel):
         "stream",
         "stop",
         "random_seed",
+        "metadata",
         "response_format",
         "tools",
         "tool_choice",
@@ -187,6 +188,7 @@ class ChatCompletionStreamRequest(BaseModel):
         "temperature",
         "max_tokens",
         "random_seed",
+        "metadata",
         "tools",
         "n",
         "prompt_mode",
mistralai_azure/models/completionresponsestreamchoice.py CHANGED
@@ -3,14 +3,20 @@
 from __future__ import annotations
 from .deltamessage import DeltaMessage, DeltaMessageTypedDict
 from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr
-from mistralai_azure.utils import validate_open_enum
 from pydantic import model_serializer
-from pydantic.functional_validators import PlainValidator
 from typing import Literal, Union
-from typing_extensions import Annotated, TypedDict
+from typing_extensions import TypedDict
 
 
-FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr]
+FinishReason = Union[
+    Literal[
+        "stop",
+        "length",
+        "error",
+        "tool_calls",
+    ],
+    UnrecognizedStr,
+]
 
 
 class CompletionResponseStreamChoiceTypedDict(TypedDict):
@@ -24,9 +30,7 @@ class CompletionResponseStreamChoice(BaseModel):
 
     delta: DeltaMessage
 
-    finish_reason: Annotated[
-        Nullable[FinishReason], PlainValidator(validate_open_enum(False))
-    ]
+    finish_reason: Nullable[FinishReason]
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
mistralai_azure/models/documenturlchunk.py CHANGED
@@ -13,7 +13,7 @@ from typing import Literal, Optional
 from typing_extensions import NotRequired, TypedDict
 
 
-DocumentURLChunkType = Literal["document_url"]
+DocumentURLChunkType = Literal["document_url",]
 
 
 class DocumentURLChunkTypedDict(TypedDict):
mistralai_azure/models/httpvalidationerror.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .validationerror import ValidationError
+from dataclasses import dataclass, field
 import httpx
 from mistralai_azure.models import MistralAzureError
 from mistralai_azure.types import BaseModel
@@ -12,8 +13,9 @@ class HTTPValidationErrorData(BaseModel):
     detail: Optional[List[ValidationError]] = None
 
 
+@dataclass(unsafe_hash=True)
 class HTTPValidationError(MistralAzureError):
-    data: HTTPValidationErrorData
+    data: HTTPValidationErrorData = field(hash=False)
 
     def __init__(
         self,
@@ -23,4 +25,4 @@ class HTTPValidationError(MistralAzureError):
     ):
         message = body or raw_response.text
         super().__init__(message, raw_response, body)
-        self.data = data
+        object.__setattr__(self, "data", data)
mistralai_azure/models/imageurlchunk.py CHANGED
@@ -15,7 +15,7 @@ ImageURLChunkImageURLTypedDict = TypeAliasType(
 ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str])
 
 
-ImageURLChunkType = Literal["image_url"]
+ImageURLChunkType = Literal["image_url",]
 
 
 class ImageURLChunkTypedDict(TypedDict):
mistralai_azure/models/mistralazureerror.py CHANGED
@@ -2,25 +2,29 @@
 
 import httpx
 from typing import Optional
+from dataclasses import dataclass, field
 
 
+@dataclass(unsafe_hash=True)
 class MistralAzureError(Exception):
     """The base class for all HTTP error responses."""
 
     message: str
     status_code: int
     body: str
-    headers: httpx.Headers
-    raw_response: httpx.Response
+    headers: httpx.Headers = field(hash=False)
+    raw_response: httpx.Response = field(hash=False)
 
     def __init__(
         self, message: str, raw_response: httpx.Response, body: Optional[str] = None
     ):
-        self.message = message
-        self.status_code = raw_response.status_code
-        self.body = body if body is not None else raw_response.text
-        self.headers = raw_response.headers
-        self.raw_response = raw_response
+        object.__setattr__(self, "message", message)
+        object.__setattr__(self, "status_code", raw_response.status_code)
+        object.__setattr__(
+            self, "body", body if body is not None else raw_response.text
+        )
+        object.__setattr__(self, "headers", raw_response.headers)
+        object.__setattr__(self, "raw_response", raw_response)
 
     def __str__(self):
         return self.message
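With `@dataclass(unsafe_hash=True)` and the `httpx` members excluded via `field(hash=False)`, error instances now hash on (message, status_code, body). A hedged sketch of what that enables; the `httpx.Response` below is built locally purely to construct the error:

# Hedged sketch: hashable errors can be deduplicated in sets or counted in
# dicts. The response object here is synthetic, not from a real request.
import httpx
from mistralai_azure.models import MistralAzureError

resp = httpx.Response(
    status_code=429,
    text="rate limited",
    request=httpx.Request("POST", "https://example.invalid/v1/chat/completions"),
)

e1 = MistralAzureError("rate limited", resp)
e2 = MistralAzureError("rate limited", resp)

# headers and raw_response are field(hash=False), so the hash comes from
# message/status_code/body and the two instances collapse to one set entry.
print(len({e1, e2}))  # 1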
mistralai_azure/models/mistralpromptmode.py CHANGED
@@ -5,4 +5,4 @@ from mistralai_azure.types import UnrecognizedStr
 from typing import Literal, Union
 
 
-MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr]
+MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr]
mistralai_azure/models/no_response_error.py CHANGED
@@ -1,12 +1,16 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
+from dataclasses import dataclass
+
+
+@dataclass(unsafe_hash=True)
 class NoResponseError(Exception):
     """Error raised when no HTTP response is received from the server."""
 
     message: str
 
     def __init__(self, message: str = "No response received"):
-        self.message = message
+        object.__setattr__(self, "message", message)
         super().__init__(message)
 
     def __str__(self):