mistralai 1.10.0__py3-none-any.whl → 1.11.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (270)
  1. mistralai/_hooks/tracing.py +28 -3
  2. mistralai/_version.py +3 -3
  3. mistralai/accesses.py +22 -12
  4. mistralai/agents.py +88 -44
  5. mistralai/basesdk.py +6 -0
  6. mistralai/chat.py +96 -40
  7. mistralai/classifiers.py +48 -23
  8. mistralai/conversations.py +186 -64
  9. mistralai/documents.py +72 -26
  10. mistralai/embeddings.py +24 -9
  11. mistralai/extra/README.md +1 -1
  12. mistralai/extra/mcp/auth.py +10 -11
  13. mistralai/extra/mcp/base.py +17 -16
  14. mistralai/extra/mcp/sse.py +13 -15
  15. mistralai/extra/mcp/stdio.py +5 -6
  16. mistralai/extra/observability/otel.py +47 -68
  17. mistralai/extra/run/context.py +33 -43
  18. mistralai/extra/run/result.py +29 -30
  19. mistralai/extra/run/tools.py +8 -9
  20. mistralai/extra/struct_chat.py +15 -8
  21. mistralai/extra/utils/response_format.py +5 -3
  22. mistralai/files.py +58 -24
  23. mistralai/fim.py +20 -12
  24. mistralai/httpclient.py +0 -1
  25. mistralai/jobs.py +65 -26
  26. mistralai/libraries.py +20 -10
  27. mistralai/mistral_agents.py +438 -30
  28. mistralai/mistral_jobs.py +62 -17
  29. mistralai/models/__init__.py +46 -1
  30. mistralai/models/agent.py +1 -1
  31. mistralai/models/agentconversation.py +1 -1
  32. mistralai/models/agenthandoffdoneevent.py +1 -1
  33. mistralai/models/agenthandoffentry.py +3 -2
  34. mistralai/models/agenthandoffstartedevent.py +1 -1
  35. mistralai/models/agents_api_v1_agents_get_versionop.py +21 -0
  36. mistralai/models/agents_api_v1_agents_list_versionsop.py +33 -0
  37. mistralai/models/agents_api_v1_agents_listop.py +5 -1
  38. mistralai/models/agents_api_v1_conversations_listop.py +1 -1
  39. mistralai/models/agentscompletionrequest.py +2 -5
  40. mistralai/models/agentscompletionstreamrequest.py +2 -5
  41. mistralai/models/archiveftmodelout.py +1 -1
  42. mistralai/models/assistantmessage.py +1 -1
  43. mistralai/models/audiochunk.py +1 -1
  44. mistralai/models/audioencoding.py +18 -0
  45. mistralai/models/audioformat.py +17 -0
  46. mistralai/models/basemodelcard.py +1 -1
  47. mistralai/models/batchjobin.py +18 -9
  48. mistralai/models/batchjobout.py +6 -1
  49. mistralai/models/batchjobsout.py +1 -1
  50. mistralai/models/batchrequest.py +48 -0
  51. mistralai/models/chatcompletionchoice.py +10 -5
  52. mistralai/models/chatcompletionrequest.py +2 -5
  53. mistralai/models/chatcompletionstreamrequest.py +2 -5
  54. mistralai/models/classificationrequest.py +37 -3
  55. mistralai/models/classifierdetailedjobout.py +4 -2
  56. mistralai/models/classifierftmodelout.py +3 -2
  57. mistralai/models/classifierjobout.py +4 -2
  58. mistralai/models/codeinterpretertool.py +1 -1
  59. mistralai/models/completiondetailedjobout.py +5 -2
  60. mistralai/models/completionftmodelout.py +3 -2
  61. mistralai/models/completionjobout.py +5 -2
  62. mistralai/models/completionresponsestreamchoice.py +9 -8
  63. mistralai/models/conversationappendrequest.py +4 -1
  64. mistralai/models/conversationappendstreamrequest.py +4 -1
  65. mistralai/models/conversationhistory.py +2 -1
  66. mistralai/models/conversationmessages.py +1 -1
  67. mistralai/models/conversationrequest.py +5 -1
  68. mistralai/models/conversationresponse.py +2 -1
  69. mistralai/models/conversationrestartrequest.py +4 -1
  70. mistralai/models/conversationrestartstreamrequest.py +4 -1
  71. mistralai/models/conversationstreamrequest.py +5 -1
  72. mistralai/models/documentlibrarytool.py +1 -1
  73. mistralai/models/documenturlchunk.py +1 -1
  74. mistralai/models/embeddingdtype.py +7 -1
  75. mistralai/models/embeddingrequest.py +11 -3
  76. mistralai/models/encodingformat.py +4 -1
  77. mistralai/models/entitytype.py +8 -1
  78. mistralai/models/filepurpose.py +8 -1
  79. mistralai/models/files_api_routes_list_filesop.py +4 -11
  80. mistralai/models/files_api_routes_upload_fileop.py +2 -6
  81. mistralai/models/fileschema.py +3 -5
  82. mistralai/models/finetuneablemodeltype.py +4 -1
  83. mistralai/models/ftclassifierlossfunction.py +4 -1
  84. mistralai/models/ftmodelcard.py +1 -1
  85. mistralai/models/functioncallentry.py +3 -2
  86. mistralai/models/functioncallevent.py +1 -1
  87. mistralai/models/functionresultentry.py +3 -2
  88. mistralai/models/functiontool.py +1 -1
  89. mistralai/models/githubrepositoryin.py +1 -1
  90. mistralai/models/githubrepositoryout.py +1 -1
  91. mistralai/models/httpvalidationerror.py +4 -2
  92. mistralai/models/imagegenerationtool.py +1 -1
  93. mistralai/models/imageurlchunk.py +1 -1
  94. mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +40 -3
  95. mistralai/models/jobsout.py +1 -1
  96. mistralai/models/legacyjobmetadataout.py +1 -1
  97. mistralai/models/messageinputentry.py +9 -3
  98. mistralai/models/messageoutputentry.py +6 -3
  99. mistralai/models/messageoutputevent.py +4 -2
  100. mistralai/models/mistralerror.py +11 -7
  101. mistralai/models/mistralpromptmode.py +1 -1
  102. mistralai/models/modelconversation.py +1 -1
  103. mistralai/models/no_response_error.py +5 -1
  104. mistralai/models/ocrrequest.py +11 -1
  105. mistralai/models/ocrtableobject.py +4 -1
  106. mistralai/models/referencechunk.py +1 -1
  107. mistralai/models/requestsource.py +5 -1
  108. mistralai/models/responsedoneevent.py +1 -1
  109. mistralai/models/responseerrorevent.py +1 -1
  110. mistralai/models/responseformats.py +5 -1
  111. mistralai/models/responsestartedevent.py +1 -1
  112. mistralai/models/responsevalidationerror.py +2 -0
  113. mistralai/models/retrievefileout.py +3 -5
  114. mistralai/models/sampletype.py +7 -1
  115. mistralai/models/sdkerror.py +2 -0
  116. mistralai/models/shareenum.py +7 -1
  117. mistralai/models/sharingdelete.py +2 -4
  118. mistralai/models/sharingin.py +3 -5
  119. mistralai/models/source.py +8 -1
  120. mistralai/models/systemmessage.py +1 -1
  121. mistralai/models/textchunk.py +1 -1
  122. mistralai/models/thinkchunk.py +1 -1
  123. mistralai/models/timestampgranularity.py +1 -1
  124. mistralai/models/tool.py +2 -6
  125. mistralai/models/toolcall.py +2 -6
  126. mistralai/models/toolchoice.py +2 -6
  127. mistralai/models/toolchoiceenum.py +6 -1
  128. mistralai/models/toolexecutiondeltaevent.py +2 -1
  129. mistralai/models/toolexecutiondoneevent.py +2 -1
  130. mistralai/models/toolexecutionentry.py +4 -2
  131. mistralai/models/toolexecutionstartedevent.py +2 -1
  132. mistralai/models/toolfilechunk.py +13 -5
  133. mistralai/models/toolmessage.py +1 -1
  134. mistralai/models/toolreferencechunk.py +15 -5
  135. mistralai/models/tooltypes.py +1 -1
  136. mistralai/models/transcriptionsegmentchunk.py +1 -1
  137. mistralai/models/transcriptionstreamdone.py +1 -1
  138. mistralai/models/transcriptionstreamlanguage.py +1 -1
  139. mistralai/models/transcriptionstreamsegmentdelta.py +1 -1
  140. mistralai/models/transcriptionstreamtextdelta.py +1 -1
  141. mistralai/models/unarchiveftmodelout.py +1 -1
  142. mistralai/models/uploadfileout.py +3 -5
  143. mistralai/models/usermessage.py +1 -1
  144. mistralai/models/wandbintegration.py +1 -1
  145. mistralai/models/wandbintegrationout.py +1 -1
  146. mistralai/models/websearchpremiumtool.py +1 -1
  147. mistralai/models/websearchtool.py +1 -1
  148. mistralai/models_.py +24 -12
  149. mistralai/ocr.py +38 -10
  150. mistralai/sdk.py +2 -2
  151. mistralai/transcriptions.py +28 -12
  152. mistralai/types/basemodel.py +41 -3
  153. mistralai/utils/__init__.py +0 -3
  154. mistralai/utils/annotations.py +32 -8
  155. mistralai/utils/enums.py +60 -0
  156. mistralai/utils/forms.py +21 -10
  157. mistralai/utils/queryparams.py +14 -2
  158. mistralai/utils/requestbodies.py +3 -3
  159. mistralai/utils/retries.py +69 -5
  160. mistralai/utils/serializers.py +0 -20
  161. mistralai/utils/unmarshal_json_response.py +15 -1
  162. {mistralai-1.10.0.dist-info → mistralai-1.11.1.dist-info}/METADATA +144 -159
  163. mistralai-1.11.1.dist-info/RECORD +495 -0
  164. {mistralai-1.10.0.dist-info → mistralai-1.11.1.dist-info}/WHEEL +1 -1
  165. mistralai_azure/_version.py +3 -3
  166. mistralai_azure/basesdk.py +21 -5
  167. mistralai_azure/chat.py +82 -109
  168. mistralai_azure/httpclient.py +0 -1
  169. mistralai_azure/models/__init__.py +66 -4
  170. mistralai_azure/models/assistantmessage.py +1 -1
  171. mistralai_azure/models/chatcompletionchoice.py +10 -7
  172. mistralai_azure/models/chatcompletionrequest.py +24 -10
  173. mistralai_azure/models/chatcompletionstreamrequest.py +24 -10
  174. mistralai_azure/models/completionresponsestreamchoice.py +11 -7
  175. mistralai_azure/models/documenturlchunk.py +1 -1
  176. mistralai_azure/models/httpvalidationerror.py +15 -8
  177. mistralai_azure/models/imageurlchunk.py +1 -1
  178. mistralai_azure/models/mistralazureerror.py +30 -0
  179. mistralai_azure/models/mistralpromptmode.py +1 -1
  180. mistralai_azure/models/no_response_error.py +17 -0
  181. mistralai_azure/models/ocrpageobject.py +32 -5
  182. mistralai_azure/models/ocrrequest.py +20 -1
  183. mistralai_azure/models/ocrtableobject.py +34 -0
  184. mistralai_azure/models/prediction.py +4 -0
  185. mistralai_azure/models/referencechunk.py +1 -1
  186. mistralai_azure/models/responseformat.py +4 -2
  187. mistralai_azure/models/responseformats.py +5 -2
  188. mistralai_azure/models/responsevalidationerror.py +27 -0
  189. mistralai_azure/models/sdkerror.py +32 -14
  190. mistralai_azure/models/systemmessage.py +8 -4
  191. mistralai_azure/models/systemmessagecontentchunks.py +21 -0
  192. mistralai_azure/models/textchunk.py +1 -1
  193. mistralai_azure/models/thinkchunk.py +35 -0
  194. mistralai_azure/models/tool.py +2 -6
  195. mistralai_azure/models/toolcall.py +2 -6
  196. mistralai_azure/models/toolchoice.py +2 -6
  197. mistralai_azure/models/toolchoiceenum.py +6 -1
  198. mistralai_azure/models/toolmessage.py +1 -1
  199. mistralai_azure/models/tooltypes.py +1 -1
  200. mistralai_azure/models/usermessage.py +1 -1
  201. mistralai_azure/ocr.py +39 -40
  202. mistralai_azure/types/basemodel.py +41 -3
  203. mistralai_azure/utils/__init__.py +18 -8
  204. mistralai_azure/utils/annotations.py +32 -8
  205. mistralai_azure/utils/enums.py +60 -0
  206. mistralai_azure/utils/eventstreaming.py +10 -0
  207. mistralai_azure/utils/forms.py +21 -10
  208. mistralai_azure/utils/queryparams.py +14 -2
  209. mistralai_azure/utils/requestbodies.py +3 -3
  210. mistralai_azure/utils/retries.py +69 -5
  211. mistralai_azure/utils/serializers.py +3 -22
  212. mistralai_azure/utils/unmarshal_json_response.py +38 -0
  213. mistralai_gcp/_hooks/types.py +7 -0
  214. mistralai_gcp/_version.py +4 -4
  215. mistralai_gcp/basesdk.py +33 -25
  216. mistralai_gcp/chat.py +98 -109
  217. mistralai_gcp/fim.py +62 -85
  218. mistralai_gcp/httpclient.py +6 -17
  219. mistralai_gcp/models/__init__.py +321 -116
  220. mistralai_gcp/models/assistantmessage.py +2 -2
  221. mistralai_gcp/models/chatcompletionchoice.py +10 -7
  222. mistralai_gcp/models/chatcompletionrequest.py +38 -7
  223. mistralai_gcp/models/chatcompletionresponse.py +6 -6
  224. mistralai_gcp/models/chatcompletionstreamrequest.py +38 -7
  225. mistralai_gcp/models/completionresponsestreamchoice.py +12 -8
  226. mistralai_gcp/models/deltamessage.py +1 -1
  227. mistralai_gcp/models/fimcompletionrequest.py +9 -10
  228. mistralai_gcp/models/fimcompletionresponse.py +6 -6
  229. mistralai_gcp/models/fimcompletionstreamrequest.py +9 -10
  230. mistralai_gcp/models/httpvalidationerror.py +15 -8
  231. mistralai_gcp/models/imageurl.py +1 -1
  232. mistralai_gcp/models/imageurlchunk.py +1 -1
  233. mistralai_gcp/models/jsonschema.py +1 -1
  234. mistralai_gcp/models/mistralgcperror.py +30 -0
  235. mistralai_gcp/models/mistralpromptmode.py +8 -0
  236. mistralai_gcp/models/no_response_error.py +17 -0
  237. mistralai_gcp/models/prediction.py +4 -0
  238. mistralai_gcp/models/referencechunk.py +1 -1
  239. mistralai_gcp/models/responseformat.py +5 -3
  240. mistralai_gcp/models/responseformats.py +5 -2
  241. mistralai_gcp/models/responsevalidationerror.py +27 -0
  242. mistralai_gcp/models/sdkerror.py +32 -14
  243. mistralai_gcp/models/systemmessage.py +8 -4
  244. mistralai_gcp/models/systemmessagecontentchunks.py +21 -0
  245. mistralai_gcp/models/textchunk.py +1 -1
  246. mistralai_gcp/models/thinkchunk.py +35 -0
  247. mistralai_gcp/models/tool.py +2 -6
  248. mistralai_gcp/models/toolcall.py +2 -6
  249. mistralai_gcp/models/toolchoice.py +2 -6
  250. mistralai_gcp/models/toolchoiceenum.py +6 -1
  251. mistralai_gcp/models/toolmessage.py +2 -2
  252. mistralai_gcp/models/tooltypes.py +1 -1
  253. mistralai_gcp/models/usageinfo.py +71 -8
  254. mistralai_gcp/models/usermessage.py +2 -2
  255. mistralai_gcp/sdk.py +12 -10
  256. mistralai_gcp/sdkconfiguration.py +0 -7
  257. mistralai_gcp/types/basemodel.py +41 -3
  258. mistralai_gcp/utils/__init__.py +141 -46
  259. mistralai_gcp/utils/annotations.py +32 -8
  260. mistralai_gcp/utils/datetimes.py +23 -0
  261. mistralai_gcp/utils/enums.py +125 -25
  262. mistralai_gcp/utils/eventstreaming.py +10 -0
  263. mistralai_gcp/utils/forms.py +62 -30
  264. mistralai_gcp/utils/queryparams.py +14 -2
  265. mistralai_gcp/utils/requestbodies.py +3 -3
  266. mistralai_gcp/utils/retries.py +69 -5
  267. mistralai_gcp/utils/serializers.py +33 -23
  268. mistralai_gcp/utils/unmarshal_json_response.py +38 -0
  269. mistralai-1.10.0.dist-info/RECORD +0 -475
  270. {mistralai-1.10.0.dist-info → mistralai-1.11.1.dist-info}/licenses/LICENSE +0 -0
mistralai_gcp/fim.py CHANGED
@@ -5,7 +5,8 @@ from mistralai_gcp import models, utils
 from mistralai_gcp._hooks import HookContext
 from mistralai_gcp.types import OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
-from typing import Any, Mapping, Optional, Union
+from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, Dict, Mapping, Optional, Union
 
 
 class Fim(BaseSDK):
@@ -27,18 +28,19 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]:
+    ) -> eventstreaming.EventStream[models.CompletionEvent]:
         r"""Stream fim completion
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -46,6 +48,7 @@
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
        :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -71,6 +74,7 @@
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -92,6 +96,7 @@
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.FIMCompletionStreamRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )
 
@@ -105,9 +110,10 @@
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_fim",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=self.sdk_configuration.security,
             ),
             request=req,
@@ -122,32 +128,23 @@
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
+                client_ref=self,
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = utils.stream_to_text(http_res)
-            response_data = utils.unmarshal_json(
-                http_res_text, models.HTTPValidationErrorData
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res, http_res_text
             )
-            raise models.HTTPValidationError(data=response_data)
+            raise models.HTTPValidationError(response_data, http_res, http_res_text)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-        content_type = http_res.headers.get("Content-Type")
         http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res, http_res_text)
 
     async def stream_async(
         self,
@@ -165,18 +162,19 @@
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]:
+    ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]:
         r"""Stream fim completion
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -184,6 +182,7 @@
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -209,6 +208,7 @@
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -230,6 +230,7 @@
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.FIMCompletionStreamRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )
 
@@ -243,9 +244,10 @@
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_fim",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=self.sdk_configuration.security,
             ),
             request=req,
@@ -260,32 +262,23 @@
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
+                client_ref=self,
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            response_data = utils.unmarshal_json(
-                http_res_text, models.HTTPValidationErrorData
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res, http_res_text
             )
-            raise models.HTTPValidationError(data=response_data)
+            raise models.HTTPValidationError(response_data, http_res, http_res_text)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-        content_type = http_res.headers.get("Content-Type")
         http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res, http_res_text)
 
     def complete(
         self,
@@ -303,18 +296,19 @@
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[models.FIMCompletionResponse]:
+    ) -> models.FIMCompletionResponse:
         r"""Fim Completion
 
         FIM completion.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -322,6 +316,7 @@
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -347,6 +342,7 @@
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -368,6 +364,7 @@
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.FIMCompletionRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )
 
@@ -381,9 +378,10 @@
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="fim_completion_v1_fim_completions_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=self.sdk_configuration.security,
             ),
             request=req,
@@ -393,33 +391,20 @@
 
         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(
-                http_res.text, Optional[models.FIMCompletionResponse]
-            )
+            return unmarshal_json_response(models.FIMCompletionResponse, http_res)
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
             )
-            raise models.HTTPValidationError(data=response_data)
+            raise models.HTTPValidationError(response_data, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-        content_type = http_res.headers.get("Content-Type")
-        http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)
 
     async def complete_async(
         self,
@@ -437,18 +422,19 @@
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Optional[models.FIMCompletionResponse]:
+    ) -> models.FIMCompletionResponse:
         r"""Fim Completion
 
         FIM completion.
 
-        :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest`
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -456,6 +442,7 @@
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -481,6 +468,7 @@
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -502,6 +490,7 @@
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.FIMCompletionRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )
 
@@ -515,9 +504,10 @@
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="fim_completion_v1_fim_completions_post",
-                oauth2_scopes=[],
+                oauth2_scopes=None,
                 security_source=self.sdk_configuration.security,
             ),
             request=req,
@@ -527,30 +517,17 @@
 
         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(
-                http_res.text, Optional[models.FIMCompletionResponse]
-            )
+            return unmarshal_json_response(models.FIMCompletionResponse, http_res)
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
            )
-            raise models.HTTPValidationError(data=response_data)
+            raise models.HTTPValidationError(response_data, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
 
-        content_type = http_res.headers.get("Content-Type")
-        http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)
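Taken together, the caller-visible changes in this file are: `complete` and `stream` no longer return `Optional` values, errors now carry the raw `httpx` response, and requests accept a free-form `metadata` mapping. A minimal usage sketch (hedged: the `MistralGoogleCloud` client and its `region`/`project_id` arguments follow the package README rather than this diff, and the values are placeholders):

```python
# Sketch only: exercises the new `metadata` kwarg and the narrowed return type.
from mistralai_gcp import MistralGoogleCloud, models

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")  # placeholder values

try:
    res = client.fim.complete(
        model="codestral-2405",
        prompt="def fib(n: int) -> int:",
        suffix="print(fib(10))",
        metadata={"trace_id": "abc-123"},  # new in 1.11.x: opaque request metadata
    )
    # Return type is now models.FIMCompletionResponse, not Optional[...]
    print(res.choices[0].message.content)
except models.HTTPValidationError as err:
    # 422s are now raised as (data, http_res, body) rather than data= only
    print(err.data)
except models.SDKError as err:
    # SDKError is now constructed as (message, http_res, body)
    print(err.message)
```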
mistralai_gcp/httpclient.py CHANGED
@@ -2,7 +2,6 @@
 
 # pyright: reportReturnType = false
 import asyncio
-from concurrent.futures import ThreadPoolExecutor
 from typing_extensions import Protocol, runtime_checkable
 import httpx
 from typing import Any, Optional, Union
@@ -108,7 +107,6 @@ def close_clients(
     # to them from the owning SDK instance and they can be reaped.
     owner.client = None
     owner.async_client = None
-
     if sync_client is not None and not sync_client_supplied:
         try:
             sync_client.close()
@@ -116,21 +114,12 @@ def close_clients(
             pass
 
     if async_client is not None and not async_client_supplied:
-        is_async = False
         try:
-            asyncio.get_running_loop()
-            is_async = True
+            loop = asyncio.get_running_loop()
+            asyncio.run_coroutine_threadsafe(async_client.aclose(), loop)
         except RuntimeError:
-            pass
-
-        try:
-            # If this function is called in an async loop then start another
-            # loop in a separate thread to close the async http client.
-            if is_async:
-                with ThreadPoolExecutor(max_workers=1) as executor:
-                    future = executor.submit(asyncio.run, async_client.aclose())
-                    future.result()
-            else:
+            try:
                 asyncio.run(async_client.aclose())
-        except Exception:
-            pass
+            except RuntimeError:
+                # best effort
+                pass
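The rewritten `close_clients` drops the thread-pool workaround: when an event loop is already running it schedules `aclose()` onto that loop with `asyncio.run_coroutine_threadsafe`, and only falls back to `asyncio.run` when no loop exists. A standalone sketch of the same pattern (the helper name is ours, not the SDK's):

```python
# Sketch of the loop-aware close pattern above (helper name is hypothetical).
import asyncio
import httpx


def close_async_client(async_client: httpx.AsyncClient) -> None:
    try:
        # A loop is running: schedule aclose() on it. The coroutine executes
        # once the loop regains control, so this call does not block.
        loop = asyncio.get_running_loop()
        asyncio.run_coroutine_threadsafe(async_client.aclose(), loop)
    except RuntimeError:
        # No running loop: spin up a temporary one just to close the client.
        try:
            asyncio.run(async_client.aclose())
        except RuntimeError:
            # Best effort, e.g. during interpreter shutdown.
            pass
```

The trade-off versus the old `ThreadPoolExecutor` approach is that the close becomes fire-and-forget when invoked from inside a running loop, instead of blocking that loop's thread while a second loop runs `aclose()`.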