mistralai 1.0.3__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (199)
  1. mistralai/_hooks/sdkhooks.py +23 -4
  2. mistralai/_hooks/types.py +27 -9
  3. mistralai/agents.py +286 -150
  4. mistralai/basesdk.py +90 -5
  5. mistralai/chat.py +260 -144
  6. mistralai/embeddings.py +73 -53
  7. mistralai/files.py +252 -192
  8. mistralai/fim.py +174 -110
  9. mistralai/fine_tuning.py +3 -2
  10. mistralai/jobs.py +372 -263
  11. mistralai/models/__init__.py +499 -46
  12. mistralai/models/agentscompletionrequest.py +47 -11
  13. mistralai/models/agentscompletionstreamrequest.py +49 -11
  14. mistralai/models/archiveftmodelout.py +6 -2
  15. mistralai/models/assistantmessage.py +11 -4
  16. mistralai/models/{modelcard.py → basemodelcard.py} +37 -14
  17. mistralai/models/chatcompletionchoice.py +4 -2
  18. mistralai/models/chatcompletionrequest.py +57 -11
  19. mistralai/models/chatcompletionresponse.py +6 -2
  20. mistralai/models/chatcompletionstreamrequest.py +59 -11
  21. mistralai/models/checkpointout.py +3 -2
  22. mistralai/models/completionchunk.py +10 -3
  23. mistralai/models/completionevent.py +1 -2
  24. mistralai/models/completionresponsestreamchoice.py +13 -5
  25. mistralai/models/contentchunk.py +13 -10
  26. mistralai/models/delete_model_v1_models_model_id_deleteop.py +4 -3
  27. mistralai/models/deletefileout.py +3 -2
  28. mistralai/models/deletemodelout.py +3 -2
  29. mistralai/models/deltamessage.py +9 -4
  30. mistralai/models/detailedjobout.py +59 -7
  31. mistralai/models/embeddingrequest.py +9 -4
  32. mistralai/models/embeddingresponse.py +5 -2
  33. mistralai/models/embeddingresponsedata.py +3 -2
  34. mistralai/models/eventout.py +9 -4
  35. mistralai/models/files_api_routes_delete_fileop.py +4 -3
  36. mistralai/models/files_api_routes_retrieve_fileop.py +4 -3
  37. mistralai/models/files_api_routes_upload_fileop.py +27 -8
  38. mistralai/models/fileschema.py +26 -5
  39. mistralai/models/fimcompletionrequest.py +26 -5
  40. mistralai/models/fimcompletionresponse.py +6 -2
  41. mistralai/models/fimcompletionstreamrequest.py +26 -5
  42. mistralai/models/finetuneablemodel.py +7 -1
  43. mistralai/models/ftmodelcapabilitiesout.py +4 -2
  44. mistralai/models/ftmodelcard.py +103 -0
  45. mistralai/models/ftmodelout.py +32 -6
  46. mistralai/models/function.py +3 -2
  47. mistralai/models/functioncall.py +2 -2
  48. mistralai/models/functionname.py +17 -0
  49. mistralai/models/githubrepositoryin.py +15 -4
  50. mistralai/models/githubrepositoryout.py +15 -4
  51. mistralai/models/httpvalidationerror.py +2 -2
  52. mistralai/models/imageurl.py +48 -0
  53. mistralai/models/imageurlchunk.py +32 -0
  54. mistralai/models/jobin.py +22 -5
  55. mistralai/models/jobmetadataout.py +31 -6
  56. mistralai/models/jobout.py +55 -7
  57. mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +4 -3
  58. mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +4 -3
  59. mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +3 -2
  60. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +4 -3
  61. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +83 -16
  62. mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +4 -3
  63. mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +4 -3
  64. mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +9 -4
  65. mistralai/models/jobsout.py +6 -2
  66. mistralai/models/legacyjobmetadataout.py +45 -6
  67. mistralai/models/listfilesout.py +2 -2
  68. mistralai/models/metricout.py +11 -6
  69. mistralai/models/modelcapabilities.py +7 -2
  70. mistralai/models/modellist.py +21 -7
  71. mistralai/models/responseformat.py +6 -7
  72. mistralai/models/responseformats.py +8 -0
  73. mistralai/models/retrieve_model_v1_models_model_id_getop.py +24 -5
  74. mistralai/models/retrievefileout.py +26 -5
  75. mistralai/models/security.py +12 -3
  76. mistralai/models/systemmessage.py +6 -5
  77. mistralai/models/textchunk.py +9 -4
  78. mistralai/models/tool.py +9 -8
  79. mistralai/models/toolcall.py +9 -7
  80. mistralai/models/toolchoice.py +29 -0
  81. mistralai/models/toolchoiceenum.py +7 -0
  82. mistralai/models/toolmessage.py +11 -4
  83. mistralai/models/tooltypes.py +8 -0
  84. mistralai/models/trainingfile.py +2 -2
  85. mistralai/models/trainingparameters.py +27 -6
  86. mistralai/models/trainingparametersin.py +29 -8
  87. mistralai/models/unarchiveftmodelout.py +6 -2
  88. mistralai/models/updateftmodelin.py +8 -4
  89. mistralai/models/uploadfileout.py +26 -5
  90. mistralai/models/usageinfo.py +3 -2
  91. mistralai/models/usermessage.py +6 -5
  92. mistralai/models/validationerror.py +3 -2
  93. mistralai/models/wandbintegration.py +14 -4
  94. mistralai/models/wandbintegrationout.py +13 -4
  95. mistralai/models_.py +392 -294
  96. mistralai/sdk.py +24 -19
  97. mistralai/sdkconfiguration.py +6 -8
  98. mistralai/utils/__init__.py +6 -1
  99. mistralai/utils/logger.py +4 -1
  100. mistralai/utils/retries.py +2 -1
  101. mistralai/utils/security.py +13 -6
  102. {mistralai-1.0.3.dist-info → mistralai-1.1.0.dist-info}/METADATA +102 -73
  103. mistralai-1.1.0.dist-info/RECORD +254 -0
  104. mistralai_azure/_hooks/sdkhooks.py +23 -4
  105. mistralai_azure/_hooks/types.py +27 -9
  106. mistralai_azure/basesdk.py +91 -6
  107. mistralai_azure/chat.py +252 -144
  108. mistralai_azure/models/__init__.py +157 -15
  109. mistralai_azure/models/assistantmessage.py +18 -5
  110. mistralai_azure/models/chatcompletionchoice.py +7 -3
  111. mistralai_azure/models/chatcompletionrequest.py +65 -12
  112. mistralai_azure/models/chatcompletionresponse.py +6 -2
  113. mistralai_azure/models/chatcompletionstreamrequest.py +67 -12
  114. mistralai_azure/models/completionchunk.py +10 -3
  115. mistralai_azure/models/completionevent.py +1 -2
  116. mistralai_azure/models/completionresponsestreamchoice.py +10 -4
  117. mistralai_azure/models/contentchunk.py +4 -11
  118. mistralai_azure/models/deltamessage.py +16 -5
  119. mistralai_azure/models/function.py +3 -2
  120. mistralai_azure/models/functioncall.py +2 -2
  121. mistralai_azure/models/functionname.py +17 -0
  122. mistralai_azure/models/httpvalidationerror.py +2 -2
  123. mistralai_azure/models/responseformat.py +6 -7
  124. mistralai_azure/models/responseformats.py +8 -0
  125. mistralai_azure/models/security.py +12 -3
  126. mistralai_azure/models/systemmessage.py +6 -5
  127. mistralai_azure/models/textchunk.py +9 -4
  128. mistralai_azure/models/tool.py +9 -8
  129. mistralai_azure/models/toolcall.py +9 -7
  130. mistralai_azure/models/toolchoice.py +29 -0
  131. mistralai_azure/models/toolchoiceenum.py +7 -0
  132. mistralai_azure/models/toolmessage.py +18 -5
  133. mistralai_azure/models/tooltypes.py +8 -0
  134. mistralai_azure/models/usageinfo.py +3 -2
  135. mistralai_azure/models/usermessage.py +6 -5
  136. mistralai_azure/models/validationerror.py +3 -2
  137. mistralai_azure/sdkconfiguration.py +6 -8
  138. mistralai_azure/utils/__init__.py +8 -3
  139. mistralai_azure/utils/forms.py +10 -9
  140. mistralai_azure/utils/headers.py +8 -8
  141. mistralai_azure/utils/logger.py +6 -0
  142. mistralai_azure/utils/queryparams.py +16 -14
  143. mistralai_azure/utils/retries.py +2 -1
  144. mistralai_azure/utils/security.py +12 -6
  145. mistralai_azure/utils/serializers.py +17 -8
  146. mistralai_azure/utils/url.py +13 -8
  147. mistralai_azure/utils/values.py +6 -0
  148. mistralai_gcp/_hooks/sdkhooks.py +23 -4
  149. mistralai_gcp/_hooks/types.py +27 -9
  150. mistralai_gcp/basesdk.py +91 -6
  151. mistralai_gcp/chat.py +252 -144
  152. mistralai_gcp/fim.py +166 -110
  153. mistralai_gcp/models/__init__.py +179 -17
  154. mistralai_gcp/models/assistantmessage.py +18 -5
  155. mistralai_gcp/models/chatcompletionchoice.py +7 -3
  156. mistralai_gcp/models/chatcompletionrequest.py +62 -12
  157. mistralai_gcp/models/chatcompletionresponse.py +6 -2
  158. mistralai_gcp/models/chatcompletionstreamrequest.py +64 -12
  159. mistralai_gcp/models/completionchunk.py +10 -3
  160. mistralai_gcp/models/completionevent.py +1 -2
  161. mistralai_gcp/models/completionresponsestreamchoice.py +10 -4
  162. mistralai_gcp/models/contentchunk.py +4 -11
  163. mistralai_gcp/models/deltamessage.py +16 -5
  164. mistralai_gcp/models/fimcompletionrequest.py +33 -6
  165. mistralai_gcp/models/fimcompletionresponse.py +6 -2
  166. mistralai_gcp/models/fimcompletionstreamrequest.py +33 -6
  167. mistralai_gcp/models/function.py +3 -2
  168. mistralai_gcp/models/functioncall.py +2 -2
  169. mistralai_gcp/models/functionname.py +17 -0
  170. mistralai_gcp/models/httpvalidationerror.py +2 -2
  171. mistralai_gcp/models/responseformat.py +6 -7
  172. mistralai_gcp/models/responseformats.py +8 -0
  173. mistralai_gcp/models/security.py +12 -3
  174. mistralai_gcp/models/systemmessage.py +6 -5
  175. mistralai_gcp/models/textchunk.py +9 -4
  176. mistralai_gcp/models/tool.py +9 -8
  177. mistralai_gcp/models/toolcall.py +9 -7
  178. mistralai_gcp/models/toolchoice.py +29 -0
  179. mistralai_gcp/models/toolchoiceenum.py +7 -0
  180. mistralai_gcp/models/toolmessage.py +18 -5
  181. mistralai_gcp/models/tooltypes.py +8 -0
  182. mistralai_gcp/models/usageinfo.py +3 -2
  183. mistralai_gcp/models/usermessage.py +6 -5
  184. mistralai_gcp/models/validationerror.py +3 -2
  185. mistralai_gcp/sdk.py +6 -7
  186. mistralai_gcp/sdkconfiguration.py +6 -8
  187. mistralai_gcp/utils/__init__.py +8 -3
  188. mistralai_gcp/utils/forms.py +10 -9
  189. mistralai_gcp/utils/headers.py +8 -8
  190. mistralai_gcp/utils/logger.py +6 -0
  191. mistralai_gcp/utils/queryparams.py +16 -14
  192. mistralai_gcp/utils/retries.py +2 -1
  193. mistralai_gcp/utils/security.py +12 -6
  194. mistralai_gcp/utils/serializers.py +17 -8
  195. mistralai_gcp/utils/url.py +13 -8
  196. mistralai_gcp/utils/values.py +6 -0
  197. mistralai-1.0.3.dist-info/RECORD +0 -236
  198. {mistralai-1.0.3.dist-info → mistralai-1.1.0.dist-info}/LICENSE +0 -0
  199. {mistralai-1.0.3.dist-info → mistralai-1.1.0.dist-info}/WHEEL +0 -0
mistralai/fim.py CHANGED
@@ -7,12 +7,13 @@ from mistralai.types import Nullable, OptionalNullable, UNSET
 from mistralai.utils import eventstreaming, get_security_from_env
 from typing import Any, AsyncGenerator, Generator, Optional, Union
 
+
 class Fim(BaseSDK):
     r"""Fill-in-the-middle API."""
-
-
+
     def complete(
-        self, *,
+        self,
+        *,
         model: Nullable[str],
         prompt: str,
         temperature: Optional[float] = 0.7,
@@ -20,7 +21,12 @@ class Fim(BaseSDK):
         max_tokens: OptionalNullable[int] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = False,
-        stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None,
+        stop: Optional[
+            Union[
+                models.FIMCompletionRequestStop,
+                models.FIMCompletionRequestStopTypedDict,
+            ]
+        ] = None,
         random_seed: OptionalNullable[int] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -49,10 +55,10 @@ class Fim(BaseSDK):
         url_variables = None
         if timeout_ms is None:
             timeout_ms = self.sdk_configuration.timeout_ms
-
+
         if server_url is not None:
             base_url = server_url
-
+
         request = models.FIMCompletionRequest(
             model=model,
             temperature=temperature,
@@ -65,7 +71,7 @@ class Fim(BaseSDK):
             prompt=prompt,
             suffix=suffix,
         )
-
+
         req = self.build_request(
             method="POST",
             path="/v1/fim/completions",
@@ -78,47 +84,57 @@ class Fim(BaseSDK):
             user_agent_header="user-agent",
             accept_header_value="application/json",
             security=self.sdk_configuration.security,
-            get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest),
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.FIMCompletionRequest
+            ),
             timeout_ms=timeout_ms,
         )
-
+
         if retries == UNSET:
             if self.sdk_configuration.retry_config is not UNSET:
                 retries = self.sdk_configuration.retry_config
 
         retry_config = None
         if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, [
-                "429",
-                "500",
-                "502",
-                "503",
-                "504"
-            ])
-
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
         http_res = self.do_request(
-            hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)),
+            hook_ctx=HookContext(
+                operation_id="fim_completion_v1_fim_completions_post",
+                oauth2_scopes=[],
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
             request=req,
-            error_status_codes=["422","4XX","5XX"],
-            retry_config=retry_config
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
         )
-
+
         data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse])
+            return utils.unmarshal_json(
+                http_res.text, Optional[models.FIMCompletionResponse]
+            )
         if utils.match_response(http_res, "422", "application/json"):
             data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
             raise models.HTTPValidationError(data=data)
-        if utils.match_response(http_res, ["4XX","5XX"], "*"):
-            raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res)
-
+        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res.text, http_res
+            )
+
         content_type = http_res.headers.get("Content-Type")
-        raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res)
+        raise models.SDKError(
+            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
+            http_res.status_code,
+            http_res.text,
+            http_res,
+        )
 
-
-
     async def complete_async(
-        self, *,
+        self,
+        *,
         model: Nullable[str],
         prompt: str,
         temperature: Optional[float] = 0.7,
@@ -126,7 +142,12 @@ class Fim(BaseSDK):
         max_tokens: OptionalNullable[int] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = False,
-        stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None,
+        stop: Optional[
+            Union[
+                models.FIMCompletionRequestStop,
+                models.FIMCompletionRequestStopTypedDict,
+            ]
+        ] = None,
         random_seed: OptionalNullable[int] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -155,10 +176,10 @@ class Fim(BaseSDK):
         url_variables = None
         if timeout_ms is None:
             timeout_ms = self.sdk_configuration.timeout_ms
-
+
         if server_url is not None:
             base_url = server_url
-
+
         request = models.FIMCompletionRequest(
             model=model,
             temperature=temperature,
@@ -171,8 +192,8 @@ class Fim(BaseSDK):
             prompt=prompt,
             suffix=suffix,
         )
-
-        req = self.build_request(
+
+        req = self.build_request_async(
             method="POST",
             path="/v1/fim/completions",
             base_url=base_url,
@@ -184,47 +205,57 @@ class Fim(BaseSDK):
             user_agent_header="user-agent",
             accept_header_value="application/json",
             security=self.sdk_configuration.security,
-            get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest),
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.FIMCompletionRequest
+            ),
             timeout_ms=timeout_ms,
         )
-
+
         if retries == UNSET:
             if self.sdk_configuration.retry_config is not UNSET:
                 retries = self.sdk_configuration.retry_config
 
         retry_config = None
         if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, [
-                "429",
-                "500",
-                "502",
-                "503",
-                "504"
-            ])
-
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
         http_res = await self.do_request_async(
-            hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)),
+            hook_ctx=HookContext(
+                operation_id="fim_completion_v1_fim_completions_post",
+                oauth2_scopes=[],
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
             request=req,
-            error_status_codes=["422","4XX","5XX"],
-            retry_config=retry_config
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
         )
-
+
         data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse])
+            return utils.unmarshal_json(
+                http_res.text, Optional[models.FIMCompletionResponse]
+            )
         if utils.match_response(http_res, "422", "application/json"):
             data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
             raise models.HTTPValidationError(data=data)
-        if utils.match_response(http_res, ["4XX","5XX"], "*"):
-            raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res)
-
+        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res.text, http_res
+            )
+
         content_type = http_res.headers.get("Content-Type")
-        raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res)
+        raise models.SDKError(
+            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
+            http_res.status_code,
+            http_res.text,
+            http_res,
+        )
 
-
-
     def stream(
-        self, *,
+        self,
+        *,
         model: Nullable[str],
         prompt: str,
         temperature: Optional[float] = 0.7,
@@ -232,7 +263,12 @@ class Fim(BaseSDK):
         max_tokens: OptionalNullable[int] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = True,
-        stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None,
+        stop: Optional[
+            Union[
+                models.FIMCompletionStreamRequestStop,
+                models.FIMCompletionStreamRequestStopTypedDict,
+            ]
+        ] = None,
         random_seed: OptionalNullable[int] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -249,7 +285,7 @@ class Fim(BaseSDK):
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
         :param min_tokens: The minimum number of tokens to generate in the completion.
-        :param stream:
+        :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
@@ -261,10 +297,10 @@ class Fim(BaseSDK):
         url_variables = None
         if timeout_ms is None:
             timeout_ms = self.sdk_configuration.timeout_ms
-
+
         if server_url is not None:
             base_url = server_url
-
+
         request = models.FIMCompletionStreamRequest(
             model=model,
             temperature=temperature,
@@ -277,7 +313,7 @@ class Fim(BaseSDK):
             prompt=prompt,
             suffix=suffix,
         )
-
+
         req = self.build_request(
             method="POST",
             path="/v1/fim/completions#stream",
@@ -290,48 +326,60 @@ class Fim(BaseSDK):
             user_agent_header="user-agent",
             accept_header_value="text/event-stream",
             security=self.sdk_configuration.security,
-            get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest),
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.FIMCompletionStreamRequest
+            ),
             timeout_ms=timeout_ms,
         )
-
+
         if retries == UNSET:
             if self.sdk_configuration.retry_config is not UNSET:
                 retries = self.sdk_configuration.retry_config
 
         retry_config = None
         if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, [
-                "429",
-                "500",
-                "502",
-                "503",
-                "504"
-            ])
-
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
         http_res = self.do_request(
-            hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)),
+            hook_ctx=HookContext(
+                operation_id="stream_fim",
+                oauth2_scopes=[],
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
             request=req,
-            error_status_codes=["422","4XX","5XX"],
+            error_status_codes=["422", "4XX", "5XX"],
             stream=True,
-            retry_config=retry_config
+            retry_config=retry_config,
         )
-
+
         data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]")
+            return eventstreaming.stream_events(
+                http_res,
+                lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
+                sentinel="[DONE]",
+            )
         if utils.match_response(http_res, "422", "application/json"):
             data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
             raise models.HTTPValidationError(data=data)
-        if utils.match_response(http_res, ["4XX","5XX"], "*"):
-            raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res)
-
+        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res.text, http_res
+            )
+
         content_type = http_res.headers.get("Content-Type")
-        raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res)
+        raise models.SDKError(
+            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
+            http_res.status_code,
+            http_res.text,
+            http_res,
+        )
 
-
-
     async def stream_async(
-        self, *,
+        self,
+        *,
         model: Nullable[str],
         prompt: str,
         temperature: Optional[float] = 0.7,
@@ -339,7 +387,12 @@ class Fim(BaseSDK):
         max_tokens: OptionalNullable[int] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = True,
-        stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None,
+        stop: Optional[
+            Union[
+                models.FIMCompletionStreamRequestStop,
+                models.FIMCompletionStreamRequestStopTypedDict,
+            ]
+        ] = None,
         random_seed: OptionalNullable[int] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -356,7 +409,7 @@ class Fim(BaseSDK):
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
         :param min_tokens: The minimum number of tokens to generate in the completion.
-        :param stream:
+        :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
@@ -368,10 +421,10 @@ class Fim(BaseSDK):
         url_variables = None
         if timeout_ms is None:
             timeout_ms = self.sdk_configuration.timeout_ms
-
+
         if server_url is not None:
             base_url = server_url
-
+
         request = models.FIMCompletionStreamRequest(
             model=model,
             temperature=temperature,
@@ -384,8 +437,8 @@ class Fim(BaseSDK):
             prompt=prompt,
             suffix=suffix,
         )
-
-        req = self.build_request(
+
+        req = self.build_request_async(
            method="POST",
             path="/v1/fim/completions#stream",
             base_url=base_url,
@@ -397,42 +450,53 @@ class Fim(BaseSDK):
             user_agent_header="user-agent",
             accept_header_value="text/event-stream",
             security=self.sdk_configuration.security,
-            get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest),
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.FIMCompletionStreamRequest
+            ),
             timeout_ms=timeout_ms,
         )
-
+
         if retries == UNSET:
             if self.sdk_configuration.retry_config is not UNSET:
                 retries = self.sdk_configuration.retry_config
 
         retry_config = None
         if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, [
-                "429",
-                "500",
-                "502",
-                "503",
-                "504"
-            ])
-
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
         http_res = await self.do_request_async(
-            hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)),
+            hook_ctx=HookContext(
+                operation_id="stream_fim",
+                oauth2_scopes=[],
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
             request=req,
-            error_status_codes=["422","4XX","5XX"],
+            error_status_codes=["422", "4XX", "5XX"],
             stream=True,
-            retry_config=retry_config
+            retry_config=retry_config,
        )
-
+
         data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]")
+            return eventstreaming.stream_events_async(
+                http_res,
+                lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
+                sentinel="[DONE]",
+            )
         if utils.match_response(http_res, "422", "application/json"):
             data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
             raise models.HTTPValidationError(data=data)
-        if utils.match_response(http_res, ["4XX","5XX"], "*"):
-            raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res)
-
-        content_type = http_res.headers.get("Content-Type")
-        raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res)
+        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res.text, http_res
+            )
 
-
+        content_type = http_res.headers.get("Content-Type")
+        raise models.SDKError(
+            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
+            http_res.status_code,
+            http_res.text,
+            http_res,
+        )
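
Apart from the switch to build_request_async in the two async methods, the fim.py changes above are formatting only; the public signatures of complete, complete_async, stream, and stream_async are unchanged. For orientation, a minimal usage sketch of that surface follows, assuming the package's top-level Mistral client and a codestral-latest model name (neither appears in this diff):

# Hedged sketch only: exercises Fim.complete and Fim.stream with the 1.1.0
# signatures shown above. The Mistral client class and model name are assumptions.
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Blocking fill-in-the-middle completion; `stop` accepts a string or a list.
res = client.fim.complete(
    model="codestral-latest",  # assumed model name
    prompt="def fibonacci(n: int) -> int:",
    suffix="return result",
    stop=["\n\n"],
    max_tokens=64,
)
if res is not None:  # the method is typed Optional[FIMCompletionResponse]
    print(res.choices[0].message.content)

# Streaming variant: yields CompletionEvent objects until the "[DONE]" sentinel.
for event in client.fim.stream(
    model="codestral-latest",
    prompt="def fibonacci(n: int) -> int:",
    suffix="return result",
):
    print(event.data.choices[0].delta.content or "", end="")
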
mistralai/fine_tuning.py CHANGED
@@ -4,13 +4,14 @@ from .basesdk import BaseSDK
 from .sdkconfiguration import SDKConfiguration
 from mistralai.jobs import Jobs
 
+
 class FineTuning(BaseSDK):
     jobs: Jobs
+
     def __init__(self, sdk_config: SDKConfiguration) -> None:
         BaseSDK.__init__(self, sdk_config)
         self.sdk_configuration = sdk_config
         self._init_sdks()
-
+
     def _init_sdks(self):
         self.jobs = Jobs(self.sdk_configuration)
-
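
The fine_tuning.py hunk is likewise formatting only: FineTuning still just wires the Jobs sub-SDK in _init_sdks. A hedged sketch of how that sub-client is reached, reusing the assumed Mistral client from the previous example (the list parameters below are assumptions, not shown in this hunk):

# Hedged sketch only: FineTuning exposes its Jobs sub-client as
# client.fine_tuning.jobs; page/page_size are assumed parameter names.
from mistralai import Mistral

client = Mistral(api_key="...")  # assumed client class, as above

jobs_page = client.fine_tuning.jobs.list(page=0, page_size=10)
if jobs_page is not None:
    for job in jobs_page.data or []:
        print(job.id, job.status)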