mistralai 1.9.10__py3-none-any.whl → 1.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_hooks/registration.py +5 -0
- mistralai/_hooks/tracing.py +50 -0
- mistralai/_version.py +3 -3
- mistralai/accesses.py +51 -116
- mistralai/agents.py +58 -85
- mistralai/audio.py +8 -3
- mistralai/basesdk.py +15 -5
- mistralai/batch.py +6 -3
- mistralai/beta.py +10 -5
- mistralai/chat.py +70 -97
- mistralai/classifiers.py +57 -144
- mistralai/conversations.py +435 -412
- mistralai/documents.py +156 -359
- mistralai/embeddings.py +21 -42
- mistralai/extra/observability/__init__.py +15 -0
- mistralai/extra/observability/otel.py +393 -0
- mistralai/extra/run/tools.py +28 -16
- mistralai/files.py +53 -176
- mistralai/fim.py +46 -73
- mistralai/fine_tuning.py +6 -3
- mistralai/jobs.py +49 -158
- mistralai/libraries.py +71 -178
- mistralai/mistral_agents.py +298 -179
- mistralai/mistral_jobs.py +51 -138
- mistralai/models/__init__.py +94 -5
- mistralai/models/agent.py +15 -2
- mistralai/models/agentconversation.py +11 -3
- mistralai/models/agentcreationrequest.py +6 -2
- mistralai/models/agents_api_v1_agents_deleteop.py +16 -0
- mistralai/models/agents_api_v1_agents_getop.py +40 -3
- mistralai/models/agents_api_v1_agents_listop.py +72 -2
- mistralai/models/agents_api_v1_conversations_deleteop.py +18 -0
- mistralai/models/agents_api_v1_conversations_listop.py +39 -2
- mistralai/models/agentscompletionrequest.py +21 -6
- mistralai/models/agentscompletionstreamrequest.py +21 -6
- mistralai/models/agentupdaterequest.py +18 -2
- mistralai/models/audiotranscriptionrequest.py +2 -0
- mistralai/models/batchjobin.py +10 -0
- mistralai/models/chatcompletionrequest.py +22 -5
- mistralai/models/chatcompletionstreamrequest.py +22 -5
- mistralai/models/conversationrequest.py +15 -4
- mistralai/models/conversationrestartrequest.py +50 -2
- mistralai/models/conversationrestartstreamrequest.py +50 -2
- mistralai/models/conversationstreamrequest.py +15 -4
- mistralai/models/documentout.py +26 -10
- mistralai/models/documentupdatein.py +24 -3
- mistralai/models/embeddingrequest.py +8 -8
- mistralai/models/files_api_routes_list_filesop.py +7 -0
- mistralai/models/fimcompletionrequest.py +8 -9
- mistralai/models/fimcompletionstreamrequest.py +8 -9
- mistralai/models/httpvalidationerror.py +11 -6
- mistralai/models/libraries_documents_list_v1op.py +15 -2
- mistralai/models/libraryout.py +10 -7
- mistralai/models/listfilesout.py +35 -4
- mistralai/models/mistralerror.py +26 -0
- mistralai/models/modelcapabilities.py +13 -4
- mistralai/models/modelconversation.py +8 -2
- mistralai/models/no_response_error.py +13 -0
- mistralai/models/ocrpageobject.py +26 -5
- mistralai/models/ocrrequest.py +17 -1
- mistralai/models/ocrtableobject.py +31 -0
- mistralai/models/prediction.py +4 -0
- mistralai/models/requestsource.py +7 -0
- mistralai/models/responseformat.py +4 -2
- mistralai/models/responseformats.py +0 -1
- mistralai/models/responsevalidationerror.py +25 -0
- mistralai/models/sdkerror.py +30 -14
- mistralai/models/sharingdelete.py +36 -5
- mistralai/models/sharingin.py +36 -5
- mistralai/models/sharingout.py +3 -3
- mistralai/models/toolexecutiondeltaevent.py +13 -4
- mistralai/models/toolexecutiondoneevent.py +13 -4
- mistralai/models/toolexecutionentry.py +9 -4
- mistralai/models/toolexecutionstartedevent.py +13 -4
- mistralai/models_.py +67 -212
- mistralai/ocr.py +33 -36
- mistralai/sdk.py +15 -2
- mistralai/transcriptions.py +21 -60
- mistralai/utils/__init__.py +18 -5
- mistralai/utils/eventstreaming.py +10 -0
- mistralai/utils/serializers.py +3 -2
- mistralai/utils/unmarshal_json_response.py +24 -0
- {mistralai-1.9.10.dist-info → mistralai-1.10.0.dist-info}/METADATA +89 -40
- {mistralai-1.9.10.dist-info → mistralai-1.10.0.dist-info}/RECORD +86 -75
- {mistralai-1.9.10.dist-info → mistralai-1.10.0.dist-info}/WHEEL +1 -1
- {mistralai-1.9.10.dist-info → mistralai-1.10.0.dist-info/licenses}/LICENSE +0 -0
mistralai/fim.py
CHANGED
@@ -5,7 +5,8 @@ from mistralai import models, utils
 from mistralai._hooks import HookContext
 from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import eventstreaming, get_security_from_env
-from
+from mistralai.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, Dict, Mapping, Optional, Union


 class Fim(BaseSDK):
@@ -27,6 +28,7 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -38,7 +40,7 @@ class Fim(BaseSDK):

         FIM completion.

-        :param model: ID of the model to use.
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -46,6 +48,7 @@ class Fim(BaseSDK):
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -71,6 +74,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -120,31 +124,20 @@ class Fim(BaseSDK):

         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return
+            return unmarshal_json_response(models.FIMCompletionResponse, http_res)
         if utils.match_response(http_res, "422", "application/json"):
-            response_data =
-
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)

     async def complete_async(
         self,
@@ -162,6 +155,7 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -173,7 +167,7 @@ class Fim(BaseSDK):

         FIM completion.

-        :param model: ID of the model to use.
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -181,6 +175,7 @@ class Fim(BaseSDK):
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -206,6 +201,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -255,31 +251,20 @@ class Fim(BaseSDK):

         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return
+            return unmarshal_json_response(models.FIMCompletionResponse, http_res)
         if utils.match_response(http_res, "422", "application/json"):
-            response_data =
-
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)

     def stream(
         self,
@@ -297,6 +282,7 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -308,7 +294,7 @@ class Fim(BaseSDK):

         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

-        :param model: ID of the model to use.
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -316,6 +302,7 @@ class Fim(BaseSDK):
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -341,6 +328,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -395,32 +383,23 @@ class Fim(BaseSDK):
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
+                client_ref=self,
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = utils.stream_to_text(http_res)
-            response_data =
-
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res, http_res_text
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res, http_res_text)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-        content_type = http_res.headers.get("Content-Type")
         http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res, http_res_text)

     async def stream_async(
         self,
@@ -438,6 +417,7 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
@@ -449,7 +429,7 @@ class Fim(BaseSDK):

         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

-        :param model: ID of the model to use.
+        :param model: ID of the model with FIM to use.
         :param prompt: The text/code to complete.
         :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
@@ -457,6 +437,7 @@ class Fim(BaseSDK):
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -482,6 +463,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -536,29 +518,20 @@ class Fim(BaseSDK):
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
+                client_ref=self,
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            response_data =
-
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res, http_res_text
             )
-            raise models.HTTPValidationError(
+            raise models.HTTPValidationError(response_data, http_res, http_res_text)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-        content_type = http_res.headers.get("Content-Type")
         http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res, http_res_text)
mistralai/fine_tuning.py
CHANGED
@@ -3,15 +3,18 @@
 from .basesdk import BaseSDK
 from .sdkconfiguration import SDKConfiguration
 from mistralai.jobs import Jobs
+from typing import Optional


 class FineTuning(BaseSDK):
     jobs: Jobs

-    def __init__(
-
+    def __init__(
+        self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
+    ) -> None:
+        BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
         self.sdk_configuration = sdk_config
         self._init_sdks()

     def _init_sdks(self):
-        self.jobs = Jobs(self.sdk_configuration)
+        self.jobs = Jobs(self.sdk_configuration, parent_ref=self.parent_ref)
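
The fine_tuning.py change is one instance of a release-wide pattern: nested namespaces are now constructed with an explicit parent_ref pointing back at the object that created them, and forward it to their own children (the client_ref=self passed into the event stream in fim.py looks like a consumer of the same plumbing). A toy sketch of the pattern, using a stand-in BaseSDK rather than the package's real one:

from typing import Optional


class BaseSDK:
    # Stand-in: the real mistralai BaseSDK also stores hooks and configuration.
    def __init__(self, sdk_config: dict, parent_ref: Optional[object] = None) -> None:
        self.sdk_configuration = sdk_config
        # Root objects receive no parent_ref and become their own root; nested
        # namespaces inherit whatever their parent forwarded. (How the real
        # BaseSDK defaults this is an assumption.)
        self.parent_ref = parent_ref if parent_ref is not None else self


class Jobs(BaseSDK):
    pass


class FineTuning(BaseSDK):
    def __init__(self, sdk_config: dict, parent_ref: Optional[object] = None) -> None:
        BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
        # Forward our own parent_ref so jobs can reach the root client too.
        self.jobs = Jobs(self.sdk_configuration, parent_ref=self.parent_ref)


root = object()  # stands in for the top-level Mistral client
ft = FineTuning({"server_url": "https://api.mistral.ai"}, parent_ref=root)
assert ft.jobs.parent_ref is root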
mistralai/jobs.py
CHANGED
@@ -6,6 +6,7 @@ from mistralai import models, utils
 from mistralai._hooks import HookContext
 from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import get_security_from_env
+from mistralai.utils.unmarshal_json_response import unmarshal_json_response
 from typing import List, Mapping, Optional, Union


@@ -110,26 +111,15 @@ class Jobs(BaseSDK):
         )

         if utils.match_response(http_res, "200", "application/json"):
-            return
+            return unmarshal_json_response(models.JobsOut, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)

     async def list_async(
         self,
@@ -231,26 +221,15 @@ class Jobs(BaseSDK):
         )

         if utils.match_response(http_res, "200", "application/json"):
-            return
+            return unmarshal_json_response(models.JobsOut, http_res)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)

     def create(
         self,
@@ -383,28 +362,17 @@ class Jobs(BaseSDK):
         )

         if utils.match_response(http_res, "200", "application/json"):
-            return
-
+            return unmarshal_json_response(
+                models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res
             )
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)

     async def create_async(
         self,
@@ -537,28 +505,17 @@ class Jobs(BaseSDK):
         )

         if utils.match_response(http_res, "200", "application/json"):
-            return
-
+            return unmarshal_json_response(
+                models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res
             )
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)

     def get(
         self,
@@ -633,28 +590,17 @@ class Jobs(BaseSDK):
         )

         if utils.match_response(http_res, "200", "application/json"):
-            return
-
+            return unmarshal_json_response(
+                models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res
             )
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)

     async def get_async(
         self,
@@ -729,28 +675,17 @@ class Jobs(BaseSDK):
         )

         if utils.match_response(http_res, "200", "application/json"):
-            return
-
+            return unmarshal_json_response(
+                models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res
             )
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)

     def cancel(
         self,
@@ -825,28 +760,17 @@ class Jobs(BaseSDK):
         )

         if utils.match_response(http_res, "200", "application/json"):
-            return
-
+            return unmarshal_json_response(
+                models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res
             )
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)

     async def cancel_async(
         self,
@@ -921,28 +845,17 @@ class Jobs(BaseSDK):
         )

         if utils.match_response(http_res, "200", "application/json"):
-            return
-
+            return unmarshal_json_response(
+                models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res
             )
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)

     def start(
         self,
@@ -1017,28 +930,17 @@ class Jobs(BaseSDK):
         )

         if utils.match_response(http_res, "200", "application/json"):
-            return
-
+            return unmarshal_json_response(
+                models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res
             )
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = utils.stream_to_text(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)

     async def start_async(
         self,
@@ -1113,25 +1015,14 @@ class Jobs(BaseSDK):
         )

         if utils.match_response(http_res, "200", "application/json"):
-            return
-
+            return unmarshal_json_response(
+                models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res
             )
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)
         if utils.match_response(http_res, "5XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError(
-                "API error occurred", http_res.status_code, http_res_text, http_res
-            )
+            raise models.SDKError("API error occurred", http_res, http_res_text)

-
-        http_res_text = await utils.stream_to_text_async(http_res)
-        raise models.SDKError(
-            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
-            http_res.status_code,
-            http_res_text,
-            http_res,
-        )
+        raise models.SDKError("Unexpected response received", http_res)