mistralai 1.3.1__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. mistralai/__init__.py +10 -1
  2. mistralai/_version.py +4 -1
  3. mistralai/agents.py +58 -14
  4. mistralai/chat.py +140 -14
  5. mistralai/classifiers.py +32 -20
  6. mistralai/embeddings.py +16 -10
  7. mistralai/extra/README.md +56 -0
  8. mistralai/extra/__init__.py +5 -0
  9. mistralai/extra/struct_chat.py +41 -0
  10. mistralai/extra/tests/__init__.py +0 -0
  11. mistralai/extra/tests/test_struct_chat.py +103 -0
  12. mistralai/extra/tests/test_utils.py +162 -0
  13. mistralai/extra/utils/__init__.py +3 -0
  14. mistralai/extra/utils/_pydantic_helper.py +20 -0
  15. mistralai/extra/utils/response_format.py +24 -0
  16. mistralai/files.py +94 -34
  17. mistralai/fim.py +30 -14
  18. mistralai/httpclient.py +50 -0
  19. mistralai/jobs.py +80 -32
  20. mistralai/mistral_jobs.py +64 -24
  21. mistralai/models/__init__.py +8 -0
  22. mistralai/models/agentscompletionrequest.py +5 -0
  23. mistralai/models/agentscompletionstreamrequest.py +5 -0
  24. mistralai/models/chatcompletionrequest.py +5 -0
  25. mistralai/models/chatcompletionstreamrequest.py +5 -0
  26. mistralai/models/fileschema.py +3 -2
  27. mistralai/models/function.py +3 -0
  28. mistralai/models/jsonschema.py +55 -0
  29. mistralai/models/prediction.py +26 -0
  30. mistralai/models/responseformat.py +36 -1
  31. mistralai/models/responseformats.py +1 -1
  32. mistralai/models/retrievefileout.py +3 -2
  33. mistralai/models/toolcall.py +3 -0
  34. mistralai/models/uploadfileout.py +3 -2
  35. mistralai/models_.py +92 -48
  36. mistralai/sdk.py +13 -3
  37. mistralai/sdkconfiguration.py +10 -4
  38. {mistralai-1.3.1.dist-info → mistralai-1.5.0.dist-info}/METADATA +41 -42
  39. {mistralai-1.3.1.dist-info → mistralai-1.5.0.dist-info}/RECORD +43 -33
  40. {mistralai-1.3.1.dist-info → mistralai-1.5.0.dist-info}/WHEEL +1 -1
  41. mistralai_azure/_hooks/custom_user_agent.py +1 -1
  42. mistralai_gcp/sdk.py +1 -2
  43. py.typed +0 -1
  44. {mistralai-1.3.1.dist-info → mistralai-1.5.0.dist-info}/LICENSE +0 -0
mistralai/jobs.py CHANGED
@@ -26,7 +26,7 @@ class Jobs(BaseSDK):
26
26
  server_url: Optional[str] = None,
27
27
  timeout_ms: Optional[int] = None,
28
28
  http_headers: Optional[Mapping[str, str]] = None,
29
- ) -> Optional[models.JobsOut]:
29
+ ) -> models.JobsOut:
30
30
  r"""Get Fine Tuning Jobs
31
31
 
32
32
  Get a list of fine-tuning jobs for your organization and user.
@@ -103,8 +103,13 @@ class Jobs(BaseSDK):
103
103
  )
104
104
 
105
105
  if utils.match_response(http_res, "200", "application/json"):
106
- return utils.unmarshal_json(http_res.text, Optional[models.JobsOut])
107
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
106
+ return utils.unmarshal_json(http_res.text, models.JobsOut)
107
+ if utils.match_response(http_res, "4XX", "*"):
108
+ http_res_text = utils.stream_to_text(http_res)
109
+ raise models.SDKError(
110
+ "API error occurred", http_res.status_code, http_res_text, http_res
111
+ )
112
+ if utils.match_response(http_res, "5XX", "*"):
108
113
  http_res_text = utils.stream_to_text(http_res)
109
114
  raise models.SDKError(
110
115
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -135,7 +140,7 @@ class Jobs(BaseSDK):
135
140
  server_url: Optional[str] = None,
136
141
  timeout_ms: Optional[int] = None,
137
142
  http_headers: Optional[Mapping[str, str]] = None,
138
- ) -> Optional[models.JobsOut]:
143
+ ) -> models.JobsOut:
139
144
  r"""Get Fine Tuning Jobs
140
145
 
141
146
  Get a list of fine-tuning jobs for your organization and user.
@@ -212,8 +217,13 @@ class Jobs(BaseSDK):
212
217
  )
213
218
 
214
219
  if utils.match_response(http_res, "200", "application/json"):
215
- return utils.unmarshal_json(http_res.text, Optional[models.JobsOut])
216
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
220
+ return utils.unmarshal_json(http_res.text, models.JobsOut)
221
+ if utils.match_response(http_res, "4XX", "*"):
222
+ http_res_text = await utils.stream_to_text_async(http_res)
223
+ raise models.SDKError(
224
+ "API error occurred", http_res.status_code, http_res_text, http_res
225
+ )
226
+ if utils.match_response(http_res, "5XX", "*"):
217
227
  http_res_text = await utils.stream_to_text_async(http_res)
218
228
  raise models.SDKError(
219
229
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -255,7 +265,7 @@ class Jobs(BaseSDK):
255
265
  server_url: Optional[str] = None,
256
266
  timeout_ms: Optional[int] = None,
257
267
  http_headers: Optional[Mapping[str, str]] = None,
258
- ) -> Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]:
268
+ ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse:
259
269
  r"""Create Fine Tuning Job
260
270
 
261
271
  Create a new fine-tuning job, it will be queued for processing.
@@ -342,10 +352,14 @@ class Jobs(BaseSDK):
342
352
 
343
353
  if utils.match_response(http_res, "200", "application/json"):
344
354
  return utils.unmarshal_json(
345
- http_res.text,
346
- Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse],
355
+ http_res.text, models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse
356
+ )
357
+ if utils.match_response(http_res, "4XX", "*"):
358
+ http_res_text = utils.stream_to_text(http_res)
359
+ raise models.SDKError(
360
+ "API error occurred", http_res.status_code, http_res_text, http_res
347
361
  )
348
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
362
+ if utils.match_response(http_res, "5XX", "*"):
349
363
  http_res_text = utils.stream_to_text(http_res)
350
364
  raise models.SDKError(
351
365
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -387,7 +401,7 @@ class Jobs(BaseSDK):
387
401
  server_url: Optional[str] = None,
388
402
  timeout_ms: Optional[int] = None,
389
403
  http_headers: Optional[Mapping[str, str]] = None,
390
- ) -> Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]:
404
+ ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse:
391
405
  r"""Create Fine Tuning Job
392
406
 
393
407
  Create a new fine-tuning job, it will be queued for processing.
@@ -474,10 +488,14 @@ class Jobs(BaseSDK):
474
488
 
475
489
  if utils.match_response(http_res, "200", "application/json"):
476
490
  return utils.unmarshal_json(
477
- http_res.text,
478
- Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse],
491
+ http_res.text, models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse
492
+ )
493
+ if utils.match_response(http_res, "4XX", "*"):
494
+ http_res_text = await utils.stream_to_text_async(http_res)
495
+ raise models.SDKError(
496
+ "API error occurred", http_res.status_code, http_res_text, http_res
479
497
  )
480
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
498
+ if utils.match_response(http_res, "5XX", "*"):
481
499
  http_res_text = await utils.stream_to_text_async(http_res)
482
500
  raise models.SDKError(
483
501
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -500,7 +518,7 @@ class Jobs(BaseSDK):
500
518
  server_url: Optional[str] = None,
501
519
  timeout_ms: Optional[int] = None,
502
520
  http_headers: Optional[Mapping[str, str]] = None,
503
- ) -> Optional[models.DetailedJobOut]:
521
+ ) -> models.DetailedJobOut:
504
522
  r"""Get Fine Tuning Job
505
523
 
506
524
  Get a fine-tuned job details by its UUID.
@@ -561,8 +579,13 @@ class Jobs(BaseSDK):
561
579
  )
562
580
 
563
581
  if utils.match_response(http_res, "200", "application/json"):
564
- return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut])
565
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
582
+ return utils.unmarshal_json(http_res.text, models.DetailedJobOut)
583
+ if utils.match_response(http_res, "4XX", "*"):
584
+ http_res_text = utils.stream_to_text(http_res)
585
+ raise models.SDKError(
586
+ "API error occurred", http_res.status_code, http_res_text, http_res
587
+ )
588
+ if utils.match_response(http_res, "5XX", "*"):
566
589
  http_res_text = utils.stream_to_text(http_res)
567
590
  raise models.SDKError(
568
591
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -585,7 +608,7 @@ class Jobs(BaseSDK):
585
608
  server_url: Optional[str] = None,
586
609
  timeout_ms: Optional[int] = None,
587
610
  http_headers: Optional[Mapping[str, str]] = None,
588
- ) -> Optional[models.DetailedJobOut]:
611
+ ) -> models.DetailedJobOut:
589
612
  r"""Get Fine Tuning Job
590
613
 
591
614
  Get a fine-tuned job details by its UUID.
@@ -646,8 +669,13 @@ class Jobs(BaseSDK):
646
669
  )
647
670
 
648
671
  if utils.match_response(http_res, "200", "application/json"):
649
- return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut])
650
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
672
+ return utils.unmarshal_json(http_res.text, models.DetailedJobOut)
673
+ if utils.match_response(http_res, "4XX", "*"):
674
+ http_res_text = await utils.stream_to_text_async(http_res)
675
+ raise models.SDKError(
676
+ "API error occurred", http_res.status_code, http_res_text, http_res
677
+ )
678
+ if utils.match_response(http_res, "5XX", "*"):
651
679
  http_res_text = await utils.stream_to_text_async(http_res)
652
680
  raise models.SDKError(
653
681
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -670,7 +698,7 @@ class Jobs(BaseSDK):
670
698
  server_url: Optional[str] = None,
671
699
  timeout_ms: Optional[int] = None,
672
700
  http_headers: Optional[Mapping[str, str]] = None,
673
- ) -> Optional[models.DetailedJobOut]:
701
+ ) -> models.DetailedJobOut:
674
702
  r"""Cancel Fine Tuning Job
675
703
 
676
704
  Request the cancellation of a fine tuning job.
@@ -731,8 +759,13 @@ class Jobs(BaseSDK):
731
759
  )
732
760
 
733
761
  if utils.match_response(http_res, "200", "application/json"):
734
- return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut])
735
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
762
+ return utils.unmarshal_json(http_res.text, models.DetailedJobOut)
763
+ if utils.match_response(http_res, "4XX", "*"):
764
+ http_res_text = utils.stream_to_text(http_res)
765
+ raise models.SDKError(
766
+ "API error occurred", http_res.status_code, http_res_text, http_res
767
+ )
768
+ if utils.match_response(http_res, "5XX", "*"):
736
769
  http_res_text = utils.stream_to_text(http_res)
737
770
  raise models.SDKError(
738
771
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -755,7 +788,7 @@ class Jobs(BaseSDK):
755
788
  server_url: Optional[str] = None,
756
789
  timeout_ms: Optional[int] = None,
757
790
  http_headers: Optional[Mapping[str, str]] = None,
758
- ) -> Optional[models.DetailedJobOut]:
791
+ ) -> models.DetailedJobOut:
759
792
  r"""Cancel Fine Tuning Job
760
793
 
761
794
  Request the cancellation of a fine tuning job.
@@ -816,8 +849,13 @@ class Jobs(BaseSDK):
816
849
  )
817
850
 
818
851
  if utils.match_response(http_res, "200", "application/json"):
819
- return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut])
820
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
852
+ return utils.unmarshal_json(http_res.text, models.DetailedJobOut)
853
+ if utils.match_response(http_res, "4XX", "*"):
854
+ http_res_text = await utils.stream_to_text_async(http_res)
855
+ raise models.SDKError(
856
+ "API error occurred", http_res.status_code, http_res_text, http_res
857
+ )
858
+ if utils.match_response(http_res, "5XX", "*"):
821
859
  http_res_text = await utils.stream_to_text_async(http_res)
822
860
  raise models.SDKError(
823
861
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -840,7 +878,7 @@ class Jobs(BaseSDK):
840
878
  server_url: Optional[str] = None,
841
879
  timeout_ms: Optional[int] = None,
842
880
  http_headers: Optional[Mapping[str, str]] = None,
843
- ) -> Optional[models.DetailedJobOut]:
881
+ ) -> models.DetailedJobOut:
844
882
  r"""Start Fine Tuning Job
845
883
 
846
884
  Request the start of a validated fine tuning job.
@@ -901,8 +939,13 @@ class Jobs(BaseSDK):
901
939
  )
902
940
 
903
941
  if utils.match_response(http_res, "200", "application/json"):
904
- return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut])
905
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
942
+ return utils.unmarshal_json(http_res.text, models.DetailedJobOut)
943
+ if utils.match_response(http_res, "4XX", "*"):
944
+ http_res_text = utils.stream_to_text(http_res)
945
+ raise models.SDKError(
946
+ "API error occurred", http_res.status_code, http_res_text, http_res
947
+ )
948
+ if utils.match_response(http_res, "5XX", "*"):
906
949
  http_res_text = utils.stream_to_text(http_res)
907
950
  raise models.SDKError(
908
951
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -925,7 +968,7 @@ class Jobs(BaseSDK):
925
968
  server_url: Optional[str] = None,
926
969
  timeout_ms: Optional[int] = None,
927
970
  http_headers: Optional[Mapping[str, str]] = None,
928
- ) -> Optional[models.DetailedJobOut]:
971
+ ) -> models.DetailedJobOut:
929
972
  r"""Start Fine Tuning Job
930
973
 
931
974
  Request the start of a validated fine tuning job.
@@ -986,8 +1029,13 @@ class Jobs(BaseSDK):
986
1029
  )
987
1030
 
988
1031
  if utils.match_response(http_res, "200", "application/json"):
989
- return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut])
990
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
1032
+ return utils.unmarshal_json(http_res.text, models.DetailedJobOut)
1033
+ if utils.match_response(http_res, "4XX", "*"):
1034
+ http_res_text = await utils.stream_to_text_async(http_res)
1035
+ raise models.SDKError(
1036
+ "API error occurred", http_res.status_code, http_res_text, http_res
1037
+ )
1038
+ if utils.match_response(http_res, "5XX", "*"):
991
1039
  http_res_text = await utils.stream_to_text_async(http_res)
992
1040
  raise models.SDKError(
993
1041
  "API error occurred", http_res.status_code, http_res_text, http_res
mistralai/mistral_jobs.py CHANGED
@@ -24,7 +24,7 @@ class MistralJobs(BaseSDK):
24
24
  server_url: Optional[str] = None,
25
25
  timeout_ms: Optional[int] = None,
26
26
  http_headers: Optional[Mapping[str, str]] = None,
27
- ) -> Optional[models.BatchJobsOut]:
27
+ ) -> models.BatchJobsOut:
28
28
  r"""Get Batch Jobs
29
29
 
30
30
  Get a list of batch jobs for your organization and user.
@@ -97,8 +97,13 @@ class MistralJobs(BaseSDK):
97
97
  )
98
98
 
99
99
  if utils.match_response(http_res, "200", "application/json"):
100
- return utils.unmarshal_json(http_res.text, Optional[models.BatchJobsOut])
101
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
100
+ return utils.unmarshal_json(http_res.text, models.BatchJobsOut)
101
+ if utils.match_response(http_res, "4XX", "*"):
102
+ http_res_text = utils.stream_to_text(http_res)
103
+ raise models.SDKError(
104
+ "API error occurred", http_res.status_code, http_res_text, http_res
105
+ )
106
+ if utils.match_response(http_res, "5XX", "*"):
102
107
  http_res_text = utils.stream_to_text(http_res)
103
108
  raise models.SDKError(
104
109
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -127,7 +132,7 @@ class MistralJobs(BaseSDK):
127
132
  server_url: Optional[str] = None,
128
133
  timeout_ms: Optional[int] = None,
129
134
  http_headers: Optional[Mapping[str, str]] = None,
130
- ) -> Optional[models.BatchJobsOut]:
135
+ ) -> models.BatchJobsOut:
131
136
  r"""Get Batch Jobs
132
137
 
133
138
  Get a list of batch jobs for your organization and user.
@@ -200,8 +205,13 @@ class MistralJobs(BaseSDK):
200
205
  )
201
206
 
202
207
  if utils.match_response(http_res, "200", "application/json"):
203
- return utils.unmarshal_json(http_res.text, Optional[models.BatchJobsOut])
204
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
208
+ return utils.unmarshal_json(http_res.text, models.BatchJobsOut)
209
+ if utils.match_response(http_res, "4XX", "*"):
210
+ http_res_text = await utils.stream_to_text_async(http_res)
211
+ raise models.SDKError(
212
+ "API error occurred", http_res.status_code, http_res_text, http_res
213
+ )
214
+ if utils.match_response(http_res, "5XX", "*"):
205
215
  http_res_text = await utils.stream_to_text_async(http_res)
206
216
  raise models.SDKError(
207
217
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -228,7 +238,7 @@ class MistralJobs(BaseSDK):
228
238
  server_url: Optional[str] = None,
229
239
  timeout_ms: Optional[int] = None,
230
240
  http_headers: Optional[Mapping[str, str]] = None,
231
- ) -> Optional[models.BatchJobOut]:
241
+ ) -> models.BatchJobOut:
232
242
  r"""Create Batch Job
233
243
 
234
244
  Create a new batch job, it will be queued for processing.
@@ -300,8 +310,13 @@ class MistralJobs(BaseSDK):
300
310
  )
301
311
 
302
312
  if utils.match_response(http_res, "200", "application/json"):
303
- return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut])
304
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
313
+ return utils.unmarshal_json(http_res.text, models.BatchJobOut)
314
+ if utils.match_response(http_res, "4XX", "*"):
315
+ http_res_text = utils.stream_to_text(http_res)
316
+ raise models.SDKError(
317
+ "API error occurred", http_res.status_code, http_res_text, http_res
318
+ )
319
+ if utils.match_response(http_res, "5XX", "*"):
305
320
  http_res_text = utils.stream_to_text(http_res)
306
321
  raise models.SDKError(
307
322
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -328,7 +343,7 @@ class MistralJobs(BaseSDK):
328
343
  server_url: Optional[str] = None,
329
344
  timeout_ms: Optional[int] = None,
330
345
  http_headers: Optional[Mapping[str, str]] = None,
331
- ) -> Optional[models.BatchJobOut]:
346
+ ) -> models.BatchJobOut:
332
347
  r"""Create Batch Job
333
348
 
334
349
  Create a new batch job, it will be queued for processing.
@@ -400,8 +415,13 @@ class MistralJobs(BaseSDK):
400
415
  )
401
416
 
402
417
  if utils.match_response(http_res, "200", "application/json"):
403
- return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut])
404
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
418
+ return utils.unmarshal_json(http_res.text, models.BatchJobOut)
419
+ if utils.match_response(http_res, "4XX", "*"):
420
+ http_res_text = await utils.stream_to_text_async(http_res)
421
+ raise models.SDKError(
422
+ "API error occurred", http_res.status_code, http_res_text, http_res
423
+ )
424
+ if utils.match_response(http_res, "5XX", "*"):
405
425
  http_res_text = await utils.stream_to_text_async(http_res)
406
426
  raise models.SDKError(
407
427
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -424,7 +444,7 @@ class MistralJobs(BaseSDK):
424
444
  server_url: Optional[str] = None,
425
445
  timeout_ms: Optional[int] = None,
426
446
  http_headers: Optional[Mapping[str, str]] = None,
427
- ) -> Optional[models.BatchJobOut]:
447
+ ) -> models.BatchJobOut:
428
448
  r"""Get Batch Job
429
449
 
430
450
  Get a batch job details by its UUID.
@@ -485,8 +505,13 @@ class MistralJobs(BaseSDK):
485
505
  )
486
506
 
487
507
  if utils.match_response(http_res, "200", "application/json"):
488
- return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut])
489
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
508
+ return utils.unmarshal_json(http_res.text, models.BatchJobOut)
509
+ if utils.match_response(http_res, "4XX", "*"):
510
+ http_res_text = utils.stream_to_text(http_res)
511
+ raise models.SDKError(
512
+ "API error occurred", http_res.status_code, http_res_text, http_res
513
+ )
514
+ if utils.match_response(http_res, "5XX", "*"):
490
515
  http_res_text = utils.stream_to_text(http_res)
491
516
  raise models.SDKError(
492
517
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -509,7 +534,7 @@ class MistralJobs(BaseSDK):
509
534
  server_url: Optional[str] = None,
510
535
  timeout_ms: Optional[int] = None,
511
536
  http_headers: Optional[Mapping[str, str]] = None,
512
- ) -> Optional[models.BatchJobOut]:
537
+ ) -> models.BatchJobOut:
513
538
  r"""Get Batch Job
514
539
 
515
540
  Get a batch job details by its UUID.
@@ -570,8 +595,13 @@ class MistralJobs(BaseSDK):
570
595
  )
571
596
 
572
597
  if utils.match_response(http_res, "200", "application/json"):
573
- return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut])
574
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
598
+ return utils.unmarshal_json(http_res.text, models.BatchJobOut)
599
+ if utils.match_response(http_res, "4XX", "*"):
600
+ http_res_text = await utils.stream_to_text_async(http_res)
601
+ raise models.SDKError(
602
+ "API error occurred", http_res.status_code, http_res_text, http_res
603
+ )
604
+ if utils.match_response(http_res, "5XX", "*"):
575
605
  http_res_text = await utils.stream_to_text_async(http_res)
576
606
  raise models.SDKError(
577
607
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -594,7 +624,7 @@ class MistralJobs(BaseSDK):
594
624
  server_url: Optional[str] = None,
595
625
  timeout_ms: Optional[int] = None,
596
626
  http_headers: Optional[Mapping[str, str]] = None,
597
- ) -> Optional[models.BatchJobOut]:
627
+ ) -> models.BatchJobOut:
598
628
  r"""Cancel Batch Job
599
629
 
600
630
  Request the cancellation of a batch job.
@@ -655,8 +685,13 @@ class MistralJobs(BaseSDK):
655
685
  )
656
686
 
657
687
  if utils.match_response(http_res, "200", "application/json"):
658
- return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut])
659
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
688
+ return utils.unmarshal_json(http_res.text, models.BatchJobOut)
689
+ if utils.match_response(http_res, "4XX", "*"):
690
+ http_res_text = utils.stream_to_text(http_res)
691
+ raise models.SDKError(
692
+ "API error occurred", http_res.status_code, http_res_text, http_res
693
+ )
694
+ if utils.match_response(http_res, "5XX", "*"):
660
695
  http_res_text = utils.stream_to_text(http_res)
661
696
  raise models.SDKError(
662
697
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -679,7 +714,7 @@ class MistralJobs(BaseSDK):
679
714
  server_url: Optional[str] = None,
680
715
  timeout_ms: Optional[int] = None,
681
716
  http_headers: Optional[Mapping[str, str]] = None,
682
- ) -> Optional[models.BatchJobOut]:
717
+ ) -> models.BatchJobOut:
683
718
  r"""Cancel Batch Job
684
719
 
685
720
  Request the cancellation of a batch job.
@@ -740,8 +775,13 @@ class MistralJobs(BaseSDK):
740
775
  )
741
776
 
742
777
  if utils.match_response(http_res, "200", "application/json"):
743
- return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut])
744
- if utils.match_response(http_res, ["4XX", "5XX"], "*"):
778
+ return utils.unmarshal_json(http_res.text, models.BatchJobOut)
779
+ if utils.match_response(http_res, "4XX", "*"):
780
+ http_res_text = await utils.stream_to_text_async(http_res)
781
+ raise models.SDKError(
782
+ "API error occurred", http_res.status_code, http_res_text, http_res
783
+ )
784
+ if utils.match_response(http_res, "5XX", "*"):
745
785
  http_res_text = await utils.stream_to_text_async(http_res)
746
786
  raise models.SDKError(
747
787
  "API error occurred", http_res.status_code, http_res_text, http_res
@@ -264,6 +264,7 @@ from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import (
264
264
  JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict,
265
265
  )
266
266
  from .jobsout import JobsOut, JobsOutObject, JobsOutTypedDict
267
+ from .jsonschema import JSONSchema, JSONSchemaTypedDict
267
268
  from .legacyjobmetadataout import (
268
269
  LegacyJobMetadataOut,
269
270
  LegacyJobMetadataOutObject,
@@ -273,6 +274,7 @@ from .listfilesout import ListFilesOut, ListFilesOutTypedDict
273
274
  from .metricout import MetricOut, MetricOutTypedDict
274
275
  from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict
275
276
  from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict
277
+ from .prediction import Prediction, PredictionType, PredictionTypedDict
276
278
  from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict
277
279
  from .responseformat import ResponseFormat, ResponseFormatTypedDict
278
280
  from .responseformats import ResponseFormats
@@ -342,6 +344,7 @@ from .wandbintegrationout import (
342
344
  WandbIntegrationOutTypedDict,
343
345
  )
344
346
 
347
+
345
348
  __all__ = [
346
349
  "APIEndpoint",
347
350
  "AgentsCompletionRequest",
@@ -513,6 +516,8 @@ __all__ = [
513
516
  "InputsTypedDict",
514
517
  "Integrations",
515
518
  "IntegrationsTypedDict",
519
+ "JSONSchema",
520
+ "JSONSchemaTypedDict",
516
521
  "JobIn",
517
522
  "JobInIntegrations",
518
523
  "JobInIntegrationsTypedDict",
@@ -566,6 +571,9 @@ __all__ = [
566
571
  "Object",
567
572
  "One",
568
573
  "OneTypedDict",
574
+ "Prediction",
575
+ "PredictionType",
576
+ "PredictionTypedDict",
569
577
  "QueryParamStatus",
570
578
  "ReferenceChunk",
571
579
  "ReferenceChunkType",
@@ -2,6 +2,7 @@
2
2
 
3
3
  from __future__ import annotations
4
4
  from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
5
+ from .prediction import Prediction, PredictionTypedDict
5
6
  from .responseformat import ResponseFormat, ResponseFormatTypedDict
6
7
  from .systemmessage import SystemMessage, SystemMessageTypedDict
7
8
  from .tool import Tool, ToolTypedDict
@@ -83,6 +84,7 @@ class AgentsCompletionRequestTypedDict(TypedDict):
83
84
  r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
84
85
  n: NotRequired[Nullable[int]]
85
86
  r"""Number of completions to return for each request, input tokens are only billed once."""
87
+ prediction: NotRequired[PredictionTypedDict]
86
88
 
87
89
 
88
90
  class AgentsCompletionRequest(BaseModel):
@@ -119,6 +121,8 @@ class AgentsCompletionRequest(BaseModel):
119
121
  n: OptionalNullable[int] = UNSET
120
122
  r"""Number of completions to return for each request, input tokens are only billed once."""
121
123
 
124
+ prediction: Optional[Prediction] = None
125
+
122
126
  @model_serializer(mode="wrap")
123
127
  def serialize_model(self, handler):
124
128
  optional_fields = [
@@ -132,6 +136,7 @@ class AgentsCompletionRequest(BaseModel):
132
136
  "presence_penalty",
133
137
  "frequency_penalty",
134
138
  "n",
139
+ "prediction",
135
140
  ]
136
141
  nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
137
142
  null_default_fields = []
@@ -2,6 +2,7 @@
2
2
 
3
3
  from __future__ import annotations
4
4
  from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
5
+ from .prediction import Prediction, PredictionTypedDict
5
6
  from .responseformat import ResponseFormat, ResponseFormatTypedDict
6
7
  from .systemmessage import SystemMessage, SystemMessageTypedDict
7
8
  from .tool import Tool, ToolTypedDict
@@ -82,6 +83,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
82
83
  r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
83
84
  n: NotRequired[Nullable[int]]
84
85
  r"""Number of completions to return for each request, input tokens are only billed once."""
86
+ prediction: NotRequired[PredictionTypedDict]
85
87
 
86
88
 
87
89
  class AgentsCompletionStreamRequest(BaseModel):
@@ -117,6 +119,8 @@ class AgentsCompletionStreamRequest(BaseModel):
117
119
  n: OptionalNullable[int] = UNSET
118
120
  r"""Number of completions to return for each request, input tokens are only billed once."""
119
121
 
122
+ prediction: Optional[Prediction] = None
123
+
120
124
  @model_serializer(mode="wrap")
121
125
  def serialize_model(self, handler):
122
126
  optional_fields = [
@@ -130,6 +134,7 @@ class AgentsCompletionStreamRequest(BaseModel):
130
134
  "presence_penalty",
131
135
  "frequency_penalty",
132
136
  "n",
137
+ "prediction",
133
138
  ]
134
139
  nullable_fields = ["max_tokens", "random_seed", "tools", "n"]
135
140
  null_default_fields = []
@@ -2,6 +2,7 @@
2
2
 
3
3
  from __future__ import annotations
4
4
  from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
5
+ from .prediction import Prediction, PredictionTypedDict
5
6
  from .responseformat import ResponseFormat, ResponseFormatTypedDict
6
7
  from .systemmessage import SystemMessage, SystemMessageTypedDict
7
8
  from .tool import Tool, ToolTypedDict
@@ -83,6 +84,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
83
84
  r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
84
85
  n: NotRequired[Nullable[int]]
85
86
  r"""Number of completions to return for each request, input tokens are only billed once."""
87
+ prediction: NotRequired[PredictionTypedDict]
86
88
  safe_prompt: NotRequired[bool]
87
89
  r"""Whether to inject a safety prompt before all conversations."""
88
90
 
@@ -127,6 +129,8 @@ class ChatCompletionRequest(BaseModel):
127
129
  n: OptionalNullable[int] = UNSET
128
130
  r"""Number of completions to return for each request, input tokens are only billed once."""
129
131
 
132
+ prediction: Optional[Prediction] = None
133
+
130
134
  safe_prompt: Optional[bool] = None
131
135
  r"""Whether to inject a safety prompt before all conversations."""
132
136
 
@@ -145,6 +149,7 @@ class ChatCompletionRequest(BaseModel):
145
149
  "presence_penalty",
146
150
  "frequency_penalty",
147
151
  "n",
152
+ "prediction",
148
153
  "safe_prompt",
149
154
  ]
150
155
  nullable_fields = [
@@ -2,6 +2,7 @@
2
2
 
3
3
  from __future__ import annotations
4
4
  from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
5
+ from .prediction import Prediction, PredictionTypedDict
5
6
  from .responseformat import ResponseFormat, ResponseFormatTypedDict
6
7
  from .systemmessage import SystemMessage, SystemMessageTypedDict
7
8
  from .tool import Tool, ToolTypedDict
@@ -86,6 +87,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
86
87
  r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition."""
87
88
  n: NotRequired[Nullable[int]]
88
89
  r"""Number of completions to return for each request, input tokens are only billed once."""
90
+ prediction: NotRequired[PredictionTypedDict]
89
91
  safe_prompt: NotRequired[bool]
90
92
  r"""Whether to inject a safety prompt before all conversations."""
91
93
 
@@ -129,6 +131,8 @@ class ChatCompletionStreamRequest(BaseModel):
129
131
  n: OptionalNullable[int] = UNSET
130
132
  r"""Number of completions to return for each request, input tokens are only billed once."""
131
133
 
134
+ prediction: Optional[Prediction] = None
135
+
132
136
  safe_prompt: Optional[bool] = None
133
137
  r"""Whether to inject a safety prompt before all conversations."""
134
138
 
@@ -147,6 +151,7 @@ class ChatCompletionStreamRequest(BaseModel):
147
151
  "presence_penalty",
148
152
  "frequency_penalty",
149
153
  "n",
154
+ "prediction",
150
155
  "safe_prompt",
151
156
  ]
152
157
  nullable_fields = [
@@ -6,6 +6,7 @@ from .sampletype import SampleType
6
6
  from .source import Source
7
7
  from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
8
8
  from mistralai.utils import validate_open_enum
9
+ import pydantic
9
10
  from pydantic import model_serializer
10
11
  from pydantic.functional_validators import PlainValidator
11
12
  from typing_extensions import Annotated, NotRequired, TypedDict
@@ -16,7 +17,7 @@ class FileSchemaTypedDict(TypedDict):
16
17
  r"""The unique identifier of the file."""
17
18
  object: str
18
19
  r"""The object type, which is always \"file\"."""
19
- bytes: int
20
+ size_bytes: int
20
21
  r"""The size of the file, in bytes."""
21
22
  created_at: int
22
23
  r"""The UNIX timestamp (in seconds) of the event."""
@@ -35,7 +36,7 @@ class FileSchema(BaseModel):
35
36
  object: str
36
37
  r"""The object type, which is always \"file\"."""
37
38
 
38
- bytes: int
39
+ size_bytes: Annotated[int, pydantic.Field(alias="bytes")]
39
40
  r"""The size of the file, in bytes."""
40
41
 
41
42
  created_at: int
@@ -10,6 +10,7 @@ class FunctionTypedDict(TypedDict):
10
10
  name: str
11
11
  parameters: Dict[str, Any]
12
12
  description: NotRequired[str]
13
+ strict: NotRequired[bool]
13
14
 
14
15
 
15
16
  class Function(BaseModel):
@@ -18,3 +19,5 @@ class Function(BaseModel):
18
19
  parameters: Dict[str, Any]
19
20
 
20
21
  description: Optional[str] = ""
22
+
23
+ strict: Optional[bool] = False