mistralai 1.5.0__py3-none-any.whl → 1.5.2rc1__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (89)
  1. mistralai/_hooks/types.py +15 -3
  2. mistralai/_version.py +3 -3
  3. mistralai/agents.py +32 -12
  4. mistralai/basesdk.py +8 -0
  5. mistralai/chat.py +37 -17
  6. mistralai/classifiers.py +59 -37
  7. mistralai/embeddings.py +22 -18
  8. mistralai/extra/utils/response_format.py +3 -3
  9. mistralai/files.py +36 -0
  10. mistralai/fim.py +37 -17
  11. mistralai/httpclient.py +4 -2
  12. mistralai/jobs.py +30 -0
  13. mistralai/mistral_jobs.py +24 -0
  14. mistralai/models/__init__.py +43 -16
  15. mistralai/models/assistantmessage.py +2 -0
  16. mistralai/models/chatcompletionrequest.py +3 -10
  17. mistralai/models/chatcompletionstreamrequest.py +3 -10
  18. mistralai/models/chatmoderationrequest.py +86 -0
  19. mistralai/models/classificationrequest.py +7 -36
  20. mistralai/models/contentchunk.py +8 -1
  21. mistralai/models/documenturlchunk.py +56 -0
  22. mistralai/models/embeddingrequest.py +8 -44
  23. mistralai/models/filepurpose.py +1 -1
  24. mistralai/models/fimcompletionrequest.py +2 -3
  25. mistralai/models/fimcompletionstreamrequest.py +2 -3
  26. mistralai/models/ocrimageobject.py +77 -0
  27. mistralai/models/ocrpagedimensions.py +25 -0
  28. mistralai/models/ocrpageobject.py +64 -0
  29. mistralai/models/ocrrequest.py +97 -0
  30. mistralai/models/ocrresponse.py +26 -0
  31. mistralai/models/ocrusageinfo.py +51 -0
  32. mistralai/models/prediction.py +4 -5
  33. mistralai/models_.py +66 -18
  34. mistralai/ocr.py +248 -0
  35. mistralai/sdk.py +23 -3
  36. mistralai/sdkconfiguration.py +4 -2
  37. mistralai/utils/__init__.py +2 -0
  38. mistralai/utils/serializers.py +10 -6
  39. mistralai/utils/values.py +4 -1
  40. {mistralai-1.5.0.dist-info → mistralai-1.5.2rc1.dist-info}/METADATA +70 -19
  41. {mistralai-1.5.0.dist-info → mistralai-1.5.2rc1.dist-info}/RECORD +88 -76
  42. {mistralai-1.5.0.dist-info → mistralai-1.5.2rc1.dist-info}/WHEEL +1 -1
  43. mistralai_azure/__init__.py +10 -1
  44. mistralai_azure/_hooks/types.py +15 -3
  45. mistralai_azure/_version.py +3 -0
  46. mistralai_azure/basesdk.py +8 -0
  47. mistralai_azure/chat.py +88 -20
  48. mistralai_azure/httpclient.py +52 -0
  49. mistralai_azure/models/__init__.py +7 -0
  50. mistralai_azure/models/assistantmessage.py +2 -0
  51. mistralai_azure/models/chatcompletionrequest.py +8 -10
  52. mistralai_azure/models/chatcompletionstreamrequest.py +8 -10
  53. mistralai_azure/models/function.py +3 -0
  54. mistralai_azure/models/jsonschema.py +61 -0
  55. mistralai_azure/models/prediction.py +25 -0
  56. mistralai_azure/models/responseformat.py +42 -1
  57. mistralai_azure/models/responseformats.py +1 -1
  58. mistralai_azure/models/toolcall.py +3 -0
  59. mistralai_azure/sdk.py +56 -14
  60. mistralai_azure/sdkconfiguration.py +14 -6
  61. mistralai_azure/utils/__init__.py +2 -0
  62. mistralai_azure/utils/serializers.py +10 -6
  63. mistralai_azure/utils/values.py +4 -1
  64. mistralai_gcp/__init__.py +10 -1
  65. mistralai_gcp/_hooks/types.py +15 -3
  66. mistralai_gcp/_version.py +3 -0
  67. mistralai_gcp/basesdk.py +8 -0
  68. mistralai_gcp/chat.py +89 -21
  69. mistralai_gcp/fim.py +61 -21
  70. mistralai_gcp/httpclient.py +52 -0
  71. mistralai_gcp/models/__init__.py +7 -0
  72. mistralai_gcp/models/assistantmessage.py +2 -0
  73. mistralai_gcp/models/chatcompletionrequest.py +8 -10
  74. mistralai_gcp/models/chatcompletionstreamrequest.py +8 -10
  75. mistralai_gcp/models/fimcompletionrequest.py +2 -3
  76. mistralai_gcp/models/fimcompletionstreamrequest.py +2 -3
  77. mistralai_gcp/models/function.py +3 -0
  78. mistralai_gcp/models/jsonschema.py +61 -0
  79. mistralai_gcp/models/prediction.py +25 -0
  80. mistralai_gcp/models/responseformat.py +42 -1
  81. mistralai_gcp/models/responseformats.py +1 -1
  82. mistralai_gcp/models/toolcall.py +3 -0
  83. mistralai_gcp/sdk.py +63 -19
  84. mistralai_gcp/sdkconfiguration.py +14 -6
  85. mistralai_gcp/utils/__init__.py +2 -0
  86. mistralai_gcp/utils/serializers.py +10 -6
  87. mistralai_gcp/utils/values.py +4 -1
  88. mistralai/models/chatclassificationrequest.py +0 -113
  89. {mistralai-1.5.0.dist-info → mistralai-1.5.2rc1.dist-info}/LICENSE +0 -0
mistralai/fim.py CHANGED
@@ -3,7 +3,7 @@
 from .basesdk import BaseSDK
 from mistralai import models, utils
 from mistralai._hooks import HookContext
-from mistralai.types import Nullable, OptionalNullable, UNSET
+from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import eventstreaming, get_security_from_env
 from typing import Any, Mapping, Optional, Union
 
@@ -14,7 +14,7 @@ class Fim(BaseSDK):
     def complete(
         self,
         *,
-        model: Nullable[str],
+        model: str,
         prompt: str,
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = 1,
@@ -60,6 +60,8 @@ class Fim(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.FIMCompletionRequest(
             model=model,
@@ -103,6 +105,7 @@ class Fim(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="fim_completion_v1_fim_completions_post",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -114,12 +117,14 @@ class Fim(BaseSDK):
             retry_config=retry_config,
         )
 
-        data: Any = None
+        response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse)
         if utils.match_response(http_res, "422", "application/json"):
-            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
-            raise models.HTTPValidationError(data=data)
+            response_data = utils.unmarshal_json(
+                http_res.text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
@@ -143,7 +148,7 @@ class Fim(BaseSDK):
     async def complete_async(
         self,
         *,
-        model: Nullable[str],
+        model: str,
         prompt: str,
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = 1,
@@ -189,6 +194,8 @@ class Fim(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.FIMCompletionRequest(
             model=model,
@@ -232,6 +239,7 @@ class Fim(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="fim_completion_v1_fim_completions_post",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -243,12 +251,14 @@ class Fim(BaseSDK):
             retry_config=retry_config,
         )
 
-        data: Any = None
+        response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse)
         if utils.match_response(http_res, "422", "application/json"):
-            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
-            raise models.HTTPValidationError(data=data)
+            response_data = utils.unmarshal_json(
+                http_res.text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
@@ -272,7 +282,7 @@ class Fim(BaseSDK):
     def stream(
         self,
         *,
-        model: Nullable[str],
+        model: str,
         prompt: str,
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = 1,
@@ -318,6 +328,8 @@ class Fim(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.FIMCompletionStreamRequest(
             model=model,
@@ -361,6 +373,7 @@ class Fim(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="stream_fim",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -373,7 +386,7 @@ class Fim(BaseSDK):
             retry_config=retry_config,
         )
 
-        data: Any = None
+        response_data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
             return eventstreaming.EventStream(
                 http_res,
@@ -382,8 +395,10 @@ class Fim(BaseSDK):
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = utils.stream_to_text(http_res)
-            data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData)
-            raise models.HTTPValidationError(data=data)
+            response_data = utils.unmarshal_json(
+                http_res_text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
@@ -407,7 +422,7 @@ class Fim(BaseSDK):
     async def stream_async(
         self,
         *,
-        model: Nullable[str],
+        model: str,
         prompt: str,
         temperature: OptionalNullable[float] = UNSET,
         top_p: Optional[float] = 1,
@@ -453,6 +468,8 @@ class Fim(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.FIMCompletionStreamRequest(
             model=model,
@@ -496,6 +513,7 @@ class Fim(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="stream_fim",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -508,7 +526,7 @@ class Fim(BaseSDK):
             retry_config=retry_config,
        )
 
-        data: Any = None
+        response_data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
             return eventstreaming.EventStreamAsync(
                 http_res,
@@ -517,8 +535,10 @@ class Fim(BaseSDK):
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData)
-            raise models.HTTPValidationError(data=data)
+            response_data = utils.unmarshal_json(
+                http_res_text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
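
Net effect of the fim.py changes: `model` is typed as a required `str` (no longer `Nullable[str]`), error payloads are bound to `response_data`, and the resolved `base_url` is forwarded to `HookContext`. A minimal usage sketch under the new signature; the API key and model name are placeholders, not values taken from this diff:

    from mistralai import Mistral

    client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

    # model must be a plain str in 1.5.2rc1; the 1.5.0 hints allowed None here.
    res = client.fim.complete(
        model="codestral-latest",       # placeholder model name
        prompt="def fibonacci(n):",
        suffix="return result",
    )
    print(res.choices[0].message.content)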
mistralai/httpclient.py CHANGED
@@ -94,7 +94,9 @@ class ClientOwner(Protocol):
 def close_clients(
     owner: ClientOwner,
     sync_client: Union[HttpClient, None],
+    sync_client_supplied: bool,
     async_client: Union[AsyncHttpClient, None],
+    async_client_supplied: bool,
 ) -> None:
     """
     A finalizer function that is meant to be used with weakref.finalize to close
@@ -107,13 +109,13 @@ def close_clients(
     owner.client = None
     owner.async_client = None
 
-    if sync_client is not None:
+    if sync_client is not None and not sync_client_supplied:
         try:
             sync_client.close()
         except Exception:
             pass
 
-    if async_client is not None:
+    if async_client is not None and not async_client_supplied:
         is_async = False
         try:
             asyncio.get_running_loop()
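
The new `sync_client_supplied` / `async_client_supplied` flags make the weakref finalizer skip HTTP clients the caller passed in, so the SDK only tears down clients it created itself. A hedged sketch of the caller-side consequence, assuming the `client=` constructor parameter the SDK exposes for a custom httpx client:

    import httpx
    from mistralai import Mistral

    # A caller-supplied httpx client: with this change the SDK's finalizer
    # leaves it open, so closing it stays the caller's responsibility.
    http_client = httpx.Client(timeout=30.0)
    sdk = Mistral(api_key="YOUR_API_KEY", client=http_client)  # placeholder key

    # ... use sdk ...

    del sdk              # finalizer runs; the supplied client is not closed
    http_client.close()  # explicit cleanup by the caller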
mistralai/jobs.py CHANGED
@@ -52,6 +52,8 @@ class Jobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest(
             page=page,
@@ -91,6 +93,7 @@ class Jobs(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -166,6 +169,8 @@ class Jobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest(
             page=page,
@@ -205,6 +210,7 @@ class Jobs(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -290,6 +296,8 @@ class Jobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobIn(
             model=model,
@@ -339,6 +347,7 @@ class Jobs(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -426,6 +435,8 @@ class Jobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobIn(
             model=model,
@@ -475,6 +486,7 @@ class Jobs(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -536,6 +548,8 @@ class Jobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest(
             job_id=job_id,
@@ -567,6 +581,7 @@ class Jobs(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -626,6 +641,8 @@ class Jobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest(
             job_id=job_id,
@@ -657,6 +674,7 @@ class Jobs(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -716,6 +734,8 @@ class Jobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest(
             job_id=job_id,
@@ -747,6 +767,7 @@ class Jobs(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -806,6 +827,8 @@ class Jobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest(
             job_id=job_id,
@@ -837,6 +860,7 @@ class Jobs(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -896,6 +920,8 @@ class Jobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest(
             job_id=job_id,
@@ -927,6 +953,7 @@ class Jobs(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -986,6 +1013,8 @@ class Jobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest(
             job_id=job_id,
@@ -1017,6 +1046,7 @@ class Jobs(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
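
Every fine-tuning operation in jobs.py picks up the same two changes: when no per-call `server_url` is given, `base_url` is resolved through `self._get_url(base_url, url_variables)` instead of being left unset, and the resolved value is forwarded to `HookContext`. A hedged sketch of the call-site behaviour, assuming the fine-tuning jobs resource is mounted at `client.fine_tuning.jobs` as in the SDK's README:

    from mistralai import Mistral

    client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

    # A per-call override still wins: base_url is taken verbatim from server_url.
    jobs = client.fine_tuning.jobs.list(server_url="https://api.mistral.ai")

    # Without an override, the SDK now resolves the configured server URL itself,
    # so request hooks see a non-empty base_url instead of an empty string.
    jobs = client.fine_tuning.jobs.list(page=0, page_size=10)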
mistralai/mistral_jobs.py CHANGED
@@ -48,6 +48,8 @@ class MistralJobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesBatchGetBatchJobsRequest(
             page=page,
@@ -85,6 +87,7 @@ class MistralJobs(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_jobs",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -156,6 +159,8 @@ class MistralJobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesBatchGetBatchJobsRequest(
             page=page,
@@ -193,6 +198,7 @@ class MistralJobs(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_jobs",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -260,6 +266,8 @@ class MistralJobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.BatchJobIn(
             input_files=input_files,
@@ -298,6 +306,7 @@ class MistralJobs(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_create_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -365,6 +374,8 @@ class MistralJobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.BatchJobIn(
             input_files=input_files,
@@ -403,6 +414,7 @@ class MistralJobs(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_create_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -462,6 +474,8 @@ class MistralJobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesBatchGetBatchJobRequest(
             job_id=job_id,
@@ -493,6 +507,7 @@ class MistralJobs(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -552,6 +567,8 @@ class MistralJobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesBatchGetBatchJobRequest(
             job_id=job_id,
@@ -583,6 +600,7 @@ class MistralJobs(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -642,6 +660,8 @@ class MistralJobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesBatchCancelBatchJobRequest(
             job_id=job_id,
@@ -673,6 +693,7 @@ class MistralJobs(BaseSDK):
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_cancel_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -732,6 +753,8 @@ class MistralJobs(BaseSDK):
 
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesBatchCancelBatchJobRequest(
             job_id=job_id,
@@ -763,6 +786,7 @@ class MistralJobs(BaseSDK):
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_cancel_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
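
mistral_jobs.py applies the identical pattern to every batch operation. The sketch below is an illustrative stand-in for what the new `else` branch accomplishes; `resolve_base_url` is a hypothetical helper, not the SDK's actual `_get_url` implementation:

    from typing import Dict, Optional

    def resolve_base_url(
        configured_url: Optional[str],
        url_variables: Dict[str, str],
        server_url: Optional[str] = None,
    ) -> str:
        # Mirrors the new control flow: an explicit per-call server_url wins;
        # otherwise the configured, possibly templated, URL is expanded.
        if server_url is not None:
            return server_url
        url = configured_url or "https://api.mistral.ai"
        for name, value in url_variables.items():
            url = url.replace("{" + name + "}", value)
        return url

    # resolve_base_url("https://{region}.example.test", {"region": "eu"})
    # -> "https://eu.example.test"   (hypothetical values, for illustration only)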
mistralai/models/__init__.py CHANGED
@@ -39,16 +39,6 @@ from .batchjobin import BatchJobIn, BatchJobInTypedDict
 from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict
 from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict
 from .batchjobstatus import BatchJobStatus
-from .chatclassificationrequest import (
-    ChatClassificationRequest,
-    ChatClassificationRequestInputs,
-    ChatClassificationRequestInputsTypedDict,
-    ChatClassificationRequestTypedDict,
-    One,
-    OneTypedDict,
-    Two,
-    TwoTypedDict,
-)
 from .chatcompletionchoice import (
     ChatCompletionChoice,
     ChatCompletionChoiceTypedDict,
@@ -78,6 +68,16 @@ from .chatcompletionstreamrequest import (
     ChatCompletionStreamRequestToolChoiceTypedDict,
     ChatCompletionStreamRequestTypedDict,
 )
+from .chatmoderationrequest import (
+    ChatModerationRequest,
+    ChatModerationRequestInputs,
+    ChatModerationRequestInputsTypedDict,
+    ChatModerationRequestTypedDict,
+    One,
+    OneTypedDict,
+    Two,
+    TwoTypedDict,
+)
 from .checkpointout import CheckpointOut, CheckpointOutTypedDict
 from .classificationobject import ClassificationObject, ClassificationObjectTypedDict
 from .classificationrequest import (
@@ -115,6 +115,11 @@ from .detailedjobout import (
     DetailedJobOutStatus,
     DetailedJobOutTypedDict,
 )
+from .documenturlchunk import (
+    DocumentURLChunk,
+    DocumentURLChunkType,
+    DocumentURLChunkTypedDict,
+)
 from .embeddingrequest import (
     EmbeddingRequest,
     EmbeddingRequestTypedDict,
@@ -274,7 +279,13 @@ from .listfilesout import ListFilesOut, ListFilesOutTypedDict
 from .metricout import MetricOut, MetricOutTypedDict
 from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict
 from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict
-from .prediction import Prediction, PredictionType, PredictionTypedDict
+from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict
+from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict
+from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict
+from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict
+from .ocrresponse import OCRResponse, OCRResponseTypedDict
+from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict
+from .prediction import Prediction, PredictionTypedDict
 from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from .responseformats import ResponseFormats
@@ -386,10 +397,6 @@ __all__ = [
     "BatchJobsOut",
     "BatchJobsOutObject",
     "BatchJobsOutTypedDict",
-    "ChatClassificationRequest",
-    "ChatClassificationRequestInputs",
-    "ChatClassificationRequestInputsTypedDict",
-    "ChatClassificationRequestTypedDict",
     "ChatCompletionChoice",
     "ChatCompletionChoiceTypedDict",
     "ChatCompletionRequest",
@@ -406,6 +413,10 @@ __all__ = [
     "ChatCompletionStreamRequestToolChoice",
     "ChatCompletionStreamRequestToolChoiceTypedDict",
     "ChatCompletionStreamRequestTypedDict",
+    "ChatModerationRequest",
+    "ChatModerationRequestInputs",
+    "ChatModerationRequestInputsTypedDict",
+    "ChatModerationRequestTypedDict",
     "CheckpointOut",
     "CheckpointOutTypedDict",
     "ClassificationObject",
@@ -445,6 +456,11 @@ __all__ = [
     "DetailedJobOutRepositoriesTypedDict",
     "DetailedJobOutStatus",
     "DetailedJobOutTypedDict",
+    "Document",
+    "DocumentTypedDict",
+    "DocumentURLChunk",
+    "DocumentURLChunkType",
+    "DocumentURLChunkTypedDict",
     "EmbeddingRequest",
     "EmbeddingRequestTypedDict",
     "EmbeddingResponse",
@@ -568,11 +584,22 @@ __all__ = [
     "ModelCapabilitiesTypedDict",
     "ModelList",
     "ModelListTypedDict",
+    "OCRImageObject",
+    "OCRImageObjectTypedDict",
+    "OCRPageDimensions",
+    "OCRPageDimensionsTypedDict",
+    "OCRPageObject",
+    "OCRPageObjectTypedDict",
+    "OCRRequest",
+    "OCRRequestTypedDict",
+    "OCRResponse",
+    "OCRResponseTypedDict",
+    "OCRUsageInfo",
+    "OCRUsageInfoTypedDict",
     "Object",
     "One",
     "OneTypedDict",
     "Prediction",
-    "PredictionType",
     "PredictionTypedDict",
     "QueryParamStatus",
     "ReferenceChunk",
mistralai/models/assistantmessage.py CHANGED
@@ -26,6 +26,7 @@ class AssistantMessageTypedDict(TypedDict):
     content: NotRequired[Nullable[AssistantMessageContentTypedDict]]
     tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]]
     prefix: NotRequired[bool]
+    r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message."""
     role: NotRequired[AssistantMessageRole]
 
 
@@ -35,6 +36,7 @@ class AssistantMessage(BaseModel):
     tool_calls: OptionalNullable[List[ToolCall]] = UNSET
 
     prefix: Optional[bool] = False
+    r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message."""
 
     role: Optional[AssistantMessageRole] = "assistant"
 
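
The added docstring documents the `prefix` flag: an assistant message sent with `prefix=True` forces the model to begin its reply with that message's content. A short sketch of that pattern; the model name is a placeholder:

    from mistralai import Mistral

    client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

    res = client.chat.complete(
        model="mistral-small-latest",  # placeholder model name
        messages=[
            {"role": "user", "content": "Reply with a haiku about the sea."},
            # prefix=True conditions the response: the model must start its
            # answer with exactly this content and continue from it.
            {"role": "assistant", "content": "Waves whisper softly", "prefix": True},
        ],
    )
    print(res.choices[0].message.content)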
mistralai/models/chatcompletionrequest.py CHANGED
@@ -59,7 +59,7 @@ ChatCompletionRequestToolChoice = TypeAliasType(
 
 
 class ChatCompletionRequestTypedDict(TypedDict):
-    model: Nullable[str]
+    model: str
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
     messages: List[MessagesTypedDict]
     r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content."""
@@ -90,7 +90,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
 
 
 class ChatCompletionRequest(BaseModel):
-    model: Nullable[str]
+    model: str
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
 
     messages: List[Messages]
@@ -152,14 +152,7 @@ class ChatCompletionRequest(BaseModel):
             "prediction",
             "safe_prompt",
         ]
-        nullable_fields = [
-            "model",
-            "temperature",
-            "max_tokens",
-            "random_seed",
-            "tools",
-            "n",
-        ]
+        nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
         null_default_fields = []
 
         serialized = handler(self)
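
Here, as in fim.py, `model` becomes a required non-nullable `str`, and it is dropped from `nullable_fields` so the serializer no longer emits `model: null`. A minimal sketch of the validation consequence, assuming pydantic enforces the new annotation as written:

    import pydantic
    from mistralai import models

    # Valid: model is a plain str.
    req = models.ChatCompletionRequest(
        model="mistral-small-latest",  # placeholder model name
        messages=[{"role": "user", "content": "Hello"}],
    )

    # model=None, which the 1.5.0 Nullable[str] annotation tolerated,
    # is now rejected at validation time.
    try:
        models.ChatCompletionRequest(model=None, messages=[])
    except pydantic.ValidationError as err:
        print(err.error_count(), "validation error(s)")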