mistralai 1.5.2rc1__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. mistralai/_version.py +2 -2
  2. mistralai/agents.py +12 -0
  3. mistralai/chat.py +12 -0
  4. mistralai/classifiers.py +435 -23
  5. mistralai/embeddings.py +6 -2
  6. mistralai/jobs.py +84 -38
  7. mistralai/mistral_jobs.py +2 -2
  8. mistralai/models/__init__.py +197 -46
  9. mistralai/models/agentscompletionrequest.py +4 -0
  10. mistralai/models/agentscompletionstreamrequest.py +4 -0
  11. mistralai/models/archiveftmodelout.py +3 -11
  12. mistralai/models/batchjobout.py +3 -9
  13. mistralai/models/batchjobsout.py +3 -9
  14. mistralai/models/chatclassificationrequest.py +20 -0
  15. mistralai/models/chatcompletionrequest.py +4 -0
  16. mistralai/models/chatcompletionstreamrequest.py +4 -0
  17. mistralai/models/chatmoderationrequest.py +4 -7
  18. mistralai/models/classificationresponse.py +12 -9
  19. mistralai/models/classificationtargetresult.py +14 -0
  20. mistralai/models/classifierdetailedjobout.py +156 -0
  21. mistralai/models/classifierftmodelout.py +101 -0
  22. mistralai/models/classifierjobout.py +165 -0
  23. mistralai/models/classifiertargetin.py +55 -0
  24. mistralai/models/classifiertargetout.py +24 -0
  25. mistralai/models/classifiertrainingparameters.py +73 -0
  26. mistralai/models/classifiertrainingparametersin.py +85 -0
  27. mistralai/models/{detailedjobout.py → completiondetailedjobout.py} +34 -34
  28. mistralai/models/{ftmodelout.py → completionftmodelout.py} +12 -12
  29. mistralai/models/{jobout.py → completionjobout.py} +25 -24
  30. mistralai/models/{trainingparameters.py → completiontrainingparameters.py} +7 -7
  31. mistralai/models/{trainingparametersin.py → completiontrainingparametersin.py} +7 -7
  32. mistralai/models/embeddingrequest.py +6 -4
  33. mistralai/models/finetuneablemodeltype.py +7 -0
  34. mistralai/models/ftclassifierlossfunction.py +7 -0
  35. mistralai/models/ftmodelcapabilitiesout.py +3 -0
  36. mistralai/models/function.py +2 -2
  37. mistralai/models/githubrepositoryin.py +3 -11
  38. mistralai/models/githubrepositoryout.py +3 -11
  39. mistralai/models/inputs.py +54 -0
  40. mistralai/models/instructrequest.py +42 -0
  41. mistralai/models/jobin.py +52 -12
  42. mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +3 -3
  43. mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +29 -2
  44. mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +21 -4
  45. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +29 -2
  46. mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +8 -0
  47. mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +29 -2
  48. mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +28 -2
  49. mistralai/models/jobsout.py +24 -13
  50. mistralai/models/jsonschema.py +1 -1
  51. mistralai/models/legacyjobmetadataout.py +3 -12
  52. mistralai/models/{classificationobject.py → moderationobject.py} +6 -6
  53. mistralai/models/moderationresponse.py +21 -0
  54. mistralai/models/unarchiveftmodelout.py +3 -11
  55. mistralai/models/wandbintegration.py +3 -11
  56. mistralai/models/wandbintegrationout.py +8 -13
  57. mistralai/models_.py +10 -4
  58. {mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/METADATA +4 -2
  59. {mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/RECORD +81 -63
  60. {mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/WHEEL +1 -1
  61. mistralai_azure/_version.py +2 -2
  62. mistralai_azure/chat.py +12 -0
  63. mistralai_azure/models/__init__.py +15 -0
  64. mistralai_azure/models/chatcompletionrequest.py +4 -0
  65. mistralai_azure/models/chatcompletionstreamrequest.py +4 -0
  66. mistralai_azure/models/contentchunk.py +6 -2
  67. mistralai_azure/models/function.py +2 -2
  68. mistralai_azure/models/imageurl.py +53 -0
  69. mistralai_azure/models/imageurlchunk.py +33 -0
  70. mistralai_azure/models/jsonschema.py +1 -1
  71. mistralai_gcp/_version.py +2 -2
  72. mistralai_gcp/chat.py +12 -0
  73. mistralai_gcp/models/__init__.py +15 -0
  74. mistralai_gcp/models/chatcompletionrequest.py +4 -0
  75. mistralai_gcp/models/chatcompletionstreamrequest.py +4 -0
  76. mistralai_gcp/models/contentchunk.py +6 -2
  77. mistralai_gcp/models/function.py +2 -2
  78. mistralai_gcp/models/imageurl.py +53 -0
  79. mistralai_gcp/models/imageurlchunk.py +33 -0
  80. mistralai_gcp/models/jsonschema.py +1 -1
  81. {mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/LICENSE +0 -0
mistralai/jobs.py CHANGED
@@ -17,6 +17,7 @@ class Jobs(BaseSDK):
  page_size: Optional[int] = 100,
  model: OptionalNullable[str] = UNSET,
  created_after: OptionalNullable[datetime] = UNSET,
+ created_before: OptionalNullable[datetime] = UNSET,
  created_by_me: Optional[bool] = False,
  status: OptionalNullable[models.QueryParamStatus] = UNSET,
  wandb_project: OptionalNullable[str] = UNSET,
@@ -35,6 +36,7 @@ class Jobs(BaseSDK):
  :param page_size: The number of items to return per page.
  :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed.
  :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed.
+ :param created_before:
  :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed.
  :param status: The current job state to filter on. When set, the other results are not displayed.
  :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed.
@@ -60,6 +62,7 @@ class Jobs(BaseSDK):
  page_size=page_size,
  model=model,
  created_after=created_after,
+ created_before=created_before,
  created_by_me=created_by_me,
  status=status,
  wandb_project=wandb_project,
@@ -134,6 +137,7 @@ class Jobs(BaseSDK):
  page_size: Optional[int] = 100,
  model: OptionalNullable[str] = UNSET,
  created_after: OptionalNullable[datetime] = UNSET,
+ created_before: OptionalNullable[datetime] = UNSET,
  created_by_me: Optional[bool] = False,
  status: OptionalNullable[models.QueryParamStatus] = UNSET,
  wandb_project: OptionalNullable[str] = UNSET,
@@ -152,6 +156,7 @@ class Jobs(BaseSDK):
  :param page_size: The number of items to return per page.
  :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed.
  :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed.
+ :param created_before:
  :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed.
  :param status: The current job state to filter on. When set, the other results are not displayed.
  :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed.
@@ -177,6 +182,7 @@ class Jobs(BaseSDK):
  page_size=page_size,
  model=model,
  created_after=created_after,
+ created_before=created_before,
  created_by_me=created_by_me,
  status=status,
  wandb_project=wandb_project,
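
Both the sync and async list signatures above gain a created_before filter alongside created_after. A minimal usage sketch, assuming the usual Mistral client wiring and the client.fine_tuning.jobs accessor from earlier 1.x releases (all values below are placeholders):

from datetime import datetime, timezone
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

# Filter fine-tuning jobs created within a time window; both bounds are optional.
jobs = client.fine_tuning.jobs.list(
    created_after=datetime(2025, 1, 1, tzinfo=timezone.utc),
    created_before=datetime(2025, 3, 1, tzinfo=timezone.utc),
    created_by_me=True,
)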
@@ -248,9 +254,7 @@ class Jobs(BaseSDK):
  self,
  *,
  model: str,
- hyperparameters: Union[
- models.TrainingParametersIn, models.TrainingParametersInTypedDict
- ],
+ hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict],
  training_files: Optional[
  Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]
  ] = None,
@@ -261,12 +265,20 @@ class Jobs(BaseSDK):
  List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict]
  ]
  ] = UNSET,
- repositories: Optional[
+ auto_start: Optional[bool] = None,
+ invalid_sample_skip_percentage: Optional[float] = 0,
+ job_type: OptionalNullable[models.FineTuneableModelType] = UNSET,
+ repositories: OptionalNullable[
  Union[
  List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict]
  ]
- ] = None,
- auto_start: Optional[bool] = None,
+ ] = UNSET,
+ classifier_targets: OptionalNullable[
+ Union[
+ List[models.ClassifierTargetIn],
+ List[models.ClassifierTargetInTypedDict],
+ ]
+ ] = UNSET,
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
@@ -277,13 +289,16 @@ class Jobs(BaseSDK):
  Create a new fine-tuning job, it will be queued for processing.

  :param model: The name of the model to fine-tune.
- :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job.
+ :param hyperparameters:
  :param training_files:
  :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.
  :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`
  :param integrations: A list of integrations to enable for your fine-tuning job.
- :param repositories:
  :param auto_start: This field will be required in a future release.
+ :param invalid_sample_skip_percentage:
+ :param job_type:
+ :param repositories:
+ :param classifier_targets:
  :param retries: Override the default retry configuration for this method
  :param server_url: Override the default server URL for this method
  :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -305,17 +320,22 @@ class Jobs(BaseSDK):
  training_files, Optional[List[models.TrainingFile]]
  ),
  validation_files=validation_files,
- hyperparameters=utils.get_pydantic_model(
- hyperparameters, models.TrainingParametersIn
- ),
  suffix=suffix,
  integrations=utils.get_pydantic_model(
  integrations, OptionalNullable[List[models.JobInIntegrations]]
  ),
+ auto_start=auto_start,
+ invalid_sample_skip_percentage=invalid_sample_skip_percentage,
+ job_type=job_type,
+ hyperparameters=utils.get_pydantic_model(
+ hyperparameters, models.Hyperparameters
+ ),
  repositories=utils.get_pydantic_model(
- repositories, Optional[List[models.JobInRepositories]]
+ repositories, OptionalNullable[List[models.JobInRepositories]]
+ ),
+ classifier_targets=utils.get_pydantic_model(
+ classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]]
  ),
- auto_start=auto_start,
  )

  req = self._build_request(
@@ -387,9 +407,7 @@ class Jobs(BaseSDK):
  self,
  *,
  model: str,
- hyperparameters: Union[
- models.TrainingParametersIn, models.TrainingParametersInTypedDict
- ],
+ hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict],
  training_files: Optional[
  Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]
  ] = None,
@@ -400,12 +418,20 @@ class Jobs(BaseSDK):
  List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict]
  ]
  ] = UNSET,
- repositories: Optional[
+ auto_start: Optional[bool] = None,
+ invalid_sample_skip_percentage: Optional[float] = 0,
+ job_type: OptionalNullable[models.FineTuneableModelType] = UNSET,
+ repositories: OptionalNullable[
  Union[
  List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict]
  ]
- ] = None,
- auto_start: Optional[bool] = None,
+ ] = UNSET,
+ classifier_targets: OptionalNullable[
+ Union[
+ List[models.ClassifierTargetIn],
+ List[models.ClassifierTargetInTypedDict],
+ ]
+ ] = UNSET,
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
@@ -416,13 +442,16 @@ class Jobs(BaseSDK):
  Create a new fine-tuning job, it will be queued for processing.

  :param model: The name of the model to fine-tune.
- :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job.
+ :param hyperparameters:
  :param training_files:
  :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.
  :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`
  :param integrations: A list of integrations to enable for your fine-tuning job.
- :param repositories:
  :param auto_start: This field will be required in a future release.
+ :param invalid_sample_skip_percentage:
+ :param job_type:
+ :param repositories:
+ :param classifier_targets:
  :param retries: Override the default retry configuration for this method
  :param server_url: Override the default server URL for this method
  :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -444,17 +473,22 @@ class Jobs(BaseSDK):
  training_files, Optional[List[models.TrainingFile]]
  ),
  validation_files=validation_files,
- hyperparameters=utils.get_pydantic_model(
- hyperparameters, models.TrainingParametersIn
- ),
  suffix=suffix,
  integrations=utils.get_pydantic_model(
  integrations, OptionalNullable[List[models.JobInIntegrations]]
  ),
+ auto_start=auto_start,
+ invalid_sample_skip_percentage=invalid_sample_skip_percentage,
+ job_type=job_type,
+ hyperparameters=utils.get_pydantic_model(
+ hyperparameters, models.Hyperparameters
+ ),
  repositories=utils.get_pydantic_model(
- repositories, Optional[List[models.JobInRepositories]]
+ repositories, OptionalNullable[List[models.JobInRepositories]]
+ ),
+ classifier_targets=utils.get_pydantic_model(
+ classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]]
  ),
- auto_start=auto_start,
  )

  req = self._build_request_async(
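
The create signature (sync and async) now takes models.Hyperparameters and adds job_type, invalid_sample_skip_percentage, and classifier_targets. A hedged sketch of creating a classifier fine-tuning job; the base model name, the job_type value, and the classifier target and hyperparameter field names are assumptions inferred from the new model files (classifiertargetin.py, classifiertrainingparametersin.py), not confirmed by this diff:

from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

# Sketch only: dicts are coerced into the pydantic models via utils.get_pydantic_model,
# as shown in the diff above. Field names marked "assumed" may differ in the release.
created_job = client.fine_tuning.jobs.create(
    model="ministral-3b-latest",                          # assumed classifier-capable base model
    job_type="classifier",                                # assumed FineTuneableModelType value
    training_files=[{"file_id": "<uploaded-file-uuid>", "weight": 1.0}],
    hyperparameters={"training_steps": 100, "learning_rate": 0.0001},  # assumed keys
    classifier_targets=[
        {"name": "sentiment", "labels": ["positive", "negative"]}      # assumed fields
    ],
    auto_start=False,
)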
@@ -530,7 +564,7 @@ class Jobs(BaseSDK):
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
  http_headers: Optional[Mapping[str, str]] = None,
- ) -> models.DetailedJobOut:
+ ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse:
  r"""Get Fine Tuning Job

  Get a fine-tuned job details by its UUID.
@@ -594,7 +628,9 @@ class Jobs(BaseSDK):
  )

  if utils.match_response(http_res, "200", "application/json"):
- return utils.unmarshal_json(http_res.text, models.DetailedJobOut)
+ return utils.unmarshal_json(
+ http_res.text, models.JobsAPIRoutesFineTuningGetFineTuningJobResponse
+ )
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = utils.stream_to_text(http_res)
  raise models.SDKError(
@@ -623,7 +659,7 @@ class Jobs(BaseSDK):
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
  http_headers: Optional[Mapping[str, str]] = None,
- ) -> models.DetailedJobOut:
+ ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse:
  r"""Get Fine Tuning Job

  Get a fine-tuned job details by its UUID.
@@ -687,7 +723,9 @@ class Jobs(BaseSDK):
  )

  if utils.match_response(http_res, "200", "application/json"):
- return utils.unmarshal_json(http_res.text, models.DetailedJobOut)
+ return utils.unmarshal_json(
+ http_res.text, models.JobsAPIRoutesFineTuningGetFineTuningJobResponse
+ )
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = await utils.stream_to_text_async(http_res)
  raise models.SDKError(
@@ -716,7 +754,7 @@ class Jobs(BaseSDK):
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
  http_headers: Optional[Mapping[str, str]] = None,
- ) -> models.DetailedJobOut:
+ ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse:
  r"""Cancel Fine Tuning Job

  Request the cancellation of a fine tuning job.
@@ -780,7 +818,9 @@ class Jobs(BaseSDK):
  )

  if utils.match_response(http_res, "200", "application/json"):
- return utils.unmarshal_json(http_res.text, models.DetailedJobOut)
+ return utils.unmarshal_json(
+ http_res.text, models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse
+ )
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = utils.stream_to_text(http_res)
  raise models.SDKError(
@@ -809,7 +849,7 @@ class Jobs(BaseSDK):
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
  http_headers: Optional[Mapping[str, str]] = None,
- ) -> models.DetailedJobOut:
+ ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse:
  r"""Cancel Fine Tuning Job

  Request the cancellation of a fine tuning job.
@@ -873,7 +913,9 @@ class Jobs(BaseSDK):
  )

  if utils.match_response(http_res, "200", "application/json"):
- return utils.unmarshal_json(http_res.text, models.DetailedJobOut)
+ return utils.unmarshal_json(
+ http_res.text, models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse
+ )
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = await utils.stream_to_text_async(http_res)
  raise models.SDKError(
@@ -902,7 +944,7 @@ class Jobs(BaseSDK):
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
  http_headers: Optional[Mapping[str, str]] = None,
- ) -> models.DetailedJobOut:
+ ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse:
  r"""Start Fine Tuning Job

  Request the start of a validated fine tuning job.
@@ -966,7 +1008,9 @@ class Jobs(BaseSDK):
  )

  if utils.match_response(http_res, "200", "application/json"):
- return utils.unmarshal_json(http_res.text, models.DetailedJobOut)
+ return utils.unmarshal_json(
+ http_res.text, models.JobsAPIRoutesFineTuningStartFineTuningJobResponse
+ )
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = utils.stream_to_text(http_res)
  raise models.SDKError(
@@ -995,7 +1039,7 @@ class Jobs(BaseSDK):
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
  http_headers: Optional[Mapping[str, str]] = None,
- ) -> models.DetailedJobOut:
+ ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse:
  r"""Start Fine Tuning Job

  Request the start of a validated fine tuning job.
@@ -1059,7 +1103,9 @@ class Jobs(BaseSDK):
  )

  if utils.match_response(http_res, "200", "application/json"):
- return utils.unmarshal_json(http_res.text, models.DetailedJobOut)
+ return utils.unmarshal_json(
+ http_res.text, models.JobsAPIRoutesFineTuningStartFineTuningJobResponse
+ )
  if utils.match_response(http_res, "4XX", "*"):
  http_res_text = await utils.stream_to_text_async(http_res)
  raise models.SDKError(
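
With the return types above widened from models.DetailedJobOut to per-operation response unions, callers that inspect the result may want to branch on the concrete model. A sketch only: the union member names below are assumptions inferred from the renamed model files (completiondetailedjobout.py, classifierdetailedjobout.py) and are not confirmed by this diff:

from mistralai import Mistral, models

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

job = client.fine_tuning.jobs.get(job_id="<job-uuid>")

# Assumed union members; adjust to the actual response union if they differ.
if isinstance(job, models.ClassifierDetailedJobOut):
    print("classifier job:", job.status)
elif isinstance(job, models.CompletionDetailedJobOut):
    print("completion job:", job.status)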
mistralai/mistral_jobs.py CHANGED
@@ -19,7 +19,7 @@ class MistralJobs(BaseSDK):
  metadata: OptionalNullable[Dict[str, Any]] = UNSET,
  created_after: OptionalNullable[datetime] = UNSET,
  created_by_me: Optional[bool] = False,
- status: OptionalNullable[models.BatchJobStatus] = UNSET,
+ status: OptionalNullable[List[models.BatchJobStatus]] = UNSET,
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
@@ -130,7 +130,7 @@ class MistralJobs(BaseSDK):
  metadata: OptionalNullable[Dict[str, Any]] = UNSET,
  created_after: OptionalNullable[datetime] = UNSET,
  created_by_me: Optional[bool] = False,
- status: OptionalNullable[models.BatchJobStatus] = UNSET,
+ status: OptionalNullable[List[models.BatchJobStatus]] = UNSET,
  retries: OptionalNullable[utils.RetryConfig] = UNSET,
  server_url: Optional[str] = None,
  timeout_ms: Optional[int] = None,
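
In MistralJobs.list (sync and async), status now accepts a list of models.BatchJobStatus values rather than a single value, so several batch-job states can be matched in one request. A minimal sketch, assuming the client.batch.jobs accessor and that the status strings below are valid BatchJobStatus values:

from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

# Pass several statuses at once; earlier versions accepted only a single status.
batch_jobs = client.batch.jobs.list(
    status=["QUEUED", "RUNNING"],  # assumed BatchJobStatus values
    created_by_me=True,
)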