mistralai 1.10.1__py3-none-any.whl → 1.11.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_version.py +3 -3
- mistralai/accesses.py +22 -12
- mistralai/agents.py +88 -44
- mistralai/basesdk.py +6 -0
- mistralai/chat.py +96 -40
- mistralai/classifiers.py +35 -22
- mistralai/conversations.py +186 -64
- mistralai/documents.py +72 -26
- mistralai/embeddings.py +17 -8
- mistralai/files.py +58 -24
- mistralai/fim.py +20 -12
- mistralai/httpclient.py +0 -1
- mistralai/jobs.py +65 -26
- mistralai/libraries.py +20 -10
- mistralai/mistral_agents.py +438 -30
- mistralai/mistral_jobs.py +33 -14
- mistralai/models/__init__.py +16 -0
- mistralai/models/agent.py +1 -1
- mistralai/models/agentconversation.py +1 -1
- mistralai/models/agenthandoffdoneevent.py +1 -1
- mistralai/models/agenthandoffentry.py +3 -2
- mistralai/models/agenthandoffstartedevent.py +1 -1
- mistralai/models/agents_api_v1_agents_get_versionop.py +21 -0
- mistralai/models/agents_api_v1_agents_list_versionsop.py +33 -0
- mistralai/models/agents_api_v1_agents_listop.py +4 -0
- mistralai/models/agentscompletionrequest.py +2 -5
- mistralai/models/agentscompletionstreamrequest.py +2 -5
- mistralai/models/archiveftmodelout.py +1 -1
- mistralai/models/assistantmessage.py +1 -1
- mistralai/models/audiochunk.py +1 -1
- mistralai/models/audioencoding.py +6 -1
- mistralai/models/audioformat.py +2 -4
- mistralai/models/basemodelcard.py +1 -1
- mistralai/models/batchjobin.py +2 -4
- mistralai/models/batchjobout.py +1 -1
- mistralai/models/batchjobsout.py +1 -1
- mistralai/models/chatcompletionchoice.py +10 -5
- mistralai/models/chatcompletionrequest.py +2 -5
- mistralai/models/chatcompletionstreamrequest.py +2 -5
- mistralai/models/classifierdetailedjobout.py +4 -2
- mistralai/models/classifierftmodelout.py +3 -2
- mistralai/models/classifierjobout.py +4 -2
- mistralai/models/codeinterpretertool.py +1 -1
- mistralai/models/completiondetailedjobout.py +5 -2
- mistralai/models/completionftmodelout.py +3 -2
- mistralai/models/completionjobout.py +5 -2
- mistralai/models/completionresponsestreamchoice.py +9 -8
- mistralai/models/conversationappendrequest.py +4 -1
- mistralai/models/conversationappendstreamrequest.py +4 -1
- mistralai/models/conversationhistory.py +2 -1
- mistralai/models/conversationmessages.py +1 -1
- mistralai/models/conversationrequest.py +5 -1
- mistralai/models/conversationresponse.py +2 -1
- mistralai/models/conversationrestartrequest.py +4 -1
- mistralai/models/conversationrestartstreamrequest.py +4 -1
- mistralai/models/conversationstreamrequest.py +5 -1
- mistralai/models/documentlibrarytool.py +1 -1
- mistralai/models/documenturlchunk.py +1 -1
- mistralai/models/embeddingdtype.py +7 -1
- mistralai/models/encodingformat.py +4 -1
- mistralai/models/entitytype.py +8 -1
- mistralai/models/filepurpose.py +8 -1
- mistralai/models/files_api_routes_list_filesop.py +4 -11
- mistralai/models/files_api_routes_upload_fileop.py +2 -6
- mistralai/models/fileschema.py +3 -5
- mistralai/models/finetuneablemodeltype.py +4 -1
- mistralai/models/ftclassifierlossfunction.py +4 -1
- mistralai/models/ftmodelcard.py +1 -1
- mistralai/models/functioncallentry.py +3 -2
- mistralai/models/functioncallevent.py +1 -1
- mistralai/models/functionresultentry.py +3 -2
- mistralai/models/functiontool.py +1 -1
- mistralai/models/githubrepositoryin.py +1 -1
- mistralai/models/githubrepositoryout.py +1 -1
- mistralai/models/httpvalidationerror.py +4 -2
- mistralai/models/imagegenerationtool.py +1 -1
- mistralai/models/imageurlchunk.py +1 -1
- mistralai/models/jobsout.py +1 -1
- mistralai/models/legacyjobmetadataout.py +1 -1
- mistralai/models/messageinputentry.py +9 -3
- mistralai/models/messageoutputentry.py +6 -3
- mistralai/models/messageoutputevent.py +4 -2
- mistralai/models/mistralerror.py +11 -7
- mistralai/models/mistralpromptmode.py +1 -1
- mistralai/models/modelconversation.py +1 -1
- mistralai/models/no_response_error.py +5 -1
- mistralai/models/ocrrequest.py +11 -1
- mistralai/models/ocrtableobject.py +4 -1
- mistralai/models/referencechunk.py +1 -1
- mistralai/models/requestsource.py +5 -1
- mistralai/models/responsedoneevent.py +1 -1
- mistralai/models/responseerrorevent.py +1 -1
- mistralai/models/responseformats.py +5 -1
- mistralai/models/responsestartedevent.py +1 -1
- mistralai/models/responsevalidationerror.py +2 -0
- mistralai/models/retrievefileout.py +3 -5
- mistralai/models/sampletype.py +7 -1
- mistralai/models/sdkerror.py +2 -0
- mistralai/models/shareenum.py +7 -1
- mistralai/models/sharingdelete.py +2 -4
- mistralai/models/sharingin.py +3 -5
- mistralai/models/source.py +8 -1
- mistralai/models/systemmessage.py +1 -1
- mistralai/models/textchunk.py +1 -1
- mistralai/models/thinkchunk.py +1 -1
- mistralai/models/timestampgranularity.py +1 -1
- mistralai/models/tool.py +2 -6
- mistralai/models/toolcall.py +2 -6
- mistralai/models/toolchoice.py +2 -6
- mistralai/models/toolchoiceenum.py +6 -1
- mistralai/models/toolexecutiondeltaevent.py +2 -1
- mistralai/models/toolexecutiondoneevent.py +2 -1
- mistralai/models/toolexecutionentry.py +4 -2
- mistralai/models/toolexecutionstartedevent.py +2 -1
- mistralai/models/toolfilechunk.py +2 -1
- mistralai/models/toolmessage.py +1 -1
- mistralai/models/toolreferencechunk.py +2 -1
- mistralai/models/tooltypes.py +1 -1
- mistralai/models/transcriptionsegmentchunk.py +1 -1
- mistralai/models/transcriptionstreamdone.py +1 -1
- mistralai/models/transcriptionstreamlanguage.py +1 -1
- mistralai/models/transcriptionstreamsegmentdelta.py +1 -1
- mistralai/models/transcriptionstreamtextdelta.py +1 -1
- mistralai/models/unarchiveftmodelout.py +1 -1
- mistralai/models/uploadfileout.py +3 -5
- mistralai/models/usermessage.py +1 -1
- mistralai/models/wandbintegration.py +1 -1
- mistralai/models/wandbintegrationout.py +1 -1
- mistralai/models/websearchpremiumtool.py +1 -1
- mistralai/models/websearchtool.py +1 -1
- mistralai/models_.py +24 -12
- mistralai/ocr.py +38 -10
- mistralai/sdk.py +2 -2
- mistralai/transcriptions.py +28 -12
- mistralai/types/basemodel.py +41 -3
- mistralai/utils/__init__.py +0 -3
- mistralai/utils/annotations.py +32 -8
- mistralai/utils/enums.py +60 -0
- mistralai/utils/forms.py +21 -10
- mistralai/utils/queryparams.py +14 -2
- mistralai/utils/requestbodies.py +3 -3
- mistralai/utils/retries.py +69 -5
- mistralai/utils/serializers.py +0 -20
- mistralai/utils/unmarshal_json_response.py +15 -1
- {mistralai-1.10.1.dist-info → mistralai-1.11.1.dist-info}/METADATA +24 -31
- {mistralai-1.10.1.dist-info → mistralai-1.11.1.dist-info}/RECORD +233 -230
- mistralai_azure/_version.py +3 -3
- mistralai_azure/basesdk.py +6 -0
- mistralai_azure/chat.py +27 -15
- mistralai_azure/httpclient.py +0 -1
- mistralai_azure/models/__init__.py +16 -1
- mistralai_azure/models/assistantmessage.py +1 -1
- mistralai_azure/models/chatcompletionchoice.py +10 -7
- mistralai_azure/models/chatcompletionrequest.py +8 -6
- mistralai_azure/models/chatcompletionstreamrequest.py +8 -6
- mistralai_azure/models/completionresponsestreamchoice.py +11 -7
- mistralai_azure/models/documenturlchunk.py +1 -1
- mistralai_azure/models/httpvalidationerror.py +4 -2
- mistralai_azure/models/imageurlchunk.py +1 -1
- mistralai_azure/models/mistralazureerror.py +11 -7
- mistralai_azure/models/mistralpromptmode.py +1 -1
- mistralai_azure/models/no_response_error.py +5 -1
- mistralai_azure/models/ocrpageobject.py +32 -5
- mistralai_azure/models/ocrrequest.py +20 -1
- mistralai_azure/models/ocrtableobject.py +34 -0
- mistralai_azure/models/referencechunk.py +1 -1
- mistralai_azure/models/responseformats.py +5 -1
- mistralai_azure/models/responsevalidationerror.py +2 -0
- mistralai_azure/models/sdkerror.py +2 -0
- mistralai_azure/models/systemmessage.py +1 -1
- mistralai_azure/models/textchunk.py +1 -1
- mistralai_azure/models/thinkchunk.py +1 -1
- mistralai_azure/models/tool.py +2 -6
- mistralai_azure/models/toolcall.py +2 -6
- mistralai_azure/models/toolchoice.py +2 -6
- mistralai_azure/models/toolchoiceenum.py +6 -1
- mistralai_azure/models/toolmessage.py +1 -1
- mistralai_azure/models/tooltypes.py +1 -1
- mistralai_azure/models/usermessage.py +1 -1
- mistralai_azure/ocr.py +26 -6
- mistralai_azure/types/basemodel.py +41 -3
- mistralai_azure/utils/__init__.py +0 -3
- mistralai_azure/utils/annotations.py +32 -8
- mistralai_azure/utils/enums.py +60 -0
- mistralai_azure/utils/forms.py +21 -10
- mistralai_azure/utils/queryparams.py +14 -2
- mistralai_azure/utils/requestbodies.py +3 -3
- mistralai_azure/utils/retries.py +69 -5
- mistralai_azure/utils/serializers.py +0 -20
- mistralai_azure/utils/unmarshal_json_response.py +15 -1
- mistralai_gcp/_version.py +3 -3
- mistralai_gcp/basesdk.py +6 -0
- mistralai_gcp/chat.py +27 -15
- mistralai_gcp/fim.py +27 -15
- mistralai_gcp/httpclient.py +0 -1
- mistralai_gcp/models/assistantmessage.py +1 -1
- mistralai_gcp/models/chatcompletionchoice.py +10 -7
- mistralai_gcp/models/chatcompletionrequest.py +8 -6
- mistralai_gcp/models/chatcompletionstreamrequest.py +8 -6
- mistralai_gcp/models/completionresponsestreamchoice.py +11 -7
- mistralai_gcp/models/fimcompletionrequest.py +6 -1
- mistralai_gcp/models/fimcompletionstreamrequest.py +6 -1
- mistralai_gcp/models/httpvalidationerror.py +4 -2
- mistralai_gcp/models/imageurlchunk.py +1 -1
- mistralai_gcp/models/mistralgcperror.py +11 -7
- mistralai_gcp/models/mistralpromptmode.py +1 -1
- mistralai_gcp/models/no_response_error.py +5 -1
- mistralai_gcp/models/referencechunk.py +1 -1
- mistralai_gcp/models/responseformats.py +5 -1
- mistralai_gcp/models/responsevalidationerror.py +2 -0
- mistralai_gcp/models/sdkerror.py +2 -0
- mistralai_gcp/models/systemmessage.py +1 -1
- mistralai_gcp/models/textchunk.py +1 -1
- mistralai_gcp/models/thinkchunk.py +1 -1
- mistralai_gcp/models/tool.py +2 -6
- mistralai_gcp/models/toolcall.py +2 -6
- mistralai_gcp/models/toolchoice.py +2 -6
- mistralai_gcp/models/toolchoiceenum.py +6 -1
- mistralai_gcp/models/toolmessage.py +1 -1
- mistralai_gcp/models/tooltypes.py +1 -1
- mistralai_gcp/models/usermessage.py +1 -1
- mistralai_gcp/types/basemodel.py +41 -3
- mistralai_gcp/utils/__init__.py +0 -3
- mistralai_gcp/utils/annotations.py +32 -8
- mistralai_gcp/utils/enums.py +60 -0
- mistralai_gcp/utils/forms.py +21 -10
- mistralai_gcp/utils/queryparams.py +14 -2
- mistralai_gcp/utils/requestbodies.py +3 -3
- mistralai_gcp/utils/retries.py +69 -5
- mistralai_gcp/utils/serializers.py +0 -20
- mistralai_gcp/utils/unmarshal_json_response.py +15 -1
- {mistralai-1.10.1.dist-info → mistralai-1.11.1.dist-info}/WHEEL +0 -0
- {mistralai-1.10.1.dist-info → mistralai-1.11.1.dist-info}/licenses/LICENSE +0 -0
mistralai/mistral_jobs.py
CHANGED

@@ -4,6 +4,11 @@ from .basesdk import BaseSDK
 from datetime import datetime
 from mistralai import models, utils
 from mistralai._hooks import HookContext
+from mistralai.models import (
+    apiendpoint as models_apiendpoint,
+    batchjobstatus as models_batchjobstatus,
+    batchrequest as models_batchrequest,
+)
 from mistralai.types import OptionalNullable, UNSET
 from mistralai.utils import get_security_from_env
 from mistralai.utils.unmarshal_json_response import unmarshal_json_response

@@ -21,7 +26,7 @@ class MistralJobs(BaseSDK):
         metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         created_after: OptionalNullable[datetime] = UNSET,
         created_by_me: Optional[bool] = False,
-        status: OptionalNullable[List[
+        status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,

@@ -78,6 +83,7 @@ class MistralJobs(BaseSDK):
             accept_header_value="application/json",
             http_headers=http_headers,
             security=self.sdk_configuration.security,
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -94,7 +100,7 @@ class MistralJobs(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_jobs",
-                oauth2_scopes=
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),

@@ -125,7 +131,7 @@ class MistralJobs(BaseSDK):
         metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         created_after: OptionalNullable[datetime] = UNSET,
         created_by_me: Optional[bool] = False,
-        status: OptionalNullable[List[
+        status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,

@@ -182,6 +188,7 @@ class MistralJobs(BaseSDK):
             accept_header_value="application/json",
             http_headers=http_headers,
             security=self.sdk_configuration.security,
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -198,7 +205,7 @@ class MistralJobs(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_jobs",
-                oauth2_scopes=
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),

@@ -222,10 +229,13 @@ class MistralJobs(BaseSDK):
     def create(
         self,
         *,
-        endpoint:
+        endpoint: models_apiendpoint.APIEndpoint,
         input_files: OptionalNullable[List[str]] = UNSET,
         requests: OptionalNullable[
-            Union[
+            Union[
+                List[models_batchrequest.BatchRequest],
+                List[models_batchrequest.BatchRequestTypedDict],
+            ]
         ] = UNSET,
         model: OptionalNullable[str] = UNSET,
         agent_id: OptionalNullable[str] = UNSET,

@@ -290,6 +300,7 @@ class MistralJobs(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.BatchJobIn
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -306,7 +317,7 @@ class MistralJobs(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_create_batch_job",
-                oauth2_scopes=
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),

@@ -330,10 +341,13 @@ class MistralJobs(BaseSDK):
     async def create_async(
         self,
         *,
-        endpoint:
+        endpoint: models_apiendpoint.APIEndpoint,
         input_files: OptionalNullable[List[str]] = UNSET,
         requests: OptionalNullable[
-            Union[
+            Union[
+                List[models_batchrequest.BatchRequest],
+                List[models_batchrequest.BatchRequestTypedDict],
+            ]
         ] = UNSET,
         model: OptionalNullable[str] = UNSET,
         agent_id: OptionalNullable[str] = UNSET,

@@ -398,6 +412,7 @@ class MistralJobs(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.BatchJobIn
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -414,7 +429,7 @@ class MistralJobs(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_create_batch_job",
-                oauth2_scopes=
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),

@@ -487,6 +502,7 @@ class MistralJobs(BaseSDK):
             accept_header_value="application/json",
             http_headers=http_headers,
             security=self.sdk_configuration.security,
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -503,7 +519,7 @@ class MistralJobs(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_job",
-                oauth2_scopes=
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),

@@ -576,6 +592,7 @@ class MistralJobs(BaseSDK):
             accept_header_value="application/json",
             http_headers=http_headers,
             security=self.sdk_configuration.security,
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -592,7 +609,7 @@ class MistralJobs(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_job",
-                oauth2_scopes=
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),

@@ -659,6 +676,7 @@ class MistralJobs(BaseSDK):
             accept_header_value="application/json",
             http_headers=http_headers,
             security=self.sdk_configuration.security,
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -675,7 +693,7 @@ class MistralJobs(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_cancel_batch_job",
-                oauth2_scopes=
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),

@@ -742,6 +760,7 @@ class MistralJobs(BaseSDK):
             accept_header_value="application/json",
             http_headers=http_headers,
             security=self.sdk_configuration.security,
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -758,7 +777,7 @@ class MistralJobs(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_cancel_batch_job",
-                oauth2_scopes=
+                oauth2_scopes=None,
                 security_source=get_security_from_env(
                     self.sdk_configuration.security, models.Security
                 ),
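The retyped batch-jobs surface can be exercised as follows; a minimal sketch, assuming the client wiring of earlier 1.x releases (`client.batch.jobs`) with a placeholder API key, file id, and status values, none of which are taken from this diff:

from mistralai import Mistral

# Sketch only: the attribute path, endpoint string, model name and status
# literals are assumptions based on earlier SDK releases and docs.
client = Mistral(api_key="YOUR_API_KEY")

job = client.batch.jobs.create(
    endpoint="/v1/chat/completions",     # now typed as APIEndpoint rather than an untyped value
    input_files=["<uploaded-file-id>"],  # placeholder file id
    model="mistral-small-latest",
    metadata={"job_type": "testing"},
)

# The list filter is now OptionalNullable[List[BatchJobStatus]] instead of a bare list.
running = client.batch.jobs.list(status=["RUNNING"], created_by_me=True)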
mistralai/models/__init__.py
CHANGED

@@ -45,10 +45,18 @@ if TYPE_CHECKING:
         AgentsAPIV1AgentsDeleteRequest,
         AgentsAPIV1AgentsDeleteRequestTypedDict,
     )
+    from .agents_api_v1_agents_get_versionop import (
+        AgentsAPIV1AgentsGetVersionRequest,
+        AgentsAPIV1AgentsGetVersionRequestTypedDict,
+    )
     from .agents_api_v1_agents_getop import (
         AgentsAPIV1AgentsGetRequest,
         AgentsAPIV1AgentsGetRequestTypedDict,
     )
+    from .agents_api_v1_agents_list_versionsop import (
+        AgentsAPIV1AgentsListVersionsRequest,
+        AgentsAPIV1AgentsListVersionsRequestTypedDict,
+    )
     from .agents_api_v1_agents_listop import (
         AgentsAPIV1AgentsListRequest,
         AgentsAPIV1AgentsListRequestTypedDict,

@@ -972,8 +980,12 @@ __all__ = [
     "AgentsAPIV1AgentsDeleteRequestTypedDict",
     "AgentsAPIV1AgentsGetRequest",
     "AgentsAPIV1AgentsGetRequestTypedDict",
+    "AgentsAPIV1AgentsGetVersionRequest",
+    "AgentsAPIV1AgentsGetVersionRequestTypedDict",
     "AgentsAPIV1AgentsListRequest",
     "AgentsAPIV1AgentsListRequestTypedDict",
+    "AgentsAPIV1AgentsListVersionsRequest",
+    "AgentsAPIV1AgentsListVersionsRequestTypedDict",
     "AgentsAPIV1AgentsUpdateRequest",
     "AgentsAPIV1AgentsUpdateRequestTypedDict",
     "AgentsAPIV1AgentsUpdateVersionRequest",

@@ -1682,8 +1694,12 @@ _dynamic_imports: dict[str, str] = {
     "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent",
     "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop",
     "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop",
+    "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop",
+    "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop",
     "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop",
     "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop",
+    "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop",
+    "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop",
     "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop",
     "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop",
     "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop",
mistralai/models/agent.py
CHANGED

mistralai/models/agentconversation.py
CHANGED

@@ -8,7 +8,7 @@ from typing import Any, Dict, Literal, Optional
 from typing_extensions import NotRequired, TypedDict


-AgentConversationObject = Literal["conversation"]
+AgentConversationObject = Literal["conversation",]


 class AgentConversationTypedDict(TypedDict):

mistralai/models/agenthandoffdoneevent.py
CHANGED

@@ -7,7 +7,7 @@ from typing import Literal, Optional
 from typing_extensions import NotRequired, TypedDict


-AgentHandoffDoneEventType = Literal["agent.handoff.done"]
+AgentHandoffDoneEventType = Literal["agent.handoff.done",]


 class AgentHandoffDoneEventTypedDict(TypedDict):

mistralai/models/agenthandoffentry.py
CHANGED

@@ -8,9 +8,10 @@ from typing import Literal, Optional
 from typing_extensions import NotRequired, TypedDict


-AgentHandoffEntryObject = Literal["entry"]
+AgentHandoffEntryObject = Literal["entry",]

-
+
+AgentHandoffEntryType = Literal["agent.handoff",]


 class AgentHandoffEntryTypedDict(TypedDict):

mistralai/models/agenthandoffstartedevent.py
CHANGED

@@ -7,7 +7,7 @@ from typing import Literal, Optional
 from typing_extensions import NotRequired, TypedDict


-AgentHandoffStartedEventType = Literal["agent.handoff.started"]
+AgentHandoffStartedEventType = Literal["agent.handoff.started",]


 class AgentHandoffStartedEventTypedDict(TypedDict):

mistralai/models/agents_api_v1_agents_get_versionop.py
ADDED

@@ -0,0 +1,21 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.types import BaseModel
+from mistralai.utils import FieldMetadata, PathParamMetadata
+from typing_extensions import Annotated, TypedDict
+
+
+class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict):
+    agent_id: str
+    version: int
+
+
+class AgentsAPIV1AgentsGetVersionRequest(BaseModel):
+    agent_id: Annotated[
+        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+    ]
+
+    version: Annotated[
+        int, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+    ]

mistralai/models/agents_api_v1_agents_list_versionsop.py
ADDED

@@ -0,0 +1,33 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.types import BaseModel
+from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata
+from typing import Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict):
+    agent_id: str
+    page: NotRequired[int]
+    r"""Page number (0-indexed)"""
+    page_size: NotRequired[int]
+    r"""Number of versions per page"""
+
+
+class AgentsAPIV1AgentsListVersionsRequest(BaseModel):
+    agent_id: Annotated[
+        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+    ]
+
+    page: Annotated[
+        Optional[int],
+        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+    ] = 0
+    r"""Page number (0-indexed)"""
+
+    page_size: Annotated[
+        Optional[int],
+        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
+    ] = 20
+    r"""Number of versions per page"""
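The two request models added above can be constructed directly from `mistralai.models`; a minimal sketch with a placeholder agent id, using the defaults shown in the generated code (page 0, page size 20):

from mistralai.models import (
    AgentsAPIV1AgentsGetVersionRequest,
    AgentsAPIV1AgentsListVersionsRequest,
)

# "ag_0123" is a placeholder id, not a real agent.
get_version_req = AgentsAPIV1AgentsGetVersionRequest(agent_id="ag_0123", version=2)
list_versions_req = AgentsAPIV1AgentsListVersionsRequest(
    agent_id="ag_0123", page=0, page_size=20
)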
mistralai/models/agents_api_v1_agents_listop.py
CHANGED

@@ -11,7 +11,9 @@ from typing_extensions import Annotated, NotRequired, TypedDict

 class AgentsAPIV1AgentsListRequestTypedDict(TypedDict):
     page: NotRequired[int]
+    r"""Page number (0-indexed)"""
     page_size: NotRequired[int]
+    r"""Number of agents per page"""
     deployment_chat: NotRequired[Nullable[bool]]
     sources: NotRequired[Nullable[List[RequestSource]]]
     name: NotRequired[Nullable[str]]

@@ -24,11 +26,13 @@ class AgentsAPIV1AgentsListRequest(BaseModel):
         Optional[int],
         FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
     ] = 0
+    r"""Page number (0-indexed)"""

     page_size: Annotated[
         Optional[int],
         FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
     ] = 20
+    r"""Number of agents per page"""

     deployment_chat: Annotated[
         OptionalNullable[bool],

mistralai/models/agentscompletionrequest.py
CHANGED

@@ -12,9 +12,8 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from pydantic.functional_validators import PlainValidator
 from typing import Any, Dict, List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict

@@ -137,9 +136,7 @@ class AgentsCompletionRequest(BaseModel):

     parallel_tool_calls: Optional[bool] = None

-    prompt_mode:
-        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
-    ] = UNSET
+    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
     r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     @model_serializer(mode="wrap")

mistralai/models/agentscompletionstreamrequest.py
CHANGED

@@ -12,9 +12,8 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from pydantic.functional_validators import PlainValidator
 from typing import Any, Dict, List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict

@@ -135,9 +134,7 @@ class AgentsCompletionStreamRequest(BaseModel):

     parallel_tool_calls: Optional[bool] = None

-    prompt_mode:
-        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
-    ] = UNSET
+    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
     r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     @model_serializer(mode="wrap")
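With `prompt_mode` now declared as a plain `OptionalNullable[MistralPromptMode]` (the `PlainValidator` open-enum wrapper is gone), the field is still passed the same way; a minimal sketch, assuming the `client.chat.complete` entry point and kwarg flattening of earlier 1.x releases and an illustrative reasoning-model name:

from mistralai import Mistral

# Sketch only: the call path, kwarg name and model name are assumptions;
# the "reasoning" value comes from the field docstring shown above.
client = Mistral(api_key="YOUR_API_KEY")

response = client.chat.complete(
    model="magistral-small-latest",  # illustrative reasoning model
    messages=[{"role": "user", "content": "How many r's are in strawberry?"}],
    prompt_mode="reasoning",
)
print(response.choices[0].message.content)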
mistralai/models/audiochunk.py
CHANGED

mistralai/models/audioencoding.py
CHANGED

@@ -7,7 +7,12 @@ from typing import Literal, Union

 AudioEncoding = Union[
     Literal[
-        "pcm_s16le",
+        "pcm_s16le",
+        "pcm_s32le",
+        "pcm_f16le",
+        "pcm_f32le",
+        "pcm_mulaw",
+        "pcm_alaw",
     ],
     UnrecognizedStr,
 ]
mistralai/models/audioformat.py
CHANGED

@@ -3,9 +3,7 @@
 from __future__ import annotations
 from .audioencoding import AudioEncoding
 from mistralai.types import BaseModel
-from
-from pydantic.functional_validators import PlainValidator
-from typing_extensions import Annotated, TypedDict
+from typing_extensions import TypedDict


 class AudioFormatTypedDict(TypedDict):

@@ -14,6 +12,6 @@ class AudioFormatTypedDict(TypedDict):


 class AudioFormat(BaseModel):
-    encoding:
+    encoding: AudioEncoding

     sample_rate: int
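Together with the expanded `AudioEncoding` union above, the simplified `AudioFormat` model accepts the new encodings directly; a small sketch, assuming `encoding` and `sample_rate` are the only required fields (as in the lines shown) and using an illustrative sample rate:

from mistralai.models import AudioFormat

# AudioEncoding is an open enum: the known literals plus UnrecognizedStr,
# so a plain string value validates.
fmt = AudioFormat(encoding="pcm_f32le", sample_rate=24000)
print(fmt.encoding, fmt.sample_rate)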
mistralai/models/batchjobin.py
CHANGED

@@ -4,11 +4,9 @@ from __future__ import annotations
 from .apiendpoint import APIEndpoint
 from .batchrequest import BatchRequest, BatchRequestTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import validate_open_enum
 from pydantic import model_serializer
-from pydantic.functional_validators import PlainValidator
 from typing import Dict, List, Optional
-from typing_extensions import
+from typing_extensions import NotRequired, TypedDict


 class BatchJobInTypedDict(TypedDict):

@@ -27,7 +25,7 @@ class BatchJobInTypedDict(TypedDict):


 class BatchJobIn(BaseModel):
-    endpoint:
+    endpoint: APIEndpoint

     input_files: OptionalNullable[List[str]] = UNSET
     r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```"""
mistralai/models/batchjobout.py
CHANGED
mistralai/models/batchjobsout.py
CHANGED

mistralai/models/chatcompletionchoice.py
CHANGED

@@ -3,14 +3,19 @@
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
 from mistralai.types import BaseModel, UnrecognizedStr
-from mistralai.utils import validate_open_enum
-from pydantic.functional_validators import PlainValidator
 from typing import Literal, Union
-from typing_extensions import
+from typing_extensions import TypedDict


 FinishReason = Union[
-    Literal[
+    Literal[
+        "stop",
+        "length",
+        "model_length",
+        "error",
+        "tool_calls",
+    ],
+    UnrecognizedStr,
 ]


@@ -25,4 +30,4 @@ class ChatCompletionChoice(BaseModel):

     message: AssistantMessage

-    finish_reason:
+    finish_reason: FinishReason
mistralai/models/chatcompletionrequest.py
CHANGED

@@ -12,9 +12,8 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from pydantic.functional_validators import PlainValidator
 from typing import Any, Dict, List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict

@@ -153,9 +152,7 @@ class ChatCompletionRequest(BaseModel):
     parallel_tool_calls: Optional[bool] = None
     r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""

-    prompt_mode:
-        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
-    ] = UNSET
+    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
     r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     safe_prompt: Optional[bool] = None

mistralai/models/chatcompletionstreamrequest.py
CHANGED

@@ -12,9 +12,8 @@ from .toolchoiceenum import ToolChoiceEnum
 from .toolmessage import ToolMessage, ToolMessageTypedDict
 from .usermessage import UserMessage, UserMessageTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
+from mistralai.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from pydantic.functional_validators import PlainValidator
 from typing import Any, Dict, List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict

@@ -155,9 +154,7 @@ class ChatCompletionStreamRequest(BaseModel):
     parallel_tool_calls: Optional[bool] = None
     r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""

-    prompt_mode:
-        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
-    ] = UNSET
+    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
     r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     safe_prompt: Optional[bool] = None

mistralai/models/classifierdetailedjobout.py
CHANGED

@@ -29,7 +29,9 @@ ClassifierDetailedJobOutStatus = Literal[
     "CANCELLATION_REQUESTED",
 ]

-
+
+ClassifierDetailedJobOutObject = Literal["job",]
+

 ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict

@@ -37,7 +39,7 @@ ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict
 ClassifierDetailedJobOutIntegrations = WandbIntegrationOut


-ClassifierDetailedJobOutJobType = Literal["classifier"]
+ClassifierDetailedJobOutJobType = Literal["classifier",]


 class ClassifierDetailedJobOutTypedDict(TypedDict):

mistralai/models/classifierftmodelout.py
CHANGED

@@ -12,9 +12,10 @@ from typing import List, Literal, Optional
 from typing_extensions import NotRequired, TypedDict


-ClassifierFTModelOutObject = Literal["model"]
+ClassifierFTModelOutObject = Literal["model",]

-
+
+ClassifierFTModelOutModelType = Literal["classifier",]


 class ClassifierFTModelOutTypedDict(TypedDict):
|