mistralai 1.5.2rc1__py3-none-any.whl → 1.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_version.py +2 -2
- mistralai/agents.py +12 -0
- mistralai/chat.py +12 -0
- mistralai/classifiers.py +435 -23
- mistralai/embeddings.py +6 -2
- mistralai/jobs.py +84 -38
- mistralai/mistral_jobs.py +2 -2
- mistralai/models/__init__.py +197 -46
- mistralai/models/agentscompletionrequest.py +4 -0
- mistralai/models/agentscompletionstreamrequest.py +4 -0
- mistralai/models/archiveftmodelout.py +3 -11
- mistralai/models/batchjobout.py +3 -9
- mistralai/models/batchjobsout.py +3 -9
- mistralai/models/chatclassificationrequest.py +20 -0
- mistralai/models/chatcompletionrequest.py +4 -0
- mistralai/models/chatcompletionstreamrequest.py +4 -0
- mistralai/models/chatmoderationrequest.py +4 -7
- mistralai/models/classificationresponse.py +12 -9
- mistralai/models/classificationtargetresult.py +14 -0
- mistralai/models/classifierdetailedjobout.py +156 -0
- mistralai/models/classifierftmodelout.py +101 -0
- mistralai/models/classifierjobout.py +165 -0
- mistralai/models/classifiertargetin.py +55 -0
- mistralai/models/classifiertargetout.py +24 -0
- mistralai/models/classifiertrainingparameters.py +73 -0
- mistralai/models/classifiertrainingparametersin.py +85 -0
- mistralai/models/{detailedjobout.py → completiondetailedjobout.py} +34 -34
- mistralai/models/{ftmodelout.py → completionftmodelout.py} +12 -12
- mistralai/models/{jobout.py → completionjobout.py} +25 -24
- mistralai/models/{trainingparameters.py → completiontrainingparameters.py} +7 -7
- mistralai/models/{trainingparametersin.py → completiontrainingparametersin.py} +7 -7
- mistralai/models/embeddingrequest.py +6 -4
- mistralai/models/finetuneablemodeltype.py +7 -0
- mistralai/models/ftclassifierlossfunction.py +7 -0
- mistralai/models/ftmodelcapabilitiesout.py +3 -0
- mistralai/models/function.py +2 -2
- mistralai/models/githubrepositoryin.py +3 -11
- mistralai/models/githubrepositoryout.py +3 -11
- mistralai/models/inputs.py +54 -0
- mistralai/models/instructrequest.py +42 -0
- mistralai/models/jobin.py +52 -12
- mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +3 -3
- mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +29 -2
- mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +21 -4
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +29 -2
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +8 -0
- mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +29 -2
- mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +28 -2
- mistralai/models/jobsout.py +24 -13
- mistralai/models/jsonschema.py +1 -1
- mistralai/models/legacyjobmetadataout.py +3 -12
- mistralai/models/{classificationobject.py → moderationobject.py} +6 -6
- mistralai/models/moderationresponse.py +21 -0
- mistralai/models/unarchiveftmodelout.py +3 -11
- mistralai/models/wandbintegration.py +3 -11
- mistralai/models/wandbintegrationout.py +8 -13
- mistralai/models_.py +10 -4
- {mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/METADATA +4 -2
- {mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/RECORD +81 -63
- {mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/WHEEL +1 -1
- mistralai_azure/_version.py +2 -2
- mistralai_azure/chat.py +12 -0
- mistralai_azure/models/__init__.py +15 -0
- mistralai_azure/models/chatcompletionrequest.py +4 -0
- mistralai_azure/models/chatcompletionstreamrequest.py +4 -0
- mistralai_azure/models/contentchunk.py +6 -2
- mistralai_azure/models/function.py +2 -2
- mistralai_azure/models/imageurl.py +53 -0
- mistralai_azure/models/imageurlchunk.py +33 -0
- mistralai_azure/models/jsonschema.py +1 -1
- mistralai_gcp/_version.py +2 -2
- mistralai_gcp/chat.py +12 -0
- mistralai_gcp/models/__init__.py +15 -0
- mistralai_gcp/models/chatcompletionrequest.py +4 -0
- mistralai_gcp/models/chatcompletionstreamrequest.py +4 -0
- mistralai_gcp/models/contentchunk.py +6 -2
- mistralai_gcp/models/function.py +2 -2
- mistralai_gcp/models/imageurl.py +53 -0
- mistralai_gcp/models/imageurlchunk.py +33 -0
- mistralai_gcp/models/jsonschema.py +1 -1
- {mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/LICENSE +0 -0
@@ -95,6 +95,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
+    parallel_tool_calls: NotRequired[bool]
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""

@@ -141,6 +142,8 @@ class ChatCompletionRequest(BaseModel):

     prediction: Optional[Prediction] = None

+    parallel_tool_calls: Optional[bool] = None
+
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""

@@ -161,6 +164,7 @@ class ChatCompletionRequest(BaseModel):
             "frequency_penalty",
             "n",
             "prediction",
+            "parallel_tool_calls",
             "safe_prompt",
         ]
         nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
@@ -90,6 +90,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
+    parallel_tool_calls: NotRequired[bool]
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""

@@ -135,6 +136,8 @@ class ChatCompletionStreamRequest(BaseModel):

     prediction: Optional[Prediction] = None

+    parallel_tool_calls: Optional[bool] = None
+
     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""

@@ -155,6 +158,7 @@ class ChatCompletionStreamRequest(BaseModel):
             "frequency_penalty",
             "n",
             "prediction",
+            "parallel_tool_calls",
             "safe_prompt",
         ]
         nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
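The parallel_tool_calls field added above lands in the request models of all three SDK variants in this release (see the mistralai/chat.py, mistralai_azure/* and mistralai_gcp/* entries in the file list). Below is a minimal usage sketch against the main mistralai client; the model name, tool schema and API-key handling are illustrative assumptions, and only the parallel_tool_calls keyword itself comes from this diff.

import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# One illustrative function tool (the standard function-calling schema the SDK already supported).
weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

res = client.chat.complete(
    model="mistral-large-latest",
    messages=[{"role": "user", "content": "What's the weather in Paris and in Rome?"}],
    tools=[weather_tool],
    parallel_tool_calls=False,  # new in this release: disable parallel tool calling
)
print(res.choices[0].message.tool_calls)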
mistralai_azure/models/contentchunk.py CHANGED

@@ -1,6 +1,7 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict
 from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
 from .textchunk import TextChunk, TextChunkTypedDict
 from mistralai_azure.utils import get_discriminator

@@ -10,13 +11,16 @@ from typing_extensions import Annotated, TypeAliasType


 ContentChunkTypedDict = TypeAliasType(
-    "ContentChunkTypedDict",
+    "ContentChunkTypedDict",
+    Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict],
 )


 ContentChunk = Annotated[
     Union[
-        Annotated[
+        Annotated[ImageURLChunk, Tag("image_url")],
+        Annotated[TextChunk, Tag("text")],
+        Annotated[ReferenceChunk, Tag("reference")],
     ],
     Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]
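The rewritten ContentChunk alias uses pydantic's callable-discriminator pattern: each union member is wrapped in Annotated[..., Tag(...)] and a Discriminator callable maps the incoming "type" value to the matching tag. The self-contained sketch below illustrates the same pattern with toy models (not the SDK's own classes) and runs on pydantic v2.

from typing import Literal, Union

from pydantic import BaseModel, Discriminator, Tag, TypeAdapter
from typing_extensions import Annotated


class TextChunk(BaseModel):
    type: Literal["text"] = "text"
    text: str


class ImageURLChunk(BaseModel):
    type: Literal["image_url"] = "image_url"
    image_url: str


def by_type(value) -> str:
    # Plays the role of get_discriminator(m, "type", "type"): read the tag from
    # a dict key or from an attribute of an already-constructed model.
    if isinstance(value, dict):
        return value.get("type", "text")
    return getattr(value, "type", "text")


ContentChunk = Annotated[
    Union[
        Annotated[ImageURLChunk, Tag("image_url")],
        Annotated[TextChunk, Tag("text")],
    ],
    Discriminator(by_type),
]

chunk = TypeAdapter(ContentChunk).validate_python(
    {"type": "image_url", "image_url": "https://example.com/a.png"}
)
print(type(chunk).__name__)  # ImageURLChunk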
mistralai_azure/models/imageurl.py

@@ -0,0 +1,53 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai_azure.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class ImageURLTypedDict(TypedDict):
+    url: str
+    detail: NotRequired[Nullable[str]]
+
+
+class ImageURL(BaseModel):
+    url: str
+
+    detail: OptionalNullable[str] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["detail"]
+        nullable_fields = ["detail"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
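The generated wrap serializer above is what separates "never set" from "explicitly null": detail is both optional and nullable, so it is dropped from the payload while it still holds the UNSET sentinel, but kept as an explicit null once a caller assigns None. A hedged sketch of the expected behaviour follows; it assumes the mistralai_azure build from this diff is installed, and the commented dicts are a reading of the serializer logic rather than captured output.

from mistralai_azure.models import ImageURL

# detail never set -> omitted from the serialized payload entirely
print(ImageURL(url="https://example.com/cat.png").model_dump())
# expected: {'url': 'https://example.com/cat.png'}

# detail explicitly set to None -> kept as an explicit null
print(ImageURL(url="https://example.com/cat.png", detail=None).model_dump())
# expected: {'url': 'https://example.com/cat.png', 'detail': None}

# detail set to a value -> serialized normally
print(ImageURL(url="https://example.com/cat.png", detail="low").model_dump())
# expected: {'url': 'https://example.com/cat.png', 'detail': 'low'}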
mistralai_azure/models/imageurlchunk.py

@@ -0,0 +1,33 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .imageurl import ImageURL, ImageURLTypedDict
+from mistralai_azure.types import BaseModel
+from typing import Literal, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ImageURLChunkImageURLTypedDict = TypeAliasType(
+    "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str]
+)
+
+
+ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str])
+
+
+ImageURLChunkType = Literal["image_url"]
+
+
+class ImageURLChunkTypedDict(TypedDict):
+    r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0"""
+
+    image_url: ImageURLChunkImageURLTypedDict
+    type: NotRequired[ImageURLChunkType]
+
+
+class ImageURLChunk(BaseModel):
+    r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0"""
+
+    image_url: ImageURLChunkImageURL
+
+    type: Optional[ImageURLChunkType] = "image_url"
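With the two new models, an image is attached to a chat message as one chunk in a content list. The chunk classes and their fields come straight from the diff above; the surrounding message shape is an assumption based on the SDK's existing chat API.

from mistralai_azure.models import ImageURLChunk, TextChunk

content = [
    TextChunk(text="What is shown in this image?"),
    ImageURLChunk(image_url="https://example.com/photo.jpg"),  # a plain URL string is accepted
]

# e.g. as the content of a user message passed to chat.complete(...)
messages = [{"role": "user", "content": content}]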
mistralai_gcp/_version.py CHANGED

@@ -3,10 +3,10 @@
 import importlib.metadata

 __title__: str = "mistralai-gcp"
-__version__: str = "1.
+__version__: str = "1.6.0"
 __openapi_doc_version__: str = "0.0.2"
 __gen_version__: str = "2.548.6"
-__user_agent__: str = "speakeasy-sdk/python 1.
+__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai-gcp"

 try:
     if __package__ is not None:
mistralai_gcp/chat.py CHANGED

@@ -40,6 +40,7 @@ class Chat(BaseSDK):
         prediction: Optional[
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
+        parallel_tool_calls: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,

@@ -64,6 +65,7 @@ class Chat(BaseSDK):
         :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
+        :param parallel_tool_calls:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds

@@ -101,6 +103,7 @@ class Chat(BaseSDK):
             prediction=utils.get_pydantic_model(
                 prediction, Optional[models.Prediction]
             ),
+            parallel_tool_calls=parallel_tool_calls,
         )

         req = self._build_request(

@@ -205,6 +208,7 @@ class Chat(BaseSDK):
         prediction: Optional[
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
+        parallel_tool_calls: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,

@@ -229,6 +233,7 @@ class Chat(BaseSDK):
         :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
+        :param parallel_tool_calls:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds

@@ -266,6 +271,7 @@ class Chat(BaseSDK):
             prediction=utils.get_pydantic_model(
                 prediction, Optional[models.Prediction]
             ),
+            parallel_tool_calls=parallel_tool_calls,
         )

         req = self._build_request_async(

@@ -378,6 +384,7 @@ class Chat(BaseSDK):
         prediction: Optional[
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
+        parallel_tool_calls: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,

@@ -400,6 +407,7 @@ class Chat(BaseSDK):
         :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
+        :param parallel_tool_calls:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds

@@ -439,6 +447,7 @@ class Chat(BaseSDK):
             prediction=utils.get_pydantic_model(
                 prediction, Optional[models.Prediction]
             ),
+            parallel_tool_calls=parallel_tool_calls,
         )

         req = self._build_request(

@@ -547,6 +556,7 @@ class Chat(BaseSDK):
         prediction: Optional[
             Union[models.Prediction, models.PredictionTypedDict]
         ] = None,
+        parallel_tool_calls: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,

@@ -569,6 +579,7 @@ class Chat(BaseSDK):
         :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.
         :param n: Number of completions to return for each request, input tokens are only billed once.
         :param prediction:
+        :param parallel_tool_calls:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds

@@ -608,6 +619,7 @@ class Chat(BaseSDK):
             prediction=utils.get_pydantic_model(
                 prediction, Optional[models.Prediction]
             ),
+            parallel_tool_calls=parallel_tool_calls,
         )

         req = self._build_request_async(
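The same keyword is threaded through all four entry points of the GCP client's Chat class (complete, stream and their async counterparts). A hedged sketch of the streaming variant follows; the client constructor arguments, region, project id and model name are placeholder assumptions, and only the parallel_tool_calls keyword is taken from this diff.

from mistralai_gcp import MistralGoogleCloud

# Placeholder project settings; Google credentials are expected to come from the environment.
client = MistralGoogleCloud(region="europe-west4", project_id="my-gcp-project")

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city",
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
    },
}]

stream = client.chat.stream(
    model="mistral-large-2411",
    messages=[{"role": "user", "content": "Compare the weather in Paris and Rome."}],
    tools=tools,
    parallel_tool_calls=True,  # allow several tool calls in a single assistant turn
)

for event in stream:
    print(event.data.choices[0].delta.content, end="")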
mistralai_gcp/models/__init__.py CHANGED

@@ -67,6 +67,14 @@ from .functioncall import (
 )
 from .functionname import FunctionName, FunctionNameTypedDict
 from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
+from .imageurl import ImageURL, ImageURLTypedDict
+from .imageurlchunk import (
+    ImageURLChunk,
+    ImageURLChunkImageURL,
+    ImageURLChunkImageURLTypedDict,
+    ImageURLChunkType,
+    ImageURLChunkTypedDict,
+)
 from .jsonschema import JSONSchema, JSONSchemaTypedDict
 from .prediction import Prediction, PredictionTypedDict
 from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict

@@ -166,6 +174,13 @@ __all__ = [
     "FunctionTypedDict",
     "HTTPValidationError",
     "HTTPValidationErrorData",
+    "ImageURL",
+    "ImageURLChunk",
+    "ImageURLChunkImageURL",
+    "ImageURLChunkImageURLTypedDict",
+    "ImageURLChunkType",
+    "ImageURLChunkTypedDict",
+    "ImageURLTypedDict",
     "JSONSchema",
     "JSONSchemaTypedDict",
     "Loc",
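After this change the image types are part of the package's public models namespace, so they can be imported directly (assuming the mistralai_gcp build produced by this diff):

from mistralai_gcp.models import (
    ImageURL,
    ImageURLChunk,
    ImageURLChunkImageURL,
    ImageURLChunkTypedDict,
)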
mistralai_gcp/models/chatcompletionrequest.py CHANGED

@@ -95,6 +95,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
+    parallel_tool_calls: NotRequired[bool]


 class ChatCompletionRequest(BaseModel):

@@ -139,6 +140,8 @@ class ChatCompletionRequest(BaseModel):

     prediction: Optional[Prediction] = None

+    parallel_tool_calls: Optional[bool] = None
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [

@@ -155,6 +158,7 @@ class ChatCompletionRequest(BaseModel):
             "frequency_penalty",
             "n",
             "prediction",
+            "parallel_tool_calls",
         ]
         nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
         null_default_fields = []
mistralai_gcp/models/chatcompletionstreamrequest.py CHANGED

@@ -90,6 +90,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     n: NotRequired[Nullable[int]]
     r"""Number of completions to return for each request, input tokens are only billed once."""
     prediction: NotRequired[PredictionTypedDict]
+    parallel_tool_calls: NotRequired[bool]


 class ChatCompletionStreamRequest(BaseModel):

@@ -133,6 +134,8 @@ class ChatCompletionStreamRequest(BaseModel):

     prediction: Optional[Prediction] = None

+    parallel_tool_calls: Optional[bool] = None
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [

@@ -149,6 +152,7 @@ class ChatCompletionStreamRequest(BaseModel):
             "frequency_penalty",
             "n",
             "prediction",
+            "parallel_tool_calls",
         ]
         nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"]
         null_default_fields = []
mistralai_gcp/models/contentchunk.py CHANGED

@@ -1,6 +1,7 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict
 from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
 from .textchunk import TextChunk, TextChunkTypedDict
 from mistralai_gcp.utils import get_discriminator

@@ -10,13 +11,16 @@ from typing_extensions import Annotated, TypeAliasType


 ContentChunkTypedDict = TypeAliasType(
-    "ContentChunkTypedDict",
+    "ContentChunkTypedDict",
+    Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict],
 )


 ContentChunk = Annotated[
     Union[
-        Annotated[
+        Annotated[ImageURLChunk, Tag("image_url")],
+        Annotated[TextChunk, Tag("text")],
+        Annotated[ReferenceChunk, Tag("reference")],
     ],
     Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]
mistralai_gcp/models/function.py CHANGED
mistralai_gcp/models/imageurl.py

@@ -0,0 +1,53 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai_gcp.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class ImageURLTypedDict(TypedDict):
+    url: str
+    detail: NotRequired[Nullable[str]]
+
+
+class ImageURL(BaseModel):
+    url: str
+
+    detail: OptionalNullable[str] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["detail"]
+        nullable_fields = ["detail"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
mistralai_gcp/models/imageurlchunk.py

@@ -0,0 +1,33 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .imageurl import ImageURL, ImageURLTypedDict
+from mistralai_gcp.types import BaseModel
+from typing import Literal, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ImageURLChunkImageURLTypedDict = TypeAliasType(
+    "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str]
+)
+
+
+ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str])
+
+
+ImageURLChunkType = Literal["image_url"]
+
+
+class ImageURLChunkTypedDict(TypedDict):
+    r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0"""
+
+    image_url: ImageURLChunkImageURLTypedDict
+    type: NotRequired[ImageURLChunkType]
+
+
+class ImageURLChunk(BaseModel):
+    r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0"""
+
+    image_url: ImageURLChunkImageURL
+
+    type: Optional[ImageURLChunkType] = "image_url"
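The docstring on ImageURLChunk hints at the data-URL form (data:image/png;base64,...). A short sketch of building that form from a local file follows; the file path is a placeholder and the base64 handling is standard library.

import base64

from mistralai_gcp.models import ImageURLChunk

with open("chart.png", "rb") as f:  # placeholder path
    encoded = base64.b64encode(f.read()).decode("ascii")

chunk = ImageURLChunk(image_url=f"data:image/png;base64,{encoded}")
print(chunk.type)  # defaults to "image_url"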
{mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/LICENSE: file without changes