mistralai 1.10.1__py3-none-any.whl → 1.11.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_version.py +3 -3
- mistralai/accesses.py +22 -12
- mistralai/agents.py +88 -44
- mistralai/basesdk.py +6 -0
- mistralai/chat.py +96 -40
- mistralai/classifiers.py +35 -22
- mistralai/conversations.py +186 -64
- mistralai/documents.py +72 -26
- mistralai/embeddings.py +17 -8
- mistralai/files.py +58 -24
- mistralai/fim.py +20 -12
- mistralai/httpclient.py +0 -1
- mistralai/jobs.py +65 -26
- mistralai/libraries.py +20 -10
- mistralai/mistral_agents.py +438 -30
- mistralai/mistral_jobs.py +33 -14
- mistralai/models/__init__.py +16 -0
- mistralai/models/agent.py +1 -1
- mistralai/models/agentconversation.py +1 -1
- mistralai/models/agenthandoffdoneevent.py +1 -1
- mistralai/models/agenthandoffentry.py +3 -2
- mistralai/models/agenthandoffstartedevent.py +1 -1
- mistralai/models/agents_api_v1_agents_get_versionop.py +21 -0
- mistralai/models/agents_api_v1_agents_list_versionsop.py +33 -0
- mistralai/models/agents_api_v1_agents_listop.py +4 -0
- mistralai/models/agentscompletionrequest.py +2 -5
- mistralai/models/agentscompletionstreamrequest.py +2 -5
- mistralai/models/archiveftmodelout.py +1 -1
- mistralai/models/assistantmessage.py +1 -1
- mistralai/models/audiochunk.py +1 -1
- mistralai/models/audioencoding.py +6 -1
- mistralai/models/audioformat.py +2 -4
- mistralai/models/basemodelcard.py +1 -1
- mistralai/models/batchjobin.py +2 -4
- mistralai/models/batchjobout.py +1 -1
- mistralai/models/batchjobsout.py +1 -1
- mistralai/models/chatcompletionchoice.py +10 -5
- mistralai/models/chatcompletionrequest.py +2 -5
- mistralai/models/chatcompletionstreamrequest.py +2 -5
- mistralai/models/classifierdetailedjobout.py +4 -2
- mistralai/models/classifierftmodelout.py +3 -2
- mistralai/models/classifierjobout.py +4 -2
- mistralai/models/codeinterpretertool.py +1 -1
- mistralai/models/completiondetailedjobout.py +5 -2
- mistralai/models/completionftmodelout.py +3 -2
- mistralai/models/completionjobout.py +5 -2
- mistralai/models/completionresponsestreamchoice.py +9 -8
- mistralai/models/conversationappendrequest.py +4 -1
- mistralai/models/conversationappendstreamrequest.py +4 -1
- mistralai/models/conversationhistory.py +2 -1
- mistralai/models/conversationmessages.py +1 -1
- mistralai/models/conversationrequest.py +5 -1
- mistralai/models/conversationresponse.py +2 -1
- mistralai/models/conversationrestartrequest.py +4 -1
- mistralai/models/conversationrestartstreamrequest.py +4 -1
- mistralai/models/conversationstreamrequest.py +5 -1
- mistralai/models/documentlibrarytool.py +1 -1
- mistralai/models/documenturlchunk.py +1 -1
- mistralai/models/embeddingdtype.py +7 -1
- mistralai/models/encodingformat.py +4 -1
- mistralai/models/entitytype.py +8 -1
- mistralai/models/filepurpose.py +8 -1
- mistralai/models/files_api_routes_list_filesop.py +4 -11
- mistralai/models/files_api_routes_upload_fileop.py +2 -6
- mistralai/models/fileschema.py +3 -5
- mistralai/models/finetuneablemodeltype.py +4 -1
- mistralai/models/ftclassifierlossfunction.py +4 -1
- mistralai/models/ftmodelcard.py +1 -1
- mistralai/models/functioncallentry.py +3 -2
- mistralai/models/functioncallevent.py +1 -1
- mistralai/models/functionresultentry.py +3 -2
- mistralai/models/functiontool.py +1 -1
- mistralai/models/githubrepositoryin.py +1 -1
- mistralai/models/githubrepositoryout.py +1 -1
- mistralai/models/httpvalidationerror.py +4 -2
- mistralai/models/imagegenerationtool.py +1 -1
- mistralai/models/imageurlchunk.py +1 -1
- mistralai/models/jobsout.py +1 -1
- mistralai/models/legacyjobmetadataout.py +1 -1
- mistralai/models/messageinputentry.py +9 -3
- mistralai/models/messageoutputentry.py +6 -3
- mistralai/models/messageoutputevent.py +4 -2
- mistralai/models/mistralerror.py +11 -7
- mistralai/models/mistralpromptmode.py +1 -1
- mistralai/models/modelconversation.py +1 -1
- mistralai/models/no_response_error.py +5 -1
- mistralai/models/ocrrequest.py +11 -1
- mistralai/models/ocrtableobject.py +4 -1
- mistralai/models/referencechunk.py +1 -1
- mistralai/models/requestsource.py +5 -1
- mistralai/models/responsedoneevent.py +1 -1
- mistralai/models/responseerrorevent.py +1 -1
- mistralai/models/responseformats.py +5 -1
- mistralai/models/responsestartedevent.py +1 -1
- mistralai/models/responsevalidationerror.py +2 -0
- mistralai/models/retrievefileout.py +3 -5
- mistralai/models/sampletype.py +7 -1
- mistralai/models/sdkerror.py +2 -0
- mistralai/models/shareenum.py +7 -1
- mistralai/models/sharingdelete.py +2 -4
- mistralai/models/sharingin.py +3 -5
- mistralai/models/source.py +8 -1
- mistralai/models/systemmessage.py +1 -1
- mistralai/models/textchunk.py +1 -1
- mistralai/models/thinkchunk.py +1 -1
- mistralai/models/timestampgranularity.py +1 -1
- mistralai/models/tool.py +2 -6
- mistralai/models/toolcall.py +2 -6
- mistralai/models/toolchoice.py +2 -6
- mistralai/models/toolchoiceenum.py +6 -1
- mistralai/models/toolexecutiondeltaevent.py +2 -1
- mistralai/models/toolexecutiondoneevent.py +2 -1
- mistralai/models/toolexecutionentry.py +4 -2
- mistralai/models/toolexecutionstartedevent.py +2 -1
- mistralai/models/toolfilechunk.py +2 -1
- mistralai/models/toolmessage.py +1 -1
- mistralai/models/toolreferencechunk.py +2 -1
- mistralai/models/tooltypes.py +1 -1
- mistralai/models/transcriptionsegmentchunk.py +1 -1
- mistralai/models/transcriptionstreamdone.py +1 -1
- mistralai/models/transcriptionstreamlanguage.py +1 -1
- mistralai/models/transcriptionstreamsegmentdelta.py +1 -1
- mistralai/models/transcriptionstreamtextdelta.py +1 -1
- mistralai/models/unarchiveftmodelout.py +1 -1
- mistralai/models/uploadfileout.py +3 -5
- mistralai/models/usermessage.py +1 -1
- mistralai/models/wandbintegration.py +1 -1
- mistralai/models/wandbintegrationout.py +1 -1
- mistralai/models/websearchpremiumtool.py +1 -1
- mistralai/models/websearchtool.py +1 -1
- mistralai/models_.py +24 -12
- mistralai/ocr.py +38 -10
- mistralai/sdk.py +2 -2
- mistralai/transcriptions.py +28 -12
- mistralai/types/basemodel.py +41 -3
- mistralai/utils/__init__.py +0 -3
- mistralai/utils/annotations.py +32 -8
- mistralai/utils/enums.py +60 -0
- mistralai/utils/forms.py +21 -10
- mistralai/utils/queryparams.py +14 -2
- mistralai/utils/requestbodies.py +3 -3
- mistralai/utils/retries.py +69 -5
- mistralai/utils/serializers.py +0 -20
- mistralai/utils/unmarshal_json_response.py +15 -1
- {mistralai-1.10.1.dist-info → mistralai-1.11.1.dist-info}/METADATA +24 -31
- {mistralai-1.10.1.dist-info → mistralai-1.11.1.dist-info}/RECORD +233 -230
- mistralai_azure/_version.py +3 -3
- mistralai_azure/basesdk.py +6 -0
- mistralai_azure/chat.py +27 -15
- mistralai_azure/httpclient.py +0 -1
- mistralai_azure/models/__init__.py +16 -1
- mistralai_azure/models/assistantmessage.py +1 -1
- mistralai_azure/models/chatcompletionchoice.py +10 -7
- mistralai_azure/models/chatcompletionrequest.py +8 -6
- mistralai_azure/models/chatcompletionstreamrequest.py +8 -6
- mistralai_azure/models/completionresponsestreamchoice.py +11 -7
- mistralai_azure/models/documenturlchunk.py +1 -1
- mistralai_azure/models/httpvalidationerror.py +4 -2
- mistralai_azure/models/imageurlchunk.py +1 -1
- mistralai_azure/models/mistralazureerror.py +11 -7
- mistralai_azure/models/mistralpromptmode.py +1 -1
- mistralai_azure/models/no_response_error.py +5 -1
- mistralai_azure/models/ocrpageobject.py +32 -5
- mistralai_azure/models/ocrrequest.py +20 -1
- mistralai_azure/models/ocrtableobject.py +34 -0
- mistralai_azure/models/referencechunk.py +1 -1
- mistralai_azure/models/responseformats.py +5 -1
- mistralai_azure/models/responsevalidationerror.py +2 -0
- mistralai_azure/models/sdkerror.py +2 -0
- mistralai_azure/models/systemmessage.py +1 -1
- mistralai_azure/models/textchunk.py +1 -1
- mistralai_azure/models/thinkchunk.py +1 -1
- mistralai_azure/models/tool.py +2 -6
- mistralai_azure/models/toolcall.py +2 -6
- mistralai_azure/models/toolchoice.py +2 -6
- mistralai_azure/models/toolchoiceenum.py +6 -1
- mistralai_azure/models/toolmessage.py +1 -1
- mistralai_azure/models/tooltypes.py +1 -1
- mistralai_azure/models/usermessage.py +1 -1
- mistralai_azure/ocr.py +26 -6
- mistralai_azure/types/basemodel.py +41 -3
- mistralai_azure/utils/__init__.py +0 -3
- mistralai_azure/utils/annotations.py +32 -8
- mistralai_azure/utils/enums.py +60 -0
- mistralai_azure/utils/forms.py +21 -10
- mistralai_azure/utils/queryparams.py +14 -2
- mistralai_azure/utils/requestbodies.py +3 -3
- mistralai_azure/utils/retries.py +69 -5
- mistralai_azure/utils/serializers.py +0 -20
- mistralai_azure/utils/unmarshal_json_response.py +15 -1
- mistralai_gcp/_version.py +3 -3
- mistralai_gcp/basesdk.py +6 -0
- mistralai_gcp/chat.py +27 -15
- mistralai_gcp/fim.py +27 -15
- mistralai_gcp/httpclient.py +0 -1
- mistralai_gcp/models/assistantmessage.py +1 -1
- mistralai_gcp/models/chatcompletionchoice.py +10 -7
- mistralai_gcp/models/chatcompletionrequest.py +8 -6
- mistralai_gcp/models/chatcompletionstreamrequest.py +8 -6
- mistralai_gcp/models/completionresponsestreamchoice.py +11 -7
- mistralai_gcp/models/fimcompletionrequest.py +6 -1
- mistralai_gcp/models/fimcompletionstreamrequest.py +6 -1
- mistralai_gcp/models/httpvalidationerror.py +4 -2
- mistralai_gcp/models/imageurlchunk.py +1 -1
- mistralai_gcp/models/mistralgcperror.py +11 -7
- mistralai_gcp/models/mistralpromptmode.py +1 -1
- mistralai_gcp/models/no_response_error.py +5 -1
- mistralai_gcp/models/referencechunk.py +1 -1
- mistralai_gcp/models/responseformats.py +5 -1
- mistralai_gcp/models/responsevalidationerror.py +2 -0
- mistralai_gcp/models/sdkerror.py +2 -0
- mistralai_gcp/models/systemmessage.py +1 -1
- mistralai_gcp/models/textchunk.py +1 -1
- mistralai_gcp/models/thinkchunk.py +1 -1
- mistralai_gcp/models/tool.py +2 -6
- mistralai_gcp/models/toolcall.py +2 -6
- mistralai_gcp/models/toolchoice.py +2 -6
- mistralai_gcp/models/toolchoiceenum.py +6 -1
- mistralai_gcp/models/toolmessage.py +1 -1
- mistralai_gcp/models/tooltypes.py +1 -1
- mistralai_gcp/models/usermessage.py +1 -1
- mistralai_gcp/types/basemodel.py +41 -3
- mistralai_gcp/utils/__init__.py +0 -3
- mistralai_gcp/utils/annotations.py +32 -8
- mistralai_gcp/utils/enums.py +60 -0
- mistralai_gcp/utils/forms.py +21 -10
- mistralai_gcp/utils/queryparams.py +14 -2
- mistralai_gcp/utils/requestbodies.py +3 -3
- mistralai_gcp/utils/retries.py +69 -5
- mistralai_gcp/utils/serializers.py +0 -20
- mistralai_gcp/utils/unmarshal_json_response.py +15 -1
- {mistralai-1.10.1.dist-info → mistralai-1.11.1.dist-info}/WHEEL +0 -0
- {mistralai-1.10.1.dist-info → mistralai-1.11.1.dist-info}/licenses/LICENSE +0 -0
mistralai_gcp/fim.py
CHANGED
@@ -6,7 +6,7 @@ from mistralai_gcp._hooks import HookContext
 from mistralai_gcp.types import OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
 from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response
-from typing import Any, Mapping, Optional, Union
+from typing import Any, Dict, Mapping, Optional, Union


 class Fim(BaseSDK):
@@ -28,13 +28,14 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) ->
+    ) -> eventstreaming.EventStream[models.CompletionEvent]:
         r"""Stream fim completion

         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
@@ -47,6 +48,7 @@ class Fim(BaseSDK):
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -72,6 +74,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -93,6 +96,7 @@ class Fim(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.FIMCompletionStreamRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -109,7 +113,7 @@ class Fim(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_fim",
-                oauth2_scopes=
+                oauth2_scopes=None,
                 security_source=self.sdk_configuration.security,
             ),
             request=req,
@@ -158,13 +162,14 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) ->
+    ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]:
         r"""Stream fim completion

         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
@@ -177,6 +182,7 @@ class Fim(BaseSDK):
         :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -202,6 +208,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -223,6 +230,7 @@ class Fim(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.FIMCompletionStreamRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -239,7 +247,7 @@ class Fim(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="stream_fim",
-                oauth2_scopes=
+                oauth2_scopes=None,
                 security_source=self.sdk_configuration.security,
             ),
             request=req,
@@ -288,13 +296,14 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) ->
+    ) -> models.FIMCompletionResponse:
         r"""Fim Completion

         FIM completion.
@@ -307,6 +316,7 @@ class Fim(BaseSDK):
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -332,6 +342,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -353,6 +364,7 @@ class Fim(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.FIMCompletionRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -369,7 +381,7 @@ class Fim(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="fim_completion_v1_fim_completions_post",
-                oauth2_scopes=
+                oauth2_scopes=None,
                 security_source=self.sdk_configuration.security,
             ),
             request=req,
@@ -379,9 +391,7 @@ class Fim(BaseSDK):

         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(
-                Optional[models.FIMCompletionResponse], http_res
-            )
+            return unmarshal_json_response(models.FIMCompletionResponse, http_res)
         if utils.match_response(http_res, "422", "application/json"):
             response_data = unmarshal_json_response(
                 models.HTTPValidationErrorData, http_res
@@ -412,13 +422,14 @@ class Fim(BaseSDK):
             ]
         ] = None,
         random_seed: OptionalNullable[int] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
         suffix: OptionalNullable[str] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) ->
+    ) -> models.FIMCompletionResponse:
         r"""Fim Completion

         FIM completion.
@@ -431,6 +442,7 @@ class Fim(BaseSDK):
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
         :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
         :param min_tokens: The minimum number of tokens to generate in the completion.
         :param retries: Override the default retry configuration for this method
@@ -456,6 +468,7 @@ class Fim(BaseSDK):
             stream=stream,
             stop=stop,
             random_seed=random_seed,
+            metadata=metadata,
             prompt=prompt,
             suffix=suffix,
             min_tokens=min_tokens,
@@ -477,6 +490,7 @@ class Fim(BaseSDK):
             get_serialized_body=lambda: utils.serialize_request_body(
                 request, False, False, "json", models.FIMCompletionRequest
             ),
+            allow_empty_value=None,
             timeout_ms=timeout_ms,
         )

@@ -493,7 +507,7 @@ class Fim(BaseSDK):
                 config=self.sdk_configuration,
                 base_url=base_url or "",
                 operation_id="fim_completion_v1_fim_completions_post",
-                oauth2_scopes=
+                oauth2_scopes=None,
                 security_source=self.sdk_configuration.security,
             ),
             request=req,
@@ -503,9 +517,7 @@ class Fim(BaseSDK):

         response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(
-                Optional[models.FIMCompletionResponse], http_res
-            )
+            return unmarshal_json_response(models.FIMCompletionResponse, http_res)
         if utils.match_response(http_res, "422", "application/json"):
             response_data = unmarshal_json_response(
                 models.HTTPValidationErrorData, http_res
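The hunks above add an optional `metadata` request field to both the streaming and blocking FIM methods and tighten the return annotations (`EventStream`/`EventStreamAsync` of `CompletionEvent`, and a non-optional `FIMCompletionResponse`). A minimal usage sketch follows; it assumes the `MistralGoogleCloud` client class and `region`/`project_id` constructor arguments from the package README, and the model name and metadata keys are placeholders, not part of this diff.

from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")  # assumed constructor

# Blocking FIM call; `metadata` is the request field added in this release.
res = client.fim.complete(
    model="codestral-2405",
    prompt="def fibonacci(n: int) -> int:",
    suffix="return fibonacci(n - 1) + fibonacci(n - 2)",
    metadata={"request_source": "example-script"},
)
print(res.choices[0].message.content)

# Streaming FIM call; the method is now annotated as returning an EventStream of
# CompletionEvent objects, terminated by the data: [DONE] sentinel described above.
with client.fim.stream(
    model="codestral-2405",
    prompt="def fibonacci(n: int) -> int:",
    metadata={"request_source": "example-script"},
) as event_stream:
    for event in event_stream:
        print(event.data.choices[0].delta.content or "", end="")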
mistralai_gcp/httpclient.py
CHANGED
mistralai_gcp/models/chatcompletionchoice.py
CHANGED

@@ -3,14 +3,19 @@
 from __future__ import annotations
 from .assistantmessage import AssistantMessage, AssistantMessageTypedDict
 from mistralai_gcp.types import BaseModel, UnrecognizedStr
-from mistralai_gcp.utils import validate_open_enum
-from pydantic.functional_validators import PlainValidator
 from typing import Literal, Union
-from typing_extensions import
+from typing_extensions import TypedDict


 ChatCompletionChoiceFinishReason = Union[
-    Literal[
+    Literal[
+        "stop",
+        "length",
+        "model_length",
+        "error",
+        "tool_calls",
+    ],
+    UnrecognizedStr,
 ]


@@ -25,6 +30,4 @@ class ChatCompletionChoice(BaseModel):

     message: AssistantMessage

-    finish_reason:
-        ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False))
-    ]
+    finish_reason: ChatCompletionChoiceFinishReason
mistralai_gcp/models/chatcompletionrequest.py
CHANGED

@@ -18,10 +18,9 @@ from mistralai_gcp.types import (
     UNSET,
     UNSET_SENTINEL,
 )
-from mistralai_gcp.utils import get_discriminator
+from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


@@ -89,6 +88,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+    metadata: NotRequired[Nullable[Dict[str, Any]]]
     response_format: NotRequired[ResponseFormatTypedDict]
     r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
@@ -134,6 +134,8 @@ class ChatCompletionRequest(BaseModel):
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+    metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
     response_format: Optional[ResponseFormat] = None
     r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""

@@ -158,9 +160,7 @@ class ChatCompletionRequest(BaseModel):
     parallel_tool_calls: Optional[bool] = None
     r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""

-    prompt_mode:
-        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
-    ] = UNSET
+    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
     r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     @model_serializer(mode="wrap")
@@ -172,6 +172,7 @@ class ChatCompletionRequest(BaseModel):
             "stream",
             "stop",
             "random_seed",
+            "metadata",
             "response_format",
             "tools",
             "tool_choice",
@@ -186,6 +187,7 @@ class ChatCompletionRequest(BaseModel):
             "temperature",
             "max_tokens",
             "random_seed",
+            "metadata",
             "tools",
             "n",
             "prompt_mode",
mistralai_gcp/models/chatcompletionstreamrequest.py
CHANGED

@@ -18,10 +18,9 @@ from mistralai_gcp.types import (
     UNSET,
     UNSET_SENTINEL,
 )
-from mistralai_gcp.utils import get_discriminator
+from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
-from
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


@@ -84,6 +83,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+    metadata: NotRequired[Nullable[Dict[str, Any]]]
     response_format: NotRequired[ResponseFormatTypedDict]
     r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
@@ -128,6 +128,8 @@ class ChatCompletionStreamRequest(BaseModel):
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+    metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
     response_format: Optional[ResponseFormat] = None
     r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""

@@ -152,9 +154,7 @@ class ChatCompletionStreamRequest(BaseModel):
     parallel_tool_calls: Optional[bool] = None
     r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel."""

-    prompt_mode:
-        OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
-    ] = UNSET
+    prompt_mode: OptionalNullable[MistralPromptMode] = UNSET
     r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     @model_serializer(mode="wrap")
@@ -166,6 +166,7 @@ class ChatCompletionStreamRequest(BaseModel):
             "stream",
             "stop",
             "random_seed",
+            "metadata",
             "response_format",
             "tools",
             "tool_choice",
@@ -180,6 +181,7 @@ class ChatCompletionStreamRequest(BaseModel):
             "temperature",
             "max_tokens",
             "random_seed",
+            "metadata",
             "tools",
             "n",
             "prompt_mode",
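Both the blocking and streaming chat request models gain the same nullable `metadata` dictionary and list it in the wrapped model serializer's field groups (the two "metadata" entries in each file above). A small illustrative sketch of constructing the updated model directly; the message content and metadata keys are made up, and `model_dump` is shown only to inspect the resulting payload:

from mistralai_gcp import models

req = models.ChatCompletionRequest(
    model="mistral-large-2411",
    messages=[{"role": "user", "content": "Summarize this diff in one sentence."}],
    metadata={"trace_id": "abc-123"},  # new optional, nullable field in 1.11.x
)

# Inspect the serialized request; leaving `metadata` unset keeps it out of the dump.
print(req.model_dump(by_alias=True, exclude_none=True))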
mistralai_gcp/models/completionresponsestreamchoice.py
CHANGED

@@ -3,14 +3,20 @@
 from __future__ import annotations
 from .deltamessage import DeltaMessage, DeltaMessageTypedDict
 from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr
-from mistralai_gcp.utils import validate_open_enum
 from pydantic import model_serializer
-from pydantic.functional_validators import PlainValidator
 from typing import Literal, Union
-from typing_extensions import
+from typing_extensions import TypedDict


-FinishReason = Union[
+FinishReason = Union[
+    Literal[
+        "stop",
+        "length",
+        "error",
+        "tool_calls",
+    ],
+    UnrecognizedStr,
+]


 class CompletionResponseStreamChoiceTypedDict(TypedDict):
@@ -24,9 +30,7 @@ class CompletionResponseStreamChoice(BaseModel):

     delta: DeltaMessage

-    finish_reason:
-        Nullable[FinishReason], PlainValidator(validate_open_enum(False))
-    ]
+    finish_reason: Nullable[FinishReason]

     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
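In both choice models the `finish_reason` annotation drops its explicit `PlainValidator(validate_open_enum(...))` wrapper but keeps the open-enum shape: a `Literal` of known values unioned with `UnrecognizedStr`. Downstream code should therefore keep treating it as an open set; a small defensive helper, using only the reason names visible in the hunks above:

from typing import Optional

def describe_finish(finish_reason: Optional[str]) -> str:
    # Known values from the Literal in this diff; anything outside this set is
    # expected to arrive as a plain (unrecognized) string rather than fail validation.
    known = {"stop", "length", "model_length", "error", "tool_calls"}
    if finish_reason is None:
        return "still streaming"
    if finish_reason in known:
        return f"finished: {finish_reason}"
    return f"finished with unrecognized reason: {finish_reason}"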
mistralai_gcp/models/fimcompletionrequest.py
CHANGED

@@ -9,7 +9,7 @@ from mistralai_gcp.types import (
     UNSET_SENTINEL,
 )
 from pydantic import model_serializer
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from typing_extensions import NotRequired, TypeAliasType, TypedDict


@@ -42,6 +42,7 @@ class FIMCompletionRequestTypedDict(TypedDict):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+    metadata: NotRequired[Nullable[Dict[str, Any]]]
     suffix: NotRequired[Nullable[str]]
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
     min_tokens: NotRequired[Nullable[int]]
@@ -73,6 +74,8 @@ class FIMCompletionRequest(BaseModel):
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+    metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
     suffix: OptionalNullable[str] = UNSET
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""

@@ -88,6 +91,7 @@ class FIMCompletionRequest(BaseModel):
             "stream",
             "stop",
             "random_seed",
+            "metadata",
             "suffix",
             "min_tokens",
         ]
@@ -95,6 +99,7 @@ class FIMCompletionRequest(BaseModel):
             "temperature",
             "max_tokens",
             "random_seed",
+            "metadata",
             "suffix",
             "min_tokens",
         ]
mistralai_gcp/models/fimcompletionstreamrequest.py
CHANGED

@@ -9,7 +9,7 @@ from mistralai_gcp.types import (
     UNSET_SENTINEL,
 )
 from pydantic import model_serializer
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from typing_extensions import NotRequired, TypeAliasType, TypedDict


@@ -41,6 +41,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict):
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
     random_seed: NotRequired[Nullable[int]]
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+    metadata: NotRequired[Nullable[Dict[str, Any]]]
     suffix: NotRequired[Nullable[str]]
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
     min_tokens: NotRequired[Nullable[int]]
@@ -71,6 +72,8 @@ class FIMCompletionStreamRequest(BaseModel):
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

+    metadata: OptionalNullable[Dict[str, Any]] = UNSET
+
     suffix: OptionalNullable[str] = UNSET
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""

@@ -86,6 +89,7 @@ class FIMCompletionStreamRequest(BaseModel):
             "stream",
             "stop",
             "random_seed",
+            "metadata",
             "suffix",
             "min_tokens",
         ]
@@ -93,6 +97,7 @@ class FIMCompletionStreamRequest(BaseModel):
             "temperature",
             "max_tokens",
             "random_seed",
+            "metadata",
             "suffix",
             "min_tokens",
         ]
mistralai_gcp/models/httpvalidationerror.py
CHANGED

@@ -2,6 +2,7 @@

 from __future__ import annotations
 from .validationerror import ValidationError
+from dataclasses import dataclass, field
 import httpx
 from mistralai_gcp.models import MistralGcpError
 from mistralai_gcp.types import BaseModel
@@ -12,8 +13,9 @@ class HTTPValidationErrorData(BaseModel):
     detail: Optional[List[ValidationError]] = None


+@dataclass(unsafe_hash=True)
 class HTTPValidationError(MistralGcpError):
-    data: HTTPValidationErrorData
+    data: HTTPValidationErrorData = field(hash=False)

     def __init__(
         self,
@@ -23,4 +25,4 @@ class HTTPValidationError(MistralGcpError):
     ):
         message = body or raw_response.text
         super().__init__(message, raw_response, body)
-        self
+        object.__setattr__(self, "data", data)
mistralai_gcp/models/imageurlchunk.py
CHANGED

@@ -15,7 +15,7 @@ ImageURLChunkImageURLTypedDict = TypeAliasType(
 ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str])


-ImageURLChunkType = Literal["image_url"]
+ImageURLChunkType = Literal["image_url",]


 class ImageURLChunkTypedDict(TypedDict):
mistralai_gcp/models/mistralgcperror.py
CHANGED

@@ -2,25 +2,29 @@

 import httpx
 from typing import Optional
+from dataclasses import dataclass, field


+@dataclass(unsafe_hash=True)
 class MistralGcpError(Exception):
     """The base class for all HTTP error responses."""

     message: str
     status_code: int
     body: str
-    headers: httpx.Headers
-    raw_response: httpx.Response
+    headers: httpx.Headers = field(hash=False)
+    raw_response: httpx.Response = field(hash=False)

     def __init__(
         self, message: str, raw_response: httpx.Response, body: Optional[str] = None
     ):
-        self
-        self
-
-
-
+        object.__setattr__(self, "message", message)
+        object.__setattr__(self, "status_code", raw_response.status_code)
+        object.__setattr__(
+            self, "body", body if body is not None else raw_response.text
+        )
+        object.__setattr__(self, "headers", raw_response.headers)
+        object.__setattr__(self, "raw_response", raw_response)

     def __str__(self):
         return self.message
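MistralGcpError and its subclasses are now declared as `@dataclass(unsafe_hash=True)`, with the non-hashable `headers`/`raw_response` members (and `data` on HTTPValidationError) excluded via `field(hash=False)` and attributes assigned through `object.__setattr__`. A hypothetical consumption sketch; the client class, constructor arguments, endpoint, and model name are assumptions rather than part of this diff, and the only point is that error instances can now go into hash-based containers:

from mistralai_gcp import MistralGoogleCloud, models

client = MistralGoogleCloud(region="europe-west4", project_id="my-project")  # assumed constructor
seen_errors = set()

try:
    client.chat.complete(
        model="mistral-large-2411",
        messages=[{"role": "user", "content": "ping"}],
    )
except models.HTTPValidationError as err:
    seen_errors.add(err)  # hashable after the dataclass change
    print(err.status_code, err.data.detail)
except models.SDKError as err:
    seen_errors.add(err)
    print(err.message[:200])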
mistralai_gcp/models/no_response_error.py
CHANGED

@@ -1,12 +1,16 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

+from dataclasses import dataclass
+
+
+@dataclass(unsafe_hash=True)
 class NoResponseError(Exception):
     """Error raised when no HTTP response is received from the server."""

     message: str

     def __init__(self, message: str = "No response received"):
-        self
+        object.__setattr__(self, "message", message)
         super().__init__(message)

     def __str__(self):
mistralai_gcp/models/responsevalidationerror.py
CHANGED

@@ -2,10 +2,12 @@

 import httpx
 from typing import Optional
+from dataclasses import dataclass

 from mistralai_gcp.models import MistralGcpError


+@dataclass(unsafe_hash=True)
 class ResponseValidationError(MistralGcpError):
     """Error raised when there is a type mismatch between the response data and the expected Pydantic model."""

mistralai_gcp/models/sdkerror.py
CHANGED
@@ -2,12 +2,14 @@

 import httpx
 from typing import Optional
+from dataclasses import dataclass

 from mistralai_gcp.models import MistralGcpError

 MAX_MESSAGE_LEN = 10_000


+@dataclass(unsafe_hash=True)
 class SDKError(MistralGcpError):
     """The fallback error class if no more specific error class is matched."""
